[
  {
    "path": ".github/workflows/build-comfy-base-images.yml",
    "content": "name: Build Comfy Base Images\n\non:\n  workflow_dispatch:\n    inputs:\n      comfy_version:\n        description: ComfyUI version\n        required: true\n        default: \"0.19.3\"\n      torch_version:\n        description: PyTorch version\n        required: true\n        default: \"2.8.0\"\n      cuda_version:\n        description: CUDA version\n        required: true\n        default: \"12.8\"\n\njobs:\n  build-and-push:\n    permissions:\n      contents: read\n      packages: write\n    runs-on: ubuntu-24.04-2core-8gb-75gb\n\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Login to GitHub Container Registry\n        run: echo \"${{ secrets.GITHUB_TOKEN }}\" | docker login ghcr.io -u ${{ github.actor }} --password-stdin\n\n      - name: Build base images\n        working-directory: ./docker\n        run: ./build-comfy-base-images ${{ inputs.comfy_version }} ${{ inputs.torch_version }} ${{ inputs.cuda_version }}\n\n      - name: Push base images\n        working-directory: ./docker\n        run: ./push-comfy-base-images ${{ inputs.comfy_version }} ${{ inputs.torch_version }} ${{ inputs.cuda_version }}\n"
  },
  {
    "path": ".github/workflows/create-release.yml",
    "content": "name: Create Release\n\non:\n  workflow_dispatch: {}\n\njobs:\n  build-and-release:\n    runs-on: ubuntu-latest\n\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Set up Node.js\n        uses: actions/setup-node@v4\n        with:\n          node-version: \"20\"\n\n      - name: Install dependencies\n        run: npm install\n\n      - name: Build\n        run: npm run build-binary\n\n      - name: Get version from package.json\n        id: version\n        run: echo \"version=$(jq -r .version package.json)\" >> \"$GITHUB_OUTPUT\"\n\n      - name: Get the title and body from the last merged PR\n        id: pr-output\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        run: |\n          info=$(gh pr list --state merged --limit 1 --json title,body | jq -r '.[0]')\n          {\n            echo 'title<<EOF'\n            echo $info | jq -r '.title'\n            echo EOF\n          } >> \"$GITHUB_OUTPUT\"\n          {\n            echo 'body<<EOF'\n            echo $info | jq -r '.body'\n            echo EOF\n          } >> \"$GITHUB_OUTPUT\"\n\n      - name: Create a release\n        uses: actions/create-release@v1\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        with:\n          tag_name: ${{ steps.version.outputs.version }}\n          release_name: ${{ steps.pr-output.outputs.title }}\n          body: ${{ steps.pr-output.outputs.body }}\n          draft: true\n          prerelease: false\n\n      - name: Upload release artifacts\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        run: |\n          gh release upload ${{ steps.version.outputs.version }} ./bin/comfyui-api#Linux_x64\n"
  },
  {
    "path": ".gitignore",
    "content": "# Logs\nlogs\n*.log\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\nlerna-debug.log*\n.pnpm-debug.log*\n\n# Diagnostic reports (https://nodejs.org/api/report.html)\nreport.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json\n\n# Runtime data\npids\n*.pid\n*.seed\n*.pid.lock\n\n# Directory for instrumented libs generated by jscoverage/JSCover\nlib-cov\n\n# Coverage directory used by tools like istanbul\ncoverage\n*.lcov\n\n# nyc test coverage\n.nyc_output\n\n# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)\n.grunt\n\n# Bower dependency directory (https://bower.io/)\nbower_components\n\n# node-waf configuration\n.lock-wscript\n\n# Compiled binary addons (https://nodejs.org/api/addons.html)\nbuild/Release\n\n# Dependency directories\nnode_modules/\njspm_packages/\n\n# Snowpack dependency directory (https://snowpack.dev/)\nweb_modules/\n\n# TypeScript cache\n*.tsbuildinfo\n\n# Optional npm cache directory\n.npm\n\n# Optional eslint cache\n.eslintcache\n\n# Optional stylelint cache\n.stylelintcache\n\n# Microbundle cache\n.rpt2_cache/\n.rts2_cache_cjs/\n.rts2_cache_es/\n.rts2_cache_umd/\n\n# Optional REPL history\n.node_repl_history\n\n# Output of 'npm pack'\n*.tgz\n\n# Yarn Integrity file\n.yarn-integrity\n\n# dotenv environment variable files\n.env\n.env.development.local\n.env.test.local\n.env.production.local\n.env.local\n\n# parcel-bundler cache (https://parceljs.org/)\n.cache\n.parcel-cache\n\n# Next.js build output\n.next\nout\n\n# Nuxt.js build / generate output\n.nuxt\ndist\n\n# Gatsby files\n.cache/\n# Comment in the public line in if your project uses Gatsby and not Next.js\n# https://nextjs.org/blog/next-9-1#public-directory-support\n# public\n\n# vuepress build output\n.vuepress/dist\n\n# vuepress v2.x temp and cache directory\n.temp\n.cache\n\n# Docusaurus cache and generated files\n.docusaurus\n\n# Serverless directories\n.serverless/\n\n# FuseBox cache\n.fusebox/\n\n# DynamoDB Local files\n.dynamodb/\n\n# TernJS port file\n.tern-port\n\n# Stores VSCode versions used for testing VSCode extensions\n.vscode-test\n\n# yarn v2\n.yarn/cache\n.yarn/unplugged\n.yarn/build-state.yml\n.yarn/install-state.gz\n.pnp.*\n\nbin/\n\n*.safetensors\n*.ckpt\n*.pth\ntest/docker-image/models\n\nscratch\n\ntest/output/*.png\n\ncache/"
  },
  {
    "path": ".nvmrc",
    "content": "v20.18.1\n"
  },
  {
    "path": "DEVELOPING.md",
    "content": "# Developing ComfyUI-API\n\nThis document provides guidelines for developers who want to contribute to the ComfyUI-API project.\nIt covers setting up the development environment, coding standards, testing procedures, and how to submit contributions.\n\n- [Developing ComfyUI-API](#developing-comfyui-api)\n  - [Submitting Contributions](#submitting-contributions)\n  - [Core Design Principles](#core-design-principles)\n  - [Setting Up the Development Environment](#setting-up-the-development-environment)\n  - [Testing Procedures](#testing-procedures)\n    - [Running Tests](#running-tests)\n  - [Generating New Workflow Endpoints](#generating-new-workflow-endpoints)\n    - [Automating with Claude 4 Sonnet](#automating-with-claude-4-sonnet)\n    - [Debugging Custom Workflows](#debugging-custom-workflows)\n  - [Storage Providers](#storage-providers)\n    - [Adding a New Storage Provider](#adding-a-new-storage-provider)\n\n## Submitting Contributions\n\nContributions are welcome!\nComfyUI is a powerful tool with MANY options, and it's likely that not all of them are currently well supported by the `comfyui-api` server.\nPlease open an issue with as much information as possible about the problem you're facing or the feature you need.\nIf you have encountered a bug, please include the steps to reproduce it, and any relevant logs or error messages.\nIf you are able, adding a failing test is the best way to ensure your issue is resolved quickly.\nLet's make productionizing ComfyUI as easy as possible!\n\n## Core Design Principles\n\nWhen contributing to the ComfyUI-API project, please keep the following design principles in mind:\n\n- **Asynchronous Operations**: Use asynchronous programming practices wherever possible to ensure the server remains responsive. Avoid blocking the event loop.\n- **Modularity**: Because the range of uses for this API is so broad, strive to keep components modular and loosely coupled. This will make it easier to add new features and maintain existing ones.\n- **Don't Duplicate Existing ComfyUI functionality**: Wherever possible, leverage existing ComfyUI api endpoints and functionality, rather than re-implementing it in the API server. Local ComfyUI can be accessed from the the API server at `config.comfyURL`.\n- **Error Handling**: Implement robust error handling to gracefully manage unexpected situations. Provide clear and informative error messages to users. Errors should never crash the server unless recovery is deemed impossible.\n- **Testing**: If your feature or bug fix is significant, please include tests to verify its functionality. 
\n\n## Setting Up the Development Environment\n\n```shell\ngit clone https://github.com/SaladTechnologies/comfyui-api.git\ncd comfyui-api\nnpm install\nnpm run build-binary\n```\n\nThis will create a `comfyui-api` binary in the `bin/` directory, which is mounted into the Docker container when you run `docker compose up`.\n\nWhenever you make changes, you will need to re-run `npm run build-binary` to rebuild the binary, and then restart the Docker container to see your changes.\n\n## Testing Procedures\n\nThis project uses [mocha](https://mochajs.org/) and [earl](https://earl.fun/) for testing.\nTests are administered against a locally running instance of the ComfyUI API server, which can be started with Docker Compose, and actual images are generated during the tests.\n\nAdditional services are present in the docker-compose file to provide mock storage services for testing uploads and downloads.\nThese services are not required for normal operation of the API server.\n\n### Running Tests\n\nIn one terminal, start the test server:\n\n```shell\ndocker compose up --build\n```\n\n> `--build` is only needed the first time, or if you make changes to the file-server code.\n\nIn another terminal, run the tests:\n\n```shell\nnpm run quick-test\n```\n\nThis will take several minutes, but can be done with very modest hardware.\nAll tests in the `quick-test` suite use SD1.5 models, which are small and fast to run.\nThe models used are defined in [the manifest](./manifest.yml), as well as in a couple of [test workflows](./test/workflows/).\n\n## Generating New Workflow Endpoints\n\nSince the ComfyUI prompt format is a little obtuse, it's common to wrap the `/prompt` endpoint with a more user-friendly interface.\n\nThis can be done by adding conforming `.js` or `.ts` files to the `/workflows` directory in your dockerfile.\nYou can see some examples in [`./workflows`](./workflows/).\nTypeScript files will be automatically transpiled to JavaScript files, so you can use either.\n\nEndpoints are loaded at runtime via `eval` in the context of `src/workflows`, so you can use any Node.js or TypeScript features you want, including importing other files such as the API config object.\nBy loading extra endpoints this way, no rebuild is required to add new endpoints, and you can continue using the pre-built binary.\nYou can see many examples of this in the [Salad Recipes](https://github.com/SaladTechnologies/salad-recipes/tree/master/src) repo, where this API powers all of the ComfyUI recipes.\n\nHere is an example text-to-image workflow file.\n\n```typescript\nimport { z } from \"zod\";\nimport config from \"../config\";\n\nconst ComfyNodeSchema = z.object({\n  inputs: z.any(),\n  class_type: z.string(),\n  _meta: z.any().optional(),\n});\n\ntype ComfyNode = z.infer<typeof ComfyNodeSchema>;\ntype ComfyPrompt = Record<string, ComfyNode>;\n\ninterface Workflow {\n  RequestSchema: z.ZodObject<any, any>;\n  generateWorkflow: (input: any) => Promise<ComfyPrompt> | ComfyPrompt;\n  description?: string;\n  summary?: string;\n}\n\nconst RequestSchema = z.object({\n  prompt: z.string().describe(\"The positive prompt for image generation\"),\n  negative_prompt: z\n    .string()\n    .optional()\n    .default(\"text, watermark\")\n    .describe(\"The negative prompt for image generation\"),\n  width: z\n    .number()\n    .int()\n    .min(256)\n    .max(2048)\n    .optional()\n    .default(512)\n    .describe(\"Width of the generated image\"),\n  height: z\n    .number()\n    
.int()\n    .min(256)\n    .max(2048)\n    .optional()\n    .default(512)\n    .describe(\"Height of the generated image\"),\n  seed: z\n    .number()\n    .int()\n    .optional()\n    .default(() => Math.floor(Math.random() * 100000000000))\n    .describe(\"Seed for random number generation\"),\n  steps: z\n    .number()\n    .int()\n    .min(1)\n    .max(100)\n    .optional()\n    .default(20)\n    .describe(\"Number of sampling steps\"),\n  cfg_scale: z\n    .number()\n    .min(0)\n    .max(20)\n    .optional()\n    .default(8)\n    .describe(\"Classifier-free guidance scale\"),\n  sampler_name: config.samplers\n    .optional()\n    .default(\"euler\")\n    .describe(\"Name of the sampler to use\"),\n  scheduler: config.schedulers\n    .optional()\n    .default(\"normal\")\n    .describe(\"Type of scheduler to use\"),\n  denoise: z\n    .number()\n    .min(0)\n    .max(1)\n    .optional()\n    .default(1)\n    .describe(\"Denoising strength\"),\n  checkpoint: z\n    .string()\n    .refine((val) => config.models.checkpoints.all.includes(val))\n    .optional()\n    .default(config.warmupCkpt || config.models.checkpoints.all[0])\n    .describe(\"Checkpoint to use\"),\n});\n\ntype InputType = z.infer<typeof RequestSchema>;\n\nfunction generateWorkflow(input: InputType): ComfyPrompt {\n  return {\n    \"3\": {\n      inputs: {\n        seed: input.seed,\n        steps: input.steps,\n        cfg: input.cfg_scale,\n        sampler_name: input.sampler_name,\n        scheduler: input.scheduler,\n        denoise: input.denoise,\n        model: [\"4\", 0],\n        positive: [\"6\", 0],\n        negative: [\"7\", 0],\n        latent_image: [\"5\", 0],\n      },\n      class_type: \"KSampler\",\n      _meta: {\n        title: \"KSampler\",\n      },\n    },\n    \"4\": {\n      inputs: {\n        ckpt_name: input.checkpoint,\n      },\n      class_type: \"CheckpointLoaderSimple\",\n      _meta: {\n        title: \"Load Checkpoint\",\n      },\n    },\n    \"5\": {\n      inputs: {\n        width: input.width,\n        height: input.height,\n        batch_size: 1,\n      },\n      class_type: \"EmptyLatentImage\",\n      _meta: {\n        title: \"Empty Latent Image\",\n      },\n    },\n    \"6\": {\n      inputs: {\n        text: input.prompt,\n        clip: [\"4\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Prompt)\",\n      },\n    },\n    \"7\": {\n      inputs: {\n        text: input.negative_prompt,\n        clip: [\"4\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Prompt)\",\n      },\n    },\n    \"8\": {\n      inputs: {\n        samples: [\"3\", 0],\n        vae: [\"4\", 2],\n      },\n      class_type: \"VAEDecode\",\n      _meta: {\n        title: \"VAE Decode\",\n      },\n    },\n    \"9\": {\n      inputs: {\n        filename_prefix: \"output\",\n        images: [\"8\", 0],\n      },\n      class_type: \"SaveImage\",\n      _meta: {\n        title: \"Save Image\",\n      },\n    },\n  };\n}\n\nconst workflow: Workflow = {\n  RequestSchema,\n  generateWorkflow,\n  summary: \"Text to Image\",\n  description: \"Generate an image from a text prompt\",\n};\n\nexport default workflow;\n```\n\nNote your file MUST export a `Workflow` object, which contains a `RequestSchema` and a `generateWorkflow` function. 
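The `RequestSchema` is a zod schema that describes the input to the workflow, and the `generateWorkflow` function takes the input and returns a ComfyUI API-format prompt.\n\nThe workflow endpoints will follow whatever directory structure you provide.\nFor example, a directory structure like this:\n\n```shell\n/workflows\n└── sdxl\n    ├── img2img.ts\n    ├── txt2img-with-refiner.ts\n    └── txt2img.ts\n```\n\nWould yield the following endpoints:\n\n- `POST /workflow/sdxl/img2img`\n- `POST /workflow/sdxl/txt2img-with-refiner`\n- `POST /workflow/sdxl/txt2img`\n\nThese endpoints will be present in the swagger docs, and can be used to interact with the API.\nIf you provide descriptions in your zod schemas, these will be used to create a markdown table of inputs in the swagger docs.\n\nFor example, assuming the directory above and the default port, the `txt2img` endpoint could be called like this (the body fields are whatever your zod schema accepts):\n\n```shell\ncurl -X POST http://localhost:3000/workflow/sdxl/txt2img \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\"prompt\": \"a cat wearing a top hat\", \"width\": 768, \"height\": 768}'\n```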
\n\n### Automating with an LLM\n\nCreating these endpoints can be done mostly automatically by a large language model, given the JSON prompt graph.\nA [system prompt](./claude-endpoint-creation-prompt.md) to do this is included in this repository, as is [a script that uses this prompt](./generate-workflow) to create endpoints. It requires `jq` and `curl` to be installed.\n\n```shell\n./generate-workflow <inputFile> <outputFile>\n```\n\nWhere `<inputFile>` is the JSON prompt graph, and `<outputFile>` is the output file to write the generated workflow to.\n\nThe script supports two LLM providers and picks one based on your environment:\n\n| Provider | Environment variable | Model used |\n|---|---|---|\n| Anthropic (default) | `ANTHROPIC_API_KEY` | `claude-sonnet-4-20250514` |\n| [MiniMax](https://www.minimax.io/) | `MINIMAX_API_KEY` | `MiniMax-M2.7` (204 K context) |\n\nWhen both variables are set, Anthropic is preferred.\nMiniMax uses the OpenAI-compatible endpoint (`https://api.minimax.io/v1`) so no extra dependencies are needed.\n\nAs with all AI-generated code, it is strongly recommended to review the generated code before using it in production.\n\n### Debugging Custom Workflows\n\nWhen developing or troubleshooting custom workflows, enable debug logging to see detailed information about what's happening under the hood.\n\n#### Enabling Debug Logging\n\nSet the `LOG_LEVEL` environment variable to `debug`:\n\n```shell\n# Docker\ndocker run -e LOG_LEVEL=debug ...\n\n# Docker Compose\nenvironment:\n  - LOG_LEVEL=debug\n```\n\n#### What Debug Logging Shows\n\nWith `LOG_LEVEL=debug`, the server will log:\n\n1. **Workflow Loading** (at startup):\n   - Which workflow directories are being scanned\n   - TypeScript files being transpiled\n   - Each workflow file being evaluated\n   - Successfully loaded workflows\n   - Warnings for files that don't export valid Workflow objects\n   - Errors if workflow files fail to evaluate (with stack traces)\n\n2. **Workflow Execution** (per request):\n   - The input received from the request (`Workflow input received`)\n   - The generated ComfyUI prompt (`Generated ComfyUI prompt from workflow`)\n   - The full request body sent to `/prompt` (`Sending request to /prompt endpoint`)\n   - Any errors from the `/prompt` endpoint (including the full prompt that failed)\n\n#### Common Issues and Solutions\n\n**Problem: 400 error from `/prompt` endpoint with validation errors**\n\nDebug logs will show the exact prompt being sent. Common causes:\n
- Missing required nodes (e.g., no `SaveImage` node with `filename_prefix`)\n- Invalid node references (e.g., referencing a node ID that doesn't exist)\n- Invalid input types (e.g., string where number expected)\n\nCheck the `promptRequestBody` in the error log to see exactly what was sent.\n\n**Problem: Workflow file not loading**\n\nDebug logs will show if the file:\n- Failed to transpile (TypeScript syntax error)\n- Failed to evaluate (JavaScript runtime error)\n- Doesn't export a valid Workflow object\n\n**Problem: Workflow generates wrong output**\n\nUse debug logs to compare:\n1. The `input` received by the workflow\n2. The `prompt` generated by your `generateWorkflow` function\n3. A known-working prompt exported from ComfyUI's web interface\n\n#### Example Debug Output\n\n```\n{\"level\":30,\"workflow\":\"txt2img\",\"msg\":\"Workflow input received\",\"input\":{\"prompt\":\"a cat\",\"width\":512}}\n{\"level\":30,\"workflow\":\"txt2img\",\"msg\":\"Generated ComfyUI prompt from workflow\",\"prompt\":{\"3\":{\"inputs\":{\"seed\":123...}}}}\n{\"level\":30,\"workflow\":\"txt2img\",\"msg\":\"Sending request to /prompt endpoint\",\"promptRequestBody\":{...}}\n{\"level\":30,\"workflow\":\"txt2img\",\"msg\":\"Workflow completed successfully\",\"status\":200}\n```\n\nWhen a workflow fails:\n```\n{\"level\":50,\"workflow\":\"txt2img\",\"msg\":\"Workflow request to /prompt endpoint failed\",\"status\":400,\"error\":\"Prompt must contain a node with a \\\"filename_prefix\\\" input\",\"location\":\"prompt\",\"promptRequestBody\":{...}}\n```\n\n#### Inspecting Prompts Without Debug Logging\n\nIf you can't enable debug logging, you can still inspect your generated prompts by:\n\n1. **Using the `/docs` endpoint**: Access the Swagger UI at `http://localhost:3000/docs` to test your workflow endpoints interactively\n2. **Testing generateWorkflow locally**: Import your workflow file and call `generateWorkflow()` with test inputs to see the output (see the sketch below)\n3. **Comparing with ComfyUI**: Export a working prompt from ComfyUI's web interface and compare it to your generated prompt
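\n\nFor the second option, here is a minimal sketch (assuming a workflow file like the `txt2img` example above; adjust the import path to your file):\n\n```typescript\n// inspect-prompt.ts — a local harness for eyeballing generated prompts.\nimport workflow from \"./workflows/txt2img\";\n\n// Parse test input through the same zod schema the server uses, applying defaults.\nconst input = workflow.RequestSchema.parse({ prompt: \"a cat\" });\n\n// generateWorkflow may be sync or async, so normalize with Promise.resolve.\nPromise.resolve(workflow.generateWorkflow(input)).then((prompt) => {\n  console.log(JSON.stringify(prompt, null, 2));\n});\n```\n\n## Storage Providers\n\nStorage providers are modular components that handle the downloading of models and input media, as well as the uploading of completed outputs.\nThe ComfyUI API server supports multiple storage backends, each with its own configuration and usage.\nThey all live in `src/storage-providers/` and must be exported in `src/storage-providers/index.ts`.\nThey are defined by the `StorageProvider` interface in `src/types.ts`:\n\n```typescript\nexport interface StorageProvider {\n  /**\n   * The key in a request body that indicates this storage provider should be used for upload.\n   * Must be unique across all storage providers, and must be included if `uploadFile` is implemented.\n   */\n  requestBodyUploadKey?: string;\n\n  /**\n   * The zod schema for the request body field that indicates this storage provider should\n   * be used for upload. 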
Must be included if `requestBodyUploadKey` is defined.\n   */\n  requestBodyUploadSchema?: z.ZodObject<any, any>;\n\n  /**\n   * Takes the inputs from the request body and generates a URL for uploading.\n   * @param inputs\n   */\n  createUrl(inputs: any): string;\n\n  /**\n   * Test if the given URL can be handled by this storage provider.\n   * @param url URL to test\n   */\n  testUrl(url: string): boolean;\n\n  /**\n   * Upload a file to the given URL.\n   * @param url URL to upload to\n   * @param fileOrPath File path or buffer to upload\n   * @param contentType MIME type of the file\n   *\n   * @returns An Upload object that can be used to start and abort the upload.\n   */\n  uploadFile?(\n    url: string,\n    fileOrPath: string | Buffer,\n    contentType: string\n  ): Upload;\n\n  /**\n   * Download a file from the given URL to the specified output directory.\n   * @param url URL to download from\n   * @param outputDir Directory to save the downloaded file\n   * @param filenameOverride Optional filename to use instead of auto-generated one\n   *\n   * @resolves The path to the downloaded file\n   */\n  downloadFile?(\n    url: string,\n    outputDir: string,\n    filenameOverride?: string\n  ): Promise<string>;\n}\n```\n\n- Each storage provider must implement the `StorageProvider` interface, which includes methods for creating upload URLs, testing if a URL can be handled by the provider, uploading files, and downloading files.\n- The server will automatically select the appropriate storage provider based on the URL provided in the request body, using the `testUrl` method of each provider to determine which one can handle the URL.\n- Upload and download methods are optional, as some providers may only support one or the other.\n\n### Adding a New Storage Provider\n\nTo add a new storage provider, follow these steps:\n\n1. Create a new file in the `src/storage-providers/` directory for your provider, e.g., `src/storage-providers/my-provider.ts`.\n2. Implement the `StorageProvider` interface in your new file. **Be sure to use asynchronous methods** wherever possible to avoid blocking the event loop.\n3. Export your provider in `src/storage-providers/index.ts`, making sure to add it to the `storageProviders` array.\n4. **Always keep the HTTPStorageProvider as the last provider in the list**, as it acts as a catch-all for any URLs not matched by other providers.\n\nSee the existing providers for examples of how to implement the interface.\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2025 Salad Technologies\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# ComfyUI API - A Stateless and Extendable API for ComfyUI\n\nA simple wrapper that facilitates using [ComfyUI](https://github.com/comfyanonymous/ComfyUI/) as a stateless API, either by receiving images in the response, or by sending completed images to a webhook\n\n- [ComfyUI API - A Stateless and Extendable API for ComfyUI](#comfyui-api---a-stateless-and-extendable-api-for-comfyui)\n  - [Download and Use](#download-and-use)\n  - [Features](#features)\n  - [Full ComfyUI Support](#full-comfyui-support)\n  - [Stateless API](#stateless-api)\n    - [Request Format](#request-format)\n    - [Per-Request Credentials](#per-request-credentials)\n    - [Response Format](#response-format)\n    - [Example Usage](#example-usage)\n      - [Base64 Response](#base64-response)\n      - [Webhook Response with Base64 Images](#webhook-response-with-base64-images)\n      - [S3 Urls in Response](#s3-urls-in-response)\n      - [S3 Urls in Webhook Payload](#s3-urls-in-webhook-payload)\n      - [Azure Blob Urls in Response](#azure-blob-urls-in-response)\n      - [Azure Blob Urls in Webhook Payload](#azure-blob-urls-in-webhook-payload)\n  - [Model Manifest](#model-manifest)\n  - [Downloading Behavior](#downloading-behavior)\n  - [LRU Caching](#lru-caching)\n  - [Modular Storage Backends](#modular-storage-backends)\n    - [S3-Compatible Storage](#s3-compatible-storage)\n    - [Huggingface Repository](#huggingface-repository)\n    - [Azure Blob Storage](#azure-blob-storage)\n    - [HTTP](#http)\n  - [Image To Image Workflows](#image-to-image-workflows)\n  - [Dynamic Model Loading](#dynamic-model-loading)\n  - [On-Demand Model Download Endpoint](#on-demand-model-download-endpoint)\n    - [Authentication Types](#authentication-types)\n  - [Server-side image processing](#server-side-image-processing)\n  - [Probes](#probes)\n  - [API Configuration Guide](#api-configuration-guide)\n    - [Environment Variables](#environment-variables)\n      - [Kubernetes Deployment: Proxy Environment Variables](#kubernetes-deployment-proxy-environment-variables)\n    - [Configuration Details](#configuration-details)\n    - [Additional Notes](#additional-notes)\n  - [Using Synchronously](#using-synchronously)\n  - [Using with Webhooks](#using-with-webhooks)\n    - [prompt.complete](#promptcomplete)\n    - [prompt.failed](#promptfailed)\n    - [Validating Webhooks](#validating-webhooks)\n      - [Node.js Example](#nodejs-example)\n      - [Python Example](#python-example)\n    - [DEPRECATED: Legacy Webhook Behavior](#deprecated-legacy-webhook-behavior)\n      - [output.complete](#outputcomplete)\n      - [prompt.failed (legacy)](#promptfailed-legacy)\n  - [System Events](#system-events)\n    - [status](#status)\n    - [progress](#progress)\n    - [progress\\_state](#progress_state)\n    - [executing](#executing)\n    - [execution\\_start](#execution_start)\n    - [execution\\_cached](#execution_cached)\n    - [executed](#executed)\n    - [execution\\_success](#execution_success)\n    - [execution\\_interrupted](#execution_interrupted)\n    - [execution\\_error](#execution_error)\n    - [file\\_downloaded](#file_downloaded)\n    - [file\\_uploaded](#file_uploaded)\n    - [file\\_deleted](#file_deleted)\n  - [Prebuilt Docker Images](#prebuilt-docker-images)\n  - [Considerations for Running on SaladCloud](#considerations-for-running-on-saladcloud)\n  - [Custom Workflows](#custom-workflows)\n  - [Contributing](#contributing)\n  - [Architecture](#architecture)\n\n## Download and Use\n\nEither use a [pre-built Docker 
image](#prebuilt-docker-images), or build your own.\n\nDownload the latest version from the release page, and copy it into your existing ComfyUI dockerfile.\nYou can find good base dockerfiles in the [docker](./docker) directory.\nThere are also example dockerfiles for popular models in the [SaladCloud Recipes Repo](https://github.com/SaladTechnologies/salad-recipes/tree/master/src).\n\nIf you have your own ComfyUI dockerfile, you can add the comfyui-api server to it like so:\n\n```dockerfile\n# Change this to the version you want to use\nARG api_version=1.18.1\n\n# Download the comfyui-api binary, and make it executable\nADD https://github.com/SaladTechnologies/comfyui-api/releases/download/${api_version}/comfyui-api .\nRUN chmod +x comfyui-api\n\n# Set CMD to launch the comfyui-api binary. The comfyui-api binary will launch ComfyUI as a child process.\nCMD [\"./comfyui-api\"]\n```\n\nThe server will be available on port `3000` by default, but this can be customized with the `PORT` environment variable.\n\nThe server hosts swagger docs at `/docs`, which can be used to interact with the API.\n\n## Features\n\n- **Full Power Of ComfyUI**: The server supports the full ComfyUI /prompt API, and can be used to execute any ComfyUI workflow.\n- **Verified Model/Workflow Support**: Stable Diffusion 1.5, Stable Diffusion XL, Stable Diffusion 3.5, Flux, AnimateDiff, LTX Video, Hunyuan Video, CogVideoX, Mochi Video, Cosmos 1.0. My assumption is more model types are supported, but these are the ones I have verified.\n- **Stateless API**: The server is stateless, and can be scaled horizontally to handle more requests.\n- **Swagger Docs**: The server hosts swagger docs at `/docs`, which can be used to interact with the API.\n- **\"Synchronous\" Support**: The server will return base64-encoded images directly in the response, if no async behavior is requested.\n- **Async Support via Webhooks**: The server can send completed outputs to a webhook URL, allowing for asynchronous processing.\n- **Modular Storage Backends**: Completed outputs can be sent base64-encoded to a webhook, or uploaded to any s3-compatible storage, an http endpoint, a huggingface repo, or azure blob storage. All of these can be used to download input media as well. More storage backends can be added easily. Supports an optional LRU cache for downloaded models and files to keep local storage from overflowing.\n- **Warmup Workflow**: The server can be configured to run a warmup workflow on startup, which can be used to load and warm up models, and to ensure the server is ready to accept requests.\n- **Return Images In PNG (default), JPEG, or WebP**: The server can return images in PNG, JPEG, or WebP format, via a parameter in the API request. Most options supported by [sharp](https://sharp.pixelplumbing.com/) are supported.\n- **Probes**: The server has two probes, `/health` and `/ready`, which can be used to check the server's health and readiness to receive traffic.\n- **Dynamic Workflow Endpoints**: Automatically mount new workflow endpoints by adding conforming `.js` or `.ts` files to the `/workflows` directory in your docker image. See [the guide](./DEVELOPING.md#generating-new-workflow-endpoints) for more information. A [Claude 4 Sonnet](https://claude.ai) [prompt](./claude-endpoint-creation-prompt.md) is included to assist in automating this process.\n- **Bring Your Own Models And Extensions**: Use any model or extension you want by adding them to the normal ComfyUI directories `/opt/ComfyUI/`. 
You can configure a [manifest file](#model-manifest) to download models and install extensions automatically on startup.\n- **Dynamic Model Loading**: If you provide a URL in a model-loading node, the server will locally cache the model automatically before executing the workflow.\n- **On-Demand Model Download**: Trigger model downloads via a dedicated API endpoint, with support for both synchronous and asynchronous operations.\n- **Execution Stats**: The server will return [execution stats in the response](#response-format).\n- **Works Great with SaladCloud**: The server is designed to work well with SaladCloud, and can be used to host ComfyUI on the SaladCloud platform. It is likely to work well with other platforms as well.\n  - **Manages Deletion Cost**: _ONLY ON SALAD_. The server will automatically set the instance deletion cost to the queue length, so that busier nodes are less likely to be scaled in while they are processing requests.\n- **Single Binary**: The server is distributed as a single binary, and can be run with no dependencies.\n- **Websocket Events Via Webhook**: The server can forward ComfyUI websocket events to a configured webhook, which can be used to monitor the progress of a workflow.\n- **Friendly License**: The server is distributed under the MIT license, and can be used for any purpose. All of its dependencies are also MIT or Apache 2.0 licensed, except ComfyUI itself, which is GPL-3.0 licensed.\n\n## Full ComfyUI Support\n\nComfyUI API sits in front of ComfyUI, and uses the ComfyUI `/prompt` API to execute workflows, so any API-formatted prompt can be executed by the server. Before queueing the prompt, the server will download any required inputs, such as images. It also overrides the `filename_prefix` field in the prompt to ensure that output files are saved with a unique filename. Once the prompt is queued, the server will wait for the prompt to complete, and then return the outputs in the response body, via a webhook, or upload them to S3, depending on the request parameters. Because of this, anything you can run in ComfyUI can be run in the ComfyUI API server, including custom nodes and workflows, and any models ComfyUI supports.\n\n## Stateless API\n\nThe ComfyUI API server is designed to be stateless, meaning that it does not store any state between requests. This allows the server to be scaled horizontally behind a load balancer, and to handle more requests by adding more instances of the server. The server uses a configurable warmup workflow to ensure that ComfyUI is ready to accept requests, and to load any required models. The server also self-hosts swagger docs and an openapi spec at `/docs`, which can be used to interact with the API.\n\n### Request Format\n\nPrompts are submitted to the server via the `POST /prompt` endpoint, which accepts a JSON body containing the prompt graph, as well as any additional parameters such as the webhook URL, S3 bucket and prefix, and image conversion options. 
A request may look something like:\n\n```json\n{\n  \"id\": \"123e4567-e89b-12d3-a456-426614174000\",\n  \"prompt\": {\n    \"1\": {\n      \"inputs\": {\n        \"image\": \"https://salad-benchmark-assets.download/coco2017/train2017/000000000009.jpg\",\n        \"upload\": \"image\"\n      },\n      \"class_type\": \"LoadImage\"\n    }\n  },\n  \"webhook_v2\": \"https://example.com/webhook\",\n  \"convert_output\": {\n    \"format\": \"jpeg\",\n    \"options\": {\n      \"quality\": 80,\n      \"progressive\": true\n    }\n  }\n}\n```\n\n- Only the `prompt` field is required. The other fields are optional, and can be omitted if not needed.\n- Your prompt must be a valid ComfyUI prompt graph, which is a JSON object where each key is a node ID, and the value is an object containing the node's inputs, class type, and optional metadata.\n- Your prompt must include a node that saves an output, such as a `SaveImage` node.\n\n### Per-Request Credentials\n\nYou can provide authentication credentials for protected model URLs directly in the prompt request using the `credentials` field. This allows downloading gated models (like Hugging Face gated models) or private models from S3/Azure without configuring environment variables.\n\n```json\n{\n  \"prompt\": { ... },\n  \"credentials\": [\n    {\n      \"url_pattern\": \"https://huggingface.co/**\",\n      \"auth\": {\n        \"type\": \"bearer\",\n        \"token\": \"hf_xxxxxxxxxxxxx\"\n      }\n    },\n    {\n      \"url_pattern\": \"s3://my-private-bucket/**\",\n      \"auth\": {\n        \"type\": \"s3\",\n        \"access_key_id\": \"AKIAIOSFODNN7EXAMPLE\",\n        \"secret_access_key\": \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"\n      }\n    }\n  ]\n}\n```\n\nEach credential entry has:\n- `url_pattern`: A glob-style pattern to match URLs. Supports:\n  - `*` matches any characters except `/`\n  - `**` matches any characters including `/`\n  - `?` matches a single character\n- `auth`: Authentication configuration (see [Authentication Types](#authentication-types) for all supported types)\n\n**Pattern Examples:**\n- `https://huggingface.co/**` - matches all Hugging Face URLs\n- `https://*.s3.amazonaws.com/**` - matches any S3 bucket URL\n- `s3://my-bucket/*` - matches files in the root of `my-bucket`\n- `s3://my-bucket/**` - matches all files in `my-bucket` including subdirectories\n\nCredentials are matched in order - the first matching pattern wins. This allows you to provide different credentials for different sources in a single request.\n\n### Response Format\n\nFor async requests (i.e. when a webhook or S3 upload is used), the server will return a `202 Accepted` response immediately, and the outputs will be sent to the webhook or uploaded to S3 in the background.\n\nFor synchronous requests (i.e. no webhook or s3.async is false), the server will return a `200 OK` response once the prompt has completed, with a body containing the outputs. The response body will have the following format:\n\n```json\n{\n  \"id\": \"123e4567-e89b-12d3-a456-426614174000\",\n  \"prompt\": { ... 
},\n  \"images\": [\n    \"base64-encoded-image-1\",\n    \"base64-encoded-image-2\"\n  ],\n  \"filenames\": [\n    \"output-filename-1.png\",\n    \"output-filename-2.png\"\n  ],\n  \"stats\": {\n    \"comfy_execution\": {\n      \"total\": {\n        \"start\": 1625247600000,\n        \"end\": 1625247605000,\n        \"duration\": 5000\n      },\n      \"nodes\": {\n        \"1\": {\n          \"start\": 1625247600000\n        },\n        \"2\": {\n          \"start\": 1625247601000\n        }\n      }\n    },\n    \"preprocess_time\": 1500,\n    \"upload_time\": 1,\n    \"total_time\": 6576\n  }\n}\n```\n\nIf you requested image conversion, the images will be in the requested format (e.g. JPEG or WebP) instead of PNG.\n\n### Example Usage\n\n#### Base64 Response\n\n**Request:**\n\n```json\n{\n  \"prompt\": { ... }\n}\n```\n\n**Response:**\n\n```json\n{\n  \"id\": \"generated-uuid\",\n  \"prompt\": { ... },\n  \"images\": [\"base64-encoded-image-1\", \"base64-encoded-image-2\"],\n  \"filenames\": [\"generated-uuid_ComfyUI_0.png\", \"generated-uuid_ComfyUI_1.png\"],\n  \"stats\": {\n    \"comfy_execution\": {\n      \"total\": {\n        \"start\": 1700000000000,\n        \"end\": 1700000005000,\n        \"duration\": 5000\n      },\n      \"nodes\": {\n        \"3\": {\n          \"start\": 1700000000000\n        },\n        ...\n      }\n    },\n    \"preprocess_time\": 1200,\n    \"upload_time\": 1,\n    \"total_time\": 6205\n  }\n}\n```\n\n#### Webhook Response with Base64 Images\n\n**Request:**\n\n```json\n{\n  \"prompt\": { ... },\n  \"webhook_v2\": \"https://example.com/webhook\"\n}\n```\n\n**HTTP Response: 202 Accepted**\n\n```json\n{\n  \"id\": \"generated-uuid\",\n  \"status\": \"ok\",\n  \"webhook_v2\": \"https://example.com/webhook\",\n  \"prompt\": { ... }\n}\n```\n\n**Webhook Payload**\n\n```json\n{\n  \"type\": \"prompt.complete\",\n  \"id\": \"generated-uuid\",\n  \"prompt\": { ... },\n  \"webhook_v2\": \"https://example.com/webhook\",\n  \"images\": [\n    \"base64-encoded-image-1\",\n    \"base64-encoded-image-2\"\n  ],\n  \"filenames\": [\n    \"output-filename-1.png\",\n    \"output-filename-2.png\"\n  ],\n  \"stats\": {\n    \"comfy_execution\": {\n      \"total\": {\n        \"start\": 1700000000000,\n        \"end\": 1700000005000,\n        \"duration\": 5000\n      },\n      \"nodes\": {\n        \"3\": {\n          \"start\": 1700000000000\n        },\n        ...\n      }\n    },\n    \"preprocess_time\": 1200,\n    \"upload_time\": 1,\n    \"total_time\": 6205\n  }\n}\n```\n\n#### S3 Urls in Response\n\n**Request:**\n\n```json\n{\n  \"prompt\": { ... },\n  \"s3\": {\n    \"bucket\": \"my-bucket\",\n    \"prefix\": \"outputs/\",\n    \"async\": false\n  }\n}\n```\n\n**Response:**\n\n```json\n{\n  \"id\": \"generated-uuid\",\n  \"prompt\": { ... },\n  \"images\": [\n    \"s3://my-bucket/outputs/generated-uuid_ComfyUI_0.png\",\n    \"s3://my-bucket/outputs/generated-uuid_ComfyUI_1.png\"\n  ],\n  \"filenames\": [\n    \"generated-uuid_ComfyUI_0.png\",\n    \"generated-uuid_ComfyUI_1.png\"\n  ],\n  \"stats\": {\n    \"comfy_execution\": {\n      \"total\": {\n        \"start\": 1700000000000,\n        \"end\": 1700000005000,\n        \"duration\": 5000\n      },\n      \"nodes\": {\n        \"3\": {\n          \"start\": 1700000000000\n        },\n        ...\n      }\n    },\n    \"preprocess_time\": 1200,\n    \"upload_time\": 300,\n    \"total_time\": 6505\n  }\n}\n```\n\n#### S3 Urls in Webhook Payload\n\n**Request:**\n\n```json\n{\n  \"prompt\": { ... 
},\n  \"s3\": {\n    \"bucket\": \"my-bucket\",\n    \"prefix\": \"outputs/\"\n  },\n  \"webhook_v2\": \"https://example.com/webhook\"\n}\n```\n\n**HTTP Response: 202 Accepted**\n\n```json\n{\n  \"id\": \"generated-uuid\",\n  \"status\": \"ok\",\n  \"webhook_v2\": \"https://example.com/webhook\",\n  \"s3\": {\n    \"bucket\": \"my-bucket\",\n    \"prefix\": \"outputs/\"\n  },\n  \"prompt\": { ... }\n}\n```\n\n**Webhook Payload**\n\n```json\n{\n  \"type\": \"prompt.complete\",\n  \"id\": \"generated-uuid\",\n  \"prompt\": { ... },\n  \"webhook_v2\": \"https://example.com/webhook\",\n  \"s3\": {\n    \"bucket\": \"my-bucket\",\n    \"prefix\": \"outputs/\"\n  },\n  \"images\": [\n    \"s3://my-bucket/outputs/generated-uuid_ComfyUI_0.png\",\n    \"s3://my-bucket/outputs/generated-uuid_ComfyUI_1.png\"\n  ],\n  \"filenames\": [\n    \"generated-uuid_ComfyUI_0.png\",\n    \"generated-uuid_ComfyUI_1.png\"\n  ],\n  \"stats\": {\n    \"comfy_execution\": {\n      \"total\": {\n        \"start\": 1700000000000,\n        \"end\": 1700000005000,\n        \"duration\": 5000\n      },\n      \"nodes\": {\n        \"3\": {\n          \"start\": 1700000000000\n        },\n        ...\n      }\n    },\n    \"preprocess_time\": 1200,\n    \"upload_time\": 300,\n    \"total_time\": 6505\n  }\n}\n```\n\n#### Azure Blob Urls in Response\n\n**Request:**\n\n```json\n{\n  \"prompt\": { ... },\n  \"azure_blob_upload\": {\n    \"container\": \"my-container\",\n    \"blob_prefix\": \"outputs/\",\n    \"async\": false\n  }\n}\n```\n\n**Response:**\n\n```json\n{\n  \"id\": \"generated-uuid\",\n  \"prompt\": { ... },\n  \"images\": [\n    \"https://<your-account>.blob.core.windows.net/my-container/outputs/generated-uuid_ComfyUI_0.png\",\n    \"https://<your-account>.blob.core.windows.net/my-container/outputs/generated-uuid_ComfyUI_1.png\"\n  ],\n  \"filenames\": [\n    \"generated-uuid_ComfyUI_0.png\",\n    \"generated-uuid_ComfyUI_1.png\"\n  ],\n  \"stats\": {\n    \"comfy_execution\": {\n      \"total\": {\n        \"start\": 1700000000000,\n        \"end\": 1700000005000,\n        \"duration\": 5000\n      },\n      \"nodes\": {\n        \"3\": {\n          \"start\": 1700000000000\n        },\n        ...\n      }\n    },\n    \"preprocess_time\": 1200,\n    \"upload_time\": 300,\n    \"total_time\": 6505\n  }\n}\n```\n\n#### Azure Blob Urls in Webhook Payload\n\n**Request:**\n\n```json\n{\n  \"prompt\": { ... },\n  \"azure_blob_upload\": {\n    \"container\": \"my-container\",\n    \"blob_prefix\": \"outputs/\"\n  },\n  \"webhook_v2\": \"https://example.com/webhook\"\n}\n```\n\n**HTTP Response: 202 Accepted**\n\n```json\n{\n  \"id\": \"generated-uuid\",\n  \"status\": \"ok\",\n  \"webhook_v2\": \"https://example.com/webhook\",\n  \"azure_blob_upload\": {\n    \"container\": \"my-container\",\n    \"blob_prefix\": \"outputs/\"\n  },\n  \"prompt\": { ... }\n}\n```\n\n**Webhook Payload**\n\n```json\n{\n  \"type\": \"prompt.complete\",\n  \"id\": \"generated-uuid\",\n  \"prompt\": { ... }
,\n  \"webhook_v2\": \"https://example.com/webhook\",\n  \"azure_blob_upload\": {\n    \"container\": \"my-container\",\n    \"blob_prefix\": \"outputs/\"\n  },\n  \"images\": [\n    \"https://<your-account>.blob.core.windows.net/my-container/outputs/generated-uuid_ComfyUI_0.png\",\n    \"https://<your-account>.blob.core.windows.net/my-container/outputs/generated-uuid_ComfyUI_1.png\"\n  ],\n  \"filenames\": [\n    \"generated-uuid_ComfyUI_0.png\",\n    \"generated-uuid_ComfyUI_1.png\"\n  ],\n  \"stats\": {\n    \"comfy_execution\": {\n      \"total\": {\n        \"start\": 1700000000000,\n        \"end\": 1700000005000,\n        \"duration\": 5000\n      },\n      \"nodes\": {\n        \"3\": {\n          \"start\": 1700000000000\n        },\n        ...\n      }\n    },\n    \"preprocess_time\": 1200,\n    \"upload_time\": 300,\n    \"total_time\": 6505\n  }\n}\n```\n\n## Model Manifest\n\nThe server can be configured to download models and install extensions automatically on startup, by providing a manifest file in either JSON or YAML format. The manifest filepath can be provided via the `MANIFEST` environment variable, or the full manifest as a JSON string via the `MANIFEST_JSON` environment variable. If both are provided, the `MANIFEST_JSON` variable will take precedence.\n\nThe manifest file should have the following format (all fields are optional):\n\n```yaml\napt:\n  - package1\n  - package2\npip:\n  - package3\n  - package4\ncustom_nodes:\n  - node-name-from-comfy-registry\n  - https://github.com/username/repo\n  - https://github.com/username/repo/tree/commit-hash-or-branch\n  - https://github.com/username/repo@v1.0.0\nmodels:\n  before_start:\n    - url: https://example.com/model.ckpt\n      local_path: /opt/ComfyUI/models/checkpoints/model1.ckpt\n    - url: s3://my-bucket/path/to/model.safetensors\n      local_path: /opt/ComfyUI/models/checkpoints/model2.safetensors\n  after_start:\n    - url: https://example.com/another-model.ckpt\n      local_path: /opt/ComfyUI/models/checkpoints/model3.ckpt\n```\n\nIf a manifest is provided, the server will perform the following in order:\n\n1. Install any apt packages listed in the `apt` field.\n2. Install any pip packages listed in the `pip` field. Uses `uv` if available, otherwise falls back to `pip`.\n3. Install any custom nodes listed in the `custom_nodes` field, using the `comfy` CLI tool if a plain string is provided and the tool is available, or by cloning the provided git repository if a URL is provided. You can pin a specific commit, branch, or tag using various URL formats:\n   - **GitHub**: `/tree/{ref}`, `/commit/{sha}`, `/releases/tag/{tag}`\n   - **GitLab**: `/-/tree/{ref}`, `/-/commit/{sha}`\n   - **Bitbucket**: `/src/{ref}`, `/commits/{sha}`\n   - **Generic**: `repo@{ref}` (npm/pip style, e.g., `https://github.com/user/repo@v1.0.0`)\n\n   Example: `https://github.com/kijai/ComfyUI-KJNodes/tree/204f6d5aae73b10c0fe2fb26e61405fd6337bb77`. If cloned, `requirements.txt` will be installed if it exists, using `uv` if available, otherwise falling back to `pip`.\n4. Download any models listed in the `models.before_start` field, and save them to the specified `local_path`.\n5. Start downloading any models listed in the `models.after_start` field, and save them to the specified `local_path`. These downloads will be started in the background and will not block the server from accepting requests. This is useful for preloading less frequently used models.\n
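\nFor example, here is a minimal sketch of passing a manifest inline via `MANIFEST_JSON` (the image name is a placeholder for your own build):\n\n```shell\ndocker run -p 3000:3000 \\\n  -e MANIFEST_JSON='{\"models\":{\"after_start\":[{\"url\":\"https://example.com/another-model.ckpt\",\"local_path\":\"/opt/ComfyUI/models/checkpoints/model3.ckpt\"}]}}' \\\n  your-comfyui-image:latest\n```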
\n\n## Downloading Behavior\n\nWhen downloading files, whether via the manifest, image-to-image workflows, or dynamic model loading, the server will first check if the file already exists at the specified path.\nIt does this by hashing the provided URL and looking for a matching file in the cache directory (`$HOME/.cache/comfyui-api` by default).\nFor example, the URL `https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16` will always be saved in the cache as `Pk6VSKLStckZydwGhX0bM8TqaqHEW9yt.safetensors`.\nIf a matching file is found, it will be used instead of downloading the file again.\nThis helps to reduce bandwidth usage and speed up request times.\n\nIf the URL is an S3 URL, the server will use the AWS SDK to download the file.\nThis allows the server to access private S3 buckets (or S3-compatible buckets), as long as the appropriate AWS credentials are provided via environment variables.\n\nIf the URL is a Huggingface URL, the server will use the `hf` CLI tool to download the file.\nThis allows you to take advantage of high-speed [xet storage](https://huggingface.co/docs/hub/en/storage-backends#xet), as well as other optimizations provided by Huggingface.\n\nIf the URL is an Azure Blob Storage URL, the server will use the Azure SDK to download the file.\n\nIf the URL is a regular http(s) URL, the server will use `fetch` to stream the file to disk.\nIf the URL has a file extension, the server will use that extension when saving the file.\nOtherwise, it will attempt to determine the file extension from the `Content-Disposition` or `Content-Type` headers.\n\nAll downloaded files live in the configured cache directory with a name taken as the first 32 characters of the URL hash plus the file extension, and are symbolically linked to the specified local path.\n\nIf a download for a given URL is already in progress, any subsequent requests for the same URL will wait for the first download to complete, and then use the downloaded file.\n\n## LRU Caching\n\nThe server uses an LRU cache to manage the cache directory, which is used to store downloaded models and other files.\nIt is disabled by default, but you can set a size via the `LRU_CACHE_SIZE_GB` environment variable.\nWhen the cache size exceeds the configured size, the server will delete the least recently used files until the cache size is below the configured size.\n**Note:** Cache size is determined _after_ a download completes, so the actual cache size can temporarily exceed the configured size while downloads are in progress.\n\n## Modular Storage Backends\n\nThe server supports multiple storage backends for downloading models and input media, and uploading completed outputs.\nAll uploads take a prefix of some kind, not a full path or URL.\n\nAll uploads can be handled synchronously or asynchronously, depending on the `async` field in the upload block of the request body.\n\n- If `async` is `true` or omitted, the server will return a `202 Accepted` response immediately, and the upload will be handled in the background.\n- If `async` is `false`, the server will wait for the upload to complete before returning a `200 OK` response with the uploaded URLs in the response body.\n\nIf an upload for a particular URL is in progress, a subsequent upload to the same URL will abort the first request and take over the upload.\nThis is rooted in the assumption that you want the latest version of any particular output.\n
\n### S3-Compatible Storage\n\nIncludes AWS S3, Cloudflare R2, etc.\nUses the AWS SDK. Requires appropriate AWS credentials to be provided via environment variables.\nUsed for URLs starting with `s3://`.\n\nFor downloads, use the format `s3://bucket-name/path/to/file`.\nFor uploads, include the `s3` field in the request body, like:\n\n```json\n{\n  \"prompt\": {...},\n  \"s3\": {\n    \"bucket\": \"my-bucket\",\n    \"prefix\": \"optional/prefix\",\n    \"async\": false\n  }\n}\n```\n\n### Huggingface Repository\n\nUses the `hf` CLI tool.\nRequires the `HF_TOKEN` environment variable to be set with a valid Huggingface token.\nUsed for URLs starting with `https://huggingface.co/`.\nWorks with both public and private repos, model and dataset repos, and large files stored with [xet storage](https://huggingface.co/docs/hub/en/storage-backends#xet).\n\nFor downloads, use the format `https://huggingface.co/username/repo/resolve/revision/path/to/file` or `https://huggingface.co/datasets/username/repo/resolve/revision/path/to/file`.\n\nFor uploads, include the `hf_upload` field in the request body, like:\n\n```json\n{\n  \"prompt\": {},\n  \"hf_upload\": {\n    \"repo\": \"username/repo\",\n    \"repo_type\": \"dataset\",\n    \"revision\": \"main\",\n    \"directory\": \"test-source-images\",\n    \"async\": false\n  }\n}\n```\n\nThe `repo_type` field can be either `model` or `dataset`, and defaults to `model`.\n\n### Azure Blob Storage\n\nUses the Azure SDK.\nRequires appropriate Azure credentials to be provided via environment variables.\nUsed for URLs matching `https://<your-account>.blob.core.windows.net/`.\n\nFor downloads, use the format `https://<your-account>.blob.core.windows.net/container/path/to/file`.\n\nFor uploads, include the `azure_blob_upload` field in the request body, like:\n\n```json\n{\n  \"prompt\": {},\n  \"azure_blob_upload\": {\n    \"container\": \"my-container\",\n    \"blob_prefix\": \"optional/prefix\",\n    \"async\": false\n  }\n}\n```\n\n### HTTP\n\nUses Fetch.\nSupports custom headers via the `HTTP_AUTH_HEADER_NAME` and `HTTP_AUTH_HEADER_VALUE` environment variables.\nBasic auth can be used via the URL, e.g. `https://username:password@your-http-endpoint.com`.\n\nFor downloads, use any valid http(s) URL that is not matched by the other storage backends.\n\nFor uploads, the server makes a PUT request to the specified URL with the image as the body.\n
Matches any other URL not matched by the other storage backends.\n\n## Image To Image Workflows\n\nThe ComfyUI API server supports image-to-image workflows, allowing you to submit an image and receive a modified version of that image in response.\nThis is useful for tasks such as image in-painting, style transfer, and other image manipulation tasks.\n\nTo use image-to-image workflows, you can submit an image as a base64-encoded string, or a URL.\nThe server will automatically detect the input type and process the image accordingly, using an appropriate storage provider if necessary.\n\nHere's an example of doing this in a `LoadImage` node:\n\n```json\n{\n  \"inputs\": {\n    \"image\": \"https://salad-benchmark-assets.download/coco2017/train2017/000000000009.jpg\",\n    \"upload\": \"image\"\n  },\n  \"class_type\": \"LoadImage\",\n  \"_meta\": {\n    \"title\": \"Load Image\"\n  }\n}\n```\n\n## Dynamic Model Loading\n\nThe ComfyUI API server supports dynamic model loading, allowing you to specify a model URL in a model-loading node, and the server will automatically download and cache the model before executing the workflow.\nThis is useful for workflows that need to potentially use a different model for each request.\nAn example may be head-shot generation, which would specify a LoRA per person.\nThe LoRA may be generated on-the-fly by another service, and provided to the ComfyUI API server via a URL.\n\n```json\n{\n  \"inputs\": {\n    \"ckpt_name\": \"https://civitai.com/api/download/models/76750?type=Model&format=SafeTensor&size=pruned&fp=fp16\"\n  },\n  \"class_type\": \"CheckpointLoaderSimple\",\n  \"_meta\": {\n    \"title\": \"Load Checkpoint\"\n  }\n},\n```\n\n## On-Demand Model Download Endpoint\n\nThe server provides a `POST /download` endpoint that allows you to trigger model downloads on-demand. This is useful for pre-loading models before they are needed in a workflow, or for managing model availability across your infrastructure.\n\n### Request Format\n\n```json\n{\n  \"url\": \"https://example.com/model.safetensors\",\n  \"model_type\": \"checkpoints\",\n  \"filename\": \"my-model.safetensors\",\n  \"wait\": false,\n  \"auth\": {\n    \"type\": \"bearer\",\n    \"token\": \"hf_xxxxxxxxxxxxx\"\n  }\n}\n```\n\n| Field | Required | Description |\n|-------|----------|-------------|\n| `url` | Yes | The URL to download the model from. Supports all [storage backends](#modular-storage-backends). |\n| `model_type` | Yes | The type of model (e.g., `checkpoints`, `loras`, `vae`, `controlnet`, etc.). Must match a subdirectory in your models folder. |\n| `filename` | No | Override the filename. Defaults to the basename from the URL. |\n| `wait` | No | If `false` (default), returns immediately with `202 Accepted`. If `true`, waits for the download to complete and returns `200 OK` with file stats. |\n| `auth` | No | Authentication credentials for accessing protected resources. See [Authentication Types](#authentication-types) below. 
|\n\n### Authentication Types\n\nThe `auth` field supports multiple authentication methods for different storage providers:\n\n**Bearer Token** (e.g., Hugging Face gated models):\n```json\n{\n  \"type\": \"bearer\",\n  \"token\": \"hf_xxxxxxxxxxxxx\"\n}\n```\n\n**Basic Auth**:\n```json\n{\n  \"type\": \"basic\",\n  \"username\": \"user\",\n  \"password\": \"pass\"\n}\n```\n\n**Custom Header** (e.g., API keys):\n```json\n{\n  \"type\": \"header\",\n  \"header_name\": \"X-API-Key\",\n  \"header_value\": \"your-api-key\"\n}\n```\n\n**Query Parameter** (e.g., Azure SAS tokens):\n```json\n{\n  \"type\": \"query\",\n  \"query_param\": \"sig\",\n  \"query_value\": \"your-sas-token\"\n}\n```\n\n**S3 Credentials** (for private S3 buckets):\n```json\n{\n  \"type\": \"s3\",\n  \"access_key_id\": \"AKIAIOSFODNN7EXAMPLE\",\n  \"secret_access_key\": \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\",\n  \"session_token\": \"optional-sts-token\",\n  \"region\": \"us-east-1\",\n  \"endpoint\": \"https://s3.custom-endpoint.com\"\n}\n```\n\nThe `session_token`, `region`, and `endpoint` fields are optional for S3 auth.\n\n### Response Format\n\n**Asynchronous (default, `wait: false`):**\n\nReturns `202 Accepted` immediately:\n\n```json\n{\n  \"url\": \"https://example.com/model.safetensors\",\n  \"model_type\": \"checkpoints\",\n  \"filename\": \"my-model.safetensors\",\n  \"status\": \"started\"\n}\n```\n\n**Synchronous (`wait: true`):**\n\nReturns `200 OK` when the download completes:\n\n```json\n{\n  \"url\": \"https://example.com/model.safetensors\",\n  \"model_type\": \"checkpoints\",\n  \"filename\": \"my-model.safetensors\",\n  \"status\": \"completed\",\n  \"size\": 6938281472,\n  \"duration\": 45.23\n}\n```\n\n- `size`: File size in bytes\n- `duration`: Download time in seconds\n\n### Example Usage\n\n**Async download (fire-and-forget):**\n\n```bash\ncurl -X POST http://localhost:3000/download \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"url\": \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors\",\n    \"model_type\": \"checkpoints\"\n  }'\n```\n\n**Sync download (wait for completion):**\n\n```bash\ncurl -X POST http://localhost:3000/download \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"url\": \"s3://my-bucket/models/custom-lora.safetensors\",\n    \"model_type\": \"loras\",\n    \"filename\": \"my-custom-lora.safetensors\",\n    \"wait\": true\n  }'\n```\n\nThe download uses the same caching and storage provider infrastructure as [dynamic model loading](#dynamic-model-loading), so downloaded files are cached and deduplicated automatically.\n\n## Server-Side Image Processing\n\nThe ComfyUI API server uses the [sharp](https://sharp.pixelplumbing.com/) library to process images. This allows you to return images in different, more compact formats, such as JPEG or WebP. This can be accomplished by including the `convert_output` object in the request body, which can contain the following fields:\n\n```json\n{\n  \"format\": \"jpeg|webp\",\n  \"options\": {}\n}\n```\n
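\nFor example, a request that returns JPEG output at a reduced quality might look like this (a sketch; the prompt graph is elided and the quality value is arbitrary):\n\n```bash\ncurl -X POST http://localhost:3000/prompt \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"prompt\": {},\n    \"convert_output\": {\n      \"format\": \"jpeg\",\n      \"options\": { \"quality\": 60 }\n    }\n  }'\n```\n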
\nOmitting the `convert_output` object will default to PNG format, which is lossless and has the best quality, but is also the largest in size.\n\n**JPEG options**:\n\n- `quality`: The quality of the JPEG image, between 1 and 100. Default is `80`.\n- `progressive`: Use progressive (interlace) scanning. Default is `false`.\n- `chromaSubsampling`: Set to `4:4:4` to prevent chroma subsampling; otherwise defaults to `4:2:0` chroma subsampling.\n- `optimizeCoding`: Optimize the Huffman coding tables. Default is `true`.\n- `mozjpeg`: Use mozjpeg defaults, equivalent to `{ trellisQuantisation: true, overshootDeringing: true, optimizeScans: true, quantisationTable: 3 }`. Default is `false`.\n- `trellisQuantisation`: Use trellis quantization. Default is `false`.\n- `overshootDeringing`: Use overshoot deringing. Default is `false`.\n- `optimizeScans`: Optimize the scan order. Default is `false`.\n- `quantisationTable`: Set the quantization table to use, 0 - 8. Default is `0`.\n\n**WebP options**:\n\n- `quality`: The quality of the WebP image, between 1 and 100. Default is `80`.\n- `alphaQuality`: The quality of the alpha channel, between 0 and 100. Default is `100`.\n- `lossless`: Use lossless compression. Default is `false`.\n- `nearLossless`: Use near-lossless compression. Default is `false`.\n- `smartSubsample`: Use smart subsampling. Default is `false`.\n- `preset`: Named preset for preprocessing/filtering, one of `default`, `picture`, `photo`, `drawing`, `icon`, or `text`. Default is `default`.\n- `effort`: CPU effort level, between 0 (fastest) and 6 (slowest). Default is `4`.\n\n## Probes\n\nThe server has two probes, `/health` and `/ready`.\n\n- The `/health` probe will return a 200 status code once the warmup workflow has completed. It will stay healthy as long as the server is running, even if ComfyUI crashes.\n- The `/ready` probe will also return a 200 status code once the warmup workflow has completed. It will return a 503 status code if ComfyUI is not running, such as when it has crashed and is being automatically restarted. If you have set `MAX_QUEUE_DEPTH` to a non-zero value, it will also return a 503 status code if ComfyUI's queue has reached the maximum depth.\n\n## API Configuration Guide\n\n### Environment Variables\n\nThe following table lists the available environment variables and their default values.\nFor historical reasons, the default values mostly assume this will run on top of an [ai-dock](https://github.com/ai-dock/comfyui) image, but we now provide [our own more minimal image](#prebuilt-docker-images) in this repo.\n\nIf you are using the S3 storage functionality, make sure to set all of the appropriate environment variables for your S3 bucket, such as `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_REGION`.\nThe server will automatically use these to upload images to S3.\n\nIf you are using the Huggingface storage functionality, make sure to set the `HF_TOKEN` environment variable with a valid Huggingface token with appropriate permissions.\n\nIf you are using the Azure Blob Storage functionality, make sure to set all of the appropriate environment variables for your Azure account, such as `AZURE_STORAGE_CONNECTION_STRING`.\n\n| Variable                     | Default Value              | Description                                                                                                                                                                                                                                  |\n| ---------------------------- | -------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |\n| ALWAYS_RESTART_COMFYUI       | \"false\"       
             | If set to \"true\", the ComfyUI process will be automatically restarted if it exits. Otherwise, the API server will exit when ComfyUI exits.                                                                                                   |\n| BASE                         | (not set)                  | There are different ways to load the comfyui environment for determining config values that vary with the base image. Currently only \"ai-dock\" has a special preset value.                                                                   |\n| CACHE_DIR                    | \"$HOME/.cache/comfyui-api\" | Directory to use for caching downloaded models and other files.                                                                                                                                                                              |\n| CMD                          | \"init.sh\"                  | Command to launch ComfyUI                                                                                                                                                                                                                    |\n| COMFY_HOME                   | \"/opt/ComfyUI\"             | ComfyUI home directory                                                                                                                                                                                                                       |\n| COMFYUI_PORT_HOST            | \"8188\"                     | ComfyUI port number                                                                                                                                                                                                                          |\n| DIRECT_ADDRESS               | \"127.0.0.1\"                | Direct address for ComfyUI                                                                                                                                                                                                                   |\n| HOST                         | \"::\"                       | Wrapper host address                                                                                                                                                                                                                         |\n| HTTP_AUTH_HEADER_NAME        | (not set)                  | If set, the server will include this header name with the value from HTTP_AUTH_HEADER_VALUE in all outgoing HTTP requests for uploading and downloading files. This can be used to add basic auth or bearer tokens to requests.              |\n| HTTP_AUTH_HEADER_VALUE       | (not set)                  | The value to use for the HTTP_AUTH_HEADER_NAME header in all outgoing HTTP requests for uploading and downloading files.                                                                                                                     |\n| INPUT_DIR                    | \"/opt/ComfyUI/input\"       | Directory for input files                                                                                                                                                                                                                    |\n| LOG_LEVEL                    | \"info\"                     | Log level for the application. One of \"trace\", \"debug\", \"info\", \"warn\", \"error\", \"fatal\".                                                                                                 
             |\n| LRU_CACHE_SIZE_GB            | \"0\"                        | Maximum size of the LRU cache in GB. If set to 0, this feature is disabled.                                                                                                                                                                  |\n| MANIFEST                     | (not set)                  | Path to the [manifest file](#model-manifest) (optional). Can be yml or json.                                                                                                                                                                 |\n| MANIFEST_JSON                | (not set)                  | A JSON string representing the [manifest](#model-manifest). If set, this will take precedence over the MANIFEST variable.                                                                                                                    |\n| MARKDOWN_SCHEMA_DESCRIPTIONS | \"true\"                     | If set to \"true\", the server will use the descriptions in the zod schemas to generate markdown tables in the swagger docs.                                                                                                                   |\n| MAX_BODY_SIZE_MB             | \"100\"                      | Maximum request body size in MB                                                                                                                                                                                                              |\n| MAX_QUEUE_DEPTH              | \"0\"                        | Maximum number of queued requests before the readiness probe will return 503. 0 indicates no limit.                                                                                                                                          |\n| MODEL_DIR                    | \"/opt/ComfyUI/models\"      | Directory for model files                                                                                                                                                                                                                    |\n| OUTPUT_DIR                   | \"/opt/ComfyUI/output\"      | Directory for output files                                                                                                                                                                                                                   |\n| PORT                         | \"3000\"                     | Wrapper port number                                                                                                                                                                                                                          |\n| PREPEND_FILENAMES            | \"true\"                     | If set to \"true\", the server will prepend a unique identifier to output filenames to avoid collisions. Otherwise, the server will overwrite filename prefixes with the unique identifier (legacy behavior).                                  
|\n| PROMPT_WEBHOOK_RETRIES       | \"3\"                        | Number of times to retry sending a webhook for a prompt                                                                                                                                                                                      |\n| STARTUP_CHECK_INTERVAL_S     | \"1\"                        | Interval in seconds between startup checks                                                                                                                                                                                                   |\n| STARTUP_CHECK_MAX_TRIES      | \"20\"                       | Maximum number of startup check attempts                                                                                                                                                                                                     |\n| SYSTEM_META\\_\\*              | (not set)                  | Any environment variable starting with `SYSTEM_META_` will be sent to the system webhook as metadata, e.g. `SYSTEM_META_batch=abc` will add `{\"batch\": \"abc\"}` to the `.metadata` field on system webhooks.                                  |\n| SYSTEM_WEBHOOK_EVENTS        | (not set)                  | Comma-separated list of events to send to the webhook. Only selected events will be sent. If not set, no events will be sent. See [System Events](#system-events). You may also use the special value `all` to subscribe to all event types. |\n| SYSTEM_WEBHOOK_URL           | (not set)                  | Optionally receive via webhook the events that ComfyUI emits on websocket. This includes progress events.                                                                                                                                    |\n| WARMUP_PROMPT_FILE           | (not set)                  | Path to warmup prompt file (optional). If both `WARMUP_PROMPT_FILE` and `WARMUP_PROMPT_URL` are set, `WARMUP_PROMPT_FILE` takes precedence.                                                                                                  |\n| WARMUP_PROMPT_URL            | (not set)                  | URL to download warmup prompt from (optional). Allows using a remote warmup workflow without building a custom Docker image. Downloaded and parsed at startup before ComfyUI launches.                                                       |\n| WEBHOOK_SECRET               | (empty string)             | If set, the server will sign webhook_v2 requests with this secret.                                                                                                                                                                           |\n| WORKFLOW_DIR                 | \"/workflows\"               | Directory for workflow files                                                                                                                                                                                                                 |\n\n#### Kubernetes Deployment: Proxy Environment Variables\n\nTo enable outbound requests (e.g., webhook delivery) to use a corporate proxy in Kubernetes, configure the standard proxy environment variables. 
The server uses undici's EnvHttpProxyAgent, which reads `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` and routes requests accordingly.\n\n- Recommended `NO_PROXY`: include localhost and common Kubernetes internal addresses so local and in-cluster services do not go through the proxy.\n- Set only one set of variables (prefer uppercase). If both lowercase and uppercase are set, the lowercase variables take precedence and the uppercase ones are ignored.\n\nExample Deployment snippet:\n\n```yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: comfyui-api\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: comfyui-api\n  template:\n    metadata:\n      labels:\n        app: comfyui-api\n    spec:\n      containers:\n        - name: comfyui-api\n          image: your.registry/comfyui-api:latest\n          env:\n            # Recommended: set only uppercase vars\n            - name: HTTP_PROXY\n              value: \"http://your-http-proxy:3128\"\n            - name: HTTPS_PROXY\n              value: \"http://your-https-proxy:3129\"\n            - name: NO_PROXY\n              value: \"localhost,127.0.0.1,::1,.svc,.svc.cluster.local,169.254.169.254\"\n          ports:\n            - name: http\n              containerPort: 3000\n```\n\nNotes:\n- If your proxy requires authentication, include credentials in the proxy URL (e.g., `http://user:pass@proxy.company:3128`).\n- `NO_PROXY=\"*\"` bypasses the proxy for all requests.\n- When only `HTTP_PROXY` is set, it is used for both HTTP and HTTPS.\n- Suggested `NO_PROXY` entries:\n  - `localhost,127.0.0.1,::1` for in-container loopback\n  - `.svc,.svc.cluster.local` for Kubernetes service DNS\n  - `169.254.169.254` for cloud/container metadata service\n\n### Configuration Details\n\n1. **ComfyUI Settings**:\n\n   - The application uses the `CMD` environment variable to specify the command for launching ComfyUI.\n   - ComfyUI is accessed at `http://${DIRECT_ADDRESS}:${COMFYUI_PORT_HOST}`.\n\n2. **Wrapper Settings**:\n\n   - The wrapper API listens on `HOST:PORT`.\n   - It can be accessed at `http://localhost:${PORT}`.\n   - Use an IPv6 address for `HOST` when deploying on Salad. This is the default behavior.\n\n3. **Startup Checks**:\n\n   - The application performs startup checks at intervals specified by `STARTUP_CHECK_INTERVAL_S`.\n   - It will attempt up to `STARTUP_CHECK_MAX_TRIES` before giving up.\n\n4. **Directories**:\n\n   - The application uses the `COMFY_HOME` environment variable to locate the ComfyUI installation.\n   - Output files are stored in `OUTPUT_DIR`.\n   - Input files are read from `INPUT_DIR`.\n   - Model files are located in `MODEL_DIR`.\n   - Workflow files are stored in `WORKFLOW_DIR`. See [below](#generating-new-workflow-endpoints) for more information.\n\n5. **Warmup Prompt**:\n\n   - If `WARMUP_PROMPT_FILE` is set, the application will load and parse a warmup prompt from this file.\n   - Alternatively, set `WARMUP_PROMPT_URL` to download the warmup prompt from a remote URL at startup. This allows using a custom warmup workflow without building a custom Docker image.\n   - If both are set, `WARMUP_PROMPT_FILE` takes precedence.\n   - The checkpoint used in this prompt can be used as the default for workflow models.\n\n6. 
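**Models**:\n\n   - The application scans the `MODEL_DIR` for subdirectories and creates configurations for each model type found.\n   - Each model type will have its directory path, list of available models, and a Zod enum for validation.\n   - The model names are exposed via the `GET /models` endpoint (see the example after this list), and via the config object throughout the application.\n\n7. **ComfyUI Description**:\n   - The application retrieves available samplers and schedulers from ComfyUI itself at startup. It does not take custom nodes or extensions into account.\n   - This information is used to create Zod enums for validation in workflows, but is otherwise not used by the application.\n\nFor example, listing the discovered model names (a sketch; the host and port assume a local deployment with defaults):\n\n```bash\n# Returns the model names discovered under MODEL_DIR, grouped by model type\ncurl http://localhost:3000/models\n```\n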
\n### Additional Notes\n\n- The application uses Zod for runtime type checking and validation of configuration values.\n- The configuration includes setup for both the wrapper application and ComfyUI itself.\n\nRemember to set these environment variables according to your specific deployment needs before running the application.\n\n## Using Synchronously\n\nThe default behavior of the API is to return an array of base64-encoded outputs in the response body.\nAll that is needed to do this is to omit the webhook and upload fields from the request body.\n
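\nFor example, a minimal synchronous request (a sketch; the prompt graph is elided):\n\n```bash\ncurl -X POST http://localhost:3000/prompt \\\n  -H \"Content-Type: application/json\" \\\n  -d '{ \"prompt\": {} }'\n```\n\n## Using with Webhooks\n\nComfyUI API sends two types of webhooks: System Events, which are emitted by ComfyUI itself, and Workflow Events, which are emitted by the API server. See [System Events](#system-events) for more information on System Events.\n\nIf a user includes the `.webhook_v2` field in a request to `/prompt` or any of the workflow endpoints, the server will send any completed outputs to the webhook URL provided in the request.\nIt will also send a webhook if the request fails.\n\nFor successful requests including the `.webhook_v2` field, a single webhook request will be sent once the entire workflow has completed, containing all outputs.\nWebhooks are sent as [Standard Webhooks](https://www.standardwebhooks.com/), and can be validated using the `WEBHOOK_SECRET` environment variable and any standard webhook validation library such as `svix`.\n\nFor example, a request that delivers its results to a webhook might look like this (a sketch, assuming the `webhook_v2` field carries the destination URL as a string; the URL is a placeholder):\n\n```bash\ncurl -X POST http://localhost:3000/prompt \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"prompt\": {},\n    \"webhook_v2\": \"https://your-service.example.com/webhooks/comfyui\"\n  }'\n```\n\n### prompt.complete\n\nThe webhook type name for a completed prompt is `prompt.complete`. The webhook will have the same schema as the synchronous response, with the addition of the `type` and `timestamp` fields:\n\n```json\n{\n  \"type\": \"prompt.complete\",\n  \"timestamp\": \"2025-01-01T00:00:00Z\",\n  \"id\": \"request-id\",\n  \"images\": [\"base64-encoded-image-1\", \"base64-encoded-image-2\"],\n  \"filenames\": [\"output-filename-1.png\", \"output-filename-2.png\"],\n  \"prompt\": {},\n  \"stats\": {}\n}\n```\n\nNote that if you include upload fields in your request, the `.images` field will contain the uploaded URLs instead of base64-encoded images.\n\n### prompt.failed\n\nThe webhook type name for a failed request is `prompt.failed`. 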
The webhook will have the following schema:\n\n```json\n{\n  \"type\": \"prompt.failed\",\n  \"timestamp\": \"2025-01-01T00:00:00Z\",\n  \"error\": \"error-message\",\n  \"id\": \"request-id\",\n  \"prompt\": {}\n}\n```\n\n### Validating Webhooks\n\n#### Node.js Example\n\n```shell\nnpm install svix\n```\n\n```javascript\nconst { Webhook } = require('svix')\n\n// The signing secret must match the server's WEBHOOK_SECRET\nconst secret = process.env.WEBHOOK_SECRET\n\n// Express.js middleware\nfunction validateWebhookSignature(req, res, next) {\n  const webhook = new Webhook(secret)\n  try {\n    webhook.verify(req.body, req.headers)\n    next()\n  } catch (error) {\n    console.error('Webhook verification failed:', error)\n    return res.status(401).send('Invalid signature')\n  }\n}\n```\n\n#### Python Example\n\n```shell\npip install svix\n```\n\n```python\nimport os\n\nfrom fastapi import Request, HTTPException\nfrom svix import Webhook\nfrom typing import Any, Dict\n\n# The signing secret must match the server's WEBHOOK_SECRET\nwebhook_secret = os.environ[\"WEBHOOK_SECRET\"]\n\nasync def validate_webhook(request: Request) -> Dict[str, Any]:\n    \"\"\"\n    FastAPI dependency to validate webhook signatures\n    \"\"\"\n    try:\n        # Get the raw body\n        body = await request.body()\n\n        # Create webhook instance\n        webhook = Webhook(webhook_secret)\n\n        # Verify the webhook signature\n        payload = webhook.verify(body, dict(request.headers))\n\n        return payload\n    except Exception as e:\n        print(f\"Webhook verification failed: {e}\")\n        raise HTTPException(status_code=401, detail=\"Invalid webhook signature\")\n```\n\n### DEPRECATED: Legacy Webhook Behavior\n\n**LEGACY BEHAVIOR**: For successful requests including the now-deprecated `.webhook` field, every output from the workflow will be sent as an individual webhook request. That means if your request generates 4 images, you will receive 4 webhook requests, each with a single image.\nThese webhooks are not signed, so we recommend migrating to the new `.webhook_v2` field as soon as possible.\n\n#### output.complete\n\nThe webhook event name for a completed output is `output.complete`. The webhook will have the following schema:\n\n```json\n{\n  \"event\": \"output.complete\",\n  \"image\": \"base64-encoded-image\",\n  \"id\": \"request-id\",\n  \"filename\": \"output-filename.png\",\n  \"prompt\": {}\n}\n```\n\n#### prompt.failed (legacy)\n\nThe webhook event name for a failed request is `prompt.failed`. The webhook will have the following schema:\n\n```json\n{\n  \"event\": \"prompt.failed\",\n  \"error\": \"error-message\",\n  \"id\": \"request-id\",\n  \"prompt\": {}\n}\n```\n\n## System Events\n\n> Note: From version 1.14.0, the frontend aggregate progress event `progress_state` is included in the supported system event set and can be forwarded like other events. Use `SYSTEM_WEBHOOK_EVENTS=progress_state` or `SYSTEM_WEBHOOK_EVENTS=all` to subscribe.\n\nComfyUI emits a number of events over websocket during the course of a workflow. These can be configured to be sent to a webhook using the `SYSTEM_WEBHOOK_URL` and `SYSTEM_WEBHOOK_EVENTS` environment variables. Additionally, any environment variable starting with `SYSTEM_META_` will be sent as metadata with the event. From version 1.13.0, these are signed, and can be validated using the `WEBHOOK_SECRET` environment variable and any standard webhook validation library such as `svix`. 
See [above](#validating-webhooks) for examples.\n\nAll webhooks have the same format, which is as follows:\n\n```json\n{\n  \"event\": \"event_name\",\n  \"data\": {},\n  \"metadata\": {}\n}\n```\n\nWhen running on SaladCloud, `.metadata` will always include lowercase versions of the [Default Environment Variables](https://docs.salad.com/container-engine/how-to-guides/environment-variables#default-environment-variables).\n\nThe following events are available:\n\n- \"status\"\n- \"progress\"\n- \"progress_state\"\n- \"executing\"\n- \"execution_start\"\n- \"execution_cached\"\n- \"executed\"\n- \"execution_success\"\n- \"execution_interrupted\"\n- \"execution_error\"\n- \"file_downloaded\"\n- \"file_uploaded\"\n- \"file_deleted\"\n\nThe `SYSTEM_WEBHOOK_EVENTS` environment variable should be a comma-separated list of the events you want to send to the webhook. If not set, no events will be sent.\n\nThe event name received in the webhook will be `comfy.${event_name}`, e.g. `comfy.progress`, or `storage.${event_name}` for file events.\n\n**Example**:\n\n```shell\nexport SYSTEM_WEBHOOK_EVENTS=\"progress,execution_start,execution_success,execution_error\"\n```\n\nThis will cause the API to send the `progress`, `execution_start`, `execution_success`, and `execution_error` events to the webhook.\n\nThe `SYSTEM_META_*` environment variables can be used to add metadata to the webhook events. For example:\n\n```shell\nexport SYSTEM_META_batch=abc\nexport SYSTEM_META_purpose=testing\n```\n\nThis will add `{\"batch\": \"abc\", \"purpose\": \"testing\"}` to the `.metadata` field on system webhooks.\n\nThe following are the schemas for the event data that will be sent to the webhook. This will populate the `.data` field on the webhook.\n\n### status\n\n```json\n{\n  \"type\": \"status\",\n  \"data\": {\n    \"status\": {\n      \"exec_info\": {\n        \"queue_remaining\": 3\n      }\n    }\n  },\n  \"sid\": \"abc123\"\n}\n```\n\n### progress\n\n```json\n{\n  \"type\": \"progress\",\n  \"data\": {\n    \"value\": 45,\n    \"max\": 100,\n    \"prompt_id\": \"123e4567-e89b-12d3-a456-426614174000\",\n    \"node\": \"42\"\n  },\n  \"sid\": \"xyz789\"\n}\n```\n\n### progress_state\n\n```json\n{\n  \"type\": \"progress_state\",\n  \"data\": {\n    \"prompt_id\": \"123e4567-e89b-12d3-a456-426614174000\",\n    \"nodes\": {\n      \"42\": {\n        \"value\": 5,\n        \"max\": 20,\n        \"state\": \"executing\",\n        \"node_id\": \"42\",\n        \"prompt_id\": \"123e4567-e89b-12d3-a456-426614174000\"\n      }\n    }\n  },\n  \"sid\": \"xyz789\"\n}\n```\n\n### executing\n\n```json\n{\n  \"type\": \"executing\",\n  \"data\": {\n    \"node\": \"42\",\n    \"display_node\": \"42\",\n    \"prompt_id\": \"123e4567-e89b-12d3-a456-426614174000\"\n  },\n  \"sid\": \"xyz789\"\n}\n```\n\n### execution_start\n\n```json\n{\n  \"type\": \"execution_start\",\n  \"data\": {\n    \"prompt_id\": \"123e4567-e89b-12d3-a456-426614174000\",\n    \"timestamp\": 1705505423000\n  },\n  \"sid\": \"xyz789\"\n}\n```\n\n### execution_cached\n\n```json\n{\n  \"type\": \"execution_cached\",\n  \"data\": {\n    \"nodes\": [\"42\", \"7\", \"13\"],\n    \"prompt_id\": \"123e4567-e89b-12d3-a456-426614174000\",\n    \"timestamp\": 1705505423000\n  },\n  \"sid\": \"xyz789\"\n}\n```\n\n### executed\n\n```json\n{\n  \"type\": \"executed\",\n  \"data\": {\n    \"node\": \"42\",\n    \"display_node\": \"42\",\n    \"output\": {},\n    \"prompt_id\": \"123e4567-e89b-12d3-a456-426614174000\"\n  },\n  \"sid\": \"xyz789\"\n}\n```\n\n### 
execution_success\n\n```json\n{\n  \"type\": \"execution_success\",\n  \"data\": {\n    \"prompt_id\": \"123e4567-e89b-12d3-a456-426614174000\",\n    \"timestamp\": 1705505423000\n  },\n  \"sid\": \"xyz789\"\n}\n```\n\n### execution_interrupted\n\n```json\n{\n  \"type\": \"execution_interrupted\",\n  \"data\": {\n    \"prompt_id\": \"123e4567-e89b-12d3-a456-426614174000\",\n    \"node_id\": \"42\",\n    \"node_type\": \"KSampler\",\n    \"executed\": []\n  },\n  \"sid\": \"xyz789\"\n}\n```\n\n### execution_error\n\n```json\n{\n  \"type\": \"execution_error\",\n  \"data\": {\n    \"prompt_id\": \"123e4567-e89b-12d3-a456-426614174000\",\n    \"node_id\": \"42\",\n    \"node_type\": \"KSampler\",\n    \"executed\": [],\n    \"exception_message\": \"CUDA out of memory. Tried to allocate 2.20 GiB\",\n    \"exception_type\": \"RuntimeError\",\n    \"traceback\": \"Traceback (most recent call last):\\n  File \\\"nodes.py\\\", line 245, in sample\\n    samples = sampler.sample(model, noise, steps)\",\n    \"current_inputs\": {\n      \"seed\": 42,\n      \"steps\": 20,\n      \"cfg\": 7.5,\n      \"sampler_name\": \"euler\"\n    },\n    \"current_outputs\": []\n  },\n  \"sid\": \"xyz789\"\n}\n```\n\n### file_downloaded\n\n```jsonc\n{\n  // Where the file was downloaded from\n  \"url\": \"https://example.com/model.safetensors\",\n\n  // Local path where the file was saved\n  \"local_path\": \"/opt/ComfyUI/models/model.safetensors\",\n\n  // Size of the downloaded file in bytes\n  \"size\": 123456789,\n\n  // Duration of the download in seconds\n  \"duration\": 2.34\n}\n```\n\n### file_uploaded\n\n```jsonc\n{\n  // Local path of the file that was uploaded\n  \"local_path\": \"/opt/ComfyUI/output/image.png\",\n\n  // URL where the file was uploaded to\n  \"url\": \"s3://my-bucket/images/image.png\",\n\n  // Size of the uploaded file in bytes\n  \"size\": 123456,\n\n  // Duration of the upload in seconds\n  \"duration\": 0.56\n}\n```\n\n### file_deleted\n\n```jsonc\n{\n  // URL of the file that was deleted. Note there are edge cases where this may be unknown, and the value will be \"unknown\".\n  \"url\": \"s3://my-bucket/models/old_model.safetensors\",\n\n  // Local path of the file that was deleted\n  \"local_path\": \"/opt/ComfyUI/models/old_model.safetensors\",\n\n  // Size of the deleted file in bytes\n  \"size\": 987654321\n}\n```\n\n## Prebuilt Docker Images\n\nYou can find ready-to-go docker images under [Packages](https://github.com/orgs/SaladTechnologies/packages?repo_name=comfyui-api) in this repository.\n\nThe images are tagged with the comfyui-api version they are built with, and the comfyui version they are built for, along with their pytorch version and CUDA version. There are versions for both CUDA runtime and CUDA devel, so you can choose the one that best fits your needs.\n\nThe tag pattern is `ghcr.io/saladtechnologies/comfyui-api:comfy<comfy-version>-api<api-version>-torch<pytorch-version>-cuda<cuda-version>-<runtime|devel>` where:\n\n- `<comfy-version>` is the version of ComfyUI used\n- `<api-version>` is the version of the comfyui-api server\n- `<pytorch-version>` is the version of PyTorch used\n- `<cuda-version>` is the version of CUDA used\n- `<runtime|devel>` is whether the image is built with the CUDA runtime or the CUDA devel image. 
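The devel image is much larger, but includes the full CUDA toolkit, which is required for some custom nodes.\n\nFor example, pulling the runtime API image for the default versions used in this repo might look like this (a sketch; substitute the versions you need):\n\n```bash\n# Tag assembled from the pattern above: comfy 0.19.3, api 1.18.1, torch 2.8.0, cuda 12.8\ndocker pull ghcr.io/saladtechnologies/comfyui-api:comfy0.19.3-api1.18.1-torch2.8.0-cuda12.8-runtime\n```\n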
\n**If the tag doesn't have `api<api-version>`, it does not include the api, and is just the ComfyUI base image.**\n\nIncluded in the API images are the following utilities:\n\n- `git`\n- `curl`\n- `wget`\n- `unzip`\n- `ComfyUI`\n- `comfy` CLI\n\nAll of SaladCloud's image and video generation [recipes](https://docs.salad.com/products/recipes/overview) are built on top of these images, so you can use them as a base for your own workflows. For examples of using this with custom models and nodes, check out the [Salad Recipes](https://github.com/SaladTechnologies/salad-recipes/tree/master/src) repository on GitHub.\n\n## Considerations for Running on SaladCloud\n\n- **SaladCloud's Container Gateway has a 100s timeout.** It is possible to construct very long-running ComfyUI workflows, such as for video generation, that would exceed this timeout. In this scenario, you will need to either use a webhook to receive the results, or integrate with SaladCloud's [Job Queues](https://docs.salad.com/products/sce/job-queues/job-queues#job-queues) to handle long-running workflows.\n- **SaladCloud's maximum container image size is 35 GB (compressed).** The base [comfyui-api image](https://github.com/SaladTechnologies/comfyui-api/pkgs/container/comfyui-api) is around 3.25 GB (compressed), so any models and extensions must fit in the remaining space.\n\n## Custom Workflows\n\nCustom workflows offer a simple and powerful way to create new endpoints for your specific use cases, abstracting away the complexities of the underlying ComfyUI node-based prompt format.\nYou can create workflows in either JavaScript or TypeScript, and they can be as simple or complex as you need them to be.\nWorkflows are loaded at runtime, even when you use the pre-compiled binary releases or docker images, so you can easily add new workflows without needing to rebuild the image.\n\n[See the guide on generating new workflow endpoints](./DEVELOPING.md#generating-new-workflow-endpoints) for more information.\n\n## Contributing\n\nContributions are welcome!\nSee the [Development](./DEVELOPMENT.md) guide for more information on how to develop, test, and contribute to this project.\nComfyUI is a powerful tool with MANY options, and it's likely that not all of them are currently well supported by the `comfyui-api` server.\nPlease open an issue with as much information as possible about the problem you're facing or the feature you need.\nIf you have encountered a bug, please include the steps to reproduce it, and any relevant logs or error messages.\nIf you are able, adding a failing test is the best way to ensure your issue is resolved quickly.\nLet's make productionizing ComfyUI as easy as possible!\n\n## Architecture\n\nThe server is built with [Fastify](https://www.fastify.io/), a fast and low-overhead web framework for Node.js.\nIt sits in front of ComfyUI, and provides a RESTful API for interacting with ComfyUI.\n\n![Architecture Diagram](./ComfyUI%20API%20Diagram.png)\n"
  },
  {
    "path": "build-and-release",
    "content": "#! /usr/bin/env bash\n\nnpm install\nnpm run build-binary\nversion=$(node -p \"require('./package.json').version\")\necho \"Version: $version\"\n\ngh release create $version \\\n  --title \"Release $version\" \\\n  --notes \"Release $version\"\n\ngh release upload $version ./bin/comfyui-api#Linux_x64 --clobber"
  },
  {
    "path": "build-binary",
    "content": "#! /usr/bin/env bash\nset -e\n\nnpm install\nnpx tsc\nnpx pkg --options \"stack-size=65500\" ."
  },
  {
    "path": "claude-endpoint-creation-prompt.md",
    "content": "# Instructions\nYour job is to convert a json workflow graph for ai image generation into a typescript function.\n- You should define a type for the input, using Zod for validation.\n- You should use `.describe` to describe each parameter to the best of your ability.\n- Filename prefix is always set by the system in a different location.\n- Do not extrapolate enum values. Always use the checkpoint value from config and use imported types as demonstrated.\n- Use snake_case for multi-word parameters.\n- LoadImage inputs will always be accepted as either a url or base64 encoded string\n- Only output the typescript, with no additional commentary.\n\n\n# Example Output\n\nimport { z } from \"zod\";\nimport config from \"../config\";\n\nlet checkpoint: any = config.models.checkpoints.enum.optional();\nif (config.warmupCkpt) {\n  checkpoint = checkpoint.default(config.warmupCkpt);\n}\n\nconst ComfyNodeSchema = z.object({\n  inputs: z.any(),\n  class_type: z.string(),\n  _meta: z.any().optional(),\n});\n\ntype ComfyNode = z.infer<typeof ComfyNodeSchema>;\n\ninterface Workflow {\n  RequestSchema: z.ZodObject<any, any>;\n  generateWorkflow: (input: any) => ComfyPrompt;\n  description?: string;\n  summary?: string;\n}\n\nconst RequestSchema = z.object({\n  prompt: z.string().describe(\"The positive prompt for image generation\"),\n  width: z\n    .number()\n    .int()\n    .min(256)\n    .max(2048)\n    .optional()\n    .default(1024)\n    .describe(\"Width of the generated image\"),\n  height: z\n    .number()\n    .int()\n    .min(256)\n    .max(2048)\n    .optional()\n    .default(1024)\n    .describe(\"Height of the generated image\"),\n  seed: z\n    .number()\n    .int()\n    .optional()``\n    .default(() => Math.floor(Math.random() * 1000000000000000))\n    .describe(\"Seed for random number generation\"),\n  steps: z\n    .number()\n    .int()\n    .min(1)\n    .max(100)\n    .optional()\n    .default(4)\n    .describe(\"Number of sampling steps\"),\n  cfg_scale: z\n    .number()\n    .min(0)\n    .max(20)\n    .optional()\n    .default(1)\n    .describe(\"Classifier-free guidance scale\"),\n  sampler_name: config.samplers\n    .optional()\n    .default(\"euler\")\n    .describe(\"Name of the sampler to use\"),\n  scheduler: config.schedulers\n    .optional()\n    .default(\"simple\")\n    .describe(\"Type of scheduler to use\"),\n  denoise: z\n    .number()\n    .min(0)\n    .max(1)\n    .optional()\n    .default(1)\n    .describe(\"Denoising strength\"),\n  checkpoint,\n});\n\ntype InputType = z.infer<typeof RequestSchema>;\n\nfunction generateWorkflow(input: InputType): ComfyPrompt {\n  return {\n    \"6\": {\n      inputs: {\n        text: input.prompt,\n        clip: [\"30\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Positive Prompt)\",\n      },\n    },\n    \"8\": {\n      inputs: {\n        samples: [\"31\", 0],\n        vae: [\"30\", 2],\n      },\n      class_type: \"VAEDecode\",\n      _meta: {\n        title: \"VAE Decode\",\n      },\n    },\n    \"9\": {\n      inputs: {\n        filename_prefix: \"Flux\",\n        images: [\"8\", 0],\n      },\n      class_type: \"SaveImage\",\n      _meta: {\n        title: \"Save Image\",\n      },\n    },\n    \"27\": {\n      inputs: {\n        width: input.width,\n        height: input.height,\n        batch_size: 1,\n      },\n      class_type: \"EmptySD3LatentImage\",\n      _meta: {\n        title: \"EmptySD3LatentImage\",\n      },\n    },\n    \"30\": {\n  
    inputs: {\n        ckpt_name: input.checkpoint,\n      },\n      class_type: \"CheckpointLoaderSimple\",\n      _meta: {\n        title: \"Load Checkpoint\",\n      },\n    },\n    \"31\": {\n      inputs: {\n        seed: input.seed,\n        steps: input.steps,\n        cfg: input.cfg_scale,\n        sampler_name: input.sampler_name,\n        scheduler: input.scheduler,\n        denoise: input.denoise,\n        model: [\"30\", 0],\n        positive: [\"6\", 0],\n        negative: [\"33\", 0],\n        latent_image: [\"27\", 0],\n      },\n      class_type: \"KSampler\",\n      _meta: {\n        title: \"KSampler\",\n      },\n    },\n    \"33\": {\n      inputs: {\n        text: \"\",\n        clip: [\"30\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Negative Prompt)\",\n      },\n    },\n  };\n}\n\nconst workflow: Workflow = {\n  RequestSchema,\n  generateWorkflow,\n  summary: \"Text to Image\",\n  description: \"Generate an image from a text prompt\",\n};\n\nexport default workflow;"
  },
  {
    "path": "docker/api.dockerfile",
    "content": "ARG base=runtime\nARG comfy_version=0.19.3\nARG pytorch_version=2.8.0\nARG cuda_version=12.8\n\nFROM ghcr.io/saladtechnologies/comfyui-api:comfy${comfy_version}-torch${pytorch_version}-cuda${cuda_version}-${base}\n\nENV WORKFLOW_DIR=/workflows\nENV STARTUP_CHECK_MAX_TRIES=30\n\nARG api_version=1.18.1\n\nADD https://github.com/SaladTechnologies/comfyui-api/releases/download/${api_version}/comfyui-api .\n\nRUN chmod +x comfyui-api\n\nCMD [\"./comfyui-api\"]"
  },
  {
    "path": "docker/build-api-images",
    "content": "#! /usr/bin/bash\n\nusage=\"Usage: $0 [comfy_version] [torch_version] [cuda_version] [api_version]\"\n\ncomfy_version=${1:-0.19.3}\ntorch_version=${2:-2.8.0}\ncuda_version=${3:-12.8}\n\ncurrent_api_version=$(cat ../package.json | jq -r '.version')\napi_version=${4:-$current_api_version}\n\nbases=(\"devel\" \"runtime\")\n\nfor base in \"${bases[@]}\"; do\n  docker build -t ghcr.io/saladtechnologies/comfyui-api:comfy$comfy_version-api$api_version-torch$torch_version-cuda$cuda_version-$base \\\n    -f api.dockerfile \\\n    --build-arg comfy_version=$comfy_version \\\n    --build-arg base=$base \\\n    --build-arg pytorch_version=$torch_version \\\n    --build-arg cuda_version=$cuda_version \\\n    --build-arg api_version=$api_version \\\n    .\n  docker push ghcr.io/saladtechnologies/comfyui-api:comfy$comfy_version-api$api_version-torch$torch_version-cuda$cuda_version-$base\n\n  if [ $base == \"runtime\" ]; then\n    docker tag ghcr.io/saladtechnologies/comfyui-api:comfy$comfy_version-api$api_version-torch$torch_version-cuda$cuda_version-runtime ghcr.io/saladtechnologies/comfyui-api:latest\n    docker push ghcr.io/saladtechnologies/comfyui-api:latest\n    docker image rm ghcr.io/saladtechnologies/comfyui-api:latest\n  fi\n\n   # Remove the image to make space for the next one. Github actions runners don't get much storage.\n  docker image rm ghcr.io/saladtechnologies/comfyui-api:comfy$comfy_version-api$api_version-torch$torch_version-cuda$cuda_version-$base\ndone\n"
  },
  {
    "path": "docker/build-comfy-base-images",
    "content": "#! /usr/bin/bash\n\ncomfy_version=${1:-0.19.3}\ntorch_version=${2:-2.8.0}\ncuda_version=${3:-12.8}\nbases=(\"devel\" \"runtime\")\n\nfor base in \"${bases[@]}\"; do\n  docker build -t ghcr.io/saladtechnologies/comfyui-api:comfy$comfy_version-torch$torch_version-cuda$cuda_version-$base \\\n    -f comfyui.dockerfile \\\n    --no-cache \\\n    --build-arg comfy_version=$comfy_version \\\n    --build-arg base=$base \\\n    --build-arg pytorch_version=$torch_version \\\n    --build-arg cuda_version=$cuda_version \\\n    .\ndone\n"
  },
  {
    "path": "docker/comfyui.dockerfile",
    "content": "ARG base=runtime\nARG pytorch_version=2.8.0\nARG cuda_version=12.8\n\nFROM pytorch/pytorch:${pytorch_version}-cuda${cuda_version}-cudnn9-${base}\n\nENV DEBIAN_FRONTEND=noninteractive\nENV PIP_PREFER_BINARY=1\nENV CMAKE_BUILD_PARALLEL_LEVEL=8\n\nRUN apt-get update && apt-get upgrade -y && apt-get install -y \\\n  curl \\\n  git \\\n  unzip \\\n  wget \\\n  && apt clean -y && rm -rf /var/lib/apt/lists/*\n\n# Install comfy-cli, which makes it easy to install custom nodes and other comfy specific functionality.\nSHELL [\"/bin/bash\", \"-c\"]\n\nRUN pip install --no-cache-dir --upgrade pip\nRUN pip install --no-cache-dir uv\nRUN uv pip install --no-cache-dir --system \"comfy-cli==1.5.1\" \"huggingface_hub[cli]\"\n\nWORKDIR /opt\n\nARG comfy_version=0.19.3\n\nRUN git clone --depth 1 --branch v${comfy_version} https://github.com/comfyanonymous/ComfyUI.git\n\nWORKDIR /opt/ComfyUI\n\nARG cuda_version=12.8\n\nRUN uv pip install --no-cache-dir --system torchaudio --index-url https://download.pytorch.org/whl/cu${cuda_version//./}\nRUN uv pip install --no-cache-dir --system -r requirements.txt\n\nENV COMFY_HOME=/opt/ComfyUI\n\nRUN comfy --skip-prompt tracking disable\nRUN comfy --skip-prompt set-default ${COMFY_HOME}\n\nRUN git clone https://github.com/Comfy-Org/ComfyUI-Manager.git ./custom_nodes/ComfyUI-Manager\nRUN uv pip install --system --no-cache-dir -r ./custom_nodes/ComfyUI-Manager/requirements.txt\n\nENV MODEL_DIR=${COMFY_HOME}/models\nENV OUTPUT_DIR=${COMFY_HOME}/output\nENV INPUT_DIR=${COMFY_HOME}/input\nENV CMD=\"comfy --workspace ${COMFY_HOME} launch -- --listen *\"\nENV BASE=\"\"\n\nCMD [\"bash\", \"-c\", \"comfy --workspace ${COMFY_HOME} launch -- --listen '*'\"]"
  },
  {
    "path": "docker/push-comfy-base-images",
    "content": "#! /usr/bin/bash\n\nusage=\"Usage: $0 [comfy_version] [torch_version] [cuda_version]\"\n\ncomfy_version=${1:-0.19.3}\ntorch_version=${2:-2.8.0}\ncuda_version=${3:-12.8}\n\nbases=(\"devel\" \"runtime\")\n\nfor base in \"${bases[@]}\"; do\n  docker push ghcr.io/saladtechnologies/comfyui-api:comfy$comfy_version-torch$torch_version-cuda$cuda_version-$base\ndone\n\ndocker tag ghcr.io/saladtechnologies/comfyui-api:comfy$comfy_version-torch$torch_version-cuda$cuda_version-runtime ghcr.io/saladtechnologies/comfyui-api:base\ndocker push ghcr.io/saladtechnologies/comfyui-api:base"
  },
  {
    "path": "docker-compose.yml",
    "content": "services:\n  comfyui:\n    image: ghcr.io/saladtechnologies/comfyui-api:comfy0.19.3-torch2.8.0-cuda12.8-runtime\n    volumes:\n      - type: bind\n        source: ./bin\n        target: /app/bin\n      - type: bind\n        source: ./manifest.yml\n        target: /app/manifest.yml\n      - ./cache:/root/.cache/comfyui-api\n      - ./example-workflows/sd1.5:/workflows\n    command: [\"/app/bin/comfyui-api\"]\n    ports:\n      - \"3000:3000\"\n      - \"8188:8188\"\n    environment:\n      LOG_LEVEL: \"debug\"\n      AWS_ENDPOINT_URL: \"http://localstack:4566\"\n      AWS_ACCESS_KEY_ID: \"test\"\n      AWS_SECRET_ACCESS_KEY: \"test\"\n      AWS_REGION: \"us-east-1\"\n      MANIFEST: \"/app/manifest.yml\"\n      STARTUP_CHECK_MAX_TRIES: \"30\"\n      HF_TOKEN: ${HF_TOKEN}\n      AZURE_STORAGE_CONNECTION_STRING: \"DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite:10000/devstoreaccount1;\"\n    #   ALWAYS_RESTART_COMFYUI: \"true\"\n      SYSTEM_WEBHOOK_URL: \"http://host.docker.internal:1234/system\"\n      SYSTEM_WEBHOOK_EVENTS: all\n      WEBHOOK_SECRET: testsecret\n    deploy:\n      resources:\n        reservations:\n          devices:\n            - driver: nvidia\n              capabilities: [ gpu ]\n              count: all\n  localstack:\n    image: localstack/localstack\n    ports:\n      - \"4566:4566\"  # LocalStack Gateway\n      - \"4510-4559:4510-4559\"  # External services\n    environment:\n      - SERVICES=s3\n    volumes:\n      - \"/var/run/docker.sock:/var/run/docker.sock\"\n  azurite:\n    image: mcr.microsoft.com/azure-storage/azurite\n    ports:\n      - \"10000:10000\"  # Blob service\n    command: \"azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --skipApiVersionCheck\"\n  file-server:\n    build:\n      context: .\n      dockerfile: test/Dockerfile.file-server\n    ports:\n      - \"8080:8080\"\n    environment:\n      - PORT=8080\n      - STORAGE_DIR=/storage\n      - REQUIRE_AUTH=false"
  },
  {
    "path": "example-workflows/flux/img2img.json",
    "content": "{\n  \"6\": {\n    \"inputs\": {\n      \"text\": \"A noble wolf stands by a raging river in the style of a japanese scroll\",\n      \"clip\": [\n        \"30\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Positive Prompt)\"\n    }\n  },\n  \"8\": {\n    \"inputs\": {\n      \"samples\": [\n        \"31\",\n        0\n      ],\n      \"vae\": [\n        \"30\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"9\": {\n    \"inputs\": {\n      \"filename_prefix\": \"ComfyUI\",\n      \"images\": [\n        \"8\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  },\n  \"27\": {\n    \"inputs\": {\n      \"width\": 1024,\n      \"height\": 1024,\n      \"batch_size\": 1\n    },\n    \"class_type\": \"EmptySD3LatentImage\",\n    \"_meta\": {\n      \"title\": \"EmptySD3LatentImage\"\n    }\n  },\n  \"30\": {\n    \"inputs\": {\n      \"ckpt_name\": \"flux1-schnell-fp8.safetensors\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint\"\n    }\n  },\n  \"31\": {\n    \"inputs\": {\n      \"seed\": 226018262510838,\n      \"steps\": 4,\n      \"cfg\": 1,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"simple\",\n      \"denoise\": 0.8,\n      \"model\": [\n        \"30\",\n        0\n      ],\n      \"positive\": [\n        \"6\",\n        0\n      ],\n      \"negative\": [\n        \"33\",\n        0\n      ],\n      \"latent_image\": [\n        \"38\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  },\n  \"33\": {\n    \"inputs\": {\n      \"text\": \"\",\n      \"clip\": [\n        \"30\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Negative Prompt)\"\n    }\n  },\n  \"37\": {\n    \"inputs\": {\n      \"image\": \"IMG_0655.JPG\",\n      \"upload\": \"image\"\n    },\n    \"class_type\": \"LoadImage\",\n    \"_meta\": {\n      \"title\": \"Load Image\"\n    }\n  },\n  \"38\": {\n    \"inputs\": {\n      \"pixels\": [\n        \"40\",\n        0\n      ],\n      \"vae\": [\n        \"30\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEEncode\",\n    \"_meta\": {\n      \"title\": \"VAE Encode\"\n    }\n  },\n  \"40\": {\n    \"inputs\": {\n      \"width\": 1024,\n      \"height\": 1024,\n      \"interpolation\": \"nearest\",\n      \"method\": \"fill / crop\",\n      \"condition\": \"always\",\n      \"multiple_of\": 8,\n      \"image\": [\n        \"37\",\n        0\n      ]\n    },\n    \"class_type\": \"ImageResize+\",\n    \"_meta\": {\n      \"title\": \"🔧 Image Resize\"\n    }\n  }\n}"
  },
  {
    "path": "example-workflows/flux/img2img.ts",
    "content": "import { z } from \"zod\";\n// This gets evaluated in the context of src/workflows, so imports must be relative to that directory\nimport { ComfyPrompt, Workflow } from \"../types\";\nimport config from \"../config\";\n\nlet checkpoint: any = config.models.checkpoints.enum.optional();\nif (config.warmupCkpt) {\n  checkpoint = checkpoint.default(config.warmupCkpt);\n}\n\nconst RequestSchema = z.object({\n  prompt: z.string().describe(\"The positive prompt for image generation\"),\n  width: z\n    .number()\n    .int()\n    .min(256)\n    .max(2048)\n    .optional()\n    .default(1024)\n    .describe(\"Width of the generated image\"),\n  height: z\n    .number()\n    .int()\n    .min(256)\n    .max(2048)\n    .optional()\n    .default(1024)\n    .describe(\"Height of the generated image\"),\n  seed: z\n    .number()\n    .int()\n    .optional()\n    .default(() => Math.floor(Math.random() * 1000000000000000))\n    .describe(\"Seed for random number generation\"),\n  steps: z\n    .number()\n    .int()\n    .min(1)\n    .max(100)\n    .optional()\n    .default(4)\n    .describe(\"Number of sampling steps\"),\n  cfg_scale: z\n    .number()\n    .min(0)\n    .max(20)\n    .optional()\n    .default(1)\n    .describe(\"Classifier-free guidance scale\"),\n  sampler_name: config.samplers\n    .optional()\n    .default(\"euler\")\n    .describe(\"Name of the sampler to use\"),\n  scheduler: config.schedulers\n    .optional()\n    .default(\"simple\")\n    .describe(\"Type of scheduler to use\"),\n  denoise: z\n    .number()\n    .min(0)\n    .max(1)\n    .optional()\n    .default(0.8)\n    .describe(\"Denoising strength\"),\n  checkpoint,\n  image: z.string().describe(\"Input image for img2img\"),\n  interpolation: z\n    .enum([\"nearest\"])\n    .optional()\n    .default(\"nearest\")\n    .describe(\"Interpolation method for image resizing\"),\n  resize_method: z\n    .enum([\"fill / crop\"])\n    .optional()\n    .default(\"fill / crop\")\n    .describe(\"Method for resizing the image\"),\n  resize_condition: z\n    .enum([\"always\"])\n    .optional()\n    .default(\"always\")\n    .describe(\"Condition for when to resize the image\"),\n  multiple_of: z\n    .number()\n    .int()\n    .optional()\n    .default(8)\n    .describe(\"Ensure image dimensions are multiples of this value\"),\n});\n\ntype InputType = z.infer<typeof RequestSchema>;\n\nfunction generateWorkflow(input: InputType): ComfyPrompt {\n  return {\n    \"6\": {\n      inputs: {\n        text: input.prompt,\n        clip: [\"30\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Positive Prompt)\",\n      },\n    },\n    \"8\": {\n      inputs: {\n        samples: [\"31\", 0],\n        vae: [\"30\", 2],\n      },\n      class_type: \"VAEDecode\",\n      _meta: {\n        title: \"VAE Decode\",\n      },\n    },\n    \"9\": {\n      inputs: {\n        filename_prefix: \"ComfyUI\",\n        images: [\"8\", 0],\n      },\n      class_type: \"SaveImage\",\n      _meta: {\n        title: \"Save Image\",\n      },\n    },\n    \"30\": {\n      inputs: {\n        ckpt_name: input.checkpoint,\n      },\n      class_type: \"CheckpointLoaderSimple\",\n      _meta: {\n        title: \"Load Checkpoint\",\n      },\n    },\n    \"31\": {\n      inputs: {\n        seed: input.seed,\n        steps: input.steps,\n        cfg: input.cfg_scale,\n        sampler_name: input.sampler_name,\n        scheduler: input.scheduler,\n        denoise: input.denoise,\n        model: 
[\"30\", 0],\n        positive: [\"6\", 0],\n        negative: [\"33\", 0],\n        latent_image: [\"38\", 0],\n      },\n      class_type: \"KSampler\",\n      _meta: {\n        title: \"KSampler\",\n      },\n    },\n    \"33\": {\n      inputs: {\n        text: \"\",\n        clip: [\"30\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Negative Prompt)\",\n      },\n    },\n    \"37\": {\n      inputs: {\n        image: input.image,\n        upload: \"image\",\n      },\n      class_type: \"LoadImage\",\n      _meta: {\n        title: \"Load Image\",\n      },\n    },\n    \"38\": {\n      inputs: {\n        pixels: [\"40\", 0],\n        vae: [\"30\", 2],\n      },\n      class_type: \"VAEEncode\",\n      _meta: {\n        title: \"VAE Encode\",\n      },\n    },\n    \"40\": {\n      inputs: {\n        width: input.width,\n        height: input.height,\n        interpolation: input.interpolation,\n        method: input.resize_method,\n        condition: input.resize_condition,\n        multiple_of: input.multiple_of,\n        image: [\"37\", 0],\n      },\n      class_type: \"ImageResize+\",\n      _meta: {\n        title: \"🔧 Image Resize\",\n      },\n    },\n  };\n}\n\nconst workflow: Workflow = {\n  RequestSchema,\n  generateWorkflow,\n  summary: \"Image-to-Image\",\n  description: \"Text-guided Image-to-Image generation\",\n};\n\nexport default workflow;\n"
  },
  {
    "path": "example-workflows/flux/txt2img.json",
    "content": "{\n  \"6\": {\n    \"inputs\": {\n      \"text\": \"a bottle with a beautiful rainbow galaxy inside it on top of a wooden table in the middle of a modern kitchen beside a plate of vegetables and mushrooms and a wine glasse that contains a planet earth with a plate with a half eaten apple pie on it\",\n      \"clip\": [\n        \"30\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Positive Prompt)\"\n    }\n  },\n  \"8\": {\n    \"inputs\": {\n      \"samples\": [\n        \"31\",\n        0\n      ],\n      \"vae\": [\n        \"30\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"9\": {\n    \"inputs\": {\n      \"filename_prefix\": \"Flux\",\n      \"images\": [\n        \"8\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  },\n  \"27\": {\n    \"inputs\": {\n      \"width\": 1024,\n      \"height\": 1024,\n      \"batch_size\": 1\n    },\n    \"class_type\": \"EmptySD3LatentImage\",\n    \"_meta\": {\n      \"title\": \"EmptySD3LatentImage\"\n    }\n  },\n  \"30\": {\n    \"inputs\": {\n      \"ckpt_name\": \"flux1-schnell-fp8.safetensors\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint\"\n    }\n  },\n  \"31\": {\n    \"inputs\": {\n      \"seed\": 1030319533692526,\n      \"steps\": 4,\n      \"cfg\": 1,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"simple\",\n      \"denoise\": 1,\n      \"model\": [\n        \"30\",\n        0\n      ],\n      \"positive\": [\n        \"6\",\n        0\n      ],\n      \"negative\": [\n        \"33\",\n        0\n      ],\n      \"latent_image\": [\n        \"27\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  },\n  \"33\": {\n    \"inputs\": {\n      \"text\": \"\",\n      \"clip\": [\n        \"30\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Negative Prompt)\"\n    }\n  }\n}"
  },
  {
    "path": "example-workflows/flux/txt2img.ts",
    "content": "import { z } from \"zod\";\n// This gets evaluated in the context of src/workflows, so imports must be relative to that directory\nimport { ComfyPrompt, Workflow } from \"../types\";\nimport config from \"../config\";\n\nlet checkpoint: any = config.models.checkpoints.enum.optional();\nif (config.warmupCkpt) {\n  checkpoint = checkpoint.default(config.warmupCkpt);\n}\n\nconst RequestSchema = z.object({\n  prompt: z.string().describe(\"The positive prompt for image generation\"),\n  width: z\n    .number()\n    .int()\n    .min(256)\n    .max(2048)\n    .optional()\n    .default(1024)\n    .describe(\"Width of the generated image\"),\n  height: z\n    .number()\n    .int()\n    .min(256)\n    .max(2048)\n    .optional()\n    .default(1024)\n    .describe(\"Height of the generated image\"),\n  seed: z\n    .number()\n    .int()\n    .optional()\n    .default(() => Math.floor(Math.random() * 1000000000000000))\n    .describe(\"Seed for random number generation\"),\n  steps: z\n    .number()\n    .int()\n    .min(1)\n    .max(100)\n    .optional()\n    .default(4)\n    .describe(\"Number of sampling steps\"),\n  cfg_scale: z\n    .number()\n    .min(0)\n    .max(20)\n    .optional()\n    .default(1)\n    .describe(\"Classifier-free guidance scale\"),\n  sampler_name: config.samplers\n    .optional()\n    .default(\"euler\")\n    .describe(\"Name of the sampler to use\"),\n  scheduler: config.schedulers\n    .optional()\n    .default(\"simple\")\n    .describe(\"Type of scheduler to use\"),\n  denoise: z\n    .number()\n    .min(0)\n    .max(1)\n    .optional()\n    .default(1)\n    .describe(\"Denoising strength\"),\n  checkpoint,\n});\n\ntype InputType = z.infer<typeof RequestSchema>;\n\nfunction generateWorkflow(input: InputType): ComfyPrompt {\n  return {\n    \"6\": {\n      inputs: {\n        text: input.prompt,\n        clip: [\"30\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Positive Prompt)\",\n      },\n    },\n    \"8\": {\n      inputs: {\n        samples: [\"31\", 0],\n        vae: [\"30\", 2],\n      },\n      class_type: \"VAEDecode\",\n      _meta: {\n        title: \"VAE Decode\",\n      },\n    },\n    \"9\": {\n      inputs: {\n        filename_prefix: \"Flux\",\n        images: [\"8\", 0],\n      },\n      class_type: \"SaveImage\",\n      _meta: {\n        title: \"Save Image\",\n      },\n    },\n    \"27\": {\n      inputs: {\n        width: input.width,\n        height: input.height,\n        batch_size: 1,\n      },\n      class_type: \"EmptySD3LatentImage\",\n      _meta: {\n        title: \"EmptySD3LatentImage\",\n      },\n    },\n    \"30\": {\n      inputs: {\n        ckpt_name: input.checkpoint,\n      },\n      class_type: \"CheckpointLoaderSimple\",\n      _meta: {\n        title: \"Load Checkpoint\",\n      },\n    },\n    \"31\": {\n      inputs: {\n        seed: input.seed,\n        steps: input.steps,\n        cfg: input.cfg_scale,\n        sampler_name: input.sampler_name,\n        scheduler: input.scheduler,\n        denoise: input.denoise,\n        model: [\"30\", 0],\n        positive: [\"6\", 0],\n        negative: [\"33\", 0],\n        latent_image: [\"27\", 0],\n      },\n      class_type: \"KSampler\",\n      _meta: {\n        title: \"KSampler\",\n      },\n    },\n    \"33\": {\n      inputs: {\n        text: \"\",\n        clip: [\"30\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Negative Prompt)\",\n      
},\n    },\n  };\n}\n\nconst workflow: Workflow = {\n  RequestSchema,\n  generateWorkflow,\n};\n\nexport default workflow;\n"
  },
  {
    "path": "example-workflows/sd1.5/img2img.js",
    "content": "\"use strict\";\nObject.defineProperty(exports, \"__esModule\", { value: true });\nvar zod_1 = require(\"zod\");\nvar config_1 = require(\"../config\");\nvar RequestSchema = zod_1.z.object({\n    prompt: zod_1.z.string().describe(\"The positive prompt for image generation\"),\n    negative_prompt: zod_1.z\n        .string()\n        .optional()\n        .default(\"text, watermark\")\n        .describe(\"The negative prompt for image generation\"),\n    seed: zod_1.z\n        .number()\n        .int()\n        .optional()\n        .default(function () { return Math.floor(Math.random() * 1000000000000000); })\n        .describe(\"Seed for random number generation\"),\n    steps: zod_1.z\n        .number()\n        .int()\n        .min(1)\n        .max(100)\n        .optional()\n        .default(15)\n        .describe(\"Number of sampling steps\"),\n    cfg_scale: zod_1.z\n        .number()\n        .min(0)\n        .max(20)\n        .optional()\n        .default(8)\n        .describe(\"Classifier-free guidance scale\"),\n    sampler_name: config_1.default.samplers\n        .optional()\n        .default(\"euler\")\n        .describe(\"Name of the sampler to use\"),\n    scheduler: config_1.default.schedulers\n        .optional()\n        .default(\"normal\")\n        .describe(\"Type of scheduler to use\"),\n    denoise: zod_1.z\n        .number()\n        .min(0)\n        .max(1)\n        .optional()\n        .default(0.8)\n        .describe(\"Denoising strength\"),\n    checkpoint: zod_1.z\n        .string()\n        .refine(function (val) { return config_1.default.models.checkpoints.all.includes(val); })\n        .optional()\n        .default(config_1.default.warmupCkpt || config_1.default.models.checkpoints.all[0])\n        .describe(\"Checkpoint to use\"),\n    image: zod_1.z.string().describe(\"Input image for img2img\"),\n    width: zod_1.z\n        .number()\n        .int()\n        .min(64)\n        .max(2048)\n        .optional()\n        .default(512)\n        .describe(\"Width of the generated image\"),\n    height: zod_1.z\n        .number()\n        .int()\n        .min(64)\n        .max(2048)\n        .optional()\n        .default(512)\n        .describe(\"Height of the generated image\"),\n    interpolation: zod_1.z\n        .enum([\"nearest\"])\n        .optional()\n        .default(\"nearest\")\n        .describe(\"Interpolation method for image resizing\"),\n    resize_method: zod_1.z\n        .enum([\"keep proportion\"])\n        .optional()\n        .default(\"keep proportion\")\n        .describe(\"Method for resizing the image\"),\n    resize_condition: zod_1.z\n        .enum([\"always\"])\n        .optional()\n        .default(\"always\")\n        .describe(\"Condition for when to resize the image\"),\n    multiple_of: zod_1.z\n        .number()\n        .int()\n        .min(0)\n        .optional()\n        .default(0)\n        .describe(\"Ensure dimensions are multiples of this value\"),\n});\nfunction generateWorkflow(input) {\n    return {\n        \"3\": {\n            inputs: {\n                seed: input.seed,\n                steps: input.steps,\n                cfg: input.cfg_scale,\n                sampler_name: input.sampler_name,\n                scheduler: input.scheduler,\n                denoise: input.denoise,\n                model: [\"4\", 0],\n                positive: [\"6\", 0],\n                negative: [\"7\", 0],\n                latent_image: [\"12\", 0],\n            },\n            class_type: \"KSampler\",\n            
_meta: {\n                title: \"KSampler\",\n            },\n        },\n        \"4\": {\n            inputs: {\n                ckpt_name: input.checkpoint,\n            },\n            class_type: \"CheckpointLoaderSimple\",\n            _meta: {\n                title: \"Load Checkpoint\",\n            },\n        },\n        \"6\": {\n            inputs: {\n                text: input.prompt,\n                clip: [\"4\", 1],\n            },\n            class_type: \"CLIPTextEncode\",\n            _meta: {\n                title: \"CLIP Text Encode (Prompt)\",\n            },\n        },\n        \"7\": {\n            inputs: {\n                text: input.negative_prompt,\n                clip: [\"4\", 1],\n            },\n            class_type: \"CLIPTextEncode\",\n            _meta: {\n                title: \"CLIP Text Encode (Prompt)\",\n            },\n        },\n        \"8\": {\n            inputs: {\n                samples: [\"3\", 0],\n                vae: [\"4\", 2],\n            },\n            class_type: \"VAEDecode\",\n            _meta: {\n                title: \"VAE Decode\",\n            },\n        },\n        \"9\": {\n            inputs: {\n                filename_prefix: \"output\",\n                images: [\"8\", 0],\n            },\n            class_type: \"SaveImage\",\n            _meta: {\n                title: \"Save Image\",\n            },\n        },\n        \"10\": {\n            inputs: {\n                image: input.image,\n                upload: \"image\",\n            },\n            class_type: \"LoadImage\",\n            _meta: {\n                title: \"Load Image\",\n            },\n        },\n        \"11\": {\n            inputs: {\n                width: input.width,\n                height: input.height,\n                interpolation: input.interpolation,\n                method: input.resize_method,\n                condition: input.resize_condition,\n                multiple_of: input.multiple_of,\n                image: [\"10\", 0],\n            },\n            class_type: \"ImageResize+\",\n            _meta: {\n                title: \"🔧 Image Resize\",\n            },\n        },\n        \"12\": {\n            inputs: {\n                pixels: [\"11\", 0],\n                vae: [\"4\", 2],\n            },\n            class_type: \"VAEEncode\",\n            _meta: {\n                title: \"VAE Encode\",\n            },\n        },\n    };\n}\nvar workflow = {\n    RequestSchema: RequestSchema,\n    generateWorkflow: generateWorkflow,\n    summary: \"Image-to-Image\",\n    description: \"Text-guided Image-to-Image generation\",\n};\nexports.default = workflow;\n"
  },
  {
    "path": "example-workflows/sd1.5/img2img.json",
    "content": "{\n  \"3\": {\n    \"inputs\": {\n      \"seed\": 818335187507771,\n      \"steps\": 15,\n      \"cfg\": 8,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"normal\",\n      \"denoise\": 0.8,\n      \"model\": [\n        \"4\",\n        0\n      ],\n      \"positive\": [\n        \"6\",\n        0\n      ],\n      \"negative\": [\n        \"7\",\n        0\n      ],\n      \"latent_image\": [\n        \"12\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  },\n  \"4\": {\n    \"inputs\": {\n      \"ckpt_name\": \"dreamshaper_8.safetensors\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint\"\n    }\n  },\n  \"6\": {\n    \"inputs\": {\n      \"text\": \"A girl in a pink dress with cat ears, magazine photograph\",\n      \"clip\": [\n        \"4\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"7\": {\n    \"inputs\": {\n      \"text\": \"text, watermark\",\n      \"clip\": [\n        \"4\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"8\": {\n    \"inputs\": {\n      \"samples\": [\n        \"3\",\n        0\n      ],\n      \"vae\": [\n        \"4\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"9\": {\n    \"inputs\": {\n      \"filename_prefix\": \"ComfyUI\",\n      \"images\": [\n        \"8\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  },\n  \"10\": {\n    \"inputs\": {\n      \"image\": \"example.png\",\n      \"upload\": \"image\"\n    },\n    \"class_type\": \"LoadImage\",\n    \"_meta\": {\n      \"title\": \"Load Image\"\n    }\n  },\n  \"11\": {\n    \"inputs\": {\n      \"width\": 512,\n      \"height\": 512,\n      \"interpolation\": \"nearest\",\n      \"method\": \"keep proportion\",\n      \"condition\": \"always\",\n      \"multiple_of\": 0,\n      \"image\": [\n        \"10\",\n        0\n      ]\n    },\n    \"class_type\": \"ImageResize+\",\n    \"_meta\": {\n      \"title\": \"🔧 Image Resize\"\n    }\n  },\n  \"12\": {\n    \"inputs\": {\n      \"pixels\": [\n        \"11\",\n        0\n      ],\n      \"vae\": [\n        \"4\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEEncode\",\n    \"_meta\": {\n      \"title\": \"VAE Encode\"\n    }\n  }\n}"
  },
  {
    "path": "example-workflows/sd1.5/img2img.ts",
    "content": "import { z } from \"zod\";\n// This gets evaluated in the context of src/workflows, so imports must be relative to that directory\nimport { ComfyPrompt, Workflow } from \"../types\";\nimport config from \"../config\";\n\nconst RequestSchema = z.object({\n  prompt: z.string().describe(\"The positive prompt for image generation\"),\n  negative_prompt: z\n    .string()\n    .optional()\n    .default(\"text, watermark\")\n    .describe(\"The negative prompt for image generation\"),\n  seed: z\n    .number()\n    .int()\n    .optional()\n    .default(() => Math.floor(Math.random() * 1000000000000000))\n    .describe(\"Seed for random number generation\"),\n  steps: z\n    .number()\n    .int()\n    .min(1)\n    .max(100)\n    .optional()\n    .default(15)\n    .describe(\"Number of sampling steps\"),\n  cfg_scale: z\n    .number()\n    .min(0)\n    .max(20)\n    .optional()\n    .default(8)\n    .describe(\"Classifier-free guidance scale\"),\n  sampler_name: config.samplers\n    .optional()\n    .default(\"euler\")\n    .describe(\"Name of the sampler to use\"),\n  scheduler: config.schedulers\n    .optional()\n    .default(\"normal\")\n    .describe(\"Type of scheduler to use\"),\n  denoise: z\n    .number()\n    .min(0)\n    .max(1)\n    .optional()\n    .default(0.8)\n    .describe(\"Denoising strength\"),\n  checkpoint: z\n    .string()\n    .refine((val) => config.models.checkpoints.all.includes(val))\n    .optional()\n    .default(config.warmupCkpt || config.models.checkpoints.all[0])\n    .describe(\"Checkpoint to use\"),\n  image: z.string().describe(\"Input image for img2img\"),\n  width: z\n    .number()\n    .int()\n    .min(64)\n    .max(2048)\n    .optional()\n    .default(512)\n    .describe(\"Width of the generated image\"),\n  height: z\n    .number()\n    .int()\n    .min(64)\n    .max(2048)\n    .optional()\n    .default(512)\n    .describe(\"Height of the generated image\"),\n  interpolation: z\n    .enum([\"nearest\"])\n    .optional()\n    .default(\"nearest\")\n    .describe(\"Interpolation method for image resizing\"),\n  resize_method: z\n    .enum([\"keep proportion\"])\n    .optional()\n    .default(\"keep proportion\")\n    .describe(\"Method for resizing the image\"),\n  resize_condition: z\n    .enum([\"always\"])\n    .optional()\n    .default(\"always\")\n    .describe(\"Condition for when to resize the image\"),\n  multiple_of: z\n    .number()\n    .int()\n    .min(0)\n    .optional()\n    .default(0)\n    .describe(\"Ensure dimensions are multiples of this value\"),\n});\n\ntype InputType = z.infer<typeof RequestSchema>;\n\nfunction generateWorkflow(input: InputType): ComfyPrompt {\n  return {\n    \"3\": {\n      inputs: {\n        seed: input.seed,\n        steps: input.steps,\n        cfg: input.cfg_scale,\n        sampler_name: input.sampler_name,\n        scheduler: input.scheduler,\n        denoise: input.denoise,\n        model: [\"4\", 0],\n        positive: [\"6\", 0],\n        negative: [\"7\", 0],\n        latent_image: [\"12\", 0],\n      },\n      class_type: \"KSampler\",\n      _meta: {\n        title: \"KSampler\",\n      },\n    },\n    \"4\": {\n      inputs: {\n        ckpt_name: input.checkpoint,\n      },\n      class_type: \"CheckpointLoaderSimple\",\n      _meta: {\n        title: \"Load Checkpoint\",\n      },\n    },\n    \"6\": {\n      inputs: {\n        text: input.prompt,\n        clip: [\"4\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Prompt)\",\n      
},\n    },\n    \"7\": {\n      inputs: {\n        text: input.negative_prompt,\n        clip: [\"4\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Prompt)\",\n      },\n    },\n    \"8\": {\n      inputs: {\n        samples: [\"3\", 0],\n        vae: [\"4\", 2],\n      },\n      class_type: \"VAEDecode\",\n      _meta: {\n        title: \"VAE Decode\",\n      },\n    },\n    \"9\": {\n      inputs: {\n        filename_prefix: \"output\",\n        images: [\"8\", 0],\n      },\n      class_type: \"SaveImage\",\n      _meta: {\n        title: \"Save Image\",\n      },\n    },\n    \"10\": {\n      inputs: {\n        image: input.image,\n        upload: \"image\",\n      },\n      class_type: \"LoadImage\",\n      _meta: {\n        title: \"Load Image\",\n      },\n    },\n    \"11\": {\n      inputs: {\n        width: input.width,\n        height: input.height,\n        interpolation: input.interpolation,\n        method: input.resize_method,\n        condition: input.resize_condition,\n        multiple_of: input.multiple_of,\n        image: [\"10\", 0],\n      },\n      class_type: \"ImageResize+\",\n      _meta: {\n        title: \"🔧 Image Resize\",\n      },\n    },\n    \"12\": {\n      inputs: {\n        pixels: [\"11\", 0],\n        vae: [\"4\", 2],\n      },\n      class_type: \"VAEEncode\",\n      _meta: {\n        title: \"VAE Encode\",\n      },\n    },\n  };\n}\n\nconst workflow: Workflow = {\n  RequestSchema,\n  generateWorkflow,\n  summary: \"Image-to-Image\",\n  description: \"Text-guided Image-to-Image generation\",\n};\n\nexport default workflow;\n"
  },
  {
    "path": "example-workflows/sd1.5/txt2img.js",
    "content": "\"use strict\";\nObject.defineProperty(exports, \"__esModule\", { value: true });\nvar zod_1 = require(\"zod\");\nvar config_1 = require(\"../config\");\nvar RequestSchema = zod_1.z.object({\n    prompt: zod_1.z.string().describe(\"The positive prompt for image generation\"),\n    negative_prompt: zod_1.z\n        .string()\n        .optional()\n        .default(\"\")\n        .describe(\"The negative prompt for image generation\"),\n    width: zod_1.z\n        .number()\n        .int()\n        .min(256)\n        .max(2048)\n        .optional()\n        .default(512)\n        .describe(\"Width of the generated image\"),\n    height: zod_1.z\n        .number()\n        .int()\n        .min(256)\n        .max(2048)\n        .optional()\n        .default(512)\n        .describe(\"Height of the generated image\"),\n    seed: zod_1.z\n        .number()\n        .int()\n        .optional()\n        .default(function () { return Math.floor(Math.random() * 100000000000); })\n        .describe(\"Seed for random number generation\"),\n    steps: zod_1.z\n        .number()\n        .int()\n        .min(1)\n        .max(100)\n        .optional()\n        .default(20)\n        .describe(\"Number of sampling steps\"),\n    cfg_scale: zod_1.z\n        .number()\n        .min(0)\n        .max(20)\n        .optional()\n        .default(8)\n        .describe(\"Classifier-free guidance scale\"),\n    sampler_name: config_1.default.samplers\n        .optional()\n        .default(\"euler\")\n        .describe(\"Name of the sampler to use\"),\n    scheduler: config_1.default.schedulers\n        .optional()\n        .default(\"normal\")\n        .describe(\"Type of scheduler to use\"),\n    denoise: zod_1.z\n        .number()\n        .min(0)\n        .max(1)\n        .optional()\n        .default(1)\n        .describe(\"Denoising strength\"),\n    checkpoint: zod_1.z\n        .string()\n        .refine(function (val) { return config_1.default.models.checkpoints.all.includes(val); })\n        .optional()\n        .default(config_1.default.warmupCkpt || config_1.default.models.checkpoints.all[0])\n        .describe(\"Checkpoint to use\"),\n});\nfunction generateWorkflow(input) {\n    return {\n        \"3\": {\n            inputs: {\n                seed: input.seed,\n                steps: input.steps,\n                cfg: input.cfg_scale,\n                sampler_name: input.sampler_name,\n                scheduler: input.scheduler,\n                denoise: input.denoise,\n                model: [\"4\", 0],\n                positive: [\"6\", 0],\n                negative: [\"7\", 0],\n                latent_image: [\"5\", 0],\n            },\n            class_type: \"KSampler\",\n            _meta: {\n                title: \"KSampler\",\n            },\n        },\n        \"4\": {\n            inputs: {\n                ckpt_name: input.checkpoint,\n            },\n            class_type: \"CheckpointLoaderSimple\",\n            _meta: {\n                title: \"Load Checkpoint\",\n            },\n        },\n        \"5\": {\n            inputs: {\n                width: input.width,\n                height: input.height,\n                batch_size: 1,\n            },\n            class_type: \"EmptyLatentImage\",\n            _meta: {\n                title: \"Empty Latent Image\",\n            },\n        },\n        \"6\": {\n            inputs: {\n                text: input.prompt,\n                clip: [\"4\", 1],\n            },\n            class_type: \"CLIPTextEncode\",\n     
       _meta: {\n                title: \"CLIP Text Encode (Prompt)\",\n            },\n        },\n        \"7\": {\n            inputs: {\n                text: input.negative_prompt,\n                clip: [\"4\", 1],\n            },\n            class_type: \"CLIPTextEncode\",\n            _meta: {\n                title: \"CLIP Text Encode (Prompt)\",\n            },\n        },\n        \"8\": {\n            inputs: {\n                samples: [\"3\", 0],\n                vae: [\"4\", 2],\n            },\n            class_type: \"VAEDecode\",\n            _meta: {\n                title: \"VAE Decode\",\n            },\n        },\n        \"9\": {\n            inputs: {\n                filename_prefix: \"output\",\n                images: [\"8\", 0],\n            },\n            class_type: \"SaveImage\",\n            _meta: {\n                title: \"Save Image\",\n            },\n        },\n    };\n}\nvar workflow = {\n    RequestSchema: RequestSchema,\n    generateWorkflow: generateWorkflow,\n};\nexports.default = workflow;\n"
  },
  {
    "path": "example-workflows/sd1.5/txt2img.json",
    "content": "{\n  \"3\": {\n    \"inputs\": {\n      \"seed\": 712610403220747,\n      \"steps\": 20,\n      \"cfg\": 8,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"normal\",\n      \"denoise\": 1,\n      \"model\": [\n        \"4\",\n        0\n      ],\n      \"positive\": [\n        \"6\",\n        0\n      ],\n      \"negative\": [\n        \"7\",\n        0\n      ],\n      \"latent_image\": [\n        \"5\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  },\n  \"4\": {\n    \"inputs\": {\n      \"ckpt_name\": \"dreamshaper_8.safetensors\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint\"\n    }\n  },\n  \"5\": {\n    \"inputs\": {\n      \"width\": 512,\n      \"height\": 512,\n      \"batch_size\": 1\n    },\n    \"class_type\": \"EmptyLatentImage\",\n    \"_meta\": {\n      \"title\": \"Empty Latent Image\"\n    }\n  },\n  \"6\": {\n    \"inputs\": {\n      \"text\": \"beautiful scenery nature glass bottle landscape, , purple galaxy bottle,\",\n      \"clip\": [\n        \"4\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"7\": {\n    \"inputs\": {\n      \"text\": \"text, watermark\",\n      \"clip\": [\n        \"4\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"8\": {\n    \"inputs\": {\n      \"samples\": [\n        \"3\",\n        0\n      ],\n      \"vae\": [\n        \"4\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"9\": {\n    \"inputs\": {\n      \"filename_prefix\": \"ComfyUI\",\n      \"images\": [\n        \"8\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  }\n}"
  },
  {
    "path": "example-workflows/sd1.5/txt2img.ts",
    "content": "import { z } from \"zod\";\n// This gets evaluated in the context of src/workflows, so imports must be relative to that directory\nimport { ComfyPrompt, Workflow } from \"../types\";\nimport config from \"../config\";\n\nconst RequestSchema = z.object({\n  prompt: z.string().describe(\"The positive prompt for image generation\"),\n  negative_prompt: z\n    .string()\n    .optional()\n    .default(\"\")\n    .describe(\"The negative prompt for image generation\"),\n  width: z\n    .number()\n    .int()\n    .min(256)\n    .max(2048)\n    .optional()\n    .default(512)\n    .describe(\"Width of the generated image\"),\n  height: z\n    .number()\n    .int()\n    .min(256)\n    .max(2048)\n    .optional()\n    .default(512)\n    .describe(\"Height of the generated image\"),\n  seed: z\n    .number()\n    .int()\n    .optional()\n    .default(() => Math.floor(Math.random() * 100000000000))\n    .describe(\"Seed for random number generation\"),\n  steps: z\n    .number()\n    .int()\n    .min(1)\n    .max(100)\n    .optional()\n    .default(20)\n    .describe(\"Number of sampling steps\"),\n  cfg_scale: z\n    .number()\n    .min(0)\n    .max(20)\n    .optional()\n    .default(8)\n    .describe(\"Classifier-free guidance scale\"),\n  sampler_name: config.samplers\n    .optional()\n    .default(\"euler\")\n    .describe(\"Name of the sampler to use\"),\n  scheduler: config.schedulers\n    .optional()\n    .default(\"normal\")\n    .describe(\"Type of scheduler to use\"),\n  denoise: z\n    .number()\n    .min(0)\n    .max(1)\n    .optional()\n    .default(1)\n    .describe(\"Denoising strength\"),\n  checkpoint: z\n    .string()\n    .refine((val) => config.models.checkpoints.all.includes(val))\n    .optional()\n    .default(config.warmupCkpt || config.models.checkpoints.all[0])\n    .describe(\"Checkpoint to use\"),\n});\n\ntype InputType = z.infer<typeof RequestSchema>;\n\nfunction generateWorkflow(input: InputType): ComfyPrompt {\n  return {\n    \"3\": {\n      inputs: {\n        seed: input.seed,\n        steps: input.steps,\n        cfg: input.cfg_scale,\n        sampler_name: input.sampler_name,\n        scheduler: input.scheduler,\n        denoise: input.denoise,\n        model: [\"4\", 0],\n        positive: [\"6\", 0],\n        negative: [\"7\", 0],\n        latent_image: [\"5\", 0],\n      },\n      class_type: \"KSampler\",\n      _meta: {\n        title: \"KSampler\",\n      },\n    },\n    \"4\": {\n      inputs: {\n        ckpt_name: input.checkpoint,\n      },\n      class_type: \"CheckpointLoaderSimple\",\n      _meta: {\n        title: \"Load Checkpoint\",\n      },\n    },\n    \"5\": {\n      inputs: {\n        width: input.width,\n        height: input.height,\n        batch_size: 1,\n      },\n      class_type: \"EmptyLatentImage\",\n      _meta: {\n        title: \"Empty Latent Image\",\n      },\n    },\n    \"6\": {\n      inputs: {\n        text: input.prompt,\n        clip: [\"4\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Prompt)\",\n      },\n    },\n    \"7\": {\n      inputs: {\n        text: input.negative_prompt,\n        clip: [\"4\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Prompt)\",\n      },\n    },\n    \"8\": {\n      inputs: {\n        samples: [\"3\", 0],\n        vae: [\"4\", 2],\n      },\n      class_type: \"VAEDecode\",\n      _meta: {\n        title: \"VAE Decode\",\n      },\n    },\n    \"9\": {\n      
inputs: {\n        filename_prefix: \"output\",\n        images: [\"8\", 0],\n      },\n      class_type: \"SaveImage\",\n      _meta: {\n        title: \"Save Image\",\n      },\n    },\n  };\n}\n\nconst workflow: Workflow = {\n  RequestSchema,\n  generateWorkflow,\n};\n\nexport default workflow;\n"
  },
  {
    "path": "example-workflows/sdxl/img2img.json",
    "content": "{\n  \"8\": {\n    \"inputs\": {\n      \"samples\": [\n        \"36\",\n        0\n      ],\n      \"vae\": [\n        \"14\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"9\": {\n    \"inputs\": {\n      \"filename_prefix\": \"img2img\",\n      \"images\": [\n        \"8\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  },\n  \"14\": {\n    \"inputs\": {\n      \"ckpt_name\": \"sd_xl_base_1.0.safetensors\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint Base\"\n    }\n  },\n  \"16\": {\n    \"inputs\": {\n      \"width\": 4096,\n      \"height\": 4096,\n      \"crop_w\": 0,\n      \"crop_h\": 0,\n      \"target_width\": 4096,\n      \"target_height\": 4096,\n      \"text_g\": \"a professional photo of a young man smiling\\n\\nhigh resolution, highly detailed, 4k\",\n      \"text_l\": \"a professional photo of a young man smiling\\n\\nhigh resolution, highly detailed, 4k\",\n      \"clip\": [\n        \"14\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncodeSDXL\",\n    \"_meta\": {\n      \"title\": \"CLIPTextEncodeSDXL\"\n    }\n  },\n  \"19\": {\n    \"inputs\": {\n      \"width\": 4096,\n      \"height\": 4096,\n      \"crop_w\": 0,\n      \"crop_h\": 0,\n      \"target_width\": 4096,\n      \"target_height\": 4096,\n      \"text_g\": \"blurry, horror, rendering, illustration, drawing, painting\",\n      \"text_l\": \"blurry, horror, rendering, illustration, drawing, painting\",\n      \"clip\": [\n        \"14\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncodeSDXL\",\n    \"_meta\": {\n      \"title\": \"CLIPTextEncodeSDXL\"\n    }\n  },\n  \"36\": {\n    \"inputs\": {\n      \"seed\": 887855663168366,\n      \"steps\": 20,\n      \"cfg\": 5.5,\n      \"sampler_name\": \"dpmpp_2m_sde_gpu\",\n      \"scheduler\": \"exponential\",\n      \"denoise\": 0.75,\n      \"model\": [\n        \"14\",\n        0\n      ],\n      \"positive\": [\n        \"16\",\n        0\n      ],\n      \"negative\": [\n        \"19\",\n        0\n      ],\n      \"latent_image\": [\n        \"39\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  },\n  \"38\": {\n    \"inputs\": {\n      \"image\": \"ComfyUI_00376_.png\",\n      \"upload\": \"image\"\n    },\n    \"class_type\": \"LoadImage\",\n    \"_meta\": {\n      \"title\": \"Load Image\"\n    }\n  },\n  \"39\": {\n    \"inputs\": {\n      \"pixels\": [\n        \"40\",\n        0\n      ],\n      \"vae\": [\n        \"14\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEEncode\",\n    \"_meta\": {\n      \"title\": \"VAE Encode\"\n    }\n  },\n  \"40\": {\n    \"inputs\": {\n      \"upscale_method\": \"nearest-exact\",\n      \"width\": 1024,\n      \"height\": 1024,\n      \"crop\": \"center\",\n      \"image\": [\n        \"38\",\n        0\n      ]\n    },\n    \"class_type\": \"ImageScale\",\n    \"_meta\": {\n      \"title\": \"Upscale Image\"\n    }\n  }\n}"
  },
  {
    "path": "example-workflows/sdxl/img2img.ts",
    "content": "import { z } from \"zod\";\n// This gets evaluated in the context of src/workflows, so imports must be relative to that directory\nimport { ComfyPrompt, Workflow } from \"../types\";\nimport config from \"../config\";\n\nlet checkpoint: any = config.models.checkpoints.enum.optional();\nif (config.warmupCkpt) {\n  checkpoint = checkpoint.default(config.warmupCkpt);\n}\n\nconst RequestSchema = z.object({\n  prompt: z.string().describe(\"The positive prompt for image generation\"),\n  negative_prompt: z\n    .string()\n    .optional()\n    .describe(\"The negative prompt for image generation\"),\n  width: z\n    .number()\n    .int()\n    .min(256)\n    .max(4096)\n    .optional()\n    .default(4096)\n    .describe(\"Width of the generated image\"),\n  height: z\n    .number()\n    .int()\n    .min(256)\n    .max(4096)\n    .optional()\n    .default(4096)\n    .describe(\"Height of the generated image\"),\n  seed: z\n    .number()\n    .int()\n    .optional()\n    .default(() => Math.floor(Math.random() * 1000000000000000))\n    .describe(\"Seed for random number generation\"),\n  steps: z\n    .number()\n    .int()\n    .min(1)\n    .max(100)\n    .optional()\n    .default(20)\n    .describe(\"Number of sampling steps\"),\n  cfg_scale: z\n    .number()\n    .min(0)\n    .max(20)\n    .optional()\n    .default(5.5)\n    .describe(\"Classifier-free guidance scale\"),\n  sampler_name: config.samplers\n    .optional()\n    .default(\"dpmpp_2m_sde_gpu\")\n    .describe(\"Name of the sampler to use\"),\n  scheduler: config.schedulers\n    .optional()\n    .default(\"exponential\")\n    .describe(\"Type of scheduler to use\"),\n  denoise: z\n    .number()\n    .min(0)\n    .max(1)\n    .optional()\n    .default(0.75)\n    .describe(\"Denoising strength\"),\n  checkpoint,\n  image: z.string().describe(\"Input image for img2img\"),\n  upscale_method: z\n    .enum([\"nearest-exact\"])\n    .optional()\n    .default(\"nearest-exact\")\n    .describe(\n      \"Method used for upscaling if input image is smaller than target size\"\n    ),\n  target_width: z\n    .number()\n    .int()\n    .min(256)\n    .max(4096)\n    .optional()\n    .default(1024)\n    .describe(\"Target width for upscaling\"),\n  target_height: z\n    .number()\n    .int()\n    .min(256)\n    .max(4096)\n    .optional()\n    .default(1024)\n    .describe(\"Target height for upscaling\"),\n});\n\ntype InputType = z.infer<typeof RequestSchema>;\n\nfunction generateWorkflow(input: InputType): ComfyPrompt {\n  return {\n    \"8\": {\n      inputs: {\n        samples: [\"36\", 0],\n        vae: [\"14\", 2],\n      },\n      class_type: \"VAEDecode\",\n      _meta: {\n        title: \"VAE Decode\",\n      },\n    },\n    \"9\": {\n      inputs: {\n        filename_prefix: \"img2img\",\n        images: [\"8\", 0],\n      },\n      class_type: \"SaveImage\",\n      _meta: {\n        title: \"Save Image\",\n      },\n    },\n    \"14\": {\n      inputs: {\n        ckpt_name: input.checkpoint,\n      },\n      class_type: \"CheckpointLoaderSimple\",\n      _meta: {\n        title: \"Load Checkpoint Base\",\n      },\n    },\n    \"16\": {\n      inputs: {\n        width: input.width,\n        height: input.height,\n        crop_w: 0,\n        crop_h: 0,\n        target_width: input.width,\n        target_height: input.height,\n        text_g: input.prompt,\n        text_l: input.prompt,\n        clip: [\"14\", 1],\n      },\n      class_type: \"CLIPTextEncodeSDXL\",\n      _meta: {\n        title: \"CLIPTextEncodeSDXL\",\n      
},\n    },\n    \"19\": {\n      inputs: {\n        width: input.width,\n        height: input.height,\n        crop_w: 0,\n        crop_h: 0,\n        target_width: input.width,\n        target_height: input.height,\n        text_g: input.negative_prompt,\n        text_l: input.negative_prompt,\n        clip: [\"14\", 1],\n      },\n      class_type: \"CLIPTextEncodeSDXL\",\n      _meta: {\n        title: \"CLIPTextEncodeSDXL\",\n      },\n    },\n    \"36\": {\n      inputs: {\n        seed: input.seed,\n        steps: input.steps,\n        cfg: input.cfg_scale,\n        sampler_name: input.sampler_name,\n        scheduler: input.scheduler,\n        denoise: input.denoise,\n        model: [\"14\", 0],\n        positive: [\"16\", 0],\n        negative: [\"19\", 0],\n        latent_image: [\"39\", 0],\n      },\n      class_type: \"KSampler\",\n      _meta: {\n        title: \"KSampler\",\n      },\n    },\n    \"38\": {\n      inputs: {\n        image: input.image,\n        upload: \"image\",\n      },\n      class_type: \"LoadImage\",\n      _meta: {\n        title: \"Load Image\",\n      },\n    },\n    \"39\": {\n      inputs: {\n        pixels: [\"40\", 0],\n        vae: [\"14\", 2],\n      },\n      class_type: \"VAEEncode\",\n      _meta: {\n        title: \"VAE Encode\",\n      },\n    },\n    \"40\": {\n      inputs: {\n        upscale_method: input.upscale_method,\n        width: input.target_width,\n        height: input.target_height,\n        crop: \"center\",\n        image: [\"38\", 0],\n      },\n      class_type: \"ImageScale\",\n      _meta: {\n        title: \"Upscale Image\",\n      },\n    },\n  };\n}\n\nconst workflow: Workflow = {\n  RequestSchema,\n  generateWorkflow,\n  summary: \"Image-to-Image\",\n  description: \"Text-guided Image-to-Image generation\",\n};\n\nexport default workflow;\n"
  },
  {
    "path": "example-workflows/sdxl/txt2img-with-refiner.json",
    "content": "{\n  \"4\": {\n    \"inputs\": {\n      \"ckpt_name\": \"sd_xl_base_1.0.safetensors\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint - BASE\"\n    }\n  },\n  \"5\": {\n    \"inputs\": {\n      \"width\": 1024,\n      \"height\": 1024,\n      \"batch_size\": 1\n    },\n    \"class_type\": \"EmptyLatentImage\",\n    \"_meta\": {\n      \"title\": \"Empty Latent Image\"\n    }\n  },\n  \"6\": {\n    \"inputs\": {\n      \"text\": \"evening sunset scenery blue sky nature, glass bottle with a galaxy in it\",\n      \"clip\": [\n        \"4\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"7\": {\n    \"inputs\": {\n      \"text\": \"text, watermark\",\n      \"clip\": [\n        \"4\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"10\": {\n    \"inputs\": {\n      \"add_noise\": \"enable\",\n      \"noise_seed\": 721897303308196,\n      \"steps\": 25,\n      \"cfg\": 8,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"normal\",\n      \"start_at_step\": 0,\n      \"end_at_step\": 20,\n      \"return_with_leftover_noise\": \"enable\",\n      \"model\": [\n        \"4\",\n        0\n      ],\n      \"positive\": [\n        \"6\",\n        0\n      ],\n      \"negative\": [\n        \"7\",\n        0\n      ],\n      \"latent_image\": [\n        \"5\",\n        0\n      ]\n    },\n    \"class_type\": \"KSamplerAdvanced\",\n    \"_meta\": {\n      \"title\": \"KSampler (Advanced) - BASE\"\n    }\n  },\n  \"11\": {\n    \"inputs\": {\n      \"add_noise\": \"disable\",\n      \"noise_seed\": 0,\n      \"steps\": 25,\n      \"cfg\": 8,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"normal\",\n      \"start_at_step\": 20,\n      \"end_at_step\": 10000,\n      \"return_with_leftover_noise\": \"disable\",\n      \"model\": [\n        \"12\",\n        0\n      ],\n      \"positive\": [\n        \"15\",\n        0\n      ],\n      \"negative\": [\n        \"16\",\n        0\n      ],\n      \"latent_image\": [\n        \"10\",\n        0\n      ]\n    },\n    \"class_type\": \"KSamplerAdvanced\",\n    \"_meta\": {\n      \"title\": \"KSampler (Advanced) - REFINER\"\n    }\n  },\n  \"12\": {\n    \"inputs\": {\n      \"ckpt_name\": \"sd_xl_refiner_1.0.safetensors\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint - REFINER\"\n    }\n  },\n  \"15\": {\n    \"inputs\": {\n      \"text\": \"evening sunset scenery blue sky nature, glass bottle with a galaxy in it\",\n      \"clip\": [\n        \"12\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"16\": {\n    \"inputs\": {\n      \"text\": \"text, watermark\",\n      \"clip\": [\n        \"12\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"17\": {\n    \"inputs\": {\n      \"samples\": [\n        \"11\",\n        0\n      ],\n      \"vae\": [\n        \"12\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"19\": {\n    \"inputs\": {\n      \"filename_prefix\": \"ComfyUI\",\n      \"images\": [\n 
       \"17\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  }\n}"
  },
  {
    "path": "example-workflows/sdxl/txt2img-with-refiner.ts",
    "content": "import { z } from \"zod\";\n// This gets evaluated in the context of src/workflows, so imports must be relative to that directory\nimport { ComfyPrompt, Workflow } from \"../types\";\nimport config from \"../config\";\n\nlet checkpoint: any = config.models.checkpoints.enum.optional();\nif (config.warmupCkpt) {\n  checkpoint = checkpoint.default(config.warmupCkpt);\n}\n\nconst RequestSchema = z.object({\n  prompt: z.string().describe(\"The positive prompt for image generation\"),\n  negative_prompt: z\n    .string()\n    .optional()\n    .default(\"text, watermark\")\n    .describe(\"The negative prompt for image generation\"),\n  width: z\n    .number()\n    .int()\n    .min(256)\n    .max(2048)\n    .optional()\n    .default(1024)\n    .describe(\"Width of the generated image\"),\n  height: z\n    .number()\n    .int()\n    .min(256)\n    .max(2048)\n    .optional()\n    .default(1024)\n    .describe(\"Height of the generated image\"),\n  seed: z\n    .number()\n    .int()\n    .optional()\n    .default(() => Math.floor(Math.random() * 1000000000000000))\n    .describe(\"Seed for random number generation\"),\n  steps: z\n    .number()\n    .int()\n    .min(1)\n    .max(100)\n    .optional()\n    .default(25)\n    .describe(\"Number of sampling steps\"),\n  cfg_scale: z\n    .number()\n    .min(0)\n    .max(20)\n    .optional()\n    .default(8)\n    .describe(\"Classifier-free guidance scale\"),\n  sampler_name: config.samplers\n    .optional()\n    .default(\"euler\")\n    .describe(\"Name of the sampler to use\"),\n  scheduler: config.schedulers\n    .optional()\n    .default(\"normal\")\n    .describe(\"Type of scheduler to use\"),\n  base_start_step: z\n    .number()\n    .int()\n    .min(0)\n    .max(100)\n    .optional()\n    .default(0)\n    .describe(\"Start step for base model sampling\"),\n  base_end_step: z\n    .number()\n    .int()\n    .min(0)\n    .max(100)\n    .optional()\n    .default(20)\n    .describe(\"End step for base model sampling\"),\n  refiner_start_step: z\n    .number()\n    .int()\n    .min(0)\n    .max(100)\n    .optional()\n    .default(20)\n    .describe(\"Start step for refiner model sampling\"),\n  checkpoint,\n  refiner_checkpoint: z\n    .string()\n    .optional()\n    .default(\"sd_xl_refiner_1.0.safetensors\")\n    .describe(\"Checkpoint for the refiner model\"),\n});\n\ntype InputType = z.infer<typeof RequestSchema>;\n\nfunction generateWorkflow(input: InputType): ComfyPrompt {\n  return {\n    \"4\": {\n      inputs: {\n        ckpt_name: input.checkpoint,\n      },\n      class_type: \"CheckpointLoaderSimple\",\n      _meta: {\n        title: \"Load Checkpoint - BASE\",\n      },\n    },\n    \"5\": {\n      inputs: {\n        width: input.width,\n        height: input.height,\n        batch_size: 1,\n      },\n      class_type: \"EmptyLatentImage\",\n      _meta: {\n        title: \"Empty Latent Image\",\n      },\n    },\n    \"6\": {\n      inputs: {\n        text: input.prompt,\n        clip: [\"4\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Prompt)\",\n      },\n    },\n    \"7\": {\n      inputs: {\n        text: input.negative_prompt,\n        clip: [\"4\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Prompt)\",\n      },\n    },\n    \"10\": {\n      inputs: {\n        add_noise: \"enable\",\n        noise_seed: input.seed,\n        steps: input.steps,\n        cfg: input.cfg_scale,\n        
sampler_name: input.sampler_name,\n        scheduler: input.scheduler,\n        start_at_step: input.base_start_step,\n        end_at_step: input.base_end_step,\n        return_with_leftover_noise: \"enable\",\n        model: [\"4\", 0],\n        positive: [\"6\", 0],\n        negative: [\"7\", 0],\n        latent_image: [\"5\", 0],\n      },\n      class_type: \"KSamplerAdvanced\",\n      _meta: {\n        title: \"KSampler (Advanced) - BASE\",\n      },\n    },\n    \"11\": {\n      inputs: {\n        add_noise: \"disable\",\n        noise_seed: 0,\n        steps: input.steps,\n        cfg: input.cfg_scale,\n        sampler_name: input.sampler_name,\n        scheduler: input.scheduler,\n        start_at_step: input.refiner_start_step,\n        end_at_step: 10000,\n        return_with_leftover_noise: \"disable\",\n        model: [\"12\", 0],\n        positive: [\"15\", 0],\n        negative: [\"16\", 0],\n        latent_image: [\"10\", 0],\n      },\n      class_type: \"KSamplerAdvanced\",\n      _meta: {\n        title: \"KSampler (Advanced) - REFINER\",\n      },\n    },\n    \"12\": {\n      inputs: {\n        ckpt_name: input.refiner_checkpoint,\n      },\n      class_type: \"CheckpointLoaderSimple\",\n      _meta: {\n        title: \"Load Checkpoint - REFINER\",\n      },\n    },\n    \"15\": {\n      inputs: {\n        text: input.prompt,\n        clip: [\"12\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Prompt)\",\n      },\n    },\n    \"16\": {\n      inputs: {\n        text: input.negative_prompt,\n        clip: [\"12\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Prompt)\",\n      },\n    },\n    \"17\": {\n      inputs: {\n        samples: [\"11\", 0],\n        vae: [\"12\", 2],\n      },\n      class_type: \"VAEDecode\",\n      _meta: {\n        title: \"VAE Decode\",\n      },\n    },\n    \"19\": {\n      inputs: {\n        filename_prefix: \"ComfyUI\",\n        images: [\"17\", 0],\n      },\n      class_type: \"SaveImage\",\n      _meta: {\n        title: \"Save Image\",\n      },\n    },\n  };\n}\n\nconst workflow: Workflow = {\n  RequestSchema,\n  generateWorkflow,\n};\n\nexport default workflow;\n"
  },
  {
    "path": "example-workflows/sdxl/txt2img.json",
    "content": "{\n  \"4\": {\n    \"inputs\": {\n      \"ckpt_name\": \"sd_xl_base_1.0.safetensors\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint - BASE\"\n    }\n  },\n  \"5\": {\n    \"inputs\": {\n      \"width\": 1024,\n      \"height\": 1024,\n      \"batch_size\": 1\n    },\n    \"class_type\": \"EmptyLatentImage\",\n    \"_meta\": {\n      \"title\": \"Empty Latent Image\"\n    }\n  },\n  \"6\": {\n    \"inputs\": {\n      \"text\": \"evening sunset scenery blue sky nature, glass bottle with a galaxy in it\",\n      \"clip\": [\n        \"4\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"7\": {\n    \"inputs\": {\n      \"text\": \"text, watermark\",\n      \"clip\": [\n        \"4\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"17\": {\n    \"inputs\": {\n      \"samples\": [\n        \"49\",\n        0\n      ],\n      \"vae\": [\n        \"4\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"19\": {\n    \"inputs\": {\n      \"filename_prefix\": \"ComfyUI\",\n      \"images\": [\n        \"17\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  },\n  \"49\": {\n    \"inputs\": {\n      \"seed\": 0,\n      \"steps\": 20,\n      \"cfg\": 8,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"normal\",\n      \"denoise\": 1,\n      \"model\": [\n        \"4\",\n        0\n      ],\n      \"positive\": [\n        \"6\",\n        0\n      ],\n      \"negative\": [\n        \"7\",\n        0\n      ],\n      \"latent_image\": [\n        \"5\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  }\n}"
  },
  {
    "path": "example-workflows/sdxl/txt2img.ts",
    "content": "import { z } from \"zod\";\n// This gets evaluated in the context of src/workflows, so imports must be relative to that directory\nimport { ComfyPrompt, Workflow } from \"../types\";\nimport config from \"../config\";\n\nlet checkpoint: any = config.models.checkpoints.enum.optional();\nif (config.warmupCkpt) {\n  checkpoint = checkpoint.default(config.warmupCkpt);\n}\n\nconst RequestSchema = z.object({\n  prompt: z.string().describe(\"The positive prompt for image generation\"),\n  negative_prompt: z\n    .string()\n    .optional()\n    .default(\"text, watermark\")\n    .describe(\"The negative prompt for image generation\"),\n  width: z\n    .number()\n    .int()\n    .min(256)\n    .max(2048)\n    .optional()\n    .default(1024)\n    .describe(\"Width of the generated image\"),\n  height: z\n    .number()\n    .int()\n    .min(256)\n    .max(2048)\n    .optional()\n    .default(1024)\n    .describe(\"Height of the generated image\"),\n  seed: z\n    .number()\n    .int()\n    .optional()\n    .default(() => Math.floor(Math.random() * 100000000000))\n    .describe(\"Seed for random number generation\"),\n  steps: z\n    .number()\n    .int()\n    .min(1)\n    .max(100)\n    .optional()\n    .default(20)\n    .describe(\"Number of sampling steps\"),\n  cfg_scale: z\n    .number()\n    .min(0)\n    .max(20)\n    .optional()\n    .default(8)\n    .describe(\"Classifier-free guidance scale\"),\n  sampler_name: config.samplers\n    .optional()\n    .default(\"euler\")\n    .describe(\"Name of the sampler to use\"),\n  scheduler: config.schedulers\n    .optional()\n    .default(\"normal\")\n    .describe(\"Type of scheduler to use\"),\n  denoise: z\n    .number()\n    .min(0)\n    .max(1)\n    .optional()\n    .default(1)\n    .describe(\"Denoising strength\"),\n  checkpoint,\n});\n\ntype InputType = z.infer<typeof RequestSchema>;\n\nfunction generateWorkflow(input: InputType): ComfyPrompt {\n  return {\n    \"4\": {\n      inputs: {\n        ckpt_name: input.checkpoint,\n      },\n      class_type: \"CheckpointLoaderSimple\",\n      _meta: {\n        title: \"Load Checkpoint - BASE\",\n      },\n    },\n    \"5\": {\n      inputs: {\n        width: input.width,\n        height: input.height,\n        batch_size: 1,\n      },\n      class_type: \"EmptyLatentImage\",\n      _meta: {\n        title: \"Empty Latent Image\",\n      },\n    },\n    \"6\": {\n      inputs: {\n        text: input.prompt,\n        clip: [\"4\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Prompt)\",\n      },\n    },\n    \"7\": {\n      inputs: {\n        text: input.negative_prompt,\n        clip: [\"4\", 1],\n      },\n      class_type: \"CLIPTextEncode\",\n      _meta: {\n        title: \"CLIP Text Encode (Prompt)\",\n      },\n    },\n    \"17\": {\n      inputs: {\n        samples: [\"49\", 0],\n        vae: [\"4\", 2],\n      },\n      class_type: \"VAEDecode\",\n      _meta: {\n        title: \"VAE Decode\",\n      },\n    },\n    \"19\": {\n      inputs: {\n        filename_prefix: \"ComfyUI\",\n        images: [\"17\", 0],\n      },\n      class_type: \"SaveImage\",\n      _meta: {\n        title: \"Save Image\",\n      },\n    },\n    \"49\": {\n      inputs: {\n        seed: input.seed,\n        steps: input.steps,\n        cfg: input.cfg_scale,\n        sampler_name: input.sampler_name,\n        scheduler: input.scheduler,\n        denoise: input.denoise,\n        model: [\"4\", 0],\n        positive: [\"6\", 0],\n        
negative: [\"7\", 0],\n        latent_image: [\"5\", 0],\n      },\n      class_type: \"KSampler\",\n      _meta: {\n        title: \"KSampler\",\n      },\n    },\n  };\n}\n\nconst workflow: Workflow = {\n  RequestSchema,\n  generateWorkflow,\n};\n\nexport default workflow;\n"
  },
  {
    "path": "generate-workflow",
    "content": "#! /bin/bash\n\nusage=\"Usage: $0 <input-prompt-json> <output-typescript-file>\"\n\ninput_prompt_json=$1\noutput_typescript_file=$2\n\nset -f # Disable globbing, there's a * in the input prompt\nsystem_prompt=$(jq -R -s '{\"text\": .}' claude-endpoint-creation-prompt.md | jq .text)\ninput_prompt=$(jq @json $input_prompt_json)\n\n# Select LLM provider based on available API keys.\n# Anthropic (Claude) is preferred when both keys are set.\n# MiniMax is used as a fallback via its OpenAI-compatible API.\nif [ -n \"$ANTHROPIC_API_KEY\" ]; then\n  provider=\"anthropic\"\n  api_key=\"$ANTHROPIC_API_KEY\"\n  api_url=\"https://api.anthropic.com/v1/messages\"\n  model_id=\"claude-sonnet-4-20250514\"\n  anthropic_version=\"2023-06-01\"\nelif [ -n \"$MINIMAX_API_KEY\" ]; then\n  provider=\"minimax\"\n  api_key=\"$MINIMAX_API_KEY\"\n  api_url=\"https://api.minimax.io/v1/chat/completions\"\n  model_id=\"MiniMax-M2.7\"\nelse\n  echo \"Please set the ANTHROPIC_API_KEY or MINIMAX_API_KEY environment variable\" >&2\n  exit 1\nfi\n\nif [ \"$provider\" = \"minimax\" ]; then\n  # MiniMax uses the OpenAI-compatible API format.\n  # temperature must be in (0.0, 1.0] for MiniMax; 0.01 gives near-deterministic output.\n  api_body=$(\n    cat <<EOF\n{\n  \"model\": \"$model_id\",\n  \"messages\": [\n    {\"role\": \"system\", \"content\": $system_prompt},\n    {\"role\": \"user\", \"content\": $input_prompt}\n  ],\n  \"max_tokens\": 8192,\n  \"temperature\": 0.01\n}\nEOF\n  )\n\n  response=$(\n    curl -s -X POST \\\n      -H \"Authorization: Bearer $api_key\" \\\n      -H \"Content-Type: application/json\" \\\n      -d \"$api_body\" \\\n      \"$api_url\"\n  )\n\n  response_text=$(echo \"$response\" | jq -r '.choices[0].message.content // empty')\nelse\n  # Anthropic API format\n  api_body=$(\n    cat <<EOF\n{\n  \"model\": \"$model_id\",\n  \"system\": $system_prompt,\n  \"max_tokens\": 8192,\n  \"temperature\": 0,\n  \"messages\": [\n    {\n      \"role\": \"user\",\n      \"content\": $input_prompt\n    }\n  ]\n}\nEOF\n  )\n\n  response=$(\n    curl -s -X POST \\\n      -H \"x-api-key: $api_key\" \\\n      -H \"Content-Type: application/json\" \\\n      -H \"anthropic-version: $anthropic_version\" \\\n      -d \"$api_body\" \\\n      \"$api_url\"\n  )\n\n  response_text=$(echo \"$response\" | jq -r '.content[0].text // empty')\nfi\n\nif [ -z \"$response_text\" ]; then\n  echo \"Error: API call failed\" >&2\n  echo \"$response\" | jq . >&2\n  exit 1\nfi\n\n# Strip code-block delimiters if the model wrapped the output in ``` fences.\nfirst_line=$(echo \"$response_text\" | head -n 1)\nif [[ \"$first_line\" == '```'* ]]; then\n  echo \"$response_text\" | tail -n +2 | head -n -1 > \"$output_typescript_file\"\nelse\n  echo \"$response_text\" > \"$output_typescript_file\"\nfi\n\nset +f\n"
  },
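  {
    "path": "examples/run-generate-workflow.ts",
    "content": "// Hypothetical sketch (the file name and location are illustrative): invokes\n// the generate-workflow script above from Node. Assumes it is run from the\n// repo root with ANTHROPIC_API_KEY or MINIMAX_API_KEY exported, and that\n// prompt.json contains a ComfyUI prompt in API format; the output path is an\n// example only.\nimport { execFile } from \"node:child_process\";\n\nexecFile(\n  \"./generate-workflow\",\n  [\"prompt.json\", \"workflows/my-endpoint.ts\"],\n  { env: process.env },\n  (err, _stdout, stderr) => {\n    if (err) {\n      console.error(\"generate-workflow failed:\", stderr || err.message);\n      process.exit(1);\n    }\n    console.log(\"Wrote workflows/my-endpoint.ts\");\n  }\n);\n"
  },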
  {
    "path": "manifest.yml",
    "content": "# apt:\n#   - git\n#   - ffmpeg\n#   - libgl1\ncustom_nodes:\n  - comfyui_essentials\n#   - comfyui-kjnodes\n#   - https://github.com/visualbruno/ComfyUI-Hunyuan3d-2-1.git\n# pip:\n#   - numpy\nmodels:\n  before_start:\n    - url: https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16\n      local_path: models/checkpoints/dreamshaper_8.safetensors\n    - url: https://huggingface.co/Lykon/DreamShaper/resolve/main/DreamShaper_5_beta2_noVae_half_pruned.safetensors?download=true\n      local_path: models/checkpoints/dreamshaper5.safetensors"
  },
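  {
    "path": "examples/validate-manifest.ts",
    "content": "// Hypothetical sketch (the file name and location are illustrative): validates\n// manifest.yml against a standalone mirror of the manifestSpec zod schema\n// defined in src/config.ts. Handy for checking a manifest before baking it\n// into an image.\nimport fs from \"node:fs\";\nimport yaml from \"yaml\";\nimport { z } from \"zod\";\n\nconst modelDownloadConfigSpec = z.object({\n  url: z.string().url(),\n  local_path: z.string(),\n});\n\nconst manifestSpec = z.object({\n  apt: z.string().array().optional(),\n  pip: z.string().array().optional(),\n  custom_nodes: z.string().array().optional(),\n  models: z.object({\n    before_start: modelDownloadConfigSpec.array().optional(),\n    after_start: modelDownloadConfigSpec.array().optional(),\n  }),\n});\n\nconst result = manifestSpec.safeParse(\n  yaml.parse(fs.readFileSync(\"manifest.yml\", \"utf-8\"))\n);\nconsole.log(result.success ? \"manifest.yml is valid\" : result.error.format());\n"
  },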
  {
    "path": "package.json",
    "content": "{\n  \"name\": \"comfyui-api\",\n  \"version\": \"1.18.0\",\n  \"description\": \"Wraps comfyui to make it easier to use as a stateless web service\",\n  \"main\": \"dist/src/index.js\",\n  \"scripts\": {\n    \"test\": \"vitest run\",\n    \"unit-test\": \"vitest run test/utils.spec.ts\",\n    \"build\": \"tsc\",\n    \"build-binary\": \"./build-binary\",\n    \"postinstall\": \"npm install --cpu=wasm32 sharp\"\n  },\n  \"author\": \"Shawn Rushefsky\",\n  \"license\": \"MIT\",\n  \"devDependencies\": {\n    \"@anthropic-ai/sdk\": \"^0.26.1\",\n    \"@types/node\": \"^20.12.7\",\n    \"@types/ws\": \"^8.5.13\",\n    \"@yao-pkg/pkg\": \"^6.1.0\",\n    \"minimist\": \"^1.2.8\",\n    \"svix\": \"^1.78.0\",\n    \"vitest\": \"^3.0.0\"\n  },\n  \"bin\": {\n    \"comfyui-api\": \"dist/src/index.js\"\n  },\n  \"dependencies\": {\n    \"@aws-sdk/client-s3\": \"^3.820.0\",\n    \"@azure/identity\": \"^4.13.0\",\n    \"@azure/storage-blob\": \"^12.28.0\",\n    \"@fastify/swagger\": \"^9.5.0\",\n    \"@fastify/swagger-ui\": \"^5.2.2\",\n    \"@rollup/rollup-linux-x64-gnu\": \"^4.60.2\",\n    \"@smithy/node-http-handler\": \"^4.0.5\",\n    \"fastify\": \"^5.8.5\",\n    \"fastify-type-provider-zod\": \"^4.0.2\",\n    \"sharp\": \"^0.34.5\",\n    \"typescript\": \"^5.8.3\",\n    \"undici\": \"^7.24.0\",\n    \"ws\": \"^8.18.2\",\n    \"yaml\": \"^2.8.3\",\n    \"zod\": \"^3.25.36\"\n  },\n  \"pkg\": {\n    \"targets\": [\n      \"node20-linux-x64\"\n    ],\n    \"outputPath\": \"bin\",\n    \"public\": true\n  },\n  \"engines\": {\n    \"node\": \">=20.18.1\"\n  }\n}\n"
  },
  {
    "path": "scripts/smoke-proxy.mjs",
    "content": "import fastify from \"fastify\";\nimport { fetch } from \"undici\";\nimport { getProxyDispatcher } from \"../dist/src/proxy-dispatcher.js\";\n\nasync function main() {\n  const app = fastify({ logger: true });\n  app.post(\"/webhook\", async (req, reply) => {\n    return reply.send({ success: true, received: req.body || null });\n  });\n\n  await app.listen({ port: 12345, host: \"127.0.0.1\" });\n  await app.ready();\n\n  console.log(\"Local webhook server listening on http://127.0.0.1:12345/webhook\");\n\n  const resp = await fetch(\"http://127.0.0.1:12345/webhook\", {\n    method: \"POST\",\n    headers: { \"Content-Type\": \"application/json\" },\n    body: JSON.stringify({ ping: true }),\n    dispatcher: getProxyDispatcher(),\n  });\n\n  console.log(\"Fetch status:\", resp.status, resp.statusText);\n  const body = await resp.json();\n  console.log(\"Response:\", body);\n\n  await app.close();\n}\n\nmain().catch((err) => {\n  console.error(\"Smoke test failed:\", err);\n  process.exit(1);\n});\n\n"
  },
  {
    "path": "src/comfy-node-preprocessors.ts",
    "content": "import path from \"path\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport { ComfyNode, ComfyPrompt, WorkflowCredential } from \"./types\";\nimport config from \"./config\";\nimport getStorageManager from \"./remote-storage-manager\";\nimport { isValidUrl } from \"./utils\";\nimport { processInputMedia } from \"./image-tools\";\nimport { z } from \"zod\";\nimport { CredentialProvider, createCredentialProvider } from \"./credential-resolver\";\n\nconst configPath = path.join(config.comfyDir, \"models\", \"configs\");\nconst checkpointPath = path.join(config.comfyDir, \"models\", \"checkpoints\");\nconst diffusersPath = path.join(config.comfyDir, \"models\", \"diffusers\");\nconst vaePath = path.join(config.comfyDir, \"models\", \"vae\");\nconst loraPath = path.join(config.comfyDir, \"models\", \"loras\");\nconst controlNetPath = path.join(config.comfyDir, \"models\", \"controlnet\");\nconst clipPath = path.join(config.comfyDir, \"models\", \"text_encoders\");\nconst styleModelPath = path.join(config.comfyDir, \"models\", \"style_models\");\nconst gligenPath = path.join(config.comfyDir, \"models\", \"gligen\");\nconst upscaleModelPath = path.join(config.comfyDir, \"models\", \"upscale_models\");\n\nfunction updateModelsInConfig(modelType: string, modelName: string) {\n  if (config.models[modelType].all.includes(modelName)) {\n    return;\n  }\n  config.models[modelType].all.push(modelName);\n  config.models[modelType].all = Array.from(\n    new Set(config.models[modelType].all)\n  ).sort();\n  config.models[modelType].enum = z.enum(\n    config.models[modelType].all as [string, ...string[]]\n  );\n}\n\nasync function processCheckpointLoaderNode(\n  node: ComfyNode,\n  getCredentials: CredentialProvider\n): Promise<ComfyNode> {\n  const storageManager = getStorageManager();\n  const { config_name, ckpt_name } = node.inputs;\n\n  if (isValidUrl(config_name)) {\n    const localConfigPath = await storageManager.downloadFile(\n      config_name,\n      configPath,\n      undefined,\n      getCredentials(config_name)\n    );\n    const filename = path.basename(localConfigPath);\n    updateModelsInConfig(\"configs\", filename);\n    node.inputs.config_name = filename;\n  }\n\n  if (isValidUrl(ckpt_name)) {\n    const localCkptPath = await storageManager.downloadFile(\n      ckpt_name,\n      checkpointPath,\n      undefined,\n      getCredentials(ckpt_name)\n    );\n    const filename = path.basename(localCkptPath);\n    updateModelsInConfig(\"checkpoints\", filename);\n    node.inputs.ckpt_name = filename;\n  }\n\n  return node;\n}\n\nasync function processCheckpointLoaderSimpleNode(\n  node: ComfyNode,\n  getCredentials: CredentialProvider\n): Promise<ComfyNode> {\n  const storageManager = getStorageManager();\n  const { ckpt_name } = node.inputs;\n\n  if (isValidUrl(ckpt_name)) {\n    const localCkptPath = await storageManager.downloadFile(\n      ckpt_name,\n      checkpointPath,\n      undefined,\n      getCredentials(ckpt_name)\n    );\n    const filename = path.basename(localCkptPath);\n    updateModelsInConfig(\"checkpoints\", filename);\n    node.inputs.ckpt_name = filename;\n  }\n\n  return node;\n}\n\nasync function processDiffusersLoaderNode(\n  node: ComfyNode,\n  _getCredentials: CredentialProvider\n): Promise<ComfyNode> {\n  const storageManager = getStorageManager();\n  const { model_path } = node.inputs;\n\n  // Note: downloadRepo doesn't support credentials yet (git clone)\n  if (isValidUrl(model_path)) {\n    const downloadedPath = await 
storageManager.downloadRepo(\n      model_path,\n      diffusersPath\n    );\n    const filename = path.basename(downloadedPath);\n    updateModelsInConfig(\"diffusers\", filename);\n    node.inputs.model_path = filename;\n  }\n\n  return node;\n}\n\nasync function processLoraLoaderNode(\n  node: ComfyNode,\n  getCredentials: CredentialProvider\n): Promise<ComfyNode> {\n  const storageManager = getStorageManager();\n  const { lora_name } = node.inputs;\n\n  if (isValidUrl(lora_name)) {\n    const localLoraPath = await storageManager.downloadFile(\n      lora_name,\n      loraPath,\n      undefined,\n      getCredentials(lora_name)\n    );\n    const filename = path.basename(localLoraPath);\n    updateModelsInConfig(\"loras\", filename);\n    node.inputs.lora_name = filename;\n  }\n\n  return node;\n}\n\nasync function processVAELoaderNode(\n  node: ComfyNode,\n  getCredentials: CredentialProvider\n): Promise<ComfyNode> {\n  const storageManager = getStorageManager();\n  const { vae_name } = node.inputs;\n\n  if (isValidUrl(vae_name)) {\n    const localVaePath = await storageManager.downloadFile(\n      vae_name,\n      vaePath,\n      undefined,\n      getCredentials(vae_name)\n    );\n    const filename = path.basename(localVaePath);\n    updateModelsInConfig(\"vae\", filename);\n    node.inputs.vae_name = filename;\n  }\n\n  return node;\n}\n\nasync function processControlNetLoaderNode(\n  node: ComfyNode,\n  getCredentials: CredentialProvider\n): Promise<ComfyNode> {\n  const storageManager = getStorageManager();\n  const { control_net_name } = node.inputs;\n\n  if (isValidUrl(control_net_name)) {\n    const localControlNetPath = await storageManager.downloadFile(\n      control_net_name,\n      controlNetPath,\n      undefined,\n      getCredentials(control_net_name)\n    );\n    const filename = path.basename(localControlNetPath);\n    updateModelsInConfig(\"controlnet\", filename);\n    node.inputs.control_net_name = filename;\n  }\n\n  return node;\n}\n\nasync function processUNETLoaderNode(\n  node: ComfyNode,\n  getCredentials: CredentialProvider\n): Promise<ComfyNode> {\n  const storageManager = getStorageManager();\n  const { unet_name } = node.inputs;\n\n  if (isValidUrl(unet_name)) {\n    const localUNETPath = await storageManager.downloadFile(\n      unet_name,\n      diffusersPath,\n      undefined,\n      getCredentials(unet_name)\n    );\n    const filename = path.basename(localUNETPath);\n    updateModelsInConfig(\"diffusers\", filename);\n    node.inputs.unet_name = filename;\n  }\n\n  return node;\n}\n\nasync function processCLIPLoaderNode(\n  node: ComfyNode,\n  getCredentials: CredentialProvider\n): Promise<ComfyNode> {\n  const storageManager = getStorageManager();\n  const { clip_name } = node.inputs;\n\n  if (isValidUrl(clip_name)) {\n    const localCLIPPath = await storageManager.downloadFile(\n      clip_name,\n      clipPath,\n      undefined,\n      getCredentials(clip_name)\n    );\n    const filename = path.basename(localCLIPPath);\n    updateModelsInConfig(\"text_encoders\", filename);\n    node.inputs.clip_name = filename;\n  }\n\n  return node;\n}\n\nasync function processDualCLIPLoaderNode(\n  node: ComfyNode,\n  getCredentials: CredentialProvider\n): Promise<ComfyNode> {\n  const storageManager = getStorageManager();\n  const { clip_name1, clip_name2 } = node.inputs;\n  if (isValidUrl(clip_name1)) {\n    const localCLIPPath1 = await storageManager.downloadFile(\n      clip_name1,\n      clipPath,\n      undefined,\n      getCredentials(clip_name1)\n    
);\n    const filename = path.basename(localCLIPPath1);\n    updateModelsInConfig(\"text_encoders\", filename);\n    node.inputs.clip_name1 = filename;\n  }\n  if (isValidUrl(clip_name2)) {\n    const localCLIPPath2 = await storageManager.downloadFile(\n      clip_name2,\n      clipPath,\n      undefined,\n      getCredentials(clip_name2)\n    );\n    const filename = path.basename(localCLIPPath2);\n    updateModelsInConfig(\"text_encoders\", filename);\n    node.inputs.clip_name2 = filename;\n  }\n\n  return node;\n}\n\nasync function processStyleModelLoaderNode(\n  node: ComfyNode,\n  getCredentials: CredentialProvider\n): Promise<ComfyNode> {\n  const storageManager = getStorageManager();\n  const { style_model_name } = node.inputs;\n\n  if (isValidUrl(style_model_name)) {\n    const localStyleModelPath = await storageManager.downloadFile(\n      style_model_name,\n      styleModelPath,\n      undefined,\n      getCredentials(style_model_name)\n    );\n    const filename = path.basename(localStyleModelPath);\n    updateModelsInConfig(\"style_models\", filename);\n    node.inputs.style_model_name = filename;\n  }\n\n  return node;\n}\n\nasync function processGLIGENLoaderNode(\n  node: ComfyNode,\n  getCredentials: CredentialProvider\n): Promise<ComfyNode> {\n  const storageManager = getStorageManager();\n  const { gligen_name } = node.inputs;\n\n  if (isValidUrl(gligen_name)) {\n    const localGLIGENPath = await storageManager.downloadFile(\n      gligen_name,\n      gligenPath,\n      undefined,\n      getCredentials(gligen_name)\n    );\n    const filename = path.basename(localGLIGENPath);\n    updateModelsInConfig(\"gligen\", filename);\n    node.inputs.gligen_name = filename;\n  }\n\n  return node;\n}\n\nasync function processUpscaleModelLoaderNode(\n  node: ComfyNode,\n  getCredentials: CredentialProvider\n): Promise<ComfyNode> {\n  const storageManager = getStorageManager();\n  const { model_name } = node.inputs;\n\n  if (isValidUrl(model_name)) {\n    const localModelPath = await storageManager.downloadFile(\n      model_name,\n      upscaleModelPath,\n      undefined,\n      getCredentials(model_name)\n    );\n    const filename = path.basename(localModelPath);\n    updateModelsInConfig(\"upscale_models\", filename);\n    node.inputs.model_name = filename;\n  }\n\n  return node;\n}\n\nexport async function processModelLoadingNode(\n  node: ComfyNode,\n  log: FastifyBaseLogger,\n  getCredentials: CredentialProvider = () => undefined\n): Promise<ComfyNode> {\n  switch (node.class_type) {\n    case \"CheckpointLoader\":\n      return processCheckpointLoaderNode(node, getCredentials);\n    case \"CheckpointLoaderSimple\":\n    case \"unCLIPCheckpointLoader\":\n      return processCheckpointLoaderSimpleNode(node, getCredentials);\n    case \"DiffusersLoader\":\n      return processDiffusersLoaderNode(node, getCredentials);\n    case \"LoraLoader\":\n    case \"LoraLoaderModelOnly\":\n      return processLoraLoaderNode(node, getCredentials);\n    case \"VAELoader\":\n      return processVAELoaderNode(node, getCredentials);\n    case \"ControlNetLoader\":\n    case \"DiffControlNetLoader\":\n      return processControlNetLoaderNode(node, getCredentials);\n    case \"UNETLoader\":\n      return processUNETLoaderNode(node, getCredentials);\n    case \"CLIPLoader\":\n    case \"CLIPVisionLoader\":\n      return processCLIPLoaderNode(node, getCredentials);\n    case \"DualCLIPLoader\":\n      return processDualCLIPLoaderNode(node, getCredentials);\n    case \"StyleModelLoader\":\n      
return processStyleModelLoaderNode(node, getCredentials);\n    case \"GLIGENLoader\":\n      return processGLIGENLoaderNode(node, getCredentials);\n    case \"UpscaleModelLoader\":\n      return processUpscaleModelLoaderNode(node, getCredentials);\n    default:\n      return node;\n  }\n}\n\nexport async function processLoadImageNode(\n  node: ComfyNode,\n  log: FastifyBaseLogger\n): Promise<ComfyNode> {\n  node.inputs.image = await processInputMedia(node.inputs.image, log);\n  return node;\n}\n\nexport async function processLoadDirectoryOfImagesNode(\n  node: ComfyNode,\n  jobId: string,\n  log: FastifyBaseLogger\n): Promise<ComfyNode> {\n  const processPromises: Promise<string>[] = [];\n  for (const imageInput of node.inputs.directory) {\n    processPromises.push(processInputMedia(imageInput, log, jobId));\n  }\n  await Promise.all(processPromises);\n  node.inputs.directory = jobId;\n  return node;\n}\n\nexport async function processLoadVideoNode(\n  node: ComfyNode,\n  log: FastifyBaseLogger\n): Promise<ComfyNode> {\n  const { video, file } = node.inputs;\n  if (video) {\n    node.inputs.video = await processInputMedia(video, log);\n  }\n  if (file) {\n    node.inputs.file = await processInputMedia(file, log);\n  }\n  return node;\n}\n\nexport async function processLoadAudioNode(\n  node: ComfyNode,\n  log: FastifyBaseLogger\n): Promise<ComfyNode> {\n  const { audio } = node.inputs;\n  if (audio) {\n    node.inputs.audio = await processInputMedia(audio, log);\n  }\n  return node;\n}\n\nconst loadDirectoryOfImagesNodeTypes = new Set<string>([\n  \"VHS_LoadImages\",\n  \"VHS_LoadImagesPath\",\n]);\nconst loadVideoNodeTypes = new Set<string>([\n  \"LoadVideo\",\n  \"VHS_LoadVideo\",\n  \"VHS_LoadVideoPath\",\n  \"VHS_LoadVideoFFmpegPath\",\n  \"VHS_LoadVideoFFmpeg\",\n]);\n\nconst modelLoadingNodeTypes = new Set([\n  \"CheckpointLoader\",\n  \"CheckpointLoaderSimple\",\n  \"DiffusersLoader\",\n  \"unCLIPCheckpointLoader\",\n  \"LoraLoader\",\n  \"LoraLoaderModelOnly\",\n  \"VAELoader\",\n  \"ControlNetLoader\",\n  \"DiffControlNetLoader\",\n  \"UNETLoader\",\n  \"CLIPLoader\",\n  \"DualCLIPLoader\",\n  \"CLIPVisionLoader\",\n  \"StyleModelLoader\",\n  \"GLIGENLoader\",\n  \"UpscaleModelLoader\",\n]);\n\nconst audioLoadingNodeTypes = new Set([\"LoadAudio\"]);\n\nexport type NodeProcessError = Error & {\n  code?: number;\n  location?: string;\n  message?: string;\n};\n\nexport async function preprocessNodes(\n  prompt: ComfyPrompt,\n  id: string,\n  log: FastifyBaseLogger,\n  credentials?: WorkflowCredential[]\n): Promise<{ prompt: ComfyPrompt; hasSaveImage: boolean }> {\n  // Create a credential provider for URL pattern matching\n  const getCredentials = createCredentialProvider(credentials);\n\n  let hasSaveImage = false;\n  for (const nodeId in prompt) {\n    const node = prompt[nodeId];\n    if (\n      node.inputs.filename_prefix &&\n      typeof node.inputs.filename_prefix === \"string\"\n    ) {\n      /**\n       * If the node is for saving files, we want to set the filename_prefix\n       * to the id of the prompt. This ensures no collisions between prompts\n       * from different users.\n       */\n      node.inputs.filename_prefix = config.prependFilenames\n        ? 
id + \"_\" + node.inputs.filename_prefix\n        : id;\n      if (\n        typeof node.inputs.save_output !== \"undefined\" &&\n        !node.inputs.save_output\n      ) {\n        continue;\n      }\n      hasSaveImage = true;\n    } else if (node?.inputs?.image && typeof node.inputs.image === \"string\") {\n      /**\n       * If the node is for loading an image, the user will have provided\n       * the image as base64 encoded data, or as a url. we need to download\n       * the image if it's a url, and save it to a local file.\n       */\n      try {\n        Object.assign(node, await processLoadImageNode(node, log));\n      } catch (e: any) {\n        const err = new Error(\n          `Failed to download image for node ${nodeId}: ${e.message}`\n        ) as NodeProcessError;\n        err.code = 400;\n        err.location = `prompt.${nodeId}.inputs.image`;\n        throw err;\n      }\n    } else if (\n      loadDirectoryOfImagesNodeTypes.has(node.class_type) &&\n      Array.isArray(node.inputs.directory) &&\n      node.inputs.directory.every((x: any) => typeof x === \"string\")\n    ) {\n      /**\n       * If the node is for loading a directory of images, the user will have\n       * provided the local directory as a string or an array of strings. If it's an\n       * array, we need to download each image to a local file, and update the input\n       * to be the local directory.\n       */\n      try {\n        Object.assign(\n          node,\n          await processLoadDirectoryOfImagesNode(node, id, log)\n        );\n      } catch (e: any) {\n        const err = new Error(\n          `Failed to download images for node ${nodeId}: ${e.message}`\n        ) as NodeProcessError;\n        err.code = 400;\n        err.location = `prompt.${nodeId}.inputs.directory`;\n        throw err;\n      }\n    } else if (loadVideoNodeTypes.has(node.class_type)) {\n      /**\n       * If the node is for loading a video, the user will have provided\n       * the video as base64 encoded data, or as a url. we need to download\n       * the video if it's a url, and save it to a local file.\n       */\n      try {\n        Object.assign(node, await processLoadVideoNode(node, log));\n      } catch (e: any) {\n        const err = new Error(\n          `Failed to download video for node ${nodeId}: ${e.message}`\n        ) as NodeProcessError;\n        err.code = 400;\n        err.location = `prompt.${nodeId}.inputs.video`;\n        throw err;\n      }\n    } else if (audioLoadingNodeTypes.has(node.class_type)) {\n      /**\n       * If the node is for loading audio, the user will have provided\n       * the audio as base64 encoded data, or as a url. 
we need to download\n       * the audio if it's a url, and save it to a local file.\n       */\n      try {\n        Object.assign(node, await processLoadAudioNode(node, log));\n      } catch (e: any) {\n        const err = new Error(\n          `Failed to download audio for node ${nodeId}: ${e.message}`\n        ) as NodeProcessError;\n        err.code = 400;\n        err.location = `prompt.${nodeId}.inputs.audio`;\n        throw err;\n      }\n    } else if (modelLoadingNodeTypes.has(node.class_type)) {\n      try {\n        Object.assign(node, await processModelLoadingNode(node, log, getCredentials));\n      } catch (e: any) {\n        const err = new Error(\n          `Failed to process model for node ${nodeId}: ${e.message}`\n        ) as NodeProcessError;\n        err.code = 400;\n        err.location = `prompt.${nodeId}.inputs`;\n        throw err;\n      }\n    }\n  }\n\n  return { prompt, hasSaveImage };\n}\n"
  },
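  {
    "path": "examples/url-input-rewrite-sketch.ts",
    "content": "// Hypothetical sketch (the file name and location are illustrative): the\n// rewrite that processModelLoadingNode above performs, reduced to its core. A\n// loader node whose ckpt_name is a URL has the file downloaded into the\n// matching models directory, and the node input is replaced with the bare\n// filename so ComfyUI can resolve it locally. The download itself is stubbed\n// out here, and isUrl is a stand-in for isValidUrl from src/utils.\nimport path from \"node:path\";\n\nfunction isUrl(value: string): boolean {\n  try {\n    const u = new URL(value);\n    return u.protocol === \"http:\" || u.protocol === \"https:\";\n  } catch {\n    return false;\n  }\n}\n\nconst node = {\n  class_type: \"CheckpointLoaderSimple\",\n  inputs: { ckpt_name: \"https://example.com/models/dreamshaper_8.safetensors\" },\n};\n\nif (isUrl(node.inputs.ckpt_name)) {\n  // The real code awaits storageManager.downloadFile(...) here and keeps the\n  // basename of the downloaded file.\n  node.inputs.ckpt_name = path.basename(new URL(node.inputs.ckpt_name).pathname);\n}\n\nconsole.log(node.inputs.ckpt_name); // => \"dreamshaper_8.safetensors\"\n"
  },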
  {
    "path": "src/comfy.ts",
    "content": "import { sleep } from \"./utils\";\nimport config from \"./config\";\nimport { CommandExecutor } from \"./commands\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport {\n  ComfyPrompt,\n  ComfyWSMessage,\n  isStatusMessage,\n  isProgressMessage,\n  isProgressStateMessage,\n  isExecutionStartMessage,\n  isExecutionCachedMessage,\n  isExecutedMessage,\n  isExecutionSuccessMessage,\n  isExecutingMessage,\n  isExecutionInterruptedMessage,\n  isExecutionErrorMessage,\n  WebhookHandlers,\n  ComfyPromptResponse,\n  ComfyHistoryResponse,\n  ExecutionStats,\n  isExecutionStats,\n} from \"./types\";\nimport path from \"path\";\nimport fsPromises from \"fs/promises\";\nimport WebSocket, { MessageEvent } from \"ws\";\nimport { fetch } from \"undici\";\nimport { getProxyDispatcher } from \"./proxy-dispatcher\";\nimport { z } from \"zod\";\n\nconst commandExecutor = new CommandExecutor();\n\nexport function launchComfyUI() {\n  const cmdAndArgs = config.comfyLaunchCmd.split(\" \");\n  const cmd = cmdAndArgs[0];\n  const args = cmdAndArgs.slice(1);\n  return commandExecutor.execute(cmd, args, {\n    DIRECT_ADDRESS: config.comfyHost,\n    COMFYUI_PORT_HOST: config.comfyPort,\n    WEB_ENABLE_AUTH: \"false\",\n    CF_QUICK_TUNNELS: \"false\",\n  });\n}\n\nexport function shutdownComfyUI() {\n  commandExecutor.interrupt();\n}\n\nexport async function pingComfyUI(): Promise<void> {\n  const res = await fetch(config.comfyURL, {\n    dispatcher: getProxyDispatcher(),\n  });\n  if (!res.ok) {\n    throw new Error(`Failed to ping Comfy UI: ${await res.text()}`);\n  }\n}\n\nexport async function waitForComfyUIToStart(\n  log: FastifyBaseLogger\n): Promise<void> {\n  let retries = 0;\n  while (retries < config.startupCheckMaxTries) {\n    try {\n      await pingComfyUI();\n      log.info(\"Comfy UI started\");\n      return;\n    } catch (e) {\n      // Ignore\n    }\n    retries++;\n    await sleep(config.startupCheckInterval);\n  }\n\n  throw new Error(\n    `Comfy UI did not start after ${\n      (config.startupCheckInterval / 1000) * config.startupCheckMaxTries\n    } seconds`\n  );\n}\n\nexport async function warmupComfyUI(): Promise<void> {\n  if (config.warmupPrompt) {\n    const resp = await fetch(`http://localhost:${config.wrapperPort}/prompt`, {\n      method: \"POST\",\n      headers: {\n        \"Content-Type\": \"application/json\",\n      },\n      body: JSON.stringify({ prompt: config.warmupPrompt }),\n      dispatcher: getProxyDispatcher(),\n    });\n    if (!resp.ok) {\n      throw new Error(`Failed to warmup Comfy UI: ${await resp.text()}`);\n    }\n  }\n}\n\nexport async function queuePrompt(prompt: ComfyPrompt): Promise<string> {\n  const resp = await fetch(`${config.comfyURL}/prompt`, {\n    method: \"POST\",\n    headers: {\n      \"Content-Type\": \"application/json\",\n    },\n    body: JSON.stringify({ prompt, client_id: config.wsClientId }),\n    dispatcher: getProxyDispatcher(),\n  });\n  if (!resp.ok) {\n    throw new Error(`Failed to queue prompt: ${await resp.text()}`);\n  }\n  const { prompt_id } = (await resp.json()) as ComfyPromptResponse;\n  return prompt_id;\n}\n\nexport async function getPromptOutputs(\n  promptId: string,\n  log: FastifyBaseLogger\n): Promise<Record<string, Buffer> | null> {\n  const resp = await fetch(`${config.comfyURL}/history/${promptId}`, {\n    dispatcher: getProxyDispatcher(),\n  });\n  if (!resp.ok) {\n    const txt = await resp.text();\n    log.error(`Failed to get prompt outputs: ${txt}`);\n    throw new Error(`Failed to get 
prompt outputs: ${txt}`);\n  }\n  const body = (await resp.json()) as ComfyHistoryResponse;\n  const allOutputs: Record<string, Buffer> = {};\n  const fileLoadPromises: Promise<void>[] = [];\n  if (!body[promptId]) {\n    log.debug(`Prompt ${promptId} not found in history endpoint response`);\n    return null;\n  }\n  const { status, outputs } = body[promptId];\n  if (status.completed) {\n    for (const nodeId in outputs) {\n      const node = outputs[nodeId];\n      for (const outputType in node) {\n        for (const outputFile of node[outputType]) {\n          const filename = outputFile.filename;\n          if (!filename) {\n            /**\n             * Some nodes have fields in the outputs that are not actual files.\n             * For example, the SaveAnimatedWebP node has a field called \"animated\"\n             * that only contains boolean values mapping to the files present in\n             * .images. We can safely ignore these.\n             */\n            continue;\n          }\n          const filepath = path.join(config.outputDir, filename);\n          fileLoadPromises.push(\n            fsPromises\n              .readFile(filepath)\n              .then((data) => {\n                allOutputs[filename] = data;\n              })\n              .catch((e: any) => {\n                /**\n                 * The most likely reason for this is a node that has an optional\n                 * output. If the node doesn't produce that output, the file won't\n                 * exist.\n                 */\n                log.warn(`Failed to read file ${filepath}: ${e.message}`);\n              })\n          );\n        }\n      }\n    }\n  } else if (status.status_str === \"error\") {\n    log.error(JSON.stringify(status));\n    throw new Error(\"Prompt execution failed\");\n  } else {\n    log.debug(JSON.stringify(status));\n    throw new Error(\"Prompt is not completed\");\n  }\n  await Promise.all(fileLoadPromises);\n  return allOutputs;\n}\n\nasync function collectExecutionStats(\n  promptId: string,\n  log: FastifyBaseLogger\n): Promise<ExecutionStats> {\n  let start = Date.now();\n  return new Promise((resolve, reject) => {\n    const stats: ExecutionStats = {\n      comfy_execution: { start, end: 0, duration: 0, nodes: {} },\n    };\n    const handleMessage = (event: MessageEvent) => {\n      const { data } = event;\n      if (typeof data === \"string\") {\n        const message = JSON.parse(data) as ComfyWSMessage;\n        if (message?.data?.prompt_id !== promptId) return;\n        if (isExecutionStartMessage(message)) {\n          start = Date.now();\n          stats.comfy_execution.start = start;\n          log.info(`Prompt ${promptId} started execution`);\n        } else if (isExecutingMessage(message)) {\n          const nodeId = message.data.node;\n          if (!nodeId) return;\n          stats.comfy_execution.nodes[nodeId] = {\n            start: Date.now(),\n          };\n        } else if (isExecutionSuccessMessage(message)) {\n          stats.comfy_execution.end = Date.now();\n          stats.comfy_execution.duration =\n            stats.comfy_execution.end - stats.comfy_execution.start;\n          wsClient?.removeEventListener(\"close\", onClose);\n          wsClient?.removeEventListener(\"message\", handleMessage);\n          log.info(`Prompt ${promptId} completed execution`);\n          return resolve(stats);\n        } else if (isExecutionErrorMessage(message)) {\n          wsClient?.removeEventListener(\"close\", onClose);\n          
wsClient?.removeEventListener(\"message\", handleMessage);\n          return reject(new Error(\"Prompt execution failed\"));\n        } else if (isExecutionInterruptedMessage(message)) {\n          wsClient?.removeEventListener(\"close\", onClose);\n          wsClient?.removeEventListener(\"message\", handleMessage);\n          return reject(new Error(\"Prompt execution interrupted\"));\n        }\n      }\n    };\n\n    const onClose = () => {\n      wsClient?.removeEventListener(\"message\", handleMessage);\n      wsClient?.removeEventListener(\"close\", onClose);\n      return reject(new Error(\"Websocket closed\"));\n    };\n    wsClient?.addEventListener(\"message\", handleMessage);\n    wsClient?.addEventListener(\"close\", onClose);\n  });\n}\n\nexport const comfyIDToApiID: Record<string, string> = {};\n\nclass HistoryEndpointPoller {\n  private promptId: string;\n  private log: FastifyBaseLogger;\n  private maxTries: number;\n  private interval: number;\n  private currentTries: number = 0;\n  private sleepTimer: NodeJS.Timeout | null = null;\n  private resolveCurrentSleep: (() => void) | null = null;\n  constructor(options: {\n    promptId: string;\n    log: FastifyBaseLogger;\n    maxTries: number;\n    interval: number;\n  }) {\n    this.promptId = options.promptId;\n    this.log = options.log;\n    this.maxTries = options.maxTries;\n    this.interval = options.interval;\n  }\n  async poll(): Promise<Record<string, Buffer> | null> {\n    while (this.currentTries < this.getMaxTries() || this.maxTries === 0) {\n      this.log.debug(\n        `Polling history endpoint for prompt ${this.promptId}, try ${\n          this.currentTries\n        } of ${this.getMaxTries()}`\n      );\n      const outputs = await getPromptOutputs(this.promptId, this.log);\n      if (outputs) {\n        return outputs;\n      }\n      this.currentTries++;\n      this.log.debug(\n        `Polling history endpoint for prompt ${\n          this.promptId\n        }, sleep for ${this.getInterval()}ms`\n      );\n      await new Promise<void>((resolve) => {\n        this.resolveCurrentSleep = resolve;\n        this.sleepTimer = setTimeout(resolve, this.getInterval());\n      });\n    }\n    return null;\n  }\n\n  getInterval(): number {\n    return this.interval;\n  }\n\n  getMaxTries(): number {\n    return this.maxTries;\n  }\n\n  setInterval(interval: number, skipCurrentTimeout: boolean = true): void {\n    this.interval = interval;\n    if (skipCurrentTimeout && this.sleepTimer) {\n      clearTimeout(this.sleepTimer);\n      this.sleepTimer = null;\n    }\n    if (skipCurrentTimeout && this.resolveCurrentSleep) {\n      this.resolveCurrentSleep();\n      this.resolveCurrentSleep = null;\n    }\n  }\n\n  setMaxTries(maxTries: number, reset: boolean = true): void {\n    this.maxTries = maxTries;\n    if (reset) {\n      this.currentTries = 0;\n    }\n  }\n\n  stop(): void {\n    this.setMaxTries(this.currentTries);\n    this.setInterval(0);\n  }\n}\n\nexport type PromptOutputsWithStats = {\n  outputs: Record<string, Buffer>;\n  stats: ExecutionStats;\n};\n\nexport async function runPromptAndGetOutputs(\n  id: string,\n  prompt: ComfyPrompt,\n  log: FastifyBaseLogger\n): Promise<PromptOutputsWithStats> {\n  const promptId = await queuePrompt(prompt);\n  comfyIDToApiID[promptId] = id;\n  log.debug(`Prompt ${id} queued as comfy prompt id: ${promptId}`);\n  /**\n   * We start with a slow poll to the history endpoint, both as a safety measure around websocket\n   * failures, and to avoid hammering the history 
endpoint with requests in the case of many queued\n   * prompts.\n   */\n  const poller = new HistoryEndpointPoller({\n    promptId,\n    log,\n    maxTries: 0,\n    interval: 1000,\n  });\n  const historyPoll = poller.poll();\n\n  /**\n   * We also listen to the websocket stream for the prompt to complete.\n   */\n  const executionStatsPromise = collectExecutionStats(promptId, log);\n\n  /**\n   * We wait for either the history endpoint to return the outputs, or the websocket\n   * to signal that the prompt has completed.\n   */\n  let firstToComplete: Record<string, Buffer> | ExecutionStats | null;\n  try {\n    firstToComplete = await Promise.race([historyPoll, executionStatsPromise]);\n  } catch (e) {\n    /**\n     * If an error is thrown by either of those processes, we stop the polling and\n     * throw an error.\n     */\n    log.error(`Error waiting for prompt to complete: ${e}`);\n    firstToComplete = null;\n  }\n\n  if (isExecutionStats(firstToComplete)) {\n    /**\n     * If the websocket signals that the prompt has completed (this is typical), we can speed\n     * up the history endpoint polling, as it should only need 1-2 tries to get the outputs.\n     */\n    log.info(`Prompt ${id} completed`);\n    poller.setMaxTries(100);\n    poller.setInterval(30);\n    const outputs = await historyPoll;\n    /**\n     * We delete the comfyIDToApiID mapping after a short delay to prevent\n     * this object from growing indefinitely.\n     */\n    setTimeout(() => {\n      delete comfyIDToApiID[promptId];\n    }, 1000);\n    if (outputs) {\n      return { outputs, stats: firstToComplete };\n    }\n    throw new Error(\"Failed to get prompt outputs\");\n  } else if (firstToComplete === null) {\n    poller.stop();\n    throw new Error(\"Failed to get prompt outputs\");\n  }\n  /**\n   * If we reach this point, it means that the history endpoint returned the outputs\n   * before the websocket signaled that the prompt had completed. This is unexpected,\n   * but fine. 
We return the outputs and delete the comfyIDToApiID mapping.\n   */\n  setTimeout(() => {\n    /**\n     * We delete the comfyIDToApiID mapping after a short delay to prevent\n     * this object from growing indefinitely.\n     */\n    delete comfyIDToApiID[promptId];\n  }, 1000);\n  const outputs = firstToComplete as Record<string, Buffer>;\n  const stats = await executionStatsPromise;\n  return { outputs, stats };\n}\n\nlet wsClient: WebSocket | null = null;\n\nexport function connectToComfyUIWebsocketStream(\n  hooks: WebhookHandlers,\n  log: FastifyBaseLogger,\n  useApiIDs: boolean = true\n): Promise<WebSocket> {\n  return new Promise((resolve, reject) => {\n    wsClient = new WebSocket(config.comfyWSURL);\n    wsClient.on(\"message\", (data, isBinary) => {\n      if (hooks.onMessage) {\n        hooks.onMessage(data);\n      }\n      if (!isBinary) {\n        const message = JSON.parse(data.toString(\"utf8\")) as ComfyWSMessage;\n        if (\n          useApiIDs &&\n          message.data.prompt_id &&\n          comfyIDToApiID[message.data.prompt_id]\n        ) {\n          message.data.prompt_id = comfyIDToApiID[message.data.prompt_id];\n        }\n        if (isStatusMessage(message) && hooks.onStatus) {\n          hooks.onStatus(message);\n        } else if (isProgressMessage(message) && hooks.onProgress) {\n          hooks.onProgress(message);\n        } else if (isExecutionStartMessage(message) && hooks.onExecutionStart) {\n          hooks.onExecutionStart(message);\n        } else if (\n          isExecutionCachedMessage(message) &&\n          hooks.onExecutionCached\n        ) {\n          hooks.onExecutionCached(message);\n        } else if (isExecutingMessage(message) && hooks.onExecuting) {\n          hooks.onExecuting(message);\n        } else if (isExecutedMessage(message) && hooks.onExecuted) {\n          hooks.onExecuted(message);\n        } else if (\n          isExecutionSuccessMessage(message) &&\n          hooks.onExecutionSuccess\n        ) {\n          hooks.onExecutionSuccess(message);\n        } else if (\n          isExecutionInterruptedMessage(message) &&\n          hooks.onExecutionInterrupted\n        ) {\n          hooks.onExecutionInterrupted(message);\n        } else if (isExecutionErrorMessage(message) && hooks.onExecutionError) {\n          hooks.onExecutionError(message);\n        } else if (isProgressStateMessage(message) && hooks.onProgressState) {\n          if (useApiIDs && message.data.nodes) {\n            for (const nodeId in message.data.nodes) {\n              const node = message.data.nodes[nodeId];\n              if (node.prompt_id && comfyIDToApiID[node.prompt_id]) {\n                node.prompt_id = comfyIDToApiID[node.prompt_id];\n              }\n            }\n          }\n          hooks.onProgressState(message);\n        }\n      } else {\n        log.info(`Received binary message`);\n      }\n    });\n\n    wsClient.on(\"open\", () => {\n      log.info(\"Connected to Comfy UI websocket\");\n\n      return resolve(wsClient as WebSocket);\n    });\n    wsClient.on(\"error\", (error) => {\n      log.error(`Failed to connect to Comfy UI websocket: ${error}`);\n      return reject(error);\n    });\n\n    wsClient.on(\"close\", () => {\n      log.info(\"Disconnected from Comfy UI websocket\");\n    });\n  });\n}\n\nexport async function getModels(): Promise<\n  Record<\n    string,\n    {\n      dir: string;\n      all: string[];\n      enum: z.ZodEnum<[string, ...string[]]>;\n    }\n  >\n> {\n  const modelsResp = await 
fetch(`${config.comfyURL}/models`, {\n    dispatcher: getProxyDispatcher(),\n  });\n\n  if (!modelsResp.ok) {\n    throw new Error(`Failed to fetch model types: ${await modelsResp.text()}`);\n  }\n\n  const modelTypes = (await modelsResp.json()) as Array<string>;\n  const modelsByType: Record<\n    string,\n    { dir: string; all: string[]; enum: z.ZodEnum<[string, ...string[]]> }\n  > = {};\n\n  const modelPromises = modelTypes.map(async (modelType) => {\n    const resp = await fetch(`${config.comfyURL}/models/${modelType}`, {\n      dispatcher: getProxyDispatcher(),\n    });\n\n    if (!resp.ok) {\n      throw new Error(\n        `Failed to fetch models for type ${modelType}: ${await resp.text()}`\n      );\n    }\n\n    const models = (await resp.json()) as Array<string>;\n    modelsByType[modelType] = {\n      dir: path.join(config.modelDir, modelType),\n      all: models,\n      enum: z.enum(models as [string, ...string[]]),\n    };\n  });\n  await Promise.all(modelPromises);\n\n  config.models = modelsByType;\n  return modelsByType;\n}\n\nexport async function interruptPrompt(id: string): Promise<void> {\n  const comfyPromptId = Object.keys(comfyIDToApiID).find(\n    (key) => comfyIDToApiID[key] === id\n  );\n\n  if (!comfyPromptId) {\n    throw new Error(`Prompt ${id} not found`);\n  }\n\n  const resp = await fetch(`${config.comfyURL}/interrupt`, {\n    method: \"POST\",\n    headers: {\n      \"Content-Type\": \"application/json\",\n    },\n    body: JSON.stringify({ prompt_id: comfyPromptId }),\n    dispatcher: getProxyDispatcher(),\n  });\n  if (!resp.ok) {\n    throw new Error(`Failed to interrupt prompt: ${await resp.text()}`);\n  }\n}"
  },
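  {
    "path": "examples/race-pattern-sketch.ts",
    "content": "// Hypothetical sketch (the file name and location are illustrative): the\n// completion-detection pattern used by runPromptAndGetOutputs above, reduced\n// to its core. Two signals race: a slow poll (standing in for the /history\n// endpoint) and an event (standing in for the websocket execution_success\n// message); whichever settles first drives the next step. Timings are\n// illustrative.\nasync function poll<T>(\n  fn: () => Promise<T | null>,\n  intervalMs: number\n): Promise<T> {\n  for (;;) {\n    const result = await fn();\n    if (result !== null) return result;\n    await new Promise((r) => setTimeout(r, intervalMs));\n  }\n}\n\nasync function demo() {\n  let done = false;\n  const wsSignal = new Promise<string>((resolve) =>\n    setTimeout(() => {\n      done = true;\n      resolve(\"ws\");\n    }, 250)\n  );\n  const historyPoll = poll(async () => (done ? \"history\" : null), 1000);\n\n  // Usually \"ws\" wins, after which the real code tightens the poll interval\n  // to fetch the outputs quickly.\n  console.log(\"first to complete:\", await Promise.race([historyPoll, wsSignal]));\n}\n\ndemo();\n"
  },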
  {
    "path": "src/commands.ts",
    "content": "import { spawn, ChildProcess } from \"child_process\";\n\nexport class CommandExecutor {\n  private process: ChildProcess | null = null;\n\n  /**\n   * Executes a command with arguments and custom environment variables.\n   * Returns a promise that resolves with the exit code of the subprocess.\n   * @param command The command to execute.\n   * @param args An array of arguments for the command.\n   * @param envAdditions Object with additional environment variables.\n   */\n  async execute(\n    command: string,\n    args: string[],\n    envAdditions: NodeJS.ProcessEnv\n  ): Promise<number | null> {\n    const env = { ...process.env, ...envAdditions }; // Merge parent environment with additions\n\n    return new Promise((resolve, reject) => {\n      this.process = spawn(command, args, {\n        env: env,\n        stdio: \"inherit\", // Use the parent's stdin, stdout, and stderr\n      });\n\n      this.process.on(\"error\", (err) => {\n        console.error(`Failed to start subprocess: ${err.message}`);\n        return reject(err);\n      });\n\n      this.process.on(\"exit\", (code, signal) => {\n        console.log(`Process exited with code ${code}, signal ${signal}`);\n        this.process = null;\n        if (code !== null) {\n          if (code === 0) {\n            console.log(\"Command executed successfully\");\n            return resolve(code);\n          }\n          return reject(new Error(`Process exited with code ${code}`));\n        } else {\n          return reject(\n            new Error(`Process terminated due to signal: ${signal}`)\n          );\n        }\n      });\n    });\n  }\n\n  /**\n   * Interrupts the currently running subprocess.\n   */\n  interrupt(): void {\n    if (this.process) {\n      this.process.kill(\"SIGINT\"); // Sends the interrupt signal\n      console.log(\"Process was interrupted\");\n    } else {\n      console.log(\"No process to interrupt\");\n    }\n  }\n}\n"
  },
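  {
    "path": "examples/command-executor-sketch.ts",
    "content": "// Hypothetical sketch (the file name and location are illustrative): minimal\n// use of CommandExecutor from src/commands.ts. The relative import assumes\n// this file lives in an examples/ directory beside src/.\nimport { CommandExecutor } from \"../src/commands\";\n\nasync function main() {\n  const executor = new CommandExecutor();\n  // stdio is inherited, so the child's output goes straight to this terminal;\n  // the promise resolves with the exit code once the process ends.\n  const code = await executor.execute(\"echo\", [\"hello from CommandExecutor\"], {\n    EXAMPLE_VAR: \"1\",\n  });\n  console.log(\"exit code:\", code);\n}\n\nmain().catch(console.error);\n"
  },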
  {
    "path": "src/config.ts",
    "content": "import assert from \"node:assert\";\nimport fs from \"node:fs\";\nimport path from \"node:path\";\nimport { randomUUID } from \"node:crypto\";\nimport { execSync } from \"child_process\";\nimport { z } from \"zod\";\nimport { version } from \"../package.json\";\nimport yaml from \"yaml\";\n\nconst {\n  ALWAYS_RESTART_COMFYUI = \"false\",\n  AWS_DEFAULT_REGION,\n  AWS_REGION,\n  AZURE_STORAGE_ACCOUNT,\n  AZURE_STORAGE_CONNECTION_STRING,\n  AZURE_STORAGE_KEY,\n  AZURE_STORAGE_SAS_TOKEN,\n  BASE = \"\",\n  CACHE_DIR = `${process.env.HOME}/.cache/comfyui-api`,\n  CMD = \"init.sh\",\n  COMFY_HOME = \"/opt/ComfyUI\",\n  COMFYUI_PORT_HOST = \"8188\",\n  DIRECT_ADDRESS = \"127.0.0.1\",\n  HOST = \"::\",\n  HTTP_AUTH_HEADER_NAME,\n  HTTP_AUTH_HEADER_VALUE,\n  INPUT_DIR,\n  LOG_LEVEL = \"info\",\n  LRU_CACHE_SIZE_GB = \"0\",\n  MANIFEST_JSON,\n  MANIFEST,\n  MARKDOWN_SCHEMA_DESCRIPTIONS = \"true\",\n  MAX_BODY_SIZE_MB = \"100\",\n  MAX_QUEUE_DEPTH = \"0\",\n  MODEL_DIR,\n  OUTPUT_DIR,\n  PORT = \"3000\",\n  PREPEND_FILENAMES = \"true\",\n  PROMPT_WEBHOOK_RETRIES = \"3\",\n  SALAD_CONTAINER_GROUP_ID,\n  SALAD_MACHINE_ID,\n  STARTUP_CHECK_INTERVAL_S = \"1\",\n  STARTUP_CHECK_MAX_TRIES = \"20\",\n  SYSTEM_WEBHOOK_EVENTS,\n  SYSTEM_WEBHOOK_URL,\n  WARMUP_PROMPT_FILE,\n  WARMUP_PROMPT_URL,\n  WEBHOOK_SECRET,\n  WORKFLOW_DIR = \"/workflows\",\n} = process.env;\n\nfs.mkdirSync(WORKFLOW_DIR, { recursive: true });\n\nconst comfyURL = `http://${DIRECT_ADDRESS}:${COMFYUI_PORT_HOST}`;\nconst wsClientId = randomUUID();\nconst comfyWSURL = `ws://${DIRECT_ADDRESS}:${COMFYUI_PORT_HOST}/ws?clientId=${wsClientId}`;\nconst selfURL = `http://localhost:${PORT}`;\nconst port = parseInt(PORT, 10);\nconst promptWebhookRetries = parseInt(PROMPT_WEBHOOK_RETRIES, 10);\n\nconst startupCheckInterval = parseInt(STARTUP_CHECK_INTERVAL_S, 10) * 1000;\nassert(\n  startupCheckInterval > 0,\n  \"STARTUP_CHECK_INTERVAL_S must be a positive integer\",\n);\n\nconst startupCheckMaxTries = parseInt(STARTUP_CHECK_MAX_TRIES, 10);\nassert(\n  startupCheckMaxTries > 0,\n  \"STARTUP_CHECK_MAX_TRIES must be a positive integer\",\n);\n\nconst maxBodySize = parseInt(MAX_BODY_SIZE_MB, 10) * 1024 * 1024;\nassert(maxBodySize > 0, \"MAX_BODY_SIZE_MB must be a positive integer\");\n\nconst maxQueueDepth = parseInt(MAX_QUEUE_DEPTH, 10);\nassert(maxQueueDepth >= 0, \"MAX_QUEUE_DEPTH must be a non-negative integer\");\n\nconst alwaysRestartComfyUI = ALWAYS_RESTART_COMFYUI.toLowerCase() === \"true\";\nconst prependFilenames = PREPEND_FILENAMES.toLowerCase() === \"true\";\n\nconst lruCacheSizeBytes = parseFloat(LRU_CACHE_SIZE_GB) * 1024 * 1024 * 1024;\nassert(\n  lruCacheSizeBytes >= 0,\n  \"LRU_CACHE_SIZE_GB must be a non-negative number\",\n);\n\nconst systemWebhook = SYSTEM_WEBHOOK_URL ?? 
\"\";\nif (systemWebhook) {\n  try {\n    const webhook = new URL(systemWebhook);\n    assert(webhook.protocol === \"http:\" || webhook.protocol === \"https:\");\n  } catch (e: any) {\n    throw new Error(`Invalid system webhook: ${e.message}`);\n  }\n}\n\nconst allEvents = new Set([\n  \"status\",\n  \"progress\",\n  \"progress_state\",\n  \"executing\",\n  \"execution_start\",\n  \"execution_cached\",\n  \"executed\",\n  \"execution_success\",\n  \"execution_interrupted\",\n  \"execution_error\",\n  \"file_downloaded\",\n  \"file_uploaded\",\n  \"file_deleted\",\n]);\nlet systemWebhookEvents: string[] = [];\nif (SYSTEM_WEBHOOK_EVENTS === \"all\") {\n  systemWebhookEvents = Array.from(allEvents);\n} else {\n  systemWebhookEvents = SYSTEM_WEBHOOK_EVENTS?.split(\",\") ?? [];\n  assert(\n    systemWebhookEvents.every((e) => allEvents.has(e)),\n    `Invalid system webhook events. Supported options: ${Array.from(\n      allEvents,\n    ).join(\", \")}`,\n  );\n}\n\nconst loadEnvCommand: Record<string, string> = {\n  \"ai-dock\": `source /opt/ai-dock/etc/environment.sh \\\n  && source /opt/ai-dock/bin/venv-set.sh comfyui \\\n  && source \"$COMFYUI_VENV/bin/activate\"`,\n};\n\n// The parent directory of model_dir\nconst comfyDir = COMFY_HOME;\nconst modelDir = MODEL_DIR ?? path.join(comfyDir, \"models\");\n\nlet warmupPrompt: any | undefined;\nlet warmupCkpt: string | undefined;\nif (WARMUP_PROMPT_FILE) {\n  assert(fs.existsSync(WARMUP_PROMPT_FILE), \"Warmup prompt file not found\");\n  try {\n    warmupPrompt = JSON.parse(\n      fs.readFileSync(WARMUP_PROMPT_FILE, { encoding: \"utf-8\" }),\n    );\n    for (const nodeId in warmupPrompt) {\n      const node = warmupPrompt[nodeId];\n      if (node.class_type === \"CheckpointLoaderSimple\") {\n        warmupCkpt = node.inputs.ckpt_name;\n        break;\n      }\n    }\n  } catch (e: any) {\n    throw new Error(`Failed to parse warmup prompt: ${e.message}`);\n  }\n}\n\ninterface ComfyDescription {\n  samplers: string[];\n  schedulers: string[];\n  version: string;\n}\n\nfunction getPythonCommand(): string {\n  try {\n    execSync(\"python3 --version\", { stdio: \"ignore\" });\n    return \"python3\";\n  } catch {\n    try {\n      execSync(\"python --version\", { stdio: \"ignore\" });\n      return \"python\";\n    } catch {\n      return \"python3\";\n    }\n  }\n}\n\n/**\n * This function uses python to import some of the ComfyUI code and get the\n * description of the samplers and schedulers.\n * @returns ComfyDescription\n */\nfunction getComfyUIDescription(): ComfyDescription {\n  const temptComfyFilePath = path.join(comfyDir, \"temp_comfy_description.json\");\n  const pythonCode = `\nimport comfy.samplers\nimport comfyui_version\nimport json\n\ncomfy_description = {\n    \"samplers\": comfy.samplers.KSampler.SAMPLERS,\n    \"schedulers\": comfy.samplers.KSampler.SCHEDULERS,\n    \"version\": comfyui_version.__version__\n}\n\nwith open(\"${temptComfyFilePath}\", \"w\") as f:\n    json.dump(comfy_description, f)\n`;\n\n  const tempFilePath = path.join(comfyDir, \"temp_comfy_description.py\");\n  const pythonCommand = getPythonCommand();\n  let command = `${pythonCommand} ${tempFilePath}`;\n  if (BASE in loadEnvCommand) {\n    command = `${loadEnvCommand[BASE]} \\\n    && ${pythonCommand} ${tempFilePath}`;\n  }\n\n  try {\n    // Write the Python code to a temporary file\n    fs.writeFileSync(tempFilePath, pythonCode);\n\n    // Execute the Python script synchronously\n    execSync(command, {\n      cwd: comfyDir,\n      encoding: \"utf-8\",\n  
    shell: process.env.SHELL,\n      env: {\n        ...process.env,\n      },\n    });\n    const output = fs.readFileSync(temptComfyFilePath, { encoding: \"utf-8\" });\n    return JSON.parse(output.trim()) as ComfyDescription;\n  } catch (error: any) {\n    console.warn(\n      `Failed to get ComfyUI description: ${error.message}. Using default values.`,\n    );\n    let ver = \"unknown\";\n    try {\n      const versionTxt = fs.readFileSync(\n        path.join(comfyDir, \"comfyui_version.py\"),\n        { encoding: \"utf-8\" },\n      );\n      const m = versionTxt.match(/__version__\\s*=\\s*[\"']([^\"']+)[\"']/);\n      if (m) ver = m[1];\n    } catch {}\n    return {\n      samplers: [\"euler\", \"euler_a\", \"heun\", \"dpmpp_2m\"],\n      schedulers: [\"normal\", \"karras\", \"exponential\", \"sgm\"],\n      version: ver,\n    };\n  } finally {\n    // Clean up the temporary file\n    try {\n      fs.unlinkSync(tempFilePath);\n    } catch (unlinkError: any) {\n      console.error(`Failed to delete temporary file: ${unlinkError.message}`);\n    }\n  }\n}\n\nconst comfyDescription = getComfyUIDescription();\n\nfunction parseManifest(manifestPath: string): any {\n  if (!fs.existsSync(manifestPath)) {\n    throw new Error(`Manifest file not found at path: ${manifestPath}`);\n  }\n\n  const isYAML =\n    manifestPath.endsWith(\".yaml\") || manifestPath.endsWith(\".yml\");\n  const isJSON = manifestPath.endsWith(\".json\");\n\n  if (!isYAML && !isJSON) {\n    throw new Error(\"Manifest file must be in JSON or YAML format.\");\n  }\n\n  const fileContent = fs.readFileSync(manifestPath, \"utf-8\");\n\n  if (isYAML) {\n    return yaml.parse(fileContent);\n  }\n\n  return JSON.parse(fileContent);\n}\n\nconst modelDownloadConfigSpec = z.object({\n  url: z.string().url(),\n  local_path: z.string(),\n});\n\nconst manifestSpec = z.object({\n  apt: z.string().array().optional(),\n  pip: z.string().array().optional(),\n  custom_nodes: z.string().array().optional(),\n  models: z.object({\n    before_start: modelDownloadConfigSpec.array().optional(),\n    after_start: modelDownloadConfigSpec.array().optional(),\n  }),\n});\n\nconst isValidManifest = (obj: any): obj is z.infer<typeof manifestSpec> => {\n  const result = manifestSpec.safeParse(obj);\n  return result.success;\n};\n\nlet manifest: z.infer<typeof manifestSpec> | null = null;\nif (MANIFEST_JSON) {\n  try {\n    const parsed = JSON.parse(MANIFEST_JSON);\n    if (!isValidManifest(parsed)) {\n      throw new Error(\"Invalid manifest JSON format.\");\n    }\n    manifest = parsed;\n  } catch (e: any) {\n    throw new Error(`Failed to parse MANIFEST_JSON: ${e.message}`);\n  }\n} else if (MANIFEST) {\n  manifest = parseManifest(MANIFEST);\n  if (!isValidManifest(manifest)) {\n    throw new Error(\"Invalid manifest file format.\");\n  }\n}\n\nconst hfCLIVersion = (() => {\n  try {\n    const version = execSync(\"hf version\", { encoding: \"utf-8\" }).trim();\n    if (version.includes(\":\")) {\n      const [_, ver] = version.split(\":\");\n      return ver.trim();\n    } else {\n      return version.trim();\n    }\n  } catch {\n    return null;\n  }\n})();\n\nconst comfyCLIVersion = (() => {\n  try {\n    const version = execSync(\"comfy --version\", { encoding: \"utf-8\" }).trim();\n    return version;\n  } catch {\n    return null;\n  }\n})();\n\nconst uvInstalled = (() => {\n  try {\n    execSync(\"uv --version\", { encoding: \"utf-8\" }).trim();\n    return true;\n  } catch {\n    return false;\n  }\n})();\n\nconst config = {\n  /**\n   * If 
true, the wrapper will always try to restart ComfyUI when it crashes.\n   * Specified by ALWAYS_RESTART_COMFYUI env var.\n   * default: false\n   */\n  alwaysRestartComfyUI,\n\n  /**\n   * The version of ComfyUI-API. From package.json\n   */\n  apiVersion: version,\n\n  /**\n   * (optional) The AWS region to use for S3 operations.\n   */\n  awsRegion: AWS_REGION ?? AWS_DEFAULT_REGION ?? null,\n\n  /**\n   * (optional) The Azure Storage account name to use for Azure Blob operations.\n   */\n  azureStorageAccount: AZURE_STORAGE_ACCOUNT ?? null,\n\n  /**\n   * (optional) The Azure Storage connection string for local development (e.g., Azurite).\n   */\n  azureStorageConnectionString: AZURE_STORAGE_CONNECTION_STRING ?? null,\n\n  /**\n   * (optional) The Azure Storage account key for shared key authentication.\n   */\n  azureStorageKey: AZURE_STORAGE_KEY ?? null,\n\n  /**\n   * (optional) The Azure Storage SAS token for SAS authentication.\n   */\n  azureStorageSasToken: AZURE_STORAGE_SAS_TOKEN ?? null,\n\n  /**\n   * The directory where cached files are stored, specified by CACHE_DIR env var.\n   * default: {HOME}/.cache/comfyui-api\n   */\n  cacheDir: CACHE_DIR,\n\n  /**\n   * The version of the Comfy CLI, if installed. If not installed, null.\n   */\n  comfyCLIVersion,\n\n  /**\n   * ComfyUI's home directory, specified by COMFY_HOME env var.\n   */\n  comfyDir,\n\n  /**\n   * The address to directly access ComfyUI, specified by DIRECT_ADDRESS env var.\n   */\n  comfyHost: DIRECT_ADDRESS,\n\n  /**\n   * The command to launch ComfyUI, specified by CMD env var.\n   * It should be a command that can be executed in a shell.\n   */\n  comfyLaunchCmd: CMD,\n\n  /**\n   * The port that ComfyUI is listening on the host machine,\n   * specified by COMFYUI_PORT_HOST env var.\n   */\n  comfyPort: COMFYUI_PORT_HOST,\n\n  /**\n   * ComfyUI's HTTP URL, constructed from comfyHost and comfyPort.\n   */\n  comfyURL,\n\n  /**\n   * The version of ComfyUI, fetched from the ComfyUI codebase.\n   */\n  comfyVersion: comfyDescription.version,\n\n  /**\n   * ComfyUI's WebSocket URL, constructed from comfyHost, comfyPort, and a random client ID.\n   */\n  comfyWSURL,\n\n  /**\n   * The version of the HuggingFace CLI, if installed. If not installed, null.\n   */\n  hfCLIVersion,\n\n  /**\n   * If HTTP_AUTH_HEADER_NAME and HTTP_AUTH_HEADER_VALUE are set, this will be an object to merge with headers when making http requests.\n   */\n  httpAuthHeader:\n    HTTP_AUTH_HEADER_NAME && HTTP_AUTH_HEADER_VALUE\n      ? { [HTTP_AUTH_HEADER_NAME]: HTTP_AUTH_HEADER_VALUE }\n      : {},\n\n  /**\n   * The directory where input files are stored, specified by INPUT_DIR env var.\n   * default: {comfyDir}/input\n   */\n  inputDir: INPUT_DIR ?? path.join(comfyDir, \"input\"),\n\n  /**\n   * The log level for the wrapper, specified by LOG_LEVEL env var.\n   */\n  logLevel: LOG_LEVEL.toLowerCase(),\n\n  /**\n   * The size of the LRU cache for models and files, in bytes.\n   * Specified by LRU_CACHE_SIZE_GB env var.\n   * default: 0 (disabled)\n   */\n  lruCacheSizeBytes,\n\n  /**\n   * If a manifest file is provided, this is its parsed contents.\n   */\n  manifest,\n\n  /**\n   * If true, the wrapper will include markdown descriptions in the\n   * generated JSON schema. 
Specified by MARKDOWN_SCHEMA_DESCRIPTIONS env var.\n   * default: true\n   */\n  markdownSchemaDescriptions: MARKDOWN_SCHEMA_DESCRIPTIONS === \"true\",\n\n  /**\n   * The maximum size of request bodies, in bytes.\n   * Specified by MAX_BODY_SIZE_MB env var.\n   * default: 100MB\n   */\n  maxBodySize,\n\n  /**\n   * The maximum number of requests allowed in the queue.\n   * Specified by MAX_QUEUE_DEPTH env var.\n   * default: 0 (unlimited)\n   */\n  maxQueueDepth,\n\n  /**\n   * The directory where models are stored, specified by MODEL_DIR env var.\n   * default: {comfyDir}/models\n   */\n  modelDir,\n\n  /**\n   * The contents of the models directory\n   */\n  models: {} as Record<\n    string,\n    {\n      dir: string;\n      all: string[];\n      enum: z.ZodEnum<[string, ...string[]]>;\n    }\n  >,\n\n  /**\n   * The directory where output files are stored, specified by OUTPUT_DIR env var.\n   * default: {comfyDir}/output\n   */\n  outputDir: OUTPUT_DIR ?? path.join(comfyDir, \"output\"),\n\n  /**\n   * If true, unique IDs will be prepended to existing filename prefixes, as opposed to replacing them.\n   * Specified by PREPEND_FILENAMES env var.\n   * default: true\n   */\n  prependFilenames,\n\n  /**\n   * The number of times to retry a post-prompt webhook if it fails.\n   * Specified by PROMPT_WEBHOOK_RETRIES env var.\n   * default: 3\n   */\n  promptWebhookRetries,\n\n  /**\n   * (optional) Metadata describing the SaladCloud environment, populated from the SALAD_* env vars.\n   * These are provided automatically in SaladCloud's environment; outside SaladCloud the values\n   * are undefined and this object is set to null.\n   */\n  saladMetadata: {\n    organizationName: process.env.SALAD_ORGANIZATION_NAME,\n    organizationId: process.env.SALAD_ORGANIZATION_ID,\n    projectName: process.env.SALAD_PROJECT_NAME,\n    projectId: process.env.SALAD_PROJECT_ID,\n    containerGroupName: process.env.SALAD_CONTAINER_GROUP_NAME,\n    containerGroupId: SALAD_CONTAINER_GROUP_ID,\n    instanceId: process.env.SALAD_INSTANCE_ID,\n    machineId: SALAD_MACHINE_ID,\n  } as {\n    organizationName?: string;\n    organizationId?: string;\n    projectName?: string;\n    projectId?: string;\n    containerGroupName?: string;\n    containerGroupId?: string;\n    instanceId?: string;\n    machineId?: string;\n  } | null,\n\n  /**\n   * The list of samplers supported by ComfyUI, fetched from the ComfyUI codebase.\n   * Does not include custom nodes.\n   */\n  samplers: z.enum(comfyDescription.samplers as [string, ...string[]]),\n\n  /**\n   * The list of schedulers supported by ComfyUI, fetched from the ComfyUI codebase.\n   * Does not include custom nodes.\n   */\n  schedulers: z.enum(comfyDescription.schedulers as [string, ...string[]]),\n\n  /**\n   * The URL of this wrapper, constructed from HOST and PORT env vars.\n   */\n  selfURL,\n\n  /**\n   * The interval between startup checks, in milliseconds.\n   * Specified by STARTUP_CHECK_INTERVAL_S env var.\n   * default: 1000ms\n   */\n  startupCheckInterval,\n\n  /**\n   * The maximum number of tries for startup checks.\n   * Specified by STARTUP_CHECK_MAX_TRIES env var.\n   * default: 20\n   */\n  startupCheckMaxTries,\n\n  /**\n   * (Optional) Any metadata to include in system webhooks. 
Provided by SYSTEM_META_* env vars.\n   * For example, SYSTEM_META_foo=bar will include \"foo\": \"bar\" in the metadata.\n   */\n  systemMetaData: {} as Record<string, string>,\n\n  /**\n   * (Optional) The URL of to send webhooks of system events to.\n   * Specified by SYSTEM_WEBHOOK_URL env var.\n   * If not specified, no webhooks will be sent.\n   */\n  systemWebhook,\n\n  /**\n   * The list of system events to send webhooks for.\n   * Specified by SYSTEM_WEBHOOK_EVENTS env var.\n   * default: [] (no events)\n   * Supported events: all, status, progress, executing, execution_start,\n   * execution_cached, executed, execution_success, execution_interrupted, execution_error\n   * If SYSTEM_WEBHOOK_EVENTS=all, all events will be sent.\n   * Otherwise, it should be a comma-separated list of events.\n   */\n  systemWebhookEvents,\n\n  /**\n   * If true, uv is installed and available to use.\n   */\n  uvInstalled,\n\n  /**\n   * If a warmup prompt is available, this is the checkpoint from it.\n   */\n  warmupCkpt,\n\n  /**\n   * If a warmup prompt file is provided, this is its parsed contents.\n   */\n  warmupPrompt,\n\n  /**\n   * (Optional) URL to download the warmup prompt from. Specified by WARMUP_PROMPT_URL env var.\n   * If both WARMUP_PROMPT_FILE and WARMUP_PROMPT_URL are set, WARMUP_PROMPT_FILE takes precedence.\n   */\n  warmupPromptUrl: WARMUP_PROMPT_URL,\n\n  /**\n   * (Optional) The secret used to sign webhooks. Specified by WEBHOOK_SECRET env var.\n   */\n  webhookSecret: WEBHOOK_SECRET,\n\n  /**\n   * The directory where custom workflows are stored, specified by WORKFLOW_DIR env var.\n   * default: /workflows\n   */\n  workflowDir: WORKFLOW_DIR,\n\n  /**\n   * The host address that the wrapper listens on, specified by HOST env var.\n   * default: ::\n   */\n  wrapperHost: HOST,\n\n  /**\n   * The port that the wrapper listens on, specified by PORT env var.\n   * default: 8080\n   */\n  wrapperPort: port,\n\n  /**\n   * A unique ID for this WebSocket client connection to ComfyUI.\n   * Generated randomly on each startup.\n   */\n  wsClientId,\n};\n\nconst modelSubDirs = fs.readdirSync(modelDir);\nfor (const modelType of modelSubDirs) {\n  const model_path = path.join(modelDir, modelType);\n  if (fs.statSync(model_path).isDirectory()) {\n    const all = fs\n      .readdirSync(model_path)\n      .filter((f) => !(f.startsWith(\"put_\") && f.endsWith(\"_here\")))\n      .sort();\n    config.models[modelType] = {\n      dir: model_path,\n      all,\n      enum: z.enum(all as [string, ...string[]]),\n    };\n  }\n}\n\nfor (const varName of Object.keys(process.env)) {\n  if (varName.startsWith(\"SYSTEM_META_\")) {\n    const key = varName.substring(\"SYSTEM_META_\".length);\n    config.systemMetaData[key] = process.env[varName] ?? 
\"\";\n  }\n}\n\nif (\n  config.saladMetadata &&\n  Object.entries(config.saladMetadata).every(([_, v]) => v === undefined)\n) {\n  config.saladMetadata = null;\n}\n\n/**\n * Set the warmup prompt from downloaded content.\n * This function is called when WARMUP_PROMPT_URL is used to download the warmup file.\n */\nexport function setWarmupPrompt(content: string): void {\n  try {\n    const parsed = JSON.parse(content);\n    config.warmupPrompt = parsed;\n\n    // Extract checkpoint from warmup prompt\n    for (const nodeId in parsed) {\n      const node = parsed[nodeId];\n      if (node.class_type === \"CheckpointLoaderSimple\") {\n        config.warmupCkpt = node.inputs.ckpt_name;\n        break;\n      }\n    }\n  } catch (e: any) {\n    throw new Error(`Failed to parse warmup prompt: ${e.message}`);\n  }\n}\n\nexport default config;\n"
  },
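  {
    "path": "src/config.example.ts",
    "content": "// A minimal, hypothetical sketch (not part of the repo) of setWarmupPrompt\n// behavior: the string is parsed as JSON, stored on config.warmupPrompt, and\n// the first CheckpointLoaderSimple node's ckpt_name is copied to\n// config.warmupCkpt. The prompt and checkpoint name are illustrative.\nimport config, { setWarmupPrompt } from \"./config\";\n\nconst examplePrompt = JSON.stringify({\n  \"1\": {\n    class_type: \"CheckpointLoaderSimple\",\n    inputs: { ckpt_name: \"example-checkpoint.safetensors\" },\n  },\n});\n\nsetWarmupPrompt(examplePrompt);\nconsole.log(config.warmupCkpt); // \"example-checkpoint.safetensors\"\n"
  },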
  {
    "path": "src/credential-resolver.ts",
    "content": "import { WorkflowCredential, DownloadAuth, DownloadOptions } from \"./types\";\n\n/**\n * Resolves credentials for a given URL by matching against patterns.\n * Returns the first matching credential's auth configuration.\n */\nexport function resolveCredentials(\n  url: string,\n  credentials?: WorkflowCredential[]\n): DownloadOptions | undefined {\n  if (!credentials || credentials.length === 0) {\n    return undefined;\n  }\n\n  for (const cred of credentials) {\n    if (matchesPattern(url, cred.url_pattern)) {\n      return { auth: cred.auth };\n    }\n  }\n\n  return undefined;\n}\n\n/**\n * Match a URL against a pattern that supports glob-style wildcards.\n * Supports:\n * - * matches any characters except /\n * - ** matches any characters including /\n * - ? matches a single character\n *\n * Examples:\n * - \"https://example.com/*\" matches \"https://example.com/file.txt\"\n * - \"https://example.com/**\" matches \"https://example.com/path/to/file.txt\"\n * - \"https://*.s3.amazonaws.com/**\" matches \"https://mybucket.s3.amazonaws.com/models/flux.safetensors\"\n */\nexport function matchesPattern(url: string, pattern: string): boolean {\n  // Escape special regex characters except our wildcards\n  const escaped = pattern\n    .replace(/[.+^${}()|[\\]\\\\]/g, \"\\\\$&\")\n    // Replace ** first (before *) to avoid double replacement\n    .replace(/\\*\\*/g, \"<<<DOUBLE_STAR>>>\")\n    .replace(/\\*/g, \"[^/]*\")\n    .replace(/<<<DOUBLE_STAR>>>/g, \".*\")\n    .replace(/\\?/g, \".\");\n\n  const regex = new RegExp(`^${escaped}$`);\n  return regex.test(url);\n}\n\n/**\n * Type for functions that can receive credentials for model downloads.\n */\nexport type CredentialProvider = (url: string) => DownloadOptions | undefined;\n\n/**\n * Create a credential provider function from a list of credentials.\n * This allows passing credentials to functions without exposing the full list.\n */\nexport function createCredentialProvider(\n  credentials?: WorkflowCredential[]\n): CredentialProvider {\n  return (url: string) => resolveCredentials(url, credentials);\n}\n"
  },
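  {
    "path": "src/credential-resolver.example.ts",
    "content": "// A minimal, hypothetical usage sketch for the credential resolver. The URLs\n// and patterns mirror the examples documented in matchesPattern; the auth\n// payload shape is an assumption (see DownloadAuth in ./types).\nimport {\n  matchesPattern,\n  createCredentialProvider,\n} from \"./credential-resolver\";\nimport { WorkflowCredential } from \"./types\";\n\n// * stays within one path segment; ** crosses segments.\nmatchesPattern(\"https://example.com/file.txt\", \"https://example.com/*\"); // true\nmatchesPattern(\"https://example.com/a/b.txt\", \"https://example.com/*\"); // false\nmatchesPattern(\"https://example.com/a/b.txt\", \"https://example.com/**\"); // true\n\nconst credentials = [\n  {\n    url_pattern: \"https://*.s3.amazonaws.com/**\",\n    auth: { type: \"bearer\", token: \"example-token\" }, // assumed shape\n  },\n] as unknown as WorkflowCredential[];\n\n// Returns { auth } for the first matching pattern, undefined otherwise.\nconst getAuth = createCredentialProvider(credentials);\ngetAuth(\"https://mybucket.s3.amazonaws.com/models/flux.safetensors\");\n"
  },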
  {
    "path": "src/event-emitters.ts",
    "content": "import crypto from \"crypto\";\nimport config from \"./config\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport { getProxyDispatcher } from \"./proxy-dispatcher\";\nimport {\n  fetchWithRetries,\n  snakeCaseToUpperCamelCase,\n  camelCaseToSnakeCase,\n} from \"./utils\";\nimport { WebhookHandlers } from \"./types\";\n\nexport function signWebhookPayload(payload: string): string {\n  return crypto\n    .createHmac(\"sha256\", Buffer.from(config.webhookSecret ?? \"\", \"base64\"))\n    .update(payload)\n    .digest(\"base64\");\n}\n\nexport async function sendWebhook(\n  url: string,\n  body: any,\n  log: FastifyBaseLogger,\n  version: number = 1\n): Promise<void> {\n  const headers: Record<string, string> = {\n    \"Content-Type\": \"application/json\",\n  };\n  const bodyString = JSON.stringify(body);\n  if (version === 2) {\n    const webhookId = body.id || crypto.randomUUID();\n    const timestamp = Math.round(Date.now() / 1000).toString();\n    const signedContent = `${webhookId}.${timestamp}.${bodyString}`;\n    const signature = signWebhookPayload(signedContent);\n    Object.assign(headers, {\n      \"webhook-id\": webhookId,\n      \"webhook-timestamp\": timestamp,\n      \"webhook-signature\": `v1,${signature}`,\n    });\n  }\n  try {\n    await fetchWithRetries(\n      url,\n      {\n        method: \"POST\",\n        headers,\n        body: bodyString,\n        dispatcher: getProxyDispatcher(),\n      },\n      config.promptWebhookRetries,\n      log\n    );\n  } catch (e: any) {\n    log.error(`Failed to send webhook to ${url}: ${e.message}`);\n  }\n}\n\nexport async function sendSystemWebhook(\n  eventName: string,\n  data: any,\n  log: FastifyBaseLogger\n): Promise<void> {\n  if (\n    !config.systemWebhookEvents.includes(eventName) ||\n    !config.systemWebhook\n  ) {\n    log.debug(\n      `System webhook for event ${eventName} is not configured to be sent.`\n    );\n    return;\n  }\n\n  const eventLabel = [\n    \"file_downloaded\",\n    \"file_uploaded\",\n    \"file_deleted\",\n  ].includes(eventName)\n    ? \"storage\"\n    : \"comfy\";\n\n  const metadata: Record<string, string> = { ...config.systemMetaData };\n  if (config.saladMetadata) {\n    for (const [key, value] of Object.entries(config.saladMetadata)) {\n      if (value) {\n        metadata[`salad_${camelCaseToSnakeCase(key)}`] = value;\n      }\n    }\n  }\n  const payload = { event: `${eventLabel}.${eventName}`, data, metadata };\n  await sendWebhook(config.systemWebhook, payload, log, 2);\n}\n\nexport function getConfiguredWebhookHandlers(\n  log: FastifyBaseLogger\n): WebhookHandlers {\n  const handlers: Record<string, (d: any) => void> = {};\n  if (config.systemWebhook) {\n    const systemWebhookEvents = config.systemWebhookEvents;\n    for (const eventName of systemWebhookEvents) {\n      const handlerName = `on${snakeCaseToUpperCamelCase(eventName)}`;\n      handlers[handlerName] = (data: any) => {\n        log.debug(`Sending system webhook for event: ${eventName}`);\n\n        sendSystemWebhook(eventName, data, log);\n      };\n    }\n  }\n\n  log.debug(\n    `Configured webhook handlers for events: ${Object.keys(handlers).join(\n      \", \"\n    )}`\n  );\n\n  return handlers as WebhookHandlers;\n}\n"
  },
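  {
    "path": "src/event-emitters.example.ts",
    "content": "// A minimal, hypothetical receiver-side sketch (not part of the repo) for\n// verifying the v2 webhooks produced by sendWebhook(..., version = 2). It\n// mirrors signWebhookPayload: HMAC-SHA256 keyed with the base64-decoded\n// WEBHOOK_SECRET, computed over \"<webhook-id>.<webhook-timestamp>.<raw body>\"\n// and compared against the \"v1,\"-prefixed webhook-signature header.\nimport crypto from \"crypto\";\n\nexport function verifyWebhook(\n  secret: string, // the same base64 WEBHOOK_SECRET configured on the sender\n  headers: Record<string, string>,\n  rawBody: string\n): boolean {\n  const signedContent = `${headers[\"webhook-id\"]}.${headers[\"webhook-timestamp\"]}.${rawBody}`;\n  const expected = crypto\n    .createHmac(\"sha256\", Buffer.from(secret, \"base64\"))\n    .update(signedContent)\n    .digest(\"base64\");\n  const received = (headers[\"webhook-signature\"] ?? \"\").replace(/^v1,/, \"\");\n  // timingSafeEqual requires equal-length buffers, so check length first.\n  return (\n    received.length === expected.length &&\n    crypto.timingSafeEqual(Buffer.from(received), Buffer.from(expected))\n  );\n}\n"
  },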
  {
    "path": "src/git-url-parser.ts",
    "content": "/**\n * Parse a Git URL to extract the base repository URL and optional ref (branch/commit/tag).\n * Supports multiple formats from different Git hosting platforms:\n *\n * GitHub:\n * - https://github.com/user/repo/tree/{ref}\n * - https://github.com/user/repo/commit/{sha}\n * - https://github.com/user/repo/releases/tag/{tag}\n *\n * GitLab:\n * - https://gitlab.com/user/repo/-/tree/{ref}\n * - https://gitlab.com/user/repo/-/commit/{sha}\n *\n * Bitbucket:\n * - https://bitbucket.org/user/repo/src/{ref}\n * - https://bitbucket.org/user/repo/commits/{sha}\n *\n * Generic (npm/pip style):\n * - https://github.com/user/repo@{ref}\n * - https://github.com/user/repo.git@{ref}\n *\n * Plain URLs (no ref):\n * - https://github.com/user/repo\n * - https://github.com/user/repo.git\n */\nexport function parseGitUrl(repoUrl: string): {\n  baseUrl: string;\n  ref: string | null;\n} {\n  // npm/pip style: repo@ref or repo.git@ref\n  const atRefMatch = repoUrl.match(/^(.+?)@([^@\\/]+)$/);\n  if (atRefMatch) {\n    return { baseUrl: atRefMatch[1], ref: atRefMatch[2] };\n  }\n\n  // GitLab: /-/tree/{ref}, /-/commit/{sha}\n  // Check GitLab BEFORE GitHub because GitLab URLs contain /tree/ and /commit/\n  // which would otherwise match the less specific GitHub patterns\n  const gitlabTreeMatch = repoUrl.match(/^(.+?)\\/-\\/tree\\/([^\\/]+)$/);\n  if (gitlabTreeMatch) {\n    return { baseUrl: gitlabTreeMatch[1], ref: gitlabTreeMatch[2] };\n  }\n\n  const gitlabCommitMatch = repoUrl.match(/^(.+?)\\/-\\/commit\\/([^\\/]+)$/);\n  if (gitlabCommitMatch) {\n    return { baseUrl: gitlabCommitMatch[1], ref: gitlabCommitMatch[2] };\n  }\n\n  // GitHub: /tree/{ref}, /commit/{sha}, /releases/tag/{tag}\n  const githubTreeMatch = repoUrl.match(/^(.+?)\\/tree\\/([^\\/]+)$/);\n  if (githubTreeMatch) {\n    return { baseUrl: githubTreeMatch[1], ref: githubTreeMatch[2] };\n  }\n\n  const githubCommitMatch = repoUrl.match(/^(.+?)\\/commit\\/([^\\/]+)$/);\n  if (githubCommitMatch) {\n    return { baseUrl: githubCommitMatch[1], ref: githubCommitMatch[2] };\n  }\n\n  const githubReleaseMatch = repoUrl.match(/^(.+?)\\/releases\\/tag\\/([^\\/]+)$/);\n  if (githubReleaseMatch) {\n    return { baseUrl: githubReleaseMatch[1], ref: githubReleaseMatch[2] };\n  }\n\n  // Bitbucket: /src/{ref} (may have trailing path), /commits/{sha}\n  const bitbucketSrcMatch = repoUrl.match(/^(.+?)\\/src\\/([^\\/]+)(?:\\/.*)?$/);\n  if (bitbucketSrcMatch) {\n    return { baseUrl: bitbucketSrcMatch[1], ref: bitbucketSrcMatch[2] };\n  }\n\n  const bitbucketCommitsMatch = repoUrl.match(/^(.+?)\\/commits\\/([^\\/]+)$/);\n  if (bitbucketCommitsMatch) {\n    return { baseUrl: bitbucketCommitsMatch[1], ref: bitbucketCommitsMatch[2] };\n  }\n\n  // No ref found, return URL as-is\n  return { baseUrl: repoUrl, ref: null };\n}\n"
  },
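  {
    "path": "src/git-url-parser.example.ts",
    "content": "// A minimal sketch of parseGitUrl across the URL shapes documented above;\n// the repository URLs are illustrative.\nimport { parseGitUrl } from \"./git-url-parser\";\n\nparseGitUrl(\"https://github.com/user/repo/tree/main\");\n// => { baseUrl: \"https://github.com/user/repo\", ref: \"main\" }\n\nparseGitUrl(\"https://gitlab.com/user/repo/-/commit/abc123\");\n// => { baseUrl: \"https://gitlab.com/user/repo\", ref: \"abc123\" }\n\nparseGitUrl(\"https://github.com/user/repo.git@v1.2.0\");\n// => { baseUrl: \"https://github.com/user/repo.git\", ref: \"v1.2.0\" }\n\nparseGitUrl(\"https://github.com/user/repo\");\n// => { baseUrl: \"https://github.com/user/repo\", ref: null }\n"
  },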
  {
    "path": "src/image-tools.ts",
    "content": "import config from \"./config\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport fsPromises from \"fs/promises\";\nimport path from \"path\";\nimport { randomUUID } from \"crypto\";\nimport getStorageManager from \"./remote-storage-manager\";\nimport sharp from \"sharp\";\nimport { OutputConversionOptions } from \"./types\";\nimport { isValidUrl } from \"./utils\";\n\nexport async function processInputMedia(\n  fileInput: string,\n  log: FastifyBaseLogger,\n  dirWithinInputDir?: string\n): Promise<string> {\n  const storageManager = getStorageManager();\n  let localFilePath: string;\n  const ext = path.extname(fileInput).split(\"?\")[0];\n  const localFileName = `${randomUUID()}${ext}`;\n  if (dirWithinInputDir) {\n    localFilePath = path.join(\n      config.inputDir,\n      dirWithinInputDir,\n      localFileName\n    );\n    // Create the directory if it doesn't exist\n    await fsPromises.mkdir(path.dirname(localFilePath), { recursive: true });\n  } else {\n    localFilePath = path.join(config.inputDir, localFileName);\n  }\n  if (\n    (fileInput.startsWith(\"/\") &&\n      fileInput.length < 4096 &&\n      !fileInput.endsWith(\"==\")) ||\n    fileInput.startsWith(\"./\") ||\n    fileInput.startsWith(\"../\")\n  ) {\n    return path.resolve(fileInput);\n  } else if (isValidUrl(fileInput)) {\n    const dir = path.dirname(localFilePath);\n    return storageManager.downloadFile(fileInput, dir);\n  } else {\n    // Assume it's base64 encoded data\n    try {\n      const base64Data = Buffer.from(fileInput, \"base64\");\n      const extension = guessFileExtensionFromBase64(fileInput);\n      if (!extension) {\n        throw new Error(\"Could not determine file type from base64 data\");\n      }\n      localFilePath = `${localFilePath}.${extension}`;\n      log.debug(`Saving decoded file to ${localFilePath}`);\n      await fsPromises.writeFile(localFilePath, base64Data);\n      return localFilePath;\n    } catch (e: any) {\n      throw new Error(`Failed to parse base64 encoded file: ${e.message}`);\n    }\n  }\n}\n\nfunction guessFileExtensionFromBase64(base64Data: string): string | null {\n  try {\n    // Remove data URL prefix if present (e.g., \"data:video/mp4;base64,\")\n    const cleanBase64 = base64Data.replace(/^data:[^;]+;base64,/, \"\");\n\n    // Decode first 32 bytes to check file signatures\n    const buffer = Buffer.from(cleanBase64.slice(0, 44), \"base64\"); // 44 chars = ~33 bytes\n    const bytes = Array.from(buffer);\n\n    // Helper function to check bytes at specific positions\n    const checkBytes = (offset: number, expected: number[]): boolean => {\n      return expected.every((byte, index) => bytes[offset + index] === byte);\n    };\n\n    // Helper function to check for string in buffer\n    const hasString = (str: string): boolean => {\n      return buffer.includes(Buffer.from(str));\n    };\n\n    // Images\n    if (checkBytes(0, [0xff, 0xd8, 0xff])) return \"jpg\";\n    if (checkBytes(0, [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]))\n      return \"png\";\n    if (\n      checkBytes(0, [0x47, 0x49, 0x46, 0x38]) &&\n      (bytes[4] === 0x37 || bytes[4] === 0x39)\n    )\n      return \"gif\";\n    if (\n      checkBytes(0, [0x52, 0x49, 0x46, 0x46]) &&\n      checkBytes(8, [0x57, 0x45, 0x42, 0x50])\n    )\n      return \"webp\";\n    if (checkBytes(0, [0x42, 0x4d])) return \"bmp\";\n    if (\n      checkBytes(0, [0x49, 0x49, 0x2a, 0x00]) ||\n      checkBytes(0, [0x4d, 0x4d, 0x00, 0x2a])\n    )\n      return \"tiff\";\n    if (checkBytes(0, 
[0x00, 0x00, 0x01, 0x00])) return \"ico\";\n\n    // Videos\n    if (hasString(\"ftyp\")) {\n      const ftypIndex = buffer.indexOf(Buffer.from(\"ftyp\"));\n      if (ftypIndex !== -1 && ftypIndex + 8 <= buffer.length) {\n        const brand = buffer.subarray(ftypIndex + 4, ftypIndex + 8).toString();\n        if (brand.startsWith(\"mp4\") || brand.startsWith(\"isom\")) return \"mp4\";\n        if (brand.startsWith(\"M4V\")) return \"m4v\";\n        if (brand.startsWith(\"3gp\")) return \"3gp\";\n        if (brand.startsWith(\"qt\")) return \"mov\";\n      }\n    }\n    if (\n      checkBytes(0, [0x52, 0x49, 0x46, 0x46]) &&\n      checkBytes(8, [0x41, 0x56, 0x49, 0x20])\n    )\n      return \"avi\";\n    if (checkBytes(0, [0x1a, 0x45, 0xdf, 0xa3])) {\n      // Both WebM and MKV use EBML, need deeper inspection\n      if (hasString(\"webm\")) return \"webm\";\n      return \"mkv\"; // Default to MKV for EBML\n    }\n    if (checkBytes(0, [0x46, 0x4c, 0x56])) return \"flv\";\n    if (checkBytes(0, [0x30, 0x26, 0xb2, 0x75])) return \"wmv\";\n\n    // Audio\n    if (\n      checkBytes(0, [0xff, 0xfb]) ||\n      checkBytes(0, [0xff, 0xf3]) ||\n      checkBytes(0, [0xff, 0xf2])\n    )\n      return \"mp3\";\n    if (checkBytes(0, [0x49, 0x44, 0x33])) return \"mp3\"; // ID3 tag\n    if (\n      checkBytes(0, [0x52, 0x49, 0x46, 0x46]) &&\n      checkBytes(8, [0x57, 0x41, 0x56, 0x45])\n    )\n      return \"wav\";\n    if (checkBytes(0, [0x4f, 0x67, 0x67, 0x53])) return \"ogg\";\n    if (checkBytes(0, [0x66, 0x4c, 0x61, 0x43])) return \"flac\";\n    if (hasString(\"ftypM4A\")) return \"m4a\";\n\n    // Archives\n    if (\n      checkBytes(0, [0x50, 0x4b, 0x03, 0x04]) ||\n      checkBytes(0, [0x50, 0x4b, 0x05, 0x06]) ||\n      checkBytes(0, [0x50, 0x4b, 0x07, 0x08])\n    )\n      return \"zip\";\n    if (checkBytes(0, [0x52, 0x61, 0x72, 0x21, 0x1a, 0x07, 0x00])) return \"rar\";\n    if (checkBytes(0, [0x37, 0x7a, 0xbc, 0xaf, 0x27, 0x1c])) return \"7z\";\n    if (checkBytes(0, [0x1f, 0x8b])) return \"gz\";\n    if (checkBytes(0, [0x42, 0x5a, 0x68])) return \"bz2\";\n\n    // Documents\n    if (checkBytes(0, [0x25, 0x50, 0x44, 0x46])) return \"pdf\";\n    if (checkBytes(0, [0xd0, 0xcf, 0x11, 0xe0, 0xa1, 0xb1, 0x1a, 0xe1])) {\n      // Microsoft Office formats (legacy)\n      return \"doc\"; // Could also be .xls, .ppt - would need deeper inspection\n    }\n    if (checkBytes(0, [0x50, 0x4b]) && hasString(\"word/\")) return \"docx\";\n    if (checkBytes(0, [0x50, 0x4b]) && hasString(\"xl/\")) return \"xlsx\";\n    if (checkBytes(0, [0x50, 0x4b]) && hasString(\"ppt/\")) return \"pptx\";\n\n    // Text/Code\n    if (checkBytes(0, [0xef, 0xbb, 0xbf])) return \"txt\"; // UTF-8 BOM\n    if (checkBytes(0, [0xff, 0xfe])) return \"txt\"; // UTF-16 LE BOM\n    if (checkBytes(0, [0xfe, 0xff])) return \"txt\"; // UTF-16 BE BOM\n\n    // Fonts\n    if (checkBytes(0, [0x00, 0x01, 0x00, 0x00, 0x00])) return \"ttf\";\n    if (checkBytes(0, [0x4f, 0x54, 0x54, 0x4f])) return \"otf\";\n    if (checkBytes(0, [0x77, 0x4f, 0x46, 0x46])) return \"woff\";\n    if (checkBytes(0, [0x77, 0x4f, 0x46, 0x32])) return \"woff2\";\n\n    // Try to detect if it's likely text-based by checking for printable ASCII\n    let printableCount = 0;\n    for (let i = 0; i < Math.min(buffer.length, 32); i++) {\n      if (\n        (bytes[i] >= 32 && bytes[i] <= 126) ||\n        bytes[i] === 9 ||\n        bytes[i] === 10 ||\n        bytes[i] === 13\n      ) {\n        printableCount++;\n      }\n    }\n\n    // If mostly printable characters, 
assume it's a text file\n    if (printableCount / Math.min(buffer.length, 32) > 0.7) {\n      return \"txt\";\n    }\n\n    return null; // Unknown format\n  } catch (error) {\n    console.error(\"Error detecting file format:\", error);\n    return null;\n  }\n}\n\nexport async function convertImageBuffer(\n  imageBuffer: Buffer,\n  options: OutputConversionOptions\n) {\n  const { format, options: conversionOptions } = options;\n  let image = sharp(imageBuffer);\n\n  if (format === \"webp\") {\n    image = image.webp(conversionOptions);\n  } else if (format === \"jpg\" || format === \"jpeg\") {\n    image = image.jpeg(conversionOptions);\n  }\n\n  return image.toBuffer();\n}\n"
  },
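  {
    "path": "src/image-tools.example.ts",
    "content": "// A minimal, hypothetical sketch of convertImageBuffer: re-encode an output\n// image as webp. The input path and quality value are illustrative; the\n// options object is passed straight through to sharp's webp()/jpeg() encoder.\nimport fsPromises from \"fs/promises\";\nimport { convertImageBuffer } from \"./image-tools\";\nimport { OutputConversionOptions } from \"./types\";\n\nexport async function toWebp(pngPath: string): Promise<Buffer> {\n  const png = await fsPromises.readFile(pngPath);\n  const options = {\n    format: \"webp\",\n    options: { quality: 80 },\n  } as OutputConversionOptions;\n  return convertImageBuffer(png, options);\n}\n"
  },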
  {
    "path": "src/index.ts",
    "content": "import { start } from \"./server\";\n\nstart();\n"
  },
  {
    "path": "src/llm-providers.ts",
    "content": "/**\n * LLM provider configurations for the generate-workflow script.\n *\n * Supported providers:\n * - Anthropic (Claude): set ANTHROPIC_API_KEY\n * - MiniMax: set MINIMAX_API_KEY (uses OpenAI-compatible API)\n */\n\nexport interface LLMProviderConfig {\n  name: string;\n  apiUrl: string;\n  model: string;\n  /** Temperature value (MiniMax requires > 0.0, Anthropic accepts 0) */\n  temperature: number;\n  /** Returns auth headers for the provider */\n  authHeaders(apiKey: string): Record<string, string>;\n  /** Build the JSON request body for the given system and user prompts */\n  buildRequestBody(systemPrompt: string, userPrompt: string): object;\n  /** Extract the generated text from the API response */\n  parseResponse(response: unknown): string;\n}\n\nexport const anthropicProvider: LLMProviderConfig = {\n  name: \"anthropic\",\n  apiUrl: \"https://api.anthropic.com/v1/messages\",\n  model: \"claude-sonnet-4-20250514\",\n  temperature: 0,\n  authHeaders(apiKey) {\n    return {\n      \"x-api-key\": apiKey,\n      \"anthropic-version\": \"2023-06-01\",\n    };\n  },\n  buildRequestBody(systemPrompt, userPrompt) {\n    return {\n      model: this.model,\n      system: systemPrompt,\n      max_tokens: 8192,\n      temperature: this.temperature,\n      messages: [{ role: \"user\", content: userPrompt }],\n    };\n  },\n  parseResponse(response) {\n    const r = response as { content?: Array<{ text?: string }> };\n    return r.content?.[0]?.text ?? \"\";\n  },\n};\n\nexport const minimaxProvider: LLMProviderConfig = {\n  name: \"minimax\",\n  apiUrl: \"https://api.minimax.io/v1/chat/completions\",\n  model: \"MiniMax-M2.7\",\n  // MiniMax requires temperature in (0.0, 1.0] — use a near-zero value for deterministic output\n  temperature: 0.01,\n  authHeaders(apiKey) {\n    return { Authorization: `Bearer ${apiKey}` };\n  },\n  buildRequestBody(systemPrompt, userPrompt) {\n    return {\n      model: this.model,\n      messages: [\n        { role: \"system\", content: systemPrompt },\n        { role: \"user\", content: userPrompt },\n      ],\n      max_tokens: 8192,\n      temperature: this.temperature,\n    };\n  },\n  parseResponse(response) {\n    const r = response as {\n      choices?: Array<{ message?: { content?: string } }>;\n    };\n    return r.choices?.[0]?.message?.content ?? \"\";\n  },\n};\n\n/**\n * Selects an LLM provider based on available environment variables.\n * Prefers Anthropic when both keys are set.\n *\n * @param anthropicKey - value of ANTHROPIC_API_KEY (if set)\n * @param minimaxKey   - value of MINIMAX_API_KEY (if set)\n * @returns the selected provider config\n * @throws if neither key is provided\n */\nexport function selectProvider(\n  anthropicKey?: string,\n  minimaxKey?: string\n): LLMProviderConfig {\n  if (anthropicKey) return anthropicProvider;\n  if (minimaxKey) return minimaxProvider;\n  throw new Error(\n    \"Please set ANTHROPIC_API_KEY or MINIMAX_API_KEY environment variable\"\n  );\n}\n\n/**\n * Strip code-fence delimiters from a model response.\n * Models may wrap generated TypeScript in ```typescript ... 
``` blocks even\n * when instructed not to; this removes those wrappers when present.\n */\nexport function stripCodeFences(text: string): string {\n  const lines = text.split(\"\\n\");\n  if (lines.length >= 2 && lines[0].startsWith(\"```\")) {\n    // Drop the opening fence and, if the last non-empty line is a fence, it too\n    const inner = lines.slice(1);\n    const lastNonEmpty = inner.reduceRight(\n      (found, line, i) => (found === -1 && line.trim() !== \"\" ? i : found),\n      -1\n    );\n    if (lastNonEmpty !== -1 && inner[lastNonEmpty].startsWith(\"```\")) {\n      inner.splice(lastNonEmpty, 1);\n    }\n    return inner.join(\"\\n\");\n  }\n  return text;\n}\n"
  },
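  {
    "path": "src/llm-providers.example.ts",
    "content": "// A minimal, hypothetical sketch of driving a provider config end to end:\n// pick a provider from the environment, build and send the request, then\n// strip any code fences from the reply. The prompts are illustrative;\n// global fetch assumes Node 18+.\nimport {\n  selectProvider,\n  stripCodeFences,\n  LLMProviderConfig,\n} from \"./llm-providers\";\n\nexport async function generate(\n  systemPrompt: string,\n  userPrompt: string\n): Promise<string> {\n  const provider: LLMProviderConfig = selectProvider(\n    process.env.ANTHROPIC_API_KEY,\n    process.env.MINIMAX_API_KEY\n  );\n  const apiKey =\n    (provider.name === \"anthropic\"\n      ? process.env.ANTHROPIC_API_KEY\n      : process.env.MINIMAX_API_KEY) ?? \"\";\n\n  const res = await fetch(provider.apiUrl, {\n    method: \"POST\",\n    headers: {\n      \"Content-Type\": \"application/json\",\n      ...provider.authHeaders(apiKey),\n    },\n    body: JSON.stringify(provider.buildRequestBody(systemPrompt, userPrompt)),\n  });\n  return stripCodeFences(provider.parseResponse(await res.json()));\n}\n"
  },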
  {
    "path": "src/proxy-dispatcher.ts",
    "content": "import { EnvHttpProxyAgent, type Dispatcher } from \"undici\";\n\n// Create a singleton proxy-aware dispatcher that:\n// - Reads HTTP_PROXY/HTTPS_PROXY/NO_PROXY from environment\n// - Honors NO_PROXY for localhost/cluster/internal hosts\n// - Uses unlimited timeouts to match existing Agent usage\nlet cachedDispatcher: Dispatcher | null = null;\n\nexport function getProxyDispatcher(): Dispatcher {\n  if (!cachedDispatcher) {\n    cachedDispatcher = new EnvHttpProxyAgent({\n      headersTimeout: 0,\n      bodyTimeout: 0,\n      connectTimeout: 0,\n    });\n  }\n  return cachedDispatcher;\n}\n\n"
  },
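  {
    "path": "src/proxy-dispatcher.example.ts",
    "content": "// A minimal sketch of routing an outbound request through the shared\n// proxy-aware dispatcher: undici's fetch accepts a custom dispatcher, so the\n// call honors HTTP_PROXY/HTTPS_PROXY/NO_PROXY without per-call setup. The\n// URL is illustrative.\nimport { fetch } from \"undici\";\nimport { getProxyDispatcher } from \"./proxy-dispatcher\";\n\nexport async function fetchThroughProxy(url: string): Promise<number> {\n  const response = await fetch(url, { dispatcher: getProxyDispatcher() });\n  return response.status;\n}\n"
  },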
  {
    "path": "src/remote-storage-manager.ts",
    "content": "import config from \"./config\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport fs from \"fs\";\nimport fsPromises from \"fs/promises\";\nimport path from \"path\";\nimport { execFile } from \"child_process\";\nimport { promisify } from \"util\";\nimport storageProviders from \"./storage-providers\";\nimport { StorageProvider, Upload, DownloadOptions } from \"./types\";\nimport { sendSystemWebhook } from \"./event-emitters\";\nimport {\n  makeHumanReadableSize,\n  hashUrlBase64,\n  getContentTypeFromUrl,\n  getDirectorySizeInBytes,\n} from \"./utils\";\nimport { parseGitUrl } from \"./git-url-parser\";\n\n/**\n * Metadata for cached files, stored in sidecar .meta files.\n */\ninterface CacheMetadata {\n  authRequired: boolean;\n  url: string;\n  cachedAt: string;\n}\n\nconst execFilePromise = promisify(execFile);\n\nasync function linkIfDoesNotExist(\n  src: string,\n  dest: string,\n  log: FastifyBaseLogger\n): Promise<void> {\n  return fsPromises\n    .lstat(dest)\n    .then(() => {\n      log.debug(`Link target ${dest} already exists, skipping link`);\n    })\n    .catch(async (err: any) => {\n      if (err.code === \"ENOENT\") {\n        log.debug(`Linking ${src} to ${dest}`);\n        await fsPromises.mkdir(path.dirname(dest), { recursive: true });\n        await fsPromises.symlink(src, dest);\n        log.debug(`Linked ${src} to ${dest}`);\n      } else {\n        log.error(\n          `Error linking ${src} to ${dest}: (${err.code}) ${err.message}`\n        );\n        throw err;\n      }\n    });\n}\n\nasync function getFileByPrefix(\n  dir: string,\n  prefix: string\n): Promise<string | null> {\n  const files = await fsPromises.readdir(dir);\n  // Exclude .meta files from the search\n  const matchingFile = files.find((file) => file.startsWith(prefix) && !file.endsWith(\".meta\"));\n  return matchingFile ? 
path.join(dir, matchingFile) : null;\n}\n\n/**\n * Get the metadata file path for a cached file.\n */\nfunction getMetaFilePath(cachedFilePath: string): string {\n  return `${cachedFilePath}.meta`;\n}\n\n/**\n * Read metadata for a cached file if it exists.\n */\nasync function readCacheMetadata(cachedFilePath: string): Promise<CacheMetadata | null> {\n  const metaPath = getMetaFilePath(cachedFilePath);\n  try {\n    const content = await fsPromises.readFile(metaPath, \"utf-8\");\n    return JSON.parse(content) as CacheMetadata;\n  } catch {\n    return null;\n  }\n}\n\n/**\n * Write metadata for a cached file.\n */\nasync function writeCacheMetadata(cachedFilePath: string, metadata: CacheMetadata): Promise<void> {\n  const metaPath = getMetaFilePath(cachedFilePath);\n  await fsPromises.writeFile(metaPath, JSON.stringify(metadata, null, 2));\n}\n\n/**\n * Sanitize a URL by removing embedded credentials (username/password).\n * This prevents credentials from being written to disk in cache metadata.\n */\nfunction sanitizeUrlForMetadata(url: string): string {\n  try {\n    const parsed = new URL(url);\n    parsed.username = \"\";\n    parsed.password = \"\";\n    return parsed.toString();\n  } catch {\n    // If URL parsing fails, return as-is (shouldn't happen for valid URLs)\n    return url;\n  }\n}\n\nclass RemoteStorageManager {\n  private cache: Record<string, string> = {};\n  private activeDownloads: Record<string, Promise<string>> = {};\n  private activeUploads: Record<string, Upload> = {};\n  log: FastifyBaseLogger;\n  cacheDir: string;\n  storageProviders: StorageProvider[] = [];\n\n  constructor(cacheDir: string, log: FastifyBaseLogger) {\n    this.cacheDir = cacheDir;\n    this.log = log.child({ module: \"RemoteStorageManager\" });\n    fs.mkdirSync(this.cacheDir, { recursive: true });\n    this.storageProviders = storageProviders\n      .map((Provider) => {\n        try {\n          return new Provider(this.log);\n        } catch (error) {\n          this.log.warn(\n            { error },\n            `Error initializing storage provider ${Provider.name}`\n          );\n        }\n      })\n      .filter(Boolean) as StorageProvider[];\n    this.log.info(\n      `Initialized with ${this.storageProviders.length} storage providers`\n    );\n  }\n\n  async enforceCacheSize(): Promise<void> {\n    const { totalSize, files } = await this.getCacheSizeInfo();\n    this.log.info(\n      `Cache populated with ${\n        files.length\n      } files, total size: ${makeHumanReadableSize(totalSize)}`\n    );\n\n    if (config.lruCacheSizeBytes > 0 && totalSize > config.lruCacheSizeBytes) {\n      this.log.info(\n        `Cache size ${makeHumanReadableSize(\n          totalSize\n        )} exceeds max size of ${makeHumanReadableSize(\n          config.lruCacheSizeBytes\n        )}, performing eviction`\n      );\n      const spaceNeeded = totalSize - config.lruCacheSizeBytes;\n      const freedSpace = await this.makeSpace(spaceNeeded, files);\n      this.log.info(`Freed up ${makeHumanReadableSize(freedSpace)} in cache`);\n    }\n  }\n\n  /**\n   * Gets the total size of the cache directory and a list of files sorted by last accessed time.\n   * @returns An object containing the total size in bytes and an array of file info objects.\n   */\n  async getCacheSizeInfo(): Promise<{\n    totalSize: number;\n    files: Array<{ path: string; size: number; lastAccessed: number }>;\n  }> {\n    let totalSize = 0;\n    const files: Array<{ path: string; size: number; lastAccessed: number }> =\n      
[];\n\n    const dirFiles = await fsPromises.readdir(this.cacheDir);\n    const statsPromises = dirFiles.map(async (file) => {\n      const filePath = path.join(this.cacheDir, file);\n      const stats = await fsPromises.stat(filePath);\n      if (stats.isFile()) {\n        totalSize += stats.size;\n        files.push({\n          path: filePath,\n          size: stats.size,\n          lastAccessed: stats.atimeMs,\n        });\n      }\n    });\n\n    await Promise.all(statsPromises);\n    const sortedByLastAccessed = files.sort(\n      (a, b) => a.lastAccessed - b.lastAccessed\n    );\n\n    return { totalSize, files: sortedByLastAccessed };\n  }\n\n  /**\n   *\n   * @param spaceNeeded A number in bytes that needs to be removed\n   * @param files A list of files. Files will be removed in the order of this array.\n   * @returns\n   */\n  private async makeSpace(\n    spaceNeeded: number,\n    files: Array<{ path: string; size: number }>\n  ): Promise<number> {\n    let freedSpace = 0;\n    for (const file of files) {\n      if (freedSpace >= spaceNeeded) {\n        break;\n      }\n      try {\n        const urlInCache = Object.keys(this.cache).find(\n          (url) => this.cache[url] === file.path\n        );\n        sendSystemWebhook(\n          \"file_deleted\",\n          {\n            url: urlInCache || \"unknown\",\n            local_path: file.path,\n            size: file.size,\n          },\n          this.log\n        );\n        if (urlInCache) {\n          delete this.cache[urlInCache];\n        }\n        await fsPromises.unlink(file.path);\n        // Also delete the metadata file if it exists\n        const metaPath = getMetaFilePath(file.path);\n        await fsPromises.unlink(metaPath).catch(() => {\n          // Metadata file may not exist, ignore\n        });\n        freedSpace += file.size;\n        this.log.info(\n          `Evicted ${file.path} (${makeHumanReadableSize(\n            file.size\n          )}) from cache to free up space`\n        );\n      } catch (error) {\n        this.log.error(\n          { error },\n          `Error evicting file ${file.path} from cache`\n        );\n      }\n    }\n    return freedSpace;\n  }\n\n  async downloadFile(\n    url: string,\n    outputDir: string,\n    filenameOverride?: string,\n    options?: DownloadOptions\n  ): Promise<string> {\n    const hasAuth = !!options?.auth;\n\n    // Check in-memory cache first\n    if (this.cache[url]) {\n      const cachedPath = this.cache[url];\n      await this.validateCacheAccess(url, cachedPath, options);\n      const finalLocation = path.join(\n        outputDir,\n        filenameOverride || path.basename(cachedPath)\n      );\n      await linkIfDoesNotExist(cachedPath, finalLocation, this.log);\n      this.log.debug(`Using cached file for ${url}`);\n      return finalLocation;\n    }\n\n    // Check if there's an in-progress download we can wait for\n    if (url in this.activeDownloads) {\n      this.log.info(`Awaiting in-progress download for ${url}`);\n      const cachedPath = await this.activeDownloads[url];\n      await this.validateCacheAccess(url, cachedPath, options);\n      const finalLocation = path.join(\n        outputDir,\n        filenameOverride || path.basename(cachedPath)\n      );\n      await linkIfDoesNotExist(cachedPath, finalLocation, this.log);\n      return finalLocation;\n    }\n\n    // Check disk cache\n    const hashedUrl = hashUrlBase64(url);\n    const preDownloadedFile = await getFileByPrefix(this.cacheDir, hashedUrl);\n    if (preDownloadedFile) 
{\n      this.log.debug(`Found ${preDownloadedFile} for ${url} in cache dir`);\n      await this.validateCacheAccess(url, preDownloadedFile, options);\n      this.cache[url] = preDownloadedFile;\n      const finalLocation = path.join(\n        outputDir,\n        filenameOverride || path.basename(preDownloadedFile)\n      );\n      await linkIfDoesNotExist(preDownloadedFile, finalLocation, this.log);\n      return finalLocation;\n    }\n\n    // No cache hit - need to download\n    const start = Date.now();\n    const ext = path.extname(new URL(url).pathname);\n    const tempFilename = `${hashedUrl}${ext}`;\n\n    // Find appropriate provider and start download\n    for (const provider of this.storageProviders) {\n      if (provider.downloadFile && provider.testUrl(url)) {\n        this.log.info(\n          `Downloading ${url} using provider ${provider.constructor.name}`\n        );\n        this.activeDownloads[url] = provider\n          .downloadFile(url, this.cacheDir, filenameOverride || tempFilename, options)\n          .then(async (outputLocation: string) => {\n            this.cache[url] = outputLocation;\n            // Write metadata to track if auth was required\n            // Sanitize URL to prevent credentials from being written to disk\n            const metadata: CacheMetadata = {\n              authRequired: hasAuth,\n              url: sanitizeUrlForMetadata(url),\n              cachedAt: new Date().toISOString(),\n            };\n            await writeCacheMetadata(outputLocation, metadata);\n            return outputLocation;\n          })\n          .finally(() => {\n            delete this.activeDownloads[url];\n          });\n        break;\n      }\n    }\n    if (!this.activeDownloads[url]) {\n      throw new Error(`No storage provider found for URL: ${url}`);\n    }\n    const outputPath = await this.activeDownloads[url];\n    const finalLocation = path.join(\n      outputDir,\n      filenameOverride || path.basename(this.cache[url])\n    );\n    await linkIfDoesNotExist(outputPath, finalLocation, this.log);\n\n    const duration = (Date.now() - start) / 1000;\n    const size = (await fsPromises.stat(await fsPromises.realpath(outputPath)))\n      .size;\n    const sizeInMB = size / (1024 * 1024);\n\n    const speed = sizeInMB / duration;\n    const sizeStr = makeHumanReadableSize(sizeInMB * 1024 * 1024);\n    this.log.info(\n      `Downloaded ${sizeStr} from ${url} in ${duration.toFixed(\n        2\n      )}s (${speed.toFixed(2)} MB/s)`\n    );\n    sendSystemWebhook(\n      \"file_downloaded\",\n      { url, local_path: finalLocation, size, duration },\n      this.log\n    );\n\n    this.enforceCacheSize().catch((error) => {\n      this.log.error({ error }, \"Error enforcing cache size after download\");\n    });\n\n    return finalLocation;\n  }\n\n  /**\n   * Validate that the request is allowed to access a cached file.\n   * For auth-required URLs, this validates credentials before serving from cache.\n   *\n   * SECURITY NOTE: There is a TOCTOU (time-of-check-to-time-of-use) race condition\n   * between validating auth and serving the file. An attacker could theoretically:\n   * 1. Time a request without credentials after auth validation succeeds\n   * 2. 
Get the symlink created before the legitimate request completes\n   *\n   * This risk is accepted because:\n   * - The file content is identical (same cached data)\n   * - The attacker would need precise timing\n   * - The worst case is serving cached data the attacker could access with valid credentials\n   * - Fixing this would require exclusive locks, adding complexity and latency\n   *\n   * If stricter isolation is needed in the future, consider per-request temp directories\n   * or exclusive file locking during the validation-to-symlink window.\n   */\n  private async validateCacheAccess(\n    url: string,\n    cachedPath: string,\n    options?: DownloadOptions\n  ): Promise<void> {\n    const metadata = await readCacheMetadata(cachedPath);\n\n    // If no metadata or auth not required, allow access\n    if (!metadata || !metadata.authRequired) {\n      return;\n    }\n\n    // Auth is required - check if credentials were provided\n    if (!options?.auth) {\n      throw new Error(\n        `Authentication required to access cached file for URL: ${url}`\n      );\n    }\n\n    // Validate the credentials with the storage provider\n    const provider = this.storageProviders.find((p) => p.testUrl(url));\n    if (!provider) {\n      throw new Error(`No storage provider found for URL: ${url}`);\n    }\n\n    if (provider.validateAuth) {\n      this.log.debug({ url }, \"Validating auth for cached file access\");\n      await provider.validateAuth(url, options);\n      this.log.debug({ url }, \"Auth validated, serving from cache\");\n    } else {\n      // Provider doesn't support auth validation, allow access if auth was provided\n      this.log.warn(\n        { url },\n        \"Provider does not support auth validation, allowing access\"\n      );\n    }\n  }\n\n  async downloadRepo(repoUrl: string, targetDir: string): Promise<string> {\n    if (repoUrl in this.cache) {\n      return this.cache[repoUrl];\n    }\n    if (repoUrl in this.activeDownloads) {\n      this.log.info(`Awaiting in-progress clone for ${repoUrl}`);\n      return this.activeDownloads[repoUrl];\n    }\n    try {\n      const start = Date.now();\n      this.activeDownloads[repoUrl] = this._cloneWithinDirectory(\n        repoUrl,\n        targetDir\n      );\n      const result = await this.activeDownloads[repoUrl];\n      delete this.activeDownloads[repoUrl];\n      this.cache[repoUrl] = result;\n      const duration = (Date.now() - start) / 1000;\n      const dirSize = await getDirectorySizeInBytes(result);\n      this.log.info(\n        `Cloned repository ${repoUrl} (${makeHumanReadableSize(\n          dirSize\n        )}) in ${duration.toFixed(2)}s (${makeHumanReadableSize(\n          (dirSize / duration) * 1000\n        )}/s)`\n      );\n      sendSystemWebhook(\n        \"file_downloaded\",\n        { url: repoUrl, local_path: result, size: dirSize, duration },\n        this.log\n      );\n      return result;\n    } catch (error: any) {\n      this.log.error(\"Error cloning repository:\", error);\n      throw error;\n    }\n  }\n\n  async uploadFile(\n    url: string,\n    fileOrPath: string | Buffer,\n    contentType?: string\n  ): Promise<void> {\n    if (url in this.activeUploads) {\n      await this.activeUploads[url].abort();\n      delete this.activeUploads[url];\n    }\n\n    // Determine content type from URL if not provided\n    const mimeType = contentType || getContentTypeFromUrl(url);\n\n    for (const provider of this.storageProviders) {\n      if (provider.uploadFile && provider.testUrl(url)) {\n    
    this.log.info(\n          `Uploading to ${url} using provider ${provider.constructor.name}`\n        );\n        this.activeUploads[url] = provider.uploadFile(\n          url,\n          fileOrPath,\n          mimeType\n        );\n        break; // Use only the first matching provider\n      }\n    }\n    if (!this.activeUploads[url]) {\n      throw new Error(`No storage provider found for URL: ${url}`);\n    }\n    const start = Date.now();\n    const size =\n      fileOrPath instanceof Buffer\n        ? fileOrPath.length\n        : (await fsPromises.stat(fileOrPath)).size;\n\n    await this.activeUploads[url].upload();\n    delete this.activeUploads[url];\n    const duration = (Date.now() - start) / 1000;\n    sendSystemWebhook(\n      \"file_uploaded\",\n      { url, local_path: fileOrPath, size, duration },\n      this.log\n    );\n  }\n\n  private async _cloneWithinDirectory(\n    repoUrl: string,\n    targetDir: string\n  ): Promise<string> {\n    await fsPromises.mkdir(targetDir, { recursive: true });\n\n    // Parse the URL to extract base URL and optional ref (branch/commit/tag)\n    const { baseUrl, ref } = parseGitUrl(repoUrl);\n\n    // Check to see if the repo is already cloned\n    const repoName = baseUrl\n      .substring(baseUrl.lastIndexOf(\"/\") + 1)\n      .replace(/\\.git$/, \"\");\n    const existingDir = path.join(targetDir, repoName);\n    if (fs.existsSync(existingDir)) {\n      // Check if it's a git repo\n      if (fs.existsSync(path.join(existingDir, \".git\"))) {\n        if (ref) {\n          // If a specific ref is requested, fetch and checkout that ref\n          this.log.info(\n            `Repository ${baseUrl} already cloned, checking out ref: ${ref}`\n          );\n          try {\n            await execFilePromise(\"git\", [\"fetch\", \"--all\"], {\n              cwd: existingDir,\n            });\n            await execFilePromise(\"git\", [\"checkout\", ref], {\n              cwd: existingDir,\n            });\n          } catch (error) {\n            this.log.error(\n              { error },\n              `Error checking out ref ${ref} for ${baseUrl}, using existing copy`\n            );\n          }\n        } else {\n          // No specific ref, just pull latest\n          this.log.info(`Repository ${baseUrl} already cloned, pulling latest`);\n          try {\n            await execFilePromise(\"git\", [\"pull\"], { cwd: existingDir });\n          } catch (error) {\n            this.log.error(\n              { error },\n              `Error pulling latest changes for ${baseUrl}, using existing copy`\n            );\n          }\n        }\n        return existingDir;\n      } else {\n        throw new Error(\n          `Directory ${existingDir} already exists and is not a git repository`\n        );\n      }\n    }\n\n    // Clone the repo to the custom nodes directory\n    this.log.info(`Cloning ${baseUrl} to ${targetDir}`);\n    await execFilePromise(\"git\", [\"clone\", baseUrl], { cwd: targetDir });\n\n    // If a specific ref was requested, checkout that ref\n    if (ref) {\n      this.log.info(`Checking out ref: ${ref}`);\n      await execFilePromise(\"git\", [\"checkout\", ref], { cwd: existingDir });\n    }\n\n    return path.join(targetDir, repoName);\n  }\n}\n\nlet storageManager: RemoteStorageManager | undefined;\nexport default function getStorageManager(log?: FastifyBaseLogger) {\n  if (!storageManager && log) {\n    storageManager = new RemoteStorageManager(config.cacheDir, log);\n  } else if (!storageManager && !log) {\n    
throw new Error(\n      \"RemoteStorageManager not initialized yet, log parameter required\"\n    );\n  }\n  if (!storageManager) {\n    throw new Error(\"RemoteStorageManager not initialized yet\");\n  }\n  return storageManager;\n}\n"
  },
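  {
    "path": "src/remote-storage-manager.example.ts",
    "content": "// A minimal, hypothetical sketch of the RemoteStorageManager lifecycle: the\n// first getStorageManager call must pass a logger; later calls reuse the\n// singleton. downloadFile dedupes concurrent requests for the same URL and\n// serves repeats from the cache directory via symlinks. The URL and target\n// directory are illustrative.\nimport { FastifyBaseLogger } from \"fastify\";\nimport getStorageManager from \"./remote-storage-manager\";\n\nexport async function fetchModel(\n  log: FastifyBaseLogger,\n  modelDir: string\n): Promise<string> {\n  const storage = getStorageManager(log); // initializes on first call\n  return storage.downloadFile(\n    \"https://example.com/models/example.safetensors\",\n    modelDir\n  );\n}\n"
  },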
  {
    "path": "src/server.ts",
    "content": "import Fastify from \"fastify\";\nimport fastifySwagger from \"@fastify/swagger\";\nimport fastifySwaggerUI from \"@fastify/swagger-ui\";\nimport {\n  jsonSchemaTransform,\n  serializerCompiler,\n  validatorCompiler,\n  ZodTypeProvider,\n} from \"fastify-type-provider-zod\";\nimport fsPromises from \"fs/promises\";\nimport path from \"path\";\nimport config, { setWarmupPrompt } from \"./config\";\nimport {\n  zodToMarkdownTable,\n  setDeletionCost,\n  installCustomNode,\n  aptInstallPackages,\n  pipInstallPackages,\n} from \"./utils\";\nimport { getConfiguredWebhookHandlers, sendWebhook } from \"./event-emitters\";\nimport { convertImageBuffer } from \"./image-tools\";\nimport getStorageManager from \"./remote-storage-manager\";\nimport { NodeProcessError, preprocessNodes } from \"./comfy-node-preprocessors\";\nimport {\n  warmupComfyUI,\n  waitForComfyUIToStart,\n  launchComfyUI,\n  shutdownComfyUI,\n  runPromptAndGetOutputs,\n  connectToComfyUIWebsocketStream,\n  PromptOutputsWithStats,\n  getModels,\n  interruptPrompt,\n} from \"./comfy\";\nimport {\n  PromptRequestSchema as BasePromptRequestSchema,\n  PromptErrorResponseSchema,\n  WorkflowTree,\n  isWorkflow,\n  ExecutionStatsSchema,\n  DownloadRequestSchema as BaseDownloadRequestSchema,\n  DownloadResponseSchema,\n  DownloadErrorResponseSchema,\n} from \"./types\";\nimport workflows from \"./workflows\";\nimport { z } from \"zod\";\nimport { WebSocket } from \"ws\";\nimport { fetch } from \"undici\";\nimport { getProxyDispatcher } from \"./proxy-dispatcher\";\n\nconst { apiVersion: version } = config;\n\nconst server = Fastify({\n  bodyLimit: config.maxBodySize,\n  logger: { level: config.logLevel },\n  connectionTimeout: 0,\n  keepAliveTimeout: 0,\n  requestTimeout: 0,\n});\nserver.setValidatorCompiler(validatorCompiler);\nserver.setSerializerCompiler(serializerCompiler);\n\nconst remoteStorageManager = getStorageManager(server.log);\n\nlet PromptRequestSchema: z.ZodObject<any, any> = BasePromptRequestSchema;\n\nfor (const provider of remoteStorageManager.storageProviders) {\n  if (\n    !provider.uploadFile ||\n    !provider.requestBodyUploadKey ||\n    !provider.requestBodyUploadSchema\n  )\n    continue;\n  PromptRequestSchema = PromptRequestSchema.extend({\n    [provider.requestBodyUploadKey]: provider.requestBodyUploadSchema\n      .extend({ async: z.boolean().optional().default(false) })\n      .optional(),\n  });\n}\n\ntype PromptRequest = z.infer<typeof PromptRequestSchema>;\n\nconst WorkflowRequestSchema = PromptRequestSchema.omit({ prompt: true }).extend(\n  {\n    input: z.record(z.any()),\n  }\n);\n\nexport type WorkflowRequest = z.infer<typeof WorkflowRequestSchema>;\n\nconst PromptResponseSchema = PromptRequestSchema.extend({\n  images: z.array(z.string()).optional(),\n  filenames: z.array(z.string()).optional(),\n  status: z.enum([\"ok\"]).optional(),\n  stats: ExecutionStatsSchema.optional(),\n});\n\nconst WorkflowResponseSchema = PromptResponseSchema.extend({\n  input: z.record(z.any()),\n});\n\nconst modelSchema: any = {};\nfor (const modelType in config.models) {\n  modelSchema[modelType] = z.string().array();\n}\n\nconst ModelResponseSchema = z.object(modelSchema);\ntype ModelResponse = z.infer<typeof ModelResponseSchema>;\n\nlet warm = false;\nlet wasEverWarm = false;\nlet queueDepth = 0;\n\nserver.register(fastifySwagger, {\n  openapi: {\n    openapi: \"3.0.0\",\n    info: {\n      title: \"ComfyUI API\",\n      version,\n    },\n    servers: [\n      {\n        url: `{accessDomainName}`,\n       
 description: \"Your server\",\n        variables: {\n          accessDomainName: {\n            default: `http://localhost:${config.wrapperPort}`,\n            description:\n              \"The domain name of the server, protocol included, port optional\",\n          },\n        },\n      },\n    ],\n  },\n  transform: jsonSchemaTransform,\n});\nserver.register(fastifySwaggerUI, {\n  routePrefix: \"/docs\",\n  uiConfig: {\n    deepLinking: true,\n  },\n});\n\nserver.after(() => {\n  const app = server.withTypeProvider<ZodTypeProvider>();\n  app.get(\n    \"/health\",\n    {\n      schema: {\n        summary: \"Health Probe\",\n        description: \"Check if the server is healthy\",\n        response: {\n          200: z.object({\n            version: z.literal(version),\n            status: z.literal(\"healthy\"),\n          }),\n          500: z.object({\n            version: z.literal(version),\n            status: z.literal(\"not healthy\"),\n          }),\n        },\n      },\n    },\n    async (request, reply) => {\n      // 200 if ready, 500 if not\n      if (wasEverWarm) {\n        return reply.code(200).send({ version, status: \"healthy\" });\n      }\n      return reply.code(500).send({ version, status: \"not healthy\" });\n    }\n  );\n\n  app.get(\n    \"/ready\",\n    {\n      schema: {\n        summary: \"Readiness Probe\",\n        description: \"Check if the server is ready to serve traffic\",\n        response: {\n          200: z.object({\n            version: z.literal(version),\n            status: z.literal(\"ready\"),\n          }),\n          503: z.object({\n            version: z.literal(version),\n            status: z.literal(\"not ready\"),\n          }),\n        },\n      },\n    },\n    async (request, reply) => {\n      if (\n        warm &&\n        (!config.maxQueueDepth || queueDepth < config.maxQueueDepth)\n      ) {\n        return reply.code(200).send({ version, status: \"ready\" });\n      }\n      return reply.code(503).send({ version, status: \"not ready\" });\n    }\n  );\n\n  app.get(\n    \"/models\",\n    {\n      schema: {\n        summary: \"List Models\",\n        description:\n          \"List all available models. This is from the contents of the models directory.\",\n        response: {\n          200: ModelResponseSchema,\n        },\n      },\n    },\n    async (request, reply) => {\n      const modelResponse: ModelResponse = {};\n      const modelsByType = await getModels();\n      for (const modelType in modelsByType) {\n        modelResponse[modelType] = modelsByType[modelType].all;\n      }\n      return modelResponse;\n    }\n  );\n\n  /**\n   * This route is the primary wrapper around the ComfyUI /prompt endpoint.\n   * It shares the same schema as the ComfyUI /prompt endpoint, but adds the\n   * ability to convert the output image to a different format, and to send\n   * the output image to a webhook, or return it in the response.\n   *\n   * If your application has it's own ID scheme, you can provide the ID in the\n   * request body. 
If you don't provide an ID, one will be generated for you.\n   */\n  app.post<{\n    Body: PromptRequest;\n  }>(\n    \"/prompt\",\n    {\n      schema: {\n        summary: \"Submit Prompt\",\n        description: \"Submit an API-formatted ComfyUI prompt.\",\n        body: PromptRequestSchema,\n        response: {\n          200: PromptResponseSchema,\n          202: PromptResponseSchema,\n          400: PromptErrorResponseSchema,\n        },\n      },\n    },\n    async (request, reply) => {\n      let { prompt, id, webhook, webhook_v2, convert_output, credentials } = request.body;\n\n      /**\n       * Here we go through all the nodes in the prompt to validate it,\n       * and also to do some pre-processing.\n       */\n      let hasSaveImage = false;\n\n      const log = app.log.child({ id });\n\n      const start = Date.now();\n      try {\n        const { prompt: preprocessedPrompt, hasSaveImage: saveImageFound } =\n          await preprocessNodes(prompt, id, log, credentials);\n        prompt = preprocessedPrompt;\n        hasSaveImage = saveImageFound;\n      } catch (e: NodeProcessError | any) {\n        log.error(`Failed to preprocess nodes: ${e.message}`);\n        const code = e.code && [400, 422].includes(e.code) ? e.code : 400;\n        return reply.code(code).send({\n          error: e.message || \"Failed to preprocess nodes\",\n          location: e.location || \"prompt\",\n        });\n      }\n\n      const preprocessTime = Date.now();\n      log.debug(`Preprocessed prompt in ${preprocessTime}ms`);\n\n      /**\n       * If the prompt has no outputs, there's no point in running it.\n       */\n      if (!hasSaveImage) {\n        return reply.code(400).send({\n          error:\n            'Prompt must contain a node with a \"filename_prefix\" input, such as \"SaveImage\"',\n          location: \"prompt\",\n        });\n      }\n\n      type ProcessedOutput = {\n        buffers: Buffer[];\n        filenames: string[];\n        stats: any;\n      };\n\n      const postProcessOutputs = async ({\n        outputs,\n        stats,\n      }: PromptOutputsWithStats): Promise<ProcessedOutput> => {\n        stats.preprocess_time = preprocessTime - start;\n        stats.comfy_round_trip_time = Date.now() - preprocessTime;\n        const filenames: string[] = [];\n        const buffers: Buffer[] = [];\n        const unlinks: Promise<void>[] = [];\n        for (const originalFilename in outputs) {\n          let filename = originalFilename;\n          let fileBuffer = outputs[filename];\n          if (convert_output) {\n            try {\n              fileBuffer = await convertImageBuffer(fileBuffer, convert_output);\n\n              /**\n               * If the user has provided an output format, we need to update the filename\n               */\n              filename = originalFilename.replace(\n                /\\.[^/.]+$/,\n                `.${convert_output.format}`\n              );\n            } catch (e: any) {\n              log.warn(`Failed to convert image: ${e.message}`);\n            }\n          }\n          filenames.push(filename);\n          buffers.push(fileBuffer);\n          unlinks.push(\n            fsPromises.unlink(path.join(config.outputDir, originalFilename))\n          );\n        }\n        await Promise.all(unlinks);\n        stats.postprocess_time =\n          Date.now() - stats.comfy_round_trip_time - preprocessTime;\n        return {\n          buffers,\n          filenames,\n          stats,\n        };\n      };\n\n      const runPromptPromise = 
runPromptAndGetOutputs(id, prompt, log)\n        .catch((e: any) => {\n          log.error(`Failed to run prompt: ${e.message}`);\n          if (webhook_v2) {\n            const webhookBody = {\n              type: \"prompt.failed\",\n              timestamp: new Date().toISOString(),\n              id,\n              prompt,\n              error: e.message,\n            };\n            sendWebhook(webhook_v2, webhookBody, log, 2);\n          } else if (webhook) {\n            log.warn(\n              `.webhook has been deprecated in favor of .webhook_v2. Support for .webhook will be removed in a future version.`\n            );\n            const webhookBody = {\n              event: \"prompt.failed\",\n              id,\n              prompt,\n              error: e.message,\n            };\n            sendWebhook(webhook, webhookBody, log, 1);\n          }\n          throw e;\n        })\n        .then(postProcessOutputs);\n\n      let uploadPromise: Promise<{\n        images: string[];\n        filenames: string[];\n        stats: any;\n      }> | null = null;\n\n      type Handler = (data: ProcessedOutput) => Promise<{\n        images: string[];\n        filenames: string[];\n        stats: any;\n      }>;\n\n      const webhookHandler: Handler = async ({\n        buffers,\n        filenames,\n        stats,\n      }: ProcessedOutput) => {\n        if (!webhook) {\n          throw new Error(\"Webhook URL is not defined\");\n        }\n        log.warn(\n          `.webhook has been deprecated in favor of .webhook_v2. Support for .webhook will be removed in a future version.`\n        );\n        const webhookPromises: Promise<any>[] = [];\n        const images: string[] = [];\n        for (let i = 0; i < buffers.length; i++) {\n          const base64File = buffers[i].toString(\"base64\");\n          images.push(base64File);\n          const filename = filenames[i];\n          log.info(`Sending image ${filename} to webhook: ${webhook}`);\n          webhookPromises.push(\n            sendWebhook(\n              webhook,\n              {\n                event: \"output.complete\",\n                image: base64File,\n                id,\n                filename,\n                prompt,\n                stats,\n              },\n              log,\n              1\n            )\n          );\n        }\n        await Promise.all(webhookPromises);\n        return { images, filenames, stats };\n      };\n\n      const uploadHandler: Handler = async ({\n        buffers,\n        filenames,\n        stats,\n      }): Promise<{\n        images: string[];\n        filenames: string[];\n        stats: any;\n      }> => {\n        const uploadPromises: Promise<void>[] = [];\n        const images: string[] = [];\n        for (let i = 0; i < buffers.length; i++) {\n          const fileBuffer = buffers[i];\n          const filename = filenames[i];\n          for (const provider of remoteStorageManager.storageProviders) {\n            if (\n              provider.requestBodyUploadKey &&\n              request.body[provider.requestBodyUploadKey]\n            ) {\n              images.push(\n                provider.createUrl({\n                  ...request.body[provider.requestBodyUploadKey],\n                  filename,\n                })\n              );\n              break;\n            }\n          }\n          uploadPromises.push(\n            remoteStorageManager.uploadFile(images[i], fileBuffer)\n          );\n        }\n\n        await Promise.all(uploadPromises);\n        return { 
images, filenames, stats };\n      };\n\n      const storageProvider = remoteStorageManager.storageProviders.find(\n        (provider) =>\n          provider.requestBodyUploadKey &&\n          !!request.body[provider.requestBodyUploadKey]\n      );\n      const asyncUpload =\n        webhook ||\n        webhook_v2 ||\n        (storageProvider &&\n          storageProvider.requestBodyUploadKey &&\n          request.body[storageProvider.requestBodyUploadKey]?.async);\n\n      if (webhook) {\n        uploadPromise = runPromptPromise.then(webhookHandler);\n      } else if (!!storageProvider) {\n        uploadPromise = runPromptPromise.then(uploadHandler);\n      } else {\n        uploadPromise = runPromptPromise.then(\n          async ({ buffers, filenames, stats }) => {\n            const images: string[] = buffers.map((b) => b.toString(\"base64\"));\n            return { images, filenames, stats };\n          }\n        );\n      }\n\n      const finalStatsPromise = uploadPromise.then(\n        ({ images, stats, filenames }) => {\n          stats.upload_time =\n            Date.now() -\n            start -\n            stats.preprocess_time -\n            stats.comfy_round_trip_time -\n            stats.postprocess_time;\n          stats.total_time = Date.now() - start;\n          log.debug(stats);\n          return { images, stats, filenames };\n        }\n      );\n\n      if (asyncUpload) {\n        reply.code(202).send({ ...request.body, status: \"ok\", id, prompt });\n      }\n\n      const { images, stats, filenames } = await finalStatsPromise;\n\n      const outputPayload = {\n        ...request.body,\n        id,\n        prompt,\n        images,\n        filenames,\n        stats,\n      };\n\n      if (webhook_v2) {\n        log.debug(`Sending final response to webhook_v2: ${webhook_v2}`);\n        const webhookBody = {\n          type: \"prompt.complete\",\n          timestamp: new Date().toISOString(),\n          ...outputPayload,\n        };\n        sendWebhook(webhook_v2, webhookBody, log, 2);\n      }\n\n      if (!asyncUpload) {\n        return reply.send(outputPayload);\n      }\n    }\n  );\n\n  app.post(\n    \"/interrupt\",\n    {\n      schema: {\n        summary: \"Interrupt Prompt\",\n        description: \"Interrupt a running prompt by ID.\",\n        body: z.object({\n          id: z.string(),\n        }),\n        response: {\n          200: z.object({\n            id: z.string(),\n            interrupted: z.literal(\"success\"),\n          }),\n          404: z.object({\n            id: z.string(),\n            interrupted: z.literal(\"failed\"),\n          }),\n        },\n      },\n    },\n    async (request, reply) => {\n      const { id } = request.body as { id: string };\n\n      const log = app.log.child({ id });\n\n      try {\n        await interruptPrompt(id);\n        log.info(`Successfully interrupted prompt: ${id}`);\n        return reply.code(200).send({\n          id,\n          interrupted: \"success\",\n        });\n      } catch (e: any) {\n        log.error(`Failed to interrupt prompt: ${e.message}`);\n        return reply.code(404).send({\n          id,\n          interrupted: \"failed\",\n        });\n      }\n    }\n  );\n\n  const modelTypes = Object.keys(config.models);\n  const ModelTypeSchema =\n    modelTypes.length > 0\n      ? 
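// z.enum requires a non-empty tuple, so fall back to a plain string when no\n        // model types are configured.\n        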
z.enum(modelTypes as [string, ...string[]])\n      : z.string();\n\n  const DownloadRequestSchema = BaseDownloadRequestSchema.extend({\n    model_type: ModelTypeSchema,\n  });\n\n  app.post(\n    \"/download\",\n    {\n      schema: {\n        summary: \"Download Model\",\n        description:\n          \"Download a model from a URL to the appropriate model directory. By default, the download runs asynchronously and returns immediately with a 202 status. Set `wait: true` to hold the request open until the download completes.\",\n        body: DownloadRequestSchema,\n        response: {\n          200: DownloadResponseSchema,\n          202: DownloadResponseSchema,\n          400: DownloadErrorResponseSchema,\n        },\n      },\n    },\n    async (request, reply) => {\n      const { url, model_type, filename: filenameOverride, wait, auth } =\n        request.body as z.infer<typeof DownloadRequestSchema>;\n\n      // Log without auth details to prevent credential exposure\n      const log = app.log.child({\n        url,\n        model_type,\n        hasAuth: !!auth,\n        authType: auth?.type,\n      });\n\n      const modelConfig = config.models[model_type];\n      if (!modelConfig) {\n        return reply.code(400).send({\n          error: `Unknown model type: ${model_type}. Available types: ${Object.keys(config.models).join(\", \")}`,\n        });\n      }\n\n      const outputDir = modelConfig.dir;\n      let filename: string;\n      try {\n        filename = filenameOverride || path.basename(new URL(url).pathname);\n      } catch (err: any) {\n        log.error(`Invalid URL: ${err.message}`);\n        return reply.code(400).send({\n          error: `Invalid URL: ${err.message}`,\n        });\n      }\n\n      // Build download options with auth if provided\n      const downloadOptions = auth ? 
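\n        // Per-request auth takes precedence; undefined lets the provider fall back\n        // to URL-embedded or env-configured credentials.\n        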
{ auth } : undefined;\n\n      if (!wait) {\n        log.info(`Starting async download of ${url} to ${outputDir}`);\n        remoteStorageManager\n          .downloadFile(url, outputDir, filename, downloadOptions)\n          .then((finalPath) => {\n            log.info(`Download completed: ${finalPath}`);\n          })\n          .catch((err) => {\n            log.error(`Download failed: ${err.message}`);\n          });\n\n        return reply.code(202).send({\n          url,\n          model_type,\n          filename,\n          status: \"started\",\n        });\n      }\n\n      log.info(`Starting sync download of ${url} to ${outputDir}`);\n      const start = Date.now();\n\n      try {\n        const finalPath = await remoteStorageManager.downloadFile(\n          url,\n          outputDir,\n          filename,\n          downloadOptions\n        );\n        const duration = (Date.now() - start) / 1000;\n        const stats = await fsPromises.stat(\n          await fsPromises.realpath(finalPath)\n        );\n\n        log.info(\n          `Download completed: ${finalPath} (${stats.size} bytes in ${duration}s)`\n        );\n\n        return reply.code(200).send({\n          url,\n          model_type,\n          filename: path.basename(finalPath),\n          status: \"completed\",\n          size: stats.size,\n          duration,\n        });\n      } catch (err: any) {\n        log.error(`Download failed: ${err.message}`);\n        return reply.code(400).send({\n          error: err.message,\n        });\n      }\n    }\n  );\n\n  // Recursively build the route tree from workflows\n  const walk = (tree: WorkflowTree, route = \"/workflow\") => {\n    for (const key in tree) {\n      const node = tree[key];\n      if (isWorkflow(node)) {\n        const BodySchema = WorkflowRequestSchema.extend({\n          input: node.RequestSchema,\n        });\n\n        type BodyType = z.infer<typeof BodySchema>;\n\n        let description = \"\";\n        if (config.markdownSchemaDescriptions) {\n          description = zodToMarkdownTable(node.RequestSchema);\n        } else if (node.description) {\n          description = node.description;\n        }\n\n        let summary = key;\n        if (node.summary) {\n          summary = node.summary;\n        }\n\n        /**\n         * Workflow endpoints expose a simpler API to users, and then perform the transformation\n         * to a ComfyUI prompt behind the scenes. 
These endpoints under the hood just call the /prompt\n         * endpoint with the appropriate parameters.\n         */\n        app.post<{\n          Body: BodyType;\n        }>(\n          `${route}/${key}`,\n          {\n            schema: {\n              summary,\n              description,\n              body: BodySchema,\n              response: {\n                200: WorkflowResponseSchema,\n                202: WorkflowResponseSchema,\n              },\n            },\n          },\n          async (request, reply) => {\n            const log = app.log.child({ workflow: `${route}/${key}` });\n\n            log.debug({ input: request.body.input }, \"Workflow input received\");\n\n            let prompt;\n            try {\n              prompt = await node.generateWorkflow(request.body.input);\n              log.debug({ prompt }, \"Generated ComfyUI prompt from workflow\");\n            } catch (e: any) {\n              log.error(\n                { error: e.message, stack: e.stack },\n                \"Failed to generate workflow prompt\"\n              );\n              return reply.code(400).send({\n                error: `Failed to generate workflow prompt: ${e.message}`,\n                location: \"input\",\n              });\n            }\n\n            const promptRequestBody = {\n              ...request.body,\n              prompt,\n              input: undefined,\n            };\n\n            log.debug(\n              { promptRequestBody },\n              \"Sending request to /prompt endpoint\"\n            );\n\n            const resp = await fetch(\n              `http://localhost:${config.wrapperPort}/prompt`,\n              {\n                method: \"POST\",\n                headers: {\n                  \"Content-Type\": \"application/json\",\n                },\n                body: JSON.stringify(promptRequestBody),\n                dispatcher: getProxyDispatcher(),\n              }\n            );\n            const body = (await resp.json()) as any;\n            if (!resp.ok) {\n              log.error(\n                {\n                  status: resp.status,\n                  error: body.error,\n                  location: body.location,\n                  promptRequestBody,\n                },\n                \"Workflow request to /prompt endpoint failed\"\n              );\n              return reply.code(resp.status).send(body);\n            }\n\n            log.debug({ status: resp.status }, \"Workflow completed successfully\");\n\n            body.input = request.body.input;\n\n            return reply.code(resp.status).send(body);\n          }\n        );\n\n        server.log.info(`Registered workflow ${route}/${key}`);\n      } else {\n        walk(node as WorkflowTree, `${route}/${key}`);\n      }\n    }\n  };\n  walk(workflows);\n});\n\nlet comfyWebsocketClient: WebSocket | null = null;\n\nprocess.on(\"SIGINT\", async () => {\n  server.log.info(\"Received SIGINT, interrupting process\");\n  shutdownComfyUI();\n  if (comfyWebsocketClient) {\n    comfyWebsocketClient.terminate();\n  }\n  process.exit(0);\n});\n\nasync function launchComfyUIAndAPIServerAndWaitForWarmup() {\n  warm = false;\n  server.log.info(\n    `Starting ComfyUI API ${config.apiVersion} with ComfyUI ${config.comfyVersion}`\n  );\n  launchComfyUI().catch((err: any) => {\n    server.log.error(err.message);\n    if (config.alwaysRestartComfyUI) {\n      server.log.info(\"Restarting ComfyUI\");\n      launchComfyUIAndAPIServerAndWaitForWarmup();\n    } else {\n      
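// Without auto-restart there is no way to recover from a ComfyUI crash.\n      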
server.log.info(\"Exiting\");\n      process.exit(1);\n    }\n  });\n  await waitForComfyUIToStart(server.log);\n  server.log.info(`ComfyUI ${config.comfyVersion} started.`);\n  if (!wasEverWarm) {\n    await server.ready();\n    server.swagger();\n    // Start the server\n    await server.listen({ port: config.wrapperPort, host: config.wrapperHost });\n    server.log.info(`ComfyUI API ${config.apiVersion} started.`);\n  }\n  const handlers = getConfiguredWebhookHandlers(server.log);\n  if (handlers.onStatus) {\n    const originalHandler = handlers.onStatus;\n    handlers.onStatus = (msg) => {\n      queueDepth = msg.data.status.exec_info.queue_remaining;\n      server.log.debug(`Queue depth: ${queueDepth}`);\n      setDeletionCost(queueDepth);\n      originalHandler(msg);\n    };\n  } else {\n    handlers.onStatus = (msg) => {\n      queueDepth = msg.data.status.exec_info.queue_remaining;\n      server.log.debug(`Queue depth: ${queueDepth}`);\n      setDeletionCost(queueDepth);\n    };\n  }\n  comfyWebsocketClient = await connectToComfyUIWebsocketStream(\n    handlers,\n    server.log,\n    true\n  );\n  await warmupComfyUI();\n  wasEverWarm = true;\n  warm = true;\n}\n\nasync function downloadAllModels(\n  models: { url: string; local_path: string }[]\n) {\n  for (const { url, local_path } of models) {\n    const dir = path.dirname(local_path);\n    const filename = path.basename(local_path);\n    await remoteStorageManager.downloadFile(url, dir, filename);\n  }\n}\n\nasync function downloadWarmupPrompt() {\n  // Skip if warmup prompt is already set (from WARMUP_PROMPT_FILE)\n  if (config.warmupPrompt) {\n    return;\n  }\n\n  // Download warmup prompt from URL if specified\n  if (config.warmupPromptUrl) {\n    server.log.info(\n      `Downloading warmup prompt from ${config.warmupPromptUrl}`\n    );\n    const start = Date.now();\n    const resp = await fetch(config.warmupPromptUrl, {\n      headers: config.httpAuthHeader,\n      dispatcher: getProxyDispatcher(),\n    });\n    if (!resp.ok) {\n      throw new Error(\n        `Failed to download warmup prompt from ${config.warmupPromptUrl}: ${resp.status} ${resp.statusText}`\n      );\n    }\n    const content = await resp.text();\n    setWarmupPrompt(content);\n    const duration = (Date.now() - start) / 1000;\n    server.log.info(\n      `Downloaded and parsed warmup prompt in ${duration.toFixed(2)}s`\n    );\n  }\n}\n\nasync function processManifest() {\n  if (config.manifest) {\n    if (config.manifest.apt) {\n      server.log.info(\n        `Installing ${config.manifest.apt.length} apt packages specified in manifest`\n      );\n      await aptInstallPackages(config.manifest.apt, server.log);\n    }\n    if (config.manifest.pip) {\n      server.log.info(\n        `Installing ${config.manifest.pip.length} pip packages specified in manifest`\n      );\n      await pipInstallPackages(config.manifest.pip, server.log);\n    }\n    if (config.manifest.custom_nodes) {\n      server.log.info(\n        `Installing ${config.manifest.custom_nodes.length} custom nodes specified in manifest`\n      );\n      for (const node of config.manifest.custom_nodes) {\n        await installCustomNode(node, server.log);\n      }\n    }\n    if (config.manifest.models.before_start) {\n      server.log.info(\n        `Downloading ${config.manifest.models.before_start.length} models specified in manifest before startup`\n      );\n      await downloadAllModels(config.manifest.models.before_start);\n    }\n    if (config.manifest.models.after_start) {\n      
server.log.info(\n        `Downloading ${config.manifest.models.after_start.length} models specified in manifest after startup`\n      );\n\n      // Don't await, do it in the background\n      downloadAllModels(config.manifest.models.after_start);\n    }\n  }\n}\n\nexport async function start() {\n  try {\n    const start = Date.now();\n    await remoteStorageManager.enforceCacheSize();\n    await downloadWarmupPrompt();\n    await processManifest();\n    if (config.manifest) {\n      server.log.info(\n        `Processed manifest file in ${(Date.now() - start) / 1000}s`\n      );\n    }\n\n    // Start ComfyUI\n    await launchComfyUIAndAPIServerAndWaitForWarmup();\n    await getModels();\n    const warmupTime = Date.now() - start;\n    server.log.info(`ComfyUI fully ready in ${warmupTime / 1000}s`);\n  } catch (err: any) {\n    server.log.error(`Failed to start server: ${err.message}`);\n    process.exit(1);\n  }\n}\n"
  },
  {
    "path": "src/storage-providers/azure-blob.ts",
    "content": "import path from \"path\";\nimport fsPromises from \"fs/promises\";\nimport { StorageProvider, Upload } from \"../types\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport config from \"../config\";\nimport { z } from \"zod\";\nimport { DefaultAzureCredential } from \"@azure/identity\";\nimport {\n  BlobServiceClient,\n  StorageSharedKeyCredential,\n} from \"@azure/storage-blob\";\nimport fs, { ReadStream } from \"fs\";\n\nexport class AzureBlobStorageProvider implements StorageProvider {\n  log: FastifyBaseLogger;\n  requestBodyUploadKey = \"azure_blob_upload\";\n  requestBodyUploadSchema = z.object({\n    container: z.string().describe(\"Azure Blob Storage container name\"),\n    blob_prefix: z\n      .string()\n      .optional()\n      .default(\"\")\n      .describe(\"Path in the container to upload files to\"),\n  });\n  private urlRequestSchema = this.requestBodyUploadSchema.extend({\n    filename: z.string().describe(\"The name of the file to upload\"),\n  });\n  private client: BlobServiceClient | null = null;\n\n  constructor(log: FastifyBaseLogger) {\n    this.log = log.child({ provider: \"AzureBlobStorageProvider\" });\n\n    // Priority 1: Connection string (for Azurite or full connection strings)\n    if (config.azureStorageConnectionString) {\n      this.log.debug(\"Using Azure Storage connection string\");\n      this.client = BlobServiceClient.fromConnectionString(\n        config.azureStorageConnectionString\n      );\n    }\n    // Priority 2: Storage account with explicit key\n    else if (config.azureStorageAccount && config.azureStorageKey) {\n      this.log.debug(\"Using Azure Storage account with shared key\");\n      const sharedKeyCredential = new StorageSharedKeyCredential(\n        config.azureStorageAccount,\n        config.azureStorageKey\n      );\n      this.client = new BlobServiceClient(\n        `https://${config.azureStorageAccount}.blob.core.windows.net`,\n        sharedKeyCredential\n      );\n    }\n    // Priority 3: Storage account with SAS token\n    else if (config.azureStorageAccount && config.azureStorageSasToken) {\n      this.log.debug(\"Using Azure Storage account with SAS token\");\n      // SAS tokens are appended to the URL, not passed as credentials\n      const sasToken = config.azureStorageSasToken.startsWith(\"?\")\n        ? config.azureStorageSasToken\n        : `?${config.azureStorageSasToken}`;\n      this.client = new BlobServiceClient(\n        `https://${config.azureStorageAccount}.blob.core.windows.net${sasToken}`\n      );\n    }\n    // Priority 4: DefaultAzureCredential (handles many auth methods automatically)\n    else if (config.azureStorageAccount) {\n      this.log.debug(\"Using DefaultAzureCredential with storage account\");\n      const defaultAzureCredential = new DefaultAzureCredential();\n      this.client = new BlobServiceClient(\n        `https://${config.azureStorageAccount}.blob.core.windows.net`,\n        defaultAzureCredential\n      );\n    } else {\n      throw new Error(\n        \"Azure Storage configuration required. 
Set either:\\n\" +\n          \"- AZURE_STORAGE_CONNECTION_STRING (for Azurite or full connection)\\n\" +\n          \"- AZURE_STORAGE_ACCOUNT with AZURE_STORAGE_KEY (shared key auth)\\n\" +\n          \"- AZURE_STORAGE_ACCOUNT with AZURE_STORAGE_SAS_TOKEN (SAS auth)\\n\" +\n          \"- AZURE_STORAGE_ACCOUNT with DefaultAzureCredential (Azure AD/CLI/etc)\"\n      );\n    }\n  }\n\n  createUrl(inputs: z.infer<typeof this.urlRequestSchema>): string {\n    const { container, blob_prefix, filename } = inputs;\n    if (!container) {\n      throw new Error(\"Container is required to create Azure Blob URL\");\n    }\n    // Strip any leading slash and ensure a trailing slash so the filename joins cleanly.\n    const encodedBlobPrefix = blob_prefix\n      ? \`${blob_prefix.replace(/^\\//, \"\").replace(/\\/?$/, \"/\")}\`\n      : \"\";\n\n    // Get the base URL from the client\n    if (this.client) {\n      let baseUrl = this.client.url;\n      // For local development, ensure we use the Docker service name\n      if (baseUrl.includes(\"localhost:10000\")) {\n        baseUrl = baseUrl.replace(\"localhost\", \"azurite\");\n      }\n      return \`${baseUrl}/${container}/${encodedBlobPrefix}${filename}\`;\n    }\n\n    // Fallback to constructing URL from storage account\n    if (config.azureStorageAccount) {\n      return \`https://${config.azureStorageAccount}.blob.core.windows.net/${container}/${encodedBlobPrefix}${filename}\`;\n    }\n\n    throw new Error(\"Unable to create Azure Blob URL\");\n  }\n\n  testUrl(url: string): boolean {\n    // Support both HTTPS (production) and HTTP (local Azurite)\n    return (\n      (url.startsWith(\"https://\") && url.includes(\".blob.core.windows.net/\")) ||\n      (url.startsWith(\"http://\") &&\n        (url.includes(\"devstoreaccount\") || url.includes(\"azurite\")))\n    );\n  }\n\n  uploadFile(\n    url: string,\n    fileOrPath: string | Buffer,\n    contentType: string\n  ): Upload {\n    if (!this.client) {\n      throw new Error(\"Azure Blob Service Client is not initialized\");\n    }\n    return new AzureBlobUpload(\n      url,\n      fileOrPath,\n      contentType,\n      this.client,\n      this.log\n    );\n  }\n\n  async downloadFile(\n    url: string,\n    outputDir: string,\n    filenameOverride?: string\n  ): Promise<string> {\n    if (!this.client) {\n      throw new Error(\"Azure Blob Service Client is not initialized\");\n    }\n    // Parse the URL to extract container name and blob name\n    const parsedUrl = new URL(url);\n    let pathParts = parsedUrl.pathname.split(\"/\").filter(Boolean); // Remove empty parts\n\n    // For Azurite URLs, skip the account name (devstoreaccount1)\n    if (pathParts[0] === \"devstoreaccount1\") {\n      pathParts = pathParts.slice(1);\n    }\n\n    if (pathParts.length < 2) {\n      throw new Error(\"Invalid Azure Blob URL format\");\n    }\n    const containerName = pathParts[0];\n    const blobName = pathParts.slice(1).join(\"/\");\n\n    const containerClient = this.client.getContainerClient(containerName);\n    const blobClient = containerClient.getBlobClient(blobName);\n\n    const downloadResponse = await blobClient.download();\n    if (!downloadResponse.readableStreamBody) {\n      throw new Error(\"Failed to get readable stream from blob download\");\n    }\n    const downloadedFilePath = path.join(\n      outputDir,\n      filenameOverride || path.basename(blobName)\n    );\n    const writableStream = fs.createWriteStream(downloadedFilePath);\n    downloadResponse.readableStreamBody.pipe(writableStream);\n    await new Promise((resolve, reject) => {\n      writableStream.on(\"finish\", 
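\n        // Resolve only once the file has been fully flushed to disk.\n        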
() => resolve());\n      writableStream.on(\"error\", reject);\n    });\n    return downloadedFilePath;\n  }\n}\n\nclass AzureBlobUpload implements Upload {\n  url: string;\n  fileOrPath: string | Buffer;\n  contentType: string;\n  log: FastifyBaseLogger;\n  state: \"in-progress\" | \"completed\" | \"failed\" | \"aborted\" = \"in-progress\";\n  client: BlobServiceClient;\n  private abortController = new AbortController();\n\n  constructor(\n    url: string,\n    fileOrPath: string | Buffer,\n    contentType: string,\n    client: BlobServiceClient,\n    log: FastifyBaseLogger\n  ) {\n    this.url = url;\n    this.fileOrPath = fileOrPath;\n    this.contentType = contentType;\n    this.client = client;\n    this.log = log.child({ uploader: \"AzureBlobUpload\" });\n  }\n\n  private createInputStream(fileOrPath: string | Buffer): ReadStream | Buffer {\n    if (typeof fileOrPath === \"string\") {\n      return fs.createReadStream(fileOrPath);\n    } else {\n      return fileOrPath;\n    }\n  }\n\n  async upload(): Promise<void> {\n    // Parse the URL to extract container name and blob name\n    const url = new URL(this.url);\n    let pathParts = url.pathname.split(\"/\").filter(Boolean); // Remove empty parts\n\n    // For Azurite/emulator URLs in path-style format (http://host:port/accountname/container/blob)\n    // vs Azure URLs in host-style format (https://accountname.blob.core.windows.net/container/blob)\n    if (!url.hostname.includes(\".blob.core.windows.net\")) {\n      // Path-style URL - first part is account name, skip it\n      if (pathParts.length > 0) {\n        pathParts = pathParts.slice(1);\n      }\n    }\n\n    if (pathParts.length < 2) {\n      throw new Error(\"Invalid Azure Blob URL format\");\n    }\n    const containerName = pathParts[0];\n    const blobName = pathParts.slice(1).join(\"/\");\n    this.state = \"in-progress\";\n\n    try {\n      const blockBlobClient = this.client\n        .getContainerClient(containerName)\n        .getBlockBlobClient(blobName);\n      const inputStream = this.createInputStream(this.fileOrPath);\n      const fileSize =\n        typeof this.fileOrPath === \"string\"\n          ? (await fsPromises.stat(this.fileOrPath)).size\n          : this.fileOrPath.length;\n      await blockBlobClient.upload(inputStream, fileSize, {\n        abortSignal: this.abortController.signal,\n        blobHTTPHeaders: { blobContentType: this.contentType },\n      });\n      this.log.info({ containerName, blobName }, \"File uploaded successfully\");\n      this.state = \"completed\";\n    } catch (error) {\n      if (this.abortController.signal.aborted) {\n        this.state = \"aborted\";\n        this.log.warn(\"Upload aborted by user\");\n      } else {\n        this.state = \"failed\";\n        this.log.error({ error }, \"Error uploading file to Azure Blob Storage\");\n      }\n    }\n  }\n\n  async abort(): Promise<void> {\n    if (this.state !== \"in-progress\") {\n      this.log.warn(\`Cannot abort upload in state ${this.state}\`);\n      return;\n    }\n    this.abortController.abort();\n    this.state = \"aborted\";\n    this.log.info(\`Upload to ${this.url} aborted\`);\n  }\n}\n"
  },
  {
    "path": "src/storage-providers/hf.ts",
    "content": "import path from \"path\";\nimport fsPromises from \"fs/promises\";\nimport { StorageProvider, Upload } from \"../types\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport config from \"../config\";\nimport { execFile } from \"child_process\";\nimport { promisify } from \"util\";\nimport os from \"os\";\nimport { z } from \"zod\";\n\nconst execFilePromise = promisify(execFile);\n\nexport class HFStorageProvider implements StorageProvider {\n  log: FastifyBaseLogger;\n  requestBodyUploadKey = \"hf_upload\";\n  requestBodyUploadSchema = z.object({\n    repo: z.string().describe(\"HuggingFace repo name, e.g. user/repo\"),\n    repo_type: z\n      .enum([\"model\", \"dataset\"])\n      .optional()\n      .default(\"model\")\n      .describe(\"Type of HuggingFace repository\"),\n    revision: z\n      .string()\n      .optional()\n      .default(\"main\")\n      .describe(\"HuggingFace repo revision, e.g. main or a branch name\"),\n    directory: z\n      .string()\n      .optional()\n      .default(\"/\")\n      .describe(\"Directory in the repo to upload files to\"),\n  });\n\n  private urlRequestSchema = this.requestBodyUploadSchema.extend({\n    filename: z.string().describe(\"The name of the file to upload\"),\n  });\n\n  constructor(log: FastifyBaseLogger) {\n    this.log = log.child({ provider: \"HFStorageProvider\" });\n  }\n\n  createUrl(inputs: z.infer<typeof this.urlRequestSchema>): string {\n    const { repo, repo_type, revision, directory, filename } = inputs;\n    if (!repo) {\n      throw new Error(\"Repo is required to create HuggingFace URL\");\n    }\n    // Add repo type prefix for datasets\n    const repoPrefix = repo_type === \"dataset\" ? \"datasets/\" : \"\";\n    // URL-encode directory and filename to handle spaces and special characters\n    const encodedDirectory = directory\n      .split(\"/\")\n      .map((part) => encodeURIComponent(part))\n      .join(\"/\");\n    const encodedFilename = encodeURIComponent(filename);\n    return `https://huggingface.co/${repoPrefix}${repo}/resolve/${revision}/${encodedDirectory}/${encodedFilename}`;\n  }\n\n  testUrl(url: string): boolean {\n    return url.startsWith(\"https://huggingface.co/\") && !!config.hfCLIVersion;\n  }\n\n  uploadFile(\n    url: string,\n    fileOrPath: string | Buffer,\n    contentType: string\n  ): Upload {\n    return new HFUpload(url, fileOrPath, contentType, this.log);\n  }\n\n  async downloadFile(\n    url: string,\n    outputDir: string,\n    filenameOverride?: string\n  ): Promise<string> {\n    const outputPath = path.join(\n      outputDir,\n      filenameOverride || path.basename(new URL(url).pathname)\n    );\n    const { repo, repoType, revision, filePath } = parseHfUrl(url);\n    this.log.info(\n      `Using hf CLI to download ${filePath} from ${repo} (${repoType}) at revision ${revision}`\n    );\n\n    // For datasets, we need to use --repo-type dataset flag\n    const args =\n      repoType === \"dataset\"\n        ? [\n            \"download\",\n            repo,\n            filePath,\n            \"--repo-type\",\n            \"dataset\",\n            \"--revision\",\n            revision,\n          ]\n        : [\"download\", repo, filePath, \"--revision\", revision];\n\n    const downloadResult = await execFilePromise(\"hf\", args, {\n      env: process.env,\n    });\n\n    // Newer hf CLI (>=1.11.0) emits colored output like \"✓ Downloaded\\n  path: /cache/path\"\n    // instead of a bare path. 
Strip ANSI codes and extract the absolute path.\n    const stdout = downloadResult.stdout.replace(/\\x1b\\[[0-9;]*m/g, \"\");\n    const resolvedOutput = (() => {\n      for (const line of stdout.split(\"\\n\").map((l) => l.trim())) {\n        if (line.startsWith(\"/\")) return line;\n        const m = line.match(/^path:\\s+(\\/.*)/);\n        if (m) return m[1];\n      }\n      return stdout.trim();\n    })();\n\n    const downloadedPath = await fsPromises.realpath(resolvedOutput);\n\n    await execFilePromise(\"mv\", [downloadedPath, outputPath]);\n\n    return outputPath;\n  }\n}\n\nclass HFUpload implements Upload {\n  url: string;\n  fileOrPath: string;\n  contentType: string;\n  log: FastifyBaseLogger;\n  state: \"in-progress\" | \"completed\" | \"failed\" | \"aborted\" = \"in-progress\";\n  private fileIsReady: Promise<void>;\n  private abortController: AbortController | null = null;\n\n  constructor(\n    url: string,\n    fileOrPath: string | Buffer,\n    contentType: string,\n    log: FastifyBaseLogger\n  ) {\n    this.url = url;\n    // If fileOrPath is a Buffer, we need to write it to a temp file first\n    if (Buffer.isBuffer(fileOrPath)) {\n      const tempFilePath = path.join(\n        os.tmpdir(),\n        `hf-upload-${Date.now()}-${Math.random().toString(36).substring(2, 15)}`\n      );\n      this.fileIsReady = fsPromises.writeFile(tempFilePath, fileOrPath);\n      this.fileOrPath = tempFilePath;\n    } else {\n      this.fileOrPath = fileOrPath;\n      this.fileIsReady = Promise.resolve();\n    }\n    this.contentType = contentType;\n    this.log = log.child({ uploader: \"HFUpload\" });\n    this.state = \"in-progress\";\n  }\n\n  async upload(): Promise<void> {\n    await this.fileIsReady;\n    const { repo, repoType, revision, filePath } = parseHfUrl(this.url);\n    this.log.info(\n      `Using hf CLI to upload ${filePath} to ${repo} (${repoType}) at revision ${revision}`\n    );\n    this.abortController = new AbortController();\n    try {\n      // For datasets, we need to use --repo-type dataset flag\n      const args = [\n        \"upload\",\n        repo,\n        this.fileOrPath,\n        filePath,\n        \"--revision\",\n        revision,\n      ];\n      if (repoType === \"dataset\") {\n        args.push(\"--repo-type\", \"dataset\");\n      }\n\n      await execFilePromise(\"hf\", args, {\n        env: process.env,\n        signal: this.abortController.signal,\n      });\n      this.state = \"completed\";\n      this.log.info(`Upload to ${this.url} completed`);\n    } catch (error: any) {\n      console.error(error);\n      this.state = \"failed\";\n      this.log.error(\"Error uploading file to HuggingFace:\", error);\n    }\n  }\n\n  async abort(): Promise<void> {\n    if (this.state !== \"in-progress\") {\n      this.log.warn(`Cannot abort upload in state ${this.state}`);\n      return;\n    }\n    if (this.abortController) {\n      this.abortController.abort();\n    }\n    this.state = \"aborted\";\n    this.log.info(`Upload to ${this.url} aborted`);\n  }\n}\n\nfunction parseHfUrl(url: string): {\n  repo: string;\n  repoType: \"model\" | \"dataset\";\n  revision: string;\n  filePath: string;\n} {\n  // Example URLs:\n  // Model: https://huggingface.co/tencent/Hunyuan3D-2.1/resolve/main/hunyuan3d-dit-v2-1/model.fp16.ckpt\n  // Dataset: https://huggingface.co/datasets/user/repo/resolve/main/path/file.ext\n  const parsedUrl = new URL(url);\n  const parts = parsedUrl.pathname.split(\"/\");\n\n  let repoType: \"model\" | \"dataset\" = \"model\";\n  let 
startIdx = 1;\n\n  // Check if it's a dataset URL\n  if (parts[1] === \"datasets\") {\n    repoType = \"dataset\";\n    startIdx = 2;\n  }\n\n  if (parts.length >= startIdx + 4) {\n    const repo = parts[startIdx] + \"/\" + parts[startIdx + 1];\n    const revision = parts[startIdx + 3];\n    const filePath = decodeURIComponent(parts.slice(startIdx + 4).join(\"/\"));\n    return { repo, repoType, revision, filePath };\n  } else {\n    throw new Error(`Invalid HuggingFace URL: ${url.toString()}`);\n  }\n}\n"
  },
  {
    "path": "src/storage-providers/http.ts",
    "content": "import path from \"path\";\nimport { StorageProvider, Upload, DownloadOptions, DownloadAuth } from \"../types\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport fs from \"fs\";\nimport { Readable } from \"stream\";\nimport config from \"../config\";\nimport { z } from \"zod\";\nimport { fetch } from \"undici\";\nimport type { Response as UndiciResponse } from \"undici\";\nimport { getProxyDispatcher } from \"../proxy-dispatcher\";\n\nexport class HTTPStorageProvider implements StorageProvider {\n  log: FastifyBaseLogger;\n  requestBodyUploadKey = \"http_upload\";\n  requestBodyUploadSchema = z.object({\n    url_prefix: z.string(),\n  });\n  private urlRequestSchema = this.requestBodyUploadSchema.extend({\n    filename: z.string().describe(\"The name of the file to upload\"),\n  });\n\n  constructor(log: FastifyBaseLogger) {\n    this.log = log.child({ module: \"HTTPStorageProvider\" });\n  }\n\n  createUrl(inputs: z.infer<typeof this.urlRequestSchema>): string {\n    const { url_prefix, filename } = inputs;\n    if (!url_prefix) {\n      throw new Error(\"url_prefix is required to create HTTP URL\");\n    }\n    return `${url_prefix.replace(/\\/+$/, \"\")}/${filename}`;\n  }\n\n  testUrl(url: string): boolean {\n    return url.startsWith(\"http://\") || url.startsWith(\"https://\");\n  }\n\n  uploadFile(\n    url: string,\n    fileOrPath: string | Buffer,\n    contentType: string\n  ): HTTPUpload {\n    return new HTTPUpload(url, fileOrPath, contentType, this.log);\n  }\n\n  /**\n   * Validate authentication credentials using a HEAD request.\n   * Falls back to GET with Range: bytes=0-0 if HEAD returns 405 Method Not Allowed,\n   * as some servers don't support HEAD requests.\n   */\n  async validateAuth(url: string, options: DownloadOptions): Promise<void> {\n    const requestUrl = applyQueryAuth(url, options.auth);\n    const headers = getAuthHeaders(requestUrl, options.auth);\n\n    this.log.debug({ url }, \"Validating auth with HEAD request\");\n\n    let response = await fetch(requestUrl, {\n      method: \"HEAD\",\n      headers,\n      dispatcher: getProxyDispatcher(),\n    });\n\n    // If HEAD is not supported, try GET with Range header to minimize data transfer\n    if (response.status === 405) {\n      this.log.debug({ url }, \"HEAD not supported, falling back to GET with Range\");\n      response = await fetch(requestUrl, {\n        method: \"GET\",\n        headers: {\n          ...headers,\n          \"Range\": \"bytes=0-0\",\n        },\n        dispatcher: getProxyDispatcher(),\n      });\n      // 206 Partial Content is success for range requests\n      if (response.status === 206) {\n        this.log.debug({ url }, \"Auth validation successful (via Range request)\");\n        return;\n      }\n    }\n\n    if (!response.ok) {\n      if (response.status === 401 || response.status === 403) {\n        throw new Error(`Authentication failed: ${response.status} ${response.statusText}`);\n      }\n      throw new Error(`Auth validation failed: ${response.status} ${response.statusText}`);\n    }\n\n    this.log.debug({ url }, \"Auth validation successful\");\n  }\n\n  async downloadFile(\n    url: string,\n    outputDir: string,\n    filenameOverride?: string,\n    options?: DownloadOptions\n  ): Promise<string> {\n    try {\n      // Build headers with auth - per-request auth takes priority over URL-based auth\n      const requestUrl = applyQueryAuth(url, options?.auth);\n      const headers = getAuthHeaders(requestUrl, options?.auth);\n      const 
response = await fetch(requestUrl, { headers, dispatcher: getProxyDispatcher() });\n\n      if (!response.ok) {\n        throw new Error(`Error downloading file: ${response.statusText}`);\n      }\n\n      let outputPath = path.join(\n        outputDir,\n        filenameOverride || path.basename(new URL(url).pathname)\n      );\n\n      if (path.extname(outputPath) === \"\") {\n        const ext = getIntendedFileExtensionFromResponse(response) || \"\";\n        if (ext) {\n          outputPath = outputPath + ext;\n        }\n      }\n\n      // Get the response as a readable stream\n      const body = response.body;\n      if (!body) {\n        throw new Error(\"Response body is null\");\n      }\n\n      this.log.info(`Downloading file to ${outputPath}`);\n\n      // Create a writable stream to save the file\n      const fileStream = fs.createWriteStream(outputPath);\n\n      // Pipe the response to the file\n      await new Promise<void>((resolve, reject) => {\n        Readable.fromWeb(body as any)\n          .pipe(fileStream)\n          .on(\"finish\", () => resolve())\n          .on(\"error\", reject);\n      });\n\n      this.log.info(`File downloaded and saved to ${outputPath}`);\n      return outputPath;\n    } catch (error: any) {\n      this.log.error(\"Error downloading file:\", error);\n      throw error;\n    }\n  }\n}\n\nclass HTTPUpload implements Upload {\n  url: string;\n  fileOrPath: string | Buffer;\n  contentType: string;\n  log: FastifyBaseLogger;\n  state: \"in-progress\" | \"completed\" | \"failed\" | \"aborted\" = \"in-progress\";\n  private abortController: AbortController | null = null;\n\n  constructor(\n    url: string,\n    fileOrPath: string | Buffer,\n    contentType: string,\n    log: FastifyBaseLogger\n  ) {\n    this.url = url;\n    this.fileOrPath = fileOrPath;\n    this.contentType = contentType;\n    this.log = log.child({ uploader: \"HTTPUpload\" });\n    this.state = \"in-progress\";\n  }\n\n  async upload(): Promise<void> {\n    if (this.state !== \"in-progress\") {\n      throw new Error(`Cannot upload: state is ${this.state}`);\n    }\n\n    this.abortController = new AbortController();\n\n    try {\n      this.log.info({ url: this.url }, \"Starting upload\");\n\n      let body: Buffer | fs.ReadStream;\n\n      if (Buffer.isBuffer(this.fileOrPath)) {\n        body = this.fileOrPath;\n      } else {\n        body = fs.createReadStream(this.fileOrPath);\n      }\n\n      // Parse URL and build headers with auth from URL-embedded credentials\n      const parsedUrl = new URL(this.url);\n      const headers: Record<string, string> = {\n        \"Content-Type\": this.contentType,\n        ...getAuthHeaders(this.url),\n      };\n\n      const response = await fetch(parsedUrl.toString(), {\n        method: \"PUT\",\n        headers,\n        body: body as any,\n        signal: this.abortController.signal,\n        dispatcher: getProxyDispatcher(),\n      });\n\n      if (!response.ok) {\n        throw new Error(\n          `Upload failed with status ${response.status}: ${response.statusText}`\n        );\n      }\n\n      this.state = \"completed\";\n      this.log.info({ url: this.url }, \"Upload completed successfully\");\n    } catch (error) {\n      if (error instanceof Error && error.name === \"AbortError\") {\n        this.state = \"aborted\";\n        this.log.info({ url: this.url }, \"Upload aborted\");\n      } else {\n        this.state = \"failed\";\n        this.log.error({ url: this.url, error }, \"Upload failed\");\n        throw error;\n      
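\n        // Note: failures rethrow above so callers can surface the error; aborts are\n        // intentional and are swallowed.\n      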
}\n    } finally {\n      this.abortController = null;\n    }\n  }\n\n  async abort(): Promise<void> {\n    if (this.state !== \"in-progress\") {\n      this.log.warn(\n        { state: this.state },\n        \"Cannot abort: upload is not in progress\"\n      );\n      return;\n    }\n\n    if (this.abortController) {\n      this.log.info({ url: this.url }, \"Aborting upload\");\n      this.abortController.abort();\n      this.state = \"aborted\";\n    }\n  }\n}\n\nfunction mimeToExtension(mimeType: string): string | null {\n  const mimeMap: Record<string, string> = {\n    // Documents\n    \"application/pdf\": \".pdf\",\n    \"application/msword\": \".doc\",\n    \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\":\n      \".docx\",\n    \"application/vnd.ms-excel\": \".xls\",\n    \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\":\n      \".xlsx\",\n    \"application/vnd.ms-powerpoint\": \".ppt\",\n    \"application/vnd.openxmlformats-officedocument.presentationml.presentation\":\n      \".pptx\",\n    \"text/plain\": \".txt\",\n    \"text/csv\": \".csv\",\n    \"text/html\": \".html\",\n    \"application/rtf\": \".rtf\",\n\n    // Images\n    \"image/jpeg\": \".jpg\",\n    \"image/png\": \".png\",\n    \"image/gif\": \".gif\",\n    \"image/webp\": \".webp\",\n    \"image/svg+xml\": \".svg\",\n    \"image/bmp\": \".bmp\",\n    \"image/tiff\": \".tiff\",\n    \"image/x-icon\": \".ico\",\n\n    // Video\n    \"video/mp4\": \".mp4\",\n    \"video/mpeg\": \".mpeg\",\n    \"video/webm\": \".webm\",\n    \"video/quicktime\": \".mov\",\n    \"video/x-msvideo\": \".avi\",\n\n    // Audio\n    \"audio/mpeg\": \".mp3\",\n    \"audio/wav\": \".wav\",\n    \"audio/ogg\": \".ogg\",\n    \"audio/webm\": \".weba\",\n    \"audio/aac\": \".aac\",\n\n    // Archives\n    \"application/zip\": \".zip\",\n    \"application/x-tar\": \".tar\",\n    \"application/gzip\": \".gz\",\n    \"application/x-7z-compressed\": \".7z\",\n    \"application/x-rar-compressed\": \".rar\",\n\n    // Code/Data\n    \"application/json\": \".json\",\n    \"application/xml\": \".xml\",\n    \"text/xml\": \".xml\",\n    \"application/javascript\": \".js\",\n    \"text/javascript\": \".js\",\n    \"text/css\": \".css\",\n\n    // Binary/ML Model formats\n    \"application/octet-stream\": \".bin\", // Generic binary, but commonly used for model files\n    \"application/x-pytorch\": \".pt\",\n    \"application/x-tensorflow\": \".pb\",\n  };\n\n  return mimeMap[mimeType] || null;\n}\n\nfunction getIntendedFileExtensionFromResponse(\n  response: UndiciResponse\n): string | null {\n  // 1. Try content-disposition header for filename\n  const contentDisposition = response.headers.get(\"content-disposition\");\n  if (contentDisposition) {\n    const match = contentDisposition.match(\n      /filename[^;=\\n]*=((['\"]).*?\\2|[^;\\n]*)/\n    );\n    if (match != null && match[1]) {\n      const filename = match[1].replace(/['\"]/g, \"\");\n      const ext = path.extname(filename);\n      if (ext) return ext;\n    }\n  }\n\n  // 2. Try to get extension from the URL\n  try {\n    const url = new URL(response.url);\n    const pathname = url.pathname;\n    const ext = path.extname(pathname);\n    // Only use if it looks like a real extension (not empty and reasonable length)\n    if (ext && ext.length <= 15) return ext; // Increased to handle .safetensors\n  } catch {\n    // Invalid URL, continue to next method\n  }\n\n  // 3. 
Map content-type to common extensions\n  const contentType = response.headers.get(\"content-type\");\n  if (contentType) {\n    const mimeType = contentType.split(\";\")[0].trim().toLowerCase();\n    const ext = mimeToExtension(mimeType);\n    if (ext) return ext;\n  }\n\n  return null;\n}\n\n/**\n * Apply query parameter authentication to a URL (e.g., Azure SAS tokens).\n * Returns the URL with auth query param appended if applicable.\n */\nfunction applyQueryAuth(url: string, auth?: DownloadAuth): string {\n  if (!auth || auth.type !== \"query\") {\n    return url;\n  }\n  const parsedUrl = new URL(url);\n  parsedUrl.searchParams.set(auth.query_param, auth.query_value);\n  return parsedUrl.toString();\n}\n\n/**\n * Build authentication headers for HTTP requests.\n * Priority: per-request auth > URL-embedded auth > env config auth\n *\n * When per-request auth is provided, URL-embedded credentials are NOT used,\n * even if they exist. This prevents credential mixing and ensures explicit\n * auth takes full precedence.\n */\nfunction getAuthHeaders(url: string, auth?: DownloadAuth): Record<string, string> {\n  const headers: Record<string, string> = {};\n\n  // If per-request auth is provided, use it exclusively (no fallback to URL credentials)\n  if (auth) {\n    switch (auth.type) {\n      case \"bearer\":\n        headers[\"Authorization\"] = `Bearer ${auth.token}`;\n        return headers;\n      case \"basic\": {\n        const credentials = `${auth.username}:${auth.password}`;\n        headers[\"Authorization\"] = `Basic ${Buffer.from(credentials).toString(\"base64\")}`;\n        return headers;\n      }\n      case \"header\":\n        headers[auth.header_name] = auth.header_value;\n        return headers;\n      case \"query\":\n        // Query auth is applied to URL, not headers - but still return empty headers\n        // to avoid falling back to URL-embedded or env credentials\n        return headers;\n      case \"s3\":\n        // S3 auth is handled by S3StorageProvider, not HTTP\n        // Return empty headers - don't fall back to URL/env credentials\n        return headers;\n    }\n  }\n\n  // No per-request auth provided - fall back to URL-embedded credentials\n  const parsedUrl = new URL(url);\n  if (parsedUrl.username || parsedUrl.password) {\n    const credentials = `${parsedUrl.username}:${parsedUrl.password}`;\n    headers[\"Authorization\"] = `Basic ${Buffer.from(credentials).toString(\"base64\")}`;\n    return headers;\n  }\n\n  // Fall back to env-configured auth headers\n  if (Object.keys(config.httpAuthHeader).length > 0) {\n    Object.assign(headers, config.httpAuthHeader);\n  }\n\n  return headers;\n}\n\n"
  },
  {
    "path": "src/storage-providers/index.ts",
    "content": "import { StorageProvider } from \"../types\";\nimport { S3StorageProvider } from \"./s3\";\nimport { HTTPStorageProvider } from \"./http\";\nimport { HFStorageProvider } from \"./hf\";\nimport { AzureBlobStorageProvider } from \"./azure-blob\";\n\nexport default [\n  S3StorageProvider,\n  HFStorageProvider,\n  AzureBlobStorageProvider,\n  HTTPStorageProvider, // Should always be last\n] as Array<new (log: any) => StorageProvider>;\n"
  },
  {
    "path": "src/storage-providers/s3.ts",
    "content": "import path from \"path\";\nimport fs, { ReadStream } from \"fs\";\nimport { Readable } from \"stream\";\nimport {\n  S3Client,\n  GetObjectCommand,\n  PutObjectCommand,\n  HeadObjectCommand,\n  S3ClientConfig,\n} from \"@aws-sdk/client-s3\";\nimport { NodeHttpHandler } from \"@smithy/node-http-handler\";\nimport config from \"../config\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport { StorageProvider, Upload, DownloadOptions, DownloadAuth } from \"../types\";\nimport { z } from \"zod\";\n\nexport class S3StorageProvider implements StorageProvider {\n  log: FastifyBaseLogger;\n  s3: S3Client;\n  requestBodyUploadKey = \"s3\";\n  requestBodyUploadSchema = z.object({\n    bucket: z.string(),\n    prefix: z.string(),\n  });\n  private urlRequestSchema = this.requestBodyUploadSchema.extend({\n    filename: z.string().describe(\"The name of the file to upload\"),\n  });\n\n  constructor(log: FastifyBaseLogger) {\n    this.log = log.child({ provider: \"S3StorageProvider\" });\n    if (!config.awsRegion) {\n      throw new Error(\"AWS_REGION is not configured\");\n    }\n    this.s3 = new S3Client({\n      region: config.awsRegion,\n      requestHandler: new NodeHttpHandler({\n        connectionTimeout: 10000, // 10 seconds\n        requestTimeout: 0, // No timeout\n      }),\n      forcePathStyle: true, // Required for LocalStack or custom S3 endpoints\n    });\n  }\n\n  createUrl(inputs: z.infer<typeof this.urlRequestSchema>): string {\n    const { bucket, prefix, filename } = inputs;\n    if (!bucket) {\n      throw new Error(\"Bucket is required to create S3 URL\");\n    }\n    return `s3://${bucket}/${prefix || \"\"}${filename}`;\n  }\n\n  testUrl(url: string): boolean {\n    return url.startsWith(\"s3://\");\n  }\n\n  uploadFile(\n    url: string,\n    fileOrPath: string | Buffer,\n    contentType: string\n  ): S3Upload {\n    return new S3Upload(url, fileOrPath, contentType, this.s3, this.log);\n  }\n\n  /**\n   * Validate authentication credentials using a HeadObject request.\n   * This allows verifying access without downloading the full object.\n   */\n  async validateAuth(url: string, options: DownloadOptions): Promise<void> {\n    const { bucket, key } = parseS3Url(url);\n    const { client: s3Client, isPerRequest } = this.getS3ClientWithInfo(options.auth);\n\n    this.log.debug({ url, bucket, key }, \"Validating S3 auth with HeadObject\");\n\n    try {\n      const command = new HeadObjectCommand({ Bucket: bucket, Key: key });\n      await s3Client.send(command);\n      this.log.debug({ url }, \"S3 auth validation successful\");\n    } catch (error: any) {\n      // Log full error for debugging but sanitize the thrown error message\n      // to avoid leaking sensitive info from AWS SDK errors\n      this.log.error({ error, url }, \"S3 auth validation failed\");\n\n      const statusCode = error.$metadata?.httpStatusCode;\n      if (error.name === \"NotFound\" || statusCode === 404) {\n        throw new Error(\"S3 object not found\");\n      }\n      if (error.name === \"AccessDenied\" || statusCode === 403) {\n        throw new Error(\"S3 access denied\");\n      }\n      // Generic error without exposing SDK internals\n      throw new Error(\"S3 auth validation failed\");\n    } finally {\n      // Dispose per-request clients to free up resources\n      if (isPerRequest) {\n        s3Client.destroy();\n      }\n    }\n  }\n\n  async downloadFile(\n    s3Url: string,\n    outputDir: string,\n    filenameOverride?: string,\n    options?: DownloadOptions\n  
): Promise<string> {\n    const { bucket, key } = parseS3Url(s3Url);\n    const { client: s3Client, isPerRequest } = this.getS3ClientWithInfo(options?.auth);\n\n    try {\n      const outputPath = path.join(\n        outputDir,\n        filenameOverride || path.basename(key)\n      );\n\n      const command = new GetObjectCommand({ Bucket: bucket, Key: key });\n      const response = await s3Client.send(command);\n\n      if (!response.Body) {\n        throw new Error(\"Response body is null\");\n      }\n\n      const fileStream = fs.createWriteStream(outputPath);\n      await new Promise<void>((resolve, reject) => {\n        (response.Body as Readable)\n          .pipe(fileStream)\n          .on(\"finish\", resolve)\n          .on(\"error\", reject);\n      });\n\n      this.log.info(`File downloaded from S3 and saved to ${outputPath}`);\n      return outputPath;\n    } catch (error: any) {\n      console.error(error);\n      this.log.error(\"Error downloading file from S3:\", error);\n      throw error;\n    } finally {\n      // Dispose per-request clients to free up resources\n      if (isPerRequest) {\n        s3Client.destroy();\n      }\n    }\n  }\n\n  /**\n   * Get an S3 client with info about whether it was created per-request.\n   * Per-request clients should be destroyed after use to free resources.\n   */\n  private getS3ClientWithInfo(auth?: DownloadAuth): { client: S3Client; isPerRequest: boolean } {\n    // If no S3 auth provided, use the default client\n    if (!auth || auth.type !== \"s3\") {\n      return { client: this.s3, isPerRequest: false };\n    }\n\n    // Create a new client with per-request credentials\n    const clientConfig: S3ClientConfig = {\n      region: auth.region || config.awsRegion || undefined,\n      credentials: {\n        accessKeyId: auth.access_key_id,\n        secretAccessKey: auth.secret_access_key,\n        // Include session token for temporary credentials (STS)\n        ...(auth.session_token && { sessionToken: auth.session_token }),\n      },\n      requestHandler: new NodeHttpHandler({\n        connectionTimeout: 10000,\n        requestTimeout: 0,\n      }),\n      forcePathStyle: true,\n    };\n\n    // Add custom endpoint if provided\n    if (auth.endpoint) {\n      clientConfig.endpoint = auth.endpoint;\n    }\n\n    this.log.debug(\"Creating S3 client with per-request credentials\");\n    return { client: new S3Client(clientConfig), isPerRequest: true };\n  }\n}\n\nfunction parseS3Url(s3Url: string): { bucket: string; key: string } {\n  const url = new URL(s3Url);\n  const bucket = url.hostname;\n  const key = url.pathname.slice(1); // Remove leading slash\n  return { bucket, key };\n}\n\nexport class S3Upload implements Upload {\n  url: string;\n  fileOrPath: string | Buffer;\n  contentType: string;\n  log: FastifyBaseLogger;\n  state: \"in-progress\" | \"completed\" | \"failed\" | \"aborted\" = \"in-progress\";\n  s3: S3Client;\n\n  private abortController = new AbortController();\n\n  constructor(\n    url: string,\n    fileOrPath: string | Buffer,\n    contentType: string,\n    s3: S3Client,\n    log: FastifyBaseLogger\n  ) {\n    this.url = url;\n    this.fileOrPath = fileOrPath;\n    this.contentType = contentType;\n    this.s3 = s3;\n    this.log = log.child({ uploader: \"S3Upload\" });\n    this.state = \"in-progress\";\n  }\n\n  async upload(): Promise<void> {\n    try {\n      await this._uploadFileToS3Url(\n        this.url,\n        this.fileOrPath,\n        this.contentType,\n        this.abortController.signal\n      
);\n    } catch (error: any) {\n      console.error(error);\n      this.state = \"failed\";\n      this.log.error(\"Error uploading file to S3:\", error);\n    }\n  }\n\n  async abort(): Promise<void> {\n    if (this.state !== \"in-progress\") {\n      this.log.warn(`Cannot abort upload in state ${this.state}`);\n      return;\n    }\n    this.abortController.abort();\n    this.state = \"aborted\";\n    this.log.info(`Upload to ${this.url} aborted`);\n  }\n\n  private createInputStream(fileOrPath: string | Buffer): ReadStream | Buffer {\n    if (typeof fileOrPath === \"string\") {\n      return fs.createReadStream(fileOrPath);\n    } else {\n      return Buffer.from(fileOrPath);\n    }\n  }\n\n  private async _uploadFileToS3(\n    bucket: string,\n    key: string,\n    fileOrPath: string | Buffer,\n    contentType: string,\n    abortSignal: AbortSignal\n  ): Promise<void> {\n    if (!this.s3) {\n      throw new Error(\"S3 client is not configured\");\n    }\n    this.log.info(`Uploading file to S3 at s3://${bucket}/${key}`);\n\n    try {\n      const fileStream = this.createInputStream(fileOrPath);\n      const command = new PutObjectCommand({\n        Bucket: bucket,\n        Key: key,\n        Body: fileStream,\n        ContentType: contentType,\n      });\n      await this.s3.send(command, { abortSignal: abortSignal });\n      this.state = \"completed\";\n      this.log.info(`File uploaded to S3 at s3://${bucket}/${key}`);\n    } catch (error: any) {\n      console.error(error);\n      this.state = \"failed\";\n      this.log.error(\"Error uploading file to S3:\", error);\n    }\n  }\n\n  private async _uploadFileToS3Url(\n    s3Url: string,\n    fileOrPath: string | Buffer,\n    contentType: string,\n    abortSignal: AbortSignal\n  ): Promise<void> {\n    const { bucket, key } = parseS3Url(s3Url);\n    return this._uploadFileToS3(\n      bucket,\n      key,\n      fileOrPath,\n      contentType,\n      abortSignal\n    );\n  }\n}\n"
  },
  {
    "path": "src/types.ts",
    "content": "import { z } from \"zod\";\nimport { randomUUID } from \"crypto\";\nimport { RawData } from \"ws\";\n\nexport const ComfyNodeSchema = z.object({\n  inputs: z.any(),\n  class_type: z.string(),\n  _meta: z.any().optional(),\n});\n\nexport type ComfyNode = z.infer<typeof ComfyNodeSchema>;\n\nexport type ComfyPrompt = Record<string, ComfyNode>;\n\nexport const JPEGOptionsSchema = z.object({\n  quality: z.number().optional().default(80).describe(\"quality, integer 1-100\"),\n  progressive: z\n    .boolean()\n    .optional()\n    .default(false)\n    .describe(\"use progressive (interlace) scan\"),\n  chromaSubsampling: z\n    .string()\n    .optional()\n    .default(\"4:2:0\")\n    .describe(\n      \"set to '4:4:4' to prevent chroma subsampling otherwise defaults to '4:2:0' chroma subsampling\"\n    ),\n  optimizeCoding: z\n    .boolean()\n    .optional()\n    .default(true)\n    .describe(\"optimize Huffman coding tables\"),\n  mozjpeg: z\n    .boolean()\n    .optional()\n    .default(false)\n    .describe(\n      \"use mozjpeg defaults, equivalent to { trellisQuantisation: true, overshootDeringing: true, optimizeScans: true, quantisationTable: 3 }\"\n    ),\n  trellisQuantisation: z\n    .boolean()\n    .optional()\n    .default(false)\n    .describe(\"apply trellis quantisation\"),\n  overshootDeringing: z\n    .boolean()\n    .optional()\n    .default(false)\n    .describe(\"apply overshoot deringing\"),\n  optimizeScans: z\n    .boolean()\n    .optional()\n    .default(false)\n    .describe(\"optimize progressive scans\"),\n  quantisationTable: z\n    .number()\n    .optional()\n    .default(0)\n    .describe(\"set quantization table (0-8)\"),\n});\n\nexport type JPEGOptions = z.infer<typeof JPEGOptionsSchema>;\n\nexport const WebpOptionsSchema = z.object({\n  quality: z\n    .number()\n    .int()\n    .optional()\n    .default(80)\n    .describe(\"quality, integer 1-100\"),\n  alphaQuality: z\n    .number()\n    .int()\n    .optional()\n    .default(100)\n    .describe(\"quality of alpha layer, integer 1-100\"),\n  lossless: z\n    .boolean()\n    .optional()\n    .default(false)\n    .describe(\"use lossless compression mode\"),\n  nearLossless: z\n    .boolean()\n    .optional()\n    .default(false)\n    .describe(\"use near_lossless compression mode\"),\n  smartSubsample: z\n    .boolean()\n    .optional()\n    .default(false)\n    .describe(\"use smart_subsample mode\"),\n  preset: z\n    .enum([\"default\", \"photo\", \"picture\", \"drawing\", \"icon\", \"text\"])\n    .optional()\n    .default(\"default\")\n    .describe(\n      \"named preset for preprocessing/filtering, one of: default, photo, picture, drawing, icon, text\"\n    ),\n  effort: z\n    .number()\n    .int()\n    .min(0)\n    .max(6)\n    .optional()\n    .default(4)\n    .describe(\"CPU effort, between 0 (fastest) and 6 (slowest)\"),\n});\n\nexport type WebpOptions = z.infer<typeof WebpOptionsSchema>;\n\nexport const OutputConversionOptionsSchema = z.object({\n  format: z.enum([\"jpeg\", \"jpg\", \"webp\"]).describe(\"output format\"),\n  options: z.union([JPEGOptionsSchema, WebpOptionsSchema]).optional(),\n});\n\nexport type OutputConversionOptions = z.infer<\n  typeof OutputConversionOptionsSchema\n>;\n\nexport const ExecutionStatsSchema = z.object({\n  comfy_execution: z.object({\n    start: z.number(),\n    end: z.number(),\n    duration: z.number(),\n    nodes: z.record(\n      z.object({\n        start: z.number(),\n      })\n    ),\n  }),\n  preprocess_time: z.number().optional(),\n  
comfy_round_trip_time: z.number().optional(),\n  postprocess_time: z.number().optional(),\n  upload_time: z.number().optional(),\n  total_time: z.number().optional(),\n});\n\nexport type ExecutionStats = z.infer<typeof ExecutionStatsSchema>;\nexport function isExecutionStats(obj: any): obj is ExecutionStats {\n  return ExecutionStatsSchema.safeParse(obj).success;\n}\n\nexport const PromptErrorResponseSchema = z.object({\n  error: z.string(),\n  location: z.string().optional(),\n});\n\nexport type PromptErrorResponse = z.infer<typeof PromptErrorResponseSchema>;\n\nexport const WorkflowSchema = z.object({\n  RequestSchema: z.object({}),\n  generateWorkflow: z.function(),\n});\n\nexport interface Workflow {\n  RequestSchema: z.ZodObject<any, any>;\n  generateWorkflow: (input: any) => Promise<ComfyPrompt> | ComfyPrompt;\n  description?: string;\n  summary?: string;\n}\n\nexport function isWorkflow(obj: any): obj is Workflow {\n  return (\n    obj != null &&\n    typeof obj === \"object\" &&\n    \"RequestSchema\" in obj &&\n    \"generateWorkflow\" in obj\n  );\n}\n\nexport interface WorkflowTree {\n  [key: string]: WorkflowTree | Workflow;\n}\n\nexport interface ComfyWSMessage {\n  type:\n    | \"status\"\n    | \"progress\"\n    | \"progress_state\"\n    | \"executing\"\n    | \"execution_start\"\n    | \"execution_cached\"\n    | \"executed\"\n    | \"execution_success\"\n    | \"execution_interrupted\"\n    | \"execution_error\";\n  data: any;\n  sid: string | null;\n}\n\nexport interface ComfyWSStatusMessage extends ComfyWSMessage {\n  type: \"status\";\n  data: {\n    status: {\n      exec_info: {\n        queue_remaining: number;\n      };\n    };\n  };\n}\n\nexport interface ComfyWSProgressMessage extends ComfyWSMessage {\n  type: \"progress\";\n  data: {\n    value: number;\n    max: number;\n    prompt_id: string;\n    node: string | null;\n  };\n}\n\nexport interface ComfyWSProgressStateMessage extends ComfyWSMessage {\n  type: \"progress_state\";\n  data: {\n    prompt_id: string;\n    nodes: Record<\n      string,\n      {\n        value: number;\n        max: number;\n        state: string;\n        node_id: string;\n        prompt_id: string;\n        display_node_id?: string;\n        parent_node_id?: string;\n        real_node_id?: string;\n      }\n    >;\n  };\n}\n\nexport interface ComfyWSExecutingMessage extends ComfyWSMessage {\n  type: \"executing\";\n  data: {\n    node: string | null;\n    display_node: string;\n    prompt_id: string;\n  };\n}\n\nexport interface ComfyWSExecutionStartMessage extends ComfyWSMessage {\n  type: \"execution_start\";\n  data: {\n    prompt_id: string;\n    timestamp: number;\n  };\n}\n\nexport interface ComfyWSExecutionCachedMessage extends ComfyWSMessage {\n  type: \"execution_cached\";\n  data: {\n    nodes: string[];\n    prompt_id: string;\n    timestamp: number;\n  };\n}\n\nexport interface ComfyWSExecutedMessage extends ComfyWSMessage {\n  type: \"executed\";\n  data: {\n    node: string;\n    display_node: string;\n    output: any;\n    prompt_id: string;\n  };\n}\n\nexport interface ComfyWSExecutionSuccessMessage extends ComfyWSMessage {\n  type: \"execution_success\";\n  data: {\n    prompt_id: string;\n    timestamp: number;\n  };\n}\n\nexport interface ComfyWSExecutionInterruptedMessage extends ComfyWSMessage {\n  type: \"execution_interrupted\";\n  data: {\n    prompt_id: string;\n    node_id: string;\n    node_type: string;\n    executed: any[];\n  };\n}\n\nexport interface ComfyWSExecutionErrorMessage extends ComfyWSMessage 
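/* emitted when a node raises during execution; payload carries the failing node, its inputs, and the traceback */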
{\n  type: \"execution_error\";\n  data: {\n    prompt_id: string;\n    node_id: string;\n    node_type: string;\n    executed: any[];\n    exception_message: string;\n    exception_type: string;\n    traceback: string;\n    current_inputs: any;\n    current_outputs: any[];\n  };\n}\n\nexport function isStatusMessage(\n  msg: ComfyWSMessage\n): msg is ComfyWSStatusMessage {\n  return msg.type === \"status\";\n}\n\nexport function isProgressMessage(\n  msg: ComfyWSMessage\n): msg is ComfyWSProgressMessage {\n  return msg.type === \"progress\";\n}\n\nexport function isProgressStateMessage(\n  msg: ComfyWSMessage\n): msg is ComfyWSProgressStateMessage {\n  return msg.type === \"progress_state\";\n}\n\nexport function isExecutingMessage(\n  msg: ComfyWSMessage\n): msg is ComfyWSExecutingMessage {\n  return msg.type === \"executing\";\n}\n\nexport function isExecutionStartMessage(\n  msg: ComfyWSMessage\n): msg is ComfyWSExecutionStartMessage {\n  return msg.type === \"execution_start\";\n}\n\nexport function isExecutionCachedMessage(\n  msg: ComfyWSMessage\n): msg is ComfyWSExecutionCachedMessage {\n  return msg.type === \"execution_cached\";\n}\n\nexport function isExecutedMessage(\n  msg: ComfyWSMessage\n): msg is ComfyWSExecutedMessage {\n  return msg.type === \"executed\";\n}\n\nexport function isExecutionSuccessMessage(\n  msg: ComfyWSMessage\n): msg is ComfyWSExecutionSuccessMessage {\n  return msg.type === \"execution_success\";\n}\n\nexport function isExecutionInterruptedMessage(\n  msg: ComfyWSMessage\n): msg is ComfyWSExecutionInterruptedMessage {\n  return msg.type === \"execution_interrupted\";\n}\n\nexport function isExecutionErrorMessage(\n  msg: ComfyWSMessage\n): msg is ComfyWSExecutionErrorMessage {\n  return msg.type === \"execution_error\";\n}\n\nexport type WebhookHandlers = {\n  onMessage?: (msg: RawData) => Promise<void> | void;\n  onStatus?: (data: ComfyWSStatusMessage) => Promise<void> | void;\n  onProgress?: (data: ComfyWSProgressMessage) => Promise<void> | void;\n  onProgressState?: (data: ComfyWSProgressStateMessage) => Promise<void> | void;\n  onExecuting?: (data: ComfyWSExecutingMessage) => Promise<void> | void;\n  onExecutionStart?: (\n    data: ComfyWSExecutionStartMessage\n  ) => Promise<void> | void;\n  onExecutionCached?: (\n    data: ComfyWSExecutionCachedMessage\n  ) => Promise<void> | void;\n  onExecuted?: (data: ComfyWSExecutedMessage) => Promise<void> | void;\n  onExecutionSuccess?: (data: ComfyWSExecutionSuccessMessage) => Promise<void>;\n  onExecutionError?: (\n    data: ComfyWSExecutionErrorMessage\n  ) => Promise<void> | void;\n  onExecutionInterrupted?: (\n    data: ComfyWSExecutionInterruptedMessage\n  ) => Promise<void> | void;\n  onFileDownloaded?: (data: {\n    url: string;\n    local_path: string;\n    size: number;\n    duration: number;\n  }) => Promise<void> | void;\n  onFileUploaded?: (data: {\n    url: string;\n    local_path: string;\n    size: number;\n    duration: number;\n  }) => Promise<void> | void;\n  onFileDeleted?: (data: {\n    url: string;\n    local_path: string;\n    size: number;\n  }) => Promise<void> | void;\n};\n\nexport const SystemWebhookEvents = [\n  \"message\",\n  \"status\",\n  \"progress\",\n  \"progress_state\",\n  \"executing\",\n  \"execution_start\",\n  \"execution_cached\",\n  \"executed\",\n  \"execution_success\",\n  \"execution_interrupted\",\n  \"execution_error\",\n  \"file_downloaded\",\n  \"file_uploaded\",\n  \"file_deleted\",\n] as const;\n\nexport type ComfyPromptResponse = {\n  prompt_id: string;\n  
number: number;\n  node_errors: any[];\n};\n\nexport type ComfyHistoryResponse = Record<\n  string,\n  {\n    prompt: [number, string, ComfyPrompt, any, string[]];\n    outputs: Record<\n      string,\n      Record<\n        string,\n        {\n          filename: string;\n        }[]\n      >\n    >;\n    status: {\n      status_str: string;\n      completed: boolean;\n      messages: any[];\n    };\n  }\n>;\n\nexport interface Upload {\n  state: \"in-progress\" | \"completed\" | \"failed\" | \"aborted\";\n\n  upload(): Promise<void>;\n  abort(): Promise<void>;\n}\n\n/**\n * Authentication configuration for download requests.\n * Supports multiple auth types for different storage providers and services.\n */\nexport const DownloadAuthSchema = z.discriminatedUnion(\"type\", [\n  z.object({\n    type: z.literal(\"bearer\"),\n    token: z.string().describe(\"Bearer token for Authorization header\"),\n  }),\n  z.object({\n    type: z.literal(\"basic\"),\n    username: z.string().describe(\"Username for basic auth\"),\n    password: z.string().describe(\"Password for basic auth\"),\n  }),\n  z.object({\n    type: z.literal(\"header\"),\n    header_name: z.string().describe(\"Custom header name\"),\n    header_value: z.string().describe(\"Custom header value\"),\n  }),\n  z.object({\n    type: z.literal(\"query\"),\n    query_param: z.string().describe(\"Query parameter name (e.g., 'sig' for Azure SAS)\"),\n    query_value: z.string().describe(\"Query parameter value\"),\n  }),\n  z.object({\n    type: z.literal(\"s3\"),\n    access_key_id: z.string().describe(\"AWS access key ID\"),\n    secret_access_key: z.string().describe(\"AWS secret access key\"),\n    session_token: z.string().optional().describe(\"AWS session token for temporary credentials (STS)\"),\n    endpoint: z.string().optional().describe(\"Custom S3 endpoint (for non-AWS S3-compatible services)\"),\n    region: z.string().optional().describe(\"AWS region (defaults to env config)\"),\n  }),\n]);\n\nexport type DownloadAuth = z.infer<typeof DownloadAuthSchema>;\n\n/**\n * Options for download operations, including optional authentication.\n */\nexport const DownloadOptionsSchema = z.object({\n  auth: DownloadAuthSchema.optional(),\n});\n\nexport type DownloadOptions = z.infer<typeof DownloadOptionsSchema>;\n\n/**\n * Credential entry for per-request authentication.\n * Associates a URL pattern with authentication credentials.\n */\nexport const WorkflowCredentialSchema = z.object({\n  url_pattern: z.string().describe(\"URL pattern to match (supports glob-style wildcards like https://example.com/*)\"),\n  auth: DownloadAuthSchema,\n});\n\nexport type WorkflowCredential = z.infer<typeof WorkflowCredentialSchema>;\n\nexport const PromptRequestSchema = z.object({\n  prompt: z.record(ComfyNodeSchema),\n  id: z\n    .string()\n    .optional()\n    .default(() => randomUUID()),\n  webhook: z.string().optional(),\n  webhook_v2: z.string().optional(),\n  convert_output: OutputConversionOptionsSchema.optional(),\n  credentials: z\n    .array(WorkflowCredentialSchema)\n    .optional()\n    .describe(\"Per-request credentials for protected URLs, matched by URL pattern\"),\n});\n\nexport interface StorageProvider {\n  /**\n   * The key in a request body that indicates this storage provider should be used for upload.\n   * Must be unique across all storage providers, and must be included if `uploadFile` is implemented.\n   */\n  requestBodyUploadKey?: string;\n\n  /**\n   * The zod schema for the request body field that indicates this storage 
provider should\n   * be used for upload. Must be included if `requestBodyUploadKey` is defined.\n   */\n  requestBodyUploadSchema?: z.ZodObject<any, any>;\n\n  /**\n   * Takes the inputs from the request body and generates a URL for uploading.\n   * @param inputs\n   */\n  createUrl(inputs: any): string;\n\n  /**\n   * Test if the given URL can be handled by this storage provider.\n   * @param url URL to test\n   */\n  testUrl(url: string): boolean;\n\n  /**\n   * Upload a file to the given URL.\n   * @param url URL to upload to\n   * @param fileOrPath File path or buffer to upload\n   * @param contentType MIME type of the file\n   *\n   * @returns An Upload object that can be used to start and abort the upload.\n   */\n  uploadFile?(\n    url: string,\n    fileOrPath: string | Buffer,\n    contentType: string\n  ): Upload;\n\n  /**\n   * Download a file from the given URL to the specified output directory.\n   * @param url URL to download from\n   * @param outputDir Directory to save the downloaded file\n   * @param filenameOverride Optional filename to use instead of auto-generated one\n   * @param options Optional download options including authentication\n   *\n   * @resolves The path to the downloaded file\n   */\n  downloadFile?(\n    url: string,\n    outputDir: string,\n    filenameOverride?: string,\n    options?: DownloadOptions\n  ): Promise<string>;\n\n  /**\n   * Validate authentication credentials without downloading the file.\n   * Used to verify credentials on cache hits for auth-required URLs.\n   * @param url URL to validate access to\n   * @param options Download options containing authentication\n   *\n   * @resolves void if auth is valid\n   * @throws Error if auth is invalid or access is denied\n   */\n  validateAuth?(url: string, options: DownloadOptions): Promise<void>;\n}\n\nexport const DownloadRequestSchema = z.object({\n  url: z.string().url(),\n  model_type: z.string(),\n  filename: z.string().optional(),\n  wait: z.boolean().optional().default(false),\n  auth: DownloadAuthSchema.optional().describe(\"Optional authentication for accessing protected resources\"),\n});\n\nexport type DownloadRequest = z.infer<typeof DownloadRequestSchema>;\n\nexport const DownloadResponseSchema = z.object({\n  url: z.string(),\n  model_type: z.string(),\n  filename: z.string(),\n  status: z.enum([\"started\", \"completed\"]),\n  size: z.number().optional(),\n  duration: z.number().optional(),\n});\n\nexport type DownloadResponse = z.infer<typeof DownloadResponseSchema>;\n\nexport const DownloadErrorResponseSchema = z.object({\n  error: z.string(),\n});\n"
  },
  {
    "path": "src/utils.ts",
    "content": "import config from \"./config\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport path from \"path\";\nimport fs from \"fs\";\nimport { ZodObject, ZodRawShape, ZodTypeAny, ZodDefault } from \"zod\";\nimport { fetch, RequestInit, Response } from \"undici\";\nimport { getProxyDispatcher } from \"./proxy-dispatcher\";\nimport { execFile } from \"child_process\";\nimport { promisify } from \"util\";\nimport getStorageManager from \"./remote-storage-manager\";\nimport crypto from \"crypto\";\n\nconst execFilePromise = promisify(execFile);\n\nexport async function sleep(ms: number): Promise<void> {\n  return new Promise((resolve) => setTimeout(resolve, ms));\n}\n\nexport function isValidUrl(str: string): boolean {\n  try {\n    new URL(str);\n  } catch (_) {\n    return false;\n  }\n  return (\n    str.startsWith(\"http://\") ||\n    str.startsWith(\"https://\") ||\n    str.startsWith(\"s3://\")\n  );\n}\n\nexport function zodToMarkdownTable(schema: ZodObject<ZodRawShape>): string {\n  const shape = schema.shape;\n  let markdownTable = \"| Field | Type | Description | Default |\\n|-|-|-|-|\\n\";\n\n  for (const [key, value] of Object.entries(shape)) {\n    const fieldName = key;\n    const { type: fieldType, isOptional } = getZodTypeName(value);\n    const fieldDescription = getZodDescription(value);\n    const defaultValue = getZodDefault(value);\n\n    markdownTable += `| ${fieldName} | ${fieldType}${\n      isOptional ? \"\" : \"\"\n    } | ${fieldDescription} | ${defaultValue || \"**Required**\"} |\\n`;\n  }\n\n  return markdownTable;\n}\n\nfunction getZodTypeName(zodType: ZodTypeAny): {\n  type: string;\n  isOptional: boolean;\n} {\n  let currentType = zodType;\n  let isOptional = false;\n\n  while (currentType instanceof ZodDefault) {\n    currentType = currentType._def.innerType;\n  }\n\n  if (currentType._def.typeName === \"ZodOptional\") {\n    isOptional = true;\n    currentType = currentType._def.innerType;\n  }\n\n  let type: string;\n  switch (currentType._def.typeName) {\n    case \"ZodString\":\n      type = \"string\";\n      break;\n    case \"ZodNumber\":\n      type = \"number\";\n      break;\n    case \"ZodBoolean\":\n      type = \"boolean\";\n      break;\n    case \"ZodArray\":\n      type = `${getZodTypeName(currentType._def.type).type}[]`;\n      break;\n    case \"ZodObject\":\n      type = \"object\";\n      break;\n    case \"ZodEnum\":\n      type = `enum (${(currentType._def.values as string[])\n        .map((val: string) => `\\`${val}\\``)\n        .join(\", \")})`;\n      break;\n    case \"ZodUnion\":\n      type = currentType._def.options\n        .map((opt: any) => getZodTypeName(opt).type)\n        .join(\", \");\n      break;\n    case \"ZodLiteral\":\n      type = `literal (${JSON.stringify(currentType._def.value)})`;\n      break;\n    default:\n      type = currentType._def.typeName.replace(\"Zod\", \"\").toLowerCase();\n  }\n\n  return { type, isOptional };\n}\n\nfunction getZodDescription(zodType: ZodTypeAny): string {\n  let currentType: ZodTypeAny | undefined = zodType;\n  while (currentType) {\n    if (currentType.description) {\n      return currentType.description;\n    }\n    currentType = currentType._def.innerType;\n  }\n  return \"\";\n}\n\nfunction getZodDefault(zodType: ZodTypeAny): string {\n  if (zodType instanceof ZodDefault) {\n    const defaultValue = zodType._def.defaultValue();\n    return JSON.stringify(defaultValue);\n  }\n  return \"-\";\n}\n\n/**\n * Converts a snake_case string to UpperCamelCase\n */\nexport 
function snakeCaseToUpperCamelCase(str: string): string {\n  const camel = str.replace(/(_\\w)/g, (match) => match[1].toUpperCase());\n  const upperCamel = camel.charAt(0).toUpperCase() + camel.slice(1);\n  return upperCamel;\n}\n\nexport function camelCaseToSnakeCase(str: string): string {\n  return str\n    .replace(/([A-Z])/g, \"_$1\")\n    .toLowerCase()\n    .replace(/^_/, \"\");\n}\n\nexport async function fetchWithRetries(\n  url: string,\n  options: RequestInit,\n  maxRetries: number,\n  log: FastifyBaseLogger\n): Promise<Response> {\n  let retries = 0;\n  while (retries < maxRetries) {\n    try {\n      const response = await fetch(\n        url,\n        options.dispatcher ? options : { ...options, dispatcher: getProxyDispatcher() }\n      );\n      if (response.ok) {\n        return response;\n      }\n      log.error(\n        `Failed to fetch ${url}: ${response.status} ${response.statusText}`\n      );\n    } catch (error) {\n      log.error(`Error fetching ${url}: ${error}`);\n    }\n    retries++;\n    await sleep(1000);\n  }\n  throw new Error(`Failed to fetch ${url} after ${maxRetries} retries`);\n}\n\nexport async function setDeletionCost(cost: number): Promise<void> {\n  if (!config.saladMetadata) {\n    // If not running in Salad environment, skip setting deletion cost\n    return;\n  }\n  try {\n    await fetch(`http://169.254.169.254/v1/deletion-cost`, {\n      method: \"PUT\",\n      headers: {\n        \"Content-Type\": \"application/json\",\n        Metadata: \"true\",\n      },\n      body: JSON.stringify({ deletion_cost: cost }),\n      dispatcher: getProxyDispatcher(),\n    });\n  } catch (error) {\n    console.error(\"Error setting deletion cost:\", error);\n  }\n}\n\nfunction isPythonVenvActive(): boolean {\n  // Check for VIRTUAL_ENV environment variable (most common indicator)\n  if (process.env.VIRTUAL_ENV) {\n    return true;\n  }\n\n  // Check for CONDA_DEFAULT_ENV (for conda environments)\n  if (process.env.CONDA_DEFAULT_ENV) {\n    return true;\n  }\n\n  // Additional check: VIRTUAL_ENV_PROMPT is set when venv is active\n  if (process.env.VIRTUAL_ENV_PROMPT) {\n    return true;\n  }\n\n  return false;\n}\n\nexport async function installCustomNode(\n  nodeNameOrUrl: string,\n  log: FastifyBaseLogger\n): Promise<void> {\n  const storageManager = getStorageManager();\n  const isUrl =\n    nodeNameOrUrl.startsWith(\"http://\") || nodeNameOrUrl.startsWith(\"https://\");\n  if (!isUrl && config.comfyCLIVersion) {\n    // Install from ComfyUI community nodes if comfy cli is available\n    log.info(`Installing custom node ${nodeNameOrUrl} using comfy cli`);\n    await execFilePromise(\"comfy\", [\n      \"node\",\n      \"install\",\n      nodeNameOrUrl,\n      \"--fast-deps\",\n      \"--exit-on-fail\",\n    ]);\n  } else if (!isUrl) {\n    throw new Error(\n      \"ComfyUI CLI is not available to install custom node by name\"\n    );\n  } else {\n    const customNodesDir = path.join(config.comfyDir, \"custom_nodes\");\n    const customNodePath = await storageManager.downloadRepo(\n      nodeNameOrUrl,\n      customNodesDir\n    );\n    const requirementsPath = path.join(customNodePath, \"requirements.txt\");\n    if (!fs.existsSync(requirementsPath)) {\n      log.info(`No requirements.txt found for ${nodeNameOrUrl}, skipping dependency installation`);\n      return;\n    }\n    const activeVenv = isPythonVenvActive();\n    const args = [\"pip\", \"install\", \"--system\", \"-r\", \"requirements.txt\"];\n    if (activeVenv) {\n      args.splice(2, 1); // 
Remove --system if venv is active\n      log.info(\n        `Installing custom node ${nodeNameOrUrl} in active Python virtual environment`\n      );\n    }\n\n    const cmd = config.uvInstalled ? \"uv\" : (args.shift() as string);\n\n    await execFilePromise(cmd, args, { cwd: customNodePath });\n  }\n}\n\nexport async function aptInstallPackages(\n  packages: string[],\n  log: FastifyBaseLogger\n): Promise<void> {\n  if (packages.length === 0) {\n    return;\n  }\n  await execFilePromise(\"apt-get\", [\"update\"]);\n  log.info(`Installing apt packages: ${packages.join(\", \")}`);\n  await execFilePromise(\"apt-get\", [\"install\", \"-y\", ...packages]);\n}\n\nexport async function pipInstallPackages(\n  packages: string[],\n  log: FastifyBaseLogger\n): Promise<void> {\n  if (packages.length === 0) {\n    return;\n  }\n  const activeVenv = isPythonVenvActive();\n  const args = [\"pip\", \"install\", \"--system\", ...packages];\n  if (activeVenv) {\n    args.splice(2, 1); // Remove --system if venv is active\n    log.info(\n      `Installing pip packages in active Python virtual environment: ${packages.join(\n        \", \"\n      )}`\n    );\n  } else {\n    log.info(`Installing pip packages: ${packages.join(\", \")}`);\n  }\n\n  const cmd = config.uvInstalled ? \"uv\" : (args.shift() as string);\n\n  await execFilePromise(cmd, args);\n}\n\nexport function makeHumanReadableSize(sizeInBytes: number): string {\n  const units = [\"B\", \"KB\", \"MB\", \"GB\", \"TB\"];\n  let size = sizeInBytes;\n  let unitIndex = 0;\n\n  while (size >= 1024 && unitIndex < units.length - 1) {\n    size /= 1024;\n    unitIndex++;\n  }\n\n  return `${size.toFixed(2)} ${units[unitIndex]}`;\n}\n\nexport function hashUrlBase64(url: string, length = 32): string {\n  return crypto\n    .createHash(\"sha256\")\n    .update(url)\n    .digest(\"base64url\") // URL-safe base64\n    .substring(0, length);\n}\n\nexport function getContentTypeFromUrl(url: string): string {\n  const ext = path.extname(new URL(url).pathname).toLowerCase();\n  const mimeTypes: Record<string, string> = {\n    \".jpg\": \"image/jpeg\",\n    \".jpeg\": \"image/jpeg\",\n    \".png\": \"image/png\",\n    \".gif\": \"image/gif\",\n    \".webp\": \"image/webp\",\n    \".svg\": \"image/svg+xml\",\n    \".bmp\": \"image/bmp\",\n    \".tiff\": \"image/tiff\",\n    \".ico\": \"image/x-icon\",\n    \".mp4\": \"video/mp4\",\n    \".mpeg\": \"video/mpeg\",\n    \".webm\": \"video/webm\",\n    \".mov\": \"video/quicktime\",\n    \".avi\": \"video/x-msvideo\",\n    \".mp3\": \"audio/mpeg\",\n    \".wav\": \"audio/wav\",\n    \".ogg\": \"audio/ogg\",\n    \".weba\": \"audio/webm\",\n    \".aac\": \"audio/aac\",\n    \".pdf\": \"application/pdf\",\n    \".doc\": \"application/msword\",\n    \".docx\":\n      \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\",\n    \".xls\": \"application/vnd.ms-excel\",\n    \".xlsx\":\n      \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\",\n    \".ppt\": \"application/vnd.ms-powerpoint\",\n    \".pptx\":\n      \"application/vnd.openxmlformats-officedocument.presentationml.presentation\",\n    \".txt\": \"text/plain\",\n    \".csv\": \"text/csv\",\n    \".html\": \"text/html\",\n    \".rtf\": \"application/rtf\",\n    \".zip\": \"application/zip\",\n    \".tar\": \"application/x-tar\",\n    \".gz\": \"application/gzip\",\n    \".7z\": \"application/x-7z-compressed\",\n    \".rar\": \"application/x-rar-compressed\",\n    \".json\": \"application/json\",\n    \".xml\": 
\"application/xml\",\n    \".js\": \"application/javascript\",\n    \".css\": \"text/css\",\n    \".bin\": \"application/octet-stream\",\n    \".pt\": \"application/x-pytorch\",\n    \".pb\": \"application/x-tensorflow\",\n  };\n\n  return mimeTypes[ext] || \"application/octet-stream\";\n}\n\nexport async function getDirectorySizeInBytes(\n  directoryPath: string\n): Promise<number> {\n  const { stdout } = await execFilePromise(\"du\", [\"-sb\", directoryPath]);\n  const sizeInBytes = parseInt(stdout.split(\"\\t\")[0], 10);\n  return sizeInBytes;\n}\n"
  },
  {
    "path": "src/workflows/index.ts",
    "content": "import config from \"../config\";\nimport { WorkflowTree, isWorkflow } from \"../types\";\nimport fs from \"fs\";\nimport ts from \"typescript\";\nimport pino from \"pino\";\n\nconst log = pino({ level: config.logLevel });\n\nconst workflows: WorkflowTree = {};\n\nconst walk = (dir: string, tree: WorkflowTree) => {\n  const files = fs.readdirSync(dir);\n  files.forEach((file) => {\n    let filePath = `${dir}/${file}`;\n    if (fs.statSync(filePath).isDirectory()) {\n      log.debug({ directory: filePath }, \"Scanning workflow directory\");\n      tree[file] = {};\n      walk(filePath, tree[file] as WorkflowTree);\n    } else {\n      // This is happening at runtime, so if it's .ts we need to compile it\n      const newPath = filePath.replace(\".ts\", \".js\");\n      if (file.endsWith(\".ts\")) {\n        log.debug({ file: filePath }, \"Transpiling TypeScript workflow\");\n        const source = fs.readFileSync(filePath, \"utf8\");\n        const result = ts.transpileModule(source, {\n          compilerOptions: { module: ts.ModuleKind.CommonJS },\n        });\n        // write it a sibling .js file\n        fs.writeFileSync(newPath, result.outputText);\n      } else if (file.endsWith(\".js\")) {\n        // fs.cpSync(filePath, newPath);\n      } else {\n        return;\n      }\n\n      // Eval the file in the current context\n      log.debug({ file: newPath }, \"Evaluating workflow file\");\n      try {\n        const workflow = eval(fs.readFileSync(newPath, \"utf8\"));\n        if (workflow && isWorkflow(workflow)) {\n          const workflowName = file.replace(\".js\", \"\").replace(\".ts\", \"\");\n          tree[workflowName] = workflow;\n          log.info({ workflow: workflowName, file: newPath }, \"Loaded workflow\");\n        } else {\n          log.warn(\n            { file: newPath },\n            \"File does not export a valid Workflow object\"\n          );\n        }\n      } catch (e: any) {\n        log.error(\n          { file: newPath, error: e.message, stack: e.stack },\n          \"Failed to evaluate workflow file\"\n        );\n      }\n    }\n  });\n};\nwalk(config.workflowDir, workflows);\n\nexport default workflows;\n"
  },
  {
    "path": "test/Dockerfile.file-server",
    "content": "FROM node:20-alpine\n\nWORKDIR /app\n\n# Install TypeScript and Node types for compilation\nRUN npm install -g typescript && npm install --save-dev @types/node\n\n# Copy and compile the server file\nCOPY test/file-server.ts ./\nRUN tsc --module commonjs --target es2020 --esModuleInterop --skipLibCheck --types node file-server.ts\n\n# Create storage directory\nRUN mkdir -p /storage\n\n# Expose port\nEXPOSE 8080\n\n# Set default environment variables\nENV PORT=8080\nENV STORAGE_DIR=/storage\n\n# Run the compiled JavaScript\nCMD [\"node\", \"file-server.js\"]"
  },
  {
    "path": "test/core.spec.ts",
    "content": "import { expect, describe, it, beforeAll } from \"vitest\";\nimport path from \"path\";\nimport fs from \"fs\";\nimport { fetch, Agent } from \"undici\";\nimport {\n  sleep,\n  createWebhookListener,\n  submitPrompt,\n  checkImage,\n  waitForServerToBeReady,\n  s3,\n  getAzureContainer,\n  verifyWebhookV2,\n} from \"./test-utils\";\nimport sd15Txt2Img from \"./workflows/sd1.5-txt2img.json\";\nimport sd15Img2Img from \"./workflows/sd1.5-img2img.json\";\nimport sd15MultiOutput from \"./workflows/sd1.5-multi-output.json\";\nimport sd15Parallel2 from \"./workflows/sd1.5-parallel-2.json\";\nimport sd15Parallel3 from \"./workflows/sd1.5-parallel-3.json\";\nimport {\n  CreateBucketCommand,\n  PutObjectCommand,\n  GetObjectCommand,\n  ListObjectsCommand,\n} from \"@aws-sdk/client-s3\";\n\nconst bucketName = \"salad-benchmark-test\";\nconst pngKey = \"test-image.png\";\nconst azureContainerName = \"test-container\";\nconst webhookAddress = \"http://host.docker.internal:1234/webhook\";\n\n// Helper function to convert stream to buffer\nasync function streamToBuffer(\n  readableStream: NodeJS.ReadableStream\n): Promise<Buffer> {\n  const chunks: Buffer[] = [];\n  return new Promise((resolve, reject) => {\n    readableStream.on(\"data\", (chunk) => chunks.push(Buffer.from(chunk)));\n    readableStream.on(\"error\", reject);\n    readableStream.on(\"end\", () => resolve(Buffer.concat(chunks)));\n  });\n}\n\nconst sd15Txt2ImgBatch4 = JSON.parse(JSON.stringify(sd15Txt2Img));\nsd15Txt2ImgBatch4[\"5\"].inputs.batch_size = 4;\n\nconst inputPng = fs.readFileSync(\n  path.join(__dirname, \"input-images\", \"doodle-girl.png\")\n);\n\nconst inputPngBase64 = inputPng.toString(\"base64\");\nsd15Img2Img[\"10\"].inputs.image = inputPngBase64;\n\nconst sd15Img2ImgWithHttpUrl = JSON.parse(JSON.stringify(sd15Img2Img));\nsd15Img2ImgWithHttpUrl[\"10\"].inputs.image =\n  \"https://salad-benchmark-assets.download/coco2017/train2017/000000000009.jpg\";\n\nconst sd15Img2ImgWithS3Url = JSON.parse(JSON.stringify(sd15Img2Img));\nsd15Img2ImgWithS3Url[\"10\"].inputs.image = `s3://${bucketName}/${pngKey}`;\n\nconst sd15Img2ImgWithAzureUrl = JSON.parse(JSON.stringify(sd15Img2Img));\n// Use azurite hostname for Docker network access\nsd15Img2ImgWithAzureUrl[\n  \"10\"\n].inputs.image = `http://azurite:10000/devstoreaccount1/${azureContainerName}/${pngKey}`;\n\nconst sd15Img2ImgWithJpeg = JSON.parse(JSON.stringify(sd15Img2Img));\nconst inputJpeg = fs\n  .readFileSync(path.join(__dirname, \"input-images\", \"food.jpg\"))\n  .toString(\"base64\");\nsd15Img2ImgWithJpeg[\"10\"].inputs.image = inputJpeg;\n\ndescribe(\"Stable Diffusion 1.5\", () => {\n  beforeAll(async () => {\n    await waitForServerToBeReady();\n    await s3.send(\n      new CreateBucketCommand({\n        Bucket: bucketName,\n      })\n    );\n    await s3.send(\n      new PutObjectCommand({\n        Bucket: bucketName,\n        Key: pngKey,\n        Body: inputPng,\n        ContentType: \"image/png\",\n      })\n    );\n    // Purge the HTTP file server before seeding\n    await fetch(`http://localhost:8080/purge`, {\n      method: \"DELETE\",\n    });\n    // Seed the HTTP file server with test image\n    await fetch(`http://localhost:8080/${pngKey}`, {\n      method: \"PUT\",\n      body: inputPng,\n      headers: {\n        \"Content-Type\": \"image/png\",\n      },\n    });\n    // Seed the Azure Blob container with test image\n    const azureContainer = await getAzureContainer(azureContainerName);\n    const blockBlobClient = 
azureContainer.getBlockBlobClient(pngKey);\n    await blockBlobClient.upload(inputPng, inputPng.length, {\n      blobHTTPHeaders: { blobContentType: \"image/png\" },\n    });\n  });\n  describe(\"Return content in response\", () => {\n    it(\"text2image works with 1 image\", async () => {\n      const respBody = await submitPrompt(sd15Txt2Img);\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      await checkImage(respBody.filenames[0], respBody.images[0]);\n    });\n\n    it(\"text2image works with multiple images\", async () => {\n      const respBody = await submitPrompt(sd15Txt2ImgBatch4);\n      expect(respBody.filenames.length).toEqual(4);\n      expect(respBody.images.length).toEqual(4);\n      for (let i = 0; i < respBody.filenames.length; i++) {\n        await checkImage(respBody.filenames[i], respBody.images[i]);\n      }\n    });\n\n    it(\"image2image works with base64 encoded png\", async () => {\n      const respBody = await submitPrompt(sd15Img2Img);\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      await checkImage(respBody.filenames[0], respBody.images[0], {\n        width: 768,\n        height: 768,\n      });\n    });\n\n    it(\"image2image works with base64 encoded jpeg\", async () => {\n      const respBody = await submitPrompt(sd15Img2ImgWithJpeg);\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      await checkImage(respBody.filenames[0], respBody.images[0], {\n        width: 640,\n        height: 480,\n      });\n    });\n\n    it(\"image2image works with http image url\", async () => {\n      const respBody = await submitPrompt(sd15Img2ImgWithHttpUrl);\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      await checkImage(respBody.filenames[0], respBody.images[0], {\n        width: 640,\n        height: 480,\n      });\n    });\n\n    it(\"image2image works with s3 image url\", async () => {\n      const respBody = await submitPrompt(sd15Img2ImgWithS3Url);\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      await checkImage(respBody.filenames[0], respBody.images[0], {\n        width: 768,\n        height: 768,\n      });\n    });\n\n    it(\"image2image works with azure blob image url\", async () => {\n      const respBody = await submitPrompt(sd15Img2ImgWithAzureUrl);\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      await checkImage(respBody.filenames[0], respBody.images[0], {\n        width: 768,\n        height: 768,\n      });\n    });\n\n    it(\"image2image works with hf image url in model repo\", async () => {\n      // First, upload an image to HF model repo to use as source\n      const timestamp = Date.now();\n      const uploadResp = await submitPrompt(sd15Txt2Img, false, undefined, {\n        hf_upload: {\n          repo: \"SaladTechnologies/comfyui-api-integration-testing\",\n          repo_type: \"model\",\n          directory: `test-source-images-${timestamp}`,\n        },\n      });\n\n      // Extract the URL of the uploaded image\n      const hfImageUrl = uploadResp.images[0];\n\n      // Now use this HF URL as input for img2img\n      const sd15Img2ImgWithHfUrl = JSON.parse(JSON.stringify(sd15Img2Img));\n      sd15Img2ImgWithHfUrl[\"10\"].inputs.image = hfImageUrl;\n\n      const respBody = await 
submitPrompt(sd15Img2ImgWithHfUrl);\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      await checkImage(respBody.filenames[0], respBody.images[0], {\n        width: 512,\n        height: 512,\n      });\n    });\n\n    it(\"image2image works with hf image url in dataset repo\", async () => {\n      // First, upload an image to HF dataset repo to use as source\n      const timestamp = Date.now();\n      const uploadResp = await submitPrompt(sd15Txt2Img, false, undefined, {\n        hf_upload: {\n          repo: \"SaladTechnologies/comfyui-api-integration-testing\",\n          repo_type: \"dataset\",\n          directory: `test-source-images-dataset-${timestamp}`,\n        },\n      });\n\n      // Extract the URL of the uploaded image\n      const hfImageUrl = uploadResp.images[0];\n\n      // Now use this HF URL as input for img2img\n      const sd15Img2ImgWithHfDatasetUrl = JSON.parse(\n        JSON.stringify(sd15Img2Img)\n      );\n      sd15Img2ImgWithHfDatasetUrl[\"10\"].inputs.image = hfImageUrl;\n\n      const respBody = await submitPrompt(sd15Img2ImgWithHfDatasetUrl);\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      await checkImage(respBody.filenames[0], respBody.images[0], {\n        width: 512,\n        height: 512,\n      });\n    });\n\n    it(\"image2image works with hf url containing spaces in path\", async () => {\n      // First, upload an image to HF with a directory name containing spaces\n      // This tests that URL-encoded spaces (%20) are properly decoded when downloading\n      const timestamp = Date.now();\n      const uploadResp = await submitPrompt(sd15Txt2Img, false, undefined, {\n        hf_upload: {\n          repo: \"SaladTechnologies/comfyui-api-integration-testing\",\n          repo_type: \"dataset\",\n          directory: `test source images ${timestamp}`, // Directory with spaces\n        },\n      });\n\n      // Extract the URL - it should contain %20 for the spaces\n      const hfImageUrl = uploadResp.images[0];\n      expect(hfImageUrl.includes(\"%20\")).toBeTruthy();\n\n      // Now use this HF URL as input for img2img - this will trigger the download\n      // which requires proper URL decoding to work\n      const sd15Img2ImgWithSpacesUrl = JSON.parse(JSON.stringify(sd15Img2Img));\n      sd15Img2ImgWithSpacesUrl[\"10\"].inputs.image = hfImageUrl;\n\n      const respBody = await submitPrompt(sd15Img2ImgWithSpacesUrl);\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      await checkImage(respBody.filenames[0], respBody.images[0], {\n        width: 512,\n        height: 512,\n      });\n    });\n\n    it(\"works if the workflow has multiple output nodes\", async () => {\n      const respBody = await submitPrompt(sd15MultiOutput);\n      expect(respBody.filenames.length).toEqual(2);\n      expect(respBody.images.length).toEqual(2);\n    });\n\n    it(\"works if there are 2 parallel, non-interrelated workflows (also tests http model download)\", async () => {\n      const respBody = await submitPrompt(sd15Parallel2);\n      expect(respBody.filenames.length).toEqual(2);\n      expect(respBody.images.length).toEqual(2);\n    });\n\n    it(\"works if there are 3 parallel, non-interrelated workflows (also tests hf model download)\", async () => {\n      const respBody = await submitPrompt(sd15Parallel3);\n      expect(respBody.filenames.length).toEqual(3);\n      
expect(respBody.images.length).toEqual(3);\n    });\n\n    it(\"can convert to jpeg\", async () => {\n      const respBody = await submitPrompt(sd15Txt2Img, false, {\n        format: \"jpeg\",\n      });\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      expect(respBody.filenames[0].endsWith(\".jpeg\")).toBeTruthy();\n      await checkImage(respBody.filenames[0], respBody.images[0]);\n    });\n\n    it(\"can convert to webp\", async () => {\n      const respBody = await submitPrompt(sd15Txt2Img, false, {\n        format: \"webp\",\n      });\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      expect(respBody.filenames[0].endsWith(\".webp\")).toBeTruthy();\n      await checkImage(respBody.filenames[0], respBody.images[0]);\n    });\n  });\n\n  describe(\"Return content in webhook\", () => {\n    it(\"text2image works with 1 image\", async () => {\n      let expected = 1;\n      const webhook = await createWebhookListener(async (body) => {\n        expected--;\n        const { id, filename, image } = body;\n        expect(id).toEqual(reqId);\n        await checkImage(filename, image);\n      });\n      const { id: reqId } = await submitPrompt(sd15Txt2Img, true);\n      while (expected > 0) {\n        await sleep(100);\n      }\n      await webhook.close();\n    });\n\n    it(\"text2image works with multiple images\", async () => {\n      let expected = 4;\n      const webhook = await createWebhookListener(async (body) => {\n        expected--;\n        const { id, filename, image } = body;\n        expect(id).toEqual(reqId);\n        await checkImage(filename, image);\n      });\n      const { id: reqId } = await submitPrompt(sd15Txt2ImgBatch4, true);\n      while (expected > 0) {\n        await sleep(100);\n      }\n      await webhook.close();\n    });\n\n    it(\"image2image works with base64 encoded images\", async () => {\n      let expected = 1;\n      const webhook = await createWebhookListener(async (body) => {\n        expected--;\n        const { id, filename, image } = body;\n        expect(id).toEqual(reqId);\n        await checkImage(filename, image, {\n          width: 768,\n          height: 768,\n        });\n      });\n      const { id: reqId } = await submitPrompt(sd15Img2Img, true);\n      while (expected > 0) {\n        await sleep(100);\n      }\n      await webhook.close();\n    });\n  });\n\n  describe(\"Return content in webhook - v2\", () => {\n    const submitPromptWebhookV2 = async (prompt: any, upload?: any) => {\n      return submitPrompt(prompt, false, undefined, upload, true);\n    };\n    it(\"text2image works with 1 image\", async () => {\n      let expected = 1;\n      const responses: any[] = [];\n      const webhook = await createWebhookListener(async (body, headers) => {\n        responses.push({ body, headers });\n        expected--;\n      });\n      const { id: reqId } = await submitPromptWebhookV2(sd15Txt2Img);\n      while (expected > 0) {\n        await sleep(100);\n      }\n      await webhook.close();\n      for (const resp of responses) {\n        expect(\n          verifyWebhookV2(JSON.stringify(resp.body), resp.headers)\n        ).toEqual(true);\n        expect(resp.body.id).toEqual(reqId);\n        expect(resp.headers[\"webhook-id\"]).toEqual(reqId);\n        expect(resp.body.filenames.length).toEqual(1);\n        expect(resp.body.images.length).toEqual(1);\n        await checkImage(resp.body.filenames[0], 
resp.body.images[0]);\n      }\n    });\n\n    it(\"text2image works with multiple images\", async () => {\n      let expected = 1;\n      const responses: any[] = [];\n      const webhook = await createWebhookListener(async (body, headers) => {\n        responses.push({ body, headers });\n        expected--;\n      });\n      const { id: reqId } = await submitPromptWebhookV2(sd15Txt2ImgBatch4);\n      while (expected > 0) {\n        await sleep(100);\n      }\n      await webhook.close();\n      for (const resp of responses) {\n        expect(\n          verifyWebhookV2(JSON.stringify(resp.body), resp.headers)\n        ).toEqual(true);\n        expect(resp.body.id).toEqual(reqId);\n        expect(resp.headers[\"webhook-id\"]).toEqual(reqId);\n        expect(resp.body.filenames.length).toEqual(4);\n        expect(resp.body.images.length).toEqual(4);\n        for (let i = 0; i < resp.body.filenames.length; i++) {\n          await checkImage(resp.body.filenames[i], resp.body.images[i]);\n        }\n      }\n    });\n\n    it(\"image2image works with base64 encoded images\", async () => {\n      let expected = 1;\n      const responses: any[] = [];\n      const webhook = await createWebhookListener(async (body, headers) => {\n        responses.push({ body, headers });\n        expected--;\n      });\n      const { id: reqId } = await submitPromptWebhookV2(sd15Img2Img);\n      while (expected > 0) {\n        await sleep(100);\n      }\n      await webhook.close();\n      for (const resp of responses) {\n        expect(\n          verifyWebhookV2(JSON.stringify(resp.body), resp.headers)\n        ).toEqual(true);\n        expect(resp.body.id).toEqual(reqId);\n        expect(resp.headers[\"webhook-id\"]).toEqual(reqId);\n        expect(resp.body.filenames.length).toEqual(1);\n        expect(resp.body.images.length).toEqual(1);\n        await checkImage(resp.body.filenames[0], resp.body.images[0], {\n          width: 768,\n          height: 768,\n        });\n      }\n    });\n\n    it(\"works with s3 uploads\", async () => {\n      let expected = 1;\n      const responses: any[] = [];\n      const webhook = await createWebhookListener(async (body, headers) => {\n        expected--;\n        responses.push({ body, headers });\n      });\n      const { id: reqId } = await submitPromptWebhookV2(sd15Img2Img, {\n        bucket: bucketName,\n        prefix: \"sd15-img2img/\",\n      });\n      while (expected > 0) {\n        await sleep(100);\n      }\n      await webhook.close();\n      for (const resp of responses) {\n        expect(\n          verifyWebhookV2(JSON.stringify(resp.body), resp.headers)\n        ).toEqual(true);\n        expect(resp.body.id).toEqual(reqId);\n        expect(resp.headers[\"webhook-id\"]).toEqual(reqId);\n        expect(resp.body.filenames.length).toEqual(1);\n        expect(resp.body.images.length).toEqual(1);\n        expect(\n          resp.body.images[0].startsWith(\"s3://\") &&\n            resp.body.images[0].endsWith(\".png\")\n        ).toBeTruthy();\n        const s3Url = new URL(resp.body.images[0]);\n        const bucket = s3Url.hostname;\n        const key = s3Url.pathname.slice(1);\n        const s3Resp = await s3.send(\n          new GetObjectCommand({\n            Bucket: bucket,\n            Key: key,\n          })\n        );\n        const imageBuffer = Buffer.from(\n          await s3Resp.Body!.transformToByteArray()\n        );\n        await checkImage(key, imageBuffer.toString(\"base64\"), {\n          width: 768,\n          height: 768,\n        
});\n      }\n    });\n  });\n\n  describe(\"Upload to S3 and return S3 URL\", () => {\n    it(\"text2image works with 1 image\", async () => {\n      const respBody = await submitPrompt(sd15Txt2Img, false, undefined, {\n        bucket: bucketName,\n        prefix: \"sd15-txt2img/\",\n        async: false,\n      });\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      expect(\n        respBody.images[0].startsWith(\"s3://\") &&\n          respBody.images[0].endsWith(\".png\")\n      ).toBeTruthy();\n      const s3Url = new URL(respBody.images[0]);\n      const bucket = s3Url.hostname;\n      const key = s3Url.pathname.slice(1);\n      const s3Resp = await s3.send(\n        new GetObjectCommand({\n          Bucket: bucket,\n          Key: key,\n        })\n      );\n      const imageBuffer = Buffer.from(\n        await s3Resp.Body!.transformToByteArray()\n      );\n      await checkImage(key, imageBuffer.toString(\"base64\"));\n    });\n\n    it(\"works with convert_output to webp (fixes SharedArrayBuffer issue #121)\", async () => {\n      const respBody = await submitPrompt(\n        sd15Txt2Img,\n        false,\n        { format: \"webp\" },\n        {\n          bucket: bucketName,\n          prefix: \"sd15-txt2img-convert-webp/\",\n          async: false,\n        }\n      );\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      expect(respBody.filenames[0].endsWith(\".webp\")).toBeTruthy();\n      expect(\n        respBody.images[0].startsWith(\"s3://\") &&\n          respBody.images[0].endsWith(\".webp\")\n      ).toBeTruthy();\n      const s3Url = new URL(respBody.images[0]);\n      const bucket = s3Url.hostname;\n      const key = s3Url.pathname.slice(1);\n      const s3Resp = await s3.send(\n        new GetObjectCommand({\n          Bucket: bucket,\n          Key: key,\n        })\n      );\n      const imageBuffer = Buffer.from(\n        await s3Resp.Body!.transformToByteArray()\n      );\n      await checkImage(key, imageBuffer.toString(\"base64\"));\n    });\n\n    it(\"works with convert_output to jpeg (fixes SharedArrayBuffer issue #121)\", async () => {\n      const respBody = await submitPrompt(\n        sd15Txt2Img,\n        false,\n        { format: \"jpeg\" },\n        {\n          bucket: bucketName,\n          prefix: \"sd15-txt2img-convert-jpeg/\",\n          async: false,\n        }\n      );\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      expect(respBody.filenames[0].endsWith(\".jpeg\")).toBeTruthy();\n      expect(\n        respBody.images[0].startsWith(\"s3://\") &&\n          respBody.images[0].endsWith(\".jpeg\")\n      ).toBeTruthy();\n      const s3Url = new URL(respBody.images[0]);\n      const bucket = s3Url.hostname;\n      const key = s3Url.pathname.slice(1);\n      const s3Resp = await s3.send(\n        new GetObjectCommand({\n          Bucket: bucket,\n          Key: key,\n        })\n      );\n      const imageBuffer = Buffer.from(\n        await s3Resp.Body!.transformToByteArray()\n      );\n      await checkImage(key, imageBuffer.toString(\"base64\"));\n    });\n\n    it(\"text2image works with multiple images\", async () => {\n      const respBody = await submitPrompt(sd15Txt2ImgBatch4, false, undefined, {\n        bucket: bucketName,\n        prefix: \"sd15-txt2img-batch4/\",\n        async: false,\n      });\n      expect(respBody.filenames.length).toEqual(4);\n      
expect(respBody.images.length).toEqual(4);\n      for (let i = 0; i < respBody.filenames.length; i++) {\n        expect(\n          respBody.images[i].startsWith(\"s3://\") &&\n            respBody.images[i].endsWith(\".png\")\n        ).toBeTruthy();\n        const s3Url = new URL(respBody.images[i]);\n        const bucket = s3Url.hostname;\n        const key = s3Url.pathname.slice(1);\n        const s3Resp = await s3.send(\n          new GetObjectCommand({\n            Bucket: bucket,\n            Key: key,\n          })\n        );\n        const imageBuffer = Buffer.from(\n          await s3Resp.Body!.transformToByteArray()\n        );\n        await checkImage(respBody.filenames[i], imageBuffer.toString(\"base64\"));\n      }\n    });\n  });\n\n  describe(\"Upload to S3 Asynchronously\", () => {\n    it(\"text2image works with 1 image\", async () => {\n      const respBody = await submitPrompt(sd15Txt2Img, false, undefined, {\n        bucket: bucketName,\n        prefix: \"sd15-txt2img-async/\",\n        async: true,\n      });\n      expect(respBody.status).toEqual(\"ok\");\n\n      const listCmd = new ListObjectsCommand({\n        Bucket: bucketName,\n        Prefix: \"sd15-txt2img-async/\",\n      });\n\n      let outputs: string[] = [];\n      while (outputs.length < 1) {\n        const page = await s3.send(listCmd);\n        outputs = page.Contents?.map((obj) => obj.Key!) || [];\n        if (outputs.length < 1) {\n          await sleep(1000);\n        }\n      }\n\n      expect(outputs.length).toEqual(1);\n      const s3Resp = await s3.send(\n        new GetObjectCommand({\n          Bucket: bucketName,\n          Key: outputs[0],\n        })\n      );\n      const imageBuffer = Buffer.from(\n        await s3Resp.Body!.transformToByteArray()\n      );\n      await checkImage(outputs[0]!, imageBuffer.toString(\"base64\"));\n    });\n\n    it(\"text2image works with multiple images\", async () => {\n      const respBody = await submitPrompt(sd15Txt2ImgBatch4, false, undefined, {\n        bucket: bucketName,\n        prefix: \"sd15-txt2img-batch4-async/\",\n        async: true,\n      });\n      expect(respBody.status).toEqual(\"ok\");\n\n      const listCmd = new ListObjectsCommand({\n        Bucket: bucketName,\n        Prefix: \"sd15-txt2img-batch4-async/\",\n      });\n\n      let outputs: string[] = [];\n      while (outputs.length < 4) {\n        const page = await s3.send(listCmd);\n        outputs = page.Contents?.map((obj) => obj.Key!) 
|| [];\n\n        if (outputs.length < 4) {\n          await sleep(1000);\n        }\n      }\n\n      expect(outputs.length).toEqual(4);\n      for (const key of outputs) {\n        const s3Resp = await s3.send(\n          new GetObjectCommand({\n            Bucket: bucketName,\n            Key: key,\n          })\n        );\n        const imageBuffer = Buffer.from(\n          await s3Resp.Body!.transformToByteArray()\n        );\n        await checkImage(key!, imageBuffer.toString(\"base64\"));\n      }\n    });\n  });\n\n  describe(\"Upload to Azure Blob and return Blob URL\", () => {\n    it(\"text2image works with 1 image\", async () => {\n      const respBody = await submitPrompt(sd15Txt2Img, false, undefined, {\n        azure_blob_upload: {\n          container: azureContainerName,\n          blob_prefix: \"sd15-txt2img/\",\n        },\n      });\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      expect(\n        respBody.images[0].includes(`/${azureContainerName}/sd15-txt2img/`) &&\n          respBody.images[0].endsWith(\".png\")\n      ).toBeTruthy();\n\n      // Verify the image was uploaded to Azure Blob\n      const azureUrl = respBody.images[0];\n      const urlParts = new URL(azureUrl);\n      let pathParts = urlParts.pathname.split(\"/\").filter((p) => p);\n\n      // For Azurite/emulator URLs in path-style format (http://host:port/accountname/container/blob)\n      // vs Azure URLs in host-style format (https://accountname.blob.core.windows.net/container/blob)\n      if (!urlParts.hostname.includes(\".blob.core.windows.net\")) {\n        // Path-style URL - first part is account name, skip it\n        pathParts = pathParts.slice(1);\n      }\n\n      const containerName = pathParts[0];\n      const blobName = pathParts.slice(1).join(\"/\");\n\n      const azureContainer = await getAzureContainer(containerName);\n      const blockBlobClient = azureContainer.getBlockBlobClient(blobName);\n      const downloadResponse = await blockBlobClient.download();\n      const imageBuffer = await streamToBuffer(\n        downloadResponse.readableStreamBody!\n      );\n      await checkImage(respBody.filenames[0], imageBuffer.toString(\"base64\"));\n    });\n\n    it(\"text2image works with multiple images\", async () => {\n      const respBody = await submitPrompt(sd15Txt2ImgBatch4, false, undefined, {\n        azure_blob_upload: {\n          container: azureContainerName,\n          blob_prefix: \"sd15-txt2img-batch4/\",\n        },\n      });\n      expect(respBody.filenames.length).toEqual(4);\n      expect(respBody.images.length).toEqual(4);\n\n      for (let i = 0; i < respBody.filenames.length; i++) {\n        expect(\n          respBody.images[i].includes(\n            `/${azureContainerName}/sd15-txt2img-batch4/`\n          ) && respBody.images[i].endsWith(\".png\")\n        ).toBeTruthy();\n\n        // Verify each image was uploaded to Azure Blob\n        const azureUrl = respBody.images[i];\n        const urlParts = new URL(azureUrl);\n        let pathParts = urlParts.pathname.split(\"/\").filter((p) => p);\n\n        // For Azurite/emulator URLs in path-style format (http://host:port/accountname/container/blob)\n        // vs Azure URLs in host-style format (https://accountname.blob.core.windows.net/container/blob)\n        if (!urlParts.hostname.includes(\".blob.core.windows.net\")) {\n          // Path-style URL - first part is account name, skip it\n          pathParts = pathParts.slice(1);\n        }\n\n        
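// pathParts is now [container, ...blobPathSegments] for both URL styles\n        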
const containerName = pathParts[0];\n        const blobName = pathParts.slice(1).join(\"/\");\n\n        const azureContainer = await getAzureContainer(containerName);\n        const blockBlobClient = azureContainer.getBlockBlobClient(blobName);\n        const downloadResponse = await blockBlobClient.download();\n        const imageBuffer = await streamToBuffer(\n          downloadResponse.readableStreamBody!\n        );\n        await checkImage(respBody.filenames[i], imageBuffer.toString(\"base64\"));\n      }\n    });\n  });\n\n  describe(\"Upload to HuggingFace and return HF URL\", () => {\n    it(\"text2image works with dataset repo\", async () => {\n      const respBody = await submitPrompt(sd15Txt2Img, false, undefined, {\n        hf_upload: {\n          repo: \"SaladTechnologies/comfyui-api-integration-testing\",\n          repo_type: \"dataset\",\n          directory: \"test-outputs\",\n        },\n      });\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      expect(\n        respBody.images[0].startsWith(\n          \"https://huggingface.co/datasets/SaladTechnologies/comfyui-api-integration-testing/resolve/main/test-outputs/\"\n        ) && respBody.images[0].endsWith(\".png\")\n      ).toBeTruthy();\n    });\n\n    it(\"text2image works with multiple images in dataset repo\", async () => {\n      const respBody = await submitPrompt(sd15Txt2ImgBatch4, false, undefined, {\n        hf_upload: {\n          repo: \"SaladTechnologies/comfyui-api-integration-testing\",\n          repo_type: \"dataset\",\n          directory: \"test-outputs-batch\",\n        },\n      });\n      expect(respBody.filenames.length).toEqual(4);\n      expect(respBody.images.length).toEqual(4);\n      for (let i = 0; i < respBody.filenames.length; i++) {\n        expect(\n          respBody.images[i].startsWith(\n            \"https://huggingface.co/datasets/SaladTechnologies/comfyui-api-integration-testing/resolve/main/test-outputs-batch/\"\n          ) && respBody.images[i].endsWith(\".png\")\n        ).toBeTruthy();\n      }\n    });\n  });\n\n  describe(\"Upload to HuggingFace Asynchronously\", () => {\n    it(\"text2image works with 1 image\", async () => {\n      const timestamp = Date.now();\n      const directory = `async-test-${timestamp}`;\n      const respBody = await submitPrompt(sd15Txt2Img, false, undefined, {\n        hf_upload: {\n          repo: \"SaladTechnologies/comfyui-api-integration-testing\",\n          repo_type: \"dataset\",\n          directory,\n          async: true,\n        },\n      });\n      expect(respBody.status).toEqual(\"ok\");\n\n      // Poll HF repo for the uploaded file\n      let fileExists = false;\n      let attempts = 0;\n      let fileUrl = \"\";\n\n      while (!fileExists && attempts < 20) {\n        // We need to check if any file exists in the directory\n        // HF API endpoint for listing files: https://huggingface.co/api/datasets/{repo}/tree/{revision}/{path}\n        const apiUrl = `https://huggingface.co/api/datasets/SaladTechnologies/comfyui-api-integration-testing/tree/main/${directory}`;\n\n        try {\n          const response = await fetch(apiUrl, {\n            headers: {\n              Authorization: `Bearer ${process.env.HF_TOKEN}`,\n            },\n          });\n\n          if (response.ok) {\n            const files = (await response.json()) as any[];\n            if (files.length > 0) {\n              fileExists = true;\n              // Construct the file URL\n              const fileName = 
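/* HF tree API entries carry repo-relative paths */ 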
files[0].path.split(\"/\").pop();\n              fileUrl = `https://huggingface.co/datasets/SaladTechnologies/comfyui-api-integration-testing/resolve/main/${directory}/${fileName}`;\n            }\n          }\n        } catch (error) {\n          // Directory might not exist yet\n        }\n\n        if (!fileExists) {\n          await sleep(2000);\n          attempts++;\n        }\n      }\n\n      expect(fileExists).toBeTruthy();\n\n      // Verify the file can be downloaded\n      const downloadResponse = await fetch(fileUrl, {\n        headers: {\n          Authorization: `Bearer ${process.env.HF_TOKEN}`,\n        },\n      });\n      expect(downloadResponse.ok).toBeTruthy();\n    });\n\n    it(\"text2image works with multiple images\", async () => {\n      const timestamp = Date.now();\n      const directory = `async-batch-test-${timestamp}`;\n      const respBody = await submitPrompt(sd15Txt2ImgBatch4, false, undefined, {\n        hf_upload: {\n          repo: \"SaladTechnologies/comfyui-api-integration-testing\",\n          repo_type: \"dataset\",\n          directory,\n          async: true,\n        },\n      });\n      expect(respBody.status).toEqual(\"ok\");\n\n      // Poll HF repo for the uploaded files\n      let fileCount = 0;\n      let attempts = 0;\n\n      while (fileCount < 4 && attempts < 20) {\n        const apiUrl = `https://huggingface.co/api/datasets/SaladTechnologies/comfyui-api-integration-testing/tree/main/${directory}`;\n\n        try {\n          const response = await fetch(apiUrl, {\n            headers: {\n              Authorization: `Bearer ${process.env.HF_TOKEN}`,\n            },\n          });\n\n          if (response.ok) {\n            const files = (await response.json()) as any[];\n            fileCount = files.length;\n          }\n        } catch (error) {\n          // Directory might not exist yet\n        }\n\n        if (fileCount < 4) {\n          await sleep(2000);\n          attempts++;\n        }\n      }\n\n      expect(fileCount).toEqual(4);\n    });\n  });\n\n  describe(\"Upload to Azure Blob Asynchronously\", () => {\n    it(\"text2image works with 1 image\", async () => {\n      // Use timestamp to make prefix unique per test run\n      const timestamp = Date.now();\n      const uniquePrefix = `sd15-txt2img-async-${timestamp}/`;\n\n      const respBody = await submitPrompt(sd15Txt2Img, false, undefined, {\n        azure_blob_upload: {\n          container: azureContainerName,\n          blob_prefix: uniquePrefix,\n          async: true,\n        },\n      });\n      expect(respBody.status).toEqual(\"ok\");\n\n      // Wait for async upload to complete\n      const azureContainer = await getAzureContainer(azureContainerName);\n      let blobs: string[] = [];\n      let attempts = 0;\n      while (blobs.length < 1 && attempts < 10) {\n        blobs = [];\n        for await (const blob of azureContainer.listBlobsFlat({\n          prefix: uniquePrefix,\n        })) {\n          blobs.push(blob.name);\n        }\n        if (blobs.length < 1) {\n          await sleep(1000);\n        }\n        attempts++;\n      }\n\n      expect(blobs.length).toEqual(1);\n\n      // Verify the uploaded image\n      const blockBlobClient = azureContainer.getBlockBlobClient(blobs[0]);\n      const downloadResponse = await blockBlobClient.download();\n      const imageBuffer = await streamToBuffer(\n        downloadResponse.readableStreamBody!\n      );\n      await checkImage(blobs[0], imageBuffer.toString(\"base64\"));\n    });\n\n    it(\"text2image 
works with multiple images\", async () => {\n      // Use timestamp to make prefix unique per test run\n      const timestamp = Date.now();\n      const uniquePrefix = `sd15-txt2img-batch4-async-${timestamp}/`;\n\n      const respBody = await submitPrompt(sd15Txt2ImgBatch4, false, undefined, {\n        azure_blob_upload: {\n          container: azureContainerName,\n          blob_prefix: uniquePrefix,\n          async: true,\n        },\n      });\n      expect(respBody.status).toEqual(\"ok\");\n\n      // Wait for async uploads to complete\n      let blobs: string[] = [];\n      let attempts = 0;\n      while (blobs.length < 4 && attempts < 10) {\n        await sleep(1000);\n        blobs = [];\n        const azureContainer = await getAzureContainer(azureContainerName);\n        for await (const blob of azureContainer.listBlobsFlat({\n          prefix: uniquePrefix,\n        })) {\n          blobs.push(blob.name);\n        }\n        attempts++;\n      }\n\n      expect(blobs.length).toEqual(4);\n\n      // Verify each uploaded image\n      for (const blobName of blobs) {\n        const azureContainer = await getAzureContainer(azureContainerName);\n        const blockBlobClient = azureContainer.getBlockBlobClient(blobName);\n        const downloadResponse = await blockBlobClient.download();\n        const imageBuffer = await streamToBuffer(\n          downloadResponse.readableStreamBody!\n        );\n        await checkImage(blobName, imageBuffer.toString(\"base64\"));\n      }\n    });\n  });\n\n  describe(\"Upload to HTTP file server and return HTTP URL\", () => {\n    it(\"text2image works with 1 image\", async () => {\n      const respBody = await submitPrompt(sd15Txt2Img, false, undefined, {\n        http_upload: {\n          url_prefix: \"http://file-server:8080\",\n        },\n      });\n      expect(respBody.filenames.length).toEqual(1);\n      expect(respBody.images.length).toEqual(1);\n      expect(\n        respBody.images[0].startsWith(\"http://file-server:8080/\") &&\n          respBody.images[0].endsWith(\".png\")\n      ).toBeTruthy();\n\n      // Verify the image was uploaded to the HTTP server\n      const httpUrl = respBody.images[0].replace(\"file-server\", \"localhost\");\n      const response = await fetch(httpUrl);\n      expect(response.ok).toBeTruthy();\n      const imageBuffer = Buffer.from(await response.arrayBuffer());\n      await checkImage(respBody.filenames[0], imageBuffer.toString(\"base64\"));\n    });\n\n    it(\"text2image works with multiple images\", async () => {\n      const respBody = await submitPrompt(sd15Txt2ImgBatch4, false, undefined, {\n        http_upload: {\n          url_prefix: \"http://file-server:8080\",\n        },\n      });\n      expect(respBody.filenames.length).toEqual(4);\n      expect(respBody.images.length).toEqual(4);\n\n      for (let i = 0; i < respBody.filenames.length; i++) {\n        expect(\n          respBody.images[i].startsWith(\"http://file-server:8080/\") &&\n            respBody.images[i].endsWith(\".png\")\n        ).toBeTruthy();\n\n        // Verify each image was uploaded to the HTTP server\n        const httpUrl = respBody.images[i].replace(\"file-server\", \"localhost\");\n        const response = await fetch(httpUrl);\n        expect(response.ok).toBeTruthy();\n        const imageBuffer = Buffer.from(await response.arrayBuffer());\n        await checkImage(respBody.filenames[i], imageBuffer.toString(\"base64\"));\n      }\n    });\n  });\n\n  describe(\"Upload to HTTP file server Asynchronously\", () => {\n   
 it(\"text2image works with 1 image\", async () => {\n      const expectedPrefix = \"http-async-txt2img-\";\n      const respBody = await submitPrompt(sd15Txt2Img, false, undefined, {\n        http_upload: {\n          url_prefix: `http://file-server:8080/${expectedPrefix}`,\n          async: true,\n        },\n      });\n      expect(respBody.status).toEqual(\"ok\");\n\n      // Wait for async upload to complete by polling the list endpoint\n      let files: string[] = [];\n      let attempts = 0;\n      while (files.length < 1 && attempts < 10) {\n        const listResp = await fetch(\n          `http://localhost:8080/list?prefix=${expectedPrefix}`\n        );\n        const listData = (await listResp.json()) as { files?: string[] };\n        files = listData.files || [];\n        if (files.length < 1) {\n          await sleep(1000);\n        }\n        attempts++;\n      }\n\n      expect(files.length).toEqual(1);\n\n      // Verify the uploaded image\n      const fileUrl = `http://localhost:8080/${files[0]}`;\n      const response = await fetch(fileUrl);\n      expect(response.ok).toBeTruthy();\n      const imageBuffer = Buffer.from(await response.arrayBuffer());\n      await checkImage(files[0], imageBuffer.toString(\"base64\"));\n    });\n\n    it(\"text2image works with multiple images\", async () => {\n      const expectedPrefix = \"http-async-batch4-\";\n      const respBody = await submitPrompt(sd15Txt2ImgBatch4, false, undefined, {\n        http_upload: {\n          url_prefix: `http://file-server:8080/${expectedPrefix}`,\n          async: true,\n        },\n      });\n      expect(respBody.status).toEqual(\"ok\");\n\n      // Wait for async uploads to complete by polling the list endpoint\n      let files: string[] = [];\n      let attempts = 0;\n      while (files.length < 4 && attempts < 10) {\n        const listResp = await fetch(\n          `http://localhost:8080/list?prefix=${expectedPrefix}`\n        );\n        const listData = (await listResp.json()) as { files?: string[] };\n        files = listData.files || [];\n        if (files.length < 4) {\n          await sleep(1000);\n        }\n        attempts++;\n      }\n\n      expect(files.length).toEqual(4);\n\n      // Verify each uploaded image\n      for (const filename of files) {\n        const fileUrl = `http://localhost:8080/${filename}`;\n        const response = await fetch(fileUrl);\n        expect(response.ok).toBeTruthy();\n        const imageBuffer = Buffer.from(await response.arrayBuffer());\n        await checkImage(filename, imageBuffer.toString(\"base64\"));\n      }\n    });\n  });\n\n  describe(\"Workflow endpoints\", () => {\n    async function submitWorkflow(\n      endpoint: string,\n      inputs: any,\n      webhook: boolean = false,\n      convert: any = undefined,\n      upload: any = undefined\n    ): Promise<any> {\n      const body: any = {\n        input: inputs, // Wrap inputs in 'input' field as expected by workflow endpoint\n      };\n      if (webhook) {\n        body[\"webhook\"] = webhookAddress;\n      }\n      if (convert) {\n        body[\"convert_output\"] = convert;\n      }\n      if (upload) {\n        // Handle different upload provider keys\n        if (upload.bucket !== undefined || upload.prefix !== undefined) {\n          body[\"s3\"] = upload;\n        } else {\n          Object.assign(body, upload);\n        }\n      }\n\n      const resp = await fetch(`http://localhost:3000${endpoint}`, {\n        method: \"POST\",\n        headers: {\n          \"Content-Type\": 
\"application/json\",\n        },\n        body: JSON.stringify(body),\n        dispatcher: new Agent({\n          headersTimeout: 0,\n          bodyTimeout: 0,\n          connectTimeout: 0,\n        }),\n      });\n\n      if (!resp.ok) {\n        console.error(await resp.text());\n        throw new Error(`Workflow submission failed: ${resp.status}`);\n      }\n      return await resp.json();\n    }\n\n    describe(\"/workflow/txt2img\", () => {\n      it(\"works with default parameters\", async () => {\n        const respBody = await submitWorkflow(\"/workflow/txt2img\", {\n          prompt: \"a beautiful sunset\",\n          checkpoint: \"dreamshaper_8.safetensors\",\n        });\n        expect(respBody.filenames.length).toEqual(1);\n        expect(respBody.images.length).toEqual(1);\n        await checkImage(respBody.filenames[0], respBody.images[0]);\n      });\n\n      it(\"works with custom parameters\", async () => {\n        const respBody = await submitWorkflow(\"/workflow/txt2img\", {\n          prompt: \"a beautiful sunset\",\n          checkpoint: \"dreamshaper_8.safetensors\",\n          seed: 42,\n          steps: 20,\n          cfg_scale: 7.5,\n          width: 768,\n          height: 768,\n        });\n        expect(respBody.filenames.length).toEqual(1);\n        expect(respBody.images.length).toEqual(1);\n        await checkImage(respBody.filenames[0], respBody.images[0], {\n          width: 768,\n          height: 768,\n        });\n      });\n\n      it(\"works with webhook\", async () => {\n        let expected = 1;\n        const webhook = await createWebhookListener(async (body) => {\n          expected--;\n          const { id, filename, image } = body;\n          expect(id).toEqual(reqId);\n          await checkImage(filename, image);\n        });\n\n        const { id: reqId } = await submitWorkflow(\n          \"/workflow/txt2img\",\n          {\n            prompt: \"a beautiful sunset\",\n            checkpoint: \"dreamshaper_8.safetensors\",\n          },\n          true\n        );\n\n        while (expected > 0) {\n          await sleep(100);\n        }\n        await webhook.close();\n      });\n\n      it(\"works with S3 upload\", async () => {\n        const respBody = await submitWorkflow(\n          \"/workflow/txt2img\",\n          {\n            prompt: \"a beautiful sunset\",\n            checkpoint: \"dreamshaper_8.safetensors\",\n          },\n          false,\n          undefined,\n          {\n            bucket: bucketName,\n            prefix: \"workflow-txt2img/\",\n            async: false,\n          }\n        );\n        expect(respBody.filenames.length).toEqual(1);\n        expect(respBody.images.length).toEqual(1);\n        expect(\n          respBody.images[0].startsWith(\"s3://\") &&\n            respBody.images[0].endsWith(\".png\")\n        ).toBeTruthy();\n\n        const s3Url = new URL(respBody.images[0]);\n        const bucket = s3Url.hostname;\n        const key = s3Url.pathname.slice(1);\n        const s3Resp = await s3.send(\n          new GetObjectCommand({\n            Bucket: bucket,\n            Key: key,\n          })\n        );\n        const imageBuffer = Buffer.from(\n          await s3Resp.Body!.transformToByteArray()\n        );\n        await checkImage(key, imageBuffer.toString(\"base64\"));\n      });\n\n      it(\"works with HuggingFace upload\", async () => {\n        const timestamp = Date.now();\n        const respBody = await submitWorkflow(\n          \"/workflow/txt2img\",\n          {\n            
prompt: \"a beautiful sunset\",\n            checkpoint: \"dreamshaper_8.safetensors\",\n          },\n          false,\n          undefined,\n          {\n            hf_upload: {\n              repo: \"SaladTechnologies/comfyui-api-integration-testing\",\n              repo_type: \"dataset\",\n              directory: `workflow-txt2img-${timestamp}`,\n            },\n          }\n        );\n        expect(respBody.filenames.length).toEqual(1);\n        expect(respBody.images.length).toEqual(1);\n        expect(\n          respBody.images[0].startsWith(\n            \"https://huggingface.co/datasets/SaladTechnologies/comfyui-api-integration-testing/resolve/main/workflow-txt2img-\"\n          ) && respBody.images[0].endsWith(\".png\")\n        ).toBeTruthy();\n      });\n\n      it(\"works with format conversion\", async () => {\n        const respBody = await submitWorkflow(\n          \"/workflow/txt2img\",\n          {\n            prompt: \"a beautiful sunset\",\n            checkpoint: \"dreamshaper_8.safetensors\",\n          },\n          false,\n          { format: \"jpeg\" }\n        );\n        expect(respBody.filenames.length).toEqual(1);\n        expect(respBody.images.length).toEqual(1);\n        expect(respBody.filenames[0].endsWith(\".jpeg\")).toBeTruthy();\n        await checkImage(respBody.filenames[0], respBody.images[0]);\n      });\n    });\n\n    describe(\"/workflow/img2img\", () => {\n      it(\"works with base64 image\", async () => {\n        const respBody = await submitWorkflow(\"/workflow/img2img\", {\n          image: inputPngBase64,\n          prompt: \"a beautiful sunset\",\n          checkpoint: \"dreamshaper_8.safetensors\",\n          width: 768,\n          height: 768,\n        });\n        expect(respBody.filenames.length).toEqual(1);\n        expect(respBody.images.length).toEqual(1);\n        await checkImage(respBody.filenames[0], respBody.images[0], {\n          width: 768,\n          height: 768,\n        });\n      });\n\n      it(\"works with custom parameters\", async () => {\n        const respBody = await submitWorkflow(\"/workflow/img2img\", {\n          image: inputPngBase64,\n          prompt: \"a beautiful sunset\",\n          checkpoint: \"dreamshaper_8.safetensors\",\n          seed: 42,\n          steps: 20,\n          cfg_scale: 7.5,\n          denoise: 0.8,\n          width: 768,\n          height: 768,\n        });\n        expect(respBody.filenames.length).toEqual(1);\n        expect(respBody.images.length).toEqual(1);\n        await checkImage(respBody.filenames[0], respBody.images[0], {\n          width: 768,\n          height: 768,\n        });\n      });\n\n      it(\"works with HTTP image URL\", async () => {\n        const respBody = await submitWorkflow(\"/workflow/img2img\", {\n          image: `http://file-server:8080/${pngKey}`,\n          prompt: \"a beautiful sunset\",\n          checkpoint: \"dreamshaper_8.safetensors\",\n          width: 768,\n          height: 768,\n        });\n        expect(respBody.filenames.length).toEqual(1);\n        expect(respBody.images.length).toEqual(1);\n        await checkImage(respBody.filenames[0], respBody.images[0], {\n          width: 768,\n          height: 768,\n        });\n      });\n\n      it(\"works with S3 image URL\", async () => {\n        const respBody = await submitWorkflow(\"/workflow/img2img\", {\n          image: `s3://${bucketName}/${pngKey}`,\n          prompt: \"a beautiful sunset\",\n          checkpoint: \"dreamshaper_8.safetensors\",\n          width: 768,\n    
      height: 768,\n        });\n        expect(respBody.filenames.length).toEqual(1);\n        expect(respBody.images.length).toEqual(1);\n        await checkImage(respBody.filenames[0], respBody.images[0], {\n          width: 768,\n          height: 768,\n        });\n      });\n\n      it(\"works with Azure Blob image URL\", async () => {\n        const respBody = await submitWorkflow(\"/workflow/img2img\", {\n          image: `http://azurite:10000/devstoreaccount1/${azureContainerName}/${pngKey}`,\n          prompt: \"a beautiful sunset\",\n          checkpoint: \"dreamshaper_8.safetensors\",\n          width: 768,\n          height: 768,\n        });\n        expect(respBody.filenames.length).toEqual(1);\n        expect(respBody.images.length).toEqual(1);\n        await checkImage(respBody.filenames[0], respBody.images[0], {\n          width: 768,\n          height: 768,\n        });\n      });\n\n      it(\"works with webhook\", async () => {\n        let expected = 1;\n        const webhook = await createWebhookListener(async (body) => {\n          expected--;\n          const { id, filename, image } = body;\n          expect(id).toEqual(reqId);\n          await checkImage(filename, image, {\n            width: 768,\n            height: 768,\n          });\n        });\n\n        const { id: reqId } = await submitWorkflow(\n          \"/workflow/img2img\",\n          {\n            image: inputPngBase64,\n            prompt: \"a beautiful sunset\",\n            checkpoint: \"dreamshaper_8.safetensors\",\n            width: 768,\n            height: 768,\n          },\n          true\n        );\n\n        while (expected > 0) {\n          await sleep(100);\n        }\n        await webhook.close();\n      });\n\n      it(\"works with Azure Blob upload\", async () => {\n        const respBody = await submitWorkflow(\n          \"/workflow/img2img\",\n          {\n            image: inputPngBase64,\n            prompt: \"a beautiful sunset\",\n            checkpoint: \"dreamshaper_8.safetensors\",\n            width: 768,\n            height: 768,\n          },\n          false,\n          undefined,\n          {\n            azure_blob_upload: {\n              container: azureContainerName,\n              blob_prefix: \"workflow-img2img/\",\n            },\n          }\n        );\n        expect(respBody.filenames.length).toEqual(1);\n        expect(respBody.images.length).toEqual(1);\n        expect(\n          respBody.images[0].includes(\n            `/${azureContainerName}/workflow-img2img/`\n          ) && respBody.images[0].endsWith(\".png\")\n        ).toBeTruthy();\n\n        // Verify the image was uploaded\n        const azureUrl = respBody.images[0];\n        const urlParts = new URL(azureUrl);\n        let pathParts = urlParts.pathname.split(\"/\").filter((p) => p);\n        if (!urlParts.hostname.includes(\".blob.core.windows.net\")) {\n          pathParts = pathParts.slice(1);\n        }\n        const containerName = pathParts[0];\n        const blobName = pathParts.slice(1).join(\"/\");\n\n        const azureContainer = await getAzureContainer(containerName);\n        const blockBlobClient = azureContainer.getBlockBlobClient(blobName);\n        const downloadResponse = await blockBlobClient.download();\n        const imageBuffer = await streamToBuffer(\n          downloadResponse.readableStreamBody!\n        );\n        await checkImage(\n          respBody.filenames[0],\n          imageBuffer.toString(\"base64\"),\n          {\n            width: 768,\n            
 height: 768,\n          }\n        );\n      });\n\n      it(\"works with HTTP file server upload\", async () => {\n        const respBody = await submitWorkflow(\n          \"/workflow/img2img\",\n          {\n            image: inputPngBase64,\n            prompt: \"a beautiful sunset\",\n            checkpoint: \"dreamshaper_8.safetensors\",\n            width: 768,\n            height: 768,\n          },\n          false,\n          undefined,\n          {\n            http_upload: {\n              url_prefix: \"http://file-server:8080/workflow-img2img\",\n            },\n          }\n        );\n        expect(respBody.filenames.length).toEqual(1);\n        expect(respBody.images.length).toEqual(1);\n        expect(\n          respBody.images[0].startsWith(\n            \"http://file-server:8080/workflow-img2img\"\n          ) && respBody.images[0].endsWith(\".png\")\n        ).toBeTruthy();\n\n        // Verify the image was uploaded\n        const httpUrl = respBody.images[0].replace(\"file-server\", \"localhost\");\n        const response = await fetch(httpUrl);\n        expect(response.ok).toBeTruthy();\n        const imageBuffer = Buffer.from(await response.arrayBuffer());\n        await checkImage(\n          respBody.filenames[0],\n          imageBuffer.toString(\"base64\"),\n          {\n            width: 768,\n            height: 768,\n          }\n        );\n      });\n\n      it(\"works with HuggingFace upload\", async () => {\n        const timestamp = Date.now();\n        const respBody = await submitWorkflow(\n          \"/workflow/img2img\",\n          {\n            image: inputPngBase64,\n            prompt: \"a beautiful sunset\",\n            checkpoint: \"dreamshaper_8.safetensors\",\n            width: 768,\n            height: 768,\n          },\n          false,\n          undefined,\n          {\n            hf_upload: {\n              repo: \"SaladTechnologies/comfyui-api-integration-testing\",\n              repo_type: \"dataset\",\n              directory: `workflow-img2img-${timestamp}`,\n            },\n          }\n        );\n        expect(respBody.filenames.length).toEqual(1);\n        expect(respBody.images.length).toEqual(1);\n        expect(\n          respBody.images[0].startsWith(\n            \"https://huggingface.co/datasets/SaladTechnologies/comfyui-api-integration-testing/resolve/main/workflow-img2img-\"\n          ) && respBody.images[0].endsWith(\".png\")\n        ).toBeTruthy();\n      });\n\n      it(\"txt2img works with async HuggingFace upload\", async () => {\n        const timestamp = Date.now();\n        const directory = `workflow-txt2img-async-${timestamp}`;\n\n        const respBody = await submitWorkflow(\n          \"/workflow/txt2img\",\n          {\n            prompt: \"a beautiful sunset\",\n            checkpoint: \"dreamshaper_8.safetensors\",\n          },\n          false,\n          undefined,\n          {\n            hf_upload: {\n              repo: \"SaladTechnologies/comfyui-api-integration-testing\",\n              repo_type: \"dataset\",\n              directory,\n              async: true,\n            },\n          }\n        );\n        expect(respBody.status).toEqual(\"ok\");\n\n        // Poll HF repo for the uploaded file\n        let fileExists = false;\n        let attempts = 0;\n\n        while (!fileExists && attempts < 20) {\n          const apiUrl = `https://huggingface.co/api/datasets/SaladTechnologies/comfyui-api-integration-testing/tree/main/${directory}`;\n\n          try {\n            const response = 
await fetch(apiUrl, {\n              headers: {\n                Authorization: `Bearer ${process.env.HF_TOKEN}`,\n              },\n            });\n\n            if (response.ok) {\n              const files = (await response.json()) as any[];\n              if (files.length > 0) {\n                fileExists = true;\n              }\n            }\n          } catch (error) {\n            // Directory might not exist yet\n          }\n\n          if (!fileExists) {\n            await sleep(2000);\n            attempts++;\n          }\n        }\n\n        expect(fileExists).toBeTruthy();\n      });\n\n      it(\"works with async S3 upload\", async () => {\n        // Use a unique prefix to avoid picking up files from other tests\n        const timestamp = Date.now();\n        const uniquePrefix = `workflow-img2img-async-${timestamp}/`;\n\n        const respBody = await submitWorkflow(\n          \"/workflow/img2img\",\n          {\n            image: inputPngBase64,\n            prompt: \"a beautiful sunset\",\n            checkpoint: \"dreamshaper_8.safetensors\",\n            width: 768,\n            height: 768,\n          },\n          false,\n          undefined,\n          {\n            bucket: bucketName,\n            prefix: uniquePrefix,\n            async: true,\n          }\n        );\n        expect(respBody.status).toEqual(\"ok\");\n\n        // Wait for async upload (poll with a bounded number of attempts,\n        // matching the other async upload tests in this file)\n        const listCmd = new ListObjectsCommand({\n          Bucket: bucketName,\n          Prefix: uniquePrefix,\n        });\n\n        let outputs: string[] = [];\n        let attempts = 0;\n        while (outputs.length < 1 && attempts < 10) {\n          const page = await s3.send(listCmd);\n          outputs = page.Contents?.map((obj) => obj.Key!) || [];\n          if (outputs.length < 1) {\n            await sleep(1000);\n          }\n          attempts++;\n        }\n\n        expect(outputs.length).toEqual(1);\n        const s3Resp = await s3.send(\n          new GetObjectCommand({\n            Bucket: bucketName,\n            Key: outputs[0],\n          })\n        );\n        const imageBuffer = Buffer.from(\n          await s3Resp.Body!.transformToByteArray()\n        );\n        await checkImage(outputs[0]!, imageBuffer.toString(\"base64\"), {\n          width: 768,\n          height: 768,\n        });\n      });\n\n      it(\"works with async HuggingFace upload\", async () => {\n        const timestamp = Date.now();\n        const directory = `workflow-img2img-async-hf-${timestamp}`;\n\n        const respBody = await submitWorkflow(\n          \"/workflow/img2img\",\n          {\n            image: inputPngBase64,\n            prompt: \"a beautiful sunset\",\n            checkpoint: \"dreamshaper_8.safetensors\",\n            width: 768,\n            height: 768,\n          },\n          false,\n          undefined,\n          {\n            hf_upload: {\n              repo: \"SaladTechnologies/comfyui-api-integration-testing\",\n              repo_type: \"dataset\",\n              directory,\n              async: true,\n            },\n          }\n        );\n        expect(respBody.status).toEqual(\"ok\");\n\n        // Poll HF repo for the uploaded file\n        let fileExists = false;\n        let attempts = 0;\n\n        while (!fileExists && attempts < 20) {\n          const apiUrl = `https://huggingface.co/api/datasets/SaladTechnologies/comfyui-api-integration-testing/tree/main/${directory}`;\n\n          try {\n            const response = await fetch(apiUrl, {\n              headers: {\n                Authorization: `Bearer ${process.env.HF_TOKEN}`,\n 
             },\n            });\n\n            if (response.ok) {\n              const files = (await response.json()) as any[];\n              if (files.length > 0) {\n                fileExists = true;\n              }\n            }\n          } catch (error) {\n            // Directory might not exist yet\n          }\n\n          if (!fileExists) {\n            await sleep(2000);\n            attempts++;\n          }\n        }\n\n        expect(fileExists).toBeTruthy();\n      });\n\n      it(\"image2image works with hf image url in model repo\", async () => {\n        // First, upload an image to HF model repo to use as source\n        const timestamp = Date.now();\n        const uploadResp = await submitPrompt(sd15Txt2Img, false, undefined, {\n          hf_upload: {\n            repo: \"SaladTechnologies/comfyui-api-integration-testing\",\n            repo_type: \"model\",\n            directory: `test-source-images-model-${timestamp}`,\n          },\n        });\n\n        // Extract the URL of the uploaded image\n        const hfImageUrl = uploadResp.images[0];\n\n        // Now use this HF URL as input for img2img workflow\n        const respBody = await submitWorkflow(\n          \"/workflow/img2img\",\n          {\n            image: hfImageUrl,\n            prompt: \"a beautiful mountain landscape\",\n            checkpoint: \"dreamshaper_8.safetensors\",\n            width: 768,\n            height: 768,\n          },\n          false,\n          undefined,\n          {\n            http_upload: {\n              url_prefix: \"http://file-server:8080/workflow-img2img-hf-source\",\n            },\n          }\n        );\n\n        expect(respBody.filenames.length).toEqual(1);\n        expect(respBody.images.length).toEqual(1);\n\n        // Verify the transformed image was created\n        const httpUrl = respBody.images[0].replace(\"file-server\", \"localhost\");\n        const response = await fetch(httpUrl);\n        expect(response.ok).toBeTruthy();\n        const imageBuffer = Buffer.from(await response.arrayBuffer());\n        await checkImage(\n          respBody.filenames[0],\n          imageBuffer.toString(\"base64\"),\n          {\n            width: 768,\n            height: 768,\n          }\n        );\n      });\n\n      it(\"image2image works with hf image url in dataset repo\", async () => {\n        // First, upload an image to HF dataset repo to use as source\n        const timestamp = Date.now();\n        const uploadResp = await submitPrompt(sd15Txt2Img, false, undefined, {\n          hf_upload: {\n            repo: \"SaladTechnologies/comfyui-api-integration-testing\",\n            repo_type: \"dataset\",\n            directory: `test-source-images-dataset-${timestamp}`,\n          },\n        });\n\n        // Extract the URL of the uploaded image\n        const hfImageUrl = uploadResp.images[0];\n\n        // Now use this HF URL as input for img2img workflow\n        const respBody = await submitWorkflow(\n          \"/workflow/img2img\",\n          {\n            image: hfImageUrl,\n            prompt: \"a futuristic cityscape\",\n            checkpoint: \"dreamshaper_8.safetensors\",\n            width: 768,\n            height: 768,\n          },\n          false,\n          undefined,\n          {\n            http_upload: {\n              url_prefix:\n                \"http://file-server:8080/workflow-img2img-hf-dataset-source\",\n            },\n          }\n        );\n\n        expect(respBody.filenames.length).toEqual(1);\n        
expect(respBody.images.length).toEqual(1);\n\n        // Verify the transformed image was created\n        const httpUrl = respBody.images[0].replace(\"file-server\", \"localhost\");\n        const response = await fetch(httpUrl);\n        expect(response.ok).toBeTruthy();\n        const imageBuffer = Buffer.from(await response.arrayBuffer());\n        await checkImage(\n          respBody.filenames[0],\n          imageBuffer.toString(\"base64\"),\n          {\n            width: 768,\n            height: 768,\n          }\n        );\n      });\n    });\n  });\n});\n\ndescribe(\"Download Endpoint\", () => {\n  const testModelFilename = \"test-model.safetensors\";\n  const testModelUrl = `http://file-server:8080/${testModelFilename}`;\n\n  beforeAll(async () => {\n    await waitForServerToBeReady();\n    // Seed the HTTP file server with a test \"model\" file\n    const testModelContent = Buffer.from(\"fake model content for testing\");\n    await fetch(`http://localhost:8080/${testModelFilename}`, {\n      method: \"PUT\",\n      body: testModelContent,\n      headers: {\n        \"Content-Type\": \"application/octet-stream\",\n      },\n    });\n  });\n\n  async function submitDownload(body: {\n    url: string;\n    model_type: string;\n    filename?: string;\n    wait?: boolean;\n  }): Promise<{ status: number; body: any }> {\n    const resp = await fetch(`http://localhost:3000/download`, {\n      method: \"POST\",\n      headers: {\n        \"Content-Type\": \"application/json\",\n      },\n      body: JSON.stringify(body),\n      dispatcher: new Agent({\n        headersTimeout: 0,\n        bodyTimeout: 0,\n        connectTimeout: 0,\n      }),\n    });\n    return {\n      status: resp.status,\n      body: await resp.json(),\n    };\n  }\n\n  describe(\"Async download (wait: false)\", () => {\n    it(\"returns 202 immediately with status 'started'\", async () => {\n      const { status, body } = await submitDownload({\n        url: testModelUrl,\n        model_type: \"checkpoints\",\n        filename: `async-test-${Date.now()}.safetensors`,\n        wait: false,\n      });\n\n      expect(status).toEqual(202);\n      expect(body.status).toEqual(\"started\");\n      expect(body.url).toEqual(testModelUrl);\n      expect(body.model_type).toEqual(\"checkpoints\");\n      expect(typeof body.filename).toEqual(\"string\");\n      // Async response should not include size or duration\n      expect(body.size).toEqual(undefined);\n      expect(body.duration).toEqual(undefined);\n    });\n\n    it(\"defaults to async when wait is not specified\", async () => {\n      const { status, body } = await submitDownload({\n        url: testModelUrl,\n        model_type: \"checkpoints\",\n        filename: `async-default-${Date.now()}.safetensors`,\n      });\n\n      expect(status).toEqual(202);\n      expect(body.status).toEqual(\"started\");\n    });\n  });\n\n  describe(\"Sync download (wait: true)\", () => {\n    it(\"returns 200 with status 'completed', size, and duration\", async () => {\n      const { status, body } = await submitDownload({\n        url: testModelUrl,\n        model_type: \"checkpoints\",\n        filename: `sync-test-${Date.now()}.safetensors`,\n        wait: true,\n      });\n\n      expect(status).toEqual(200);\n      expect(body.status).toEqual(\"completed\");\n      expect(body.url).toEqual(testModelUrl);\n      expect(body.model_type).toEqual(\"checkpoints\");\n      expect(typeof body.filename).toEqual(\"string\");\n      expect(body.size).toBeGreaterThan(0);\n      
expect(body.duration).toBeGreaterThanOrEqual(0);\n    });\n\n    it(\"extracts filename from URL when not provided\", async () => {\n      const { status, body } = await submitDownload({\n        url: testModelUrl,\n        model_type: \"checkpoints\",\n        wait: true,\n      });\n\n      expect(status).toEqual(200);\n      expect(body.filename).toEqual(testModelFilename);\n    });\n  });\n\n  describe(\"Error handling\", () => {\n    it(\"returns 400 for invalid model_type\", async () => {\n      const { status } = await submitDownload({\n        url: testModelUrl,\n        model_type: \"invalid_model_type_that_does_not_exist\",\n        wait: true,\n      });\n\n      expect(status).toEqual(400);\n    });\n\n    it(\"returns 400 for invalid URL\", async () => {\n      const { status, body } = await submitDownload({\n        url: \"not-a-valid-url\",\n        model_type: \"checkpoints\",\n        wait: true,\n      });\n\n      expect(status).toEqual(400);\n      expect(typeof body.error).toEqual(\"string\");\n    });\n\n    it(\"returns 400 for download failure (non-existent file)\", async () => {\n      const { status, body } = await submitDownload({\n        url: \"http://file-server:8080/non-existent-file.safetensors\",\n        model_type: \"checkpoints\",\n        wait: true,\n      });\n\n      expect(status).toEqual(400);\n      expect(typeof body.error).toEqual(\"string\");\n    });\n  });\n\n  describe(\"Different model types\", () => {\n    it(\"works with loras model type\", async () => {\n      const { status, body } = await submitDownload({\n        url: testModelUrl,\n        model_type: \"loras\",\n        filename: `lora-test-${Date.now()}.safetensors`,\n        wait: true,\n      });\n\n      expect(status).toEqual(200);\n      expect(body.status).toEqual(\"completed\");\n      expect(body.model_type).toEqual(\"loras\");\n    });\n  });\n});\n\ndescribe(\"System Events\", () => {\n  beforeAll(async () => {\n    await waitForServerToBeReady();\n  });\n\n  it(\"works\", async () => {\n    const uniquePrompt = JSON.parse(JSON.stringify(sd15Txt2Img));\n    uniquePrompt[\"3\"].inputs.seed = Math.floor(Math.random() * 1000000);\n    const eventsReceived: { [key: string]: number } = {};\n    const webhook = await createWebhookListener((body) => {\n      if (body?.data?.data?.prompt_id !== promptId) {\n        // Ignore events from other prompts\n        return;\n      }\n      if (!eventsReceived[body.event]) {\n        eventsReceived[body.event] = 0;\n      }\n      eventsReceived[body.event]++;\n    }, \"/system\");\n\n    const { id: promptId } = await submitPrompt(uniquePrompt);\n    let attempts = 100;\n    while (\n      !(\n        eventsReceived[\"comfy.execution_success\"] &&\n        eventsReceived[\"comfy.executed\"] &&\n        eventsReceived[\"comfy.progress\"]\n      ) &&\n      attempts > 0\n    ) {\n      await sleep(100);\n      attempts--;\n    }\n\n    await webhook.close();\n\n    expect(eventsReceived[\"comfy.executed\"]).toEqual(1);\n    expect(eventsReceived[\"comfy.execution_success\"]).toEqual(1);\n    expect(eventsReceived[\"comfy.progress\"]).toBeGreaterThan(0);\n  });\n});\n"
  },
  {
    "path": "test/docker-image/Dockerfile",
    "content": "ARG comfy_version=0.19.3\nFROM ghcr.io/saladtechnologies/comfyui-api:comfy${comfy_version}-torch2.8.0-cuda12.8-devel\n\nRUN apt-get update && apt-get install -y \\\n  libgl1 \\\n  libgl1-mesa-glx \\\n  libglib2.0-0 \\\n  && rm -rf /var/lib/apt/lists/*\n\nRUN comfy node registry-install comfyui-videohelpersuite\nRUN comfy node registry-install comfyui-animatediff-evolved\nRUN comfy node registry-install efficiency-nodes-comfyui\nRUN comfy node registry-install comfyui-advanced-controlnet\nRUN comfy node registry-install comfyui-cogvideoxwrapper\n\nCOPY poses ${INPUT_DIR}/poses\nENV STARTUP_CHECK_MAX_TRIES=100"
  },
  {
    "path": "test/docker-image/Dockerfile.smoketest",
    "content": "FROM ghcr.io/saladtechnologies/comfyui-api:comfy0.19.3-torch2.8.0-cuda12.8-runtime\n\nCOPY test/docker-image/dreamshaper_8.safetensors $MODEL_DIR/checkpoints/dreamshaper_8.safetensors\n\nCOPY bin/comfyui-api /comfyui-api\nRUN chmod +x /comfyui-api\n\nENV LOG_LEVEL=debug\n\nCMD [\"/comfyui-api\"]"
  },
  {
    "path": "test/docker-image/link-models",
    "content": "#! /usr/bin/bash\n\ncomfy_model_dir=~/_comfy_models/models\n\nln -s $comfy_model_dir $PWD"
  },
  {
    "path": "test/file-server.ts",
    "content": "import http from \"http\";\nimport fs from \"fs\";\nimport path from \"path\";\nimport { pipeline } from \"stream/promises\";\n\nconst PORT = process.env.PORT ? parseInt(process.env.PORT) : 8080;\nconst STORAGE_DIR = process.env.STORAGE_DIR || path.join(__dirname, \"test-storage\");\n\n// Ensure storage directory exists\nif (!fs.existsSync(STORAGE_DIR)) {\n  fs.mkdirSync(STORAGE_DIR, { recursive: true });\n}\n\nconst server = http.createServer(async (req, res) => {\n  const url = new URL(req.url || \"/\", `http://localhost:${PORT}`);\n  // Remove leading slash from pathname to use as filename\n  const filename = url.pathname.substring(1);\n  const filePath = path.join(STORAGE_DIR, filename);\n\n  // Basic auth support\n  const authHeader = req.headers.authorization;\n  if (process.env.REQUIRE_AUTH === \"true\") {\n    const expectedAuth = `Basic ${Buffer.from(\n      `${process.env.AUTH_USER || \"user\"}:${process.env.AUTH_PASS || \"pass\"}`\n    ).toString(\"base64\")}`;\n    \n    if (authHeader !== expectedAuth) {\n      res.writeHead(401, { \"WWW-Authenticate\": 'Basic realm=\"File Server\"' });\n      res.end(\"Unauthorized\");\n      return;\n    }\n  }\n\n  console.log(`${req.method} ${url.pathname}`);\n\n  if (req.method === \"GET\" && url.pathname === \"/list\") {\n    // List endpoint to get files (optionally filtered by prefix)\n    try {\n      const prefix = url.searchParams.get(\"prefix\") || \"\";\n      \n      // Recursively find all files in storage directory\n      const getAllFiles = (dir: string, basePath: string = \"\"): string[] => {\n        const entries = fs.readdirSync(dir, { withFileTypes: true });\n        const files: string[] = [];\n        \n        for (const entry of entries) {\n          const relativePath = basePath ? `${basePath}/${entry.name}` : entry.name;\n          const fullPath = path.join(dir, entry.name);\n          \n          if (entry.isDirectory()) {\n            files.push(...getAllFiles(fullPath, relativePath));\n          } else {\n            files.push(relativePath);\n          }\n        }\n        \n        return files;\n      };\n      \n      const allFiles = getAllFiles(STORAGE_DIR);\n      const files = allFiles.filter(f => prefix ? 
f.startsWith(prefix) : true);\n      \n      res.writeHead(200, { \"Content-Type\": \"application/json\" });\n      res.end(JSON.stringify({ files }));\n    } catch (error) {\n      console.error(\"Error listing files:\", error);\n      res.writeHead(500);\n      res.end(\"Internal Server Error\");\n    }\n  } else if (req.method === \"DELETE\" && url.pathname === \"/purge\") {\n    // Purge endpoint to delete all files and directories\n    try {\n      // Recursively delete all contents of the storage directory\n      const entries = fs.readdirSync(STORAGE_DIR, { withFileTypes: true });\n      for (const entry of entries) {\n        const fullPath = path.join(STORAGE_DIR, entry.name);\n        if (entry.isDirectory()) {\n          fs.rmSync(fullPath, { recursive: true, force: true });\n        } else {\n          fs.unlinkSync(fullPath);\n        }\n      }\n      \n      console.log(`Purged all files from ${STORAGE_DIR}`);\n      res.writeHead(200, { \"Content-Type\": \"application/json\" });\n      res.end(JSON.stringify({ success: true, message: \"All files purged\" }));\n    } catch (error) {\n      console.error(\"Error purging files:\", error);\n      res.writeHead(500);\n      res.end(\"Internal Server Error\");\n    }\n  } else if (req.method === \"PUT\") {\n    try {\n      // Ensure we don't write outside storage dir\n      if (!filePath.startsWith(STORAGE_DIR)) {\n        res.writeHead(400);\n        res.end(\"Invalid path\");\n        return;\n      }\n\n      // Create parent directory if it doesn't exist\n      const dir = path.dirname(filePath);\n      if (!fs.existsSync(dir)) {\n        fs.mkdirSync(dir, { recursive: true });\n      }\n\n      const writeStream = fs.createWriteStream(filePath);\n      await pipeline(req, writeStream);\n      \n      console.log(`Saved file: ${filePath}`);\n      res.writeHead(200);\n      res.end(\"OK\");\n    } catch (error) {\n      console.error(\"Error saving file:\", error);\n      res.writeHead(500);\n      res.end(\"Internal Server Error\");\n    }\n  } else if (req.method === \"GET\") {\n    try {\n      // Ensure we don't read outside storage dir\n      if (!filePath.startsWith(STORAGE_DIR)) {\n        res.writeHead(400);\n        res.end(\"Invalid path\");\n        return;\n      }\n\n      if (!fs.existsSync(filePath)) {\n        res.writeHead(404);\n        res.end(\"Not Found\");\n        return;\n      }\n\n      const stat = fs.statSync(filePath);\n      const contentType = getContentType(filename);\n      \n      res.writeHead(200, {\n        \"Content-Type\": contentType,\n        \"Content-Length\": stat.size,\n        \"Content-Disposition\": `inline; filename=\"${filename}\"`,\n      });\n\n      const readStream = fs.createReadStream(filePath);\n      await pipeline(readStream, res);\n      \n      console.log(`Served file: ${filePath}`);\n    } catch (error) {\n      console.error(\"Error reading file:\", error);\n      res.writeHead(500);\n      res.end(\"Internal Server Error\");\n    }\n  } else {\n    res.writeHead(405);\n    res.end(\"Method Not Allowed\");\n  }\n});\n\nfunction getContentType(filename: string): string {\n  const ext = path.extname(filename).toLowerCase();\n  const mimeTypes: Record<string, string> = {\n    \".jpg\": \"image/jpeg\",\n    \".jpeg\": \"image/jpeg\",\n    \".png\": \"image/png\",\n    \".gif\": \"image/gif\",\n    \".webp\": \"image/webp\",\n    \".svg\": \"image/svg+xml\",\n    \".bmp\": \"image/bmp\",\n    \".tiff\": \"image/tiff\",\n    \".ico\": \"image/x-icon\",\n    \".mp4\": 
\"video/mp4\",\n    \".mpeg\": \"video/mpeg\",\n    \".webm\": \"video/webm\",\n    \".mov\": \"video/quicktime\",\n    \".avi\": \"video/x-msvideo\",\n    \".mp3\": \"audio/mpeg\",\n    \".wav\": \"audio/wav\",\n    \".ogg\": \"audio/ogg\",\n    \".weba\": \"audio/webm\",\n    \".aac\": \"audio/aac\",\n    \".pdf\": \"application/pdf\",\n    \".doc\": \"application/msword\",\n    \".docx\": \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\",\n    \".xls\": \"application/vnd.ms-excel\",\n    \".xlsx\": \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\",\n    \".ppt\": \"application/vnd.ms-powerpoint\",\n    \".pptx\": \"application/vnd.openxmlformats-officedocument.presentationml.presentation\",\n    \".txt\": \"text/plain\",\n    \".csv\": \"text/csv\",\n    \".html\": \"text/html\",\n    \".rtf\": \"application/rtf\",\n    \".zip\": \"application/zip\",\n    \".tar\": \"application/x-tar\",\n    \".gz\": \"application/gzip\",\n    \".7z\": \"application/x-7z-compressed\",\n    \".rar\": \"application/x-rar-compressed\",\n    \".json\": \"application/json\",\n    \".xml\": \"application/xml\",\n    \".js\": \"application/javascript\",\n    \".css\": \"text/css\",\n    \".bin\": \"application/octet-stream\",\n    \".pt\": \"application/x-pytorch\",\n    \".pb\": \"application/x-tensorflow\",\n  };\n  \n  return mimeTypes[ext] || \"application/octet-stream\";\n}\n\nserver.listen(PORT, () => {\n  console.log(`File server listening on http://localhost:${PORT}`);\n  console.log(`Storage directory: ${STORAGE_DIR}`);\n  console.log(`Auth required: ${process.env.REQUIRE_AUTH === \"true\"}`);\n});\n\n// Graceful shutdown\nprocess.on(\"SIGTERM\", () => {\n  console.log(\"SIGTERM received, closing server\");\n  server.close(() => {\n    console.log(\"Server closed\");\n    process.exit(0);\n  });\n});\n\nprocess.on(\"SIGINT\", () => {\n  console.log(\"SIGINT received, closing server\");\n  server.close(() => {\n    console.log(\"Server closed\");\n    process.exit(0);\n  });\n});"
  },
  {
    "path": "test/llm-providers.spec.ts",
    "content": "import { expect, describe, it } from \"vitest\";\nimport {\n  anthropicProvider,\n  minimaxProvider,\n  selectProvider,\n  stripCodeFences,\n} from \"../src/llm-providers\";\n\ndescribe(\"LLM Providers - Anthropic\", () => {\n  it(\"should have the correct API URL\", () => {\n    expect(anthropicProvider.apiUrl).toEqual(\n      \"https://api.anthropic.com/v1/messages\"\n    );\n  });\n\n  it(\"should return x-api-key and anthropic-version auth headers\", () => {\n    const headers = anthropicProvider.authHeaders(\"sk-test\");\n    expect(headers[\"x-api-key\"]).toEqual(\"sk-test\");\n    expect(headers[\"anthropic-version\"]).toMatch(/^\\d{4}-\\d{2}-\\d{2}$/);\n  });\n\n  it(\"should build a request body with system as top-level field\", () => {\n    const body = anthropicProvider.buildRequestBody(\n      \"system instructions\",\n      \"user message\"\n    ) as any;\n    expect(body.system).toEqual(\"system instructions\");\n    expect(body.messages).toEqual([{ role: \"user\", content: \"user message\" }]);\n    expect(body.temperature).toEqual(0);\n    expect(body.max_tokens).toBeGreaterThan(0);\n  });\n\n  it(\"should parse content[0].text from Anthropic response format\", () => {\n    const mockResponse = {\n      content: [{ type: \"text\", text: \"import { z } from 'zod';\" }],\n    };\n    expect(anthropicProvider.parseResponse(mockResponse)).toEqual(\n      \"import { z } from 'zod';\"\n    );\n  });\n\n  it(\"should return empty string when response has no content\", () => {\n    expect(anthropicProvider.parseResponse({})).toEqual(\"\");\n    expect(anthropicProvider.parseResponse({ content: [] })).toEqual(\"\");\n  });\n});\n\ndescribe(\"LLM Providers - MiniMax\", () => {\n  it(\"should use the OpenAI-compatible endpoint\", () => {\n    expect(minimaxProvider.apiUrl).toEqual(\n      \"https://api.minimax.io/v1/chat/completions\"\n    );\n  });\n\n  it(\"should use MiniMax-M2.7 model\", () => {\n    expect(minimaxProvider.model).toEqual(\"MiniMax-M2.7\");\n  });\n\n  it(\"should return Bearer auth header\", () => {\n    const headers = minimaxProvider.authHeaders(\"mm-key-abc\");\n    expect(headers[\"Authorization\"]).toEqual(\"Bearer mm-key-abc\");\n    expect(Object.keys(headers)).not.toContain(\"x-api-key\");\n    expect(Object.keys(headers)).not.toContain(\"anthropic-version\");\n  });\n\n  it(\"should have temperature > 0 (MiniMax requires temperature in (0.0, 1.0])\", () => {\n    expect(minimaxProvider.temperature).toBeGreaterThan(0);\n    expect(minimaxProvider.temperature).toBeLessThanOrEqual(1);\n  });\n\n  it(\"should build a request body with system as first message in messages array\", () => {\n    const body = minimaxProvider.buildRequestBody(\n      \"system instructions\",\n      \"user message\"\n    ) as any;\n    expect(body.messages).toHaveLength(2);\n    expect(body.messages[0]).toEqual({\n      role: \"system\",\n      content: \"system instructions\",\n    });\n    expect(body.messages[1]).toEqual({\n      role: \"user\",\n      content: \"user message\",\n    });\n    expect(body.system).toBeUndefined();\n  });\n\n  it(\"should parse choices[0].message.content from OpenAI-compatible response format\", () => {\n    const mockResponse = {\n      choices: [\n        {\n          message: { role: \"assistant\", content: \"import { z } from 'zod';\" },\n          finish_reason: \"stop\",\n        },\n      ],\n    };\n    expect(minimaxProvider.parseResponse(mockResponse)).toEqual(\n      \"import { z } from 'zod';\"\n    );\n  });\n\n  
it(\"should return empty string when response has no choices\", () => {\n    expect(minimaxProvider.parseResponse({})).toEqual(\"\");\n    expect(minimaxProvider.parseResponse({ choices: [] })).toEqual(\"\");\n  });\n});\n\ndescribe(\"selectProvider\", () => {\n  it(\"should return anthropicProvider when ANTHROPIC_API_KEY is set\", () => {\n    const provider = selectProvider(\"sk-ant-key\", undefined);\n    expect(provider.name).toEqual(\"anthropic\");\n  });\n\n  it(\"should return minimaxProvider when only MINIMAX_API_KEY is set\", () => {\n    const provider = selectProvider(undefined, \"mm-key\");\n    expect(provider.name).toEqual(\"minimax\");\n  });\n\n  it(\"should prefer anthropic when both keys are set\", () => {\n    const provider = selectProvider(\"sk-ant-key\", \"mm-key\");\n    expect(provider.name).toEqual(\"anthropic\");\n  });\n\n  it(\"should throw when neither key is set\", () => {\n    expect(() => selectProvider(undefined, undefined)).toThrow(\n      /ANTHROPIC_API_KEY|MINIMAX_API_KEY/\n    );\n  });\n\n  it(\"should throw when both keys are empty strings\", () => {\n    expect(() => selectProvider(\"\", \"\")).toThrow();\n  });\n});\n\ndescribe(\"stripCodeFences\", () => {\n  it(\"should remove ```typescript fences\", () => {\n    const input = \"```typescript\\nimport foo from 'foo';\\n```\";\n    const result = stripCodeFences(input);\n    expect(result).toEqual(\"import foo from 'foo';\");\n  });\n\n  it(\"should remove plain ``` fences\", () => {\n    const input = \"```\\nconst x = 1;\\n```\";\n    const result = stripCodeFences(input);\n    expect(result).toEqual(\"const x = 1;\");\n  });\n\n  it(\"should return text unchanged when there are no code fences\", () => {\n    const input = \"import { z } from 'zod';\\nconst x = z.string();\";\n    expect(stripCodeFences(input)).toEqual(input);\n  });\n\n  it(\"should preserve multi-line code\", () => {\n    const input =\n      \"```typescript\\nline1\\nline2\\nline3\\n```\";\n    const result = stripCodeFences(input);\n    expect(result).toEqual(\"line1\\nline2\\nline3\");\n  });\n\n  it(\"should handle text with only an opening fence gracefully\", () => {\n    const input = \"```typescript\\nconst x = 1;\";\n    const result = stripCodeFences(input);\n    // Opening fence stripped but no closing fence found — inner content preserved\n    expect(result).toContain(\"const x = 1;\");\n    expect(result).not.toContain(\"```typescript\");\n  });\n});\n"
  },
  {
    "path": "test/output/.gitkeep",
    "content": ""
  },
  {
    "path": "test/submit-many-jobs.js",
    "content": "#!/usr/bin/env node\n\nconst fs = require('fs').promises;\n\nconst usage = `\nUsage: node submit-many-jobs.js <accessDomainName> <numJobs>\n\nThis script submits multiple jobs to a specified access domain for image generation.\nExample: node submit-many-jobs.js http://localhost:3000 10\n`;\n\nconst accessDomainName = process.argv[2];\nconst numJobs = parseInt(process.argv[3]) || 10;\nif (!accessDomainName || isNaN(numJobs) || numJobs <= 0) {\n  console.error(usage);\n  process.exit(1);\n}\n\nconst jobFile = \"workflows/sd1.5-txt2img.json\";\n\nasync function loadJobJson() {\n  try {\n    const jobData = await fs.readFile(jobFile, 'utf8');\n    return JSON.parse(jobData);\n  } catch (error) {\n    console.error(`Error reading job file: ${error.message}`);\n    process.exit(1);\n  }\n}\n\nfunction getRandomSeed() {\n  return Math.floor(Math.random() * (9999 - 1000 + 1)) + 1000;\n}\n\nasync function doAJob(jobJson) {\n  // Clone the job JSON to avoid modifying the original\n  const job = JSON.parse(JSON.stringify(jobJson));\n  \n  // Generate random seed\n  const randomSeed = getRandomSeed();\n  \n  // Update job[\"3\"][\"inputs\"][\"seed\"] to random_seed\n  job[\"3\"].inputs.seed = randomSeed;\n  \n  // Wrap job_json in a \"prompt\" object\n  const payload = { prompt: job };\n  \n  console.log(\"Submitting job...\");\n  \n  try {\n    const response = await fetch(`${accessDomainName}/prompt`, {\n      method: 'POST',\n      headers: {\n        'Content-Type': 'application/json'\n      },\n      body: JSON.stringify(payload)\n    });\n    \n    if (!response.ok) {\n      throw new Error(`HTTP error! status: ${response.status}`);\n    }\n    \n    const responseData = await response.json();\n    \n    // Decode base64 image and save\n    const base64Image = responseData.images[0];\n    const imageBuffer = Buffer.from(base64Image, 'base64');\n    \n    // Create output directory if it doesn't exist\n    await fs.mkdir('output', { recursive: true });\n\n    const filename = `output/${responseData.filenames[0]}`;\n    \n    await fs.writeFile(filename, imageBuffer);\n    \n    console.log(\"Job Done. Image saved.\");\n    \n  } catch (error) {\n    console.error(\"Error submitting job:\", error.message);\n    process.exit(1);\n  }\n}\n\nasync function main() {\n  const jobJson = await loadJobJson();\n  \n  // Create array of promises for all jobs\n  const jobPromises = [];\n  \n  for (let i = 0; i < numJobs; i++) {\n    console.log(`Submitting job ${i + 1} of ${numJobs}`);\n    jobPromises.push(doAJob(jobJson));\n  }\n  \n  // Wait for all jobs to complete\n  try {\n    await Promise.all(jobPromises);\n    console.log(\"All jobs done.\");\n  } catch (error) {\n    console.error(\"Error in job execution:\", error.message);\n    process.exit(1);\n  }\n}\n\n// Run the main function\nmain().catch(error => {\n  console.error(\"Unhandled error:\", error.message);\n  process.exit(1);\n});"
  },
  {
    "path": "test/test-utils.ts",
    "content": "import { expect } from \"vitest\";\nimport sharp from \"sharp\";\nimport fastify, { FastifyInstance } from \"fastify\";\nimport { fetch, Agent } from \"undici\";\nimport { S3Client } from \"@aws-sdk/client-s3\";\nimport { BlobServiceClient, ContainerClient } from \"@azure/storage-blob\";\nimport { Webhook } from \"svix\";\n\nexport const s3 = new S3Client({\n  region: \"us-east-1\",\n  endpoint: \"http://localhost:4566\", // LocalStack endpoint\n  credentials: {\n    accessKeyId: \"test\",\n    secretAccessKey: \"test\",\n  },\n  forcePathStyle: true, // Required for LocalStack\n});\n\n// Azurite connection string for local testing\nconst azuriteConnectionString =\n  \"DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://localhost:10000/devstoreaccount1;\";\n\nexport const azureBlobClient = BlobServiceClient.fromConnectionString(\n  azuriteConnectionString\n);\n\nexport async function getAzureContainer(\n  containerName: string\n): Promise<ContainerClient> {\n  const containerClient = azureBlobClient.getContainerClient(containerName);\n  if (!(await containerClient.exists())) {\n    await containerClient.create();\n  }\n  return containerClient;\n}\n\nexport async function sleep(ms: number): Promise<void> {\n  return new Promise((resolve) => setTimeout(resolve, ms));\n}\n\nexport async function createWebhookListener(\n  onReceive: (body: any, headers?: any) => void | Promise<void>,\n  endpoint: string = \"/webhook\"\n): Promise<FastifyInstance> {\n  const app = fastify({\n    bodyLimit: 1024 * 1024 * 1024, // 1 GB\n  });\n  app.post(endpoint, (req, res) => {\n    if (req.body) {\n      onReceive(req.body, req.headers);\n    }\n    res.send({ success: true });\n  });\n  await app.listen({ port: 1234 });\n  await app.ready();\n  /**\n   * TODO: There is some kind of race condition here I can't figure out.\n   * comfyui-api logs report that it got no response from the webhook if this\n   * value is smaller.\n   * */\n  await sleep(700);\n  return app;\n}\n\nconst webhookAddress = \"http://host.docker.internal:1234/webhook\";\n\nexport async function submitPrompt(\n  prompt: any,\n  webhook: boolean = false,\n  convert: any = undefined,\n  upload: any = undefined,\n  webhook_v2: boolean = false\n): Promise<any> {\n  const body: any = {\n    prompt,\n  };\n  if (webhook) {\n    body[\"webhook\"] = webhookAddress;\n  }\n  if (webhook_v2) {\n    body[\"webhook_v2\"] = webhookAddress;\n  }\n  if (convert) {\n    body[\"convert_output\"] = convert;\n  }\n  // Handle different upload provider keys\n  if (upload) {\n    // For backward compatibility, if upload is passed directly as s3 config\n    if (upload.bucket !== undefined || upload.prefix !== undefined) {\n      body[\"s3\"] = upload;\n    } else {\n      Object.assign(body, upload);\n    }\n  }\n  try {\n    const resp = await fetch(`http://localhost:3000/prompt`, {\n      method: \"POST\",\n      headers: {\n        \"Content-Type\": \"application/json\",\n      },\n      body: JSON.stringify(body),\n      dispatcher: new Agent({\n        headersTimeout: 0,\n        bodyTimeout: 0,\n        connectTimeout: 0,\n      }),\n    });\n    if (!resp.ok) {\n      console.error(await resp.text());\n      throw new Error(\"Prompt submission failed\");\n    }\n    expect(resp.ok).toEqual(true);\n    return await resp.json();\n  } catch (e) {\n    console.error(e);\n    throw e;\n  }\n}\n\nexport async function 
checkImage(\n  filename: string,\n  imageB64: string,\n  options: { width: number; height: number; webpFrames?: number } = {\n    width: 512,\n    height: 512,\n  }\n): Promise<void> {\n  const image = sharp(Buffer.from(imageB64, \"base64\"));\n  const metadata = await image.metadata();\n  expect(metadata.width).toEqual(options.width);\n  expect(metadata.height).toEqual(options.height);\n  if (filename.endsWith(\".webp\")) {\n    expect(metadata.format).toEqual(\"webp\");\n    expect(metadata.pages).toEqual(options.webpFrames);\n  } else if (filename.endsWith(\".png\")) {\n    expect(metadata.format).toEqual(\"png\");\n  } else if (filename.endsWith(\".jpeg\") || filename.endsWith(\".jpg\")) {\n    expect(metadata.format).toEqual(\"jpeg\");\n  }\n}\n\nexport async function waitForServerToBeReady(): Promise<void> {\n  while (true) {\n    try {\n      const resp = await fetch(`http://localhost:3000/ready`);\n      if (resp.ok) {\n        break;\n      }\n    } catch (e) {}\n    await sleep(100);\n  }\n}\n\nconst webhook = new Webhook(\"testsecret\");\n\nexport function verifyWebhookV2(\n  body: string,\n  headers: Record<string, string>\n): boolean {\n  if (\n    !headers[\"webhook-id\"] ||\n    !headers[\"webhook-timestamp\"] ||\n    !headers[\"webhook-signature\"]\n  ) {\n    return false;\n  }\n  try {\n    webhook.verify(body, headers);\n    return true;\n  } catch (e) {\n    return false;\n  }\n}\n"
  },
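  {
    "path": "test/test-utils.usage.example.ts",
    "content": "/**\n * Illustrative usage sketch -- not a real spec (vitest only collects\n * *.spec.ts files, see vitest.config.ts) and not wired into CI. It shows\n * how the helpers in ./test-utils compose against a comfyui-api instance on\n * localhost:3000. The response shape (parallel filenames/images arrays) is\n * the same one test/submit-many-jobs.js consumes; the rest is assumption.\n * Assumes it is run from the repo root.\n */\nimport { readFileSync } from \"fs\";\nimport {\n  submitPrompt,\n  checkImage,\n  waitForServerToBeReady,\n} from \"./test-utils\";\n\nasync function main(): Promise<void> {\n  // Poll /ready until the server reports healthy.\n  await waitForServerToBeReady();\n\n  // Load a known-good 512x512 txt2img workflow and submit it synchronously.\n  const prompt = JSON.parse(\n    readFileSync(\"test/workflows/sd1.5-txt2img.json\", \"utf8\")\n  );\n  const resp = await submitPrompt(prompt);\n\n  // filenames[i] names the image whose base64 payload is images[i].\n  await checkImage(resp.filenames[0], resp.images[0], {\n    width: 512,\n    height: 512,\n  });\n  console.log(\"image verified:\", resp.filenames[0]);\n}\n\nmain().catch((e) => {\n  console.error(e);\n  process.exit(1);\n});\n"
  },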
  {
    "path": "test/utils.spec.ts",
    "content": "import { expect, describe, it, beforeEach, afterEach } from \"vitest\";\nimport path from \"path\";\nimport fs from \"fs\";\nimport os from \"os\";\nimport { z } from \"zod\";\nimport { isWorkflow, Workflow } from \"../src/types\";\nimport { parseGitUrl } from \"../src/git-url-parser\";\n\n/**\n * Unit tests for utils.ts functions.\n * These tests verify specific bug fixes without requiring the full ComfyUI environment.\n */\n\ndescribe(\"Workflow Validation\", () => {\n  describe(\"isWorkflow\", () => {\n    it(\"should return true for a valid workflow object\", () => {\n      const validWorkflow: Workflow = {\n        RequestSchema: z.object({\n          prompt: z.string(),\n        }),\n        generateWorkflow: (input) => ({\n          \"1\": {\n            inputs: { text: input.prompt },\n            class_type: \"CLIPTextEncode\",\n          },\n        }),\n      };\n\n      expect(isWorkflow(validWorkflow)).toEqual(true);\n    });\n\n    it(\"should return true for a workflow with optional description and summary\", () => {\n      const workflowWithMeta: Workflow = {\n        RequestSchema: z.object({}),\n        generateWorkflow: () => ({}),\n        description: \"Test workflow description\",\n        summary: \"Test summary\",\n      };\n\n      expect(isWorkflow(workflowWithMeta)).toEqual(true);\n    });\n\n    it(\"should return false for an object missing RequestSchema\", () => {\n      const missingSchema = {\n        generateWorkflow: () => ({}),\n      };\n\n      expect(isWorkflow(missingSchema)).toEqual(false);\n    });\n\n    it(\"should return false for an object missing generateWorkflow\", () => {\n      const missingGenerator = {\n        RequestSchema: z.object({}),\n      };\n\n      expect(isWorkflow(missingGenerator)).toEqual(false);\n    });\n\n    it(\"should return false for null\", () => {\n      expect(isWorkflow(null)).toEqual(false);\n    });\n\n    it(\"should return false for undefined\", () => {\n      expect(isWorkflow(undefined)).toEqual(false);\n    });\n\n    it(\"should return false for primitive values\", () => {\n      expect(isWorkflow(\"string\")).toEqual(false);\n      expect(isWorkflow(123)).toEqual(false);\n      expect(isWorkflow(true)).toEqual(false);\n    });\n\n    it(\"should return false for an empty object\", () => {\n      expect(isWorkflow({})).toEqual(false);\n    });\n\n    it(\"should return false for an array\", () => {\n      expect(isWorkflow([])).toEqual(false);\n    });\n  });\n});\n\ndescribe(\"Workflow Loading\", () => {\n  let tempDir: string;\n\n  beforeEach(() => {\n    tempDir = fs.mkdtempSync(path.join(os.tmpdir(), \"test-workflow-\"));\n  });\n\n  afterEach(() => {\n    fs.rmSync(tempDir, { recursive: true, force: true });\n  });\n\n  describe(\"workflow file structure\", () => {\n    it(\"should recognize .ts files as valid workflow files\", () => {\n      const tsFile = path.join(tempDir, \"test-workflow.ts\");\n      fs.writeFileSync(tsFile, \"// placeholder\");\n\n      const files = fs.readdirSync(tempDir);\n      const tsFiles = files.filter((f) => f.endsWith(\".ts\"));\n\n      expect(tsFiles.length).toEqual(1);\n      expect(tsFiles[0]).toEqual(\"test-workflow.ts\");\n    });\n\n    it(\"should recognize .js files as valid workflow files\", () => {\n      const jsFile = path.join(tempDir, \"test-workflow.js\");\n      fs.writeFileSync(jsFile, \"// placeholder\");\n\n      const files = fs.readdirSync(tempDir);\n      const jsFiles = files.filter((f) => f.endsWith(\".js\"));\n\n      
expect(jsFiles.length).toEqual(1);\n      expect(jsFiles[0]).toEqual(\"test-workflow.js\");\n    });\n\n    it(\"should ignore non-js/ts files\", () => {\n      fs.writeFileSync(path.join(tempDir, \"readme.md\"), \"# Readme\");\n      fs.writeFileSync(path.join(tempDir, \"config.json\"), \"{}\");\n      fs.writeFileSync(path.join(tempDir, \"workflow.ts\"), \"// valid\");\n\n      const files = fs.readdirSync(tempDir);\n      const workflowFiles = files.filter(\n        (f) => f.endsWith(\".ts\") || f.endsWith(\".js\")\n      );\n\n      expect(workflowFiles.length).toEqual(1);\n    });\n\n    it(\"should handle nested directory structures\", () => {\n      // Create nested structure like /workflows/sdxl/txt2img.ts\n      const nestedDir = path.join(tempDir, \"sdxl\");\n      fs.mkdirSync(nestedDir);\n      fs.writeFileSync(path.join(nestedDir, \"txt2img.ts\"), \"// workflow\");\n      fs.writeFileSync(path.join(nestedDir, \"img2img.ts\"), \"// workflow\");\n\n      expect(fs.existsSync(nestedDir)).toEqual(true);\n      expect(fs.statSync(nestedDir).isDirectory()).toEqual(true);\n\n      const nestedFiles = fs.readdirSync(nestedDir);\n      expect(nestedFiles.length).toEqual(2);\n    });\n  });\n\n  describe(\"workflow file naming\", () => {\n    it(\"should derive workflow name from filename without extension\", () => {\n      const filename = \"txt2img.ts\";\n      const workflowName = filename.replace(\".js\", \"\").replace(\".ts\", \"\");\n\n      expect(workflowName).toEqual(\"txt2img\");\n    });\n\n    it(\"should handle filenames with hyphens\", () => {\n      const filename = \"txt2img-with-refiner.ts\";\n      const workflowName = filename.replace(\".js\", \"\").replace(\".ts\", \"\");\n\n      expect(workflowName).toEqual(\"txt2img-with-refiner\");\n    });\n\n    it(\"should handle .js extension removal correctly\", () => {\n      const filename = \"workflow.js\";\n      const workflowName = filename.replace(\".js\", \"\").replace(\".ts\", \"\");\n\n      expect(workflowName).toEqual(\"workflow\");\n    });\n  });\n});\n\ndescribe(\"Utils\", () => {\n  describe(\"installCustomNode - requirements.txt check\", () => {\n    /**\n     * This test verifies the fix for issue #123:\n     * Installation should not fail if a custom node repo is missing requirements.txt\n     *\n     * The fix adds a check using fs.existsSync to verify if requirements.txt exists\n     * before attempting to run pip install.\n     */\n\n    let tempDir: string;\n\n    beforeEach(() => {\n      tempDir = fs.mkdtempSync(path.join(os.tmpdir(), \"test-custom-node-\"));\n    });\n\n    afterEach(() => {\n      // Clean up temp directory\n      fs.rmSync(tempDir, { recursive: true, force: true });\n    });\n\n    it(\"should skip pip install when requirements.txt is missing\", () => {\n      // Simulate a custom node directory without requirements.txt\n      const customNodePath = tempDir;\n      const requirementsPath = path.join(customNodePath, \"requirements.txt\");\n\n      // This simulates the fix logic: check before running pip install\n      const shouldRunPipInstall = fs.existsSync(requirementsPath);\n\n      expect(shouldRunPipInstall).toEqual(false);\n    });\n\n    it(\"should run pip install when requirements.txt exists\", () => {\n      // Simulate a custom node directory with requirements.txt\n      const customNodePath = tempDir;\n      const requirementsPath = path.join(customNodePath, \"requirements.txt\");\n\n      // Create requirements.txt with some dependencies\n      
fs.writeFileSync(requirementsPath, \"numpy>=1.0.0\\ntorch>=2.0.0\\n\");\n\n      // This simulates the fix logic: check before running pip install\n      const shouldRunPipInstall = fs.existsSync(requirementsPath);\n\n      expect(shouldRunPipInstall).toEqual(true);\n    });\n\n    it(\"should handle empty requirements.txt file\", () => {\n      // Even an empty requirements.txt should trigger pip install\n      // (pip handles empty files gracefully)\n      const customNodePath = tempDir;\n      const requirementsPath = path.join(customNodePath, \"requirements.txt\");\n\n      // Create empty requirements.txt\n      fs.writeFileSync(requirementsPath, \"\");\n\n      const shouldRunPipInstall = fs.existsSync(requirementsPath);\n\n      expect(shouldRunPipInstall).toEqual(true);\n    });\n\n    it(\"should handle nested custom node directory structure\", () => {\n      // Some custom nodes have nested directory structures\n      const customNodePath = path.join(tempDir, \"ComfyUI-CustomNode\");\n      fs.mkdirSync(customNodePath);\n\n      const requirementsPath = path.join(customNodePath, \"requirements.txt\");\n\n      // No requirements.txt in nested directory\n      expect(fs.existsSync(requirementsPath)).toEqual(false);\n\n      // Now add requirements.txt\n      fs.writeFileSync(requirementsPath, \"requests>=2.0.0\\n\");\n      expect(fs.existsSync(requirementsPath)).toEqual(true);\n    });\n\n    it(\"should not be confused by similarly named files\", () => {\n      // Ensure we check for exact filename, not partial matches\n      const customNodePath = tempDir;\n\n      // Create files with similar names but not exact match\n      fs.writeFileSync(path.join(customNodePath, \"requirements.txt.bak\"), \"backup\");\n      fs.writeFileSync(path.join(customNodePath, \"requirements\"), \"no extension\");\n      fs.writeFileSync(path.join(customNodePath, \"my-requirements.txt\"), \"wrong prefix\");\n\n      const requirementsPath = path.join(customNodePath, \"requirements.txt\");\n      const shouldRunPipInstall = fs.existsSync(requirementsPath);\n\n      // Should still be false - exact filename must match\n      expect(shouldRunPipInstall).toEqual(false);\n    });\n  });\n});\n\ndescribe(\"parseGitUrl\", () => {\n  describe(\"plain URLs (no ref)\", () => {\n    it(\"should return URL as-is when no ref is specified\", () => {\n      const result = parseGitUrl(\"https://github.com/user/repo\");\n      expect(result.baseUrl).toEqual(\"https://github.com/user/repo\");\n      expect(result.ref).toEqual(null);\n    });\n\n    it(\"should handle .git suffix without ref\", () => {\n      const result = parseGitUrl(\"https://github.com/user/repo.git\");\n      expect(result.baseUrl).toEqual(\"https://github.com/user/repo.git\");\n      expect(result.ref).toEqual(null);\n    });\n\n    it(\"should handle GitLab URLs without ref\", () => {\n      const result = parseGitUrl(\"https://gitlab.com/user/repo\");\n      expect(result.baseUrl).toEqual(\"https://gitlab.com/user/repo\");\n      expect(result.ref).toEqual(null);\n    });\n\n    it(\"should handle Bitbucket URLs without ref\", () => {\n      const result = parseGitUrl(\"https://bitbucket.org/user/repo\");\n      expect(result.baseUrl).toEqual(\"https://bitbucket.org/user/repo\");\n      expect(result.ref).toEqual(null);\n    });\n  });\n\n  describe(\"GitHub URL formats\", () => {\n    it(\"should parse /tree/{ref} format with commit hash\", () => {\n      const result = parseGitUrl(\n        
\"https://github.com/kijai/ComfyUI-KJNodes/tree/204f6d5aae73b10c0fe2fb26e61405fd6337bb77\"\n      );\n      expect(result.baseUrl).toEqual(\"https://github.com/kijai/ComfyUI-KJNodes\");\n      expect(result.ref).toEqual(\"204f6d5aae73b10c0fe2fb26e61405fd6337bb77\");\n    });\n\n    it(\"should parse /tree/{ref} format with branch name\", () => {\n      const result = parseGitUrl(\n        \"https://github.com/user/repo/tree/main\"\n      );\n      expect(result.baseUrl).toEqual(\"https://github.com/user/repo\");\n      expect(result.ref).toEqual(\"main\");\n    });\n\n    it(\"should parse /tree/{ref} format with tag\", () => {\n      const result = parseGitUrl(\n        \"https://github.com/user/repo/tree/v1.0.0\"\n      );\n      expect(result.baseUrl).toEqual(\"https://github.com/user/repo\");\n      expect(result.ref).toEqual(\"v1.0.0\");\n    });\n\n    it(\"should parse /commit/{sha} format\", () => {\n      const result = parseGitUrl(\n        \"https://github.com/user/repo/commit/abc123def456\"\n      );\n      expect(result.baseUrl).toEqual(\"https://github.com/user/repo\");\n      expect(result.ref).toEqual(\"abc123def456\");\n    });\n\n    it(\"should parse /releases/tag/{tag} format\", () => {\n      const result = parseGitUrl(\n        \"https://github.com/user/repo/releases/tag/v2.0.0\"\n      );\n      expect(result.baseUrl).toEqual(\"https://github.com/user/repo\");\n      expect(result.ref).toEqual(\"v2.0.0\");\n    });\n  });\n\n  describe(\"GitLab URL formats\", () => {\n    it(\"should parse /-/tree/{ref} format\", () => {\n      const result = parseGitUrl(\n        \"https://gitlab.com/user/repo/-/tree/main\"\n      );\n      expect(result.baseUrl).toEqual(\"https://gitlab.com/user/repo\");\n      expect(result.ref).toEqual(\"main\");\n    });\n\n    it(\"should parse /-/tree/{ref} format with commit hash\", () => {\n      const result = parseGitUrl(\n        \"https://gitlab.com/user/repo/-/tree/abc123def\"\n      );\n      expect(result.baseUrl).toEqual(\"https://gitlab.com/user/repo\");\n      expect(result.ref).toEqual(\"abc123def\");\n    });\n\n    it(\"should parse /-/commit/{sha} format\", () => {\n      const result = parseGitUrl(\n        \"https://gitlab.com/user/repo/-/commit/abc123def456\"\n      );\n      expect(result.baseUrl).toEqual(\"https://gitlab.com/user/repo\");\n      expect(result.ref).toEqual(\"abc123def456\");\n    });\n\n    it(\"should handle GitLab subgroups\", () => {\n      const result = parseGitUrl(\n        \"https://gitlab.com/group/subgroup/repo/-/tree/develop\"\n      );\n      expect(result.baseUrl).toEqual(\"https://gitlab.com/group/subgroup/repo\");\n      expect(result.ref).toEqual(\"develop\");\n    });\n  });\n\n  describe(\"Bitbucket URL formats\", () => {\n    it(\"should parse /src/{ref} format\", () => {\n      const result = parseGitUrl(\n        \"https://bitbucket.org/user/repo/src/main\"\n      );\n      expect(result.baseUrl).toEqual(\"https://bitbucket.org/user/repo\");\n      expect(result.ref).toEqual(\"main\");\n    });\n\n    it(\"should parse /src/{ref} format with trailing path\", () => {\n      const result = parseGitUrl(\n        \"https://bitbucket.org/user/repo/src/develop/some/path\"\n      );\n      expect(result.baseUrl).toEqual(\"https://bitbucket.org/user/repo\");\n      expect(result.ref).toEqual(\"develop\");\n    });\n\n    it(\"should parse /commits/{sha} format\", () => {\n      const result = parseGitUrl(\n        \"https://bitbucket.org/user/repo/commits/abc123def456\"\n      );\n      
expect(result.baseUrl).toEqual(\"https://bitbucket.org/user/repo\");\n      expect(result.ref).toEqual(\"abc123def456\");\n    });\n  });\n\n  describe(\"Generic @ref format (npm/pip style)\", () => {\n    it(\"should parse repo@ref format\", () => {\n      const result = parseGitUrl(\"https://github.com/user/repo@v1.0.0\");\n      expect(result.baseUrl).toEqual(\"https://github.com/user/repo\");\n      expect(result.ref).toEqual(\"v1.0.0\");\n    });\n\n    it(\"should parse repo.git@ref format\", () => {\n      const result = parseGitUrl(\"https://github.com/user/repo.git@main\");\n      expect(result.baseUrl).toEqual(\"https://github.com/user/repo.git\");\n      expect(result.ref).toEqual(\"main\");\n    });\n\n    it(\"should parse @ref with commit hash\", () => {\n      const result = parseGitUrl(\n        \"https://github.com/user/repo@abc123def456789\"\n      );\n      expect(result.baseUrl).toEqual(\"https://github.com/user/repo\");\n      expect(result.ref).toEqual(\"abc123def456789\");\n    });\n\n    it(\"should handle @ref with GitLab URLs\", () => {\n      const result = parseGitUrl(\"https://gitlab.com/user/repo@feature-branch\");\n      expect(result.baseUrl).toEqual(\"https://gitlab.com/user/repo\");\n      expect(result.ref).toEqual(\"feature-branch\");\n    });\n  });\n\n  describe(\"edge cases\", () => {\n    it(\"should handle branch names with hyphens\", () => {\n      const result = parseGitUrl(\n        \"https://github.com/user/repo/tree/feature-branch-name\"\n      );\n      expect(result.baseUrl).toEqual(\"https://github.com/user/repo\");\n      expect(result.ref).toEqual(\"feature-branch-name\");\n    });\n\n    it(\"should handle branch names with dots\", () => {\n      const result = parseGitUrl(\n        \"https://github.com/user/repo/tree/release-1.0.0\"\n      );\n      expect(result.baseUrl).toEqual(\"https://github.com/user/repo\");\n      expect(result.ref).toEqual(\"release-1.0.0\");\n    });\n\n    it(\"should handle repo names with dots\", () => {\n      const result = parseGitUrl(\n        \"https://github.com/user/repo.name/tree/main\"\n      );\n      expect(result.baseUrl).toEqual(\"https://github.com/user/repo.name\");\n      expect(result.ref).toEqual(\"main\");\n    });\n\n    it(\"should handle repo names with hyphens\", () => {\n      const result = parseGitUrl(\n        \"https://github.com/user/my-awesome-repo/tree/develop\"\n      );\n      expect(result.baseUrl).toEqual(\"https://github.com/user/my-awesome-repo\");\n      expect(result.ref).toEqual(\"develop\");\n    });\n\n    it(\"should handle organization/user names with hyphens\", () => {\n      const result = parseGitUrl(\n        \"https://github.com/my-org/repo/tree/main\"\n      );\n      expect(result.baseUrl).toEqual(\"https://github.com/my-org/repo\");\n      expect(result.ref).toEqual(\"main\");\n    });\n\n    it(\"should prioritize @ref over path-based patterns\", () => {\n      // If someone uses repo@ref, the @ should be parsed, not any path segments\n      const result = parseGitUrl(\"https://github.com/user/repo@v1.0.0\");\n      expect(result.baseUrl).toEqual(\"https://github.com/user/repo\");\n      expect(result.ref).toEqual(\"v1.0.0\");\n    });\n  });\n});\n"
  },
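  {
    "path": "test/walkthroughs/parse-git-url.sketch.ts",
    "content": "/**\n * Illustrative sketch only -- NOT src/git-url-parser.ts. A minimal\n * parseGitUrl consistent with the URL formats exercised by the specs above\n * (GitHub /tree, /commit, /releases/tag; GitLab /-/tree and /-/commit,\n * including subgroups; Bitbucket /src and /commits; npm/pip-style\n * repo@ref). The real parser may handle more; treat this as a reference.\n */\nexport interface ParsedGitUrl {\n  baseUrl: string;\n  ref: string | null;\n}\n\nexport function parseGitUrl(url: string): ParsedGitUrl {\n  // npm/pip style takes priority: an '@' after the scheme separates the ref.\n  const at = url.lastIndexOf(\"@\");\n  if (at > url.indexOf(\"://\") + 2) {\n    return { baseUrl: url.slice(0, at), ref: url.slice(at + 1) };\n  }\n\n  // Path-based forms, most specific first. Greedy matching keeps GitLab\n  // subgroup segments inside baseUrl.\n  const patterns = [\n    /^(.+)\\/-\\/(?:tree|commit)\\/([^/]+)/, // GitLab\n    /^(.+)\\/(?:tree|commit|releases\\/tag)\\/([^/]+)/, // GitHub\n    /^(.+)\\/(?:src|commits)\\/([^/]+)/, // Bitbucket\n  ];\n  for (const pattern of patterns) {\n    const match = url.match(pattern);\n    if (match) {\n      return { baseUrl: match[1], ref: match[2] };\n    }\n  }\n\n  // No recognized ref: hand the URL back unchanged.\n  return { baseUrl: url, ref: null };\n}\n"
  },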
  {
    "path": "test/workflows/sd1.5-img2img.json",
    "content": "{\n  \"3\": {\n    \"inputs\": {\n      \"seed\": 895988836787787,\n      \"steps\": 20,\n      \"cfg\": 8,\n      \"sampler_name\": \"dpmpp_2m\",\n      \"scheduler\": \"normal\",\n      \"denoise\": 0.8700000000000001,\n      \"model\": [\n        \"14\",\n        0\n      ],\n      \"positive\": [\n        \"6\",\n        0\n      ],\n      \"negative\": [\n        \"7\",\n        0\n      ],\n      \"latent_image\": [\n        \"12\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  },\n  \"6\": {\n    \"inputs\": {\n      \"text\": \"photograph of victorian woman with wings, sky clouds, meadow grass\\n\",\n      \"clip\": [\n        \"14\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"7\": {\n    \"inputs\": {\n      \"text\": \"watermark, text\\n\",\n      \"clip\": [\n        \"14\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"8\": {\n    \"inputs\": {\n      \"samples\": [\n        \"3\",\n        0\n      ],\n      \"vae\": [\n        \"14\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"9\": {\n    \"inputs\": {\n      \"filename_prefix\": \"ComfyUI\",\n      \"images\": [\n        \"8\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  },\n  \"10\": {\n    \"inputs\": {\n      \"image\": \"example.png\",\n      \"upload\": \"image\"\n    },\n    \"class_type\": \"LoadImage\",\n    \"_meta\": {\n      \"title\": \"Load Image\"\n    }\n  },\n  \"12\": {\n    \"inputs\": {\n      \"pixels\": [\n        \"10\",\n        0\n      ],\n      \"vae\": [\n        \"14\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEEncode\",\n    \"_meta\": {\n      \"title\": \"VAE Encode\"\n    }\n  },\n  \"14\": {\n    \"inputs\": {\n      \"ckpt_name\": \"dreamshaper_8.safetensors\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint\"\n    }\n  }\n}"
  },
  {
    "path": "test/workflows/sd1.5-multi-output.json",
    "content": "{\n  \"3\": {\n    \"inputs\": {\n      \"seed\": 986110750609924,\n      \"steps\": 20,\n      \"cfg\": 8,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"normal\",\n      \"denoise\": 1,\n      \"model\": [\n        \"4\",\n        0\n      ],\n      \"positive\": [\n        \"6\",\n        0\n      ],\n      \"negative\": [\n        \"7\",\n        0\n      ],\n      \"latent_image\": [\n        \"5\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  },\n  \"4\": {\n    \"inputs\": {\n      \"ckpt_name\": \"dreamshaper_8.safetensors\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint\"\n    }\n  },\n  \"5\": {\n    \"inputs\": {\n      \"width\": 512,\n      \"height\": 512,\n      \"batch_size\": 1\n    },\n    \"class_type\": \"EmptyLatentImage\",\n    \"_meta\": {\n      \"title\": \"Empty Latent Image\"\n    }\n  },\n  \"6\": {\n    \"inputs\": {\n      \"text\": \"beautiful scenery nature glass bottle landscape, , purple galaxy bottle,\",\n      \"clip\": [\n        \"4\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"7\": {\n    \"inputs\": {\n      \"text\": \"text, watermark\",\n      \"clip\": [\n        \"4\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"8\": {\n    \"inputs\": {\n      \"samples\": [\n        \"3\",\n        0\n      ],\n      \"vae\": [\n        \"4\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"11\": {\n    \"inputs\": {\n      \"filename_prefix\": \"ComfyUI1\",\n      \"images\": [\n        \"8\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  },\n  \"13\": {\n    \"inputs\": {\n      \"pixels\": [\n        \"8\",\n        0\n      ],\n      \"vae\": [\n        \"4\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEEncode\",\n    \"_meta\": {\n      \"title\": \"VAE Encode\"\n    }\n  },\n  \"14\": {\n    \"inputs\": {\n      \"seed\": 1120337778160860,\n      \"steps\": 20,\n      \"cfg\": 8,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"normal\",\n      \"denoise\": 1,\n      \"model\": [\n        \"4\",\n        0\n      ],\n      \"positive\": [\n        \"6\",\n        0\n      ],\n      \"negative\": [\n        \"7\",\n        0\n      ],\n      \"latent_image\": [\n        \"13\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  },\n  \"15\": {\n    \"inputs\": {\n      \"samples\": [\n        \"14\",\n        0\n      ],\n      \"vae\": [\n        \"4\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"16\": {\n    \"inputs\": {\n      \"filename_prefix\": \"ComfyUI\",\n      \"images\": [\n        \"15\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  }\n}"
  },
  {
    "path": "test/workflows/sd1.5-parallel-2.json",
    "content": "{\n  \"20\": {\n    \"inputs\": {\n      \"ckpt_name\": \"dreamshaper_8.safetensors\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint\"\n    }\n  },\n  \"21\": {\n    \"inputs\": {\n      \"width\": 512,\n      \"height\": 512,\n      \"batch_size\": 1\n    },\n    \"class_type\": \"EmptyLatentImage\",\n    \"_meta\": {\n      \"title\": \"Empty Latent Image\"\n    }\n  },\n  \"22\": {\n    \"inputs\": {\n      \"text\": \"a beautiful girl\",\n      \"clip\": [\n        \"20\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"23\": {\n    \"inputs\": {\n      \"text\": \"\",\n      \"clip\": [\n        \"20\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"24\": {\n    \"inputs\": {\n      \"seed\": 915296395740982,\n      \"steps\": 20,\n      \"cfg\": 8,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"normal\",\n      \"denoise\": 1,\n      \"model\": [\n        \"20\",\n        0\n      ],\n      \"positive\": [\n        \"22\",\n        0\n      ],\n      \"negative\": [\n        \"23\",\n        0\n      ],\n      \"latent_image\": [\n        \"21\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  },\n  \"25\": {\n    \"inputs\": {\n      \"samples\": [\n        \"24\",\n        0\n      ],\n      \"vae\": [\n        \"20\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"26\": {\n    \"inputs\": {\n      \"filename_prefix\": \"ComfyUI\",\n      \"images\": [\n        \"25\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  },\n  \"27\": {\n    \"inputs\": {\n      \"seed\": 968119624959126,\n      \"steps\": 20,\n      \"cfg\": 8,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"normal\",\n      \"denoise\": 1,\n      \"model\": [\n        \"28\",\n        0\n      ],\n      \"positive\": [\n        \"30\",\n        0\n      ],\n      \"negative\": [\n        \"31\",\n        0\n      ],\n      \"latent_image\": [\n        \"29\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  },\n  \"28\": {\n    \"inputs\": {\n      \"ckpt_name\": \"https://civitai.com/api/download/models/142421?type=Model&format=SafeTensor&size=full&fp=fp16\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint\"\n    }\n  },\n  \"29\": {\n    \"inputs\": {\n      \"width\": 512,\n      \"height\": 512,\n      \"batch_size\": 1\n    },\n    \"class_type\": \"EmptyLatentImage\",\n    \"_meta\": {\n      \"title\": \"Empty Latent Image\"\n    }\n  },\n  \"30\": {\n    \"inputs\": {\n      \"text\": \"a beautiful girl\",\n      \"clip\": [\n        \"28\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"31\": {\n    \"inputs\": {\n      \"text\": \"\",\n      \"clip\": [\n        \"28\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"32\": {\n 
   \"inputs\": {\n      \"samples\": [\n        \"27\",\n        0\n      ],\n      \"vae\": [\n        \"28\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"33\": {\n    \"inputs\": {\n      \"filename_prefix\": \"ComfyUI\",\n      \"images\": [\n        \"32\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  }\n}"
  },
  {
    "path": "test/workflows/sd1.5-parallel-3.json",
    "content": "{\n  \"20\": {\n    \"inputs\": {\n      \"ckpt_name\": \"dreamshaper_8.safetensors\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint\"\n    }\n  },\n  \"21\": {\n    \"inputs\": {\n      \"width\": 512,\n      \"height\": 512,\n      \"batch_size\": 1\n    },\n    \"class_type\": \"EmptyLatentImage\",\n    \"_meta\": {\n      \"title\": \"Empty Latent Image\"\n    }\n  },\n  \"22\": {\n    \"inputs\": {\n      \"text\": \"a beautiful girl\",\n      \"clip\": [\n        \"20\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"23\": {\n    \"inputs\": {\n      \"text\": \"\",\n      \"clip\": [\n        \"20\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"24\": {\n    \"inputs\": {\n      \"seed\": 519460056390706,\n      \"steps\": 20,\n      \"cfg\": 8,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"normal\",\n      \"denoise\": 1,\n      \"model\": [\n        \"20\",\n        0\n      ],\n      \"positive\": [\n        \"22\",\n        0\n      ],\n      \"negative\": [\n        \"23\",\n        0\n      ],\n      \"latent_image\": [\n        \"21\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  },\n  \"25\": {\n    \"inputs\": {\n      \"samples\": [\n        \"24\",\n        0\n      ],\n      \"vae\": [\n        \"20\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"26\": {\n    \"inputs\": {\n      \"filename_prefix\": \"ComfyUI\",\n      \"images\": [\n        \"25\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  },\n  \"27\": {\n    \"inputs\": {\n      \"seed\": 579463667436551,\n      \"steps\": 20,\n      \"cfg\": 8,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"normal\",\n      \"denoise\": 1,\n      \"model\": [\n        \"28\",\n        0\n      ],\n      \"positive\": [\n        \"30\",\n        0\n      ],\n      \"negative\": [\n        \"31\",\n        0\n      ],\n      \"latent_image\": [\n        \"29\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  },\n  \"28\": {\n    \"inputs\": {\n      \"ckpt_name\": \"https://civitai.com/api/download/models/142421?type=Model&format=SafeTensor&size=full&fp=fp16\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint\"\n    }\n  },\n  \"29\": {\n    \"inputs\": {\n      \"width\": 512,\n      \"height\": 512,\n      \"batch_size\": 1\n    },\n    \"class_type\": \"EmptyLatentImage\",\n    \"_meta\": {\n      \"title\": \"Empty Latent Image\"\n    }\n  },\n  \"30\": {\n    \"inputs\": {\n      \"text\": \"a beautiful girl\",\n      \"clip\": [\n        \"28\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"31\": {\n    \"inputs\": {\n      \"text\": \"\",\n      \"clip\": [\n        \"28\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"32\": {\n 
   \"inputs\": {\n      \"samples\": [\n        \"27\",\n        0\n      ],\n      \"vae\": [\n        \"28\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"33\": {\n    \"inputs\": {\n      \"filename_prefix\": \"ComfyUI\",\n      \"images\": [\n        \"32\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  },\n  \"34\": {\n    \"inputs\": {\n      \"seed\": 231666073963398,\n      \"steps\": 20,\n      \"cfg\": 8,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"normal\",\n      \"denoise\": 1,\n      \"model\": [\n        \"35\",\n        0\n      ],\n      \"positive\": [\n        \"37\",\n        0\n      ],\n      \"negative\": [\n        \"38\",\n        0\n      ],\n      \"latent_image\": [\n        \"36\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  },\n  \"35\": {\n    \"inputs\": {\n      \"ckpt_name\": \"https://huggingface.co/Lykon/DreamShaper/resolve/main/DreamShaper_5_beta2_noVae_half_pruned.safetensors?download=true\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint\"\n    }\n  },\n  \"36\": {\n    \"inputs\": {\n      \"width\": 512,\n      \"height\": 512,\n      \"batch_size\": 1\n    },\n    \"class_type\": \"EmptyLatentImage\",\n    \"_meta\": {\n      \"title\": \"Empty Latent Image\"\n    }\n  },\n  \"37\": {\n    \"inputs\": {\n      \"text\": \"a beautiful girl\",\n      \"clip\": [\n        \"35\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"38\": {\n    \"inputs\": {\n      \"text\": \"\",\n      \"clip\": [\n        \"35\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"39\": {\n    \"inputs\": {\n      \"samples\": [\n        \"34\",\n        0\n      ],\n      \"vae\": [\n        \"35\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"40\": {\n    \"inputs\": {\n      \"filename_prefix\": \"ComfyUI\",\n      \"images\": [\n        \"39\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  }\n}"
  },
  {
    "path": "test/workflows/sd1.5-txt2img.json",
    "content": "{\n  \"3\": {\n    \"inputs\": {\n      \"seed\": 712610403220747,\n      \"steps\": 20,\n      \"cfg\": 8,\n      \"sampler_name\": \"euler\",\n      \"scheduler\": \"normal\",\n      \"denoise\": 1,\n      \"model\": [\n        \"4\",\n        0\n      ],\n      \"positive\": [\n        \"6\",\n        0\n      ],\n      \"negative\": [\n        \"7\",\n        0\n      ],\n      \"latent_image\": [\n        \"5\",\n        0\n      ]\n    },\n    \"class_type\": \"KSampler\",\n    \"_meta\": {\n      \"title\": \"KSampler\"\n    }\n  },\n  \"4\": {\n    \"inputs\": {\n      \"ckpt_name\": \"dreamshaper_8.safetensors\"\n    },\n    \"class_type\": \"CheckpointLoaderSimple\",\n    \"_meta\": {\n      \"title\": \"Load Checkpoint\"\n    }\n  },\n  \"5\": {\n    \"inputs\": {\n      \"width\": 512,\n      \"height\": 512,\n      \"batch_size\": 1\n    },\n    \"class_type\": \"EmptyLatentImage\",\n    \"_meta\": {\n      \"title\": \"Empty Latent Image\"\n    }\n  },\n  \"6\": {\n    \"inputs\": {\n      \"text\": \"beautiful scenery nature glass bottle landscape, , purple galaxy bottle,\",\n      \"clip\": [\n        \"4\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"7\": {\n    \"inputs\": {\n      \"text\": \"text, watermark\",\n      \"clip\": [\n        \"4\",\n        1\n      ]\n    },\n    \"class_type\": \"CLIPTextEncode\",\n    \"_meta\": {\n      \"title\": \"CLIP Text Encode (Prompt)\"\n    }\n  },\n  \"8\": {\n    \"inputs\": {\n      \"samples\": [\n        \"3\",\n        0\n      ],\n      \"vae\": [\n        \"4\",\n        2\n      ]\n    },\n    \"class_type\": \"VAEDecode\",\n    \"_meta\": {\n      \"title\": \"VAE Decode\"\n    }\n  },\n  \"9\": {\n    \"inputs\": {\n      \"filename_prefix\": \"ComfyUI\",\n      \"images\": [\n        \"8\",\n        0\n      ]\n    },\n    \"class_type\": \"SaveImage\",\n    \"_meta\": {\n      \"title\": \"Save Image\"\n    }\n  }\n}"
  },
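  {
    "path": "test/workflows/sd1.5-txt2img.sketch.ts",
    "content": "/**\n * Illustrative sketch -- not one of the repo's real example workflows, and\n * the loader's exact export convention is an assumption here. It wraps the\n * adjacent sd1.5-txt2img.json graph in the Workflow shape that\n * test/utils.spec.ts validates via isWorkflow: a zod RequestSchema plus a\n * generateWorkflow(input) function returning the prompt graph.\n */\nimport { z } from \"zod\";\n\nexport const RequestSchema = z.object({\n  prompt: z.string(),\n  negative_prompt: z.string().default(\"text, watermark\"),\n  seed: z.number().int().default(712610403220747),\n});\n\nexport function generateWorkflow(input: z.infer<typeof RequestSchema>) {\n  return {\n    \"3\": {\n      inputs: {\n        seed: input.seed,\n        steps: 20,\n        cfg: 8,\n        sampler_name: \"euler\",\n        scheduler: \"normal\",\n        denoise: 1,\n        model: [\"4\", 0],\n        positive: [\"6\", 0],\n        negative: [\"7\", 0],\n        latent_image: [\"5\", 0],\n      },\n      class_type: \"KSampler\",\n    },\n    \"4\": {\n      inputs: { ckpt_name: \"dreamshaper_8.safetensors\" },\n      class_type: \"CheckpointLoaderSimple\",\n    },\n    \"5\": {\n      inputs: { width: 512, height: 512, batch_size: 1 },\n      class_type: \"EmptyLatentImage\",\n    },\n    \"6\": {\n      inputs: { text: input.prompt, clip: [\"4\", 1] },\n      class_type: \"CLIPTextEncode\",\n    },\n    \"7\": {\n      inputs: { text: input.negative_prompt, clip: [\"4\", 1] },\n      class_type: \"CLIPTextEncode\",\n    },\n    \"8\": {\n      inputs: { samples: [\"3\", 0], vae: [\"4\", 2] },\n      class_type: \"VAEDecode\",\n    },\n    \"9\": {\n      inputs: { filename_prefix: \"ComfyUI\", images: [\"8\", 0] },\n      class_type: \"SaveImage\",\n    },\n  };\n}\n\nexport default {\n  RequestSchema,\n  generateWorkflow,\n  summary: \"SD 1.5 text-to-image\",\n  description: \"Parameterized version of test/workflows/sd1.5-txt2img.json.\",\n};\n"
  },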
  {
    "path": "tsconfig.json",
    "content": "{\n  \"compilerOptions\": {\n    /* Visit https://aka.ms/tsconfig to read more about this file */\n\n    /* Projects */\n    // \"incremental\": true,                              /* Save .tsbuildinfo files to allow for incremental compilation of projects. */\n    // \"composite\": true,                                /* Enable constraints that allow a TypeScript project to be used with project references. */\n    // \"tsBuildInfoFile\": \"./.tsbuildinfo\",              /* Specify the path to .tsbuildinfo incremental compilation file. */\n    // \"disableSourceOfProjectReferenceRedirect\": true,  /* Disable preferring source files instead of declaration files when referencing composite projects. */\n    // \"disableSolutionSearching\": true,                 /* Opt a project out of multi-project reference checking when editing. */\n    // \"disableReferencedProjectLoad\": true,             /* Reduce the number of projects loaded automatically by TypeScript. */\n\n    /* Language and Environment */\n    \"target\": \"ES2021\",                                  /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */\n    // \"lib\": [],                                        /* Specify a set of bundled library declaration files that describe the target runtime environment. */\n    // \"jsx\": \"preserve\",                                /* Specify what JSX code is generated. */\n    // \"experimentalDecorators\": true,                   /* Enable experimental support for legacy experimental decorators. */\n    // \"emitDecoratorMetadata\": true,                    /* Emit design-type metadata for decorated declarations in source files. */\n    // \"jsxFactory\": \"\",                                 /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */\n    // \"jsxFragmentFactory\": \"\",                         /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */\n    // \"jsxImportSource\": \"\",                            /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */\n    // \"reactNamespace\": \"\",                             /* Specify the object invoked for 'createElement'. This only applies when targeting 'react' JSX emit. */\n    // \"noLib\": true,                                    /* Disable including any library files, including the default lib.d.ts. */\n    // \"useDefineForClassFields\": true,                  /* Emit ECMAScript-standard-compliant class fields. */\n    // \"moduleDetection\": \"auto\",                        /* Control what method is used to detect module-format JS files. */\n\n    /* Modules */\n    \"module\": \"nodenext\",                                /* Specify what module code is generated. */\n    // \"rootDir\": \"./\",                                  /* Specify the root folder within your source files. */\n    \"moduleResolution\": \"nodenext\",                     /* Specify how TypeScript looks up a file from a given module specifier. */\n    // \"baseUrl\": \"./\",                                  /* Specify the base directory to resolve non-relative module names. */\n    // \"paths\": {},                                      /* Specify a set of entries that re-map imports to additional lookup locations. 
*/\n    // \"rootDirs\": [],                                   /* Allow multiple folders to be treated as one when resolving modules. */\n    // \"typeRoots\": [],                                  /* Specify multiple folders that act like './node_modules/@types'. */\n    // \"types\": [],                                      /* Specify type package names to be included without being referenced in a source file. */\n    // \"allowUmdGlobalAccess\": true,                     /* Allow accessing UMD globals from modules. */\n    // \"moduleSuffixes\": [],                             /* List of file name suffixes to search when resolving a module. */\n    // \"allowImportingTsExtensions\": true,               /* Allow imports to include TypeScript file extensions. Requires '--moduleResolution bundler' and either '--noEmit' or '--emitDeclarationOnly' to be set. */\n    // \"resolvePackageJsonExports\": true,                /* Use the package.json 'exports' field when resolving package imports. */\n    // \"resolvePackageJsonImports\": true,                /* Use the package.json 'imports' field when resolving imports. */\n    // \"customConditions\": [],                           /* Conditions to set in addition to the resolver-specific defaults when resolving imports. */\n    \"resolveJsonModule\": true,                        /* Enable importing .json files. */\n    // \"allowArbitraryExtensions\": true,                 /* Enable importing files with any extension, provided a declaration file is present. */\n    // \"noResolve\": true,                                /* Disallow 'import's, 'require's or '<reference>'s from expanding the number of files TypeScript should add to a project. */\n\n    /* JavaScript Support */\n    // \"allowJs\": true,                                  /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */\n    // \"checkJs\": true,                                  /* Enable error reporting in type-checked JavaScript files. */\n    // \"maxNodeModuleJsDepth\": 1,                        /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */\n\n    /* Emit */\n    // \"declaration\": true,                              /* Generate .d.ts files from TypeScript and JavaScript files in your project. */\n    // \"declarationMap\": true,                           /* Create sourcemaps for d.ts files. */\n    // \"emitDeclarationOnly\": true,                      /* Only output d.ts files and not JavaScript files. */\n    // \"sourceMap\": true,                                /* Create source map files for emitted JavaScript files. */\n    // \"inlineSourceMap\": true,                          /* Include sourcemap files inside the emitted JavaScript. */\n    // \"outFile\": \"./\",                                  /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */\n    \"outDir\": \"./dist\",                                   /* Specify an output folder for all emitted files. */\n    // \"removeComments\": true,                           /* Disable emitting comments. */\n    // \"noEmit\": true,                                   /* Disable emitting files from a compilation. */\n    // \"importHelpers\": true,                            /* Allow importing helper functions from tslib once per project, instead of including them per-file. 
*/\n    // \"importsNotUsedAsValues\": \"remove\",               /* Specify emit/checking behavior for imports that are only used for types. */\n    // \"downlevelIteration\": true,                       /* Emit more compliant, but verbose and less performant JavaScript for iteration. */\n    // \"sourceRoot\": \"\",                                 /* Specify the root path for debuggers to find the reference source code. */\n    // \"mapRoot\": \"\",                                    /* Specify the location where debugger should locate map files instead of generated locations. */\n    // \"inlineSources\": true,                            /* Include source code in the sourcemaps inside the emitted JavaScript. */\n    // \"emitBOM\": true,                                  /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */\n    // \"newLine\": \"crlf\",                                /* Set the newline character for emitting files. */\n    // \"stripInternal\": true,                            /* Disable emitting declarations that have '@internal' in their JSDoc comments. */\n    // \"noEmitHelpers\": true,                            /* Disable generating custom helper functions like '__extends' in compiled output. */\n    // \"noEmitOnError\": true,                            /* Disable emitting files if any type checking errors are reported. */\n    // \"preserveConstEnums\": true,                       /* Disable erasing 'const enum' declarations in generated code. */\n    // \"declarationDir\": \"./\",                           /* Specify the output directory for generated declaration files. */\n    // \"preserveValueImports\": true,                     /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */\n\n    /* Interop Constraints */\n    // \"isolatedModules\": true,                          /* Ensure that each file can be safely transpiled without relying on other imports. */\n    // \"verbatimModuleSyntax\": true,                     /* Do not transform or elide any imports or exports not marked as type-only, ensuring they are written in the output file's format based on the 'module' setting. */\n    // \"allowSyntheticDefaultImports\": true,             /* Allow 'import x from y' when a module doesn't have a default export. */\n    \"esModuleInterop\": true,                             /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */\n    // \"preserveSymlinks\": true,                         /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */\n    \"forceConsistentCasingInFileNames\": true,            /* Ensure that casing is correct in imports. */\n\n    /* Type Checking */\n    \"strict\": true,                                      /* Enable all strict type-checking options. */\n    // \"noImplicitAny\": true,                            /* Enable error reporting for expressions and declarations with an implied 'any' type. */\n    // \"strictNullChecks\": true,                         /* When type checking, take into account 'null' and 'undefined'. */\n    // \"strictFunctionTypes\": true,                      /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */\n    // \"strictBindCallApply\": true,                      /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. 
*/\n    // \"strictPropertyInitialization\": true,             /* Check for class properties that are declared but not set in the constructor. */\n    // \"noImplicitThis\": true,                           /* Enable error reporting when 'this' is given the type 'any'. */\n    // \"useUnknownInCatchVariables\": true,               /* Default catch clause variables as 'unknown' instead of 'any'. */\n    // \"alwaysStrict\": true,                             /* Ensure 'use strict' is always emitted. */\n    // \"noUnusedLocals\": true,                           /* Enable error reporting when local variables aren't read. */\n    // \"noUnusedParameters\": true,                       /* Raise an error when a function parameter isn't read. */\n    // \"exactOptionalPropertyTypes\": true,               /* Interpret optional property types as written, rather than adding 'undefined'. */\n    // \"noImplicitReturns\": true,                        /* Enable error reporting for codepaths that do not explicitly return in a function. */\n    // \"noFallthroughCasesInSwitch\": true,               /* Enable error reporting for fallthrough cases in switch statements. */\n    // \"noUncheckedIndexedAccess\": true,                 /* Add 'undefined' to a type when accessed using an index. */\n    // \"noImplicitOverride\": true,                       /* Ensure overriding members in derived classes are marked with an override modifier. */\n    // \"noPropertyAccessFromIndexSignature\": true,       /* Enforces using indexed accessors for keys declared using an indexed type. */\n    // \"allowUnusedLabels\": true,                        /* Disable error reporting for unused labels. */\n    // \"allowUnreachableCode\": true,                     /* Disable error reporting for unreachable code. */\n\n    /* Completeness */\n    // \"skipDefaultLibCheck\": true,                      /* Skip type checking .d.ts files that are included with TypeScript. */\n    \"skipLibCheck\": true                                 /* Skip type checking all .d.ts files. */\n  },\n  \"exclude\": [\n    \"example-workflows\",\n    \"test\"\n  ]\n}\n"
  },
  {
    "path": "vitest.config.ts",
    "content": "import { defineConfig } from \"vitest/config\";\n\nexport default defineConfig({\n  test: {\n    testTimeout: 0,\n    hookTimeout: 0,\n    include: [\"test/**/*.spec.ts\"],\n  },\n});\n"
  }
]