Showing preview only (497K chars total). Download the full file or copy to clipboard to get everything.
Repository: SaladTechnologies/comfyui-api
Branch: main
Commit: 58e531acc930
Files: 75
Total size: 472.5 KB
Directory structure:
gitextract_0k49dxwn/
├── .github/
│ └── workflows/
│ ├── build-comfy-base-images.yml
│ └── create-release.yml
├── .gitignore
├── .nvmrc
├── DEVELOPING.md
├── LICENSE
├── README.md
├── build-and-release
├── build-binary
├── claude-endpoint-creation-prompt.md
├── docker/
│ ├── api.dockerfile
│ ├── build-api-images
│ ├── build-comfy-base-images
│ ├── comfyui.dockerfile
│ └── push-comfy-base-images
├── docker-compose.yml
├── example-workflows/
│ ├── flux/
│ │ ├── img2img.json
│ │ ├── img2img.ts
│ │ ├── txt2img.json
│ │ └── txt2img.ts
│ ├── sd1.5/
│ │ ├── img2img.js
│ │ ├── img2img.json
│ │ ├── img2img.ts
│ │ ├── txt2img.js
│ │ ├── txt2img.json
│ │ └── txt2img.ts
│ └── sdxl/
│ ├── img2img.json
│ ├── img2img.ts
│ ├── txt2img-with-refiner.json
│ ├── txt2img-with-refiner.ts
│ ├── txt2img.json
│ └── txt2img.ts
├── generate-workflow
├── manifest.yml
├── package.json
├── scripts/
│ └── smoke-proxy.mjs
├── src/
│ ├── comfy-node-preprocessors.ts
│ ├── comfy.ts
│ ├── commands.ts
│ ├── config.ts
│ ├── credential-resolver.ts
│ ├── event-emitters.ts
│ ├── git-url-parser.ts
│ ├── image-tools.ts
│ ├── index.ts
│ ├── llm-providers.ts
│ ├── proxy-dispatcher.ts
│ ├── remote-storage-manager.ts
│ ├── server.ts
│ ├── storage-providers/
│ │ ├── azure-blob.ts
│ │ ├── hf.ts
│ │ ├── http.ts
│ │ ├── index.ts
│ │ └── s3.ts
│ ├── types.ts
│ ├── utils.ts
│ └── workflows/
│ └── index.ts
├── test/
│ ├── Dockerfile.file-server
│ ├── core.spec.ts
│ ├── docker-image/
│ │ ├── Dockerfile
│ │ ├── Dockerfile.smoketest
│ │ └── link-models
│ ├── file-server.ts
│ ├── llm-providers.spec.ts
│ ├── output/
│ │ └── .gitkeep
│ ├── submit-many-jobs.js
│ ├── test-utils.ts
│ ├── utils.spec.ts
│ └── workflows/
│ ├── sd1.5-img2img.json
│ ├── sd1.5-multi-output.json
│ ├── sd1.5-parallel-2.json
│ ├── sd1.5-parallel-3.json
│ └── sd1.5-txt2img.json
├── tsconfig.json
└── vitest.config.ts
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/workflows/build-comfy-base-images.yml
================================================
name: Build Comfy Base Images

on:
  workflow_dispatch:
    inputs:
      comfy_version:
        description: ComfyUI version
        required: true
        default: "0.19.3"
      torch_version:
        description: PyTorch version
        required: true
        default: "2.8.0"
      cuda_version:
        description: CUDA version
        required: true
        default: "12.8"

jobs:
  build-and-push:
    permissions:
      contents: read
      packages: write
    runs-on: ubuntu-24.04-2core-8gb-75gb
    steps:
      - uses: actions/checkout@v4
      - name: Login to GitHub Container Registry
        run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
      # Pass workflow_dispatch inputs through `env:` and quote them in the
      # shell command, instead of interpolating `${{ inputs.* }}` directly
      # into `run:`. Inputs are free text, so direct interpolation allows
      # shell word-splitting / script injection.
      - name: Build base images
        working-directory: ./docker
        env:
          COMFY_VERSION: ${{ inputs.comfy_version }}
          TORCH_VERSION: ${{ inputs.torch_version }}
          CUDA_VERSION: ${{ inputs.cuda_version }}
        run: ./build-comfy-base-images "$COMFY_VERSION" "$TORCH_VERSION" "$CUDA_VERSION"
      - name: Push base images
        working-directory: ./docker
        env:
          COMFY_VERSION: ${{ inputs.comfy_version }}
          TORCH_VERSION: ${{ inputs.torch_version }}
          CUDA_VERSION: ${{ inputs.cuda_version }}
        run: ./push-comfy-base-images "$COMFY_VERSION" "$TORCH_VERSION" "$CUDA_VERSION"
================================================
FILE: .github/workflows/create-release.yml
================================================
name: Create Release

on:
  workflow_dispatch: {}

jobs:
  build-and-release:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "20"
      - name: Install dependencies
        run: npm install
      - name: Build
        run: npm run build-binary
      - name: Get version from package.json
        id: version
        run: echo "version=$(jq -r .version package.json)" >> "$GITHUB_OUTPUT"
      - name: Get the title and body from the last merged PR
        id: pr-output
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          info=$(gh pr list --state merged --limit 1 --json title,body | jq -r '.[0]')
          # Quote "$info" so the shell does not word-split or glob-expand the
          # JSON payload before jq parses it (ShellCheck SC2086). Unquoted, a
          # PR body containing `*` or similar could be mangled by globbing.
          {
            echo 'title<<EOF'
            echo "$info" | jq -r '.title'
            echo EOF
          } >> "$GITHUB_OUTPUT"
          {
            echo 'body<<EOF'
            echo "$info" | jq -r '.body'
            echo EOF
          } >> "$GITHUB_OUTPUT"
      - name: Create a release
        # NOTE(review): actions/create-release@v1 is archived and unmaintained;
        # consider migrating to `gh release create` or an actively maintained
        # action. Left in place here to avoid changing release behavior.
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ steps.version.outputs.version }}
          release_name: ${{ steps.pr-output.outputs.title }}
          body: ${{ steps.pr-output.outputs.body }}
          draft: true
          prerelease: false
      - name: Upload release artifacts
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          # The `path#label` syntax sets the asset's display name on the release.
          gh release upload ${{ steps.version.outputs.version }} ./bin/comfyui-api#Linux_x64
================================================
FILE: .gitignore
================================================
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# Snowpack dependency directory (https://snowpack.dev/)
web_modules/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional stylelint cache
.stylelintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache
# Next.js build output
.next
out
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# vuepress v2.x temp and cache directory
.temp
.cache
# Docusaurus cache and generated files
.docusaurus
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
# Stores VSCode versions used for testing VSCode extensions
.vscode-test
# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
bin/
*.safetensors
*.ckpt
*.pth
test/docker-image/models
scratch
test/output/*.png
cache/
================================================
FILE: .nvmrc
================================================
v20.18.1
================================================
FILE: DEVELOPING.md
================================================
# Developing ComfyUI-API
This document provides guidelines for developers who want to contribute to the ComfyUI-API project.
It covers setting up the development environment, coding standards, testing procedures, and how to submit contributions.
- [Developing ComfyUI-API](#developing-comfyui-api)
- [Submitting Contributions](#submitting-contributions)
- [Core Design Principles](#core-design-principles)
- [Setting Up the Development Environment](#setting-up-the-development-environment)
- [Testing Procedures](#testing-procedures)
- [Running Tests](#running-tests)
- [Generating New Workflow Endpoints](#generating-new-workflow-endpoints)
- [Automating with an LLM](#automating-with-an-llm)
- [Debugging Custom Workflows](#debugging-custom-workflows)
- [Storage Providers](#storage-providers)
- [Adding a New Storage Provider](#adding-a-new-storage-provider)
## Submitting Contributions
Contributions are welcome!
ComfyUI is a powerful tool with MANY options, and it's likely that not all of them are currently well supported by the `comfyui-api` server.
Please open an issue with as much information as possible about the problem you're facing or the feature you need.
If you have encountered a bug, please include the steps to reproduce it, and any relevant logs or error messages.
If you are able, adding a failing test is the best way to ensure your issue is resolved quickly.
Let's make productionizing ComfyUI as easy as possible!
## Core Design Principles
When contributing to the ComfyUI-API project, please keep the following design principles in mind:
- **Asynchronous Operations**: Use asynchronous programming practices wherever possible to ensure the server remains responsive. Avoid blocking the event loop.
- **Modularity**: Because the range of uses for this API is so broad, strive to keep components modular and loosely coupled. This will make it easier to add new features and maintain existing ones.
- **Don't Duplicate Existing ComfyUI functionality**: Wherever possible, leverage existing ComfyUI api endpoints and functionality, rather than re-implementing it in the API server. Local ComfyUI can be accessed from the API server at `config.comfyURL`.
- **Error Handling**: Implement robust error handling to gracefully manage unexpected situations. Provide clear and informative error messages to users. Errors should never crash the server unless recovery is deemed impossible.
- **Testing**: If your feature or bug fix is significant, please include tests to verify its functionality. This helps maintain the integrity of the codebase.
## Setting Up the Development Environment
```shell
git clone https://github.com/SaladTechnologies/comfyui-api.git
cd comfyui-api
npm install
npm run build-binary
```
This will create a `comfyui-api` binary in the `bin/` directory, which is mounted into the Docker container when you run `docker compose up`.
Whenever you make changes, you will need to re-run `npm run build-binary` to rebuild the binary, and then restart the Docker container to see your changes.
## Testing Procedures
This project uses [vitest](https://vitest.dev/) for testing.
Tests are administered against a locally running instance of the ComfyUI API server, which can be started with Docker Compose, and actual images are generated during the tests.
Additional services are present in the docker-compose file to provide mock storage services for testing uploads and downloads.
These services are not required for normal operation of the API server.
### Running Tests
In one terminal, start the test server:
```shell
docker compose up --build
```
> --build is only needed the first time, or if you make changes to the file-server code.
In another terminal, run the tests:
```shell
npm run quick-test
```
This will take several minutes, but can be done with very modest hardware.
All tests in the `quick-test` suite use SD1.5 models, which are small and fast to run.
The models used are defined in [the manifest](./manifest.yml), as well as in a couple of [test workflows](./test/workflows/)
## Generating New Workflow Endpoints
Since the ComfyUI prompt format is a little obtuse, it's common to wrap the `/prompt` endpoint with a more user-friendly interface.
This can be done by adding conforming `.js` or `.ts` files to the `/workflows` directory in your dockerfile.
You can see some examples in [`./example-workflows`](./example-workflows/).
Typescript files will be automatically transpiled to javascript files, so you can use either.
Endpoints are loaded at runtime via `eval` in the context of `src/workflows`, so you can use any Node.js or TypeScript features you want, including importing other files such as the API config object.
By loading extra endpoints this way, no rebuild is required to add new endpoints, and you can continue using the pre-built binary.
You can see many examples of this in the [Salad Recipes](https://github.com/SaladTechnologies/salad-recipes/tree/master/src) repo, where this API powers all of the ComfyUI recipes.
Here is an example text-to-image workflow file.
```typescript
import { z } from "zod";
import config from "../config";
const ComfyNodeSchema = z.object({
inputs: z.any(),
class_type: z.string(),
_meta: z.any().optional(),
});
type ComfyNode = z.infer<typeof ComfyNodeSchema>;
type ComfyPrompt = Record<string, ComfyNode>;
interface Workflow {
RequestSchema: z.ZodObject<any, any>;
generateWorkflow: (input: any) => Promise<ComfyPrompt> | ComfyPrompt;
description?: string;
summary?: string;
}
const RequestSchema = z.object({
prompt: z.string().describe("The positive prompt for image generation"),
negative_prompt: z
.string()
.optional()
.default("text, watermark")
.describe("The negative prompt for image generation"),
width: z
.number()
.int()
.min(256)
.max(2048)
.optional()
.default(512)
.describe("Width of the generated image"),
height: z
.number()
.int()
.min(256)
.max(2048)
.optional()
.default(512)
.describe("Height of the generated image"),
seed: z
.number()
.int()
.optional()
.default(() => Math.floor(Math.random() * 100000000000))
.describe("Seed for random number generation"),
steps: z
.number()
.int()
.min(1)
.max(100)
.optional()
.default(20)
.describe("Number of sampling steps"),
cfg_scale: z
.number()
.min(0)
.max(20)
.optional()
.default(8)
.describe("Classifier-free guidance scale"),
sampler_name: config.samplers
.optional()
.default("euler")
.describe("Name of the sampler to use"),
scheduler: config.schedulers
.optional()
.default("normal")
.describe("Type of scheduler to use"),
denoise: z
.number()
.min(0)
.max(1)
.optional()
.default(1)
.describe("Denoising strength"),
checkpoint: z
.string()
.refine((val) => config.models.checkpoints.all.includes(val))
.optional()
.default(config.warmupCkpt || config.models.checkpoints.all[0])
.describe("Checkpoint to use"),
});
type InputType = z.infer<typeof RequestSchema>;
function generateWorkflow(input: InputType): ComfyPrompt {
return {
"3": {
inputs: {
seed: input.seed,
steps: input.steps,
cfg: input.cfg_scale,
sampler_name: input.sampler_name,
scheduler: input.scheduler,
denoise: input.denoise,
model: ["4", 0],
positive: ["6", 0],
negative: ["7", 0],
latent_image: ["5", 0],
},
class_type: "KSampler",
_meta: {
title: "KSampler",
},
},
"4": {
inputs: {
ckpt_name: input.checkpoint,
},
class_type: "CheckpointLoaderSimple",
_meta: {
title: "Load Checkpoint",
},
},
"5": {
inputs: {
width: input.width,
height: input.height,
batch_size: 1,
},
class_type: "EmptyLatentImage",
_meta: {
title: "Empty Latent Image",
},
},
"6": {
inputs: {
text: input.prompt,
clip: ["4", 1],
},
class_type: "CLIPTextEncode",
_meta: {
title: "CLIP Text Encode (Prompt)",
},
},
"7": {
inputs: {
text: input.negative_prompt,
clip: ["4", 1],
},
class_type: "CLIPTextEncode",
_meta: {
title: "CLIP Text Encode (Prompt)",
},
},
"8": {
inputs: {
samples: ["3", 0],
vae: ["4", 2],
},
class_type: "VAEDecode",
_meta: {
title: "VAE Decode",
},
},
"9": {
inputs: {
filename_prefix: "output",
images: ["8", 0],
},
class_type: "SaveImage",
_meta: {
title: "Save Image",
},
},
};
}
const workflow: Workflow = {
RequestSchema,
generateWorkflow,
summary: "Text to Image",
description: "Generate an image from a text prompt",
};
export default workflow;
```
Note your file MUST export a `Workflow` object, which contains a `RequestSchema` and a `generateWorkflow` function. The `RequestSchema` is a zod schema that describes the input to the workflow, and the `generateWorkflow` function takes the input and returns a ComfyUI API-format prompt.
The workflow endpoints will follow whatever directory structure you provide.
For example, a directory structure like this:
```shell
/workflows
└── sdxl
├── img2img.ts
├── txt2img-with-refiner.ts
└── txt2img.ts
```
Would yield the following endpoints:
- `POST /workflow/sdxl/img2img`
- `POST /workflow/sdxl/txt2img-with-refiner`
- `POST /workflow/sdxl/txt2img`
These endpoints will be present in the swagger docs, and can be used to interact with the API.
If you provide descriptions in your zod schemas, these will be used to create a markdown table of inputs in the swagger docs.
### Automating with an LLM
Creating these endpoints can be done mostly automatically by a large language model, given the JSON prompt graph.
A [system prompt](./claude-endpoint-creation-prompt.md) to do this is included in this repository, as is [a script that uses this prompt](./generate-workflow) to create endpoints. It requires `jq` and `curl` to be installed.
```shell
./generate-workflow <inputFile> <outputFile>
```
The script supports two LLM providers and picks one based on your environment:
| Provider | Environment variable | Model used |
|---|---|---|
| Anthropic (default) | `ANTHROPIC_API_KEY` | `claude-sonnet-4-20250514` |
| [MiniMax](https://www.minimax.io/) | `MINIMAX_API_KEY` | `MiniMax-M2.7` (204 K context) |
When both variables are set, Anthropic is preferred.
MiniMax uses the OpenAI-compatible endpoint (`https://api.minimax.io/v1`) so no extra dependencies are needed.
Where `<inputFile>` is the JSON prompt graph, and `<outputFile>` is the output file to write the generated workflow to.
As with all AI-generated code, it is strongly recommended to review the generated code before using it in production.
### Debugging Custom Workflows
When developing or troubleshooting custom workflows, enable debug logging to see detailed information about what's happening under the hood.
#### Enabling Debug Logging
Set the `LOG_LEVEL` environment variable to `debug`:
```shell
# Docker
docker run -e LOG_LEVEL=debug ...
# Docker Compose
environment:
- LOG_LEVEL=debug
```
#### What Debug Logging Shows
With `LOG_LEVEL=debug`, the server will log:
1. **Workflow Loading** (at startup):
- Which workflow directories are being scanned
- TypeScript files being transpiled
- Each workflow file being evaluated
- Successfully loaded workflows
- Warnings for files that don't export valid Workflow objects
- Errors if workflow files fail to evaluate (with stack traces)
2. **Workflow Execution** (per request):
- The input received from the request (`Workflow input received`)
- The generated ComfyUI prompt (`Generated ComfyUI prompt from workflow`)
- The full request body sent to `/prompt` (`Sending request to /prompt endpoint`)
- Any errors from the `/prompt` endpoint (including the full prompt that failed)
#### Common Issues and Solutions
**Problem: 400 error from `/prompt` endpoint with validation errors**
Debug logs will show the exact prompt being sent. Common causes:
- Missing required nodes (e.g., no `SaveImage` node with `filename_prefix`)
- Invalid node references (e.g., referencing a node ID that doesn't exist)
- Invalid input types (e.g., string where number expected)
Check the `promptRequestBody` in the error log to see exactly what was sent.
**Problem: Workflow file not loading**
Debug logs will show if the file:
- Failed to transpile (TypeScript syntax error)
- Failed to evaluate (JavaScript runtime error)
- Doesn't export a valid Workflow object
**Problem: Workflow generates wrong output**
Use debug logs to compare:
1. The `input` received by the workflow
2. The `prompt` generated by your `generateWorkflow` function
3. Compare against a known-working prompt from ComfyUI's web interface
#### Example Debug Output
```
{"level":30,"workflow":"txt2img","msg":"Workflow input received","input":{"prompt":"a cat","width":512}}
{"level":30,"workflow":"txt2img","msg":"Generated ComfyUI prompt from workflow","prompt":{"3":{"inputs":{"seed":123...}}}}
{"level":30,"workflow":"txt2img","msg":"Sending request to /prompt endpoint","promptRequestBody":{...}}
{"level":30,"workflow":"txt2img","msg":"Workflow completed successfully","status":200}
```
When a workflow fails:
```
{"level":50,"workflow":"txt2img","msg":"Workflow request to /prompt endpoint failed","status":400,"error":"Prompt must contain a node with a \"filename_prefix\" input","location":"prompt","promptRequestBody":{...}}
```
#### Inspecting Prompts Without Debug Logging
If you can't enable debug logging, you can still inspect your generated prompts by:
1. **Using the `/docs` endpoint**: Access the Swagger UI at `http://localhost:3000/docs` to test your workflow endpoints interactively
2. **Testing generateWorkflow locally**: Import your workflow file and call `generateWorkflow()` with test inputs to see the output
3. **Comparing with ComfyUI**: Export a working prompt from ComfyUI's web interface and compare it to your generated prompt
## Storage Providers
Storage providers are modular components that handle the downloading of models and input media, as well as the uploading of completed outputs.
The ComfyUI API server supports multiple storage backends, each with its own configuration and usage.
They all live in `src/storage-providers/` and must be exported in `src/storage-providers/index.ts`.
They are defined by the `StorageProvider` interface in `src/types.ts`:
```typescript
export interface StorageProvider {
/**
* The key in a request body that indicates this storage provider should be used for upload.
* Must be unique across all storage providers, and must be included if `uploadFile` is implemented.
*/
requestBodyUploadKey?: string;
/**
* The zod schema for the request body field that indicates this storage provider should
* be used for upload. Must be included if `requestBodyUploadKey` is defined.
*/
requestBodyUploadSchema?: z.ZodObject<any, any>;
/**
* Takes the inputs from the request body and generates a URL for uploading.
* @param inputs
*/
createUrl(inputs: any): string;
/**
* Test if the given URL can be handled by this storage provider.
* @param url URL to test
*/
testUrl(url: string): boolean;
/**
* Upload a file to the given URL.
* @param url URL to upload to
* @param fileOrPath File path or buffer to upload
* @param contentType MIME type of the file
*
* @returns An Upload object that can be used to start and abort the upload.
*/
uploadFile?(
url: string,
fileOrPath: string | Buffer,
contentType: string
): Upload;
/**
* Download a file from the given URL to the specified output directory.
* @param url URL to download from
* @param outputDir Directory to save the downloaded file
* @param filenameOverride Optional filename to use instead of auto-generated one
*
* @resolves The path to the downloaded file
*/
downloadFile?(
url: string,
outputDir: string,
filenameOverride?: string
): Promise<string>;
}
```
- Each storage provider must implement the `StorageProvider` interface, which includes methods for creating upload URLs, testing if a URL can be handled by the provider, uploading files, and downloading files.
- The server will automatically select the appropriate storage provider based on the URL provided in the request body, using the `testUrl` method of each provider to determine which one can handle the URL.
- Upload and download methods are optional, as some providers may only support one or the other.
### Adding a New Storage Provider
To add a new storage provider, follow these steps:
1. Create a new file in the `src/storage-providers/` directory for your provider, e.g., `src/storage-providers/my-provider.ts`.
2. Implement the `StorageProvider` interface in your new file. **Be sure to use asynchronous methods** wherever possible to avoid blocking the event loop.
3. Export your provider in `src/storage-providers/index.ts`, making sure to add it to the `storageProviders` array.
4. **Always keep the HTTPStorageProvider as the last provider in the list**, as it acts as a catch-all for any URLs not matched by other providers.
See the existing providers for examples of how to implement the interface.
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2025 Salad Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
# ComfyUI API - A Stateless and Extendable API for ComfyUI
A simple wrapper that facilitates using [ComfyUI](https://github.com/comfyanonymous/ComfyUI/) as a stateless API, either by receiving images in the response, or by sending completed images to a webhook
- [ComfyUI API - A Stateless and Extendable API for ComfyUI](#comfyui-api---a-stateless-and-extendable-api-for-comfyui)
- [Download and Use](#download-and-use)
- [Features](#features)
- [Full ComfyUI Support](#full-comfyui-support)
- [Stateless API](#stateless-api)
- [Request Format](#request-format)
- [Per-Request Credentials](#per-request-credentials)
- [Response Format](#response-format)
- [Example Usage](#example-usage)
- [Base64 Response](#base64-response)
- [Webhook Response with Base64 Images](#webhook-response-with-base64-images)
- [S3 Urls in Response](#s3-urls-in-response)
- [S3 Urls in Webhook Payload](#s3-urls-in-webhook-payload)
- [Azure Blob Urls in Response](#azure-blob-urls-in-response)
- [Azure Blob Urls in Webhook Payload](#azure-blob-urls-in-webhook-payload)
- [Model Manifest](#model-manifest)
- [Downloading Behavior](#downloading-behavior)
- [LRU Caching](#lru-caching)
- [Modular Storage Backends](#modular-storage-backends)
- [S3-Compatible Storage](#s3-compatible-storage)
- [Huggingface Repository](#huggingface-repository)
- [Azure Blob Storage](#azure-blob-storage)
- [HTTP](#http)
- [Image To Image Workflows](#image-to-image-workflows)
- [Dynamic Model Loading](#dynamic-model-loading)
- [On-Demand Model Download Endpoint](#on-demand-model-download-endpoint)
- [Authentication Types](#authentication-types)
- [Server-side image processing](#server-side-image-processing)
- [Probes](#probes)
- [API Configuration Guide](#api-configuration-guide)
- [Environment Variables](#environment-variables)
- [Kubernetes Deployment: Proxy Environment Variables](#kubernetes-deployment-proxy-environment-variables)
- [Configuration Details](#configuration-details)
- [Additional Notes](#additional-notes)
- [Using Synchronously](#using-synchronously)
- [Using with Webhooks](#using-with-webhooks)
- [prompt.complete](#promptcomplete)
- [prompt.failed](#promptfailed)
- [Validating Webhooks](#validating-webhooks)
- [Node.js Example](#nodejs-example)
- [Python Example](#python-example)
- [DEPRECATED: Legacy Webhook Behavior](#deprecated-legacy-webhook-behavior)
- [output.complete](#outputcomplete)
- [prompt.failed (legacy)](#promptfailed-legacy)
- [System Events](#system-events)
- [status](#status)
- [progress](#progress)
- [progress\_state](#progress_state)
- [executing](#executing)
- [execution\_start](#execution_start)
- [execution\_cached](#execution_cached)
- [executed](#executed)
- [execution\_success](#execution_success)
- [execution\_interrupted](#execution_interrupted)
- [execution\_error](#execution_error)
- [file\_downloaded](#file_downloaded)
- [file\_uploaded](#file_uploaded)
- [file\_deleted](#file_deleted)
- [Prebuilt Docker Images](#prebuilt-docker-images)
- [Considerations for Running on SaladCloud](#considerations-for-running-on-saladcloud)
- [Custom Workflows](#custom-workflows)
- [Contributing](#contributing)
- [Architecture](#architecture)
## Download and Use
Either use a [pre-built Docker image](#prebuilt-docker-images), or build your own.
Download the latest version from the release page, and copy it into your existing ComfyUI dockerfile.
You can find good base dockerfiles in the [docker](./docker) directory.
There are also example dockerfiles for popular models in the [SaladCloud Recipes Repo](https://github.com/SaladTechnologies/salad-recipes/tree/master/src).
If you have your own ComfyUI dockerfile, you can add the comfyui-api server to it like so:
```dockerfile
# Change this to the version you want to use
ARG api_version=1.18.1
# Download the comfyui-api binary, and make it executable
ADD https://github.com/SaladTechnologies/comfyui-api/releases/download/${api_version}/comfyui-api .
RUN chmod +x comfyui-api
# Set CMD to launch the comfyui-api binary. The comfyui-api binary will launch ComfyUI as a child process.
CMD ["./comfyui-api"]
```
The server will be available on port `3000` by default, but this can be customized with the `PORT` environment variable.
The server hosts swagger docs at `/docs`, which can be used to interact with the API.
## Features
- **Full Power Of ComfyUI**: The server supports the full ComfyUI /prompt API, and can be used to execute any ComfyUI workflow.
- **Verified Model/Workflow Support**: Stable Diffusion 1.5, Stable Diffusion XL, Stable Diffusion 3.5, Flux, AnimateDiff, LTX Video, Hunyuan Video, CogVideoX, Mochi Video, Cosmos 1.0. My assumption is more model types are supported, but these are the ones I have verified.
- **Stateless API**: The server is stateless, and can be scaled horizontally to handle more requests.
- **Swagger Docs**: The server hosts swagger docs at `/docs`, which can be used to interact with the API.
- **"Synchronous" Support**: The server will return base64-encoded images directly in the response, if no async behavior is requested.
- **Async Support via Webhooks**: The server can send completed outputs to a webhook URL, allowing for asynchronous processing.
- **Modular Storage Backends**: Completed outputs can be sent base64-encoded to a webhook, or uploaded to any s3-compatible storage, an http endpoint, a huggingface repo, or azure blob storage. All of these can be used to download input media as well. More storage backends can be added easily. Supports an optional LRU cache for downloaded models and files to keep local storage from overflowing.
- **Warmup Workflow**: The server can be configured to run a warmup workflow on startup, which can be used to load and warm up models, and to ensure the server is ready to accept requests.
- **Return Images In PNG (default), JPEG, or WebP**: The server can return images in PNG, JPEG, or WebP format, via a parameter in the API request. Most options supported by [sharp](https://sharp.pixelplumbing.com/) are supported.
- **Probes**: The server has two probes, `/health` and `/ready`, which can be used to check the server's health and readiness to receive traffic.
- **Dynamic Workflow Endpoints**: Automatically mount new workflow endpoints by adding conforming `.js` or `.ts` files to the `/workflows` directory in your docker image. See [the guide](./DEVELOPING.md#generating-new-workflow-endpoints) for more information. A [Claude 4 Sonnet](https://claude.ai) [prompt](./claude-endpoint-creation-prompt.md) is included to assist in automating this process.
- **Bring Your Own Models And Extensions**: Use any model or extension you want by adding them to the normal ComfyUI directories `/opt/ComfyUI/`. You can configure a [manifest file](#model-manifest) to download models and install extensions automatically on startup.
- **Dynamic Model Loading**: If you provide a URL in a model-loading node, the server will locally cache the model automatically before executing the workflow.
- **On-Demand Model Download**: Trigger model downloads via a dedicated API endpoint, with support for both synchronous and asynchronous operations.
- **Execution Stats**: The server will return [execution stats in the response](#response-format).
- **Works Great with SaladCloud**: The server is designed to work well with SaladCloud, and can be used to host ComfyUI on the SaladCloud platform. It is likely to work well with other platforms as well.
- **Manages Deletion Cost**: _ONLY ON SALAD_. The server will automatically set the instance deletion cost to the queue length, so that busier nodes are less likely to be scaled in while they are processing requests.
- **Single Binary**: The server is distributed as a single binary, and can be run with no dependencies.
- **Websocket Events Via Webhook**: The server can forward ComfyUI websocket events to a configured webhook, which can be used to monitor the progress of a workflow.
- **Friendly License**: The server is distributed under the MIT license, and can be used for any purpose. All of its dependencies are also MIT or Apache 2.0 licensed, except ComfyUI itself, which is GPL-3.0 licensed.
## Full ComfyUI Support
ComfyUI API sits in front of ComfyUI, and uses the ComfyUI `/prompt` API to execute workflows, so any API-formatted prompt can be executed by the server. Before queueing the prompt, the server will download any required inputs, such as images. It also overrides the `filename_prefix` field in the prompt to ensure that output files are saved with a unique filename. Once the prompt is queued, the server will wait for the prompt to complete, and then return the outputs in the response body, via a webhook, or upload them to S3, depending on the request parameters. Because of this, anything you can run in ComfyUI can be run in the ComfyUI API server, including custom nodes and workflows, and any models ComfyUI supports.
## Stateless API
The ComfyUI API server is designed to be stateless, meaning that it does not store any state between requests. This allows the server to be scaled horizontally behind a load balancer, and to handle more requests by adding more instances of the server. The server uses a configurable warmup workflow to ensure that ComfyUI is ready to accept requests, and to load any required models. The server also self-hosts swagger docs and an openapi spec at `/docs`, which can be used to interact with the API.
### Request Format
Prompts are submitted to the server via the `POST /prompt` endpoint, which accepts a JSON body containing the prompt graph, as well as any additional parameters such as the webhook URL, S3 bucket and prefix, and image conversion options. A request may look something like:
```json
{
"id": "123e4567-e89b-12d3-a456-426614174000",
"prompt": {
"1": {
"inputs": {
"image": "https://salad-benchmark-assets.download/coco2017/train2017/000000000009.jpg",
"upload": "image"
},
"class_type": "LoadImage"
}
},
"webhook_v2": "https://example.com/webhook",
"convert_output": {
"format": "jpeg",
"options": {
"quality": 80,
"progressive": true
}
}
}
```
- Only the `prompt` field is required. The other fields are optional, and can be omitted if not needed.
- Your prompt must be a valid ComfyUI prompt graph, which is a JSON object where each key is a node ID, and the value is an object containing the node's inputs, class type, and optional metadata.
- Your prompt must include a node that saves an output, such as a `SaveImage` node.
### Per-Request Credentials
You can provide authentication credentials for protected model URLs directly in the prompt request using the `credentials` field. This allows downloading gated models (like Hugging Face gated models) or private models from S3/Azure without configuring environment variables.
```json
{
"prompt": { ... },
"credentials": [
{
"url_pattern": "https://huggingface.co/**",
"auth": {
"type": "bearer",
"token": "hf_xxxxxxxxxxxxx"
}
},
{
"url_pattern": "s3://my-private-bucket/**",
"auth": {
"type": "s3",
"access_key_id": "AKIAIOSFODNN7EXAMPLE",
"secret_access_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
}
}
]
}
```
Each credential entry has:
- `url_pattern`: A glob-style pattern to match URLs. Supports:
- `*` matches any characters except `/`
- `**` matches any characters including `/`
- `?` matches a single character
- `auth`: Authentication configuration (see [Authentication Types](#authentication-types) for all supported types)
**Pattern Examples:**
- `https://huggingface.co/**` - matches all Hugging Face URLs
- `https://*.s3.amazonaws.com/**` - matches any S3 bucket URL
- `s3://my-bucket/*` - matches files in the root of `my-bucket`
- `s3://my-bucket/**` - matches all files in `my-bucket` including subdirectories
Credentials are matched in order - the first matching pattern wins. This allows you to provide different credentials for different sources in a single request.
### Response Format
For async requests (i.e. when a webhook or S3 upload is used), the server will return a `202 Accepted` response immediately, and the outputs will be sent to the webhook or uploaded to S3 in the background.
For synchronous requests (i.e. when no webhook is provided, or when the upload block's `async` field is `false`), the server will return a `200 OK` response once the prompt has completed, with a body containing the outputs. The response body will have the following format:
```json
{
"id": "123e4567-e89b-12d3-a456-426614174000",
"prompt": { ... },
"images": [
"base64-encoded-image-1",
"base64-encoded-image-2"
],
"filenames": [
"output-filename-1.png",
"output-filename-2.png"
],
"stats": {
"comfy_execution": {
"total": {
"start": 1625247600000,
"end": 1625247605000,
"duration": 5000
},
"nodes": {
"1": {
"start": 1625247600000
},
"2": {
"start": 1625247601000
}
}
},
"preprocess_time": 1500,
"upload_time": 1,
"total_time": 6576
}
}
```
If you requested image conversion, the images will be in the requested format (e.g. JPEG or WebP) instead of PNG.
### Example Usage
#### Base64 Response
**Request:**
```json
{
"prompt": { ... }
}
```
**Response:**
```json
{
"id": "generated-uuid",
"prompt": { ... },
"images": ["base64-encoded-image-1", "base64-encoded-image-2"],
"filenames": ["generated-uuid_ComfyUI_0.png", "generated-uuid_ComfyUI_1.png"],
"stats": {
"comfy_execution": {
"total": {
"start": 1700000000000,
"end": 1700000005000,
"duration": 5000
},
"nodes": {
"3": {
"start": 1700000000000
},
...
}
},
"preprocess_time": 1200,
"upload_time": 1,
"total_time": 6205
}
}
```
#### Webhook Response with Base64 Images
**Request:**
```json
{
"prompt": { ... },
"webhook_v2": "https://example.com/webhook"
}
```
**HTTP Response: 202 Accepted**
```json
{
"id": "generated-uuid",
"status": "ok",
"webhook_v2": "https://example.com/webhook",
"prompt": { ... }
}
```
**Webhook Payload**
```json
{
"type": "prompt.complete",
"id": "generated-uuid",
"prompt": { ... },
"webhook_v2": "https://example.com/webhook",
"images": [
"base64-encoded-image-1",
"base64-encoded-image-2"
],
"filenames": [
"output-filename-1.png",
"output-filename-2.png"
],
"stats": {
"comfy_execution": {
"total": {
"start": 1700000000000,
"end": 1700000005000,
"duration": 5000
},
"nodes": {
"3": {
"start": 1700000000000
},
...
}
},
"preprocess_time": 1200,
"upload_time": 1,
"total_time": 6205
}
}
```
#### S3 Urls in Response
**Request:**
```json
{
"prompt": { ... },
"s3": {
"bucket": "my-bucket",
"prefix": "outputs/",
"async": false
}
}
```
**Response:**
```json
{
"id": "generated-uuid",
"prompt": { ... },
"images": [
"s3://my-bucket/outputs/generated-uuid_ComfyUI_0.png",
"s3://my-bucket/outputs/generated-uuid_ComfyUI_1.png"
],
"filenames": [
"generated-uuid_ComfyUI_0.png",
"generated-uuid_ComfyUI_1.png"
],
"stats": {
"comfy_execution": {
"total": {
"start": 1700000000000,
"end": 1700000005000,
"duration": 5000
},
"nodes": {
"3": {
"start": 1700000000000
},
...
}
},
"preprocess_time": 1200,
"upload_time": 300,
"total_time": 6505
}
}
```
#### S3 Urls in Webhook Payload
**Request:**
```json
{
"prompt": { ... },
"s3": {
"bucket": "my-bucket",
"prefix": "outputs/"
},
"webhook_v2": "https://example.com/webhook"
}
```
**HTTP Response: 202 Accepted**
```json
{
"id": "generated-uuid",
"status": "ok",
"webhook_v2": "https://example.com/webhook",
"s3": {
"bucket": "my-bucket",
    "prefix": "outputs/"
  },
"prompt": { ... }
}
```
**Webhook Payload**
```json
{
"type": "prompt.complete",
"id": "generated-uuid",
"prompt": { ... },
"webhook_v2": "https://example.com/webhook",
"s3": {
"bucket": "my-bucket",
"prefix": "outputs/"
},
"images": [
"s3://my-bucket/outputs/generated-uuid_ComfyUI_0.png",
"s3://my-bucket/outputs/generated-uuid_ComfyUI_1.png"
],
"filenames": [
"generated-uuid_ComfyUI_0.png",
"generated-uuid_ComfyUI_1.png"
],
"stats": {
"comfy_execution": {
"total": {
"start": 1700000000000,
"end": 1700000005000,
"duration": 5000
},
"nodes": {
"3": {
"start": 1700000000000
},
...
}
},
"preprocess_time": 1200,
"upload_time": 300,
"total_time": 6505
}
}
```
#### Azure Blob Urls in Response
**Request:**
```json
{
"prompt": { ... },
"azure_blob_upload": {
"container": "my-container",
"blob_prefix": "outputs/",
"async": false
}
}
```
**Response:**
```json
{
"id": "generated-uuid",
"prompt": { ... },
"images": [
"https://<your-account>.blob.core.windows.net/my-container/outputs/generated-uuid_ComfyUI_0.png",
"https://<your-account>.blob.core.windows.net/my-container/outputs/generated-uuid_ComfyUI_1.png"
],
"filenames": [
"generated-uuid_ComfyUI_0.png",
"generated-uuid_ComfyUI_1.png"
],
"stats": {
"comfy_execution": {
"total": {
"start": 1700000000000,
"end": 1700000005000,
"duration": 5000
},
"nodes": {
"3": {
"start": 1700000000000
},
...
}
},
"preprocess_time": 1200,
"upload_time": 300,
"total_time": 6505
}
}
```
#### Azure Blob Urls in Webhook Payload
**Request:**
```json
{
"prompt": { ... },
"azure_blob_upload": {
"container": "my-container",
"blob_prefix": "outputs/"
},
"webhook_v2": "https://example.com/webhook"
}
```
**HTTP Response: 202 Accepted**
```json
{
"id": "generated-uuid",
"status": "ok",
"webhook_v2": "https://example.com/webhook",
"azure_blob_upload": {
    "container": "my-container",
    "blob_prefix": "outputs/"
  },
"prompt": { ... }
}
```
**Webhook Payload**
```json
{
"type": "prompt.complete",
"id": "generated-uuid",
"prompt": { ... },
"webhook_v2": "https://example.com/webhook",
"azure_blob_upload": {
"container": "my-container",
"blob_prefix": "outputs/"
},
"images": [
"https://<your-account>.blob.core.windows.net/my-container/outputs/generated-uuid_ComfyUI_0.png",
"https://<your-account>.blob.core.windows.net/my-container/outputs/generated-uuid_ComfyUI_1.png"
],
"filenames": [
"generated-uuid_ComfyUI_0.png",
"generated-uuid_ComfyUI_1.png"
],
"stats": {
"comfy_execution": {
"total": {
"start": 1700000000000,
"end": 1700000005000,
"duration": 5000
},
"nodes": {
"3": {
"start": 1700000000000
},
...
}
},
"preprocess_time": 1200,
"upload_time": 300,
"total_time": 6505
}
}
```
## Model Manifest
The server can be configured to download models and install extensions automatically on startup, by providing a manifest file in either JSON or YAML format. The manifest filepath can be provided via the `MANIFEST` environment variable, or the full manifest as a JSON string via the `MANIFEST_JSON` environment variable. If both are provided, the `MANIFEST_JSON` variable will take precedence.
The manifest file should have the following format (all fields are optional):
```yaml
apt:
- package1
- package2
pip:
- package3
- package4
custom_nodes:
- node-name-from-comfy-registry
- https://github.com/username/repo
- https://github.com/username/repo/tree/commit-hash-or-branch
- https://github.com/username/repo@v1.0.0
models:
before_start:
- url: https://example.com/model.ckpt
local_path: /opt/ComfyUI/models/checkpoints/model1.ckpt
- url: s3://my-bucket/path/to/model.safetensors
local_path: /opt/ComfyUI/models/checkpoints/model2.safetensors
after_start:
- url: https://example.com/another-model.ckpt
local_path: /opt/ComfyUI/models/checkpoints/model3.ckpt
```
If a manifest is provided, the server will perform the following in order:
1. Install any apt packages listed in the `apt` field.
2. Install any pip packages listed in the `pip` field. Uses `uv`, otherwise falls back to `pip`.
3. Install any custom nodes listed in the `custom_nodes` field, using the `comfy` cli tool if available and a plain string is provided, or by cloning the provided git repository if a URL is provided. You can pin a specific commit, branch, or tag using various URL formats:
- **GitHub**: `/tree/{ref}`, `/commit/{sha}`, `/releases/tag/{tag}`
- **GitLab**: `/-/tree/{ref}`, `/-/commit/{sha}`
- **Bitbucket**: `/src/{ref}`, `/commits/{sha}`
- **Generic**: `repo@{ref}` (npm/pip style, e.g., `https://github.com/user/repo@v1.0.0`)
Example: `https://github.com/kijai/ComfyUI-KJNodes/tree/204f6d5aae73b10c0fe2fb26e61405fd6337bb77`. If cloned, `requirements.txt` will be installed if it exists, using `uv` if available, otherwise falling back to `pip`.
4. Download any models listed in the `models.before_start` field, and save them to the specified `local_path`.
5. Start background downloading any models listed in the `models.after_start` field, and save them to the specified `local_path`. These downloads will be started in the background and will not block the server from accepting requests. This is useful for preloading less frequently used models.
## Downloading Behavior
When downloading files, whether via the manifest, image-to-image workflows, or dynamic model loading, the server will first check if the file already exists at the specified path.
It does this by hashing the provided URL and looking for a matching file in the cache directory (`$HOME/.cache/comfyui-api` by default).
For example, the url `https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16` will always be saved in the cache as `Pk6VSKLStckZydwGhX0bM8TqaqHEW9yt.safetensors`.
If a matching file is found, it will be used instead of downloading the file again.
This helps to reduce bandwidth usage and speed up request times.
If the url is an S3 URL, the server will use the AWS SDK to download the file.
This allows the server to access private S3 buckets (or S3-compatible buckets), as long as the appropriate AWS credentials are provided via environment variables.
If the url is a huggingface URL, the server will use the `hf` cli tool to download the file.
This allows you to take advantage of high-speed [xet storage](https://huggingface.co/docs/hub/en/storage-backends#xet), as well as other optimizations provided by huggingface.
If the url is an azure blob storage URL, the server will use the Azure SDK to download the file.
If the url is a regular http(s) URL, the server will use `fetch` to stream the file to disk.
If the url has a file extension, the server will use that extension when saving the file.
Otherwise, it will attempt to determine the file extension from the `Content-Disposition` or `Content-Type` headers.
All downloaded files live in the configured cache directory with a name taken as the first 32 characters of the URL hash plus the file extension, and are symbolically linked to the specified local path.
If a download for a given URL is already in progress, any subsequent requests for the same URL will wait for the first download to complete, and then use the downloaded file.
## LRU Caching
The server uses an LRU cache to manage the cache directory, which is used to store downloaded models and other files.
It is configured to be disabled by default, but you can set a size via the `LRU_CACHE_SIZE_GB` environment variable.
When the cache size exceeds the configured size, the server will delete the least recently used files until the cache size is below the configured size.
**Note:** Cache-size is determined _after_ a download completes, so actual cache size can temporarily exceed the configured size while downloads are in progress.
## Modular Storage Backends
The server supports multiple storage backends for downloading models and input media, and uploading completed outputs.
All uploads take a prefix of some kind, not a full path or URL.
All uploads can be handled synchronously or asynchronously, depending on the `async` field in the upload block of the request body.
- If `async` is `true` or omitted, the server will return a `202 Accepted` response immediately, and the upload will be handled in the background.
- If `async` is `false`, the server will wait for the upload to complete before returning a `200 OK` response with the uploaded urls in the response body.
If an upload for a particular url is in progress, a subsequent upload to the same url will abort the first request and take over the upload.
This is rooted in the assumption that you want the latest version of any particular output.
### S3-Compatible Storage
Includes AWS S3, Cloudflare R2, etc.
Uses the AWS SDK. Requires appropriate AWS credentials to be provided via environment variables.
Used for URLs starting with `s3://`.
For downloads, use the format `s3://bucket-name/path/to/file`.
For uploads, include the `s3` field in the request body, like:
```json
{
"prompt": {...},
"s3": {
"bucket": "my-bucket",
"prefix": "optional/prefix",
"async": false
}
}
```
### Huggingface Repository
Uses the `hf` cli tool.
Requires the `HF_TOKEN` environment variable to be set with a valid Huggingface token.
Used for URLs starting with `https://huggingface.co/`.
Works with both public and private repos, model and dataset repos, and large files stored with [xet storage](https://huggingface.co/docs/hub/en/storage-backends#xet).
For downloads, use the format `https://huggingface.co/username/repo/resolve/revision/path/to/file` or `https://huggingface.co/datasets/username/repo/resolve/revision/path/to/file`.
For uploads, include the `hf_upload` field in the request body, like
```json
{
"prompt": {},
"hf_upload": {
"repo": "username/repo",
"repo_type": "dataset",
"revision": "main",
"directory": "test-source-images",
"async": false
}
}
```
The `repo_type` field can be either `model` or `dataset`, and defaults to `model`.
### Azure Blob Storage
Uses the Azure SDK.
Requires appropriate Azure credentials to be provided via environment variables.
Used for URLs matching `https://<your-account>.blob.core.windows.net/`.
For downloads, use the format `https://<your-account>.blob.core.windows.net/container/path/to/file`.
For uploads, include the `azure_blob_upload` field in the request body, like:
```json
{
"prompt": {},
"azure_blob_upload": {
"container": "my-container",
"blob_prefix": "optional/prefix",
"async": false
}
}
```
### HTTP
Uses Fetch.
Supports custom headers via the `HTTP_AUTH_HEADER_NAME` and `HTTP_AUTH_HEADER_VALUE` environment variables.
Basic auth can be used via the URL, e.g. `https://username:password@your-http-endpoint.com`.
For downloads, use any valid http(s) URL that is not matched by the other storage backends.
For uploads, makes a PUT request to the specified URL with the image as the body. Matches any other URL not matched by the other storage backends.
## Image To Image Workflows
The ComfyUI API server supports image-to-image workflows, allowing you to submit an image and receive a modified version of that image in response.
This is useful for tasks such as image in-painting, style transfer, and other image manipulation tasks.
To use image-to-image workflows, you can submit an image as a base64-encoded string, or a URL.
The server will automatically detect the input type and process the image accordingly, using an appropriate storage provider if necessary.
Here's an example of doing this in a `LoadImage` node:
```json
{
"inputs": {
"image": "https://salad-benchmark-assets.download/coco2017/train2017/000000000009.jpg",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
}
```
## Dynamic Model Loading
The ComfyUI API server supports dynamic model loading, allowing you to specify a model URL in a model-loading node, and the server will automatically download and cache the model before executing the workflow.
This is useful for workflows that need to potentially use a different model for each request.
An example may be head-shot generation, which would specify a LoRA per person.
The LoRA may be generated on-the-fly by another service, and provided to the ComfyUI API server via a URL.
```json
{
"inputs": {
"ckpt_name": "https://civitai.com/api/download/models/76750?type=Model&format=SafeTensor&size=pruned&fp=fp16"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
```
## On-Demand Model Download Endpoint
The server provides a `POST /download` endpoint that allows you to trigger model downloads on-demand. This is useful for pre-loading models before they are needed in a workflow, or for managing model availability across your infrastructure.
### Request Format
```json
{
"url": "https://example.com/model.safetensors",
"model_type": "checkpoints",
"filename": "my-model.safetensors",
"wait": false,
"auth": {
"type": "bearer",
"token": "hf_xxxxxxxxxxxxx"
}
}
```
| Field | Required | Description |
|-------|----------|-------------|
| `url` | Yes | The URL to download the model from. Supports all [storage backends](#modular-storage-backends). |
| `model_type` | Yes | The type of model (e.g., `checkpoints`, `loras`, `vae`, `controlnet`, etc.). Must match a subdirectory in your models folder. |
| `filename` | No | Override the filename. Defaults to the basename from the URL. |
| `wait` | No | If `false` (default), returns immediately with `202 Accepted`. If `true`, waits for the download to complete and returns `200 OK` with file stats. |
| `auth` | No | Authentication credentials for accessing protected resources. See [Authentication Types](#authentication-types) below. |
### Authentication Types
The `auth` field supports multiple authentication methods for different storage providers:
**Bearer Token** (e.g., Hugging Face gated models):
```json
{
"type": "bearer",
"token": "hf_xxxxxxxxxxxxx"
}
```
**Basic Auth**:
```json
{
"type": "basic",
"username": "user",
"password": "pass"
}
```
**Custom Header** (e.g., API keys):
```json
{
"type": "header",
"header_name": "X-API-Key",
"header_value": "your-api-key"
}
```
**Query Parameter** (e.g., Azure SAS tokens):
```json
{
"type": "query",
"query_param": "sig",
"query_value": "your-sas-token"
}
```
**S3 Credentials** (for private S3 buckets):
```json
{
"type": "s3",
"access_key_id": "AKIAIOSFODNN7EXAMPLE",
"secret_access_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
"session_token": "optional-sts-token",
"region": "us-east-1",
"endpoint": "https://s3.custom-endpoint.com"
}
```
The `session_token`, `region`, and `endpoint` fields are optional for S3 auth.
### Response Format
**Asynchronous (default, `wait: false`):**
Returns `202 Accepted` immediately:
```json
{
"url": "https://example.com/model.safetensors",
"model_type": "checkpoints",
"filename": "my-model.safetensors",
"status": "started"
}
```
**Synchronous (`wait: true`):**
Returns `200 OK` when the download completes:
```json
{
"url": "https://example.com/model.safetensors",
"model_type": "checkpoints",
"filename": "my-model.safetensors",
"status": "completed",
"size": 6938281472,
"duration": 45.23
}
```
- `size`: File size in bytes
- `duration`: Download time in seconds
### Example Usage
**Async download (fire-and-forget):**
```bash
curl -X POST http://localhost:3000/download \
-H "Content-Type: application/json" \
-d '{
"url": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors",
"model_type": "checkpoints"
}'
```
**Sync download (wait for completion):**
```bash
curl -X POST http://localhost:3000/download \
-H "Content-Type: application/json" \
-d '{
"url": "s3://my-bucket/models/custom-lora.safetensors",
"model_type": "loras",
"filename": "my-custom-lora.safetensors",
"wait": true
}'
```
The download uses the same caching and storage provider infrastructure as [dynamic model loading](#dynamic-model-loading), so downloaded files are cached and deduplicated automatically.
## Server-side image processing
The ComfyUI API server uses the [sharp](https://sharp.pixelplumbing.com/) library to process images. This allows you to return the images in different, more compact formats, such as JPEG or WebP. This can be accomplished by including the `convert_output` object in the request body, which can contain the following fields:
```json
{
"format": "jpeg|webp",
"options": {}
}
```
Omitting the `convert_output` object will default to PNG format, which is lossless and has the best quality, but is also the largest in size.
**JPEG options**:
- `quality`: The quality of the JPEG image, between 1 and 100. Default is `80`.
- `progressive`: Use progressive (interlace) scanning. Default is `false`.
- `chromaSubsampling`: Set to `4:4:4` to prevent chroma subsampling; otherwise defaults to `4:2:0` chroma subsampling.
- `optimizeCoding`: Optimize the Huffman coding tables. Default is `true`.
- `mozjpeg`: use mozjpeg defaults, equivalent to `{ trellisQuantisation: true, overshootDeringing: true, optimizeScans: true, quantisationTable: 3 }`
- `trellisQuantisation`: Use trellis quantization. Default is `false`.
- `overshootDeringing`: Use overshoot deringing. Default is `false`.
- `optimizeScans`: Optimize the scan order. Default is `false`.
- `quantisationTable`: Set the quantization table to use, 1 - 8. Default is `0`.
**WebP options**:
- `quality`: The quality of the WebP image, between 1 and 100. Default is `80`.
- `alphaQuality`: The quality of the alpha channel, between 0 and 100. Default is `100`.
- `lossless`: Use lossless compression. Default is `false`.
- `nearLossless`: Use near-lossless compression. Default is `false`.
- `smartSubsample`: Use smart subsampling. Default is `false`.
- `preset`: named preset for preprocessing/filtering, one of `default`, `picture`, `photo`, `drawing`, `icon`, or `text`. Default is `default`.
- `effort`: CPU effort level, between 0 (fastest) and 6 (slowest). Default is `4`.
## Probes
The server has two probes, `/health` and `/ready`.
- The `/health` probe will return a 200 status code once the warmup workflow has completed. It will stay healthy as long as the server is running, even if ComfyUI crashes.
- The `/ready` probe will also return a 200 status code once the warmup workflow has completed. It will return a 503 status code if ComfyUI is not running, such as in the case it has crashed, but is being automatically restarted. If you have set `MAX_QUEUE_DEPTH` to a non-zero value, it will return a 503 status code if ComfyUI's queue has reached the maximum depth.
## API Configuration Guide
### Environment Variables
The following table lists the available environment variables and their default values.
For historical reasons, the default values mostly assume this will run on top of an [ai-dock](https://github.com/ai-dock/comfyui) image, but we currently provide [our own more minimal image](#prebuilt-docker-images) here in this repo.
If you are using the s3 storage functionality, make sure to set all of the appropriate environment variables for your S3 bucket, such as `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_REGION`.
The server will automatically use these to upload images to S3.
If you are using the huggingface storage functionality, make sure to set the `HF_TOKEN` environment variable with a valid Huggingface token with appropriate permissions.
If you are using the azure blob storage functionality, make sure to set all of the appropriate environment variables for your Azure account, such as `AZURE_STORAGE_CONNECTION_STRING`.
| Variable | Default Value | Description |
| ---------------------------- | -------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| ALWAYS_RESTART_COMFYUI | "false" | If set to "true", the ComfyUI process will be automatically restarted if it exits. Otherwise, the API server will exit when ComfyUI exits. |
| BASE | (not set) | There are different ways to load the comfyui environment for determining config values that vary with the base image. Currently only "ai-dock" has a special preset value. |
| CACHE_DIR | "$HOME/.cache/comfyui-api" | Directory to use for caching downloaded models and other files. |
| CMD | "init.sh" | Command to launch ComfyUI |
| COMFY_HOME | "/opt/ComfyUI" | ComfyUI home directory |
| COMFYUI_PORT_HOST | "8188" | ComfyUI port number |
| DIRECT_ADDRESS | "127.0.0.1" | Direct address for ComfyUI |
| HOST | "::" | Wrapper host address |
| HTTP_AUTH_HEADER_NAME | (not set) | If set, the server will include this header name with the value from HTTP_AUTH_HEADER_VALUE in all outgoing HTTP requests for uploading and downloading files. This can be used to add basic auth or bearer tokens to requests. |
| HTTP_AUTH_HEADER_VALUE | (not set) | The value to use for the HTTP_AUTH_HEADER_NAME header in all outgoing HTTP requests for uploading and downloading files. |
| INPUT_DIR | "/opt/ComfyUI/input" | Directory for input files |
| LOG_LEVEL | "info" | Log level for the application. One of "trace", "debug", "info", "warn", "error", "fatal". |
| LRU_CACHE_SIZE_GB | "0" | Maximum size of the LRU cache in GB. If set to 0, this feature is disabled. |
| MANIFEST | (not set) | Path to the [manifest file](#model-manifest) (optional). Can be yml or json. |
| MANIFEST_JSON | (not set) | A JSON string representing the [manifest](#model-manifest). If set, this will take precedence over the MANIFEST variable. |
| MARKDOWN_SCHEMA_DESCRIPTIONS | "true" | If set to "true", the server will use the descriptions in the zod schemas to generate markdown tables in the swagger docs. |
| MAX_BODY_SIZE_MB             | "100"                      | Maximum request body size in MB |
| MAX_QUEUE_DEPTH | "0" | Maximum number of queued requests before the readiness probe will return 503. 0 indicates no limit. |
| MODEL_DIR | "/opt/ComfyUI/models" | Directory for model files |
| OUTPUT_DIR | "/opt/ComfyUI/output" | Directory for output files |
| PORT | "3000" | Wrapper port number |
| PREPEND_FILENAMES | "true" | If set to "true", the server will prepend a unique identifier to output filenames to avoid collisions. Otherwise, the server will overwrite filename prefixes with the unique identifier (legacy behavior). |
| PROMPT_WEBHOOK_RETRIES | "3" | Number of times to retry sending a webhook for a prompt |
| STARTUP_CHECK_INTERVAL_S | "1" | Interval in seconds between startup checks |
| STARTUP_CHECK_MAX_TRIES | "20" | Maximum number of startup check attempts |
| SYSTEM_META\_\*              | (not set)                  | Any environment variable starting with `SYSTEM_META_` will be sent to the system webhook as metadata. i.e. `SYSTEM_META_batch=abc` will add `{"batch": "abc"}` to the `.metadata` field on system webhooks. |
| SYSTEM_WEBHOOK_EVENTS | (not set) | Comma separated list of events to send to the webhook. Only selected events will be sent. If not set, no events will be sent. See [System Events](#system-events). You may also use the special value `all` to subscribe to all event types. |
| SYSTEM_WEBHOOK_URL | (not set) | Optionally receive via webhook the events that ComfyUI emits on websocket. This includes progress events. |
| WARMUP_PROMPT_FILE | (not set) | Path to warmup prompt file (optional). If both `WARMUP_PROMPT_FILE` and `WARMUP_PROMPT_URL` are set, `WARMUP_PROMPT_FILE` takes precedence. |
| WARMUP_PROMPT_URL | (not set) | URL to download warmup prompt from (optional). Allows using a remote warmup workflow without building a custom Docker image. Downloaded and parsed at startup before ComfyUI launches. |
| WEBHOOK_SECRET | (empty string) | If set, the server will sign webhook_v2 requests with this secret. |
| WORKFLOW_DIR | "/workflows" | Directory for workflow files |
#### Kubernetes Deployment: Proxy Environment Variables
To enable outbound requests (e.g., webhook delivery) to use a corporate proxy in Kubernetes, configure the standard proxy environment variables. The server uses undici's EnvHttpProxyAgent, which reads `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` and routes requests accordingly.
- Recommended `NO_PROXY`: include localhost and common Kubernetes internal addresses so local and in-cluster services do not go through the proxy.
- Set only one set of variables (prefer uppercase). If both lowercase and uppercase are set, the lowercase variables take precedence and the uppercase ones are ignored.
Example Deployment snippet:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: comfyui-api
spec:
replicas: 1
selector:
matchLabels:
app: comfyui-api
template:
metadata:
labels:
app: comfyui-api
spec:
containers:
- name: comfyui-api
image: your.registry/comfyui-api:latest
env:
# Recommended: set only uppercase vars
- name: HTTP_PROXY
value: "http://your-http-proxy:3128"
- name: HTTPS_PROXY
value: "http://your-https-proxy:3129"
- name: NO_PROXY
value: "localhost,127.0.0.1,::1,.svc,.svc.cluster.local,169.254.169.254"
ports:
- name: http
containerPort: 3000
```
Notes:
- If your proxy requires authentication, include credentials in the proxy URL (e.g., `http://user:pass@proxy.company:3128`).
- `NO_PROXY="*"` bypasses the proxy for all requests.
- When only `HTTP_PROXY` is set, it is used for both HTTP and HTTPS.
- Suggested `NO_PROXY` entries:
- `localhost,127.0.0.1,::1` for in-container loopback
- `.svc,.svc.cluster.local` for Kubernetes service DNS
- `169.254.169.254` for cloud/container metadata service
### Configuration Details
1. **ComfyUI Settings**:
- The application uses the `CMD` environment variable to specify the command for launching ComfyUI.
- ComfyUI is accessed at `http://${DIRECT_ADDRESS}:${COMFYUI_PORT_HOST}`.
2. **Wrapper Settings**:
- The wrapper API listens on `HOST:PORT`.
- It can be accessed at `http://localhost:${PORT}`.
- Use an IPv6 address for `HOST` when deploying on Salad. This is the default behavior.
3. **Startup Checks**:
- The application performs startup checks at intervals specified by `STARTUP_CHECK_INTERVAL_S`.
- It will attempt up to `STARTUP_CHECK_MAX_TRIES` before giving up.
4. **Directories**:
- The application uses the `COMFY_HOME` environment variable to locate the ComfyUI installation.
- Output files are stored in `OUTPUT_DIR`.
- Input files are read from `INPUT_DIR`.
- Model files are located in `MODEL_DIR`.
- Workflow files are stored in `WORKFLOW_DIR`. See [below](#generating-new-workflow-endpoints) for more information.
5. **Warmup Prompt**:
- If `WARMUP_PROMPT_FILE` is set, the application will load and parse a warmup prompt from this file.
- Alternatively, set `WARMUP_PROMPT_URL` to download the warmup prompt from a remote URL at startup. This allows using a custom warmup workflow without building a custom Docker image.
- If both are set, `WARMUP_PROMPT_FILE` takes precedence.
- The checkpoint used in this prompt can be used as the default for workflow models.
6. **Models**:
- The application scans the `MODEL_DIR` for subdirectories and creates configurations for each model type found.
- Each model type will have its directory path, list of available models, and a Zod enum for validation.
- The model names are exposed via the `GET /models` endpoint, and via the config object throughout the application.
7. **ComfyUI Description**:
- The application retrieves available samplers and schedulers from ComfyUI itself at startup. It does not take custom nodes or extensions into account.
- This information is used to create Zod enums for validation in workflows, but is otherwise not used by the application.
### Additional Notes
- The application uses Zod for runtime type checking and validation of configuration values.
- The configuration includes setup for both the wrapper application and ComfyUI itself.
Remember to set these environment variables according to your specific deployment needs before running the application.
## Using Synchronously
The default behavior of the API is to return an array of base64-encoded outputs in the response body.
All that is needed to do this is to omit webhook and upload fields from the request body.
## Using with Webhooks
ComfyUI API sends two types of webhooks: System Events, which are emitted by ComfyUI itself, and Workflow Events, which are emitted by the API server. See [System Events](#system-events) for more information on System Events.
If a user includes `.webhook_v2` field in a request to `/prompt` or any of the workflow endpoints, the server will send any completed outputs to the webhook URL provided in the request.
It will also send a webhook if the request fails.
For successful requests including the `.webhook_v2` field, a single webhook request will be sent once the entire workflow has completed, containing all outputs.
Webhooks are sent as [Standard Webhooks](https://www.standardwebhooks.com/), and can be validated using the `WEBHOOK_SECRET` environment variable and any standard webhook validation library such as `svix`.
### prompt.complete
The webhook type name for a completed prompt is `prompt.complete`. The webhook will have the same schema as the synchronous response, with the addition of the `type` and `timestamp` fields:
```json
{
"type": "prompt.complete",
"timestamp": "2025-01-01T00:00:00Z",
"id": "request-id",
"images": ["base64-encoded-image-1", "base64-encoded-image-2"],
"filenames": ["output-filename-1.png", "output-filename-2.png"],
"prompt": {},
"stats":{}
}
```
Note that if you include upload fields in your request, the `.images` field will contain the uploaded URLs instead of base64-encoded images.
### prompt.failed
The webhook type name for a failed request is `prompt.failed`. The webhook will have the following schema:
```json
{
"type": "prompt.failed",
"timestamp": "2025-01-01T00:00:00Z",
"error": "error-message",
"id": "request-id",
"prompt": {}
}
```
### Validating Webhooks
#### Node.js Example
```shell
npm install svix
```
```javascript
const { Webhook } = require('svix')
//Express.js middleware
function validateWebhookSignature(req, res, next) {
const webhook = new Webhook(secret)
try {
webhook.verify(req.body, req.headers)
next()
} catch (error) {
console.error('Webhook verification failed:', error)
return res.status(401).send('Invalid signature')
}
}
```
#### Python Example
```shell
pip install svix
```
```python
from fastapi import FastAPI, Request, HTTPException
from svix import Webhook
from typing import Any, Dict
async def validate_webhook(request: Request) -> Dict[str, Any]:
"""
FastAPI Dependency to validate webhook signatures
"""
try:
# Get the raw body
body = await request.body()
# Create webhook instance
webhook = Webhook(webhook_secret)
# Verify the webhook signature
payload = webhook.verify(body, dict(request.headers))
return payload
except Exception as e:
print(f"Webhook verification failed: {e}")
raise HTTPException(status_code=401, detail="Invalid webhook signature")
```
### DEPRECATED: Legacy Webhook Behavior
**LEGACY BEHAVIOR**: For successful requests including the now-deprecated `.webhook` field, every output from the workflow will be sent as individual webhook requests. That means if your request generates 4 images, you will receive 4 webhook requests, each with a single image.
These webhooks are not signed, so we recommend migrating to the new `.webhook_v2` field as soon as possible.
#### output.complete
The webhook event name for a completed output is `output.complete`. The webhook will have the following schema:
```json
{
"event": "output.complete",
"image": "base64-encoded-image",
"id": "request-id",
"filename": "output-filename.png",
"prompt": {}
}
```
#### prompt.failed (legacy)
The webhook event name for a failed request is `prompt.failed`. The webhook will have the following schema:
```json
{
"event": "prompt.failed",
"error": "error-message",
"id": "request-id",
"prompt": {}
}
```
## System Events
> Note: From version 1.14.0, the frontend aggregate progress event `progress_state` is included in the supported system event set and can be forwarded like other events. Use `SYSTEM_WEBHOOK_EVENTS=progress_state` or `SYSTEM_WEBHOOK_EVENTS=all` to subscribe.
ComfyUI emits a number of events over websocket during the course of a workflow. These can be configured to be sent to a webhook using the `SYSTEM_WEBHOOK_URL` and `SYSTEM_WEBHOOK_EVENTS` environment variables. Additionally, any environment variable starting with `SYSTEM_META_` will be sent as metadata with the event. From version 1.13.0, these are signed, and can be validated using the `WEBHOOK_SECRET` environment variable and any standard webhook validation library such as `svix`. See [above](#validating-webhooks) for examples.
All webhooks have the same format, which is as follows:
```json
{
"event": "event_name",
"data": {},
"metadata": {}
}
```
When running on SaladCloud, `.metadata` will always include lowercase versions of the [Default Environment Variables](https://docs.salad.com/container-engine/how-to-guides/environment-variables#default-environment-variables).
The following events are available:
- "status"
- "progress"
- "progress_state"
- "executing"
- "execution_start"
- "execution_cached"
- "executed"
- "execution_success"
- "execution_interrupted"
- "execution_error"
- "file_downloaded"
- "file_uploaded"
- "file_deleted"
The `SYSTEM_WEBHOOK_EVENTS` environment variable should be a comma-separated list of the events you want to send to the webhook. If not set, no events will be sent.
The event name received in the webhook will be `comfy.${event_name}`, i.e. `comfy.progress`, or `storage.${event_name}` for file events.
**Example**:
```shell
export SYSTEM_WEBHOOK_EVENTS="progress,execution_start,execution_success,execution_error"
```
This will cause the API to send the `progress`, `execution_start`, `execution_success`, and `execution_error` events to the webhook.
The `SYSTEM_META_*` environment variables can be used to add metadata to the webhook events. For example:
```shell
export SYSTEM_META_batch=abc
export SYSTEM_META_purpose=testing
```
Will add `{"batch": "abc", "purpose": "testing"}` to the `.metadata` field on system webhooks.
The following are the schemas for the event data that will be sent to the webhook. This will populate the `.data` field on the webhook.
### status
```json
{
"type": "status",
"data": {
"status": {
"exec_info": {
"queue_remaining": 3
}
}
},
"sid": "abc123"
}
```
### progress
```json
{
"type": "progress",
"data": {
"value": 45,
"max": 100,
"prompt_id": "123e4567-e89b-12d3-a456-426614174000",
"node": "42"
},
"sid": "xyz789"
}
```
### progress_state
```json
{
"type": "progress_state",
"data": {
"prompt_id": "123e4567-e89b-12d3-a456-426614174000",
"nodes": {
"42": {
"value": 5,
"max": 20,
"state": "executing",
"node_id": "42",
"prompt_id": "123e4567-e89b-12d3-a456-426614174000"
}
}
},
"sid": "xyz789"
}
```
### executing
```json
{
"type": "executing",
"data": {
"node": "42",
"display_node": "42",
"prompt_id": "123e4567-e89b-12d3-a456-426614174000"
},
"sid": "xyz789"
}
```
### execution_start
```json
{
"type": "execution_start",
"data": {
"prompt_id": "123e4567-e89b-12d3-a456-426614174000",
"timestamp": 1705505423000
},
"sid": "xyz789"
}
```
### execution_cached
```json
{
"type": "execution_cached",
"data": {
"nodes": ["42", "7", "13"],
"prompt_id": "123e4567-e89b-12d3-a456-426614174000",
"timestamp": 1705505423000
},
"sid": "xyz789"
}
```
### executed
```json
{
"type": "executed",
"data": {
"node": "42",
"display_node": "42",
"output": {},
"prompt_id": "123e4567-e89b-12d3-a456-426614174000"
},
"sid": "xyz789"
}
```
### execution_success
```json
{
"type": "execution_success",
"data": {
"prompt_id": "123e4567-e89b-12d3-a456-426614174000",
"timestamp": 1705505423000
},
"sid": "xyz789"
}
```
### execution_interrupted
```json
{
"type": "execution_interrupted",
"data": {
"prompt_id": "123e4567-e89b-12d3-a456-426614174000",
"node_id": "42",
"node_type": "KSampler",
"executed": []
},
"sid": "xyz789"
}
```
### execution_error
```json
{
"type": "execution_error",
"data": {
"prompt_id": "123e4567-e89b-12d3-a456-426614174000",
"node_id": "42",
"node_type": "KSampler",
"executed": [],
"exception_message": "CUDA out of memory. Tried to allocate 2.20 GiB",
"exception_type": "RuntimeError",
"traceback": "Traceback (most recent call last):\n File \"nodes.py\", line 245, in sample\n samples = sampler.sample(model, noise, steps)",
"current_inputs": {
"seed": 42,
"steps": 20,
"cfg": 7.5,
"sampler_name": "euler"
},
"current_outputs": []
},
"sid": "xyz789"
}
```
### file_downloaded
```jsonc
{
// Where the file was downloaded from
"url": "https://example.com/model.safetensors",
// Local path where the file was saved
"local_path": "/opt/ComfyUI/models/model.safetensors",
// Size of the downloaded file in bytes
"size": 123456789,
// Duration of the download in seconds
"duration": 2.34
}
```
### file_uploaded
```jsonc
{
// Local path of the file that was uploaded
"local_path": "/opt/ComfyUI/output/image.png",
// URL where the file was uploaded to
"url": "s3://my-bucket/images/image.png",
// Size of the uploaded file in bytes
"size": 123456,
// Duration of the upload in seconds
"duration": 0.56
}
```
### file_deleted
```jsonc
{
// URL of the file that was deleted. Note there are edge cases where this may be unknown, and the value will be "unknown".
"url": "s3://my-bucket/models/old_model.safetensors",
// Local path of the file that was deleted
"local_path": "/opt/ComfyUI/models/old_model.safetensors",
// Size of the deleted file in bytes
"size": 987654321
}
```
## Prebuilt Docker Images
You can find ready-to-go docker images under [Packages](https://github.com/orgs/SaladTechnologies/packages?repo_name=comfyui-api) in this repository.
The images are tagged with the comfyui-api version they are built with, and the comfyui version they are built for, along with their pytorch version and CUDA version. There are versions for both CUDA runtime and CUDA devel, so you can choose the one that best fits your needs.
The tag pattern is `ghcr.io/saladtechnologies/comfyui-api:comfy<comfy-version>-api<api-version>-torch<pytorch-version>-cuda<cuda-version>-<runtime|devel>` where:
- `<comfy-version>` is the version of ComfyUI used
- `<api-version>` is the version of the comfyui-api server
- `<pytorch-version>` is the version of PyTorch used
- `<cuda-version>` is the version of CUDA used
- `<runtime|devel>` is whether the image is built with the CUDA runtime or the CUDA devel image. The devel image is much larger, but includes the full CUDA toolkit, which is required for some custom nodes.
**If the tag doesn't have `api<api-version>`, it does not include the api, and is just the ComfyUI base image.**
Included in the API images are the following utilities:
- `git`
- `curl`
- `wget`
- `unzip`
- `ComfyUI`
- `comfy` cli
All of SaladCloud's image and video generation [recipes](https://docs.salad.com/products/recipes/overview) are built on top of these images, so you can use them as a base for your own workflows. For examples of using this with custom models and nodes, check out the [Salad Recipes](https://github.com/SaladTechnologies/salad-recipes/tree/master/src) repository on GitHub.
## Considerations for Running on SaladCloud
- **SaladCloud's Container Gateway has a 100s timeout.** It is possible to construct very long running ComfyUI workflows, such as for video generation, that would exceed this timeout. In this scenario, you will need to either use a webhook to receive the results, or integrate with SaladCloud's [Job Queues](https://docs.salad.com/products/sce/job-queues/job-queues#job-queues) to handle long-running workflows.
- **SaladCloud's maximum container image size is 35GB(compressed).** The base [comfyui-api image](https://github.com/SaladTechnologies/comfyui-api/pkgs/container/comfyui-api) is around 3.25GB(compressed), so any models and extensions must fit in the remaining space.
## Custom Workflows
Custom workflows offer a simple and powerful way to create new endpoints for your specific use cases which abstract away the complexities of the underlying ComfyUI node-based prompt format.
You can create workflows in either javascript or typescript, and they can be as simple or complex as you need them to be.
Workflows are loaded at runtime, even when you use the pre-compiled binary releases or docker images, so you can easily add new workflows without needing to rebuild the image.
[See the guide on generating new workflow endpoints](./DEVELOPING.md#generating-new-workflow-endpoints) for more information.
## Contributing
Contributions are welcome!
See the [Development](./DEVELOPING.md) guide for more information on how to develop, test, and contribute to this project.
ComfyUI is a powerful tool with MANY options, and it's likely that not all of them are currently well supported by the `comfyui-api` server.
Please open an issue with as much information as possible about the problem you're facing or the feature you need.
If you have encountered a bug, please include the steps to reproduce it, and any relevant logs or error messages.
If you are able, adding a failing test is the best way to ensure your issue is resolved quickly.
Let's make productionizing ComfyUI as easy as possible!
## Architecture
The server is built with [Fastify](https://www.fastify.io/), a fast and low overhead web framework for Node.js.
It sits in front of ComfyUI, and provides a RESTful API for interacting with ComfyUI.

================================================
FILE: build-and-release
================================================
#! /usr/bin/env bash
# Build the comfyui-api binary and publish it as a GitHub release.
# Requires node/npm and an authenticated GitHub CLI (`gh`).
#
# Abort immediately if any step fails, so a broken build can never be
# published as a release (matches the behavior of ./build-binary).
set -e

npm install
npm run build-binary

# Read the release version from package.json; it is used as both the
# release tag and the release title.
version=$(node -p "require('./package.json').version")
echo "Version: $version"

gh release create "$version" \
  --title "Release $version" \
  --notes "Release $version"

# Upload the compiled binary; "#Linux_x64" is gh's syntax for the asset
# display label. --clobber replaces the asset if it already exists.
gh release upload "$version" "./bin/comfyui-api#Linux_x64" --clobber
================================================
FILE: build-binary
================================================
#! /usr/bin/env bash
# Compile the comfyui-api server into a standalone binary.
# Abort on the first failing command.
set -e
npm install
# Type-check and emit JavaScript via the TypeScript compiler.
npx tsc
# Package the compiled output into a single executable with pkg.
# The enlarged stack size avoids stack overflows in the bundled runtime.
npx pkg --options "stack-size=65500" .
================================================
FILE: claude-endpoint-creation-prompt.md
================================================
# Instructions
Your job is to convert a json workflow graph for ai image generation into a typescript function.
- You should define a type for the input, using Zod for validation.
- You should use `.describe` to describe each parameter to the best of your ability.
- Filename prefix is always set by the system in a different location.
- Do not extrapolate enum values. Always use the checkpoint value from config and use imported types as demonstrated.
- Use snake_case for multi-word parameters.
- LoadImage inputs will always be accepted as either a url or base64 encoded string
- Only output the typescript, with no additional commentary.
# Example Output
import { z } from "zod";
import config from "../config";
let checkpoint: any = config.models.checkpoints.enum.optional();
if (config.warmupCkpt) {
checkpoint = checkpoint.default(config.warmupCkpt);
}
const ComfyNodeSchema = z.object({
inputs: z.any(),
class_type: z.string(),
_meta: z.any().optional(),
});
type ComfyNode = z.infer<typeof ComfyNodeSchema>;
interface Workflow {
RequestSchema: z.ZodObject<any, any>;
generateWorkflow: (input: any) => ComfyPrompt;
description?: string;
summary?: string;
}
const RequestSchema = z.object({
prompt: z.string().describe("The positive prompt for image generation"),
width: z
.number()
.int()
.min(256)
.max(2048)
.optional()
.default(1024)
.describe("Width of the generated image"),
height: z
.number()
.int()
.min(256)
.max(2048)
.optional()
.default(1024)
.describe("Height of the generated image"),
seed: z
.number()
.int()
    .optional()
.default(() => Math.floor(Math.random() * 1000000000000000))
.describe("Seed for random number generation"),
steps: z
.number()
.int()
.min(1)
.max(100)
.optional()
.default(4)
.describe("Number of sampling steps"),
cfg_scale: z
.number()
.min(0)
.max(20)
.optional()
.default(1)
.describe("Classifier-free guidance scale"),
sampler_name: config.samplers
.optional()
.default("euler")
.describe("Name of the sampler to use"),
scheduler: config.schedulers
.optional()
.default("simple")
.describe("Type of scheduler to use"),
denoise: z
.number()
.min(0)
.max(1)
.optional()
.default(1)
.describe("Denoising strength"),
checkpoint,
});
type InputType = z.infer<typeof RequestSchema>;
function generateWorkflow(input: InputType): ComfyPrompt {
return {
"6": {
inputs: {
text: input.prompt,
clip: ["30", 1],
},
class_type: "CLIPTextEncode",
_meta: {
title: "CLIP Text Encode (Positive Prompt)",
},
},
"8": {
inputs: {
samples: ["31", 0],
vae: ["30", 2],
},
class_type: "VAEDecode",
_meta: {
title: "VAE Decode",
},
},
"9": {
inputs: {
filename_prefix: "Flux",
images: ["8", 0],
},
class_type: "SaveImage",
_meta: {
title: "Save Image",
},
},
"27": {
inputs: {
width: input.width,
height: input.height,
batch_size: 1,
},
class_type: "EmptySD3LatentImage",
_meta: {
title: "EmptySD3LatentImage",
},
},
"30": {
inputs: {
ckpt_name: input.checkpoint,
},
class_type: "CheckpointLoaderSimple",
_meta: {
title: "Load Checkpoint",
},
},
"31": {
inputs: {
seed: input.seed,
steps: input.steps,
cfg: input.cfg_scale,
sampler_name: input.sampler_name,
scheduler: input.scheduler,
denoise: input.denoise,
model: ["30", 0],
positive: ["6", 0],
negative: ["33", 0],
latent_image: ["27", 0],
},
class_type: "KSampler",
_meta: {
title: "KSampler",
},
},
"33": {
inputs: {
text: "",
clip: ["30", 1],
},
class_type: "CLIPTextEncode",
_meta: {
title: "CLIP Text Encode (Negative Prompt)",
},
},
};
}
const workflow: Workflow = {
RequestSchema,
generateWorkflow,
summary: "Text to Image",
description: "Generate an image from a text prompt",
};
export default workflow;
================================================
FILE: docker/api.dockerfile
================================================
# Layers the prebuilt comfyui-api binary on top of a published ComfyUI base
# image. `base` selects the upstream image flavor (runtime or devel).
ARG base=runtime
ARG comfy_version=0.19.3
ARG pytorch_version=2.8.0
ARG cuda_version=12.8
FROM ghcr.io/saladtechnologies/comfyui-api:comfy${comfy_version}-torch${pytorch_version}-cuda${cuda_version}-${base}
# Directory the API scans for workflow endpoint definitions (mounted by users).
ENV WORKFLOW_DIR=/workflows
# Give ComfyUI up to 30 startup-check attempts before the wrapper gives up.
ENV STARTUP_CHECK_MAX_TRIES=30
ARG api_version=1.18.1
# Fetch the prebuilt binary for the requested GitHub release.
ADD https://github.com/SaladTechnologies/comfyui-api/releases/download/${api_version}/comfyui-api .
RUN chmod +x comfyui-api
CMD ["./comfyui-api"]
================================================
FILE: docker/build-api-images
================================================
#!/usr/bin/env bash
# Build and push the comfyui-api images (devel and runtime variants) for a
# given ComfyUI / PyTorch / CUDA / API version combination.
#
# Usage: ./build-api-images [comfy_version] [torch_version] [cuda_version] [api_version]
# All arguments are optional; api_version defaults to the version in ../package.json.
set -euo pipefail

usage="Usage: $0 [comfy_version] [torch_version] [cuda_version] [api_version]"

comfy_version=${1:-0.19.3}
torch_version=${2:-2.8.0}
cuda_version=${3:-12.8}
# Default the API version to the one declared in package.json to avoid drift.
current_api_version=$(jq -r '.version' ../package.json)
api_version=${4:-$current_api_version}

bases=("devel" "runtime")

for base in "${bases[@]}"; do
  # Full image tag for this variant; reused for build, push, and cleanup.
  tag="ghcr.io/saladtechnologies/comfyui-api:comfy${comfy_version}-api${api_version}-torch${torch_version}-cuda${cuda_version}-${base}"
  docker build -t "$tag" \
    -f api.dockerfile \
    --build-arg comfy_version="$comfy_version" \
    --build-arg base="$base" \
    --build-arg pytorch_version="$torch_version" \
    --build-arg cuda_version="$cuda_version" \
    --build-arg api_version="$api_version" \
    .
  docker push "$tag"

  # The runtime variant doubles as the :latest tag.
  if [[ "$base" == "runtime" ]]; then
    docker tag "$tag" ghcr.io/saladtechnologies/comfyui-api:latest
    docker push ghcr.io/saladtechnologies/comfyui-api:latest
    docker image rm ghcr.io/saladtechnologies/comfyui-api:latest
  fi

  # Remove the image to make space for the next one. Github actions runners don't get much storage.
  docker image rm "$tag"
done
================================================
FILE: docker/build-comfy-base-images
================================================
#!/usr/bin/env bash
# Build the ComfyUI base images (devel and runtime variants) for a given
# ComfyUI / PyTorch / CUDA version combination. Push is handled separately
# by push-comfy-base-images.
#
# Usage: ./build-comfy-base-images [comfy_version] [torch_version] [cuda_version]
set -euo pipefail

usage="Usage: $0 [comfy_version] [torch_version] [cuda_version]"

comfy_version=${1:-0.19.3}
torch_version=${2:-2.8.0}
cuda_version=${3:-12.8}

bases=("devel" "runtime")

for base in "${bases[@]}"; do
  docker build -t "ghcr.io/saladtechnologies/comfyui-api:comfy${comfy_version}-torch${torch_version}-cuda${cuda_version}-${base}" \
    -f comfyui.dockerfile \
    --no-cache \
    --build-arg comfy_version="$comfy_version" \
    --build-arg base="$base" \
    --build-arg pytorch_version="$torch_version" \
    --build-arg cuda_version="$cuda_version" \
    .
done
================================================
FILE: docker/comfyui.dockerfile
================================================
# ComfyUI base image: upstream PyTorch/CUDA image plus ComfyUI (pinned tag),
# comfy-cli, and ComfyUI-Manager. `base` selects runtime or devel flavor.
ARG base=runtime
ARG pytorch_version=2.8.0
ARG cuda_version=12.8
FROM pytorch/pytorch:${pytorch_version}-cuda${cuda_version}-cudnn9-${base}
ENV DEBIAN_FRONTEND=noninteractive
ENV PIP_PREFER_BINARY=1
ENV CMAKE_BUILD_PARALLEL_LEVEL=8
# Tooling needed to fetch ComfyUI, custom nodes, and model files.
RUN apt-get update && apt-get upgrade -y && apt-get install -y \
curl \
git \
unzip \
wget \
&& apt clean -y && rm -rf /var/lib/apt/lists/*
# Install comfy-cli, which makes it easy to install custom nodes and other comfy specific functionality.
SHELL ["/bin/bash", "-c"]
RUN pip install --no-cache-dir --upgrade pip
RUN pip install --no-cache-dir uv
RUN uv pip install --no-cache-dir --system "comfy-cli==1.5.1" "huggingface_hub[cli]"
WORKDIR /opt
ARG comfy_version=0.19.3
# Shallow-clone the pinned ComfyUI release tag.
RUN git clone --depth 1 --branch v${comfy_version} https://github.com/comfyanonymous/ComfyUI.git
WORKDIR /opt/ComfyUI
# ARG declared before FROM must be re-declared to be visible in this stage.
ARG cuda_version=12.8
# torchaudio from the matching CUDA wheel index ("12.8" -> "cu128").
RUN uv pip install --no-cache-dir --system torchaudio --index-url https://download.pytorch.org/whl/cu${cuda_version//./}
RUN uv pip install --no-cache-dir --system -r requirements.txt
ENV COMFY_HOME=/opt/ComfyUI
RUN comfy --skip-prompt tracking disable
RUN comfy --skip-prompt set-default ${COMFY_HOME}
RUN git clone https://github.com/Comfy-Org/ComfyUI-Manager.git ./custom_nodes/ComfyUI-Manager
RUN uv pip install --system --no-cache-dir -r ./custom_nodes/ComfyUI-Manager/requirements.txt
# Conventional model/output/input locations consumed by the comfyui-api wrapper.
ENV MODEL_DIR=${COMFY_HOME}/models
ENV OUTPUT_DIR=${COMFY_HOME}/output
ENV INPUT_DIR=${COMFY_HOME}/input
# Launch command as an env var so downstream images can override/inspect it.
ENV CMD="comfy --workspace ${COMFY_HOME} launch -- --listen *"
ENV BASE=""
# Default: launch ComfyUI listening on all interfaces.
CMD ["bash", "-c", "comfy --workspace ${COMFY_HOME} launch -- --listen '*'"]
================================================
FILE: docker/push-comfy-base-images
================================================
#!/usr/bin/env bash
# Push the previously built ComfyUI base images (devel and runtime variants),
# then tag and push the runtime variant as :base.
#
# Usage: ./push-comfy-base-images [comfy_version] [torch_version] [cuda_version]
set -euo pipefail

usage="Usage: $0 [comfy_version] [torch_version] [cuda_version]"

comfy_version=${1:-0.19.3}
torch_version=${2:-2.8.0}
cuda_version=${3:-12.8}

bases=("devel" "runtime")

for base in "${bases[@]}"; do
  docker push "ghcr.io/saladtechnologies/comfyui-api:comfy${comfy_version}-torch${torch_version}-cuda${cuda_version}-${base}"
done

# The runtime variant doubles as the :base tag.
docker tag "ghcr.io/saladtechnologies/comfyui-api:comfy${comfy_version}-torch${torch_version}-cuda${cuda_version}-runtime" ghcr.io/saladtechnologies/comfyui-api:base
docker push ghcr.io/saladtechnologies/comfyui-api:base
================================================
FILE: docker-compose.yml
================================================
# Local development / test stack for the comfyui-api wrapper: the API itself
# plus emulators for the S3, Azure Blob, and HTTP storage providers.
services:
  comfyui:
    image: ghcr.io/saladtechnologies/comfyui-api:comfy0.19.3-torch2.8.0-cuda12.8-runtime
    volumes:
      # Locally built binary; `command` below runs it instead of the bundled one.
      - type: bind
        source: ./bin
        target: /app/bin
      - type: bind
        source: ./manifest.yml
        target: /app/manifest.yml
      - ./cache:/root/.cache/comfyui-api
      # Workflow endpoint definitions served from the sd1.5 examples.
      - ./example-workflows/sd1.5:/workflows
    command: ["/app/bin/comfyui-api"]
    ports:
      - "3000:3000"
      - "8188:8188"
    environment:
      LOG_LEVEL: "debug"
      # Dummy credentials pointed at the LocalStack S3 emulator below.
      AWS_ENDPOINT_URL: "http://localstack:4566"
      AWS_ACCESS_KEY_ID: "test"
      AWS_SECRET_ACCESS_KEY: "test"
      AWS_REGION: "us-east-1"
      MANIFEST: "/app/manifest.yml"
      STARTUP_CHECK_MAX_TRIES: "30"
      # Forwarded from the host environment; needed for Hugging Face downloads.
      HF_TOKEN: ${HF_TOKEN}
      # Azurite's published development-account key (devstoreaccount1) — not a real secret.
      AZURE_STORAGE_CONNECTION_STRING: "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite:10000/devstoreaccount1;"
      # ALWAYS_RESTART_COMFYUI: "true"
      SYSTEM_WEBHOOK_URL: "http://host.docker.internal:1234/system"
      SYSTEM_WEBHOOK_EVENTS: all
      WEBHOOK_SECRET: testsecret
    deploy:
      resources:
        reservations:
          # Expose all host NVIDIA GPUs to the container.
          devices:
            - driver: nvidia
              capabilities: [ gpu ]
              count: all
  # S3 emulator backing the AWS_* settings above.
  localstack:
    image: localstack/localstack
    ports:
      - "4566:4566" # LocalStack Gateway
      - "4510-4559:4510-4559" # External services
    environment:
      - SERVICES=s3
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
  # Azure Blob Storage emulator backing the connection string above.
  azurite:
    image: mcr.microsoft.com/azure-storage/azurite
    ports:
      - "10000:10000" # Blob service
    command: "azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --skipApiVersionCheck"
  # Plain HTTP file server (built from test/Dockerfile.file-server) used in tests.
  file-server:
    build:
      context: .
      dockerfile: test/Dockerfile.file-server
    ports:
      - "8080:8080"
    environment:
      - PORT=8080
      - STORAGE_DIR=/storage
      - REQUIRE_AUTH=false
================================================
FILE: example-workflows/flux/img2img.json
================================================
{
"6": {
"inputs": {
"text": "A noble wolf stands by a raging river in the style of a japanese scroll",
"clip": [
"30",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Positive Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"31",
0
],
"vae": [
"30",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"27": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptySD3LatentImage",
"_meta": {
"title": "EmptySD3LatentImage"
}
},
"30": {
"inputs": {
"ckpt_name": "flux1-schnell-fp8.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"31": {
"inputs": {
"seed": 226018262510838,
"steps": 4,
"cfg": 1,
"sampler_name": "euler",
"scheduler": "simple",
"denoise": 0.8,
"model": [
"30",
0
],
"positive": [
"6",
0
],
"negative": [
"33",
0
],
"latent_image": [
"38",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"33": {
"inputs": {
"text": "",
"clip": [
"30",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Negative Prompt)"
}
},
"37": {
"inputs": {
"image": "IMG_0655.JPG",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"38": {
"inputs": {
"pixels": [
"40",
0
],
"vae": [
"30",
2
]
},
"class_type": "VAEEncode",
"_meta": {
"title": "VAE Encode"
}
},
"40": {
"inputs": {
"width": 1024,
"height": 1024,
"interpolation": "nearest",
"method": "fill / crop",
"condition": "always",
"multiple_of": 8,
"image": [
"37",
0
]
},
"class_type": "ImageResize+",
"_meta": {
"title": "🔧 Image Resize"
}
}
}
================================================
FILE: example-workflows/flux/img2img.ts
================================================
import { z } from "zod";
// This gets evaluated in the context of src/workflows, so imports must be relative to that directory
import { ComfyPrompt, Workflow } from "../types";
import config from "../config";
let checkpoint: any = config.models.checkpoints.enum.optional();
if (config.warmupCkpt) {
checkpoint = checkpoint.default(config.warmupCkpt);
}
const RequestSchema = z.object({
prompt: z.string().describe("The positive prompt for image generation"),
width: z
.number()
.int()
.min(256)
.max(2048)
.optional()
.default(1024)
.describe("Width of the generated image"),
height: z
.number()
.int()
.min(256)
.max(2048)
.optional()
.default(1024)
.describe("Height of the generated image"),
seed: z
.number()
.int()
.optional()
.default(() => Math.floor(Math.random() * 1000000000000000))
.describe("Seed for random number generation"),
steps: z
.number()
.int()
.min(1)
.max(100)
.optional()
.default(4)
.describe("Number of sampling steps"),
cfg_scale: z
.number()
.min(0)
.max(20)
.optional()
.default(1)
.describe("Classifier-free guidance scale"),
sampler_name: config.samplers
.optional()
.default("euler")
.describe("Name of the sampler to use"),
scheduler: config.schedulers
.optional()
.default("simple")
.describe("Type of scheduler to use"),
denoise: z
.number()
.min(0)
.max(1)
.optional()
.default(0.8)
.describe("Denoising strength"),
checkpoint,
image: z.string().describe("Input image for img2img"),
interpolation: z
.enum(["nearest"])
.optional()
.default("nearest")
.describe("Interpolation method for image resizing"),
resize_method: z
.enum(["fill / crop"])
.optional()
.default("fill / crop")
.describe("Method for resizing the image"),
resize_condition: z
.enum(["always"])
.optional()
.default("always")
.describe("Condition for when to resize the image"),
multiple_of: z
.number()
.int()
.optional()
.default(8)
.describe("Ensure image dimensions are multiples of this value"),
});
type InputType = z.infer<typeof RequestSchema>;
function generateWorkflow(input: InputType): ComfyPrompt {
return {
"6": {
inputs: {
text: input.prompt,
clip: ["30", 1],
},
class_type: "CLIPTextEncode",
_meta: {
title: "CLIP Text Encode (Positive Prompt)",
},
},
"8": {
inputs: {
samples: ["31", 0],
vae: ["30", 2],
},
class_type: "VAEDecode",
_meta: {
title: "VAE Decode",
},
},
"9": {
inputs: {
filename_prefix: "ComfyUI",
images: ["8", 0],
},
class_type: "SaveImage",
_meta: {
title: "Save Image",
},
},
"30": {
inputs: {
ckpt_name: input.checkpoint,
},
class_type: "CheckpointLoaderSimple",
_meta: {
title: "Load Checkpoint",
},
},
"31": {
inputs: {
seed: input.seed,
steps: input.steps,
cfg: input.cfg_scale,
sampler_name: input.sampler_name,
scheduler: input.scheduler,
denoise: input.denoise,
model: ["30", 0],
positive: ["6", 0],
negative: ["33", 0],
latent_image: ["38", 0],
},
class_type: "KSampler",
_meta: {
title: "KSampler",
},
},
"33": {
inputs: {
text: "",
clip: ["30", 1],
},
class_type: "CLIPTextEncode",
_meta: {
title: "CLIP Text Encode (Negative Prompt)",
},
},
"37": {
inputs: {
image: input.image,
upload: "image",
},
class_type: "LoadImage",
_meta: {
title: "Load Image",
},
},
"38": {
inputs: {
pixels: ["40", 0],
vae: ["30", 2],
},
class_type: "VAEEncode",
_meta: {
title: "VAE Encode",
},
},
"40": {
inputs: {
width: input.width,
height: input.height,
interpolation: input.interpolation,
method: input.resize_method,
condition: input.resize_condition,
multiple_of: input.multiple_of,
image: ["37", 0],
},
class_type: "ImageResize+",
_meta: {
title: "🔧 Image Resize",
},
},
};
}
const workflow: Workflow = {
RequestSchema,
generateWorkflow,
summary: "Image-to-Image",
description: "Text-guided Image-to-Image generation",
};
export default workflow;
================================================
FILE: example-workflows/flux/txt2img.json
================================================
{
"6": {
"inputs": {
"text": "a bottle with a beautiful rainbow galaxy inside it on top of a wooden table in the middle of a modern kitchen beside a plate of vegetables and mushrooms and a wine glasse that contains a planet earth with a plate with a half eaten apple pie on it",
"clip": [
"30",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Positive Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"31",
0
],
"vae": [
"30",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "Flux",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"27": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptySD3LatentImage",
"_meta": {
"title": "EmptySD3LatentImage"
}
},
"30": {
"inputs": {
"ckpt_name": "flux1-schnell-fp8.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"31": {
"inputs": {
"seed": 1030319533692526,
"steps": 4,
"cfg": 1,
"sampler_name": "euler",
"scheduler": "simple",
"denoise": 1,
"model": [
"30",
0
],
"positive": [
"6",
0
],
"negative": [
"33",
0
],
"latent_image": [
"27",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"33": {
"inputs": {
"text": "",
"clip": [
"30",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Negative Prompt)"
}
}
}
================================================
FILE: example-workflows/flux/txt2img.ts
================================================
import { z } from "zod";
// This gets evaluated in the context of src/workflows, so imports must be relative to that directory
import { ComfyPrompt, Workflow } from "../types";
import config from "../config";
let checkpoint: any = config.models.checkpoints.enum.optional();
if (config.warmupCkpt) {
checkpoint = checkpoint.default(config.warmupCkpt);
}
const RequestSchema = z.object({
prompt: z.string().describe("The positive prompt for image generation"),
width: z
.number()
.int()
.min(256)
.max(2048)
.optional()
.default(1024)
.describe("Width of the generated image"),
height: z
.number()
.int()
.min(256)
.max(2048)
.optional()
.default(1024)
.describe("Height of the generated image"),
seed: z
.number()
.int()
.optional()
.default(() => Math.floor(Math.random() * 1000000000000000))
.describe("Seed for random number generation"),
steps: z
.number()
.int()
.min(1)
.max(100)
.optional()
.default(4)
.describe("Number of sampling steps"),
cfg_scale: z
.number()
.min(0)
.max(20)
.optional()
.default(1)
.describe("Classifier-free guidance scale"),
sampler_name: config.samplers
.optional()
.default("euler")
.describe("Name of the sampler to use"),
scheduler: config.schedulers
.optional()
.default("simple")
.describe("Type of scheduler to use"),
denoise: z
.number()
.min(0)
.max(1)
.optional()
.default(1)
.describe("Denoising strength"),
checkpoint,
});
type InputType = z.infer<typeof RequestSchema>;
/**
 * Assemble the ComfyUI prompt graph for Flux text-to-image generation.
 *
 * Graph shape: the checkpoint loader (30) feeds the positive (6) and
 * negative (33) text encoders and the sampler (31); the sampler denoises an
 * empty SD3 latent (27), which is VAE-decoded (8) and saved (9).
 *
 * Note: numeric-like string keys enumerate in ascending numeric order in JS,
 * so the serialized prompt is identical regardless of insertion order.
 */
function generateWorkflow(input: InputType): ComfyPrompt {
  // Node ids, named for readability; the wiring below refers to them.
  const ckpt = "30";
  const positiveId = "6";
  const negativeId = "33";
  const emptyLatent = "27";
  const sampler = "31";
  const decode = "8";
  const save = "9";

  // Both prompt encoders share the checkpoint's CLIP output.
  const textEncode = (text: string, title: string) => ({
    inputs: { text, clip: [ckpt, 1] },
    class_type: "CLIPTextEncode",
    _meta: { title },
  });

  return {
    [positiveId]: textEncode(input.prompt, "CLIP Text Encode (Positive Prompt)"),
    [decode]: {
      inputs: { samples: [sampler, 0], vae: [ckpt, 2] },
      class_type: "VAEDecode",
      _meta: { title: "VAE Decode" },
    },
    [save]: {
      inputs: { filename_prefix: "Flux", images: [decode, 0] },
      class_type: "SaveImage",
      _meta: { title: "Save Image" },
    },
    [emptyLatent]: {
      inputs: { width: input.width, height: input.height, batch_size: 1 },
      class_type: "EmptySD3LatentImage",
      _meta: { title: "EmptySD3LatentImage" },
    },
    [ckpt]: {
      inputs: { ckpt_name: input.checkpoint },
      class_type: "CheckpointLoaderSimple",
      _meta: { title: "Load Checkpoint" },
    },
    [sampler]: {
      inputs: {
        seed: input.seed,
        steps: input.steps,
        cfg: input.cfg_scale,
        sampler_name: input.sampler_name,
        scheduler: input.scheduler,
        denoise: input.denoise,
        model: [ckpt, 0],
        positive: [positiveId, 0],
        negative: [negativeId, 0],
        latent_image: [emptyLatent, 0],
      },
      class_type: "KSampler",
      _meta: { title: "KSampler" },
    },
    // Flux uses a fixed empty negative prompt.
    [negativeId]: textEncode("", "CLIP Text Encode (Negative Prompt)"),
  };
}
/**
 * Flux text-to-image endpoint definition.
 *
 * `summary` and `description` surface in the generated endpoint docs; they
 * were missing here while every sibling example workflow sets them, so they
 * are added for consistency (the `Workflow` interface declares both optional,
 * making this backward-compatible).
 */
const workflow: Workflow = {
  RequestSchema,
  generateWorkflow,
  summary: "Text to Image",
  description: "Generate an image from a text prompt",
};

export default workflow;
================================================
FILE: example-workflows/sd1.5/img2img.js
================================================
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var zod_1 = require("zod");
var config_1 = require("../config");
var RequestSchema = zod_1.z.object({
prompt: zod_1.z.string().describe("The positive prompt for image generation"),
negative_prompt: zod_1.z
.string()
.optional()
.default("text, watermark")
.describe("The negative prompt for image generation"),
seed: zod_1.z
.number()
.int()
.optional()
.default(function () { return Math.floor(Math.random() * 1000000000000000); })
.describe("Seed for random number generation"),
steps: zod_1.z
.number()
.int()
.min(1)
.max(100)
.optional()
.default(15)
.describe("Number of sampling steps"),
cfg_scale: zod_1.z
.number()
.min(0)
.max(20)
.optional()
.default(8)
.describe("Classifier-free guidance scale"),
sampler_name: config_1.default.samplers
.optional()
.default("euler")
.describe("Name of the sampler to use"),
scheduler: config_1.default.schedulers
.optional()
.default("normal")
.describe("Type of scheduler to use"),
denoise: zod_1.z
.number()
.min(0)
.max(1)
.optional()
.default(0.8)
.describe("Denoising strength"),
checkpoint: zod_1.z
.string()
.refine(function (val) { return config_1.default.models.checkpoints.all.includes(val); })
.optional()
.default(config_1.default.warmupCkpt || config_1.default.models.checkpoints.all[0])
.describe("Checkpoint to use"),
image: zod_1.z.string().describe("Input image for img2img"),
width: zod_1.z
.number()
.int()
.min(64)
.max(2048)
.optional()
.default(512)
.describe("Width of the generated image"),
height: zod_1.z
.number()
.int()
.min(64)
.max(2048)
.optional()
.default(512)
.describe("Height of the generated image"),
interpolation: zod_1.z
.enum(["nearest"])
.optional()
.default("nearest")
.describe("Interpolation method for image resizing"),
resize_method: zod_1.z
.enum(["keep proportion"])
.optional()
.default("keep proportion")
.describe("Method for resizing the image"),
resize_condition: zod_1.z
.enum(["always"])
.optional()
.default("always")
.describe("Condition for when to resize the image"),
multiple_of: zod_1.z
.number()
.int()
.min(0)
.optional()
.default(0)
.describe("Ensure dimensions are multiples of this value"),
});
function generateWorkflow(input) {
return {
"3": {
inputs: {
seed: input.seed,
steps: input.steps,
cfg: input.cfg_scale,
sampler_name: input.sampler_name,
scheduler: input.scheduler,
denoise: input.denoise,
model: ["4", 0],
positive: ["6", 0],
negative: ["7", 0],
latent_image: ["12", 0],
},
class_type: "KSampler",
_meta: {
title: "KSampler",
},
},
"4": {
inputs: {
ckpt_name: input.checkpoint,
},
class_type: "CheckpointLoaderSimple",
_meta: {
title: "Load Checkpoint",
},
},
"6": {
inputs: {
text: input.prompt,
clip: ["4", 1],
},
class_type: "CLIPTextEncode",
_meta: {
title: "CLIP Text Encode (Prompt)",
},
},
"7": {
inputs: {
text: input.negative_prompt,
clip: ["4", 1],
},
class_type: "CLIPTextEncode",
_meta: {
title: "CLIP Text Encode (Prompt)",
},
},
"8": {
inputs: {
samples: ["3", 0],
vae: ["4", 2],
},
class_type: "VAEDecode",
_meta: {
title: "VAE Decode",
},
},
"9": {
inputs: {
filename_prefix: "output",
images: ["8", 0],
},
class_type: "SaveImage",
_meta: {
title: "Save Image",
},
},
"10": {
inputs: {
image: input.image,
upload: "image",
},
class_type: "LoadImage",
_meta: {
title: "Load Image",
},
},
"11": {
inputs: {
width: input.width,
height: input.height,
interpolation: input.interpolation,
method: input.resize_method,
condition: input.resize_condition,
multiple_of: input.multiple_of,
image: ["10", 0],
},
class_type: "ImageResize+",
_meta: {
title: "🔧 Image Resize",
},
},
"12": {
inputs: {
pixels: ["11", 0],
vae: ["4", 2],
},
class_type: "VAEEncode",
_meta: {
title: "VAE Encode",
},
},
};
}
var workflow = {
RequestSchema: RequestSchema,
generateWorkflow: generateWorkflow,
summary: "Image-to-Image",
description: "Text-guided Image-to-Image generation",
};
exports.default = workflow;
================================================
FILE: example-workflows/sd1.5/img2img.json
================================================
{
"3": {
"inputs": {
"seed": 818335187507771,
"steps": 15,
"cfg": 8,
"sampler_name": "euler",
"scheduler": "normal",
"denoise": 0.8,
"model": [
"4",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"12",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"4": {
"inputs": {
"ckpt_name": "dreamshaper_8.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"6": {
"inputs": {
"text": "A girl in a pink dress with cat ears, magazine photograph",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": "text, watermark",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"10": {
"inputs": {
"image": "example.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"11": {
"inputs": {
"width": 512,
"height": 512,
"interpolation": "nearest",
"method": "keep proportion",
"condition": "always",
"multiple_of": 0,
"image": [
"10",
0
]
},
"class_type": "ImageResize+",
"_meta": {
"title": "🔧 Image Resize"
}
},
"12": {
"inputs": {
"pixels": [
"11",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEEncode",
"_meta": {
"title": "VAE Encode"
}
}
}
================================================
FILE: example-workflows/sd1.5/img2img.ts
================================================
import { z } from "zod";
// This gets evaluated in the context of src/workflows, so imports must be relative to that directory
import { ComfyPrompt, Workflow } from "../types";
import config from "../config";
const RequestSchema = z.object({
prompt: z.string().describe("The positive prompt for image generation"),
negative_prompt: z
.string()
.optional()
.default("text, watermark")
.describe("The negative prompt for image generation"),
seed: z
.number()
.int()
.optional()
.default(() => Math.floor(Math.random() * 1000000000000000))
.describe("Seed for random number generation"),
steps: z
.number()
.int()
.min(1)
.max(100)
.optional()
.default(15)
.describe("Number of sampling steps"),
cfg_scale: z
.number()
.min(0)
.max(20)
.optional()
.default(8)
.describe("Classifier-free guidance scale"),
sampler_name: config.samplers
.optional()
.default("euler")
.describe("Name of the sampler to use"),
scheduler: config.schedulers
.optional()
.default("normal")
.describe("Type of scheduler to use"),
denoise: z
.number()
.min(0)
.max(1)
.optional()
.default(0.8)
.describe("Denoising strength"),
checkpoint: z
.string()
.refine((val) => config.models.checkpoints.all.includes(val))
.optional()
.default(config.warmupCkpt || config.models.checkpoints.all[0])
.describe("Checkpoint to use"),
image: z.string().describe("Input image for img2img"),
width: z
.number()
.int()
.min(64)
.max(2048)
.optional()
.default(512)
.describe("Width of the generated image"),
height: z
.number()
.int()
.min(64)
.max(2048)
.optional()
.default(512)
.describe("Height of the generated image"),
interpolation: z
.enum(["nearest"])
.optional()
.default("nearest")
.describe("Interpolation method for image resizing"),
resize_method: z
.enum(["keep proportion"])
.optional()
.default("keep proportion")
.describe("Method for resizing the image"),
resize_condition: z
.enum(["always"])
.optional()
.default("always")
.describe("Condition for when to resize the image"),
multiple_of: z
.number()
.int()
.min(0)
.optional()
.default(0)
.describe("Ensure dimensions are multiples of this value"),
});
type InputType = z.infer<typeof RequestSchema>;
function generateWorkflow(input: InputType): ComfyPrompt {
return {
"3": {
inputs: {
seed: input.seed,
steps: input.steps,
cfg: input.cfg_scale,
sampler_name: input.sampler_name,
scheduler: input.scheduler,
denoise: input.denoise,
model: ["4", 0],
positive: ["6", 0],
negative: ["7", 0],
latent_image: ["12", 0],
},
class_type: "KSampler",
_meta: {
title: "KSampler",
},
},
"4": {
inputs: {
ckpt_name: input.checkpoint,
},
class_type: "CheckpointLoaderSimple",
_meta: {
title: "Load Checkpoint",
},
},
"6": {
inputs: {
text: input.prompt,
clip: ["4", 1],
},
class_type: "CLIPTextEncode",
_meta: {
title: "CLIP Text Encode (Prompt)",
},
},
"7": {
inputs: {
text: input.negative_prompt,
clip: ["4", 1],
},
class_type: "CLIPTextEncode",
_meta: {
title: "CLIP Text Encode (Prompt)",
},
},
"8": {
inputs: {
samples: ["3", 0],
vae: ["4", 2],
},
class_type: "VAEDecode",
_meta: {
title: "VAE Decode",
},
},
"9": {
inputs: {
filename_prefix: "output",
images: ["8", 0],
},
class_type: "SaveImage",
_meta: {
title: "Save Image",
},
},
"10": {
inputs: {
image: input.image,
upload: "image",
},
class_type: "LoadImage",
_meta: {
title: "Load Image",
},
},
"11": {
inputs: {
width: input.width,
height: input.height,
interpolation: input.interpolation,
method: input.resize_method,
condition: input.resize_condition,
multiple_of: input.multiple_of,
image: ["10", 0],
},
class_type: "ImageResize+",
_meta: {
title: "🔧 Image Resize",
},
},
"12": {
inputs: {
pixels: ["11", 0],
vae: ["4", 2],
},
class_type: "VAEEncode",
_meta: {
title: "VAE Encode",
},
},
};
}
const workflow: Workflow = {
RequestSchema,
generateWorkflow,
summary: "Image-to-Image",
description: "Text-guided Image-to-Image generation",
};
export default workflow;
================================================
FILE: example-workflows/sd1.5/txt2img.js
================================================
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var zod_1 = require("zod");
var config_1 = require("../config");
var RequestSchema = zod_1.z.object({
prompt: zod_1.z.string().describe("The positive prompt for image generation"),
negative_prompt: zod_1.z
.string()
.optional()
.default("")
.describe("The negative prompt for image generation"),
width: zod_1.z
.number()
.int()
.min(256)
.max(2048)
.optional()
.default(512)
.describe("Width of the generated image"),
height: zod_1.z
.number()
.int()
.min(256)
.max(2048)
.optional()
.default(512)
.describe("Height of the generated image"),
seed: zod_1.z
.number()
.int()
.optional()
.default(function () { return Math.floor(Math.random() * 100000000000); })
.describe("Seed for random number generation"),
steps: zod_1.z
.number()
.int()
.min(1)
.max(100)
.optional()
.default(20)
.describe("Number of sampling steps"),
cfg_scale: zod_1.z
.number()
.min(0)
.max(20)
.optional()
.default(8)
.describe("Classifier-free guidance scale"),
sampler_name: config_1.default.samplers
.optional()
.default("euler")
.describe("Name of the sampler to use"),
scheduler: config_1.default.schedulers
.optional()
.default("normal")
.describe("Type of scheduler to use"),
denoise: zod_1.z
.number()
.min(0)
.max(1)
.optional()
.default(1)
.describe("Denoising strength"),
checkpoint: zod_1.z
.string()
.refine(function (val) { return config_1.default.models.checkpoints.all.includes(val); })
.optional()
.default(config_1.default.warmupCkpt || config_1.default.models.checkpoints.all[0])
.describe("Checkpoint to use"),
});
function generateWorkflow(input) {
return {
"3": {
inputs: {
seed: input.seed,
steps: input.steps,
cfg: input.cfg_scale,
sampler_name: input.sampler_name,
scheduler: input.scheduler,
denoise: input.denoise,
model: ["4", 0],
positive: ["6", 0],
negative: ["7", 0],
latent_image: ["5", 0],
},
class_type: "KSampler",
_meta: {
title: "KSampler",
},
},
"4": {
inputs: {
ckpt_name: input.checkpoint,
},
class_type: "CheckpointLoaderSimple",
_meta: {
title: "Load Checkpoint",
},
},
"5": {
inputs: {
width: input.width,
height: input.height,
batch_size: 1,
},
class_type: "EmptyLatentImage",
_meta: {
title: "Empty Latent Image",
},
},
"6": {
inputs: {
text: input.prompt,
clip: ["4", 1],
},
class_type: "CLIPTextEncode",
_meta: {
title: "CLIP Text Encode (Prompt)",
},
},
"7": {
inputs: {
text: input.negative_prompt,
clip: ["4", 1],
},
class_type: "CLIPTextEncode",
_meta: {
title: "CLIP Text Encode (Prompt)",
},
},
"8": {
inputs: {
samples: ["3", 0],
vae: ["4", 2],
},
class_type: "VAEDecode",
_meta: {
title: "VAE Decode",
},
},
"9": {
inputs: {
filename_prefix: "output",
images: ["8", 0],
},
class_type: "SaveImage",
_meta: {
title: "Save Image",
},
},
};
}
var workflow = {
RequestSchema: RequestSchema,
generateWorkflow: generateWorkflow,
};
exports.default = workflow;
================================================
FILE: example-workflows/sd1.5/txt2img.json
================================================
{
"3": {
"inputs": {
"seed": 712610403220747,
"steps": 20,
"cfg": 8,
"sampler_name": "euler",
"scheduler": "normal",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"4": {
"inputs": {
"ckpt_name": "dreamshaper_8.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"5": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"6": {
"inputs": {
"text": "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": "text, watermark",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
}
}
================================================
FILE: example-workflows/sd1.5/txt2img.ts
================================================
import { z } from "zod";
// This gets evaluated in the context of src/workflows, so imports must be relative to that directory
import { ComfyPrompt, Workflow } from "../types";
import config from "../config";
// Request schema for the SD1.5 text-to-image endpoint. Defaults mirror the
// values baked into example-workflows/sd1.5/txt2img.json.
const RequestSchema = z.object({
  prompt: z.string().describe("The positive prompt for image generation"),
  negative_prompt: z
    .string()
    .optional()
    .default("")
    .describe("The negative prompt for image generation"),
  width: z
    .number()
    .int()
    .min(256)
    .max(2048)
    .optional()
    .default(512)
    .describe("Width of the generated image"),
  height: z
    .number()
    .int()
    .min(256)
    .max(2048)
    .optional()
    .default(512)
    .describe("Height of the generated image"),
  seed: z
    .number()
    .int()
    .optional()
    // Default is a fresh random seed per request, not a fixed value.
    .default(() => Math.floor(Math.random() * 100000000000))
    .describe("Seed for random number generation"),
  steps: z
    .number()
    .int()
    .min(1)
    .max(100)
    .optional()
    .default(20)
    .describe("Number of sampling steps"),
  cfg_scale: z
    .number()
    .min(0)
    .max(20)
    .optional()
    .default(8)
    .describe("Classifier-free guidance scale"),
  sampler_name: config.samplers
    .optional()
    .default("euler")
    .describe("Name of the sampler to use"),
  scheduler: config.schedulers
    .optional()
    .default("normal")
    .describe("Type of scheduler to use"),
  denoise: z
    .number()
    .min(0)
    .max(1)
    .optional()
    .default(1)
    .describe("Denoising strength"),
  checkpoint: z
    .string()
    // Fix: the refine previously had no message, so an unknown checkpoint
    // surfaced as a generic, unhelpful validation error to API callers.
    .refine((val) => config.models.checkpoints.all.includes(val), {
      message:
        "Unknown checkpoint; must be one of the checkpoints available on this server",
    })
    .optional()
    .default(config.warmupCkpt || config.models.checkpoints.all[0])
    .describe("Checkpoint to use"),
});
type InputType = z.infer<typeof RequestSchema>;
/**
 * Build the ComfyUI prompt graph for a single SD1.5 text-to-image run.
 * Node ids and wiring mirror example-workflows/sd1.5/txt2img.json; link
 * values such as ["4", 0] are [source node id, output index] references.
 */
function generateWorkflow(input: InputType): ComfyPrompt {
  return {
    // Sampler: consumes the model, conditioning, and empty latent.
    "3": {
      inputs: {
        seed: input.seed,
        steps: input.steps,
        cfg: input.cfg_scale,
        sampler_name: input.sampler_name,
        scheduler: input.scheduler,
        denoise: input.denoise,
        model: ["4", 0],
        positive: ["6", 0],
        negative: ["7", 0],
        latent_image: ["5", 0],
      },
      class_type: "KSampler",
      _meta: {
        title: "KSampler",
      },
    },
    // Checkpoint loader: provides model (out 0), CLIP (out 1), VAE (out 2).
    "4": {
      inputs: {
        ckpt_name: input.checkpoint,
      },
      class_type: "CheckpointLoaderSimple",
      _meta: {
        title: "Load Checkpoint",
      },
    },
    // Starting latent at the requested resolution; single image per request.
    "5": {
      inputs: {
        width: input.width,
        height: input.height,
        batch_size: 1,
      },
      class_type: "EmptyLatentImage",
      _meta: {
        title: "Empty Latent Image",
      },
    },
    // Positive prompt conditioning.
    "6": {
      inputs: {
        text: input.prompt,
        clip: ["4", 1],
      },
      class_type: "CLIPTextEncode",
      _meta: {
        title: "CLIP Text Encode (Prompt)",
      },
    },
    // Negative prompt conditioning.
    "7": {
      inputs: {
        text: input.negative_prompt,
        clip: ["4", 1],
      },
      class_type: "CLIPTextEncode",
      _meta: {
        title: "CLIP Text Encode (Prompt)",
      },
    },
    // Decode sampled latent back to pixels with the checkpoint's VAE.
    "8": {
      inputs: {
        samples: ["3", 0],
        vae: ["4", 2],
      },
      class_type: "VAEDecode",
      _meta: {
        title: "VAE Decode",
      },
    },
    // Persist the decoded image; filenames are prefixed with "output".
    "9": {
      inputs: {
        filename_prefix: "output",
        images: ["8", 0],
      },
      class_type: "SaveImage",
      _meta: {
        title: "Save Image",
      },
    },
  };
}
// Workflow registration: request schema plus prompt builder. summary and
// description feed the generated API docs, matching the metadata style used
// by the SDXL img2img workflow.
const workflow: Workflow = {
  RequestSchema,
  generateWorkflow,
  summary: "Text-to-Image",
  description: "Text-to-Image generation with SD1.5",
};
export default workflow;
================================================
FILE: example-workflows/sdxl/img2img.json
================================================
{
"8": {
"inputs": {
"samples": [
"36",
0
],
"vae": [
"14",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "img2img",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"14": {
"inputs": {
"ckpt_name": "sd_xl_base_1.0.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint Base"
}
},
"16": {
"inputs": {
"width": 4096,
"height": 4096,
"crop_w": 0,
"crop_h": 0,
"target_width": 4096,
"target_height": 4096,
"text_g": "a professional photo of a young man smiling\n\nhigh resolution, highly detailed, 4k",
"text_l": "a professional photo of a young man smiling\n\nhigh resolution, highly detailed, 4k",
"clip": [
"14",
1
]
},
"class_type": "CLIPTextEncodeSDXL",
"_meta": {
"title": "CLIPTextEncodeSDXL"
}
},
"19": {
"inputs": {
"width": 4096,
"height": 4096,
"crop_w": 0,
"crop_h": 0,
"target_width": 4096,
"target_height": 4096,
"text_g": "blurry, horror, rendering, illustration, drawing, painting",
"text_l": "blurry, horror, rendering, illustration, drawing, painting",
"clip": [
"14",
1
]
},
"class_type": "CLIPTextEncodeSDXL",
"_meta": {
"title": "CLIPTextEncodeSDXL"
}
},
"36": {
"inputs": {
"seed": 887855663168366,
"steps": 20,
"cfg": 5.5,
"sampler_name": "dpmpp_2m_sde_gpu",
"scheduler": "exponential",
"denoise": 0.75,
"model": [
"14",
0
],
"positive": [
"16",
0
],
"negative": [
"19",
0
],
"latent_image": [
"39",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"38": {
"inputs": {
"image": "ComfyUI_00376_.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"39": {
"inputs": {
"pixels": [
"40",
0
],
"vae": [
"14",
2
]
},
"class_type": "VAEEncode",
"_meta": {
"title": "VAE Encode"
}
},
"40": {
"inputs": {
"upscale_method": "nearest-exact",
"width": 1024,
"height": 1024,
"crop": "center",
"image": [
"38",
0
]
},
"class_type": "ImageScale",
"_meta": {
"title": "Upscale Image"
}
}
}
================================================
FILE: example-workflows/sdxl/img2img.ts
================================================
import { z } from "zod";
// This gets evaluated in the context of src/workflows, so imports must be relative to that directory
import { ComfyPrompt, Workflow } from "../types";
import config from "../config";
// checkpoint is built dynamically: an enum of the checkpoints discovered on
// this server, with the warmup checkpoint as default when one is configured.
let checkpoint: any = config.models.checkpoints.enum.optional();
if (config.warmupCkpt) {
  checkpoint = checkpoint.default(config.warmupCkpt);
}
// Request schema for the SDXL image-to-image endpoint.
const RequestSchema = z.object({
  prompt: z.string().describe("The positive prompt for image generation"),
  negative_prompt: z
    .string()
    .optional()
    // Bug fix: without a default, omitting negative_prompt passed `undefined`
    // into the CLIPTextEncodeSDXL node's text_g/text_l inputs. The sd1.5
    // workflows already default this field.
    .default("")
    .describe("The negative prompt for image generation"),
  width: z
    .number()
    .int()
    .min(256)
    .max(4096)
    .optional()
    .default(4096)
    .describe("Width of the generated image"),
  height: z
    .number()
    .int()
    .min(256)
    .max(4096)
    .optional()
    .default(4096)
    .describe("Height of the generated image"),
  seed: z
    .number()
    .int()
    .optional()
    // Default is a fresh random seed per request, not a fixed value.
    .default(() => Math.floor(Math.random() * 1000000000000000))
    .describe("Seed for random number generation"),
  steps: z
    .number()
    .int()
    .min(1)
    .max(100)
    .optional()
    .default(20)
    .describe("Number of sampling steps"),
  cfg_scale: z
    .number()
    .min(0)
    .max(20)
    .optional()
    .default(5.5)
    .describe("Classifier-free guidance scale"),
  sampler_name: config.samplers
    .optional()
    .default("dpmpp_2m_sde_gpu")
    .describe("Name of the sampler to use"),
  scheduler: config.schedulers
    .optional()
    .default("exponential")
    .describe("Type of scheduler to use"),
  denoise: z
    .number()
    .min(0)
    .max(1)
    .optional()
    .default(0.75)
    .describe("Denoising strength"),
  checkpoint,
  image: z.string().describe("Input image for img2img"),
  upscale_method: z
    .enum(["nearest-exact"])
    .optional()
    .default("nearest-exact")
    .describe(
      "Method used for upscaling if input image is smaller than target size"
    ),
  target_width: z
    .number()
    .int()
    .min(256)
    .max(4096)
    .optional()
    .default(1024)
    .describe("Target width for upscaling"),
  target_height: z
    .number()
    .int()
    .min(256)
    .max(4096)
    .optional()
    .default(1024)
    .describe("Target height for upscaling"),
});
type InputType = z.infer<typeof RequestSchema>;
/**
 * Build the ComfyUI prompt graph for an SDXL image-to-image run.
 * Node ids and wiring mirror example-workflows/sdxl/img2img.json; link
 * values such as ["14", 0] are [source node id, output index] references.
 * Pipeline: LoadImage (38) -> ImageScale (40) -> VAEEncode (39) ->
 * KSampler (36) -> VAEDecode (8) -> SaveImage (9).
 */
function generateWorkflow(input: InputType): ComfyPrompt {
  return {
    // Decode sampled latent back to pixels with the checkpoint's VAE.
    "8": {
      inputs: {
        samples: ["36", 0],
        vae: ["14", 2],
      },
      class_type: "VAEDecode",
      _meta: {
        title: "VAE Decode",
      },
    },
    // Persist the decoded image; filenames are prefixed with "img2img".
    "9": {
      inputs: {
        filename_prefix: "img2img",
        images: ["8", 0],
      },
      class_type: "SaveImage",
      _meta: {
        title: "Save Image",
      },
    },
    // Checkpoint loader: provides model (out 0), CLIP (out 1), VAE (out 2).
    "14": {
      inputs: {
        ckpt_name: input.checkpoint,
      },
      class_type: "CheckpointLoaderSimple",
      _meta: {
        title: "Load Checkpoint Base",
      },
    },
    // Positive SDXL conditioning; prompt is used for both text_g and text_l.
    "16": {
      inputs: {
        width: input.width,
        height: input.height,
        crop_w: 0,
        crop_h: 0,
        target_width: input.width,
        target_height: input.height,
        text_g: input.prompt,
        text_l: input.prompt,
        clip: ["14", 1],
      },
      class_type: "CLIPTextEncodeSDXL",
      _meta: {
        title: "CLIPTextEncodeSDXL",
      },
    },
    // Negative SDXL conditioning, same dual-text layout as node 16.
    "19": {
      inputs: {
        width: input.width,
        height: input.height,
        crop_w: 0,
        crop_h: 0,
        target_width: input.width,
        target_height: input.height,
        text_g: input.negative_prompt,
        text_l: input.negative_prompt,
        clip: ["14", 1],
      },
      class_type: "CLIPTextEncodeSDXL",
      _meta: {
        title: "CLIPTextEncodeSDXL",
      },
    },
    // Sampler over the encoded input image latent (node 39).
    "36": {
      inputs: {
        seed: input.seed,
        steps: input.steps,
        cfg: input.cfg_scale,
        sampler_name: input.sampler_name,
        scheduler: input.scheduler,
        denoise: input.denoise,
        model: ["14", 0],
        positive: ["16", 0],
        negative: ["19", 0],
        latent_image: ["39", 0],
      },
      class_type: "KSampler",
      _meta: {
        title: "KSampler",
      },
    },
    // Source image for img2img (filename, URL, or base64 handled upstream).
    "38": {
      inputs: {
        image: input.image,
        upload: "image",
      },
      class_type: "LoadImage",
      _meta: {
        title: "Load Image",
      },
    },
    // Encode the rescaled input image into latent space.
    "39": {
      inputs: {
        pixels: ["40", 0],
        vae: ["14", 2],
      },
      class_type: "VAEEncode",
      _meta: {
        title: "VAE Encode",
      },
    },
    // Resize the input image to the sampling resolution before encoding.
    "40": {
      inputs: {
        upscale_method: input.upscale_method,
        width: input.target_width,
        height: input.target_height,
        crop: "center",
        image: ["38", 0],
      },
      class_type: "ImageScale",
      _meta: {
        title: "Upscale Image",
      },
    },
  };
}
const workflow: Workflow = {
RequestSchema,
generateWorkflow,
summary: "Image-to-Image",
description: "Text-guided Image-to-Image generation",
};
export default workflow;
================================================
FILE: example-workflows/sdxl/txt2img-with-refiner.json
================================================
{
"4": {
"inputs": {
"ckpt_name": "sd_xl_base_1.0.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint - BASE"
}
},
"5": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"6": {
"inputs": {
"text": "evening sunset scenery blue sky nature, glass bottle with a galaxy in it",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": "text, watermark",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"10": {
"inputs": {
"add_noise": "enable",
"noise_seed": 721897303308196,
"steps": 25,
"cfg": 8,
"sampler_name": "euler",
"scheduler": "normal",
"start_at_step": 0,
"end_at_step": 20,
"return_with_leftover_noise": "enable",
"model": [
"4",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSamplerAdvanced",
"_meta": {
"title": "KSampler (Advanced) - BASE"
}
},
"11": {
"inputs": {
"add_noise": "disable",
"noise_seed": 0,
"steps": 25,
"cfg": 8,
"sampler_name": "euler",
"scheduler": "normal",
"start_at_step": 20,
"end_at_step": 10000,
"return_with_leftover_noise": "disable",
"model": [
"12",
0
],
"positive": [
"15",
0
],
"negative": [
"16",
0
],
"latent_image": [
"10",
0
]
},
"class_type": "KSamplerAdvanced",
"_meta": {
"title": "KSampler (Advanced) - REFINER"
}
},
"12": {
"inputs": {
"ckpt_name": "sd_xl_refiner_1.0.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint - REFINER"
}
},
"15": {
"inputs": {
"text": "evening sunset scenery blue sky nature, glass bottle with a galaxy in it",
"clip": [
"12",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"16": {
"inputs": {
"text": "text, watermark",
"clip": [
"12",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"17": {
"inputs": {
"samples": [
"11",
0
],
"vae": [
"12",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"19": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"17",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
}
}
================================================
FILE: example-workflows/sdxl/txt2img-with-refiner.ts
================================================
import { z } from "zod";
// This gets evaluated in the context of src/workflows, so imports must be relative to that directory
import { ComfyPrompt, Workflow } from "../types";
import config from "../config";
// checkpoint is built dynamically: an enum of the checkpoints discovered on
// this server, with the warmup checkpoint as default when one is configured.
let checkpoint: any = config.models.checkpoints.enum.optional();
if (config.warmupCkpt) {
  checkpoint = checkpoint.default(config.warmupCkpt);
}
// Request schema for the SDXL base+refiner text-to-image endpoint.
// base_end_step and refiner_start_step both default to 20 so the refiner
// resumes exactly where the base pass stops (see generateWorkflow).
const RequestSchema = z.object({
  prompt: z.string().describe("The positive prompt for image generation"),
  negative_prompt: z
    .string()
    .optional()
    .default("text, watermark")
    .describe("The negative prompt for image generation"),
  width: z
    .number()
    .int()
    .min(256)
    .max(2048)
    .optional()
    .default(1024)
    .describe("Width of the generated image"),
  height: z
    .number()
    .int()
    .min(256)
    .max(2048)
    .optional()
    .default(1024)
    .describe("Height of the generated image"),
  seed: z
    .number()
    .int()
    .optional()
    // Default is a fresh random seed per request, not a fixed value.
    .default(() => Math.floor(Math.random() * 1000000000000000))
    .describe("Seed for random number generation"),
  steps: z
    .number()
    .int()
    .min(1)
    .max(100)
    .optional()
    .default(25)
    .describe("Number of sampling steps"),
  cfg_scale: z
    .number()
    .min(0)
    .max(20)
    .optional()
    .default(8)
    .describe("Classifier-free guidance scale"),
  sampler_name: config.samplers
    .optional()
    .default("euler")
    .describe("Name of the sampler to use"),
  scheduler: config.schedulers
    .optional()
    .default("normal")
    .describe("Type of scheduler to use"),
  base_start_step: z
    .number()
    .int()
    .min(0)
    .max(100)
    .optional()
    .default(0)
    .describe("Start step for base model sampling"),
  base_end_step: z
    .number()
    .int()
    .min(0)
    .max(100)
    .optional()
    .default(20)
    .describe("End step for base model sampling"),
  refiner_start_step: z
    .number()
    .int()
    .min(0)
    .max(100)
    .optional()
    .default(20)
    .describe("Start step for refiner model sampling"),
  checkpoint,
  // NOTE(review): unlike `checkpoint`, this is a plain string and is not
  // validated against config.models.checkpoints — confirm that is intentional.
  refiner_checkpoint: z
    .string()
    .optional()
    .default("sd_xl_refiner_1.0.safetensors")
    .describe("Checkpoint for the refiner model"),
});
type InputType = z.infer<typeof RequestSchema>;
/**
 * Build the ComfyUI prompt graph for an SDXL base + refiner run.
 * Node ids and wiring mirror example-workflows/sdxl/txt2img-with-refiner.json.
 * The base sampler (10) stops at base_end_step with leftover noise enabled;
 * the refiner sampler (11) resumes from refiner_start_step on that latent
 * using the refiner checkpoint and its own CLIP-encoded prompts.
 */
function generateWorkflow(input: InputType): ComfyPrompt {
  return {
    // Base checkpoint: model (out 0), CLIP (out 1), VAE (out 2).
    "4": {
      inputs: {
        ckpt_name: input.checkpoint,
      },
      class_type: "CheckpointLoaderSimple",
      _meta: {
        title: "Load Checkpoint - BASE",
      },
    },
    // Starting latent at the requested resolution; single image per request.
    "5": {
      inputs: {
        width: input.width,
        height: input.height,
        batch_size: 1,
      },
      class_type: "EmptyLatentImage",
      _meta: {
        title: "Empty Latent Image",
      },
    },
    // Positive prompt encoded with the BASE model's CLIP.
    "6": {
      inputs: {
        text: input.prompt,
        clip: ["4", 1],
      },
      class_type: "CLIPTextEncode",
      _meta: {
        title: "CLIP Text Encode (Prompt)",
      },
    },
    // Negative prompt encoded with the BASE model's CLIP.
    "7": {
      inputs: {
        text: input.negative_prompt,
        clip: ["4", 1],
      },
      class_type: "CLIPTextEncode",
      _meta: {
        title: "CLIP Text Encode (Prompt)",
      },
    },
    // Base pass: leaves leftover noise so the refiner can continue denoising.
    "10": {
      inputs: {
        add_noise: "enable",
        noise_seed: input.seed,
        steps: input.steps,
        cfg: input.cfg_scale,
        sampler_name: input.sampler_name,
        scheduler: input.scheduler,
        start_at_step: input.base_start_step,
        end_at_step: input.base_end_step,
        return_with_leftover_noise: "enable",
        model: ["4", 0],
        positive: ["6", 0],
        negative: ["7", 0],
        latent_image: ["5", 0],
      },
      class_type: "KSamplerAdvanced",
      _meta: {
        title: "KSampler (Advanced) - BASE",
      },
    },
    // Refiner pass: no new noise; runs from refiner_start_step to completion
    // (end_at_step 10000 effectively means "until the schedule ends").
    "11": {
      inputs: {
        add_noise: "disable",
        noise_seed: 0,
        steps: input.steps,
        cfg: input.cfg_scale,
        sampler_name: input.sampler_name,
        scheduler: input.scheduler,
        start_at_step: input.refiner_start_step,
        end_at_step: 10000,
        return_with_leftover_noise: "disable",
        model: ["12", 0],
        positive: ["15", 0],
        negative: ["16", 0],
        latent_image: ["10", 0],
      },
      class_type: "KSamplerAdvanced",
      _meta: {
        title: "KSampler (Advanced) - REFINER",
      },
    },
    // Refiner checkpoint loader.
    "12": {
      inputs: {
        ckpt_name: input.refiner_checkpoint,
      },
      class_type: "CheckpointLoaderSimple",
      _meta: {
        title: "Load Checkpoint - REFINER",
      },
    },
    // Positive prompt re-encoded with the REFINER model's CLIP.
    "15": {
      inputs: {
        text: input.prompt,
        clip: ["12", 1],
      },
      class_type: "CLIPTextEncode",
      _meta: {
        title: "CLIP Text Encode (Prompt)",
      },
    },
    // Negative prompt re-encoded with the REFINER model's CLIP.
    "16": {
      inputs: {
        text: input.negative_prompt,
        clip: ["12", 1],
      },
      class_type: "CLIPTextEncode",
      _meta: {
        title: "CLIP Text Encode (Prompt)",
      },
    },
    // Decode the refined latent with the refiner's VAE.
    "17": {
      inputs: {
        samples: ["11", 0],
        vae: ["12", 2],
      },
      class_type: "VAEDecode",
      _meta: {
        title: "VAE Decode",
      },
    },
    // Persist the final image.
    "19": {
      inputs: {
        filename_prefix: "ComfyUI",
        images: ["17", 0],
      },
      class_type: "SaveImage",
      _meta: {
        title: "Save Image",
      },
    },
  };
}
// Workflow registration: request schema plus prompt builder. summary and
// description feed the generated API docs, matching the metadata style used
// by the SDXL img2img workflow.
const workflow: Workflow = {
  RequestSchema,
  generateWorkflow,
  summary: "Text-to-Image with Refiner",
  description: "Text-to-Image generation with an SDXL base model and refiner pass",
};
export default workflow;
================================================
FILE: example-workflows/sdxl/txt2img.json
================================================
{
"4": {
"inputs": {
"ckpt_name": "sd_xl_base_1.0.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint - BASE"
}
},
"5": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"6": {
"inputs": {
"text": "evening sunset scenery blue sky nature, glass bottle with a galaxy in it",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": "text, watermark",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"17": {
"inputs": {
"samples": [
"49",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"19": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"17",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"49": {
"inputs": {
"seed": 0,
"steps": 20,
"cfg": 8,
"sampler_name": "euler",
"scheduler": "normal",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
}
}
================================================
FILE: example-workflows/sdxl/txt2img.ts
================================================
import { z } from "zod";
// This gets evaluated in the context of src/workflows, so imports must be relative to that directory
import { ComfyPrompt, Workflow } from "../types";
import config from "../config";
// checkpoint is built dynamically: an enum of the checkpoints discovered on
// this server, with the warmup checkpoint as default when one is configured.
let checkpoint: any = config.models.checkpoints.enum.optional();
if (config.warmupCkpt) {
  checkpoint = checkpoint.default(config.warmupCkpt);
}
// Request schema for the SDXL text-to-image endpoint.
const RequestSchema = z.object({
  prompt: z.string().describe("The positive prompt for image generation"),
  negative_prompt: z
    .string()
    .optional()
    .default("text, watermark")
    .describe("The negative prompt for image generation"),
  width: z
    .number()
    .int()
    .min(256)
    .max(2048)
    .optional()
    .default(1024)
    .describe("Width of the generated image"),
  height: z
    .number()
    .int()
    .min(256)
    .max(2048)
    .optional()
    .default(1024)
    .describe("Height of the generated image"),
  seed: z
    .number()
    .int()
    .optional()
    // Consistency fix: the other SDXL workflows draw seeds from
    // [0, 1e15); this one used 1e11. Aligned so all SDXL endpoints
    // cover the same seed range.
    .default(() => Math.floor(Math.random() * 1000000000000000))
    .describe("Seed for random number generation"),
  steps: z
    .number()
    .int()
    .min(1)
    .max(100)
    .optional()
    .default(20)
    .describe("Number of sampling steps"),
  cfg_scale: z
    .number()
    .min(0)
    .max(20)
    .optional()
    .default(8)
    .describe("Classifier-free guidance scale"),
  sampler_name: config.samplers
    .optional()
    .default("euler")
    .describe("Name of the sampler to use"),
  scheduler: config.schedulers
    .optional()
    .default("normal")
    .describe("Type of scheduler to use"),
  denoise: z
    .number()
    .min(0)
    .max(1)
    .optional()
    .default(1)
    .describe("Denoising strength"),
  checkpoint,
});
type InputType = z.infer<typeof RequestSchema>;
/**
 * Build the ComfyUI prompt graph for a single SDXL text-to-image run.
 * Node ids and wiring mirror example-workflows/sdxl/txt2img.json; link
 * values such as ["4", 0] are [source node id, output index] references.
 */
function generateWorkflow(input: InputType): ComfyPrompt {
  // Each node is assembled separately, then wired together by id below.
  const loadCheckpoint = {
    inputs: { ckpt_name: input.checkpoint },
    class_type: "CheckpointLoaderSimple",
    _meta: { title: "Load Checkpoint - BASE" },
  };
  const emptyLatent = {
    inputs: { width: input.width, height: input.height, batch_size: 1 },
    class_type: "EmptyLatentImage",
    _meta: { title: "Empty Latent Image" },
  };
  const positiveConditioning = {
    inputs: { text: input.prompt, clip: ["4", 1] },
    class_type: "CLIPTextEncode",
    _meta: { title: "CLIP Text Encode (Prompt)" },
  };
  const negativeConditioning = {
    inputs: { text: input.negative_prompt, clip: ["4", 1] },
    class_type: "CLIPTextEncode",
    _meta: { title: "CLIP Text Encode (Prompt)" },
  };
  const decodeLatent = {
    inputs: { samples: ["49", 0], vae: ["4", 2] },
    class_type: "VAEDecode",
    _meta: { title: "VAE Decode" },
  };
  const saveImage = {
    inputs: { filename_prefix: "ComfyUI", images: ["17", 0] },
    class_type: "SaveImage",
    _meta: { title: "Save Image" },
  };
  const sampler = {
    inputs: {
      seed: input.seed,
      steps: input.steps,
      cfg: input.cfg_scale,
      sampler_name: input.sampler_name,
      scheduler: input.scheduler,
      denoise: input.denoise,
      model: ["4", 0],
      positive: ["6", 0],
      negative: ["7", 0],
      latent_image: ["5", 0],
    },
    class_type: "KSampler",
    _meta: { title: "KSampler" },
  };
  return {
    "4": loadCheckpoint,
    "5": emptyLatent,
    "6": positiveConditioning,
    "7": negativeConditioning,
    "17": decodeLatent,
    "19": saveImage,
    "49": sampler,
  };
}
// Workflow registration: request schema plus prompt builder. summary and
// description feed the generated API docs, matching the metadata style used
// by the SDXL img2img workflow.
const workflow: Workflow = {
  RequestSchema,
  generateWorkflow,
  summary: "Text-to-Image",
  description: "Text-to-Image generation with SDXL",
};
export default workflow;
================================================
FILE: generate-workflow
================================================
#! /bin/bash
# Generate a typed workflow endpoint file from a ComfyUI prompt JSON by
# sending the prompt plus the system prompt in
# claude-endpoint-creation-prompt.md to an LLM and writing its reply out.
usage="Usage: $0 <input-prompt-json> <output-typescript-file>"
# Validate arguments up front; previously $usage was defined but never shown
# and missing arguments produced confusing downstream jq/curl errors.
if [ "$#" -lt 2 ]; then
  echo "$usage" >&2
  exit 1
fi
input_prompt_json=$1
output_typescript_file=$2
if [ ! -r "$input_prompt_json" ]; then
  echo "Error: cannot read input prompt file: $input_prompt_json" >&2
  exit 1
fi
set -f # Disable globbing, there's a * in the input prompt
system_prompt=$(jq -R -s '{"text": .}' claude-endpoint-creation-prompt.md | jq .text)
# Quoted so paths containing spaces or glob characters work.
input_prompt=$(jq @json "$input_prompt_json")
# Select LLM provider based on available API keys.
# Anthropic (Claude) is preferred when both keys are set.
# MiniMax is used as a fallback via its OpenAI-compatible API.
if [ -n "$ANTHROPIC_API_KEY" ]; then
  provider="anthropic"
  api_key="$ANTHROPIC_API_KEY"
  api_url="https://api.anthropic.com/v1/messages"
  model_id="claude-sonnet-4-20250514"
  anthropic_version="2023-06-01"
elif [ -n "$MINIMAX_API_KEY" ]; then
  provider="minimax"
  api_key="$MINIMAX_API_KEY"
  api_url="https://api.minimax.io/v1/chat/completions"
  model_id="MiniMax-M2.7"
else
  echo "Please set the ANTHROPIC_API_KEY or MINIMAX_API_KEY environment variable" >&2
  exit 1
fi
if [ "$provider" = "minimax" ]; then
  # MiniMax uses the OpenAI-compatible API format.
  # temperature must be in (0.0, 1.0] for MiniMax; 0.01 gives near-deterministic output.
  api_body=$(
    cat <<EOF
{
  "model": "$model_id",
  "messages": [
    {"role": "system", "content": $system_prompt},
    {"role": "user", "content": $input_prompt}
  ],
  "max_tokens": 8192,
  "temperature": 0.01
}
EOF
  )
  response=$(
    curl -s -X POST \
      -H "Authorization: Bearer $api_key" \
      -H "Content-Type: application/json" \
      -d "$api_body" \
      "$api_url"
  )
  response_text=$(echo "$response" | jq -r '.choices[0].message.content // empty')
else
  # Anthropic API format
  api_body=$(
    cat <<EOF
{
  "model": "$model_id",
  "system": $system_prompt,
  "max_tokens": 8192,
  "temperature": 0,
  "messages": [
    {
      "role": "user",
      "content": $input_prompt
    }
  ]
}
EOF
  )
  response=$(
    curl -s -X POST \
      -H "x-api-key: $api_key" \
      -H "Content-Type: application/json" \
      -H "anthropic-version: $anthropic_version" \
      -d "$api_body" \
      "$api_url"
  )
  response_text=$(echo "$response" | jq -r '.content[0].text // empty')
fi
if [ -z "$response_text" ]; then
  echo "Error: API call failed" >&2
  echo "$response" | jq . >&2
  exit 1
fi
# Strip code-block delimiters if the model wrapped the output in ``` fences.
first_line=$(echo "$response_text" | head -n 1)
if [[ "$first_line" == '```'* ]]; then
  echo "$response_text" | tail -n +2 | head -n -1 > "$output_typescript_file"
else
  echo "$response_text" > "$output_typescript_file"
fi
set +f
================================================
FILE: manifest.yml
================================================
# apt:
# - git
# - ffmpeg
# - libgl1
custom_nodes:
- comfyui_essentials
# - comfyui-kjnodes
# - https://github.com/visualbruno/ComfyUI-Hunyuan3d-2-1.git
# pip:
# - numpy
models:
before_start:
- url: https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16
local_path: models/checkpoints/dreamshaper_8.safetensors
- url: https://huggingface.co/Lykon/DreamShaper/resolve/main/DreamShaper_5_beta2_noVae_half_pruned.safetensors?download=true
local_path: models/checkpoints/dreamshaper5.safetensors
================================================
FILE: package.json
================================================
{
"name": "comfyui-api",
"version": "1.18.0",
"description": "Wraps comfyui to make it easier to use as a stateless web service",
"main": "dist/src/index.js",
"scripts": {
"test": "vitest run",
"unit-test": "vitest run test/utils.spec.ts",
"build": "tsc",
"build-binary": "./build-binary",
"postinstall": "npm install --cpu=wasm32 sharp"
},
"author": "Shawn Rushefsky",
"license": "MIT",
"devDependencies": {
"@anthropic-ai/sdk": "^0.26.1",
"@types/node": "^20.12.7",
"@types/ws": "^8.5.13",
"@yao-pkg/pkg": "^6.1.0",
"minimist": "^1.2.8",
"svix": "^1.78.0",
"vitest": "^3.0.0"
},
"bin": {
"comfyui-api": "dist/src/index.js"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.820.0",
"@azure/identity": "^4.13.0",
"@azure/storage-blob": "^12.28.0",
"@fastify/swagger": "^9.5.0",
"@fastify/swagger-ui": "^5.2.2",
"@rollup/rollup-linux-x64-gnu": "^4.60.2",
"@smithy/node-http-handler": "^4.0.5",
"fastify": "^5.8.5",
"fastify-type-provider-zod": "^4.0.2",
"sharp": "^0.34.5",
"typescript": "^5.8.3",
"undici": "^7.24.0",
"ws": "^8.18.2",
"yaml": "^2.8.3",
"zod": "^3.25.36"
},
"pkg": {
"targets": [
"node20-linux-x64"
],
"outputPath": "bin",
"public": true
},
"engines": {
"node": ">=20.18.1"
}
}
================================================
FILE: scripts/smoke-proxy.mjs
================================================
import fastify from "fastify";
import { fetch } from "undici";
import { getProxyDispatcher } from "../dist/src/proxy-dispatcher.js";
// Smoke test: start a local Fastify webhook receiver, POST to it through the
// configured proxy dispatcher, and print the round-trip result.
async function main() {
  const app = fastify({ logger: true });
  // Echo endpoint: confirms the request body made it through the proxy.
  app.post("/webhook", async (req, reply) => {
    return reply.send({ success: true, received: req.body || null });
  });
  await app.listen({ port: 12345, host: "127.0.0.1" });
  await app.ready();
  console.log("Local webhook server listening on http://127.0.0.1:12345/webhook");
  try {
    const resp = await fetch("http://127.0.0.1:12345/webhook", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ ping: true }),
      dispatcher: getProxyDispatcher(),
    });
    console.log("Fetch status:", resp.status, resp.statusText);
    const body = await resp.json();
    console.log("Response:", body);
  } finally {
    // Fix: close the server even when the fetch or JSON parse throws, so a
    // failed run shuts down cleanly instead of leaving the listener open.
    await app.close();
  }
}
main().catch((err) => {
  console.error("Smoke test failed:", err);
  process.exit(1);
});
================================================
FILE: src/comfy-node-preprocessors.ts
================================================
import path from "path";
import { FastifyBaseLogger } from "fastify";
import { ComfyNode, ComfyPrompt, WorkflowCredential } from "./types";
import config from "./config";
import getStorageManager from "./remote-storage-manager";
import { isValidUrl } from "./utils";
import { processInputMedia } from "./image-tools";
import { z } from "zod";
import { CredentialProvider, createCredentialProvider } from "./credential-resolver";
const configPath = path.join(config.comfyDir, "models", "configs");
const checkpointPath = path.join(config.comfyDir, "models", "checkpoints");
const diffusersPath = path.join(config.comfyDir, "models", "diffusers");
const vaePath = path.join(config.comfyDir, "models", "vae");
const loraPath = path.join(config.comfyDir, "models", "loras");
const controlNetPath = path.join(config.comfyDir, "models", "controlnet");
const clipPath = path.join(config.comfyDir, "models", "text_encoders");
const styleModelPath = path.join(config.comfyDir, "models", "style_models");
const gligenPath = path.join(config.comfyDir, "models", "gligen");
const upscaleModelPath = path.join(config.comfyDir, "models", "upscale_models");
// Registers a newly downloaded model filename in the shared in-memory config
// so later requests can validate against it. Keeps `all` deduplicated and
// sorted, and rebuilds the zod enum to match. No-op when the name is already
// present. Mutates the module-level `config` object.
function updateModelsInConfig(modelType: string, modelName: string) {
  if (config.models[modelType].all.includes(modelName)) {
    return;
  }
  config.models[modelType].all.push(modelName);
  config.models[modelType].all = Array.from(
    new Set(config.models[modelType].all)
  ).sort();
  config.models[modelType].enum = z.enum(
    config.models[modelType].all as [string, ...string[]]
  );
}
/**
 * Preprocess a CheckpointLoader node: if `config_name` or `ckpt_name` is a
 * URL, download it into the corresponding local models directory (configs /
 * checkpoints), register the resulting filename in the in-memory model
 * config, and rewrite the node input to the local filename.
 */
async function processCheckpointLoaderNode(
  node: ComfyNode,
  getCredentials: CredentialProvider
): Promise<ComfyNode> {
  const storageManager = getStorageManager();
  const { config_name, ckpt_name } = node.inputs;
  if (isValidUrl(config_name)) {
    const localConfigPath = await storageManager.downloadFile(
      config_name,
      configPath,
      undefined,
      getCredentials(config_name)
    );
    const filename = path.basename(localConfigPath);
    updateModelsInConfig("configs", filename);
    node.inputs.config_name = filename;
  }
  if (isValidUrl(ckpt_name)) {
    const localCkptPath = await storageManager.downloadFile(
      ckpt_name,
      checkpointPath,
      undefined,
      getCredentials(ckpt_name)
    );
    const filename = path.basename(localCkptPath);
    updateModelsInConfig("checkpoints", filename);
    node.inputs.ckpt_name = filename;
  }
  return node;
}
/**
 * Preprocess a CheckpointLoaderSimple node: if `ckpt_name` is a URL, download
 * it into the local checkpoints directory, register the filename in the
 * in-memory model config, and rewrite the node input to the local filename.
 */
async function processCheckpointLoaderSimpleNode(
  node: ComfyNode,
  getCredentials: CredentialProvider
): Promise<ComfyNode> {
  const storageManager = getStorageManager();
  const { ckpt_name } = node.inputs;
  if (isValidUrl(ckpt_name)) {
    const localCkptPath = await storageManager.downloadFile(
      ckpt_name,
      checkpointPath,
      undefined,
      getCredentials(ckpt_name)
    );
    const filename = path.basename(localCkptPath);
    updateModelsInConfig("checkpoints", filename);
    node.inputs.ckpt_name = filename;
  }
  return node;
}
/**
 * Preprocess a DiffusersLoader node: if `model_path` is a URL, clone/download
 * the whole repository into the local diffusers directory, register it in the
 * in-memory model config, and rewrite the node input to the local name.
 * Unlike the file-based loaders this uses downloadRepo, which does not yet
 * support credentials (hence the unused credential provider).
 */
async function processDiffusersLoaderNode(
  node: ComfyNode,
  _getCredentials: CredentialProvider
): Promise<ComfyNode> {
  const storageManager = getStorageManager();
  const { model_path } = node.inputs;
  // Note: downloadRepo doesn't support credentials yet (git clone)
  if (isValidUrl(model_path)) {
    const downloadedPath = await storageManager.downloadRepo(
      model_path,
      diffusersPath
    );
    const filename = path.basename(downloadedPath);
    updateModelsInConfig("diffusers", filename);
    node.inputs.model_path = filename;
  }
  return node;
}
/**
 * Preprocess a LoraLoader node: if `lora_name` is a URL, download it into the
 * local loras directory, register the filename in the in-memory model config,
 * and rewrite the node input to the local filename.
 */
async function processLoraLoaderNode(
  node: ComfyNode,
  getCredentials: CredentialProvider
): Promise<ComfyNode> {
  const storageManager = getStorageManager();
  const { lora_name } = node.inputs;
  if (isValidUrl(lora_name)) {
    const localLoraPath = await storageManager.downloadFile(
      lora_name,
      loraPath,
      undefined,
      getCredentials(lora_name)
    );
    const filename = path.basename(localLoraPath);
    updateModelsInConfig("loras", filename);
    node.inputs.lora_name = filename;
  }
  return node;
}
/**
 * Preprocess a VAELoader node: if `vae_name` is a URL, download it into the
 * local vae directory, register the filename in the in-memory model config,
 * and rewrite the node input to the local filename.
 */
async function processVAELoaderNode(
  node: ComfyNode,
  getCredentials: CredentialProvider
): Promise<ComfyNode> {
  const storageManager = getStorageManager();
  const { vae_name } = node.inputs;
  if (isValidUrl(vae_name)) {
    const localVaePath = await storageManager.downloadFile(
      vae_name,
      vaePath,
      undefined,
      getCredentials(vae_name)
    );
    const filename = path.basename(localVaePath);
    updateModelsInConfig("vae", filename);
    node.inputs.vae_name = filename;
  }
  return node;
}
/**
 * Preprocess a ControlNetLoader node: if `control_net_name` is a URL,
 * download it into the local controlnet directory, register the filename in
 * the in-memory model config, and rewrite the node input to the local name.
 */
async function processControlNetLoaderNode(
  node: ComfyNode,
  getCredentials: CredentialProvider
): Promise<ComfyNode> {
  const storageManager = getStorageManager();
  const { control_net_name } = node.inputs;
  if (isValidUrl(control_net_name)) {
    const localControlNetPath = await storageManager.downloadFile(
      control_net_name,
      controlNetPath,
      undefined,
      getCredentials(control_net_name)
    );
    const filename = path.basename(localControlNetPath);
    updateModelsInConfig("controlnet", filename);
    node.inputs.control_net_name = filename;
  }
  return node;
}
/**
 * Preprocess a UNETLoader node: if `unet_name` is a URL, download it, register
 * it in the in-memory model config, and rewrite the input to the local name.
 * NOTE(review): the file is saved to the diffusers directory and registered
 * under "diffusers", while ComfyUI's UNETLoader typically reads from
 * models/unet (diffusion_models) — confirm this destination is intentional.
 */
async function processUNETLoaderNode(
  node: ComfyNode,
  getCredentials: CredentialProvider
): Promise<ComfyNode> {
  const storageManager = getStorageManager();
  const { unet_name } = node.inputs;
  if (isValidUrl(unet_name)) {
    const localUNETPath = await storageManager.downloadFile(
      unet_name,
      diffusersPath,
      undefined,
      getCredentials(unet_name)
    );
    const filename = path.basename(localUNETPath);
    updateModelsInConfig("diffusers", filename);
    node.inputs.unet_name = filename;
  }
  return node;
}
/**
 * Preprocess a CLIPLoader node: if `clip_name` is a URL, download it into the
 * local text_encoders directory, register the filename in the in-memory model
 * config, and rewrite the node input to the local filename.
 */
async function processCLIPLoaderNode(
  node: ComfyNode,
  getCredentials: CredentialProvider
): Promise<ComfyNode> {
  const storageManager = getStorageManager();
  const { clip_name } = node.inputs;
  if (isValidUrl(clip_name)) {
    const localCLIPPath = await storageManager.downloadFile(
      clip_name,
      clipPath,
      undefined,
      getCredentials(clip_name)
    );
    const filename = path.basename(localCLIPPath);
    updateModelsInConfig("text_encoders", filename);
    node.inputs.clip_name = filename;
  }
  return node;
}
/**
 * Preprocess a DualCLIPLoader node: resolves both `clip_name1` and
 * `clip_name2` independently — each URL is downloaded into the local
 * text_encoders directory, registered in the in-memory model config, and the
 * corresponding node input rewritten to the local filename.
 */
async function processDualCLIPLoaderNode(
  node: ComfyNode,
  getCredentials: CredentialProvider
): Promise<ComfyNode> {
  const storageManager = getStorageManager();
  const { clip_name1, clip_name2 } = node.inputs;
  if (isValidUrl(clip_name1)) {
    const localCLIPPath1 = await storageManager.downloadFile(
      clip_name1,
      clipPath,
      undefined,
      getCredentials(clip_name1)
    );
    const filename = path.basename(localCLIPPath1);
    updateModelsInConfig("text_encoders", filename);
    node.inputs.clip_name1 = filename;
  }
  if (isValidUrl(clip_name2)) {
    const localCLIPPath2 = await storageManager.downloadFile(
      clip_name2,
      clipPath,
      undefined,
      getCredentials(clip_name2)
    );
    const filename = path.basename(localCLIPPath2);
    updateModelsInConfig("text_encoders", filename);
    node.inputs.clip_name2 = filename;
  }
  return node;
}
async function processStyleModelLoaderNode(
node: ComfyNode,
getCredentials: CredentialProvider
): Promise<ComfyNode> {
const storageManager = getStorageManager();
const { style_model_name } = node.inputs;
if (isValidUrl(style_model_name)) {
const localStyleModelPath = await storageManager.downloadFile(
style_model_name,
styleModelPath,
undefined,
getCredentials(style_model_name)
);
const filename = path.basename(localStyleModelPath);
updateModelsInConfig("style_models", filename);
node.inputs.style_model_name = filename;
}
return node;
}
async function processGLIGENLoaderNode(
node: ComfyNode,
getCredentials: CredentialProvider
): Promise<ComfyNode> {
const storageManager = getStorageManager();
const { gligen_name } = node.inputs;
if (isValidUrl(gligen_name)) {
const localGLIGENPath = await storageManager.downloadFile(
gligen_name,
gligenPath,
undefined,
getCredentials(gligen_name)
);
const filename = path.basename(localGLIGENPath);
updateModelsInConfig("gligen", filename);
node.inputs.gligen_name = filename;
}
return node;
}
async function processUpscaleModelLoaderNode(
node: ComfyNode,
getCredentials: CredentialProvider
): Promise<ComfyNode> {
const storageManager = getStorageManager();
const { model_name } = node.inputs;
if (isValidUrl(model_name)) {
const localModelPath = await storageManager.downloadFile(
model_name,
upscaleModelPath,
undefined,
getCredentials(model_name)
);
const filename = path.basename(localModelPath);
updateModelsInConfig("upscale_models", filename);
node.inputs.model_name = filename;
}
return node;
}
/**
 * Dispatches a model-loading node to the preprocessor matching its
 * class_type. Unrecognized class types pass through unchanged.
 *
 * @param node - The ComfyNode to preprocess.
 * @param log - Logger (currently unused; kept for interface stability).
 * @param getCredentials - Supplies download credentials per URL; defaults to
 *        a provider that returns no credentials.
 * @returns The (possibly mutated) node.
 */
export async function processModelLoadingNode(
  node: ComfyNode,
  log: FastifyBaseLogger,
  getCredentials: CredentialProvider = () => undefined
): Promise<ComfyNode> {
  type LoaderHandler = (
    n: ComfyNode,
    creds: CredentialProvider
  ) => Promise<ComfyNode>;
  // Several class types share a handler (e.g. both checkpoint loaders).
  const handlers: Record<string, LoaderHandler> = {
    CheckpointLoader: processCheckpointLoaderNode,
    CheckpointLoaderSimple: processCheckpointLoaderSimpleNode,
    unCLIPCheckpointLoader: processCheckpointLoaderSimpleNode,
    DiffusersLoader: processDiffusersLoaderNode,
    LoraLoader: processLoraLoaderNode,
    LoraLoaderModelOnly: processLoraLoaderNode,
    VAELoader: processVAELoaderNode,
    ControlNetLoader: processControlNetLoaderNode,
    DiffControlNetLoader: processControlNetLoaderNode,
    UNETLoader: processUNETLoaderNode,
    CLIPLoader: processCLIPLoaderNode,
    CLIPVisionLoader: processCLIPLoaderNode,
    DualCLIPLoader: processDualCLIPLoaderNode,
    StyleModelLoader: processStyleModelLoaderNode,
    GLIGENLoader: processGLIGENLoaderNode,
    UpscaleModelLoader: processUpscaleModelLoaderNode,
  };
  const handler = handlers[node.class_type];
  return handler ? handler(node, getCredentials) : node;
}
/**
 * Replaces a LoadImage node's `image` input (base64 data or URL) with a
 * reference to a locally-materialized file via processInputMedia.
 *
 * @param node - The ComfyNode whose `image` input to process.
 * @param log - Logger passed through to processInputMedia.
 * @returns The same node, mutated in place.
 */
export async function processLoadImageNode(
  node: ComfyNode,
  log: FastifyBaseLogger
): Promise<ComfyNode> {
  const localImage = await processInputMedia(node.inputs.image, log);
  node.inputs.image = localImage;
  return node;
}
/**
 * Materializes every image input of a directory-of-images node into a
 * job-scoped local directory, then points the node's `directory` input at
 * that directory name.
 *
 * @param node - The ComfyNode whose `directory` input is an array of
 *        image inputs (base64 data or URLs).
 * @param jobId - Used as the target directory name to avoid collisions.
 * @param log - Logger passed through to processInputMedia.
 * @returns The same node, mutated in place.
 */
export async function processLoadDirectoryOfImagesNode(
  node: ComfyNode,
  jobId: string,
  log: FastifyBaseLogger
): Promise<ComfyNode> {
  // Download/decode all entries concurrently; results land on disk under jobId.
  await Promise.all(
    node.inputs.directory.map((imageInput: string) =>
      processInputMedia(imageInput, log, jobId)
    )
  );
  // ComfyUI expects the directory name here, not the list of files.
  node.inputs.directory = jobId;
  return node;
}
/**
 * Replaces a video-loading node's `video` and/or `file` inputs (base64 data
 * or URLs) with references to locally-materialized files. Different video
 * loader node types use different input field names, so both are checked.
 *
 * @param node - The ComfyNode to process.
 * @param log - Logger passed through to processInputMedia.
 * @returns The same node, mutated in place.
 */
export async function processLoadVideoNode(
  node: ComfyNode,
  log: FastifyBaseLogger
): Promise<ComfyNode> {
  if (node.inputs.video) {
    node.inputs.video = await processInputMedia(node.inputs.video, log);
  }
  if (node.inputs.file) {
    node.inputs.file = await processInputMedia(node.inputs.file, log);
  }
  return node;
}
/**
 * Replaces a LoadAudio node's `audio` input (base64 data or URL) with a
 * reference to a locally-materialized file. No-op when the input is absent.
 *
 * @param node - The ComfyNode to process.
 * @param log - Logger passed through to processInputMedia.
 * @returns The same node, mutated in place.
 */
export async function processLoadAudioNode(
  node: ComfyNode,
  log: FastifyBaseLogger
): Promise<ComfyNode> {
  if (!node.inputs.audio) {
    return node;
  }
  node.inputs.audio = await processInputMedia(node.inputs.audio, log);
  return node;
}
// Node class_types whose `directory` input may be an array of images that we
// must download into a local directory (see processLoadDirectoryOfImagesNode).
const loadDirectoryOfImagesNodeTypes = new Set<string>([
  "VHS_LoadImages",
  "VHS_LoadImagesPath",
]);
// Node class_types handled by processLoadVideoNode (core and
// VideoHelperSuite video loaders).
const loadVideoNodeTypes = new Set<string>([
  "LoadVideo",
  "VHS_LoadVideo",
  "VHS_LoadVideoPath",
  "VHS_LoadVideoFFmpegPath",
  "VHS_LoadVideoFFmpeg",
]);
// Node class_types routed to processModelLoadingNode; must stay in sync with
// the switch/dispatch cases in that function.
const modelLoadingNodeTypes = new Set([
  "CheckpointLoader",
  "CheckpointLoaderSimple",
  "DiffusersLoader",
  "unCLIPCheckpointLoader",
  "LoraLoader",
  "LoraLoaderModelOnly",
  "VAELoader",
  "ControlNetLoader",
  "DiffControlNetLoader",
  "UNETLoader",
  "CLIPLoader",
  "DualCLIPLoader",
  "CLIPVisionLoader",
  "StyleModelLoader",
  "GLIGENLoader",
  "UpscaleModelLoader",
]);
// Node class_types handled by processLoadAudioNode.
const audioLoadingNodeTypes = new Set(["LoadAudio"]);
/**
 * Error shape thrown by preprocessNodes when a node's inputs cannot be
 * prepared. `code` is an HTTP-style status (400 for bad input) and
 * `location` is a dotted path identifying the offending input field
 * (e.g. "prompt.3.inputs.image").
 * NOTE(review): `message?` duplicates Error's own `message` property; kept
 * as-is for backward compatibility.
 */
export type NodeProcessError = Error & {
  code?: number;
  location?: string;
  message?: string;
};
/**
 * Prepares every node in a ComfyUI prompt for execution:
 * - Rewrites `filename_prefix` inputs so outputs are namespaced by the
 *   request id (preventing collisions between concurrent prompts).
 * - Downloads/decodes image, directory-of-images, video, and audio inputs
 *   supplied as URLs or base64 data into local files.
 * - Resolves model-loader inputs given as URLs into locally cached models.
 *
 * Branch order matters: the filename_prefix check runs first, so a node with
 * a filename_prefix is never treated as a media-loading node.
 *
 * @param prompt - The ComfyUI prompt graph, keyed by node id. Mutated in place.
 * @param id - The API request id, used for output prefixes and job-scoped dirs.
 * @param log - Logger for diagnostics.
 * @param credentials - Optional URL-pattern-matched credentials for downloads.
 * @returns The mutated prompt and whether any node will save an output file.
 * @throws NodeProcessError (code 400, with `location`) when any download or
 *         media-processing step fails.
 */
export async function preprocessNodes(
  prompt: ComfyPrompt,
  id: string,
  log: FastifyBaseLogger,
  credentials?: WorkflowCredential[]
): Promise<{ prompt: ComfyPrompt; hasSaveImage: boolean }> {
  // Create a credential provider for URL pattern matching
  const getCredentials = createCredentialProvider(credentials);
  let hasSaveImage = false;
  for (const nodeId in prompt) {
    const node = prompt[nodeId];
    if (
      node.inputs.filename_prefix &&
      typeof node.inputs.filename_prefix === "string"
    ) {
      /**
       * If the node is for saving files, we want to set the filename_prefix
       * to the id of the prompt. This ensures no collisions between prompts
       * from different users.
       */
      node.inputs.filename_prefix = config.prependFilenames
        ? id + "_" + node.inputs.filename_prefix
        : id;
      if (
        typeof node.inputs.save_output !== "undefined" &&
        !node.inputs.save_output
      ) {
        // Node explicitly opts out of saving; don't count it as a save node.
        continue;
      }
      hasSaveImage = true;
    } else if (node?.inputs?.image && typeof node.inputs.image === "string") {
      /**
       * If the node is for loading an image, the user will have provided
       * the image as base64 encoded data, or as a url. we need to download
       * the image if it's a url, and save it to a local file.
       */
      try {
        Object.assign(node, await processLoadImageNode(node, log));
      } catch (e: any) {
        const err = new Error(
          `Failed to download image for node ${nodeId}: ${e.message}`
        ) as NodeProcessError;
        err.code = 400;
        err.location = `prompt.${nodeId}.inputs.image`;
        throw err;
      }
    } else if (
      loadDirectoryOfImagesNodeTypes.has(node.class_type) &&
      Array.isArray(node.inputs.directory) &&
      node.inputs.directory.every((x: any) => typeof x === "string")
    ) {
      /**
       * If the node is for loading a directory of images, the user will have
       * provided the local directory as a string or an array of strings. If it's an
       * array, we need to download each image to a local file, and update the input
       * to be the local directory.
       */
      try {
        Object.assign(
          node,
          await processLoadDirectoryOfImagesNode(node, id, log)
        );
      } catch (e: any) {
        const err = new Error(
          `Failed to download images for node ${nodeId}: ${e.message}`
        ) as NodeProcessError;
        err.code = 400;
        err.location = `prompt.${nodeId}.inputs.directory`;
        throw err;
      }
    } else if (loadVideoNodeTypes.has(node.class_type)) {
      /**
       * If the node is for loading a video, the user will have provided
       * the video as base64 encoded data, or as a url. we need to download
       * the video if it's a url, and save it to a local file.
       */
      try {
        Object.assign(node, await processLoadVideoNode(node, log));
      } catch (e: any) {
        const err = new Error(
          `Failed to download video for node ${nodeId}: ${e.message}`
        ) as NodeProcessError;
        err.code = 400;
        err.location = `prompt.${nodeId}.inputs.video`;
        throw err;
      }
    } else if (audioLoadingNodeTypes.has(node.class_type)) {
      /**
       * If the node is for loading audio, the user will have provided
       * the audio as base64 encoded data, or as a url. we need to download
       * the audio if it's a url, and save it to a local file.
       */
      try {
        Object.assign(node, await processLoadAudioNode(node, log));
      } catch (e: any) {
        const err = new Error(
          `Failed to download audio for node ${nodeId}: ${e.message}`
        ) as NodeProcessError;
        err.code = 400;
        err.location = `prompt.${nodeId}.inputs.audio`;
        throw err;
      }
    } else if (modelLoadingNodeTypes.has(node.class_type)) {
      // Model-loader nodes may reference models by URL; resolve them locally.
      try {
        Object.assign(node, await processModelLoadingNode(node, log, getCredentials));
      } catch (e: any) {
        const err = new Error(
          `Failed to process model for node ${nodeId}: ${e.message}`
        ) as NodeProcessError;
        err.code = 400;
        err.location = `prompt.${nodeId}.inputs`;
        throw err;
      }
    }
  }
  return { prompt, hasSaveImage };
}
================================================
FILE: src/comfy.ts
================================================
import { sleep } from "./utils";
import config from "./config";
import { CommandExecutor } from "./commands";
import { FastifyBaseLogger } from "fastify";
import {
ComfyPrompt,
ComfyWSMessage,
isStatusMessage,
isProgressMessage,
isProgressStateMessage,
isExecutionStartMessage,
isExecutionCachedMessage,
isExecutedMessage,
isExecutionSuccessMessage,
isExecutingMessage,
isExecutionInterruptedMessage,
isExecutionErrorMessage,
WebhookHandlers,
ComfyPromptResponse,
ComfyHistoryResponse,
ExecutionStats,
isExecutionStats,
} from "./types";
import path from "path";
import fsPromises from "fs/promises";
import WebSocket, { MessageEvent } from "ws";
import { fetch } from "undici";
import { getProxyDispatcher } from "./proxy-dispatcher";
import { z } from "zod";
// Single shared executor for the ComfyUI child process; used by
// launchComfyUI/shutdownComfyUI below.
const commandExecutor = new CommandExecutor();
/**
 * Spawns the ComfyUI process using the configured launch command.
 *
 * The command string is tokenized on runs of whitespace; the previous
 * single-space split produced empty-string arguments whenever the command
 * contained consecutive spaces or tabs. Quoted arguments containing spaces
 * are still not supported by this tokenization.
 *
 * @returns The result of CommandExecutor.execute for the spawned process.
 */
export function launchComfyUI() {
  const cmdAndArgs = config.comfyLaunchCmd.trim().split(/\s+/);
  const cmd = cmdAndArgs[0];
  const args = cmdAndArgs.slice(1);
  // Environment overrides consumed by the ComfyUI launch script.
  return commandExecutor.execute(cmd, args, {
    DIRECT_ADDRESS: config.comfyHost,
    COMFYUI_PORT_HOST: config.comfyPort,
    WEB_ENABLE_AUTH: "false",
    CF_QUICK_TUNNELS: "false",
  });
}
// Interrupts the ComfyUI child process started by launchComfyUI.
export function shutdownComfyUI() {
  commandExecutor.interrupt();
}
/**
 * Checks that the ComfyUI HTTP server is reachable.
 *
 * @throws When the request returns a non-OK status; the error carries the
 *         response body text.
 */
export async function pingComfyUI(): Promise<void> {
  const response = await fetch(config.comfyURL, {
    dispatcher: getProxyDispatcher(),
  });
  if (response.ok) {
    return;
  }
  const detail = await response.text();
  throw new Error(`Failed to ping Comfy UI: ${detail}`);
}
/**
 * Polls ComfyUI until it responds or the configured retry budget is spent.
 *
 * @param log - Logger used to report successful startup.
 * @throws When ComfyUI does not respond within
 *         startupCheckInterval * startupCheckMaxTries milliseconds.
 */
export async function waitForComfyUIToStart(
  log: FastifyBaseLogger
): Promise<void> {
  for (let attempt = 0; attempt < config.startupCheckMaxTries; attempt++) {
    try {
      await pingComfyUI();
      log.info("Comfy UI started");
      return;
    } catch (e) {
      // Not up yet; fall through to sleep and retry.
    }
    await sleep(config.startupCheckInterval);
  }
  const totalSeconds =
    (config.startupCheckInterval / 1000) * config.startupCheckMaxTries;
  throw new Error(`Comfy UI did not start after ${totalSeconds} seconds`);
}
/**
 * Runs the configured warmup prompt through this API's own /prompt endpoint
 * (on wrapperPort) to pre-load models. No-op when no warmup prompt is set.
 *
 * @throws When the warmup request returns a non-OK status.
 */
export async function warmupComfyUI(): Promise<void> {
  // Nothing to do unless a warmup prompt was configured.
  if (!config.warmupPrompt) {
    return;
  }
  const response = await fetch(
    `http://localhost:${config.wrapperPort}/prompt`,
    {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({ prompt: config.warmupPrompt }),
      dispatcher: getProxyDispatcher(),
    }
  );
  if (!response.ok) {
    throw new Error(`Failed to warmup Comfy UI: ${await response.text()}`);
  }
}
/**
 * Submits a prompt to ComfyUI's /prompt endpoint, tagging it with our
 * websocket client id so execution events stream back to this process.
 *
 * @param prompt - The ComfyUI prompt graph to queue.
 * @returns The ComfyUI-assigned prompt id.
 * @throws When the request returns a non-OK status.
 */
export async function queuePrompt(prompt: ComfyPrompt): Promise<string> {
  const payload = JSON.stringify({ prompt, client_id: config.wsClientId });
  const response = await fetch(`${config.comfyURL}/prompt`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
    },
    body: payload,
    dispatcher: getProxyDispatcher(),
  });
  if (!response.ok) {
    const detail = await response.text();
    throw new Error(`Failed to queue prompt: ${detail}`);
  }
  const parsed = (await response.json()) as ComfyPromptResponse;
  return parsed.prompt_id;
}
/**
 * Fetches a prompt's results from ComfyUI's /history endpoint and reads each
 * referenced output file from the output directory into memory.
 *
 * @param promptId - The ComfyUI-assigned prompt id to look up.
 * @param log - Logger for diagnostics.
 * @returns A map of output filename -> file contents, or null when the
 *          prompt is not yet present in history (still queued or running).
 * @throws When the history request fails, the prompt execution errored, or
 *         the prompt is in history but not yet completed.
 */
export async function getPromptOutputs(
  promptId: string,
  log: FastifyBaseLogger
): Promise<Record<string, Buffer> | null> {
  const resp = await fetch(`${config.comfyURL}/history/${promptId}`, {
    dispatcher: getProxyDispatcher(),
  });
  if (!resp.ok) {
    const txt = await resp.text();
    log.error(`Failed to get prompt outputs: ${txt}`);
    throw new Error(`Failed to get prompt outputs: ${txt}`);
  }
  const body = (await resp.json()) as ComfyHistoryResponse;
  const allOutputs: Record<string, Buffer> = {};
  const fileLoadPromises: Promise<void>[] = [];
  if (!body[promptId]) {
    // Not in history yet: callers treat null as "keep polling".
    log.debug(`Prompt ${promptId} not found in history endpoint response`);
    return null;
  }
  const { status, outputs } = body[promptId];
  if (status.completed) {
    for (const nodeId in outputs) {
      const node = outputs[nodeId];
      for (const outputType in node) {
        for (let outputFile of node[outputType]) {
          const filename = outputFile.filename;
          if (!filename) {
            /**
             * Some nodes have fields in the outputs that are not actual files.
             * For example, the SaveAnimatedWebP node has a field called "animated"
             * that only contains boolean values mapping to the files present in
             * .images. We can safely ignore these.
             */
            continue;
          }
          const filepath = path.join(config.outputDir, filename);
          // Read all output files concurrently; results land in allOutputs.
          fileLoadPromises.push(
            fsPromises
              .readFile(filepath)
              .then((data) => {
                allOutputs[filename] = data;
              })
              .catch((e: any) => {
                /**
                 * The most likely reason for this is a node that has an optional
                 * output. If the node doesn't produce that output, the file won't
                 * exist.
                 */
                log.warn(`Failed to read file ${filepath}: ${e.message}`);
              })
          );
        }
      }
    }
  } else if (status.status_str === "error") {
    log.error(JSON.stringify(status));
    throw new Error("Prompt execution failed");
  } else {
    log.debug(JSON.stringify(status));
    throw new Error("Prompt is not completed");
  }
  await Promise.all(fileLoadPromises);
  return allOutputs;
}
/**
 * Listens on the shared ComfyUI websocket (module-level wsClient) for
 * lifecycle messages belonging to `promptId` and accumulates timing stats:
 * overall start/end/duration plus a start timestamp per executed node.
 *
 * Resolves when an execution-success message arrives; rejects on execution
 * error, interruption, or websocket close. Listeners are removed on every
 * exit path to avoid leaks on the long-lived socket.
 *
 * @param promptId - The ComfyUI prompt id to track (messages for other
 *        prompts are ignored).
 * @param log - Logger for lifecycle events.
 * @returns Stats for the prompt's execution.
 */
async function collectExecutionStats(
  promptId: string,
  log: FastifyBaseLogger
): Promise<ExecutionStats> {
  // Provisional start; overwritten when the execution_start message arrives.
  let start = Date.now();
  return new Promise((resolve, reject) => {
    const stats: ExecutionStats = {
      comfy_execution: { start, end: 0, duration: 0, nodes: {} },
    };
    const handleMessage = (event: MessageEvent) => {
      const { data } = event;
      if (typeof data === "string") {
        const message = JSON.parse(data) as ComfyWSMessage;
        // Only track messages for the prompt we were asked about.
        if (message?.data?.prompt_id !== promptId) return;
        if (isExecutionStartMessage(message)) {
          start = Date.now();
          stats.comfy_execution.start = start;
          log.info(`Prompt ${promptId} started execution`);
        } else if (isExecutingMessage(message)) {
          const nodeId = message.data.node;
          if (!nodeId) return;
          stats.comfy_execution.nodes[nodeId] = {
            start: Date.now(),
          };
        } else if (isExecutionSuccessMessage(message)) {
          stats.comfy_execution.end = Date.now();
          stats.comfy_execution.duration =
            stats.comfy_execution.end - stats.comfy_execution.start;
          wsClient?.removeEventListener("close", onClose);
          wsClient?.removeEventListener("message", handleMessage);
          log.info(`Prompt ${promptId} completed execution`);
          return resolve(stats);
        } else if (isExecutionErrorMessage(message)) {
          wsClient?.removeEventListener("close", onClose);
          wsClient?.removeEventListener("message", handleMessage);
          return reject(new Error("Prompt execution failed"));
        } else if (isExecutionInterruptedMessage(message)) {
          wsClient?.removeEventListener("close", onClose);
          wsClient?.removeEventListener("message", handleMessage);
          return reject(new Error("Prompt execution interrupted"));
        }
      }
    };
    const onClose = () => {
      // Socket gone: we can never observe completion, so fail fast.
      wsClient?.removeEventListener("message", handleMessage);
      wsClient?.removeEventListener("close", onClose);
      return reject(new Error("Websocket closed"));
    };
    wsClient?.addEventListener("message", handleMessage);
    wsClient?.addEventListener("close", onClose);
  });
}
// Maps ComfyUI-generated prompt ids to the API's own request ids so websocket
// events (which carry Comfy ids) can be reported under the client-visible id.
// Entries are deleted shortly after completion (see runPromptAndGetOutputs)
// to bound memory growth.
export const comfyIDToApiID: Record<string, string> = {};
/**
 * Polls ComfyUI's /history endpoint for a prompt's outputs, with an
 * adjustable interval and try budget so callers can speed polling up once a
 * websocket signal indicates the prompt has finished.
 */
class HistoryEndpointPoller {
  private promptId: string;
  private log: FastifyBaseLogger;
  // Maximum number of polls; 0 means "poll forever" (see poll()).
  private maxTries: number;
  // Delay between polls, in milliseconds.
  private interval: number;
  private currentTries: number = 0;
  private sleepTimer: NodeJS.Timeout | null = null;
  private resolveCurrentSleep: (() => void) | null = null;
  // Set by stop(); terminates the poll loop unconditionally.
  private stopped: boolean = false;
  constructor(options: {
    promptId: string;
    log: FastifyBaseLogger;
    maxTries: number;
    interval: number;
  }) {
    this.promptId = options.promptId;
    this.log = options.log;
    this.maxTries = options.maxTries;
    this.interval = options.interval;
  }
  /**
   * Repeatedly queries the history endpoint until outputs are available,
   * the try budget is exhausted, or stop() is called.
   *
   * @returns The prompt outputs, or null if polling ended without them.
   */
  async poll(): Promise<Record<string, Buffer> | null> {
    while (
      !this.stopped &&
      (this.currentTries < this.getMaxTries() || this.maxTries === 0)
    ) {
      this.log.debug(
        `Polling history endpoint for prompt ${this.promptId}, try ${
          this.currentTries
        } of ${this.getMaxTries()}`
      );
      const outputs = await getPromptOutputs(this.promptId, this.log);
      if (outputs) {
        return outputs;
      }
      this.currentTries++;
      this.log.debug(
        `Polling history endpoint for prompt ${
          this.promptId
        }, sleep for ${this.getInterval()}ms`
      );
      // Interruptible sleep: setInterval()/stop() can resolve it early.
      await new Promise<void>((resolve) => {
        this.resolveCurrentSleep = resolve;
        this.sleepTimer = setTimeout(resolve, this.getInterval());
      });
    }
    return null;
  }
  getInterval(): number {
    return this.interval;
  }
  getMaxTries(): number {
    return this.maxTries;
  }
  /**
   * Updates the polling interval. When skipCurrentTimeout is true (default),
   * any in-progress sleep is cancelled and resolved immediately so the new
   * interval takes effect right away.
   */
  setInterval(interval: number, skipCurrentTimeout: boolean = true): void {
    this.interval = interval;
    if (skipCurrentTimeout && this.sleepTimer) {
      clearTimeout(this.sleepTimer);
      this.sleepTimer = null;
    }
    if (skipCurrentTimeout && this.resolveCurrentSleep) {
      this.resolveCurrentSleep();
      this.resolveCurrentSleep = null;
    }
  }
  /**
   * Updates the try budget. When reset is true (default), the try counter
   * restarts from zero.
   */
  setMaxTries(maxTries: number, reset: boolean = true): void {
    this.maxTries = maxTries;
    if (reset) {
      this.currentTries = 0;
    }
  }
  /**
   * Stops polling promptly.
   *
   * Bug fix: this previously called setMaxTries(this.currentTries), whose
   * default reset resets currentTries to 0 — so the loop would keep running
   * for another `currentTries` iterations, or spin forever at a 0ms interval
   * when no tries had happened yet (since maxTries === 0 means "poll
   * forever"). A dedicated flag terminates the loop unambiguously;
   * setInterval(0) wakes any in-progress sleep so poll() returns right away.
   */
  stop(): void {
    this.stopped = true;
    this.setInterval(0);
  }
}
// Prompt results: output file contents keyed by filename, plus execution
// timing gathered from the websocket stream.
export type PromptOutputsWithStats = {
  outputs: Record<string, Buffer>;
  stats: ExecutionStats;
};
/**
 * Queues a prompt and waits for its outputs, racing two completion signals:
 * a slow poll of the /history endpoint (safety net) and the websocket
 * execution stream (usual winner). When the websocket reports success first,
 * polling is accelerated to pick up the outputs quickly.
 *
 * @param id - The API request id (mapped to the Comfy prompt id for
 *        websocket event translation).
 * @param prompt - The ComfyUI prompt graph to run.
 * @param log - Logger for diagnostics.
 * @returns The prompt's output files and execution stats.
 * @throws When neither signal yields outputs (execution failure, websocket
 *         closure, or exhausted polling).
 */
export async function runPromptAndGetOutputs(
  id: string,
  prompt: ComfyPrompt,
  log: FastifyBaseLogger
): Promise<PromptOutputsWithStats> {
  const promptId = await queuePrompt(prompt);
  comfyIDToApiID[promptId] = id;
  log.debug(`Prompt ${id} queued as comfy prompt id: ${promptId}`);
  /**
   * We start with a slow poll to the history endpoint, both as a safety measure around websocket
   * failures, and to avoid hammering the history endpoint with requests in the case of many queued
   * prompts.
   */
  const poller = new HistoryEndpointPoller({
    promptId,
    log,
    maxTries: 0,
    interval: 1000,
  });
  const historyPoll = poller.poll();
  /**
   * We also listen to the websocket stream for the prompt to complete.
   */
  const executionStatsPromise = collectExecutionStats(promptId, log);
  /**
   * We wait for either the history endpoint to return the outputs, or the websocket
   * to signal that the prompt has completed.
   */
  let firstToComplete: Record<string, Buffer> | ExecutionStats | null;
  try {
    firstToComplete = await Promise.race([historyPoll, executionStatsPromise]);
  } catch (e) {
    /**
     * If an error is thrown by either of those processes, we stop the polling and
     * throw an error.
     */
    log.error(`Error waiting for prompt to complete: ${e}`);
    firstToComplete = null;
  }
  if (isExecutionStats(firstToComplete)) {
    /**
     * If the websocket signals that the prompt has completed (this is typical), we can speed
     * up the history endpoint polling, as it should only need 1-2 tries to get the outputs.
     */
    log.info(`Prompt ${id} completed`);
    poller.setMaxTries(100);
    poller.setInterval(30);
    const outputs = await historyPoll;
    /**
     * We delete the comfyIDToApiID mapping after a short delay to prevent
     * this object from growing indefinitely.
     */
    setTimeout(() => {
      delete comfyIDToApiID[promptId];
    }, 1000);
    if (outputs) {
      return { outputs, stats: firstToComplete };
    }
    throw new Error("Failed to get prompt outputs");
  } else if (firstToComplete === null) {
    // One of the racers errored; shut down the poller before failing.
    poller.stop();
    throw new Error("Failed to get prompt outputs");
  }
  /**
   * If we reach this point, it means that the history endpoint returned the outputs
   * before the websocket signaled that the prompt had completed. This is unexpected,
   * but fine. We return the outputs and delete the comfyIDToApiID mapping.
   */
  setTimeout(() => {
    /**
     * We delete the comfyIDToApiID mapping after a short delay to prevent
     * this object from growing indefinitely.
     */
    delete comfyIDToApiID[promptId];
  }, 1000);
  const outputs = firstToComplete as Record<string, Buffer>;
  const stats = await executionStatsPromise;
  return { outputs, stats };
}
// Module-level websocket to ComfyUI; assigned by
// connectToComfyUIWebsocketStream and shared with collectExecutionStats,
// which attaches per-prompt listeners to it.
let wsClient: WebSocket | null = null;
/**
 * Opens the module-level websocket to ComfyUI and dispatches each incoming
 * JSON message to the matching handler in `hooks`. When useApiIDs is true,
 * Comfy-generated prompt ids in messages (including per-node ids inside
 * progress_state messages) are translated to API request ids via
 * comfyIDToApiID before handlers see them.
 *
 * @param hooks - Optional callbacks per message type; onMessage (if set)
 *        receives every raw frame first.
 * @param log - Logger for connection lifecycle and binary frames.
 * @param useApiIDs - Whether to rewrite prompt ids to API ids (default true).
 * @returns Resolves with the socket once the connection opens; rejects on a
 *          connection error.
 */
export function connectToComfyUIWebsocketStream(
  hooks: WebhookHandlers,
  log: FastifyBaseLogger,
  useApiIDs: boolean = true
): Promise<WebSocket> {
  return new Promise((resolve, reject) => {
    wsClient = new WebSocket(config.comfyWSURL);
    wsClient.on("message", (data, isBinary) => {
      // Raw hook sees every frame, binary or not.
      if (hooks.onMessage) {
        hooks.onMessage(data);
      }
      if (!isBinary) {
        const message = JSON.parse(data.toString("utf8")) as ComfyWSMessage;
        if (
          useApiIDs &&
          message.data.prompt_id &&
          comfyIDToApiID[message.data.prompt_id]
        ) {
          message.data.prompt_id = comfyIDToApiID[message.data.prompt_id];
        }
        if (isStatusMessage(message) && hooks.onStatus) {
          hooks.onStatus(message);
        } else if (isProgressMessage(message) && hooks.onProgress) {
          hooks.onProgress(message);
        } else if (isExecutionStartMessage(message) && hooks.onExecutionStart) {
          hooks.onExecutionStart(message);
        } else if (
          isExecutionCachedMessage(message) &&
          hooks.onExecutionCached
        ) {
          hooks.onExecutionCached(message);
        } else if (isExecutingMessage(message) && hooks.onExecuting) {
          hooks.onExecuting(message);
        } else if (isExecutedMessage(message) && hooks.onExecuted) {
          hooks.onExecuted(message);
        } else if (
          isExecutionSuccessMessage(message) &&
          hooks.onExecutionSuccess
        ) {
          hooks.onExecutionSuccess(message);
        } else if (
          isExecutionInterruptedMessage(message) &&
          hooks.onExecutionInterrupted
        ) {
          hooks.onExecutionInterrupted(message);
        } else if (isExecutionErrorMessage(message) && hooks.onExecutionError) {
          hooks.onExecutionError(message);
        } else if (isProgressStateMessage(message) && hooks.onProgressState) {
          // progress_state carries a prompt id per node; translate each one.
          if (useApiIDs && message.data.nodes) {
            for (const nodeId in message.data.nodes) {
              const node = message.data.nodes[nodeId];
              if (node.prompt_id && comfyIDToApiID[node.prompt_id]) {
                node.prompt_id = comfyIDToApiID[node.prompt_id];
              }
            }
          }
          hooks.onProgressState(message);
        }
      } else {
        log.info(`Received binary message`);
      }
    });
    wsClient.on("open", () => {
      log.info("Connected to Comfy UI websocket");
      return resolve(wsClient as WebSocket);
    });
    wsClient.on("error", (error) => {
      log.error(`Failed to connect to Comfy UI websocket: ${error}`);
      return reject(error);
    });
    wsClient.on("close", () => {
      log.info("Disconnected from Comfy UI websocket");
    });
  });
}
export async function getModels(): Promi
gitextract_0k49dxwn/ ├── .github/ │ └── workflows/ │ ├── build-comfy-base-images.yml │ └── create-release.yml ├── .gitignore ├── .nvmrc ├── DEVELOPING.md ├── LICENSE ├── README.md ├── build-and-release ├── build-binary ├── claude-endpoint-creation-prompt.md ├── docker/ │ ├── api.dockerfile │ ├── build-api-images │ ├── build-comfy-base-images │ ├── comfyui.dockerfile │ └── push-comfy-base-images ├── docker-compose.yml ├── example-workflows/ │ ├── flux/ │ │ ├── img2img.json │ │ ├── img2img.ts │ │ ├── txt2img.json │ │ └── txt2img.ts │ ├── sd1.5/ │ │ ├── img2img.js │ │ ├── img2img.json │ │ ├── img2img.ts │ │ ├── txt2img.js │ │ ├── txt2img.json │ │ └── txt2img.ts │ └── sdxl/ │ ├── img2img.json │ ├── img2img.ts │ ├── txt2img-with-refiner.json │ ├── txt2img-with-refiner.ts │ ├── txt2img.json │ └── txt2img.ts ├── generate-workflow ├── manifest.yml ├── package.json ├── scripts/ │ └── smoke-proxy.mjs ├── src/ │ ├── comfy-node-preprocessors.ts │ ├── comfy.ts │ ├── commands.ts │ ├── config.ts │ ├── credential-resolver.ts │ ├── event-emitters.ts │ ├── git-url-parser.ts │ ├── image-tools.ts │ ├── index.ts │ ├── llm-providers.ts │ ├── proxy-dispatcher.ts │ ├── remote-storage-manager.ts │ ├── server.ts │ ├── storage-providers/ │ │ ├── azure-blob.ts │ │ ├── hf.ts │ │ ├── http.ts │ │ ├── index.ts │ │ └── s3.ts │ ├── types.ts │ ├── utils.ts │ └── workflows/ │ └── index.ts ├── test/ │ ├── Dockerfile.file-server │ ├── core.spec.ts │ ├── docker-image/ │ │ ├── Dockerfile │ │ ├── Dockerfile.smoketest │ │ └── link-models │ ├── file-server.ts │ ├── llm-providers.spec.ts │ ├── output/ │ │ └── .gitkeep │ ├── submit-many-jobs.js │ ├── test-utils.ts │ ├── utils.spec.ts │ └── workflows/ │ ├── sd1.5-img2img.json │ ├── sd1.5-multi-output.json │ ├── sd1.5-parallel-2.json │ ├── sd1.5-parallel-3.json │ └── sd1.5-txt2img.json ├── tsconfig.json └── vitest.config.ts
SYMBOL INDEX (247 symbols across 32 files)
FILE: example-workflows/flux/img2img.ts
type InputType (line 90) | type InputType = z.infer<typeof RequestSchema>;
function generateWorkflow (line 92) | function generateWorkflow(input: InputType): ComfyPrompt {
FILE: example-workflows/flux/txt2img.ts
type InputType (line 68) | type InputType = z.infer<typeof RequestSchema>;
function generateWorkflow (line 70) | function generateWorkflow(input: InputType): ComfyPrompt {
FILE: example-workflows/sd1.5/img2img.js
function generateWorkflow (line 94) | function generateWorkflow(input) {
FILE: example-workflows/sd1.5/img2img.ts
type InputType (line 96) | type InputType = z.infer<typeof RequestSchema>;
function generateWorkflow (line 98) | function generateWorkflow(input: InputType): ComfyPrompt {
FILE: example-workflows/sd1.5/txt2img.js
function generateWorkflow (line 71) | function generateWorkflow(input) {
FILE: example-workflows/sd1.5/txt2img.ts
type InputType (line 73) | type InputType = z.infer<typeof RequestSchema>;
function generateWorkflow (line 75) | function generateWorkflow(input: InputType): ComfyPrompt {
FILE: example-workflows/sdxl/img2img.ts
type InputType (line 96) | type InputType = z.infer<typeof RequestSchema>;
function generateWorkflow (line 98) | function generateWorkflow(input: InputType): ComfyPrompt {
FILE: example-workflows/sdxl/txt2img-with-refiner.ts
type InputType (line 95) | type InputType = z.infer<typeof RequestSchema>;
function generateWorkflow (line 97) | function generateWorkflow(input: InputType): ComfyPrompt {
FILE: example-workflows/sdxl/txt2img.ts
type InputType (line 73) | type InputType = z.infer<typeof RequestSchema>;
function generateWorkflow (line 75) | function generateWorkflow(input: InputType): ComfyPrompt {
FILE: scripts/smoke-proxy.mjs
function main (line 5) | async function main() {
FILE: src/comfy-node-preprocessors.ts
function updateModelsInConfig (line 22) | function updateModelsInConfig(modelType: string, modelName: string) {
function processCheckpointLoaderNode (line 35) | async function processCheckpointLoaderNode(
function processCheckpointLoaderSimpleNode (line 69) | async function processCheckpointLoaderSimpleNode(
function processDiffusersLoaderNode (line 91) | async function processDiffusersLoaderNode(
function processLoraLoaderNode (line 112) | async function processLoraLoaderNode(
function processVAELoaderNode (line 134) | async function processVAELoaderNode(
function processControlNetLoaderNode (line 156) | async function processControlNetLoaderNode(
function processUNETLoaderNode (line 178) | async function processUNETLoaderNode(
function processCLIPLoaderNode (line 200) | async function processCLIPLoaderNode(
function processDualCLIPLoaderNode (line 222) | async function processDualCLIPLoaderNode(
function processStyleModelLoaderNode (line 254) | async function processStyleModelLoaderNode(
function processGLIGENLoaderNode (line 276) | async function processGLIGENLoaderNode(
function processUpscaleModelLoaderNode (line 298) | async function processUpscaleModelLoaderNode(
function processModelLoadingNode (line 320) | async function processModelLoadingNode(
function processLoadImageNode (line 359) | async function processLoadImageNode(
function processLoadDirectoryOfImagesNode (line 367) | async function processLoadDirectoryOfImagesNode(
function processLoadVideoNode (line 381) | async function processLoadVideoNode(
function processLoadAudioNode (line 395) | async function processLoadAudioNode(
type NodeProcessError (line 439) | type NodeProcessError = Error & {
function preprocessNodes (line 445) | async function preprocessNodes(
FILE: src/comfy.ts
function launchComfyUI (line 33) | function launchComfyUI() {
function shutdownComfyUI (line 45) | function shutdownComfyUI() {
function pingComfyUI (line 49) | async function pingComfyUI(): Promise<void> {
function waitForComfyUIToStart (line 58) | async function waitForComfyUIToStart(
function warmupComfyUI (line 81) | async function warmupComfyUI(): Promise<void> {
function queuePrompt (line 97) | async function queuePrompt(prompt: ComfyPrompt): Promise<string> {
function getPromptOutputs (line 113) | async function getPromptOutputs(
function collectExecutionStats (line 178) | async function collectExecutionStats(
class HistoryEndpointPoller (line 234) | class HistoryEndpointPoller {
method constructor (line 242) | constructor(options: {
method poll (line 253) | async poll(): Promise<Record<string, Buffer> | null> {
method getInterval (line 278) | getInterval(): number {
method getMaxTries (line 282) | getMaxTries(): number {
method setInterval (line 286) | setInterval(interval: number, skipCurrentTimeout: boolean = true): void {
method setMaxTries (line 298) | setMaxTries(maxTries: number, reset: boolean = true): void {
method stop (line 305) | stop(): void {
type PromptOutputsWithStats (line 311) | type PromptOutputsWithStats = {
function runPromptAndGetOutputs (line 316) | async function runPromptAndGetOutputs(
function connectToComfyUIWebsocketStream (line 401) | function connectToComfyUIWebsocketStream(
function getModels (line 480) | async function getModels(): Promise<
function interruptPrompt (line 528) | async function interruptPrompt(id: string): Promise<void> {
FILE: src/commands.ts
class CommandExecutor (line 3) | class CommandExecutor {
method execute (line 13) | async execute(
method interrupt (line 52) | interrupt(): void {
FILE: src/config.ts
type ComfyDescription (line 156) | interface ComfyDescription {
function getPythonCommand (line 162) | function getPythonCommand(): string {
function getComfyUIDescription (line 181) | function getComfyUIDescription(): ComfyDescription {
function parseManifest (line 251) | function parseManifest(manifestPath: string): any {
function setWarmupPrompt (line 684) | function setWarmupPrompt(content: string): void {
FILE: src/credential-resolver.ts
function resolveCredentials (line 7) | function resolveCredentials(
function matchesPattern (line 36) | function matchesPattern(url: string, pattern: string): boolean {
type CredentialProvider (line 53) | type CredentialProvider = (url: string) => DownloadOptions | undefined;
function createCredentialProvider (line 59) | function createCredentialProvider(
FILE: src/event-emitters.ts
function signWebhookPayload (line 12) | function signWebhookPayload(payload: string): string {
function sendWebhook (line 19) | async function sendWebhook(
function sendSystemWebhook (line 57) | async function sendSystemWebhook(
function getConfiguredWebhookHandlers (line 92) | function getConfiguredWebhookHandlers(
FILE: src/git-url-parser.ts
function parseGitUrl (line 26) | function parseGitUrl(repoUrl: string): {
FILE: src/image-tools.ts
function processInputMedia (line 11) | async function processInputMedia(
function guessFileExtensionFromBase64 (line 60) | function guessFileExtensionFromBase64(base64Data: string): string | null {
function convertImageBuffer (line 200) | async function convertImageBuffer(
FILE: src/llm-providers.ts
type LLMProviderConfig (line 9) | interface LLMProviderConfig {
method authHeaders (line 28) | authHeaders(apiKey) {
method buildRequestBody (line 34) | buildRequestBody(systemPrompt, userPrompt) {
method parseResponse (line 43) | parseResponse(response) {
method authHeaders (line 55) | authHeaders(apiKey) {
method buildRequestBody (line 58) | buildRequestBody(systemPrompt, userPrompt) {
method parseResponse (line 69) | parseResponse(response) {
function selectProvider (line 86) | function selectProvider(
function stripCodeFences (line 102) | function stripCodeFences(text: string): string {
FILE: src/proxy-dispatcher.ts
function getProxyDispatcher (line 9) | function getProxyDispatcher(): Dispatcher {
FILE: src/remote-storage-manager.ts
type CacheMetadata (line 22) | interface CacheMetadata {
function linkIfDoesNotExist (line 30) | async function linkIfDoesNotExist(
function getFileByPrefix (line 55) | async function getFileByPrefix(
function getMetaFilePath (line 68) | function getMetaFilePath(cachedFilePath: string): string {
function readCacheMetadata (line 75) | async function readCacheMetadata(cachedFilePath: string): Promise<CacheM...
function writeCacheMetadata (line 88) | async function writeCacheMetadata(cachedFilePath: string, metadata: Cach...
function sanitizeUrlForMetadata (line 97) | function sanitizeUrlForMetadata(url: string): string {
class RemoteStorageManager (line 109) | class RemoteStorageManager {
method constructor (line 117) | constructor(cacheDir: string, log: FastifyBaseLogger) {
method enforceCacheSize (line 138) | async enforceCacheSize(): Promise<void> {
method getCacheSizeInfo (line 164) | async getCacheSizeInfo(): Promise<{
method makeSpace (line 200) | private async makeSpace(
method downloadFile (line 247) | async downloadFile(
method validateCacheAccess (line 380) | private async validateCacheAccess(
method downloadRepo (line 418) | async downloadRepo(repoUrl: string, targetDir: string): Promise<string> {
method uploadFile (line 456) | async uploadFile(
method _cloneWithinDirectory (line 501) | private async _cloneWithinDirectory(
function getStorageManager (line 571) | function getStorageManager(log?: FastifyBaseLogger) {
FILE: src/server.ts
type PromptRequest (line 81) | type PromptRequest = z.infer<typeof PromptRequestSchema>;
type WorkflowRequest (line 89) | type WorkflowRequest = z.infer<typeof WorkflowRequestSchema>;
type ModelResponse (line 108) | type ModelResponse = z.infer<typeof ModelResponseSchema>;
type ProcessedOutput (line 289) | type ProcessedOutput = {
type Handler (line 372) | type Handler = (data: ProcessedOutput) => Promise<{
type BodyType (line 688) | type BodyType = z.infer<typeof BodySchema>;
function launchComfyUIAndAPIServerAndWaitForWarmup (line 806) | async function launchComfyUIAndAPIServerAndWaitForWarmup() {
function downloadAllModels (line 856) | async function downloadAllModels(
function downloadWarmupPrompt (line 866) | async function downloadWarmupPrompt() {
function processManifest (line 896) | async function processManifest() {
function start (line 935) | async function start() {
FILE: src/storage-providers/azure-blob.ts
class AzureBlobStorageProvider (line 14) | class AzureBlobStorageProvider implements StorageProvider {
method constructor (line 30) | constructor(log: FastifyBaseLogger) {
method createUrl (line 82) | createUrl(inputs: z.infer<typeof this.urlRequestSchema>): string {
method testUrl (line 109) | testUrl(url: string): boolean {
method uploadFile (line 118) | uploadFile(
method downloadFile (line 135) | async downloadFile(
class AzureBlobUpload (line 179) | class AzureBlobUpload implements Upload {
method constructor (line 188) | constructor(
method createInputStream (line 202) | private createInputStream(fileOrPath: string | Buffer): ReadStream | B...
method upload (line 210) | async upload(): Promise<void> {
method abort (line 257) | async abort(): Promise<void> {
FILE: src/storage-providers/hf.ts
class HFStorageProvider (line 13) | class HFStorageProvider implements StorageProvider {
method constructor (line 39) | constructor(log: FastifyBaseLogger) {
method createUrl (line 43) | createUrl(inputs: z.infer<typeof this.urlRequestSchema>): string {
method testUrl (line 59) | testUrl(url: string): boolean {
method uploadFile (line 63) | uploadFile(
method downloadFile (line 71) | async downloadFile(
class HFUpload (line 123) | class HFUpload implements Upload {
method constructor (line 132) | constructor(
method upload (line 156) | async upload(): Promise<void> {
method abort (line 190) | async abort(): Promise<void> {
function parseHfUrl (line 203) | function parseHfUrl(url: string): {
FILE: src/storage-providers/http.ts
class HTTPStorageProvider (line 12) | class HTTPStorageProvider implements StorageProvider {
method constructor (line 22) | constructor(log: FastifyBaseLogger) {
method createUrl (line 26) | createUrl(inputs: z.infer<typeof this.urlRequestSchema>): string {
method testUrl (line 34) | testUrl(url: string): boolean {
method uploadFile (line 38) | uploadFile(
method validateAuth (line 51) | async validateAuth(url: string, options: DownloadOptions): Promise<voi...
method downloadFile (line 91) | async downloadFile(
class HTTPUpload (line 147) | class HTTPUpload implements Upload {
method constructor (line 155) | constructor(
method upload (line 168) | async upload(): Promise<void> {
method abort (line 223) | async abort(): Promise<void> {
function mimeToExtension (line 240) | function mimeToExtension(mimeType: string): string | null {
function getIntendedFileExtensionFromResponse (line 306) | function getIntendedFileExtensionFromResponse(
function applyQueryAuth (line 348) | function applyQueryAuth(url: string, auth?: DownloadAuth): string {
function getAuthHeaders (line 365) | function getAuthHeaders(url: string, auth?: DownloadAuth): Record<string...
FILE: src/storage-providers/s3.ts
class S3StorageProvider (line 17) | class S3StorageProvider implements StorageProvider {
method constructor (line 29) | constructor(log: FastifyBaseLogger) {
method createUrl (line 44) | createUrl(inputs: z.infer<typeof this.urlRequestSchema>): string {
method testUrl (line 52) | testUrl(url: string): boolean {
method uploadFile (line 56) | uploadFile(
method validateAuth (line 68) | async validateAuth(url: string, options: DownloadOptions): Promise<voi...
method downloadFile (line 100) | async downloadFile(
method getS3ClientWithInfo (line 148) | private getS3ClientWithInfo(auth?: DownloadAuth): { client: S3Client; ...
function parseS3Url (line 180) | function parseS3Url(s3Url: string): { bucket: string; key: string } {
class S3Upload (line 187) | class S3Upload implements Upload {
method constructor (line 197) | constructor(
method upload (line 212) | async upload(): Promise<void> {
method abort (line 227) | async abort(): Promise<void> {
method createInputStream (line 237) | private createInputStream(fileOrPath: string | Buffer): ReadStream | B...
method _uploadFileToS3 (line 245) | private async _uploadFileToS3(
method _uploadFileToS3Url (line 275) | private async _uploadFileToS3Url(
FILE: src/types.ts
type ComfyNode (line 11) | type ComfyNode = z.infer<typeof ComfyNodeSchema>;
type ComfyPrompt (line 13) | type ComfyPrompt = Record<string, ComfyNode>;
type JPEGOptions (line 63) | type JPEGOptions = z.infer<typeof JPEGOptionsSchema>;
type WebpOptions (line 110) | type WebpOptions = z.infer<typeof WebpOptionsSchema>;
type OutputConversionOptions (line 117) | type OutputConversionOptions = z.infer<
type ExecutionStats (line 139) | type ExecutionStats = z.infer<typeof ExecutionStatsSchema>;
function isExecutionStats (line 140) | function isExecutionStats(obj: any): obj is ExecutionStats {
type PromptErrorResponse (line 149) | type PromptErrorResponse = z.infer<typeof PromptErrorResponseSchema>;
type Workflow (line 156) | interface Workflow {
function isWorkflow (line 163) | function isWorkflow(obj: any): obj is Workflow {
type WorkflowTree (line 172) | interface WorkflowTree {
type ComfyWSMessage (line 176) | interface ComfyWSMessage {
type ComfyWSStatusMessage (line 192) | interface ComfyWSStatusMessage extends ComfyWSMessage {
type ComfyWSProgressMessage (line 203) | interface ComfyWSProgressMessage extends ComfyWSMessage {
type ComfyWSProgressStateMessage (line 213) | interface ComfyWSProgressStateMessage extends ComfyWSMessage {
type ComfyWSExecutingMessage (line 233) | interface ComfyWSExecutingMessage extends ComfyWSMessage {
type ComfyWSExecutionStartMessage (line 242) | interface ComfyWSExecutionStartMessage extends ComfyWSMessage {
type ComfyWSExecutionCachedMessage (line 250) | interface ComfyWSExecutionCachedMessage extends ComfyWSMessage {
type ComfyWSExecutedMessage (line 259) | interface ComfyWSExecutedMessage extends ComfyWSMessage {
type ComfyWSExecutionSuccessMessage (line 269) | interface ComfyWSExecutionSuccessMessage extends ComfyWSMessage {
type ComfyWSExecutionInterruptedMessage (line 277) | interface ComfyWSExecutionInterruptedMessage extends ComfyWSMessage {
type ComfyWSExecutionErrorMessage (line 287) | interface ComfyWSExecutionErrorMessage extends ComfyWSMessage {
function isStatusMessage (line 302) | function isStatusMessage(
function isProgressMessage (line 308) | function isProgressMessage(
function isProgressStateMessage (line 314) | function isProgressStateMessage(
function isExecutingMessage (line 320) | function isExecutingMessage(
function isExecutionStartMessage (line 326) | function isExecutionStartMessage(
function isExecutionCachedMessage (line 332) | function isExecutionCachedMessage(
function isExecutedMessage (line 338) | function isExecutedMessage(
function isExecutionSuccessMessage (line 344) | function isExecutionSuccessMessage(
function isExecutionInterruptedMessage (line 350) | function isExecutionInterruptedMessage(
function isExecutionErrorMessage (line 356) | function isExecutionErrorMessage(
type WebhookHandlers (line 362) | type WebhookHandlers = {
type ComfyPromptResponse (line 418) | type ComfyPromptResponse = {
type ComfyHistoryResponse (line 424) | type ComfyHistoryResponse = Record<
type Upload (line 445) | interface Upload {
type DownloadAuth (line 486) | type DownloadAuth = z.infer<typeof DownloadAuthSchema>;
type DownloadOptions (line 495) | type DownloadOptions = z.infer<typeof DownloadOptionsSchema>;
type WorkflowCredential (line 506) | type WorkflowCredential = z.infer<typeof WorkflowCredentialSchema>;
type StorageProvider (line 523) | interface StorageProvider {
type DownloadRequest (line 598) | type DownloadRequest = z.infer<typeof DownloadRequestSchema>;
type DownloadResponse (line 609) | type DownloadResponse = z.infer<typeof DownloadResponseSchema>;
FILE: src/utils.ts
function sleep (line 15) | async function sleep(ms: number): Promise<void> {
function isValidUrl (line 19) | function isValidUrl(str: string): boolean {
function zodToMarkdownTable (line 32) | function zodToMarkdownTable(schema: ZodObject<ZodRawShape>): string {
function getZodTypeName (line 50) | function getZodTypeName(zodType: ZodTypeAny): {
function getZodDescription (line 103) | function getZodDescription(zodType: ZodTypeAny): string {
function getZodDefault (line 114) | function getZodDefault(zodType: ZodTypeAny): string {
function snakeCaseToUpperCamelCase (line 125) | function snakeCaseToUpperCamelCase(str: string): string {
function camelCaseToSnakeCase (line 131) | function camelCaseToSnakeCase(str: string): string {
function fetchWithRetries (line 138) | async function fetchWithRetries(
function setDeletionCost (line 166) | async function setDeletionCost(cost: number): Promise<void> {
function isPythonVenvActive (line 186) | function isPythonVenvActive(): boolean {
function installCustomNode (line 205) | async function installCustomNode(
function aptInstallPackages (line 252) | async function aptInstallPackages(
function pipInstallPackages (line 264) | async function pipInstallPackages(
function makeHumanReadableSize (line 289) | function makeHumanReadableSize(sizeInBytes: number): string {
function hashUrlBase64 (line 302) | function hashUrlBase64(url: string, length = 32): string {
function getContentTypeFromUrl (line 310) | function getContentTypeFromUrl(url: string): string {
function getDirectorySizeInBytes (line 363) | async function getDirectorySizeInBytes(
FILE: test/core.spec.ts
function streamToBuffer (line 33) | async function streamToBuffer(
function submitWorkflow (line 1112) | async function submitWorkflow(
function submitDownload (line 1823) | async function submitDownload(body: {
FILE: test/file-server.ts
constant PORT (line 6) | const PORT = process.env.PORT ? parseInt(process.env.PORT) : 8080;
constant STORAGE_DIR (line 7) | const STORAGE_DIR = process.env.STORAGE_DIR || path.join(__dirname, "tes...
function getContentType (line 157) | function getContentType(filename: string): string {
FILE: test/submit-many-jobs.js
function loadJobJson (line 21) | async function loadJobJson() {
function getRandomSeed (line 31) | function getRandomSeed() {
function doAJob (line 35) | async function doAJob(jobJson) {
function main (line 84) | async function main() {
FILE: test/test-utils.ts
function getAzureContainer (line 27) | async function getAzureContainer(
function sleep (line 37) | async function sleep(ms: number): Promise<void> {
function createWebhookListener (line 41) | async function createWebhookListener(
function submitPrompt (line 67) | async function submitPrompt(
function checkImage (line 120) | async function checkImage(
function waitForServerToBeReady (line 142) | async function waitForServerToBeReady(): Promise<void> {
function verifyWebhookV2 (line 156) | function verifyWebhookV2(
Condensed preview — 75 files, each showing its path, character count, and a content snippet. Download the .json file or copy it to your clipboard to get the full structured content (515K chars).
[
{
"path": ".github/workflows/build-comfy-base-images.yml",
"chars": 1095,
"preview": "name: Build Comfy Base Images\n\non:\n workflow_dispatch:\n inputs:\n comfy_version:\n description: ComfyUI ve"
},
{
"path": ".github/workflows/create-release.yml",
"chars": 1623,
"preview": "name: Create Release\n\non:\n workflow_dispatch: {}\n\njobs:\n build-and-release:\n runs-on: ubuntu-latest\n\n steps:\n "
},
{
"path": ".gitignore",
"chars": 2141,
"preview": "# Logs\nlogs\n*.log\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\nlerna-debug.log*\n.pnpm-debug.log*\n\n# Diagnostic reports"
},
{
"path": ".nvmrc",
"chars": 9,
"preview": "v20.18.1\n"
},
{
"path": "DEVELOPING.md",
"chars": 17643,
"preview": "# Developing ComfyUI-API\n\nThis document provides guidelines for developers who want to contribute to the ComfyUI-API pro"
},
{
"path": "LICENSE",
"chars": 1075,
"preview": "MIT License\n\nCopyright (c) 2025 Salad Technologies\n\nPermission is hereby granted, free of charge, to any person obtainin"
},
{
"path": "README.md",
"chars": 65614,
"preview": "# ComfyUI API - A Stateless and Extendable API for ComfyUI\n\nA simple wrapper that facilitates using [ComfyUI](https://gi"
},
{
"path": "build-and-release",
"chars": 290,
"preview": "#! /usr/bin/env bash\n\nnpm install\nnpm run build-binary\nversion=$(node -p \"require('./package.json').version\")\necho \"Vers"
},
{
"path": "build-binary",
"chars": 87,
"preview": "#! /usr/bin/env bash\nset -e\n\nnpm install\nnpx tsc\nnpx pkg --options \"stack-size=65500\" ."
},
{
"path": "claude-endpoint-creation-prompt.md",
"chars": 4327,
"preview": "# Instructions\nYour job is to convert a json workflow graph for ai image generation into a typescript function.\n- You sh"
},
{
"path": "docker/api.dockerfile",
"chars": 441,
"preview": "ARG base=runtime\nARG comfy_version=0.19.3\nARG pytorch_version=2.8.0\nARG cuda_version=12.8\n\nFROM ghcr.io/saladtechnologie"
},
{
"path": "docker/build-api-images",
"chars": 1437,
"preview": "#! /usr/bin/bash\n\nusage=\"Usage: $0 [comfy_version] [torch_version] [cuda_version] [api_version]\"\n\ncomfy_version=${1:-0.1"
},
{
"path": "docker/build-comfy-base-images",
"chars": 500,
"preview": "#! /usr/bin/bash\n\ncomfy_version=${1:-0.19.3}\ntorch_version=${2:-2.8.0}\ncuda_version=${3:-12.8}\nbases=(\"devel\" \"runtime\")"
},
{
"path": "docker/comfyui.dockerfile",
"chars": 1618,
"preview": "ARG base=runtime\nARG pytorch_version=2.8.0\nARG cuda_version=12.8\n\nFROM pytorch/pytorch:${pytorch_version}-cuda${cuda_ver"
},
{
"path": "docker/push-comfy-base-images",
"chars": 553,
"preview": "#! /usr/bin/bash\n\nusage=\"Usage: $0 [comfy_version] [torch_version] [cuda_version]\"\n\ncomfy_version=${1:-0.19.3}\ntorch_ver"
},
{
"path": "docker-compose.yml",
"chars": 1982,
"preview": "services:\n comfyui:\n image: ghcr.io/saladtechnologies/comfyui-api:comfy0.19.3-torch2.8.0-cuda12.8-runtime\n volume"
},
{
"path": "example-workflows/flux/img2img.json",
"chars": 2502,
"preview": "{\n \"6\": {\n \"inputs\": {\n \"text\": \"A noble wolf stands by a raging river in the style of a japanese scroll\",\n "
},
{
"path": "example-workflows/flux/img2img.ts",
"chars": 4634,
"preview": "import { z } from \"zod\";\n// This gets evaluated in the context of src/workflows, so imports must be relative to that dir"
},
{
"path": "example-workflows/flux/txt2img.json",
"chars": 1949,
"preview": "{\n \"6\": {\n \"inputs\": {\n \"text\": \"a bottle with a beautiful rainbow galaxy inside it on top of a wooden table in"
},
{
"path": "example-workflows/flux/txt2img.ts",
"chars": 3410,
"preview": "import { z } from \"zod\";\n// This gets evaluated in the context of src/workflows, so imports must be relative to that dir"
},
{
"path": "example-workflows/sd1.5/img2img.js",
"chars": 5868,
"preview": "\"use strict\";\nObject.defineProperty(exports, \"__esModule\", { value: true });\nvar zod_1 = require(\"zod\");\nvar config_1 = "
},
{
"path": "example-workflows/sd1.5/img2img.json",
"chars": 2271,
"preview": "{\n \"3\": {\n \"inputs\": {\n \"seed\": 818335187507771,\n \"steps\": 15,\n \"cfg\": 8,\n \"sampler_name\": \"eule"
},
{
"path": "example-workflows/sd1.5/img2img.ts",
"chars": 4832,
"preview": "import { z } from \"zod\";\n// This gets evaluated in the context of src/workflows, so imports must be relative to that dir"
},
{
"path": "example-workflows/sd1.5/txt2img.js",
"chars": 4339,
"preview": "\"use strict\";\nObject.defineProperty(exports, \"__esModule\", { value: true });\nvar zod_1 = require(\"zod\");\nvar config_1 = "
},
{
"path": "example-workflows/sd1.5/txt2img.json",
"chars": 1754,
"preview": "{\n \"3\": {\n \"inputs\": {\n \"seed\": 712610403220747,\n \"steps\": 20,\n \"cfg\": 8,\n \"sampler_name\": \"eule"
},
{
"path": "example-workflows/sd1.5/txt2img.ts",
"chars": 3575,
"preview": "import { z } from \"zod\";\n// This gets evaluated in the context of src/workflows, so imports must be relative to that dir"
},
{
"path": "example-workflows/sdxl/img2img.json",
"chars": 2781,
"preview": "{\n \"8\": {\n \"inputs\": {\n \"samples\": [\n \"36\",\n 0\n ],\n \"vae\": [\n \"14\",\n 2\n "
},
{
"path": "example-workflows/sdxl/img2img.ts",
"chars": 5030,
"preview": "import { z } from \"zod\";\n// This gets evaluated in the context of src/workflows, so imports must be relative to that dir"
},
{
"path": "example-workflows/sdxl/txt2img-with-refiner.json",
"chars": 3206,
"preview": "{\n \"4\": {\n \"inputs\": {\n \"ckpt_name\": \"sd_xl_base_1.0.safetensors\"\n },\n \"class_type\": \"CheckpointLoaderSim"
},
{
"path": "example-workflows/sdxl/txt2img-with-refiner.ts",
"chars": 5420,
"preview": "import { z } from \"zod\";\n// This gets evaluated in the context of src/workflows, so imports must be relative to that dir"
},
{
"path": "example-workflows/sdxl/txt2img.json",
"chars": 1755,
"preview": "{\n \"4\": {\n \"inputs\": {\n \"ckpt_name\": \"sd_xl_base_1.0.safetensors\"\n },\n \"class_type\": \"CheckpointLoaderSim"
},
{
"path": "example-workflows/sdxl/txt2img.ts",
"chars": 3550,
"preview": "import { z } from \"zod\";\n// This gets evaluated in the context of src/workflows, so imports must be relative to that dir"
},
{
"path": "generate-workflow",
"chars": 2586,
"preview": "#! /bin/bash\n\nusage=\"Usage: $0 <input-prompt-json> <output-typescript-file>\"\n\ninput_prompt_json=$1\noutput_typescript_fil"
},
{
"path": "manifest.yml",
"chars": 568,
"preview": "# apt:\n# - git\n# - ffmpeg\n# - libgl1\ncustom_nodes:\n - comfyui_essentials\n# - comfyui-kjnodes\n# - https://gith"
},
{
"path": "package.json",
"chars": 1365,
"preview": "{\n \"name\": \"comfyui-api\",\n \"version\": \"1.18.0\",\n \"description\": \"Wraps comfyui to make it easier to use as a stateles"
},
{
"path": "scripts/smoke-proxy.mjs",
"chars": 955,
"preview": "import fastify from \"fastify\";\nimport { fetch } from \"undici\";\nimport { getProxyDispatcher } from \"../dist/src/proxy-dis"
},
{
"path": "src/comfy-node-preprocessors.ts",
"chars": 16732,
"preview": "import path from \"path\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport { ComfyNode, ComfyPrompt, WorkflowCredentia"
},
{
"path": "src/comfy.ts",
"chars": 17173,
"preview": "import { sleep } from \"./utils\";\nimport config from \"./config\";\nimport { CommandExecutor } from \"./commands\";\nimport { F"
},
{
"path": "src/commands.ts",
"chars": 1860,
"preview": "import { spawn, ChildProcess } from \"child_process\";\n\nexport class CommandExecutor {\n private process: ChildProcess | n"
},
{
"path": "src/config.ts",
"chars": 18995,
"preview": "import assert from \"node:assert\";\nimport fs from \"node:fs\";\nimport path from \"node:path\";\nimport { randomUUID } from \"no"
},
{
"path": "src/credential-resolver.ts",
"chars": 2002,
"preview": "import { WorkflowCredential, DownloadAuth, DownloadOptions } from \"./types\";\n\n/**\n * Resolves credentials for a given UR"
},
{
"path": "src/event-emitters.ts",
"chars": 3095,
"preview": "import crypto from \"crypto\";\nimport config from \"./config\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport { getPro"
},
{
"path": "src/git-url-parser.ts",
"chars": 2703,
"preview": "/**\n * Parse a Git URL to extract the base repository URL and optional ref (branch/commit/tag).\n * Supports multiple for"
},
{
"path": "src/image-tools.ts",
"chars": 7410,
"preview": "import config from \"./config\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport fsPromises from \"fs/promises\";\nimport"
},
{
"path": "src/index.ts",
"chars": 44,
"preview": "import { start } from \"./server\";\n\nstart();\n"
},
{
"path": "src/llm-providers.ts",
"chars": 3631,
"preview": "/**\n * LLM provider configurations for the generate-workflow script.\n *\n * Supported providers:\n * - Anthropic (Claude):"
},
{
"path": "src/proxy-dispatcher.ts",
"chars": 578,
"preview": "import { EnvHttpProxyAgent, type Dispatcher } from \"undici\";\n\n// Create a singleton proxy-aware dispatcher that:\n// - Re"
},
{
"path": "src/remote-storage-manager.ts",
"chars": 18871,
"preview": "import config from \"./config\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport fs from \"fs\";\nimport fsPromises from "
},
{
"path": "src/server.ts",
"chars": 28213,
"preview": "import Fastify from \"fastify\";\nimport fastifySwagger from \"@fastify/swagger\";\nimport fastifySwaggerUI from \"@fastify/swa"
},
{
"path": "src/storage-providers/azure-blob.ts",
"chars": 9379,
"preview": "import path from \"path\";\nimport fsPromises from \"fs/promises\";\nimport { StorageProvider, Upload } from \"../types\";\nimpor"
},
{
"path": "src/storage-providers/hf.ts",
"chars": 7153,
"preview": "import path from \"path\";\nimport fsPromises from \"fs/promises\";\nimport { StorageProvider, Upload } from \"../types\";\nimpor"
},
{
"path": "src/storage-providers/http.ts",
"chars": 12526,
"preview": "import path from \"path\";\nimport { StorageProvider, Upload, DownloadOptions, DownloadAuth } from \"../types\";\nimport { Fas"
},
{
"path": "src/storage-providers/index.ts",
"chars": 414,
"preview": "import { StorageProvider } from \"../types\";\nimport { S3StorageProvider } from \"./s3\";\nimport { HTTPStorageProvider } fro"
},
{
"path": "src/storage-providers/s3.ts",
"chars": 8675,
"preview": "import path from \"path\";\nimport fs, { ReadStream } from \"fs\";\nimport { Readable } from \"stream\";\nimport {\n S3Client,\n "
},
{
"path": "src/types.ts",
"chars": 15977,
"preview": "import { z } from \"zod\";\nimport { randomUUID } from \"crypto\";\nimport { RawData } from \"ws\";\n\nexport const ComfyNodeSchem"
},
{
"path": "src/utils.ts",
"chars": 10476,
"preview": "import config from \"./config\";\nimport { FastifyBaseLogger } from \"fastify\";\nimport path from \"path\";\nimport fs from \"fs\""
},
{
"path": "src/workflows/index.ts",
"chars": 2076,
"preview": "import config from \"../config\";\nimport { WorkflowTree, isWorkflow } from \"../types\";\nimport fs from \"fs\";\nimport ts from"
},
{
"path": "test/Dockerfile.file-server",
"chars": 536,
"preview": "FROM node:20-alpine\n\nWORKDIR /app\n\n# Install TypeScript and Node types for compilation\nRUN npm install -g typescript && "
},
{
"path": "test/core.spec.ts",
"chars": 66746,
"preview": "import { expect, describe, it, beforeAll } from \"vitest\";\nimport path from \"path\";\nimport fs from \"fs\";\nimport { fetch, "
},
{
"path": "test/docker-image/Dockerfile",
"chars": 595,
"preview": "ARG comfy_version=0.19.3\nFROM ghcr.io/saladtechnologies/comfyui-api:comfy${comfy_version}-torch2.8.0-cuda12.8-devel\n\nRUN"
},
{
"path": "test/docker-image/Dockerfile.smoketest",
"chars": 285,
"preview": "FROM ghcr.io/saladtechnologies/comfyui-api:comfy0.19.3-torch2.8.0-cuda12.8-runtime\n\nCOPY test/docker-image/dreamshaper_8"
},
{
"path": "test/docker-image/link-models",
"chars": 85,
"preview": "#! /usr/bin/bash\n\ncomfy_model_dir=~/_comfy_models/models\n\nln -s $comfy_model_dir $PWD"
},
{
"path": "test/file-server.ts",
"chars": 7335,
"preview": "import http from \"http\";\nimport fs from \"fs\";\nimport path from \"path\";\nimport { pipeline } from \"stream/promises\";\n\ncons"
},
{
"path": "test/llm-providers.spec.ts",
"chars": 5655,
"preview": "import { expect, describe, it } from \"vitest\";\nimport {\n anthropicProvider,\n minimaxProvider,\n selectProvider,\n stri"
},
{
"path": "test/output/.gitkeep",
"chars": 0,
"preview": ""
},
{
"path": "test/submit-many-jobs.js",
"chars": 2796,
"preview": "#!/usr/bin/env node\n\nconst fs = require('fs').promises;\n\nconst usage = `\nUsage: node submit-many-jobs.js <accessDomainNa"
},
{
"path": "test/test-utils.ts",
"chars": 4719,
"preview": "import { expect } from \"vitest\";\nimport sharp from \"sharp\";\nimport fastify, { FastifyInstance } from \"fastify\";\nimport {"
},
{
"path": "test/utils.spec.ts",
"chars": 16474,
"preview": "import { expect, describe, it, beforeEach, afterEach } from \"vitest\";\nimport path from \"path\";\nimport fs from \"fs\";\nimpo"
},
{
"path": "test/workflows/sd1.5-img2img.json",
"chars": 1971,
"preview": "{\n \"3\": {\n \"inputs\": {\n \"seed\": 895988836787787,\n \"steps\": 20,\n \"cfg\": 8,\n \"sampler_name\": \"dpmp"
},
{
"path": "test/workflows/sd1.5-multi-output.json",
"chars": 2861,
"preview": "{\n \"3\": {\n \"inputs\": {\n \"seed\": 986110750609924,\n \"steps\": 20,\n \"cfg\": 8,\n \"sampler_name\": \"eule"
},
{
"path": "test/workflows/sd1.5-parallel-2.json",
"chars": 3464,
"preview": "{\n \"20\": {\n \"inputs\": {\n \"ckpt_name\": \"dreamshaper_8.safetensors\"\n },\n \"class_type\": \"CheckpointLoaderSim"
},
{
"path": "test/workflows/sd1.5-parallel-3.json",
"chars": 5253,
"preview": "{\n \"20\": {\n \"inputs\": {\n \"ckpt_name\": \"dreamshaper_8.safetensors\"\n },\n \"class_type\": \"CheckpointLoaderSim"
},
{
"path": "test/workflows/sd1.5-txt2img.json",
"chars": 1754,
"preview": "{\n \"3\": {\n \"inputs\": {\n \"seed\": 712610403220747,\n \"steps\": 20,\n \"cfg\": 8,\n \"sampler_name\": \"eule"
},
{
"path": "tsconfig.json",
"chars": 12341,
"preview": "{\n \"compilerOptions\": {\n /* Visit https://aka.ms/tsconfig to read more about this file */\n\n /* Projects */\n //"
},
{
"path": "vitest.config.ts",
"chars": 172,
"preview": "import { defineConfig } from \"vitest/config\";\n\nexport default defineConfig({\n test: {\n testTimeout: 0,\n hookTimeo"
}
]
About this extraction
This page presents the source code of the SaladTechnologies/comfyui-api GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs); the page itself shows a condensed preview, while the download and clipboard options provide the complete content. The extraction covers 75 files (472.5 KB, approximately 122.0k tokens) and includes a symbol index with 247 extracted functions, classes, methods, constants, and types. Use it with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input.
Extracted by GitExtract — a free GitHub-repo-to-text converter for AI. Built by Nikandr Surkov.