[
  {
    "path": ".github/FUNDING.yml",
    "content": "# These are supported funding model platforms\n\ngithub: robertvoy\n"
  },
  {
    "path": ".github/workflows/publish_action.yml",
    "content": "name: Publish to Comfy registry\non:\n  workflow_dispatch:\n  push:\n    branches:\n      - main\n    paths:\n      - \"pyproject.toml\"\n\npermissions:\n  issues: write\n\njobs:\n  publish-node:\n    name: Publish Custom Node to registry\n    runs-on: ubuntu-latest\n    if: ${{ github.repository_owner == 'robertvoy' }}\n    steps:\n      - name: Check out code\n        uses: actions/checkout@v4\n      - name: Publish Custom Node\n        uses: Comfy-Org/publish-node-action@main\n        with:\n          personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}\n"
  },
  {
    "path": ".gitignore",
    "content": "bin/\nlogs/\ngpu_config.json\n__pycache__/\n**/__pycache__/\n*.py[cod]\nnode_modules/\nnpm-debug.log*\nAGENTS.md\n"
  },
  {
    "path": ".nvmrc",
    "content": "20\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "<div align=\"center\">\r\n<img width=\"250\" src=\"https://github.com/user-attachments/assets/533bb98d-0c4a-499f-9bca-5c937e361087\" />\r\n<br><br>\r\n<a href=\"https://www.youtube.com/watch?v=p6eE3IlAbOs\"><img src=\"https://img.shields.io/badge/Video_Tutorial-grey?style=flat&logo=youtube&logoColor=white\" alt=\"Video Tutorial\"></a>\r\n<a href=\"/docs/worker-setup-guides.md\"><img src=\"https://img.shields.io/badge/Setup_Guides-grey?style=flat&logo=gitbook&logoColor=white\" alt=\"Setup Guides\"></a>\r\n<a href=\"/workflows\"><img src=\"https://img.shields.io/badge/Workflows-grey?style=flat&logo=json&logoColor=white\" alt=\"Workflows\"></a>\r\n<a href=\"https://buymeacoffee.com/robertvoy\"><img src=\"https://img.shields.io/badge/Donation-grey?style=flat&logo=buymeacoffee&logoColor=white\" alt=\"Donation\"></a>\r\n<a href=\"https://x.com/rbw_ai\"><img src=\"https://img.shields.io/twitter/follow/rbw_ai\" alt=\"Twitter\"></a>\r\n<br><br>\r\n</div>\r\n\r\n> **A powerful extension for ComfyUI that enables distributed and parallel processing across multiple GPUs and machines. 
Generate more images and videos and accelerate your upscaling workflows by leveraging all available GPU resources in your network and cloud.**\r\n\r\n![Clipboard Image (7)](https://github.com/user-attachments/assets/66aaadef-f195-48a1-a368-17dd0dae477d)\r\n\r\n---\r\n\r\n## Key Features\r\n\r\n#### Parallel Workflow Processing\r\n- Run your workflow on multiple GPUs simultaneously with varied seeds, collect results on the master\r\n- Scale output with more workers\r\n- Supports images and videos\r\n\r\n#### Distributed Upscaling\r\n- Accelerate Ultimate SD Upscale by distributing tiles across GPUs\r\n- Intelligent distribution\r\n- Handles single images and videos\r\n\r\n#### Ease of Use\r\n- Auto-setup local workers; easily add remote/cloud ones\r\n- Convert any workflow to distributed with 2 nodes\r\n- JSON configuration with UI controls\r\n\r\n---\r\n\r\n## Worker Types\r\n\r\n<img width=\"200\" align=\"right\" alt=\"ComfyUI_temp_khvcc_00034_@0 25x\" src=\"https://github.com/user-attachments/assets/651e4912-7c23-4e32-bd88-250f5175e129\" />\r\n\r\nComfyUI Distributed supports three types of workers:\r\n\r\n- **Local Workers** - Additional GPUs on the same machine (auto-configured on first launch)\r\n- **Remote Workers** - GPUs on other computers in your network\r\n- **Cloud Workers** - GPUs hosted on a cloud service like Runpod, accessible via secure tunnels\r\n\r\n> For detailed setup instructions, see the [setup guide](/docs/worker-setup-guides.md)\r\n\r\n---\r\n\r\n## Requirements\r\n\r\n- ComfyUI\r\n- Multiple NVIDIA GPUs\r\n> No additional GPUs? Use [Cloud Workers](https://github.com/robertvoy/ComfyUI-Distributed/blob/main/docs/worker-setup-guides.md#cloud-workers)\r\n- That's it\r\n\r\n---\r\n\r\n## Installation\r\n\r\n1. **Clone this repository** into your ComfyUI custom nodes directory:\r\n   ```bash\r\n   git clone https://github.com/robertvoy/ComfyUI-Distributed.git\r\n   ```\r\n   \r\n2. 
**Restart ComfyUI**\r\n   - If you'll be using remote/cloud workers, add `--enable-cors-header` to your launch arguments on the master\r\n\r\n3. Read the [setup guide](/docs/worker-setup-guides.md) for adding workers\r\n\r\n---\r\n\r\n## Official Sponsor\r\n\r\n[<img width=\"1500\" height=\"339\" src=\"https://github.com/user-attachments/assets/c5f75e1f-3e19-4c57-b05d-151311cd1cf0\" />](https://get.runpod.io/0bw29uf3ug0p)\r\n\r\nJoin Runpod with [this link](https://get.runpod.io/0bw29uf3ug0p) and unlock a special bonus.\r\n\r\n---\r\n\r\n## Workflow Examples\r\n\r\n### Basic Parallel Generation\r\nGenerate multiple images in the time it takes to generate one. Each worker uses a different seed.\r\n\r\n![Clipboard Image (6)](https://github.com/user-attachments/assets/9598c94c-d9b4-4ccf-ab16-a21398220aeb)\r\n\r\n> [Download workflow](/workflows/distributed-txt2img.json)\r\n\r\n1. Open your ComfyUI workflow\r\n2. Add **Distributed Seed** → connect to sampler's seed\r\n3. Add **Distributed Collector** → after VAE Decode\r\n4. Optional: enable `load_balance` on Distributed Collector to run on one least-busy participant\r\n5. Enable workers in the UI\r\n6. Run the workflow!\r\n\r\n### Parallel WAN Generation\r\nGenerate multiple videos in the time it takes to generate one. Each worker uses a different seed.\r\n\r\n![Clipboard Image (5)](https://github.com/user-attachments/assets/5382b845-833b-43b7-b238-a91c5579581a)\r\n\r\n> [Download workflow](/workflows/distributed-wan.json)\r\n\r\n1. Open your WAN ComfyUI workflow\r\n2. Add **Distributed Seed** → connect to sampler's seed\r\n3. Add **Distributed Collector** → after VAE Decode\r\n4. Add **Image Batch Divider** → after Distributed Collector\r\n5. Set the `divide_by` to the number of GPUs you have available\r\n> For example: if you have a master and 2x workers, set it to 3\r\n6. Enable workers in the UI\r\n7. 
Run the workflow!\r\n\r\n### Distributed Image Upscaling\r\nAccelerate Ultimate SD Upscaler by distributing tiles across multiple workers, with speed scaling as you add more GPUs.\r\n\r\n![Clipboard Image (3)](https://github.com/user-attachments/assets/ffb57a0d-7b75-4497-96d2-875d60865a1a)\r\n\r\n> [Download workflow](/workflows/distributed-upscale.json)\r\n\r\n1. Load your image\r\n2. Upscale with ESRGAN or similar\r\n3. Connect to **Ultimate SD Upscale Distributed**\r\n4. Configure tile settings\r\n5. Enable workers for faster processing\r\n\r\n### Distributed Video Upscaling\r\nAccelerate Ultimate SD Upscaler by distributing video tiles across multiple workers, with speed scaling as you add more GPUs.\r\n\r\n![Video Upscaler workflow](https://github.com/user-attachments/assets/3c3d61b1-0b5f-422e-8c58-7c1555fed765)\r\n\r\n> [Download workflow](/workflows/distributed-upscale-video.json)\r\n\r\n1. Load your video\r\n2. Optional: upscale with ESRGAN or similar\r\n3. Connect to **Ultimate SD Upscale Distributed**\r\n4. Configure tile settings\r\n5. Use RES4LYF (bong/res2) to get better results\r\n6. Enable workers for faster processing\r\n\r\n> You can run this workflow entirely on Runpod with minimal setup. [Check out the guide here.](https://github.com/robertvoy/ComfyUI-Distributed/blob/main/docs/video-upscaler-runpod-preset.md)\r\n\r\n---\r\n\r\n## Developer API\r\n\r\nControl your distributed cluster programmatically without opening the browser.\r\n\r\n* **Endpoint:** `POST /distributed/queue`\r\n* **Functionality:** Accepts a standard ComfyUI workflow JSON, automatically distributes it to available workers, and returns the execution ID.\r\n* **Documentation:** [See API Examples & Scripts](https://github.com/robertvoy/ComfyUI-Distributed/blob/main/docs/comfyui-distributed-api.md)\r\n\r\n> **⚠️ Security Warning:** Do not expose your ComfyUI port to the public internet. 
If you need remote access, run ComfyUI behind a secure proxy (like Cloudflare or a VPN).\r\n\r\n---\r\n\r\n## Distributed Value\r\n\r\nUse **Distributed Value** when you want per-worker overrides (for example, different prompts/models/settings per worker).\r\n\r\n- Output type adapts to the connected input where possible (`STRING`, `INT`, `FLOAT`, `COMBO`).\r\n- The node shows only currently enabled workers.\r\n- If worker enablement changes, worker fields update automatically.\r\n- When disconnected, it resets to default string mode and clears per-worker overrides.\r\n- On execution, master uses `default_value`; workers use their mapped override with typed coercion fallback to default.\r\n\r\n---\r\n\r\n## Nodes\r\n\r\n| Node | Description |\r\n|------|-------------|\r\n| **Distributed Seed** | Generates unique seeds for each worker |\r\n| **Distributed Collector** | Collects results (image/video frames and optionally audio) from workers back to the master; `load_balance` can route the run to one least-busy participant |\r\n| **Distributed Value** | Outputs per-worker override values with fallback to default |\r\n| **Ultimate SD Upscale Distributed** | Distributes upscale tiles across workers |\r\n| **Image Batch Divider** | Splits image batches for multi-GPU output |\r\n| **Audio Batch Divider** | Splits audio batches for multi-GPU output |\r\n| **Distributed Model Name** | Passes model paths to workers, enabling workflows to use models not present on the master in orchestrator-only mode |\r\n| **Distributed Empty Image** | Produces an empty IMAGE batch used when the master delegates all work |\r\n\r\n---\r\n\r\n## FAQ\r\n\r\n<details>\r\n<summary>Does it combine VRAM of multiple GPUs?</summary>\r\nNo, it does not combine VRAM of multiple GPUs.\r\n</details>\r\n\r\n<details>\r\n<summary>Does it speed up the generation of a single image or video?</summary>\r\nNo, it does not speed up the generation of a single image or video. 
Instead, it enables the generation of more images or videos simultaneously. However, it can speed up the upscaling of a single image when using the Ultimate SD Upscale Distributed feature.\r\n</details>\r\n\r\n<details>\r\n<summary>Does it work with the ComfyUI desktop app?</summary>\r\nYes, it does now.\r\n</details>\r\n\r\n<details>\r\n<summary>Can I combine my RTX 5090 with a GTX 980 to get faster results?</summary>\r\nYes, you can combine different GPUs, but performance is optimized when using similar GPUs. A significant performance imbalance between GPUs may cause bottlenecks.\r\n</details>\r\n\r\n<details>\r\n<summary>Does this work with cloud providers?</summary>\r\nYes, it is compatible with cloud providers. Refer to the setup guides for detailed instructions.\r\n</details>\r\n\r\n<details>\r\n<summary>Can I use my main machine just to coordinate workers without rendering?</summary>\r\nYes. Open the Distributed panel and uncheck the master toggle to run in orchestrator-only mode. The master will distribute work to workers but won't render locally. If all workers become unavailable, the master automatically re-enables to ensure your workflow still runs.\r\n</details>\r\n\r\n<details>\r\n<summary>Can I make this work with my Docker setup?</summary>\r\nYes, it is compatible with Docker setups, but you will need to configure your Docker environment yourself. Unfortunately, assistance with Docker configuration is not provided.\r\n</details>\r\n\r\n---\r\n\r\n## Disclaimer\r\n\r\nThis software is provided \"as is\" without any warranties, express or implied, including merchantability, fitness for a particular purpose, or non-infringement. The developers and copyright holders are not liable for any claims, damages, or liabilities arising from the use, modification, or distribution of the software. 
Users are solely responsible for ensuring compliance with applicable laws and regulations and for securing their networks against unauthorized access, hacking, data breaches, or loss. The developers assume no liability for any damages or incidents resulting from misuse, improper configuration, or external threats.\r\n\r\n---\r\n\r\n## Support the Project\r\n\r\n<img width=\"200\" align=\"right\" src=\"https://github.com/user-attachments/assets/84291921-c44e-4556-94f2-a3b16500f4f9\" />\r\n\r\nIf my custom nodes have added value to your workflow, consider fueling future development with a coffee!\r\n\r\nYour support helps keep this project thriving.\r\n\r\nBuy me a coffee at: https://buymeacoffee.com/robertvoy\r\n\r\n\r\n\r\n\r\n"
  },
  {
    "path": "__init__.py",
    "content": "# Import everything needed from the main module\nfrom .distributed import (\n    NODE_CLASS_MAPPINGS as DISTRIBUTED_CLASS_MAPPINGS, \n    NODE_DISPLAY_NAME_MAPPINGS as DISTRIBUTED_DISPLAY_NAME_MAPPINGS\n)\n\n# Import utilities\nfrom .utils.config import ensure_config_exists, CONFIG_FILE\nfrom .utils.logging import debug_log\n\n# Import distributed upscale nodes\nfrom .nodes.distributed_upscale import (\n    NODE_CLASS_MAPPINGS as UPSCALE_CLASS_MAPPINGS,\n    NODE_DISPLAY_NAME_MAPPINGS as UPSCALE_DISPLAY_NAME_MAPPINGS\n)\n\nWEB_DIRECTORY = \"./web\"\n\nensure_config_exists()\n\n# Merge node mappings\nNODE_CLASS_MAPPINGS = {**DISTRIBUTED_CLASS_MAPPINGS, **UPSCALE_CLASS_MAPPINGS}\nNODE_DISPLAY_NAME_MAPPINGS = {**DISTRIBUTED_DISPLAY_NAME_MAPPINGS, **UPSCALE_DISPLAY_NAME_MAPPINGS}\n\n__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS']\n\ndebug_log(\"Loaded Distributed nodes.\")\ndebug_log(f\"Config file: {CONFIG_FILE}\")\ndebug_log(f\"Available nodes: {list(NODE_CLASS_MAPPINGS.keys())}\")\n"
  },
  {
    "path": "api/__init__.py",
    "content": "from . import config_routes  # noqa: F401\nfrom . import tunnel_routes  # noqa: F401\nfrom . import worker_routes  # noqa: F401\nfrom . import job_routes  # noqa: F401\nfrom . import usdu_routes  # noqa: F401\n"
  },
  {
    "path": "api/config_routes.py",
    "content": "import json\nfrom contextlib import asynccontextmanager\n\nfrom aiohttp import web\nimport server\n\ntry:\n    from ..utils.config import config_transaction, load_config, save_config\nexcept ImportError:\n    from ..utils.config import load_config\n\n    try:\n        from ..utils.config import save_config\n    except ImportError:\n        def save_config(_config):\n            return True\n\n    @asynccontextmanager\n    async def config_transaction():\n        config = load_config()\n        original_snapshot = json.dumps(config, sort_keys=True)\n        yield config\n        if json.dumps(config, sort_keys=True) != original_snapshot:\n            save_config(config)\nfrom ..utils.logging import debug_log, log\nfrom ..utils.network import handle_api_error, normalize_host\n\n\ndef _positive_int(value):\n    return value > 0\n\n\nCONFIG_SCHEMA = {\n    \"workers\": (list, None),\n    \"master\": (dict, None),\n    \"settings\": (dict, None),\n    \"tunnel\": (dict, None),\n    \"managed_processes\": (dict, None),\n    \"worker_timeout_seconds\": (int, _positive_int),\n    \"debug\": (bool, None),\n    \"auto_launch_workers\": (bool, None),\n    \"stop_workers_on_master_exit\": (bool, None),\n    \"master_delegate_only\": (bool, None),\n    \"websocket_orchestration\": (bool, None),\n    \"has_auto_populated_workers\": (bool, None),\n}\n\n_SETTINGS_FIELDS = {\n    \"worker_timeout_seconds\",\n    \"debug\",\n    \"auto_launch_workers\",\n    \"stop_workers_on_master_exit\",\n    \"master_delegate_only\",\n    \"websocket_orchestration\",\n    \"has_auto_populated_workers\",\n}\n\n_WORKER_FIELDS = [\n    (\"enabled\", None, False),\n    (\"name\", None, False),\n    (\"port\", None, False),\n    (\"host\", normalize_host, True),\n    (\"cuda_device\", None, True),\n    (\"extra_args\", None, True),\n    (\"type\", None, False),\n]\n\n_MASTER_FIELDS = [\n    (\"name\", None, False),\n    (\"host\", normalize_host, True),\n    (\"port\", None, False),\n 
   (\"cuda_device\", None, False),\n    (\"extra_args\", None, False),\n]\n\n\ndef _apply_field_patch(target: dict, data: dict, field_rules: list) -> None:\n    \"\"\"Apply a partial update to a target dict based on field rules.\"\"\"\n    for key, normalizer, remove_on_none in field_rules:\n        if key not in data:\n            continue\n        value = data[key]\n        if value is None and remove_on_none:\n            target.pop(key, None)\n        else:\n            target[key] = normalizer(value) if (normalizer and value is not None) else value\n\n\n@server.PromptServer.instance.routes.get(\"/distributed/config\")\nasync def get_config_endpoint(request):\n    config = load_config()\n    return web.json_response(config)\n\n\n@server.PromptServer.instance.routes.post(\"/distributed/config\")\nasync def update_config_endpoint(request):\n    \"\"\"Bulk config update with schema validation.\"\"\"\n    try:\n        data = await request.json()\n    except Exception as e:\n        return await handle_api_error(request, f\"Invalid JSON payload: {e}\", 400)\n\n    if not isinstance(data, dict):\n        return await handle_api_error(request, \"Config payload must be an object\", 400)\n\n    validated_settings = {}\n    validated_root = {}\n    errors = []\n\n    for key, value in data.items():\n        if key not in CONFIG_SCHEMA:\n            errors.append(f\"Unknown field: {key}\")\n            continue\n\n        expected_type, validator = CONFIG_SCHEMA[key]\n        if not isinstance(value, expected_type):\n            errors.append(f\"{key}: expected {expected_type.__name__}\")\n            continue\n\n        if validator and not validator(value):\n            errors.append(f\"{key}: value {value!r} failed validation\")\n            continue\n\n        if key in _SETTINGS_FIELDS:\n            validated_settings[key] = value\n        else:\n            validated_root[key] = value\n\n    if errors:\n        return web.json_response({\n            \"status\": 
\"error\",\n            \"error\": errors,\n            \"message\": \"; \".join(errors),\n        }, status=400)\n\n    try:\n        async with config_transaction() as config:\n            settings = config.setdefault(\"settings\", {})\n            settings.update(validated_settings)\n            for key, value in validated_root.items():\n                config[key] = value\n            return web.json_response({\"status\": \"success\", \"config\": config})\n    except Exception as e:\n        return await handle_api_error(request, e)\n\n\n@server.PromptServer.instance.routes.get(\"/distributed/queue_status/{job_id}\")\nasync def queue_status_endpoint(request):\n    \"\"\"Check if a job queue is initialized.\"\"\"\n    try:\n        job_id = request.match_info['job_id']\n        \n        # Import to ensure initialization\n        from ..upscale.job_store import ensure_tile_jobs_initialized\n        prompt_server = ensure_tile_jobs_initialized()\n        \n        async with prompt_server.distributed_tile_jobs_lock:\n            exists = job_id in prompt_server.distributed_pending_tile_jobs\n        \n        debug_log(f\"Queue status check for job {job_id}: {'exists' if exists else 'not found'}\")\n        return web.json_response({\"exists\": exists, \"job_id\": job_id})\n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n@server.PromptServer.instance.routes.post(\"/distributed/config/update_worker\")\nasync def update_worker_endpoint(request):\n    try:\n        data = await request.json()\n        worker_id = data.get(\"worker_id\")\n        \n        if worker_id is None:\n            return await handle_api_error(request, \"Missing worker_id\", 400)\n\n        async with config_transaction() as config:\n            worker_found = False\n            workers = config.setdefault(\"workers\", [])\n\n            for worker in workers:\n                if worker[\"id\"] == worker_id:\n                    
_apply_field_patch(worker, data, _WORKER_FIELDS)\n                    worker_found = True\n                    break\n\n            if not worker_found:\n                # If worker not found and all required fields are provided, create new worker\n                if all(key in data for key in [\"name\", \"port\", \"cuda_device\"]):\n                    new_worker = {\n                        \"id\": worker_id,\n                        \"name\": data[\"name\"],\n                        \"host\": normalize_host(data.get(\"host\", \"localhost\")),\n                        \"port\": data[\"port\"],\n                        \"cuda_device\": data[\"cuda_device\"],\n                        \"enabled\": data.get(\"enabled\", False),\n                        \"extra_args\": data.get(\"extra_args\", \"\"),\n                        \"type\": data.get(\"type\", \"local\")\n                    }\n                    workers.append(new_worker)\n                else:\n                    return await handle_api_error(\n                        request,\n                        f\"Worker {worker_id} not found and missing required fields for creation\",\n                        404,\n                    )\n\n            return web.json_response({\"status\": \"success\"})\n    except Exception as e:\n        return await handle_api_error(request, e, 400)\n\n@server.PromptServer.instance.routes.post(\"/distributed/config/delete_worker\")\nasync def delete_worker_endpoint(request):\n    try:\n        data = await request.json()\n        worker_id = data.get(\"worker_id\")\n        \n        if worker_id is None:\n            return await handle_api_error(request, \"Missing worker_id\", 400)\n            \n        async with config_transaction() as config:\n            workers = config.get(\"workers\", [])\n\n            # Find and remove the worker\n            worker_index = -1\n            for i, worker in enumerate(workers):\n                if worker[\"id\"] == worker_id:\n        
            worker_index = i\n                    break\n\n            if worker_index == -1:\n                return await handle_api_error(request, f\"Worker {worker_id} not found\", 404)\n\n            # Remove the worker\n            removed_worker = workers.pop(worker_index)\n\n            return web.json_response({\n                \"status\": \"success\",\n                \"message\": f\"Worker {removed_worker.get('name', worker_id)} deleted\"\n            })\n    except Exception as e:\n        return await handle_api_error(request, e, 400)\n\n@server.PromptServer.instance.routes.post(\"/distributed/config/update_setting\")\nasync def update_setting_endpoint(request):\n    \"\"\"Updates a specific key in the settings object.\"\"\"\n    try:\n        data = await request.json()\n        key = data.get(\"key\")\n        value = data.get(\"value\")\n\n        if not key or value is None:\n            return await handle_api_error(request, \"Missing 'key' or 'value' in request\", 400)\n        if key not in _SETTINGS_FIELDS:\n            return await handle_api_error(request, f\"Unknown setting: {key}\", 400)\n\n        async with config_transaction() as config:\n            if 'settings' not in config:\n                config['settings'] = {}\n\n            config['settings'][key] = value\n\n            return web.json_response({\"status\": \"success\", \"message\": f\"Setting '{key}' updated.\"})\n    except Exception as e:\n        return await handle_api_error(request, e, 400)\n\n@server.PromptServer.instance.routes.post(\"/distributed/config/update_master\")\nasync def update_master_endpoint(request):\n    \"\"\"Updates master configuration.\"\"\"\n    try:\n        data = await request.json()\n        \n        async with config_transaction() as config:\n            if 'master' not in config:\n                config['master'] = {}\n            _apply_field_patch(config['master'], data, _MASTER_FIELDS)\n\n            return web.json_response({\"status\": 
\"success\", \"message\": \"Master configuration updated.\"})\n    except Exception as e:\n        return await handle_api_error(request, e, 400)\n"
  },
  {
    "path": "api/job_routes.py",
    "content": "import json\nimport asyncio\nimport io\nimport os\nimport base64\nimport binascii\nimport time\n\nfrom aiohttp import web\nimport server\nimport torch\nfrom PIL import Image\n\nfrom ..utils.logging import debug_log\nfrom ..utils.image import pil_to_tensor, ensure_contiguous\nfrom ..utils.network import handle_api_error\nfrom ..utils.constants import JOB_INIT_GRACE_PERIOD, MEMORY_CLEAR_DELAY\ntry:\n    from .queue_orchestration import ensure_distributed_state, orchestrate_distributed_execution\nexcept ImportError:\n    from .queue_orchestration import orchestrate_distributed_execution\n\n    def ensure_distributed_state():\n        return None\nfrom .queue_request import parse_queue_request_payload\n\nprompt_server = server.PromptServer.instance\n\n# Canonical worker result envelope accepted by POST /distributed/job_complete:\n# { \"job_id\": str, \"worker_id\": str, \"batch_idx\": int, \"image\": <base64 PNG>, \"is_last\": bool }\n\n\ndef _decode_image_sync(image_path):\n    \"\"\"Decode image/video file and compute hash in a threadpool worker.\"\"\"\n    import base64\n    import hashlib\n    import folder_paths\n\n    full_path = folder_paths.get_annotated_filepath(image_path)\n    if not os.path.exists(full_path):\n        raise FileNotFoundError(image_path)\n\n    hash_md5 = hashlib.md5()\n    with open(full_path, 'rb') as f:\n        for chunk in iter(lambda: f.read(4096), b\"\"):\n            hash_md5.update(chunk)\n    file_hash = hash_md5.hexdigest()\n\n    video_extensions = {'.mp4', '.avi', '.mov', '.mkv', '.webm'}\n    file_ext = os.path.splitext(full_path)[1].lower()\n\n    if file_ext in video_extensions:\n        with open(full_path, 'rb') as f:\n            file_data = f.read()\n        mime_types = {\n            '.mp4': 'video/mp4',\n            '.avi': 'video/x-msvideo',\n            '.mov': 'video/quicktime',\n            '.mkv': 'video/x-matroska',\n            '.webm': 'video/webm'\n        }\n        mime_type = 
mime_types.get(file_ext, 'video/mp4')\n        image_data = f\"data:{mime_type};base64,{base64.b64encode(file_data).decode('utf-8')}\"\n    else:\n        with Image.open(full_path) as img:\n            if img.mode not in ('RGB', 'RGBA'):\n                img = img.convert('RGB')\n            buffer = io.BytesIO()\n            img.save(buffer, format='PNG', compress_level=1)\n            image_data = f\"data:image/png;base64,{base64.b64encode(buffer.getvalue()).decode('utf-8')}\"\n\n    return {\n        \"status\": \"success\",\n        \"image_data\": image_data,\n        \"hash\": file_hash,\n    }\n\n\ndef _check_file_sync(filename, expected_hash):\n    \"\"\"Check file presence and hash in a threadpool worker.\"\"\"\n    import hashlib\n    import folder_paths\n\n    full_path = folder_paths.get_annotated_filepath(filename)\n    if not os.path.exists(full_path):\n        return {\n            \"status\": \"success\",\n            \"exists\": False,\n        }\n\n    hash_md5 = hashlib.md5()\n    with open(full_path, 'rb') as f:\n        for chunk in iter(lambda: f.read(4096), b\"\"):\n            hash_md5.update(chunk)\n    file_hash = hash_md5.hexdigest()\n\n    return {\n        \"status\": \"success\",\n        \"exists\": True,\n        \"hash_matches\": file_hash == expected_hash,\n    }\n\n\ndef _decode_canonical_png_tensor(image_payload):\n    \"\"\"Decode canonical base64 PNG payload into a contiguous IMAGE tensor.\"\"\"\n    if not isinstance(image_payload, str) or not image_payload.strip():\n        raise ValueError(\"Field 'image' must be a non-empty base64 PNG string.\")\n\n    encoded = image_payload.strip()\n    if encoded.startswith(\"data:\"):\n        header, sep, data_part = encoded.partition(\",\")\n        if not sep:\n            raise ValueError(\"Field 'image' data URL is malformed.\")\n        if not header.lower().startswith(\"data:image/png;base64\"):\n            raise ValueError(\"Field 'image' must be a PNG data URL when using 
data:* format.\")\n        encoded = data_part\n\n    try:\n        png_bytes = base64.b64decode(encoded, validate=True)\n    except (binascii.Error, ValueError) as exc:\n        raise ValueError(\"Field 'image' is not valid base64 PNG data.\") from exc\n\n    if not png_bytes:\n        raise ValueError(\"Field 'image' decoded to empty PNG data.\")\n\n    try:\n        with Image.open(io.BytesIO(png_bytes)) as img:\n            img = img.convert(\"RGB\")\n            tensor = pil_to_tensor(img)\n        return ensure_contiguous(tensor)\n    except Exception as exc:\n        raise ValueError(f\"Failed to decode PNG image payload: {exc}\") from exc\n\n\ndef _decode_audio_payload(audio_payload):\n    \"\"\"Decode canonical audio payload into an AUDIO dict.\"\"\"\n    from ..utils.audio_payload import decode_audio_payload\n\n    return decode_audio_payload(audio_payload)\n\n\n@server.PromptServer.instance.routes.post(\"/distributed/prepare_job\")\nasync def prepare_job_endpoint(request):\n    try:\n        data = await request.json()\n        multi_job_id = data.get('multi_job_id')\n        if not multi_job_id:\n            return await handle_api_error(request, \"Missing multi_job_id\", 400)\n\n        ensure_distributed_state()\n        async with prompt_server.distributed_jobs_lock:\n            if multi_job_id not in prompt_server.distributed_pending_jobs:\n                prompt_server.distributed_pending_jobs[multi_job_id] = asyncio.Queue()\n        \n        debug_log(f\"Prepared queue for job {multi_job_id}\")\n        return web.json_response({\"status\": \"success\"})\n    except Exception as e:\n        return await handle_api_error(request, e)\n\n@server.PromptServer.instance.routes.post(\"/distributed/clear_memory\")\nasync def clear_memory_endpoint(request):\n    debug_log(\"Received request to clear VRAM.\")\n    try:\n        # Use ComfyUI's prompt server queue system like the /free endpoint does\n        if hasattr(server.PromptServer.instance, 
'prompt_queue'):\n            server.PromptServer.instance.prompt_queue.set_flag(\"unload_models\", True)\n            server.PromptServer.instance.prompt_queue.set_flag(\"free_memory\", True)\n            debug_log(\"Set queue flags for memory clearing.\")\n        \n        # Wait a bit for the queue to process\n        await asyncio.sleep(MEMORY_CLEAR_DELAY)\n        \n        # Also do direct cleanup as backup, but with error handling\n        import gc\n        import comfy.model_management as mm\n        \n        try:\n            mm.unload_all_models()\n        except AttributeError as e:\n            debug_log(f\"Warning during model unload: {e}\")\n        \n        try:\n            mm.soft_empty_cache()\n        except Exception as e:\n            debug_log(f\"Warning during cache clear: {e}\")\n        \n        for _ in range(3):\n            gc.collect()\n        \n        if torch.cuda.is_available():\n            torch.cuda.empty_cache()\n            torch.cuda.ipc_collect()\n        \n        debug_log(\"VRAM cleared successfully.\")\n        return web.json_response({\"status\": \"success\", \"message\": \"GPU memory cleared.\"})\n    except Exception as e:\n        # Even if there's an error, try to do basic cleanup\n        import gc\n        gc.collect()\n        if torch.cuda.is_available():\n            torch.cuda.empty_cache()\n        debug_log(f\"Partial VRAM clear completed with warning: {e}\")\n        return web.json_response({\"status\": \"success\", \"message\": \"GPU memory cleared (with warnings)\"})\n\n\n@server.PromptServer.instance.routes.post(\"/distributed/queue\")\nasync def distributed_queue_endpoint(request):\n    \"\"\"Queue a distributed workflow, mirroring the UI orchestration pipeline.\"\"\"\n    try:\n        raw_payload = await request.json()\n    except Exception as exc:\n        return await handle_api_error(request, f\"Invalid JSON payload: {exc}\", 400)\n\n    try:\n        payload = 
parse_queue_request_payload(raw_payload)\n    except ValueError as exc:\n        return await handle_api_error(request, exc, 400)\n\n    try:\n        prompt_id, prompt_number, worker_count, node_errors = await orchestrate_distributed_execution(\n            payload.prompt,\n            payload.workflow_meta,\n            payload.client_id,\n            enabled_worker_ids=payload.enabled_worker_ids,\n            delegate_master=payload.delegate_master,\n            trace_execution_id=payload.trace_execution_id,\n        )\n        return web.json_response({\n            \"prompt_id\": prompt_id,\n            \"number\": prompt_number,\n            \"node_errors\": node_errors,\n            \"worker_count\": worker_count,\n            \"auto_prepare_supported\": True,\n        })\n    except Exception as exc:\n        return await handle_api_error(request, exc, 500)\n\n@server.PromptServer.instance.routes.post(\"/distributed/load_image\")\nasync def load_image_endpoint(request):\n    \"\"\"Load an image or video file and return it as base64 data with hash.\"\"\"\n    try:\n        data = await request.json()\n        image_path = data.get(\"image_path\")\n        \n        if not image_path:\n            return await handle_api_error(request, \"Missing image_path\", 400)\n        loop = asyncio.get_running_loop()\n        payload = await loop.run_in_executor(None, _decode_image_sync, image_path)\n        return web.json_response(payload)\n    except FileNotFoundError:\n        return await handle_api_error(request, f\"File not found: {image_path}\", 404)\n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n@server.PromptServer.instance.routes.post(\"/distributed/check_file\")\nasync def check_file_endpoint(request):\n    \"\"\"Check if a file exists and matches the given hash.\"\"\"\n    try:\n        data = await request.json()\n        filename = data.get(\"filename\")\n        expected_hash = data.get(\"hash\")\n        \n        
if not filename or not expected_hash:\n            return await handle_api_error(request, \"Missing filename or hash\", 400)\n        loop = asyncio.get_running_loop()\n        payload = await loop.run_in_executor(None, _check_file_sync, filename, expected_hash)\n        return web.json_response(payload)\n        \n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n\n@server.PromptServer.instance.routes.post(\"/distributed/job_complete\")\nasync def job_complete_endpoint(request):\n    try:\n        data = await request.json()\n    except Exception as exc:\n        return await handle_api_error(request, f\"Invalid JSON payload: {exc}\", 400)\n\n    if not isinstance(data, dict):\n        return await handle_api_error(request, \"Expected a JSON object body\", 400)\n\n    try:\n        job_id = data.get(\"job_id\")\n        worker_id = data.get(\"worker_id\")\n        batch_idx = data.get(\"batch_idx\")\n        image_payload = data.get(\"image\")\n        audio_payload = data.get(\"audio\")\n        is_last = data.get(\"is_last\")\n\n        errors = []\n        if not isinstance(job_id, str) or not job_id.strip():\n            errors.append(\"job_id: expected non-empty string\")\n        if not isinstance(worker_id, str) or not worker_id.strip():\n            errors.append(\"worker_id: expected non-empty string\")\n        if not isinstance(batch_idx, int) or batch_idx < 0:\n            errors.append(\"batch_idx: expected non-negative integer\")\n        if not isinstance(image_payload, str) or not image_payload.strip():\n            errors.append(\"image: expected non-empty base64 PNG string\")\n        if audio_payload is not None and not isinstance(audio_payload, dict):\n            errors.append(\"audio: expected object when provided\")\n        if not isinstance(is_last, bool):\n            errors.append(\"is_last: expected boolean\")\n        if errors:\n            return await handle_api_error(request, errors, 400)\n\n     
   tensor = _decode_canonical_png_tensor(image_payload)\n        decoded_audio = _decode_audio_payload(audio_payload) if audio_payload is not None else None\n        multi_job_id = job_id.strip()\n        worker_id = worker_id.strip()\n\n        pending = None\n        queue_size = 0\n        deadline = time.monotonic() + float(JOB_INIT_GRACE_PERIOD)\n        while pending is None:\n            async with prompt_server.distributed_jobs_lock:\n                pending = prompt_server.distributed_pending_jobs.get(multi_job_id)\n                if pending is not None:\n                    await pending.put(\n                        {\n                            \"tensor\": tensor,\n                            \"worker_id\": worker_id,\n                            \"image_index\": int(batch_idx),\n                            \"is_last\": is_last,\n                            \"audio\": decoded_audio,\n                        }\n                    )\n                    queue_size = pending.qsize()\n                    break\n\n            if time.monotonic() > deadline:\n                return await handle_api_error(request, \"job not initialized\", 404)\n            await asyncio.sleep(0.05)\n\n        debug_log(\n            f\"job_complete received canonical envelope - job_id: {multi_job_id}, \"\n            f\"worker: {worker_id}, batch_idx: {batch_idx}, is_last: {is_last}, \"\n            f\"queue_size: {queue_size}\"\n        )\n\n        return web.json_response({\"status\": \"success\"})\n    except Exception as e:\n        return await handle_api_error(request, e)\n"
  },
  {
    "path": "api/orchestration/__init__.py",
    "content": "# Orchestration helpers split out from queue_orchestration.py:\n# - prompt_transform.py: graph pruning + hidden input overrides\n# - media_sync.py: remote media/path normalization\n# - dispatch.py: worker probe + prompt dispatch\n"
  },
  {
    "path": "api/orchestration/dispatch.py",
    "content": "import asyncio\nimport json\nimport uuid\n\nimport aiohttp\n\nfrom ...utils.logging import debug_log, log\nfrom ...utils.network import build_worker_url, get_client_session, probe_worker\ntry:\n    from ...utils.trace_logger import trace_debug, trace_info\nexcept ImportError:\n    def trace_debug(*_args, **_kwargs):\n        return None\n\n    def trace_info(*_args, **_kwargs):\n        return None\n\ntry:\n    from ..schemas import parse_positive_int\nexcept ImportError:\n    def parse_positive_int(value, default):\n        try:\n            parsed = int(value)\n            return parsed if parsed > 0 else default\n        except (TypeError, ValueError):\n            return default\n\n_least_busy_rr_index = 0\n\n\nasync def worker_is_active(worker):\n    \"\"\"Ping worker's /prompt endpoint to confirm it's reachable.\"\"\"\n    url = build_worker_url(worker)\n    return await probe_worker(url, timeout=3.0) is not None\n\n\nasync def worker_ws_is_active(worker):\n    \"\"\"Ping worker's websocket endpoint to confirm it's reachable.\"\"\"\n    session = await get_client_session()\n    url = build_worker_url(worker, \"/distributed/worker_ws\")\n    try:\n        ws = await session.ws_connect(url, heartbeat=20, timeout=3)\n        await ws.close()\n        return True\n    except asyncio.TimeoutError:\n        debug_log(f\"[Distributed] Worker WS probe timed out: {url}\")\n        return False\n    except aiohttp.ClientConnectorError:\n        debug_log(f\"[Distributed] Worker WS unreachable: {url}\")\n        return False\n    except Exception as e:\n        debug_log(f\"[Distributed] Worker WS probe unexpected error: {e}\")\n        return False\n\n\nasync def _probe_worker_active(worker, use_websocket, semaphore):\n    async with semaphore:\n        is_active = await (worker_ws_is_active(worker) if use_websocket else worker_is_active(worker))\n        return worker, is_active\n\n\nasync def _dispatch_via_websocket(worker_url, payload, client_id, 
timeout=60.0):\n    \"\"\"Open a fresh worker websocket, dispatch one prompt, wait for ack, then close.\"\"\"\n    request_id = uuid.uuid4().hex\n    ws_payload = {\n        \"type\": \"dispatch_prompt\",\n        \"request_id\": request_id,\n        \"prompt\": payload.get(\"prompt\"),\n        \"workflow\": payload.get(\"workflow\"),\n        \"client_id\": client_id,\n    }\n    ws_url = worker_url.replace(\"http://\", \"ws://\").replace(\"https://\", \"wss://\")\n    ws_url = f\"{ws_url}/distributed/worker_ws\"\n    session = await get_client_session()\n\n    async with session.ws_connect(ws_url, heartbeat=20, timeout=timeout) as ws:\n        await ws.send_json(ws_payload)\n        async for msg in ws:\n            if msg.type == aiohttp.WSMsgType.TEXT:\n                data = json.loads(msg.data or \"{}\")\n                if data.get(\"type\") == \"dispatch_ack\" and data.get(\"request_id\") == request_id:\n                    if data.get(\"ok\"):\n                        return\n                    error_text = data.get(\"error\") or \"Worker rejected websocket dispatch.\"\n                    validation_error = data.get(\"validation_error\")\n                    node_errors = data.get(\"node_errors\")\n                    if validation_error:\n                        error_text = f\"{error_text} | validation_error={validation_error}\"\n                    if node_errors:\n                        error_text = f\"{error_text} | node_errors={node_errors}\"\n                    raise RuntimeError(error_text)\n            elif msg.type in (aiohttp.WSMsgType.ERROR, aiohttp.WSMsgType.CLOSED):\n                raise RuntimeError(f\"Worker websocket closed unexpectedly: {msg.type}\")\n\n    raise RuntimeError(\"Worker websocket closed before dispatch_ack was received.\")\n\n\nasync def dispatch_worker_prompt(\n    worker,\n    prompt_obj,\n    workflow_meta,\n    client_id=None,\n    use_websocket=False,\n    trace_execution_id=None,\n):\n    \"\"\"Send the prepared 
prompt to a worker ComfyUI instance.\"\"\"\n    worker_url = build_worker_url(worker)\n    url = build_worker_url(worker, \"/prompt\")\n    payload = {\"prompt\": prompt_obj}\n    extra_data = {}\n    if workflow_meta:\n        extra_data.setdefault(\"extra_pnginfo\", {})[\"workflow\"] = workflow_meta\n    if extra_data:\n        payload[\"extra_data\"] = extra_data\n\n    if use_websocket:\n        try:\n            await _dispatch_via_websocket(\n                worker_url,\n                {\n                    \"prompt\": prompt_obj,\n                    \"workflow\": workflow_meta,\n                },\n                client_id,\n            )\n            return\n        except Exception as exc:\n            worker_id = worker.get(\"id\")\n            if trace_execution_id:\n                trace_info(trace_execution_id, f\"Websocket dispatch failed for worker {worker_id}: {exc}\")\n            else:\n                log(f\"[Distributed] Websocket dispatch failed for worker {worker_id}: {exc}\")\n            raise\n\n    session = await get_client_session()\n    async with session.post(\n        url,\n        json=payload,\n        timeout=aiohttp.ClientTimeout(total=60),\n    ) as resp:\n        resp.raise_for_status()\n\n\nasync def select_active_workers(\n    workers,\n    use_websocket,\n    delegate_master,\n    trace_execution_id=None,\n    probe_concurrency=8,\n):\n    \"\"\"Probe workers and return (active_workers, updated_delegate_master).\"\"\"\n    probe_limit = parse_positive_int(probe_concurrency, 8)\n    probe_semaphore = asyncio.Semaphore(probe_limit)\n\n    if trace_execution_id and workers:\n        trace_debug(\n            trace_execution_id,\n            f\"Probing {len(workers)} workers with probe_concurrency={probe_limit}\",\n        )\n\n    probe_results = await asyncio.gather(\n        *[\n            _probe_worker_active(worker, use_websocket, probe_semaphore)\n            for worker in workers\n        ]\n    )\n\n    
active_workers = []\n    for worker, is_active in probe_results:\n        if is_active:\n            active_workers.append(worker)\n        else:\n            if trace_execution_id:\n                trace_info(trace_execution_id, f\"Worker {worker['name']} ({worker['id']}) is offline, skipping.\")\n            else:\n                log(f\"[Distributed] Worker {worker['name']} ({worker['id']}) is offline, skipping.\")\n\n    if trace_execution_id and workers:\n        trace_debug(\n            trace_execution_id,\n            f\"Worker probe complete: active={len(active_workers)}/{len(workers)}\",\n        )\n\n    if not active_workers and delegate_master:\n        if trace_execution_id:\n            trace_debug(trace_execution_id, \"All workers offline while delegate-only requested; enabling master participation.\")\n        else:\n            debug_log(\"All workers offline while delegate-only requested; enabling master participation.\")\n        delegate_master = False\n\n    return active_workers, delegate_master\n\n\ndef _extract_queue_remaining(payload):\n    if not isinstance(payload, dict):\n        return 0\n    try:\n        queue_remaining = int(payload.get(\"exec_info\", {}).get(\"queue_remaining\", 0))\n    except (TypeError, ValueError):\n        queue_remaining = 0\n    return max(queue_remaining, 0)\n\n\nasync def _probe_worker_queue(worker, semaphore, probe_timeout):\n    async with semaphore:\n        worker_url = build_worker_url(worker)\n        payload = await probe_worker(worker_url, timeout=probe_timeout)\n        if payload is None:\n            return None\n        return {\n            \"worker\": worker,\n            \"queue_remaining\": _extract_queue_remaining(payload),\n        }\n\n\ndef _select_idle_round_robin(statuses):\n    global _least_busy_rr_index\n    if not statuses:\n        return None\n    index = _least_busy_rr_index % len(statuses)\n    _least_busy_rr_index += 1\n    return statuses[index]\n\n\nasync def 
select_least_busy_worker(\n    workers,\n    trace_execution_id=None,\n    probe_concurrency=8,\n    probe_timeout=3.0,\n):\n    \"\"\"Select one worker by queue depth, round-robin among idle workers.\"\"\"\n    if not workers:\n        return None\n\n    probe_limit = parse_positive_int(probe_concurrency, 8)\n    probe_semaphore = asyncio.Semaphore(probe_limit)\n    statuses = await asyncio.gather(\n        *[\n            _probe_worker_queue(worker, probe_semaphore, probe_timeout)\n            for worker in workers\n        ]\n    )\n    statuses = [status for status in statuses if status is not None]\n    if not statuses:\n        if trace_execution_id:\n            trace_info(trace_execution_id, \"Least-busy selection failed: no worker queue probes succeeded.\")\n        else:\n            log(\"[Distributed] Least-busy selection failed: no worker queue probes succeeded.\")\n        return None\n\n    idle_statuses = [status for status in statuses if status[\"queue_remaining\"] == 0]\n    if idle_statuses:\n        selected = _select_idle_round_robin(idle_statuses)\n    else:\n        selected = min(statuses, key=lambda status: status[\"queue_remaining\"])\n\n    worker = selected[\"worker\"]\n    queue_remaining = selected[\"queue_remaining\"]\n    if trace_execution_id:\n        trace_debug(\n            trace_execution_id,\n            f\"Least-busy worker selected: {worker.get('name')} ({worker.get('id')}), queue_remaining={queue_remaining}\",\n        )\n    else:\n        debug_log(\n            f\"Least-busy worker selected: {worker.get('name')} ({worker.get('id')}), queue_remaining={queue_remaining}\"\n        )\n    return worker\n"
  },
  {
    "path": "api/orchestration/media_sync.py",
    "content": "import asyncio\nimport hashlib\nimport mimetypes\nimport os\nimport re\n\nimport aiohttp\n\nfrom ...utils.logging import debug_log, log\nfrom ...utils.network import build_worker_url, get_client_session\nfrom ...utils.trace_logger import trace_debug, trace_info\n\n\nLIKELY_FILENAME_RE = re.compile(\n    r\"\\.(ckpt|safetensors|pt|pth|bin|yaml|json|png|jpg|jpeg|webp|gif|bmp|mp4|avi|mov|mkv|webm|\"\n    r\"wav|mp3|flac|m4a|aac|ogg|opus|aiff|aif|wma|latent|txt|vae|lora|embedding)\"\n    r\"(\\s*\\[\\w+\\])?$\",\n    re.IGNORECASE,\n)\nMEDIA_FILE_RE = re.compile(\n    r\"\\.(png|jpg|jpeg|webp|gif|bmp|mp4|avi|mov|mkv|webm|wav|mp3|flac|m4a|aac|ogg|opus|aiff|aif|wma)(\\s*\\[\\w+\\])?$\",\n    re.IGNORECASE,\n)\n\n\ndef _normalize_media_reference(value):\n    \"\"\"Normalize one media string value to a path-like reference or None.\"\"\"\n    if not isinstance(value, str):\n        return None\n    cleaned = re.sub(r\"\\s*\\[\\w+\\]$\", \"\", value).strip().replace(\"\\\\\", \"/\")\n    if MEDIA_FILE_RE.search(cleaned):\n        return cleaned\n    return None\n\n\ndef convert_paths_for_platform(obj, target_separator):\n    \"\"\"Recursively normalize likely file paths for the worker platform separator.\"\"\"\n    if target_separator not in (\"/\", \"\\\\\"):\n        return obj\n\n    def _convert(value):\n        if isinstance(value, str):\n            if (\"/\" in value or \"\\\\\" in value) and LIKELY_FILENAME_RE.search(value):\n                trimmed = value.strip()\n                has_drive = bool(re.match(r\"^[A-Za-z]:(\\\\\\\\|/)\", trimmed))\n                is_absolute = trimmed.startswith(\"/\") or trimmed.startswith(\"\\\\\\\\\")\n                has_protocol = bool(re.match(r\"^\\w+://\", trimmed))\n\n                # URLs are not local paths and should never be separator-normalized.\n                if has_protocol:\n                    return trimmed\n\n                # Keep relative media-style paths in forward-slash form (Comfy-style 
annotated paths).\n                if not has_drive and not is_absolute and not has_protocol and MEDIA_FILE_RE.search(trimmed):\n                    return re.sub(r\"[\\\\]+\", \"/\", trimmed)\n\n                if target_separator == \"\\\\\":\n                    return re.sub(r\"[\\\\/]+\", r\"\\\\\", trimmed)\n                return re.sub(r\"[\\\\/]+\", \"/\", trimmed)\n            return value\n        if isinstance(value, list):\n            return [_convert(item) for item in value]\n        if isinstance(value, dict):\n            return {key: _convert(item) for key, item in value.items()}\n        return value\n\n    return _convert(obj)\n\n\ndef _find_media_references(prompt_obj):\n    \"\"\"Find media file references in image/video/audio/file inputs used by worker prompts.\"\"\"\n    media_refs = set()\n    for node in prompt_obj.values():\n        if not isinstance(node, dict):\n            continue\n        inputs = node.get(\"inputs\", {})\n        for key in (\"image\", \"video\", \"audio\", \"file\"):\n            cleaned = _normalize_media_reference(inputs.get(key))\n            if cleaned:\n                media_refs.add(cleaned)\n    return sorted(media_refs)\n\n\ndef _rewrite_prompt_media_inputs(prompt_obj, worker_media_paths):\n    \"\"\"Rewrite media string inputs to worker-local uploaded paths.\"\"\"\n    if not isinstance(worker_media_paths, dict) or not worker_media_paths:\n        return\n\n    for node in prompt_obj.values():\n        if not isinstance(node, dict):\n            continue\n        inputs = node.get(\"inputs\", {})\n        if not isinstance(inputs, dict):\n            continue\n        for key in (\"image\", \"video\", \"audio\", \"file\"):\n            value = inputs.get(key)\n            cleaned = _normalize_media_reference(value)\n            if not cleaned:\n                continue\n            worker_path = worker_media_paths.get(cleaned)\n            if worker_path:\n                inputs[key] = worker_path\n\n\ndef 
_load_media_file_sync(filename):\n    \"\"\"Load local media bytes and hash for worker upload sync.\"\"\"\n    import folder_paths\n\n    full_path = folder_paths.get_annotated_filepath(filename)\n    if not os.path.exists(full_path):\n        raise FileNotFoundError(filename)\n\n    with open(full_path, \"rb\") as f:\n        file_bytes = f.read()\n\n    file_hash = hashlib.md5(file_bytes).hexdigest()\n    mime_type = mimetypes.guess_type(full_path)[0]\n    if not mime_type:\n        ext = os.path.splitext(full_path)[1].lower()\n        if ext in {\".mp4\", \".avi\", \".mov\", \".mkv\", \".webm\"}:\n            mime_type = \"video/mp4\"\n        else:\n            mime_type = \"image/png\"\n    return file_bytes, file_hash, mime_type\n\n\nasync def fetch_worker_path_separator(worker, trace_execution_id=None):\n    \"\"\"Best-effort fetch of a worker's path separator from /distributed/system_info.\"\"\"\n    url = build_worker_url(worker, \"/distributed/system_info\")\n    session = await get_client_session()\n    try:\n        async with session.get(url, timeout=aiohttp.ClientTimeout(total=5)) as resp:\n            if resp.status != 200:\n                return None\n            payload = await resp.json()\n            separator = ((payload or {}).get(\"platform\") or {}).get(\"path_separator\")\n            return separator if separator in (\"/\", \"\\\\\") else None\n    except Exception as exc:\n        if trace_execution_id:\n            trace_debug(trace_execution_id, f\"Failed to fetch worker system info ({worker.get('id')}): {exc}\")\n        else:\n            debug_log(f\"[Distributed] Failed to fetch worker system info ({worker.get('id')}): {exc}\")\n        return None\n\n\nasync def _upload_media_to_worker(worker, filename, file_bytes, file_hash, mime_type, trace_execution_id=None):\n    \"\"\"Upload one media file to worker iff missing or hash-mismatched.\"\"\"\n    session = await get_client_session()\n    normalized = filename.replace(\"\\\\\", 
\"/\")\n\n    check_url = build_worker_url(worker, \"/distributed/check_file\")\n    try:\n        async with session.post(\n            check_url,\n            json={\"filename\": normalized, \"hash\": file_hash},\n            timeout=aiohttp.ClientTimeout(total=6),\n        ) as resp:\n            if resp.status == 200:\n                payload = await resp.json()\n                if payload.get(\"exists\") and payload.get(\"hash_matches\"):\n                    return False, normalized\n    except Exception as exc:\n        if trace_execution_id:\n            trace_debug(trace_execution_id, f\"Media check failed for '{normalized}' on worker {worker.get('id')}: {exc}\")\n        else:\n            debug_log(f\"[Distributed] Media check failed for '{normalized}' on worker {worker.get('id')}: {exc}\")\n\n    parts = normalized.split(\"/\")\n    clean_name = parts[-1]\n    subfolder = \"/\".join(parts[:-1])\n\n    form = aiohttp.FormData()\n    form.add_field(\"image\", file_bytes, filename=clean_name, content_type=mime_type)\n    form.add_field(\"type\", \"input\")\n    form.add_field(\"subfolder\", subfolder)\n    form.add_field(\"overwrite\", \"true\")\n\n    upload_url = build_worker_url(worker, \"/upload/image\")\n    async with session.post(\n        upload_url,\n        data=form,\n        timeout=aiohttp.ClientTimeout(total=30),\n    ) as resp:\n        resp.raise_for_status()\n        try:\n            payload = await resp.json()\n        except Exception:\n            payload = {}\n\n    name = str((payload or {}).get(\"name\") or clean_name).strip()\n    subfolder = str((payload or {}).get(\"subfolder\") or \"\").strip().replace(\"\\\\\", \"/\").strip(\"/\")\n    worker_path = f\"{subfolder}/{name}\" if subfolder else name\n    return True, worker_path\n\n\nasync def sync_worker_media(worker, prompt_obj, trace_execution_id=None):\n    \"\"\"Sync referenced media files from master to a remote worker before dispatch.\"\"\"\n    media_refs = 
_find_media_references(prompt_obj)\n    if not media_refs:\n        return\n\n    loop = asyncio.get_running_loop()\n    uploaded = 0\n    skipped = 0\n    missing = 0\n    worker_media_paths = {}\n    for filename in media_refs:\n        try:\n            file_bytes, file_hash, mime_type = await loop.run_in_executor(\n                None, _load_media_file_sync, filename\n            )\n        except FileNotFoundError:\n            missing += 1\n            if trace_execution_id:\n                trace_info(trace_execution_id, f\"Media file '{filename}' not found on master; worker may fail to load it.\")\n            else:\n                log(f\"[Distributed] Media file '{filename}' not found on master; worker may fail to load it.\")\n            continue\n        except Exception as exc:\n            if trace_execution_id:\n                trace_info(trace_execution_id, f\"Failed to load media '{filename}' for worker sync: {exc}\")\n            else:\n                log(f\"[Distributed] Failed to load media '{filename}' for worker sync: {exc}\")\n            continue\n\n        try:\n            changed, worker_path = await _upload_media_to_worker(\n                worker,\n                filename,\n                file_bytes,\n                file_hash,\n                mime_type,\n                trace_execution_id=trace_execution_id,\n            )\n            if worker_path:\n                worker_media_paths[filename] = worker_path\n            if changed:\n                uploaded += 1\n            else:\n                skipped += 1\n        except Exception as exc:\n            if trace_execution_id:\n                trace_info(trace_execution_id, f\"Failed to upload media '{filename}' to worker {worker.get('id')}: {exc}\")\n            else:\n                log(f\"[Distributed] Failed to upload media '{filename}' to worker {worker.get('id')}: {exc}\")\n\n    _rewrite_prompt_media_inputs(prompt_obj, worker_media_paths)\n\n    summary = (\n        
f\"Media sync for worker {worker.get('id')}: \"\n        f\"uploaded={uploaded}, skipped={skipped}, missing={missing}, referenced={len(media_refs)}\"\n    )\n    if trace_execution_id:\n        trace_debug(trace_execution_id, summary)\n    else:\n        debug_log(f\"[Distributed] {summary}\")\n"
  },
  {
    "path": "api/orchestration/prompt_transform.py",
    "content": "import json\nfrom collections import deque\n\nfrom ...utils.logging import debug_log\n\n\nclass PromptIndex:\n    \"\"\"Cache prompt metadata for faster worker/master prompt preparation.\"\"\"\n\n    def __init__(self, prompt_obj):\n        self._prompt_json = json.dumps(prompt_obj)\n        self.nodes_by_class = {}\n        self.class_by_node = {}\n        self.inputs_by_node = {}\n        for node_id, node in _iter_prompt_nodes(prompt_obj):\n            class_type = node.get(\"class_type\")\n            node_id_str = str(node_id)\n            if class_type:\n                self.nodes_by_class.setdefault(class_type, []).append(node_id_str)\n            self.class_by_node[node_id_str] = class_type\n            self.inputs_by_node[node_id_str] = node.get(\"inputs\", {})\n        self._upstream_cache = {}\n\n    def copy_prompt(self):\n        return json.loads(self._prompt_json)\n\n    def nodes_for_class(self, class_name):\n        return self.nodes_by_class.get(class_name, [])\n\n    def has_upstream(self, start_node_id, target_class):\n        cache_key = (str(start_node_id), target_class)\n        if cache_key in self._upstream_cache:\n            return self._upstream_cache[cache_key]\n\n        visited = set()\n        stack = [str(start_node_id)]\n        while stack:\n            node_id = stack.pop()\n            if node_id in visited:\n                continue\n            visited.add(node_id)\n            inputs = self.inputs_by_node.get(node_id, {})\n            for value in inputs.values():\n                if isinstance(value, list) and len(value) == 2:\n                    upstream_id = str(value[0])\n                    if self.class_by_node.get(upstream_id) == target_class:\n                        self._upstream_cache[cache_key] = True\n                        return True\n                    if upstream_id in self.inputs_by_node:\n                        stack.append(upstream_id)\n\n        self._upstream_cache[cache_key] = 
False\n        return False\n\n\ndef _iter_prompt_nodes(prompt_obj):\n    for node_id, node in prompt_obj.items():\n        if isinstance(node, dict):\n            yield str(node_id), node\n\n\ndef find_nodes_by_class(prompt_obj, class_name):\n    nodes = []\n    for node_id, node in _iter_prompt_nodes(prompt_obj):\n        if node.get(\"class_type\") == class_name:\n            nodes.append(node_id)\n    return nodes\n\n\ndef _find_downstream_nodes(prompt_obj, start_ids):\n    \"\"\"Return all nodes reachable downstream from the provided IDs.\"\"\"\n    adjacency = {}\n    for node_id, node in _iter_prompt_nodes(prompt_obj):\n        inputs = node.get(\"inputs\", {})\n        for value in inputs.values():\n            if isinstance(value, list) and len(value) == 2:\n                source_id = str(value[0])\n                adjacency.setdefault(source_id, set()).add(str(node_id))\n\n    connected = set(start_ids)\n    queue = deque(start_ids)\n    while queue:\n        current = queue.popleft()\n        for dependent in adjacency.get(current, ()):  # pragma: no branch - simple iteration\n            if dependent not in connected:\n                connected.add(dependent)\n                queue.append(dependent)\n    return connected\n\n\ndef _create_numeric_id_generator(prompt_obj):\n    \"\"\"Return a closure that yields new numeric string IDs.\"\"\"\n    max_id = 0\n    for node_id in prompt_obj.keys():\n        try:\n            numeric = int(node_id)\n        except (TypeError, ValueError):\n            continue\n        max_id = max(max_id, numeric)\n\n    counter = max_id\n\n    def _next_id():\n        nonlocal counter\n        counter += 1\n        return str(counter)\n\n    return _next_id\n\n\ndef _find_upstream_nodes(prompt_obj, start_ids):\n    \"\"\"Return all nodes reachable upstream from start_ids, including start nodes.\"\"\"\n    connected = set(str(node_id) for node_id in start_ids)\n    queue = deque(connected)\n    while queue:\n        node_id 
= queue.popleft()\n        node = prompt_obj.get(node_id) or {}\n        inputs = node.get(\"inputs\", {})\n        for value in inputs.values():\n            if isinstance(value, list) and len(value) == 2:\n                source_id = str(value[0])\n                if source_id in prompt_obj and source_id not in connected:\n                    connected.add(source_id)\n                    queue.append(source_id)\n    return connected\n\n\ndef prune_prompt_for_worker(prompt_obj):\n    \"\"\"Prune worker prompt to distributed nodes and their upstream dependencies.\"\"\"\n    collector_ids = find_nodes_by_class(prompt_obj, \"DistributedCollector\")\n    upscale_ids = find_nodes_by_class(prompt_obj, \"UltimateSDUpscaleDistributed\")\n    distributed_ids = collector_ids + upscale_ids\n    if not distributed_ids:\n        return prompt_obj\n\n    connected = _find_upstream_nodes(prompt_obj, distributed_ids)\n    pruned_prompt = {}\n    for node_id in connected:\n        node = prompt_obj.get(node_id)\n        if node is not None:\n            pruned_prompt[node_id] = json.loads(json.dumps(node))\n\n    # Generate IDs from the original prompt so we never reuse IDs from pruned downstream nodes.\n    next_id = _create_numeric_id_generator(prompt_obj)\n    for dist_id in distributed_ids:\n        if dist_id not in pruned_prompt:\n            continue\n        downstream = _find_downstream_nodes(prompt_obj, [dist_id])\n        has_removed_downstream = any(node_id != dist_id for node_id in downstream)\n        if has_removed_downstream:\n            preview_id = next_id()\n            pruned_prompt[preview_id] = {\n                \"inputs\": {\n                    \"images\": [dist_id, 0],\n                },\n                \"class_type\": \"PreviewImage\",\n                \"_meta\": {\n                    \"title\": \"Preview Image (auto-added)\",\n                },\n            }\n\n    return pruned_prompt\n\n\ndef prepare_delegate_master_prompt(prompt_obj, 
collector_ids):\n    \"\"\"Prune master prompt so it only executes post-collector nodes in delegate mode.\"\"\"\n    downstream = _find_downstream_nodes(prompt_obj, collector_ids)\n    nodes_to_keep = set(collector_ids)\n    nodes_to_keep.update(downstream)\n\n    pruned_prompt = {}\n    for node_id in nodes_to_keep:\n        node = prompt_obj.get(node_id)\n        if node is not None:\n            pruned_prompt[node_id] = json.loads(json.dumps(node))\n\n    pruned_ids = set(pruned_prompt.keys())\n    for node_id, node in pruned_prompt.items():\n        inputs = node.get(\"inputs\")\n        if not inputs:\n            continue\n        for input_name, input_value in list(inputs.items()):\n            if isinstance(input_value, list) and len(input_value) == 2:\n                source_id = str(input_value[0])\n                if source_id not in pruned_ids:\n                    inputs.pop(input_name, None)\n                    debug_log(\n                        f\"Removed upstream reference '{input_name}' from node {node_id} for delegate-only master prompt.\"\n                    )\n\n    # Generate IDs from the original prompt to avoid ID collisions with pruned nodes.\n    next_id = _create_numeric_id_generator(prompt_obj)\n    for collector_id in collector_ids:\n        collector_entry = pruned_prompt.get(collector_id)\n        if not collector_entry:\n            continue\n        placeholder_id = next_id()\n        pruned_prompt[placeholder_id] = {\n            \"class_type\": \"DistributedEmptyImage\",\n            \"inputs\": {\n                \"height\": 64,\n                \"width\": 64,\n                \"channels\": 3,\n            },\n            \"_meta\": {\n                \"title\": \"Distributed Empty Image (auto-added)\",\n            },\n        }\n        collector_entry.setdefault(\"inputs\", {})[\"images\"] = [placeholder_id, 0]\n        debug_log(\n            f\"Inserted placeholder node {placeholder_id} for collector {collector_id} in 
delegate-only master prompt.\"\n        )\n\n    return pruned_prompt\n\n\ndef generate_job_id_map(prompt_index, prefix):\n    \"\"\"Create stable per-node job IDs for distributed nodes.\"\"\"\n    job_map = {}\n    distributed_nodes = prompt_index.nodes_for_class(\"DistributedCollector\") + prompt_index.nodes_for_class(\n        \"UltimateSDUpscaleDistributed\"\n    )\n    for node_id in distributed_nodes:\n        job_map[node_id] = f\"{prefix}_{node_id}\"\n    return job_map\n\n\ndef _override_seed_nodes(prompt_copy, prompt_index, is_master, participant_id, worker_index_map):\n    \"\"\"Configure DistributedSeed nodes for master or worker role.\"\"\"\n    for node_id in prompt_index.nodes_for_class(\"DistributedSeed\"):\n        node = prompt_copy.get(node_id)\n        if not isinstance(node, dict):\n            continue\n        inputs = node.setdefault(\"inputs\", {})\n        inputs[\"is_worker\"] = not is_master\n        if is_master:\n            inputs[\"worker_id\"] = \"\"\n        else:\n            inputs[\"worker_id\"] = f\"worker_{worker_index_map.get(participant_id, 0)}\"\n\n\ndef _override_collector_nodes(\n    prompt_copy,\n    prompt_index,\n    is_master,\n    participant_id,\n    job_id_map,\n    master_url,\n    enabled_json,\n    delegate_master,\n):\n    \"\"\"Configure DistributedCollector nodes for master or worker role.\"\"\"\n    for node_id in prompt_index.nodes_for_class(\"DistributedCollector\"):\n        node = prompt_copy.get(node_id)\n        if not isinstance(node, dict):\n            continue\n\n        if prompt_index.has_upstream(node_id, \"UltimateSDUpscaleDistributed\"):\n            node.setdefault(\"inputs\", {})[\"pass_through\"] = True\n            continue\n\n        inputs = node.setdefault(\"inputs\", {})\n        inputs[\"multi_job_id\"] = job_id_map.get(node_id, node_id)\n        inputs[\"is_worker\"] = not is_master\n        inputs[\"enabled_worker_ids\"] = enabled_json\n        if is_master:\n            
inputs[\"delegate_only\"] = bool(delegate_master)\n            inputs.pop(\"master_url\", None)\n            inputs.pop(\"worker_id\", None)\n        else:\n            inputs[\"master_url\"] = master_url\n            inputs[\"worker_id\"] = participant_id\n            inputs[\"delegate_only\"] = False\n\n\ndef _override_upscale_nodes(\n    prompt_copy,\n    prompt_index,\n    is_master,\n    participant_id,\n    job_id_map,\n    master_url,\n    enabled_json,\n):\n    \"\"\"Configure UltimateSDUpscaleDistributed nodes for master or worker role.\"\"\"\n    for node_id in prompt_index.nodes_for_class(\"UltimateSDUpscaleDistributed\"):\n        node = prompt_copy.get(node_id)\n        if not isinstance(node, dict):\n            continue\n        inputs = node.setdefault(\"inputs\", {})\n        inputs[\"multi_job_id\"] = job_id_map.get(node_id, node_id)\n        inputs[\"is_worker\"] = not is_master\n        inputs[\"enabled_worker_ids\"] = enabled_json\n        if is_master:\n            inputs.pop(\"master_url\", None)\n            inputs.pop(\"worker_id\", None)\n        else:\n            inputs[\"master_url\"] = master_url\n            inputs[\"worker_id\"] = participant_id\n\n\ndef _override_value_nodes(prompt_copy, prompt_index, is_master, participant_id, worker_index_map):\n    \"\"\"Configure DistributedValue nodes for master or worker role.\"\"\"\n    for node_id in prompt_index.nodes_for_class(\"DistributedValue\"):\n        node = prompt_copy.get(node_id)\n        if not isinstance(node, dict):\n            continue\n        inputs = node.setdefault(\"inputs\", {})\n        inputs[\"is_worker\"] = not is_master\n        if is_master:\n            inputs[\"worker_id\"] = \"\"\n        else:\n            inputs[\"worker_id\"] = f\"worker_{worker_index_map.get(participant_id, 0)}\"\n\n\ndef apply_participant_overrides(\n    prompt_copy,\n    participant_id,\n    enabled_worker_ids,\n    job_id_map,\n    master_url,\n    delegate_master,\n    
prompt_index,\n):\n    \"\"\"Return a prompt copy with hidden inputs configured for master/worker.\"\"\"\n    is_master = participant_id == \"master\"\n    worker_index_map = {wid: idx for idx, wid in enumerate(enabled_worker_ids)}\n    enabled_json = json.dumps(enabled_worker_ids)\n\n    _override_seed_nodes(prompt_copy, prompt_index, is_master, participant_id, worker_index_map)\n    _override_value_nodes(prompt_copy, prompt_index, is_master, participant_id, worker_index_map)\n    _override_collector_nodes(\n        prompt_copy,\n        prompt_index,\n        is_master,\n        participant_id,\n        job_id_map,\n        master_url,\n        enabled_json,\n        delegate_master,\n    )\n    _override_upscale_nodes(\n        prompt_copy,\n        prompt_index,\n        is_master,\n        participant_id,\n        job_id_map,\n        master_url,\n        enabled_json,\n    )\n\n    return prompt_copy\n"
  },
  {
    "path": "api/queue_orchestration.py",
    "content": "import asyncio\nimport time\nimport uuid\n\nimport server\n\nfrom ..utils.async_helpers import queue_prompt_payload\nfrom ..utils.config import load_config\nfrom ..utils.constants import (\n    ORCHESTRATION_MEDIA_SYNC_CONCURRENCY,\n    ORCHESTRATION_MEDIA_SYNC_TIMEOUT,\n    ORCHESTRATION_WORKER_PROBE_CONCURRENCY,\n    ORCHESTRATION_WORKER_PREP_CONCURRENCY,\n)\nfrom ..utils.logging import debug_log, log\nfrom ..utils.network import build_master_url, build_master_callback_url\nfrom ..utils.trace_logger import trace_debug\nfrom .schemas import parse_positive_float, parse_positive_int\nfrom .orchestration.dispatch import (\n    dispatch_worker_prompt,\n    select_active_workers,\n    select_least_busy_worker,\n)\nfrom .orchestration.media_sync import convert_paths_for_platform, fetch_worker_path_separator, sync_worker_media\nfrom .orchestration.prompt_transform import (\n    PromptIndex,\n    apply_participant_overrides,\n    find_nodes_by_class,\n    generate_job_id_map,\n    prepare_delegate_master_prompt,\n    prune_prompt_for_worker,\n)\n\n\nprompt_server = server.PromptServer.instance\n\n\ndef _generate_execution_trace_id():\n    return f\"exec_{int(time.time() * 1000)}_{uuid.uuid4().hex[:6]}\"\n\n\ndef ensure_distributed_state(server_instance=None):\n    \"\"\"Ensure prompt_server has the state used by distributed queue orchestration.\"\"\"\n    ps = server_instance or prompt_server\n    if not hasattr(ps, \"distributed_pending_jobs\"):\n        ps.distributed_pending_jobs = {}\n    if not hasattr(ps, \"distributed_jobs_lock\"):\n        ps.distributed_jobs_lock = asyncio.Lock()\n\n\n# Initialize top-level distributed queue state at module import time.\nensure_distributed_state()\n\n\nasync def _ensure_distributed_queue(job_id):\n    \"\"\"Ensure a queue exists for the given distributed job ID.\"\"\"\n    ensure_distributed_state()\n    async with prompt_server.distributed_jobs_lock:\n        if job_id not in 
prompt_server.distributed_pending_jobs:\n            prompt_server.distributed_pending_jobs[job_id] = asyncio.Queue()\n\n\ndef _resolve_enabled_workers(config, requested_ids=None):\n    \"\"\"Return a list of worker configs that should participate.\"\"\"\n    workers = []\n    for worker in config.get(\"workers\", []):\n        worker_id = str(worker.get(\"id\") or \"\").strip()\n        if not worker_id:\n            continue\n\n        if requested_ids is not None:\n            if worker_id not in requested_ids:\n                continue\n        elif not worker.get(\"enabled\", False):\n            continue\n\n        raw_port = worker.get(\"port\", worker.get(\"listen_port\", 8188))\n        try:\n            port = int(raw_port or 8188)\n        except (TypeError, ValueError):\n            log(f\"[Distributed] Invalid port '{raw_port}' for worker {worker_id}; defaulting to 8188.\")\n            port = 8188\n\n        workers.append(\n            {\n                \"id\": worker_id,\n                \"name\": worker.get(\"name\", worker_id),\n                \"host\": worker.get(\"host\"),\n                \"port\": port,\n                \"type\": worker.get(\"type\", \"local\"),\n            }\n        )\n    return workers\n\n\ndef _resolve_orchestration_limits(config):\n    \"\"\"Resolve bounded concurrency/timeouts for worker preparation pipeline.\"\"\"\n    settings = (config or {}).get(\"settings\", {}) or {}\n    worker_probe_concurrency = parse_positive_int(\n        settings.get(\"worker_probe_concurrency\"),\n        ORCHESTRATION_WORKER_PROBE_CONCURRENCY,\n    )\n    worker_prep_concurrency = parse_positive_int(\n        settings.get(\"worker_prep_concurrency\"),\n        ORCHESTRATION_WORKER_PREP_CONCURRENCY,\n    )\n    media_sync_concurrency = parse_positive_int(\n        settings.get(\"media_sync_concurrency\"),\n        ORCHESTRATION_MEDIA_SYNC_CONCURRENCY,\n    )\n    media_sync_timeout_seconds = parse_positive_float(\n        
settings.get(\"media_sync_timeout_seconds\"),\n        ORCHESTRATION_MEDIA_SYNC_TIMEOUT,\n    )\n    return (\n        worker_probe_concurrency,\n        worker_prep_concurrency,\n        media_sync_concurrency,\n        media_sync_timeout_seconds,\n    )\n\n\ndef _is_load_balance_enabled(value):\n    if isinstance(value, bool):\n        return value\n    if isinstance(value, (int, float)):\n        return bool(value)\n    if isinstance(value, str):\n        return value.strip().lower() in {\"1\", \"true\", \"yes\", \"on\"}\n    return False\n\n\ndef _prompt_requests_load_balance(prompt_index):\n    for node_id in prompt_index.nodes_for_class(\"DistributedCollector\"):\n        inputs = prompt_index.inputs_by_node.get(node_id, {})\n        if _is_load_balance_enabled(inputs.get(\"load_balance\", False)):\n            return True\n    return False\n\n\nasync def _prepare_worker_payload(\n    worker,\n    prompt_index,\n    enabled_ids,\n    job_id_map,\n    master_url,\n    config,\n    delegate_master,\n    trace_execution_id,\n    worker_prep_semaphore,\n    media_sync_semaphore,\n    media_sync_timeout_seconds,\n):\n    \"\"\"Prepare one worker prompt payload with bounded concurrency and media-sync timeout.\"\"\"\n    async with worker_prep_semaphore:\n        worker_prompt = prompt_index.copy_prompt()\n        worker_master_url = build_master_callback_url(\n            worker,\n            config=config,\n            prompt_server_instance=prompt_server,\n        )\n\n        worker_type = str(worker.get(\"type\") or \"local\").strip().lower()\n        is_remote_like = bool(worker.get(\"host\")) and worker_type != \"local\"\n        if is_remote_like:\n            path_separator = await fetch_worker_path_separator(worker, trace_execution_id=trace_execution_id)\n            if path_separator:\n                worker_prompt = convert_paths_for_platform(worker_prompt, path_separator)\n\n        worker_prompt = prune_prompt_for_worker(worker_prompt)\n        
worker_prompt = apply_participant_overrides(\n            worker_prompt,\n            worker[\"id\"],\n            enabled_ids,\n            job_id_map,\n            worker_master_url,\n            delegate_master,\n            prompt_index,\n        )\n\n        if is_remote_like:\n            async with media_sync_semaphore:\n                try:\n                    await asyncio.wait_for(\n                        sync_worker_media(worker, worker_prompt, trace_execution_id=trace_execution_id),\n                        timeout=media_sync_timeout_seconds,\n                    )\n                except asyncio.TimeoutError:\n                    trace_debug(\n                        trace_execution_id,\n                        (\n                            f\"Media sync timed out after {media_sync_timeout_seconds:.1f}s \"\n                            f\"for worker {worker.get('name')} ({worker.get('id')}); continuing dispatch.\"\n                        ),\n                    )\n\n        return worker, worker_prompt\n\n\nasync def orchestrate_distributed_execution(\n    prompt_obj,\n    workflow_meta,\n    client_id,\n    enabled_worker_ids=None,\n    delegate_master=None,\n    trace_execution_id=None,\n):\n    \"\"\"Core orchestration logic for the /distributed/queue endpoint.\n\n    Returns:\n        tuple[str, int, int, dict]: (prompt_id, number, worker_count, node_errors)\n    \"\"\"\n    ensure_distributed_state()\n    execution_trace_id = trace_execution_id or _generate_execution_trace_id()\n\n    config = load_config()\n    use_websocket = bool(config.get(\"settings\", {}).get(\"websocket_orchestration\", False))\n    master_url = build_master_url(config=config, prompt_server_instance=prompt_server)\n    (\n        worker_probe_concurrency,\n        worker_prep_concurrency,\n        media_sync_concurrency,\n        media_sync_timeout_seconds,\n    ) = _resolve_orchestration_limits(config)\n    requested_ids = enabled_worker_ids if enabled_worker_ids is not 
None else None\n    workers = _resolve_enabled_workers(config, requested_ids)\n    prompt_index = PromptIndex(prompt_obj)\n    load_balance_requested = _prompt_requests_load_balance(prompt_index)\n    trace_debug(\n        execution_trace_id,\n        (\n            f\"Orchestration start: requested_workers={len(workers)}, \"\n            f\"requested_ids={requested_ids if requested_ids is not None else 'enabled_only'}, \"\n            f\"websocket={use_websocket}, \"\n            f\"probe_concurrency={worker_probe_concurrency}, \"\n            f\"prep_concurrency={worker_prep_concurrency}, \"\n            f\"media_sync_concurrency={media_sync_concurrency}, \"\n            f\"media_sync_timeout={media_sync_timeout_seconds:.1f}s, \"\n            f\"load_balance={load_balance_requested}\"\n        ),\n    )\n\n    # Respect master delegate-only configuration\n    if delegate_master is None:\n        delegate_master = bool(config.get(\"settings\", {}).get(\"master_delegate_only\", False))\n\n    if not workers and delegate_master:\n        trace_debug(\n            execution_trace_id,\n            \"Delegate-only requested but no workers are enabled. 
Falling back to master execution.\",\n        )\n        delegate_master = False\n\n    active_workers, delegate_master = await select_active_workers(\n        workers,\n        use_websocket,\n        delegate_master,\n        trace_execution_id=execution_trace_id,\n        probe_concurrency=worker_probe_concurrency,\n    )\n\n    if load_balance_requested:\n        candidate_workers = list(active_workers)\n        if not delegate_master:\n            # Include master in load balancing only when master participation is enabled.\n            candidate_workers.append(\n                {\n                    \"id\": \"master\",\n                    \"name\": \"Master\",\n                    \"host\": master_url,\n                    \"type\": \"local\",\n                }\n            )\n\n        selected_worker = None\n        if candidate_workers:\n            selected_worker = await select_least_busy_worker(\n                candidate_workers,\n                trace_execution_id=execution_trace_id,\n                probe_concurrency=worker_probe_concurrency,\n            )\n        if selected_worker is None and candidate_workers:\n            trace_debug(\n                execution_trace_id,\n                \"Load-balance selection probe failed; using first available candidate.\",\n            )\n            selected_worker = candidate_workers[0]\n\n        if selected_worker is not None and str(selected_worker.get(\"id\")) == \"master\":\n            # Master selected as least busy; run master workload only.\n            active_workers = []\n            delegate_master = False\n            trace_debug(\n                execution_trace_id,\n                \"Load-balance selected master for execution (workers skipped).\",\n            )\n        elif selected_worker is not None:\n            active_workers = [selected_worker]\n            # Worker selected as least busy; keep master orchestrator-only for this run.\n            delegate_master = True\n           
 trace_debug(\n                execution_trace_id,\n                f\"Load-balance selected worker {selected_worker.get('id')} (master set to delegate-only).\",\n            )\n        else:\n            trace_debug(\n                execution_trace_id,\n                \"Load-balance requested but no execution candidates were available.\",\n            )\n            active_workers = []\n            delegate_master = False\n\n    enabled_ids = [worker[\"id\"] for worker in active_workers]\n\n    discovery_prefix = f\"exec_{int(time.time() * 1000)}_{uuid.uuid4().hex[:6]}\"\n    job_id_map = generate_job_id_map(prompt_index, discovery_prefix)\n\n    if not job_id_map:\n        trace_debug(execution_trace_id, \"No distributed nodes detected; queueing prompt on master only.\")\n        queue_result = await queue_prompt_payload(\n            prompt_obj,\n            workflow_meta,\n            client_id,\n            include_queue_metadata=True,\n        )\n        return (\n            queue_result[\"prompt_id\"],\n            queue_result[\"number\"],\n            0,\n            queue_result.get(\"node_errors\", {}),\n        )\n\n    for job_id in job_id_map.values():\n        await _ensure_distributed_queue(job_id)\n\n    master_prompt = prompt_index.copy_prompt()\n    master_prompt = apply_participant_overrides(\n        master_prompt,\n        \"master\",\n        enabled_ids,\n        job_id_map,\n        master_url,\n        delegate_master,\n        prompt_index,\n    )\n\n    if delegate_master:\n        collector_ids = find_nodes_by_class(master_prompt, \"DistributedCollector\")\n        upscale_nodes = find_nodes_by_class(master_prompt, \"UltimateSDUpscaleDistributed\")\n        if upscale_nodes:\n            debug_log(\n                \"Delegate-only master mode currently does not support UltimateSDUpscaleDistributed nodes; running full prompt on master.\"\n            )\n        elif not collector_ids:\n            debug_log(\n                
\"Delegate-only master mode requested but no collectors found in master prompt. Running full prompt on master.\"\n            )\n        else:\n            master_prompt = prepare_delegate_master_prompt(master_prompt, collector_ids)\n\n    if active_workers:\n        trace_debug(\n            execution_trace_id,\n            \"Active distributed workers: \"\n            + \", \".join(f\"{worker['name']} ({worker['id']})\" for worker in active_workers),\n        )\n    worker_payloads = []\n    if active_workers:\n        worker_prep_semaphore = asyncio.Semaphore(worker_prep_concurrency)\n        media_sync_semaphore = asyncio.Semaphore(media_sync_concurrency)\n        worker_payloads = await asyncio.gather(\n            *[\n                _prepare_worker_payload(\n                    worker,\n                    prompt_index,\n                    enabled_ids,\n                    job_id_map,\n                    master_url,\n                    config,\n                    delegate_master,\n                    execution_trace_id,\n                    worker_prep_semaphore,\n                    media_sync_semaphore,\n                    media_sync_timeout_seconds,\n                )\n                for worker in active_workers\n            ]\n        )\n\n    if worker_payloads:\n        await asyncio.gather(\n            *[\n                dispatch_worker_prompt(\n                    worker,\n                    wprompt,\n                    workflow_meta,\n                    client_id,\n                    use_websocket=use_websocket,\n                    trace_execution_id=execution_trace_id,\n                )\n                for worker, wprompt in worker_payloads\n            ]\n        )\n\n    queue_result = await queue_prompt_payload(\n        master_prompt,\n        workflow_meta,\n        client_id,\n        include_queue_metadata=True,\n    )\n    prompt_id = queue_result[\"prompt_id\"]\n    prompt_number = queue_result[\"number\"]\n    node_errors = 
queue_result.get(\"node_errors\", {})\n    trace_debug(\n        execution_trace_id,\n        f\"Orchestration complete: prompt_id={prompt_id}, dispatched_workers={len(worker_payloads)}, delegate_master={delegate_master}\",\n    )\n    return prompt_id, prompt_number, len(worker_payloads), node_errors\n"
  },
  {
    "path": "api/queue_request.py",
    "content": "from dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional\n\n\n@dataclass(frozen=True)\nclass QueueRequestPayload:\n    prompt: Dict[str, Any]\n    workflow_meta: Any\n    client_id: str\n    delegate_master: Optional[bool]\n    enabled_worker_ids: List[str]\n    auto_prepare: bool\n    trace_execution_id: Optional[str]\n\n\ndef parse_queue_request_payload(data: Any) -> QueueRequestPayload:\n    \"\"\"Parse and validate /distributed/queue payload into a normalized shape.\"\"\"\n    if not isinstance(data, dict):\n        raise ValueError(\"Expected a JSON object body\")\n\n    auto_prepare_raw = data.get(\"auto_prepare\", True)\n    if not isinstance(auto_prepare_raw, bool):\n        raise ValueError(\"auto_prepare must be a boolean when provided\")\n    auto_prepare = auto_prepare_raw\n\n    prompt = data.get(\"prompt\")\n    # Auto-prepare is always on server-side; keep the field for wire compatibility.\n    if prompt is None:\n        workflow_payload = data.get(\"workflow\")\n        if isinstance(workflow_payload, dict):\n            candidate_prompt = workflow_payload.get(\"prompt\")\n            if isinstance(candidate_prompt, dict):\n                prompt = candidate_prompt\n\n    if not isinstance(prompt, dict):\n        raise ValueError(\"Field 'prompt' must be an object\")\n\n    enabled_ids_raw = data.get(\"enabled_worker_ids\")\n    workers_field = data.get(\"workers\")\n    if enabled_ids_raw is None and workers_field is not None:\n        if not isinstance(workers_field, list):\n            raise ValueError(\"Field 'workers' must be a list when provided\")\n        enabled_ids_raw = []\n        for entry in workers_field:\n            worker_id = entry.get(\"id\") if isinstance(entry, dict) else entry\n            if worker_id is not None:\n                enabled_ids_raw.append(str(worker_id))\n\n    if enabled_ids_raw is None:\n        raise ValueError(\"enabled_worker_ids required\")\n    else:\n        if 
not isinstance(enabled_ids_raw, list):\n            raise ValueError(\"enabled_worker_ids must be a list of worker IDs\")\n        enabled_ids = [str(worker_id).strip() for worker_id in enabled_ids_raw if str(worker_id).strip()]\n\n    delegate_master = data.get(\"delegate_master\")\n    if delegate_master is not None and not isinstance(delegate_master, bool):\n        raise ValueError(\"delegate_master must be a boolean when provided\")\n\n    client_id = data.get(\"client_id\")\n    if not isinstance(client_id, str) or not client_id.strip():\n        raise ValueError(\"client_id required\")\n    client_id = client_id.strip()\n\n    trace_execution_id = data.get(\"trace_execution_id\")\n    if trace_execution_id is not None:\n        if not isinstance(trace_execution_id, str):\n            raise ValueError(\"trace_execution_id must be a string when provided\")\n        trace_execution_id = trace_execution_id.strip() or None\n\n    return QueueRequestPayload(\n        prompt=prompt,\n        workflow_meta=data.get(\"workflow\"),\n        client_id=client_id,\n        delegate_master=delegate_master,\n        enabled_worker_ids=enabled_ids,\n        auto_prepare=auto_prepare,\n        trace_execution_id=trace_execution_id,\n    )\n"
  },
  {
    "path": "api/schemas.py",
    "content": "def require_fields(data: dict, *fields) -> list[str]:\n    \"\"\"Return field names that are missing or empty in a JSON object.\"\"\"\n    if not isinstance(data, dict):\n        return list(fields)\n\n    missing = []\n    for field in fields:\n        if field not in data:\n            missing.append(field)\n            continue\n        value = data.get(field)\n        if value is None:\n            missing.append(field)\n            continue\n        if isinstance(value, str) and not value.strip():\n            missing.append(field)\n\n    return missing\n\n\ndef validate_worker_id(worker_id: str, config: dict) -> bool:\n    \"\"\"Return True when worker_id exists in config['workers'].\"\"\"\n    worker_id_str = str(worker_id)\n    workers = (config or {}).get(\"workers\", [])\n    return any(str(worker.get(\"id\")) == worker_id_str for worker in workers)\n\n\ndef validate_positive_int(value, field_name: str) -> str | None:\n    \"\"\"Validate positive integers and return an error string when invalid.\"\"\"\n    try:\n        parsed = int(value)\n    except (TypeError, ValueError):\n        return f\"Field '{field_name}' must be a positive integer.\"\n    if parsed <= 0:\n        return f\"Field '{field_name}' must be a positive integer.\"\n    return None\n\n\ndef parse_positive_int(value, default: int) -> int:\n    \"\"\"Parse value as positive int, returning default on failure.\"\"\"\n    try:\n        parsed = int(value)\n    except (TypeError, ValueError):\n        return max(1, int(default))\n    return max(1, parsed)\n\n\ndef parse_positive_float(value, default: float) -> float:\n    \"\"\"Parse value as positive float, returning default on failure.\"\"\"\n    try:\n        parsed = float(value)\n    except (TypeError, ValueError):\n        return max(0.0, float(default))\n    return max(0.0, parsed)\n"
  },
  {
    "path": "api/tunnel_routes.py",
    "content": "from aiohttp import web\nimport server\n\nfrom ..utils.cloudflare import cloudflare_tunnel_manager\nfrom ..utils.config import load_config\nfrom ..utils.logging import debug_log, log\nfrom ..utils.network import handle_api_error\n\n\n@server.PromptServer.instance.routes.get(\"/distributed/tunnel/status\")\nasync def tunnel_status_endpoint(request):\n    \"\"\"Return Cloudflare tunnel status and last known details.\"\"\"\n    try:\n        status = cloudflare_tunnel_manager.get_status()\n        config = load_config()\n        master_host = (config.get(\"master\") or {}).get(\"host\")\n        return web.json_response({\n            \"status\": \"success\",\n            \"tunnel\": status,\n            \"master_host\": master_host\n        })\n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n@server.PromptServer.instance.routes.post(\"/distributed/tunnel/start\")\nasync def tunnel_start_endpoint(request):\n    \"\"\"Start a Cloudflare tunnel pointing at the current ComfyUI server.\"\"\"\n    try:\n        result = await cloudflare_tunnel_manager.start_tunnel()\n        config = load_config()\n        return web.json_response({\n            \"status\": \"success\",\n            \"tunnel\": result,\n            \"master_host\": (config.get(\"master\") or {}).get(\"host\")\n        })\n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n@server.PromptServer.instance.routes.post(\"/distributed/tunnel/stop\")\nasync def tunnel_stop_endpoint(request):\n    \"\"\"Stop the managed Cloudflare tunnel if running.\"\"\"\n    try:\n        result = await cloudflare_tunnel_manager.stop_tunnel()\n        config = load_config()\n        return web.json_response({\n            \"status\": \"success\",\n            \"tunnel\": result,\n            \"master_host\": (config.get(\"master\") or {}).get(\"host\")\n        })\n    except Exception as e:\n        return await handle_api_error(request, e, 
500)\n"
  },
  {
    "path": "api/usdu_routes.py",
    "content": "import asyncio\nimport io\nimport time\n\nfrom aiohttp import web\nfrom PIL import Image\nimport server\n\nfrom ..upscale.job_models import BaseJobState, ImageJobState, TileJobState\nfrom ..upscale.job_store import MAX_PAYLOAD_SIZE, ensure_tile_jobs_initialized\nfrom ..upscale.payload_parsers import _parse_tiles_from_form\nfrom ..utils.logging import debug_log\nfrom ..utils.network import handle_api_error\n\n\n@server.PromptServer.instance.routes.post(\"/distributed/heartbeat\")\nasync def heartbeat_endpoint(request):\n    try:\n        data = await request.json()\n        worker_id = data.get('worker_id')\n        multi_job_id = data.get('multi_job_id')\n\n        if not worker_id or not multi_job_id:\n            return await handle_api_error(request, \"Missing worker_id or multi_job_id\", 400)\n\n        prompt_server = ensure_tile_jobs_initialized()\n        async with prompt_server.distributed_tile_jobs_lock:\n            if multi_job_id in prompt_server.distributed_pending_tile_jobs:\n                job_data = prompt_server.distributed_pending_tile_jobs[multi_job_id]\n                if isinstance(job_data, BaseJobState):\n                    job_data.worker_status[worker_id] = time.time()\n                    debug_log(f\"Heartbeat from worker {worker_id}\")\n                    return web.json_response({\"status\": \"success\"})\n                return await handle_api_error(request, \"Worker status tracking not available\", 400)\n            return await handle_api_error(request, \"Job not found\", 404)\n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n\n@server.PromptServer.instance.routes.post(\"/distributed/submit_tiles\")\nasync def submit_tiles_endpoint(request):\n    \"\"\"Endpoint for workers to submit processed tiles in static mode.\"\"\"\n    try:\n        content_length = request.headers.get('content-length')\n        if content_length and int(content_length) > MAX_PAYLOAD_SIZE:\n            
return await handle_api_error(request, f\"Payload too large: {content_length} bytes\", 413)\n\n        data = await request.post()\n        multi_job_id = data.get('multi_job_id')\n        worker_id = data.get('worker_id')\n        is_last = data.get('is_last', 'False').lower() == 'true'\n\n        if multi_job_id is None or worker_id is None:\n            return await handle_api_error(request, \"Missing multi_job_id or worker_id\", 400)\n\n        prompt_server = ensure_tile_jobs_initialized()\n\n        batch_size = int(data.get('batch_size', 0))\n\n        # Handle completion signal\n        if batch_size == 0 and is_last:\n            async with prompt_server.distributed_tile_jobs_lock:\n                if multi_job_id in prompt_server.distributed_pending_tile_jobs:\n                    job_data = prompt_server.distributed_pending_tile_jobs[multi_job_id]\n                    if not isinstance(job_data, TileJobState):\n                        return await handle_api_error(request, \"Job not configured for tile submissions\", 400)\n                    await job_data.queue.put({\n                        'worker_id': worker_id,\n                        'is_last': True,\n                        'tiles': [],\n                    })\n                    debug_log(f\"Received completion signal from worker {worker_id}\")\n                    return web.json_response({\"status\": \"success\"})\n\n        try:\n            tiles = _parse_tiles_from_form(data)\n        except ValueError as e:\n            return await handle_api_error(request, str(e), 400)\n\n        # Submit tiles to queue\n        async with prompt_server.distributed_tile_jobs_lock:\n            if multi_job_id in prompt_server.distributed_pending_tile_jobs:\n                job_data = prompt_server.distributed_pending_tile_jobs[multi_job_id]\n                if not isinstance(job_data, TileJobState):\n                    return await handle_api_error(request, \"Job not configured for tile submissions\", 
400)\n\n                q = job_data.queue\n                if batch_size > 0 or len(tiles) > 0:\n                    await q.put({\n                        'worker_id': worker_id,\n                        'tiles': tiles,\n                        'is_last': is_last,\n                    })\n                    debug_log(f\"Received {len(tiles)} tiles from worker {worker_id} (is_last={is_last})\")\n                else:\n                    await q.put({\n                        'worker_id': worker_id,\n                        'is_last': True,\n                        'tiles': [],\n                    })\n\n                return web.json_response({\"status\": \"success\"})\n            return await handle_api_error(request, \"Job not found\", 404)\n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n\n@server.PromptServer.instance.routes.post(\"/distributed/submit_image\")\nasync def submit_image_endpoint(request):\n    \"\"\"Endpoint for workers to submit processed images in dynamic mode.\"\"\"\n    try:\n        content_length = request.headers.get('content-length')\n        if content_length and int(content_length) > MAX_PAYLOAD_SIZE:\n            return await handle_api_error(request, f\"Payload too large: {content_length} bytes\", 413)\n\n        data = await request.post()\n        multi_job_id = data.get('multi_job_id')\n        worker_id = data.get('worker_id')\n        is_last = data.get('is_last', 'False').lower() == 'true'\n\n        if multi_job_id is None or worker_id is None:\n            return await handle_api_error(request, \"Missing multi_job_id or worker_id\", 400)\n\n        prompt_server = ensure_tile_jobs_initialized()\n\n        # Handle image submission\n        if 'full_image' in data and 'image_idx' in data:\n            image_idx = int(data.get('image_idx'))\n            img_data = data['full_image'].file.read()\n            img = Image.open(io.BytesIO(img_data)).convert(\"RGB\")\n\n            
debug_log(f\"Received full image {image_idx} from worker {worker_id}\")\n\n            async with prompt_server.distributed_tile_jobs_lock:\n                if multi_job_id in prompt_server.distributed_pending_tile_jobs:\n                    job_data = prompt_server.distributed_pending_tile_jobs[multi_job_id]\n                    if not isinstance(job_data, ImageJobState):\n                        return await handle_api_error(request, \"Job not configured for image submissions\", 400)\n                    await job_data.queue.put({\n                        'worker_id': worker_id,\n                        'image_idx': image_idx,\n                        'image': img,\n                        'is_last': is_last,\n                    })\n                    return web.json_response({\"status\": \"success\"})\n\n        # Handle completion signal (no image data)\n        elif is_last:\n            async with prompt_server.distributed_tile_jobs_lock:\n                if multi_job_id in prompt_server.distributed_pending_tile_jobs:\n                    job_data = prompt_server.distributed_pending_tile_jobs[multi_job_id]\n                    if not isinstance(job_data, ImageJobState):\n                        return await handle_api_error(request, \"Job not configured for image submissions\", 400)\n                    await job_data.queue.put({\n                        'worker_id': worker_id,\n                        'is_last': True,\n                    })\n                    debug_log(f\"Received completion signal from worker {worker_id}\")\n                    return web.json_response({\"status\": \"success\"})\n        else:\n            return await handle_api_error(request, \"Missing image data or invalid request\", 400)\n\n        return await handle_api_error(request, \"Job not found\", 404)\n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n\n@server.PromptServer.instance.routes.post(\"/distributed/request_image\")\nasync def 
request_image_endpoint(request):\n    \"\"\"Endpoint for workers to request tasks (images in dynamic mode, tiles in static mode).\"\"\"\n    try:\n        data = await request.json()\n        worker_id = data.get('worker_id')\n        multi_job_id = data.get('multi_job_id')\n\n        if not worker_id or not multi_job_id:\n            return await handle_api_error(request, \"Missing worker_id or multi_job_id\", 400)\n\n        prompt_server = ensure_tile_jobs_initialized()\n        async with prompt_server.distributed_tile_jobs_lock:\n            if multi_job_id in prompt_server.distributed_pending_tile_jobs:\n                job_data = prompt_server.distributed_pending_tile_jobs[multi_job_id]\n                if not isinstance(job_data, BaseJobState):\n                    return await handle_api_error(request, \"Invalid job data structure\", 500)\n\n                mode = job_data.mode\n                if isinstance(job_data, ImageJobState):\n                    pending_queue = job_data.pending_images\n                elif isinstance(job_data, TileJobState):\n                    pending_queue = job_data.pending_tasks\n                else:\n                    return await handle_api_error(request, \"Invalid job configuration\", 400)\n\n                try:\n                    task_idx = await asyncio.wait_for(pending_queue.get(), timeout=0.1)\n                    job_data.assigned_to_workers.setdefault(worker_id, []).append(task_idx)\n                    job_data.worker_status[worker_id] = time.time()\n                    remaining = pending_queue.qsize()\n\n                    if mode == 'dynamic':\n                        debug_log(f\"UltimateSDUpscale API - Assigned image {task_idx} to worker {worker_id}\")\n                        return web.json_response({\"image_idx\": task_idx, \"estimated_remaining\": remaining})\n                    debug_log(f\"UltimateSDUpscale API - Assigned tile {task_idx} to worker {worker_id}\")\n                    return 
web.json_response({\n                        \"tile_idx\": task_idx,\n                        \"estimated_remaining\": remaining,\n                        \"batched_static\": job_data.batched_static,\n                    })\n                except asyncio.TimeoutError:\n                    if mode == 'dynamic':\n                        return web.json_response({\"image_idx\": None})\n                    return web.json_response({\"tile_idx\": None})\n            return await handle_api_error(request, \"Job not found\", 404)\n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n\n@server.PromptServer.instance.routes.get(\"/distributed/job_status\")\nasync def job_status_endpoint(request):\n    \"\"\"Endpoint to check if a job is ready.\"\"\"\n    multi_job_id = request.query.get('multi_job_id')\n    if not multi_job_id:\n        return web.json_response({\"ready\": False})\n    prompt_server = ensure_tile_jobs_initialized()\n    async with prompt_server.distributed_tile_jobs_lock:\n        job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n        ready = bool(isinstance(job_data, BaseJobState) and job_data.queue is not None)\n        return web.json_response({\"ready\": ready})\n"
  },
  {
    "path": "api/worker_routes.py",
    "content": "import json\nimport asyncio\nimport os\nimport time\nimport platform\nimport subprocess\nimport socket\n\nimport torch\nimport aiohttp\nfrom aiohttp import web\nimport server\n\nfrom ..utils.config import load_config\nfrom ..utils.logging import debug_log, log\nfrom ..utils.network import (\n    build_worker_url,\n    get_client_session,\n    handle_api_error,\n    normalize_host,\n    probe_worker,\n)\nfrom ..utils.constants import CHUNK_SIZE\nfrom ..workers import get_worker_manager\nfrom .schemas import require_fields, validate_worker_id\nfrom ..workers.detection import (\n    get_machine_id,\n    is_docker_environment,\n    is_runpod_environment,\n)\ntry:\n    from ..utils.async_helpers import PromptValidationError, queue_prompt_payload\nexcept ImportError:\n    from ..utils.async_helpers import queue_prompt_payload\n\n    class PromptValidationError(RuntimeError):\n        def __init__(self, message, validation_error=None, node_errors=None):\n            super().__init__(str(message))\n            self.validation_error = validation_error if isinstance(validation_error, dict) else {}\n            self.node_errors = node_errors if isinstance(node_errors, dict) else {}\n\n\n@server.PromptServer.instance.routes.get(\"/distributed/worker_ws\")\nasync def worker_ws_endpoint(request):\n    \"\"\"WebSocket endpoint for worker prompt dispatch.\"\"\"\n    ws = web.WebSocketResponse(heartbeat=30)\n    await ws.prepare(request)\n\n    async for msg in ws:\n        if msg.type == aiohttp.WSMsgType.TEXT:\n            try:\n                data = json.loads(msg.data or \"{}\")\n            except json.JSONDecodeError:\n                await ws.send_json({\n                    \"type\": \"dispatch_ack\",\n                    \"request_id\": None,\n                    \"ok\": False,\n                    \"error\": \"Invalid JSON payload.\",\n                })\n                continue\n\n            if data.get(\"type\") != \"dispatch_prompt\":\n               
 await ws.send_json({\n                    \"type\": \"dispatch_ack\",\n                    \"request_id\": data.get(\"request_id\"),\n                    \"ok\": False,\n                    \"error\": \"Unsupported websocket message type.\",\n                })\n                continue\n\n            prompt = data.get(\"prompt\")\n            if not isinstance(prompt, dict):\n                await ws.send_json({\n                    \"type\": \"dispatch_ack\",\n                    \"request_id\": data.get(\"request_id\"),\n                    \"ok\": False,\n                    \"error\": \"Field 'prompt' must be an object.\",\n                })\n                continue\n\n            try:\n                prompt_id = await queue_prompt_payload(\n                    prompt,\n                    workflow_meta=data.get(\"workflow\"),\n                    client_id=data.get(\"client_id\"),\n                )\n                await ws.send_json({\n                    \"type\": \"dispatch_ack\",\n                    \"request_id\": data.get(\"request_id\"),\n                    \"ok\": True,\n                    \"prompt_id\": prompt_id,\n                })\n            except PromptValidationError as exc:\n                await ws.send_json({\n                    \"type\": \"dispatch_ack\",\n                    \"request_id\": data.get(\"request_id\"),\n                    \"ok\": False,\n                    \"error\": str(exc),\n                    \"validation_error\": exc.validation_error,\n                    \"node_errors\": exc.node_errors,\n                })\n            except Exception as exc:\n                await ws.send_json({\n                    \"type\": \"dispatch_ack\",\n                    \"request_id\": data.get(\"request_id\"),\n                    \"ok\": False,\n                    \"error\": str(exc),\n                })\n        elif msg.type == aiohttp.WSMsgType.ERROR:\n            log(f\"[Distributed] Worker websocket error: 
{ws.exception()}\")\n\n    return ws\n\n\n@server.PromptServer.instance.routes.post(\"/distributed/worker/clear_launching\")\nasync def clear_launching_state(request):\n    \"\"\"Clear the launching flag when worker is confirmed running.\"\"\"\n    try:\n        wm = get_worker_manager()\n        data = await request.json()\n        missing = require_fields(data, \"worker_id\")\n        if missing:\n            return await handle_api_error(request, f\"Missing required field(s): {', '.join(missing)}\", 400)\n\n        worker_id = str(data.get(\"worker_id\")).strip()\n        config = load_config()\n        if not validate_worker_id(worker_id, config):\n            return await handle_api_error(request, f\"Worker {worker_id} not found\", 404)\n        \n        # Clear launching flag in managed processes\n        if worker_id in wm.processes:\n            if 'launching' in wm.processes[worker_id]:\n                del wm.processes[worker_id]['launching']\n                wm.save_processes()\n                debug_log(f\"Cleared launching state for worker {worker_id}\")\n        \n        return web.json_response({\"status\": \"success\"})\n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n\ndef get_network_ips():\n    \"\"\"Get all network IPs, trying multiple methods.\"\"\"\n    ips = []\n    hostname = socket.gethostname()\n\n    # Method 1: Try socket.getaddrinfo\n    try:\n        addr_info = socket.getaddrinfo(hostname, None)\n        for info in addr_info:\n            ip = info[4][0]\n            if ip and ip not in ips and not ip.startswith('::'):  # Skip IPv6 for now\n                ips.append(ip)\n    except (socket.gaierror, OSError):\n        pass\n\n    # Method 2: Try to connect to external server and get local IP\n    try:\n        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n        s.connect((\"8.8.8.8\", 80))  # Google DNS\n        local_ip = s.getsockname()[0]\n        s.close()\n        if local_ip 
not in ips:\n            ips.append(local_ip)\n    except (OSError, socket.error):\n        pass\n\n    # Method 3: Platform-specific commands\n    try:\n        if platform.system() == \"Windows\":\n            # Windows ipconfig\n            result = subprocess.run([\"ipconfig\"], capture_output=True, text=True)\n            lines = result.stdout.split('\\n')\n            for i, line in enumerate(lines):\n                if 'IPv4' in line and i + 1 < len(lines):\n                    ip = lines[i].split(':')[-1].strip()\n                    if ip and ip not in ips:\n                        ips.append(ip)\n        else:\n            # Unix/Linux/Mac ifconfig or ip addr\n            try:\n                result = subprocess.run([\"ip\", \"addr\"], capture_output=True, text=True)\n            except (FileNotFoundError, OSError):\n                try:\n                    result = subprocess.run([\"ifconfig\"], capture_output=True, text=True)\n                except (FileNotFoundError, OSError):\n                    result = None\n\n            import re\n            ip_pattern = re.compile(r'inet\\s+(\\d+\\.\\d+\\.\\d+\\.\\d+)')\n            if result is not None:\n                for match in ip_pattern.finditer(result.stdout):\n                    ip = match.group(1)\n                    if ip and ip not in ips:\n                        ips.append(ip)\n    except (OSError, subprocess.SubprocessError):\n        pass\n\n    return ips\n\n\ndef get_recommended_ip(ips):\n    \"\"\"Choose the best IP for master-worker communication.\"\"\"\n    # Priority order:\n    # 1. Private network ranges (192.168.x.x, 10.x.x.x, 172.16-31.x.x)\n    # 2. Other non-localhost IPs\n    # 3. 
Localhost as last resort\n\n    private_ips = []\n    public_ips = []\n\n    for ip in ips:\n        if ip.startswith('127.') or ip == 'localhost':\n            continue\n        elif (ip.startswith('192.168.')\n                or ip.startswith('10.')\n                or (ip.startswith('172.') and 16 <= int(ip.split('.')[1]) <= 31)):\n            private_ips.append(ip)\n        else:\n            public_ips.append(ip)\n\n    # Prefer private IPs\n    if private_ips:\n        # Prefer 192.168 range as it's most common\n        for ip in private_ips:\n            if ip.startswith('192.168.'):\n                return ip\n        return private_ips[0]\n    elif public_ips:\n        return public_ips[0]\n    elif ips:\n        return ips[0]\n    else:\n        return None\n\n\ndef _get_cuda_info():\n    \"\"\"Detect CUDA device index and total physical GPU count.\n\n    Returns (cuda_device, cuda_device_count, physical_device_count).\n    All three are 0/None if CUDA is unavailable.\n    \"\"\"\n    if not torch.cuda.is_available():\n        return None, 0, 0\n    try:\n        cuda_device_count = torch.cuda.device_count()\n        cuda_visible = os.environ.get('CUDA_VISIBLE_DEVICES', '')\n        if cuda_visible and cuda_visible.strip():\n            visible_devices = [int(d.strip()) for d in cuda_visible.split(',') if d.strip().isdigit()]\n            if visible_devices:\n                cuda_device = visible_devices[0]\n                try:\n                    result = subprocess.run(\n                        ['nvidia-smi', '--query-gpu=name', '--format=csv,noheader'],\n                        capture_output=True,\n                        text=True,\n                        timeout=5,\n                    )\n                    physical_device_count = (\n                        len(result.stdout.strip().split('\\n'))\n                        if result.returncode == 0\n                        else max(visible_devices) + 1\n                    )\n                
except (FileNotFoundError, OSError, subprocess.SubprocessError):\n                    physical_device_count = max(visible_devices) + 1\n                return cuda_device, cuda_device_count, physical_device_count\n            else:\n                return 0, cuda_device_count, cuda_device_count\n        else:\n            cuda_device = torch.cuda.current_device()\n            return cuda_device, cuda_device_count, cuda_device_count\n    except Exception as e:\n        debug_log(f\"CUDA detection error: {e}\")\n        return None, 0, 0\n\n\ndef _collect_network_info_sync():\n    \"\"\"Collect network/cuda info in a worker thread to avoid blocking route handlers.\"\"\"\n    cuda_device, cuda_device_count, physical_device_count = _get_cuda_info()\n    hostname = socket.gethostname()\n    all_ips = get_network_ips()\n    recommended_ip = get_recommended_ip(all_ips)\n    return {\n        \"hostname\": hostname,\n        \"all_ips\": all_ips,\n        \"recommended_ip\": recommended_ip,\n        \"cuda_device\": cuda_device,\n        \"cuda_device_count\": physical_device_count if physical_device_count > 0 else cuda_device_count,\n    }\n\n\ndef _read_worker_log_sync(log_file, lines_to_read):\n    \"\"\"Read worker log content from disk in a threadpool worker.\"\"\"\n    file_size = os.path.getsize(log_file)\n\n    with open(log_file, 'r', encoding='utf-8', errors='replace') as f:\n        if lines_to_read > 0 and file_size > 1024 * 1024:\n            # Read last N lines efficiently from end of file.\n            lines = []\n            f.seek(0, 2)\n            file_length = f.tell()\n            chunk_size = min(CHUNK_SIZE, file_length)\n\n            while len(lines) < lines_to_read and f.tell() > 0:\n                current_pos = max(0, f.tell() - chunk_size)\n                f.seek(current_pos)\n                chunk = f.read(chunk_size)\n                chunk_lines = chunk.splitlines()\n                if current_pos > 0:\n                    chunk_lines = 
chunk_lines[1:]\n                lines = chunk_lines + lines\n                f.seek(current_pos)\n\n            content = '\\n'.join(lines[-lines_to_read:])\n            truncated = len(lines) > lines_to_read\n        else:\n            content = f.read()\n            truncated = False\n\n    return {\n        \"content\": content,\n        \"file_size\": file_size,\n        \"truncated\": truncated,\n        \"lines_shown\": lines_to_read if truncated else content.count('\\n') + 1,\n    }\n\n\ndef _parse_positive_int_query(value, default, minimum=1, maximum=10000):\n    \"\"\"Parse bounded positive integer query params with sane fallback.\"\"\"\n    try:\n        parsed = int(value)\n    except (TypeError, ValueError):\n        return default\n    parsed = max(minimum, parsed)\n    if maximum is not None:\n        parsed = min(maximum, parsed)\n    return parsed\n\n\ndef _find_worker_by_id(config, worker_id):\n    worker_id_str = str(worker_id).strip()\n    for worker in config.get(\"workers\", []):\n        if str(worker.get(\"id\")).strip() == worker_id_str:\n            return worker\n    return None\n\n\n@server.PromptServer.instance.routes.get(\"/distributed/local_log\")\nasync def get_local_log_endpoint(request):\n    \"\"\"Return this instance's in-memory ComfyUI log buffer.\"\"\"\n    try:\n        from app.logger import get_logs\n    except Exception as e:\n        return await handle_api_error(request, f\"Failed to import app.logger: {e}\", 500)\n\n    try:\n        lines_to_read = _parse_positive_int_query(request.query.get(\"lines\"), default=300, maximum=3000)\n        logs = get_logs()\n        if logs is None:\n            return web.json_response(\n                {\n                    \"status\": \"success\",\n                    \"content\": \"\",\n                    \"entries\": 0,\n                    \"source\": \"memory\",\n                    \"truncated\": False,\n                    \"lines_shown\": 0,\n                }\n            
)\n\n        entries = list(logs)\n        selected_entries = entries[-lines_to_read:]\n        content = \"\".join(\n            entry.get(\"m\", \"\") if isinstance(entry, dict) else str(entry)\n            for entry in selected_entries\n        )\n        lines_shown = content.count(\"\\n\") + (1 if content else 0)\n\n        return web.json_response(\n            {\n                \"status\": \"success\",\n                \"content\": content,\n                \"entries\": len(selected_entries),\n                \"source\": \"memory\",\n                \"truncated\": len(entries) > len(selected_entries),\n                \"lines_shown\": lines_shown,\n            }\n        )\n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n\n@server.PromptServer.instance.routes.get(\"/distributed/network_info\")\nasync def get_network_info_endpoint(request):\n    \"\"\"Get network interfaces and recommend best IP for master.\"\"\"\n    try:\n        loop = asyncio.get_running_loop()\n        info = await loop.run_in_executor(None, _collect_network_info_sync)\n        \n        return web.json_response({\n            \"status\": \"success\",\n            **info,\n            \"message\": \"Auto-detected network configuration\"\n        })\n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n@server.PromptServer.instance.routes.get(\"/distributed/system_info\")\nasync def get_system_info_endpoint(request):\n    \"\"\"Get system information including machine ID for local worker detection.\"\"\"\n    try:\n        import socket\n        \n        return web.json_response({\n            \"status\": \"success\",\n            \"hostname\": socket.gethostname(),\n            \"machine_id\": get_machine_id(),\n            \"platform\": {\n                \"system\": platform.system(),\n                \"machine\": platform.machine(),\n                \"node\": platform.node(),\n                \"path_separator\": 
os.sep,  # Add path separator\n                \"os_name\": os.name  # Add OS name (posix, nt, etc.)\n            },\n            \"is_docker\": is_docker_environment(),\n            \"is_runpod\": is_runpod_environment(),\n            \"runpod_pod_id\": os.environ.get('RUNPOD_POD_ID')\n        })\n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n@server.PromptServer.instance.routes.post(\"/distributed/launch_worker\")\nasync def launch_worker_endpoint(request):\n    \"\"\"Launch a worker process from the UI.\"\"\"\n    try:\n        wm = get_worker_manager()\n        data = await request.json()\n        missing = require_fields(data, \"worker_id\")\n        if missing:\n            return await handle_api_error(request, f\"Missing required field(s): {', '.join(missing)}\", 400)\n\n        worker_id = str(data.get(\"worker_id\")).strip()\n        \n        # Find worker config\n        config = load_config()\n        if not validate_worker_id(worker_id, config):\n            return await handle_api_error(request, f\"Worker {worker_id} not found\", 404)\n        worker = next((w for w in config.get(\"workers\", []) if str(w.get(\"id\")) == worker_id), None)\n        if not worker:\n            return await handle_api_error(request, f\"Worker {worker_id} not found\", 404)\n        \n        # Ensure consistent string ID\n        worker_id_str = worker_id\n        \n        # Check if already running (managed by this instance)\n        if worker_id_str in wm.processes:\n            proc_info = wm.processes[worker_id_str]\n            process = proc_info.get('process')\n            \n            # Check if still running\n            is_running = False\n            if process:\n                is_running = process.poll() is None\n            else:\n                # Restored process without subprocess object\n                is_running = wm._is_process_running(proc_info['pid'])\n            \n            if is_running:\n              
  return await handle_api_error(request, \"Worker already running (managed by UI)\", 409)\n            else:\n                # Process is dead, remove it\n                del wm.processes[worker_id_str]\n                wm.save_processes()\n        \n        # Launch the worker\n        try:\n            loop = asyncio.get_running_loop()\n            pid = await loop.run_in_executor(None, wm.launch_worker, worker)\n            log_file = wm.processes[worker_id_str].get('log_file')\n            return web.json_response({\n                \"status\": \"success\",\n                \"pid\": pid,\n                \"message\": f\"Worker {worker['name']} launched\",\n                \"log_file\": log_file\n            })\n        except Exception as e:\n            return await handle_api_error(request, f\"Failed to launch worker: {str(e)}\", 500)\n            \n    except Exception as e:\n        return await handle_api_error(request, e, 400)\n\n\n@server.PromptServer.instance.routes.post(\"/distributed/stop_worker\")\nasync def stop_worker_endpoint(request):\n    \"\"\"Stop a worker process that was launched from the UI.\"\"\"\n    try:\n        wm = get_worker_manager()\n        data = await request.json()\n        missing = require_fields(data, \"worker_id\")\n        if missing:\n            return await handle_api_error(request, f\"Missing required field(s): {', '.join(missing)}\", 400)\n\n        worker_id = str(data.get(\"worker_id\")).strip()\n        config = load_config()\n        if not validate_worker_id(worker_id, config):\n            return await handle_api_error(request, f\"Worker {worker_id} not found\", 404)\n\n        success, message = wm.stop_worker(worker_id)\n        \n        if success:\n            return web.json_response({\"status\": \"success\", \"message\": message})\n        else:\n            return await handle_api_error(\n                request,\n                message,\n                404 if \"not managed\" in message else 409,\n    
        )\n            \n    except Exception as e:\n        return await handle_api_error(request, e, 400)\n\n\n@server.PromptServer.instance.routes.get(\"/distributed/managed_workers\")\nasync def get_managed_workers_endpoint(request):\n    \"\"\"Get list of workers managed by this UI instance.\"\"\"\n    try:\n        managed = get_worker_manager().get_managed_workers()\n        return web.json_response({\n            \"status\": \"success\",\n            \"managed_workers\": managed\n        })\n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n\n@server.PromptServer.instance.routes.get(\"/distributed/local-worker-status\")\nasync def get_local_worker_status_endpoint(request):\n    \"\"\"Check status of all local workers (localhost/no host specified).\"\"\"\n    try:\n        config = load_config()\n        worker_statuses = {}\n        \n        for worker in config.get(\"workers\", []):\n            # Only check local workers\n            host = normalize_host(worker.get(\"host\")) or \"\"\n            if not host or host in [\"localhost\", \"127.0.0.1\"]:\n                worker_id = worker[\"id\"]\n                port = worker[\"port\"]\n                \n                # Check if worker is enabled\n                if not worker.get(\"enabled\", False):\n                    worker_statuses[worker_id] = {\n                        \"online\": False,\n                        \"enabled\": False,\n                        \"processing\": False,\n                        \"queue_count\": 0\n                    }\n                    continue\n                \n                # Try to connect to worker\n                try:\n                    worker_url = build_worker_url(worker)\n                    data = await probe_worker(worker_url, timeout=2.0)\n                    if data is None:\n                        worker_statuses[worker_id] = {\n                            \"online\": False,\n                            
\"enabled\": True,\n                            \"processing\": False,\n                            \"queue_count\": 0,\n                            \"error\": \"Unavailable\",\n                        }\n                        continue\n                    queue_remaining = data.get(\"exec_info\", {}).get(\"queue_remaining\", 0)\n                    worker_statuses[worker_id] = {\n                        \"online\": True,\n                        \"enabled\": True,\n                        \"processing\": queue_remaining > 0,\n                        \"queue_count\": queue_remaining\n                    }\n                except asyncio.TimeoutError:\n                    worker_statuses[worker_id] = {\n                        \"online\": False,\n                        \"enabled\": True,\n                        \"processing\": False,\n                        \"queue_count\": 0,\n                        \"error\": \"Timeout\"\n                    }\n                except Exception as e:\n                    worker_statuses[worker_id] = {\n                        \"online\": False,\n                        \"enabled\": True,\n                        \"processing\": False,\n                        \"queue_count\": 0,\n                        \"error\": str(e)\n                    }\n        \n        return web.json_response({\n            \"status\": \"success\",\n            \"worker_statuses\": worker_statuses\n        })\n    except Exception as e:\n        debug_log(f\"Error checking local worker status: {e}\")\n        return await handle_api_error(request, e, 500)\n\n\n@server.PromptServer.instance.routes.get(\"/distributed/worker_log/{worker_id}\")\nasync def get_worker_log_endpoint(request):\n    \"\"\"Get log content for a specific worker.\"\"\"\n    try:\n        wm = get_worker_manager()\n        worker_id = request.match_info['worker_id']\n        \n        # Ensure worker_id is string\n        worker_id = str(worker_id)\n        \n        # Check if 
we manage this worker\n        if worker_id not in wm.processes:\n            return await handle_api_error(request, f\"Worker {worker_id} not managed by UI\", 404)\n        \n        proc_info = wm.processes[worker_id]\n        log_file = proc_info.get('log_file')\n        \n        if not log_file or not os.path.exists(log_file):\n            return await handle_api_error(request, \"Log file not found\", 404)\n        \n        # Read last N lines (or full file if small)\n        lines_to_read = _parse_positive_int_query(request.query.get('lines'), default=1000)\n        \n        try:\n            loop = asyncio.get_running_loop()\n            payload = await loop.run_in_executor(None, _read_worker_log_sync, log_file, lines_to_read)\n            \n            return web.json_response({\n                \"status\": \"success\",\n                \"content\": payload[\"content\"],\n                \"log_file\": log_file,\n                \"file_size\": payload[\"file_size\"],\n                \"truncated\": payload[\"truncated\"],\n                \"lines_shown\": payload[\"lines_shown\"],\n            })\n            \n        except Exception as e:\n            return await handle_api_error(request, f\"Error reading log file: {str(e)}\", 500)\n            \n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n\n\n@server.PromptServer.instance.routes.get(\"/distributed/remote_worker_log/{worker_id}\")\nasync def get_remote_worker_log_endpoint(request):\n    \"\"\"Proxy a remote worker log request to the worker's local in-memory log endpoint.\"\"\"\n    try:\n        worker_id = str(request.match_info[\"worker_id\"]).strip()\n        config = load_config()\n        worker = _find_worker_by_id(config, worker_id)\n        if not worker:\n            return await handle_api_error(request, f\"Worker {worker_id} not found\", 404)\n\n        # Remote log proxy is only meaningful for remote/cloud workers.\n        host = 
normalize_host(worker.get(\"host\")) or \"\"\n        if not host:\n            return await handle_api_error(\n                request,\n                f\"Worker {worker_id} is local; use /distributed/worker_log/{worker_id} instead.\",\n                400,\n            )\n\n        lines_to_read = _parse_positive_int_query(request.query.get(\"lines\"), default=300, maximum=3000)\n        worker_url = build_worker_url(worker, \"/distributed/local_log\")\n        session = await get_client_session()\n        async with session.get(\n            worker_url,\n            params={\"lines\": str(lines_to_read)},\n            timeout=aiohttp.ClientTimeout(total=5),\n        ) as resp:\n            if resp.status >= 400:\n                body = await resp.text()\n                return await handle_api_error(\n                    request,\n                    f\"Remote worker {worker_id} returned HTTP {resp.status}: {body[:400]}\",\n                    resp.status,\n                )\n\n            try:\n                data = await resp.json()\n            except Exception as e:\n                return await handle_api_error(\n                    request,\n                    f\"Remote worker {worker_id} returned invalid JSON: {e}\",\n                    502,\n                )\n\n        return web.json_response(data)\n    except Exception as e:\n        return await handle_api_error(request, e, 500)\n"
  },
  {
    "path": "conftest.py",
    "content": "# conftest.py — project-level pytest configuration.\n#\n# Problem: custom_nodes/ComfyUI-Distributed/__init__.py uses relative imports\n# (from .distributed import ...) that fail when pytest tries to import it as a\n# standalone module during Package.setup() for the root package node.\n#\n# Fix: patch Package.setup() to skip the root-package's __init__.py import.\n# All actual package context is provided by each test module via\n# importlib.util.spec_from_file_location with synthetic stub packages.\n\nfrom _pytest.python import Package\n\n_orig_pkg_setup = Package.setup\n\n\ndef _patched_pkg_setup(self) -> None:\n    # Skip the root package setup — its __init__.py uses relative imports\n    # that require a parent package (ComfyUI's plugin loader) which is not\n    # available in the test environment.\n    if self.path == self.config.rootpath:\n        return\n    _orig_pkg_setup(self)\n\n\nPackage.setup = _patched_pkg_setup\n\ncollect_ignore = [\n    \"__init__.py\",\n    \"distributed.py\",\n    \"distributed_upscale.py\",\n]\n"
  },
  {
    "path": "distributed.py",
    "content": "\"\"\"\nComfyUI-Distributed: thin entry point.\nAll implementation lives in workers/, nodes/, api/.\n\"\"\"\nimport atexit\nimport os\n\nimport server\n\nfrom .utils.config import ensure_config_exists\nfrom .utils.logging import debug_log\nfrom .utils.network import cleanup_client_session\nfrom .workers import get_worker_manager\nfrom .workers.startup import delayed_auto_launch, register_async_signals, sync_cleanup\nfrom .upscale.job_store import ensure_tile_jobs_initialized\nfrom .nodes import (\n    NODE_CLASS_MAPPINGS,\n    NODE_DISPLAY_NAME_MAPPINGS,\n    ImageBatchDivider,\n    DistributedCollectorNode,\n    DistributedSeed,\n    DistributedModelName,\n    DistributedValue,\n    AudioBatchDivider,\n    DistributedEmptyImage,\n    AnyType,\n    ByPassTypeTuple,\n    any_type,\n)\nfrom . import api  # noqa: F401 - triggers all @routes.* registrations\nfrom .api.queue_orchestration import ensure_distributed_state\n\nensure_config_exists()\n\n# Aiohttp session cleanup\nasync def _cleanup_session():\n    await cleanup_client_session()\n\n\natexit.register(lambda: None)  # placeholder; real cleanup in sync_cleanup\n\n# Initialize distributed job state on prompt_server\nprompt_server = server.PromptServer.instance\nensure_distributed_state(prompt_server)\nensure_tile_jobs_initialized()\n\n# Worker startup\nif not os.environ.get('COMFYUI_IS_WORKER'):\n    atexit.register(sync_cleanup)\n    delayed_auto_launch()\n    register_async_signals()\n"
  },
  {
    "path": "docs/comfyui-distributed-api.md",
    "content": "# ComfyUI-Distributed API (Experimental)\n\nThis document describes the **public HTTP API** added to ComfyUI-Distributed to allow queueing *distributed* workflows from external tools (scripts, services, CI jobs, render farms, etc.) without using the ComfyUI web UI.\n\n## Demo\n\n- Video walkthrough: https://youtu.be/yiQlPd0MzLk\n\n## Examples Repository\n\n- Examples repo: https://github.com/umanets/ComfyUI-Distributed-API-examples.git\n\n---\n\n## Overview\n\n### What this adds\n\n- `POST /distributed/queue` — queues a workflow using the same distributed orchestration rules as the UI:\n  - Detects distributed nodes in the prompt (`DistributedCollector`, `UltimateSDUpscaleDistributed`).\n  - Resolves enabled/selected workers.\n  - Pings workers (`GET /prompt`) to include only reachable ones.\n  - Dispatches the workflow to workers (`POST /prompt`).\n  - Queues the master workflow in ComfyUI’s prompt queue.\n  - If any `DistributedCollector` has `load_balance=true`, selects one least-busy participant for this run.\n\n### What it does *not* add\n\n- Authentication/authorization.\n- A separate “job status” API for distributed results (you still use ComfyUI’s normal prompt history / websocket flow, and the existing `/distributed/queue_status/{job_id}` behavior for collector queues).\n\n---\n\n## Endpoint: `POST /distributed/queue`\n\nQueue a workflow for distributed execution.\n\n### URL\n\n- `http://<master-host>:<master-port>/distributed/queue`\n\n### Headers\n\n- `Content-Type: application/json`\n\n### Request Body\n\n```json\n{\n  \"prompt\": { \"<node_id>\": { \"class_type\": \"...\", \"inputs\": { } } },\n  \"workflow\": { },\n  \"client_id\": \"external-client\",\n  \"delegate_master\": false,\n  \"enabled_worker_ids\": [\"1\", \"2\"],\n  \"workers\": [\"1\", \"2\"],\n  \"auto_prepare\": true,\n  \"trace_execution_id\": \"exec_1700000000_ab12cd\"\n}\n```\n\n#### Fields\n\n- `prompt` (required unless `workflow.prompt` is present, object)\n  - The 
ComfyUI prompt/workflow graph, same shape as used by `POST /prompt`.\n- `workflow` (optional, object)\n  - Workflow metadata that ComfyUI normally stores in `extra_pnginfo.workflow`.\n  - If you don’t care about UI metadata, you can omit it.\n- `client_id` (required, string)\n  - Passed through as `extra_data.client_id` (useful if you consume ComfyUI websocket events).\n- `delegate_master` (optional, boolean)\n  - If `true`, attempts “workers-only” execution for workflows based on `DistributedCollector`.\n  - Current limitation: delegate-only mode **does not support** `UltimateSDUpscaleDistributed` and will fall back to running the full prompt on master.\n- `enabled_worker_ids` (required, array of strings)\n  - The explicit worker IDs to consider for this run.\n- `workers` (optional, array of strings or objects with `id`)\n  - Transitional alias for `enabled_worker_ids` used by older clients.\n- `auto_prepare` (optional, boolean)\n  - Kept for wire compatibility.\n  - Backend orchestration always runs with auto-prepare semantics.\n  - If top-level `prompt` is omitted, backend will attempt `workflow.prompt`.\n- `trace_execution_id` (optional, string)\n  - Passed through to orchestration logs.\n  - Server log lines include the marker as `[exec:<trace_execution_id>]`.\n\n##### How to get `enabled_worker_ids`\n\nWorker IDs come from the plugin config (`GET /distributed/config`) under `workers[].id`.\n\nExample (bash + `jq`):\n\n```bash\ncurl -s \"http://127.0.0.1:8188/distributed/config\" \\\n  | jq -r '.workers[] | \"id=\\(.id)\\tname=\\(.name)\\tenabled=\\(.enabled)\\thost=\\(.host)\\tport=\\(.port)\\ttype=\\(.type)\"'\n```\n\nExample (PowerShell):\n\n```powershell\n$cfg = Invoke-RestMethod \"http://127.0.0.1:8188/distributed/config\"\n$cfg.workers | Select-Object id,name,enabled,host,port,type | Format-Table -AutoSize\n```\n\n### Response Body\n\n```json\n{\n  \"prompt_id\": \"<uuid>\",\n  \"worker_count\": 2,\n  \"auto_prepare_supported\": true\n}\n```\n\n- 
`prompt_id` — the master prompt id queued into ComfyUI.\n- `worker_count` — number of workers that received a dispatched prompt (only those that passed the health check).\n\n### Status Codes\n\n- `200` — queued.\n- `400` — invalid JSON or invalid body.\n- `500` — orchestration/dispatch failure (see server logs for details).\n\n---\n\n## Worker requirements (important)\n\nFor a worker to participate, it must be reachable from the master:\n\n- Health check: `GET <worker-base>/prompt` must return HTTP 200.\n- Dispatch: `POST <worker-base>/prompt` must accept the workflow.\n\nAlso, for collector-based flows:\n\n- Workers will send results back to the master via `POST /distributed/job_complete` (that route must be reachable from workers).\n\n---\n\n## Endpoint: `POST /distributed/job_complete`\n\nSubmit one completed worker image back to the master collector queue.\n\n### URL\n\n- `http://<master-host>:<master-port>/distributed/job_complete`\n\n### Request Body\n\n```json\n{\n  \"job_id\": \"exec_1234567890_17\",\n  \"worker_id\": \"worker-1\",\n  \"batch_idx\": 0,\n  \"image\": \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA...\",\n  \"is_last\": false\n}\n```\n\n### Canonical envelope (required fields)\n\n- `job_id` (string, required)\n- `worker_id` (string, required)\n- `batch_idx` (integer >= 0, required)\n- `image` (string, required)\n  - PNG payload as either:\n    - data URL: `data:image/png;base64,...`\n    - raw base64 PNG bytes\n- `is_last` (boolean, required)\n\nLegacy multipart/tensor payload formats are no longer accepted on this endpoint.\n\n### CORS note\n\nIf you call the API from a browser (not from a backend), ensure the master ComfyUI is started with `--enable-cors-header`.\n\n---\n\n## Log Endpoints\n\n### `GET /distributed/worker_log/{worker_id}`\n\nRead log files for workers launched locally by the master UI process manager.\n\n- Intended for managed local workers.\n- Query param: `lines` (optional, default `1000`).\n\n### `GET 
/distributed/local_log`\n\nRead this ComfyUI instance's in-memory runtime log buffer.\n\n- Available on any ComfyUI-Distributed instance (master or worker).\n- Query param: `lines` (optional, default `300`, max `3000`).\n\n### `GET /distributed/remote_worker_log/{worker_id}`\n\nProxy endpoint on master that fetches logs from a configured remote/cloud worker's\n`/distributed/local_log`.\n\n- Intended for remote/cloud workers in master config.\n- Query param: `lines` (optional, default `300`, max `3000`).\n\n---\n\n## Examples\n\n### 1) Minimal `curl`\n\n```bash\ncurl -X POST \"http://127.0.0.1:8188/distributed/queue\" \\\n  -H \"Content-Type: application/json\" \\\n  -d @payload.json\n```\n\nWhere `payload.json` contains at least:\n\n```json\n{\n  \"prompt\": {\n    \"1\": {\"class_type\": \"KSampler\", \"inputs\": {} }\n  },\n  \"enabled_worker_ids\": [],\n  \"client_id\": \"external-client\"\n}\n```\n\n### 2) Python (`requests`)\n\n```python\nimport requests\n\nurl = \"http://127.0.0.1:8188/distributed/queue\"\npayload = {\n    \"prompt\": {...},\n    \"workflow\": {...},\n    \"client_id\": \"external-client\",\n    \"delegate_master\": False,\n    \"enabled_worker_ids\": [\"1\", \"2\"],\n}\n\nr = requests.post(url, json=payload, timeout=60)\nr.raise_for_status()\nprint(r.json())\n```\n\n### 3) JavaScript (`fetch`)\n\n```js\nconst url = \"http://127.0.0.1:8188/distributed/queue\";\n\nconst payload = {\n  prompt: {/* ... */},\n  workflow: {/* ... 
*/},\n  client_id: \"external-client\",\n  delegate_master: false,\n  enabled_worker_ids: [\"1\", \"2\"],\n};\n\nconst resp = await fetch(url, {\n  method: \"POST\",\n  headers: { \"Content-Type\": \"application/json\" },\n  body: JSON.stringify(payload),\n});\n\nif (!resp.ok) throw new Error(await resp.text());\nconsole.log(await resp.json());\n```\n\n---\n\n## Operational notes / gotchas\n\n- If the workflow contains **no distributed nodes**, the endpoint falls back to normal master queueing and returns `worker_count: 0`.\n- Worker selection is “best-effort”: offline workers are skipped.\n- For public URLs/tunnels: prefer configuring `master.host` with an explicit scheme (`https://...`) to avoid ambiguity.\n\n---\n\n## Changelog (this feature)\n\n- Added `POST /distributed/queue` endpoint.\n- Added orchestration module used by the endpoint.\n"
  },
  {
    "path": "docs/model-download-script.md",
    "content": "## Automating ComfyUI Model Downloads\n> This guide will walk you through creating a shell script to automatically download the necessary models for your ComfyUI workflow, leveraging an advanced Large Language Model (LLM).\n\n1. In ComfyUI (on your local machine), export your workflow as an API workflow\n2. Copy the below prompt and upload the API workflow to an LLM **that has access to the internet**\n\n<details>\n<summary><strong>📋 Click to expand the full prompt</strong></summary>\n\n```\nCreate a sh script that will download the models from this workflow into the correct folders. For reference, these are the paths:\nbase_path: /workspace/ComfyUI\ncheckpoints: models/checkpoints/\nclip: models/clip/\nclip_vision: models/clip_vision/\ncontrolnet: models/controlnet/\ndiffusion_models: models/diffusion_models/\nembeddings: models/embeddings/\nflorence2: models/florence2/\nipadapter: models/ipadapter/\nloras: models/loras/\nstyle_models: models/style_models/\ntext_encoders: models/text_encoders/\nunet: models/unet/\nupscale_models: models/upscale_models/\nvae: models/vae/\n---\nImportant:\nMake sure you find the correct URLs for the models online.\nUse comfy cli to download the models: `comfy model download --url <URL> [--relative-path <PATH>] [--set-civitai-api-token <TOKEN>] [--set-hf-api-token <TOKEN>]`\nMake sure you add `--set-civitai-api-token $CIVITAI_API_TOKEN` for CivitAI download and `--set-hf-api-token $HF_API_TOKEN` for Hugging Face downloads.\n---\nExample:\n#!/bin/bash\n# Download from CivitAI\ncomfy model download --url https://civitai.com/api/download/models/1759168 --relative-path /workspace/ComfyUI/models/checkpoints --set-civitai-api-token $CIVITAI_API_TOKEN\n# Download model from Hugging Face\ncomfy model download --url https://huggingface.co/black-forest-labs/FLUX.1-dev/resolve/main/flux1-dev.safetensors --relative-path /workspace/ComfyUI/models/unet --set-hf-api-token $HF_API_TOKEN\n# If a model in the workflow was in a 
subfolder\ncomfy model download --url https://civitai.com/api/download/models/1759168 --relative-path /workspace/ComfyUI/models/checkpoints/SDXL --set-civitai-api-token $CIVITAI_API_TOKEN\n```\n\n</details>\n\n3. Review the LLM's output to make sure all download links are correct and save it as a .sh file, for example `download_models.sh`\n4. Launch the [ComfyUI Distributed Pod](https://console.runpod.io/deploy?template=m21ynvo8yo&ref=ak218p52) with these Environment Variables:\n   - `CIVITAI_API_TOKEN`: [get your token here](https://civitai.com/user/account)\n   - `HF_API_TOKEN`: [get your token here](https://huggingface.co/settings/tokens)\n5. Upload the .sh file to your Runpod instance, into `/workspace`\n6. Then run these commands:\n   - `chmod 755 /workspace/download_models.sh`\n   - `/workspace/download_models.sh`\n7. Confirm each model name (sometimes you might need to rename them to match the name on your local machine)\n"
  },
  {
    "path": "docs/video-upscaler-runpod-preset.md",
    "content": "![Clipboard Image](https://github.com/user-attachments/assets/5dc5224f-3f47-442c-b94a-116afeb28132)\n\n**Accelerated Creative Video Upscaler On Runpod:**\n\n1. Use the [ComfyUI Distributed Pod](https://console.runpod.io/deploy?template=m21ynvo8yo&ref=0bw29uf3ug0p) template.\n2. Filter instances by CUDA 12.8 (add filter in Additional Filters at the top of the page).\n3. Choose 4x 5090s\n4. Press Edit Template to configure the pod's Environment Variables:\n\t- CIVITAI_API_TOKEN: Not necessary for this workflow.\n\t- HF_API_TOKEN: [get your token here](https://huggingface.co/settings/tokens)\n\t- SAGE_ATTENTION: optional optimisation (set to true/false). Recommended for this workflow.\n\t- PRESET_VIDEO_UPSCALER: set to true. This will download everything you need.\n5. Deploy your pod.\n6. Once pod setup is complete, connect to ComfyUI running on your pod.\n7. In ComfyUI, open the GPU panel on the left.\n> If you set SAGE_ATTENTION to true, add \"--use-sage-attention\" to Extra Args on the workers.\n8. Launch the workers.\n9. [Load the workflow.](https://github.com/robertvoy/ComfyUI-Distributed/blob/main/workflows/distributed-upscale-video.json)\n10. Upload video, add prompt and run workflow.\n11. Right-click the Video Combine node and click Save Preview to save the video.\n"
  },
  {
    "path": "docs/worker-setup-guides.md",
    "content": "## Worker Setup Guide\n\n**Master**: The main ComfyUI instance that coordinates and distributes work. This is where you load workflows, manage the queue, and view results.\n\n**Worker**: A ComfyUI instance that receives and processes tasks from the master. Workers handle just the GPU computation and send results back to the master. You can have multiple workers connected to a single master, each utilizing their own GPU.\n\n<img width=\"600\" src=\"https://github.com/user-attachments/assets/609c42aa-8a1c-4a3f-939e-f3552fa1d54f\" />\n\n### Master participation modes\n\nThe master can either contribute GPU work or stay in **orchestrator-only** mode:\n\n- **Participating**: Master renders alongside workers, useful when you want every available GPU.\n- **Orchestrator-only**: Master sends jobs to selected workers but skips local rendering. Enable this by opening the Distributed panel and unchecking the master toggle. The master card will display *“Master disabled: running as orchestrator only.”*\n- **Fallback**: If orchestrator-only is enabled but no workers remain selected, the master automatically re-enables execution to guarantee the workflow still runs. The UI shows a green *“Master fallback execution active”* badge so you know work is executing locally again.\n\n### Types of Workers\n\n- **Local workers**: Additional GPUs on the same machine as the master\n- **Remote workers**: GPUs on different computers within your network\n- **Cloud workers**: GPUs hosted on cloud services like Runpod\n\n## Local workers\n\n<img align=\"right\" width=\"200\" src=\"https://github.com/user-attachments/assets/651e4912-7c23-4e32-bd88-250f5175e129\" />\n\n> These are added automatically on first launch, but you can add them manually if you need to.\n\n\n📺 [Watch Tutorial](https://youtu.be/p6eE3IlAbOs?si=K7Km0_flmPHwRQwz&t=43)\n\n1. **Open** the Distributed GPU panel.\n2. **Click** \"Add Worker\" in the UI.\n3. 
**Configure** your local worker:\n   - **Name**: A descriptive name for the worker (e.g., \"Studio PC 1\")\n   - **Port**: A unique port number for this worker (e.g., 8189, 8190...).\n   - **CUDA Device**: The GPU index from `nvidia-smi` (e.g., 0, 1).\n   - **Extra Args**: Optional ComfyUI arguments for this specific worker.\n4. **Save** and  launch the local worker.\n\n## Remote workers\n\n<img align=\"right\" width=\"200\"  src=\"https://github.com/user-attachments/assets/84291921-c44e-4556-94f2-a3b16500f4f9\" />\n\n\n> ComfyUI instances running on completely different computers on your network. These allow you to harness GPU power from other machines. Remote workers must be manually started on their respective computers and are connected via IP address.\n\n📺 [Watch Tutorial](https://youtu.be/p6eE3IlAbOs?si=Oxj3EzPyf4jKDvfG&t=140)\n\n**On the Remote Worker Machine:**\n1. **Launch** ComfyUI with the `--listen --enable-cors-header` arguments. ⚠️ **Required!**\n   - This ComfyUI instance will serve as a worker for your main master.\n2. *Optionally* add additional local workers on this machine if it has multiple GPUs:\n   - Access the Distributed GPU panel in this ComfyUI instance\n   - Add workers for any additional GPUs (if they haven't been added automatically)\n   - Make sure they have `--listen` set in `Extra Args`\n   - Launch them\n3. **Open** the ComfyUI port (e.g., 8188) and any additional worker ports (e.g., 8189, 8190) in the firewall.\n  \n**On the Main Machine:**\n1. **Launch** ComfyUI with `--enable-cors-header` launch argument.\n2. **Open** the Distributed GPU panel (sidebar on the left).\n3. **Click** \"Add Worker.\"\n4. **Choose** \"Remote\".\n5. **Configure** your remote worker:\n   - **Name**: A descriptive name for the worker (e.g., \"Server Rack GPU 0\")\n   - **Host**: The remote worker's IP address.\n   - **Port**: The port number used when launching ComfyUI on the remote master/worker (e.g., 8188).\n6. 
**Save** the remote worker configuration.\n  \n## Cloud workers\n\n<img align=\"right\" width=\"200\"  src=\"https://github.com/user-attachments/assets/a053f3ae-22f0-4e1c-8f2e-f26a1f660adf\" />\n\n> ComfyUI instances running on a cloud service like Runpod. \n\n### Deploy Cloud Worker on Runpod\n\n📺 [Watch Tutorial](https://www.youtube.com/watch?v=wxKKWMQhYTk)\n\n**On Runpod:**\n> If using your own template, make sure you launch ComfyUI with the `--enable-cors-header` argument and you `git clone ComfyUI-Distributed` into custom_nodes. ⚠️ **Required!**\n\n1. Register a [Runpod](https://get.runpod.io/0bw29uf3ug0p) account.\n2. On Runpod, go to Storage > New Network Volume and create a volume that will store the models you need. Start with 40 GB, you can always add more later. Learn more [about Network Volumes](https://docs.runpod.io/pods/storage/create-network-volumes).\n3. Use the [ComfyUI Distributed Pod](https://console.runpod.io/deploy?template=m21ynvo8yo&ref=0bw29uf3ug0p) template.\n4. Make sure your Network Volume is mounted and choose a suitable GPU.\n> ⚠️ To use the ComfyUI Distributed Pod template, you will need to filter instances by CUDA 12.8 (add filter in Additional Filters).\n5. Press Edit Template to configure the pod's Environment Variables:\n\t- CIVITAI_API_TOKEN: [get your token here](https://civitai.com/user/account)\n\t- HF_API_TOKEN: [get your token here](https://huggingface.co/settings/tokens)\n\t- SAGE_ATTENTION: optional optimisation (set to true/false)\n6. Deploy your pod.\n7. Connect to your pod using JupyterLabs. This gives us access to the pod's file system.\n8. Download models into /workspace/ComfyUI/models/ (these will remain on your network drive even after you terminate the pod). 
Example commands below:\n```\n# Download from CivitAI\ncomfy model download --url https://civitai.com/api/download/models/1759168 --relative-path /workspace/ComfyUI/models/checkpoints --set-civitai-api-token $CIVITAI_API_TOKEN\n# Download model from Hugging Face\ncomfy model download --url https://huggingface.co/black-forest-labs/FLUX.1-dev/resolve/main/flux1-dev.safetensors --relative-path /workspace/ComfyUI/models/unet --set-hf-api-token $HF_API_TOKEN\n```\n> ℹ️ Use [this guide](model-download-script.md) to make this process easy. It will generate a shell script that automatically downloads the models for a given workflow.\n9. Access ComfyUI through the Runpod URL.\n10. Download any additional custom nodes you need using the ComfyUI Manager.\n\n**On the Main Machine:**\n1. **Launch** a Cloudflare tunnel.\n   - Download from here: [https://github.com/cloudflare/cloudflared/releases](https://github.com/cloudflare/cloudflared/releases)\n\t- Then run, for example: `cloudflared-windows-amd64.exe tunnel --url http://localhost:8188`\n> ℹ️ Cloudflare tunnels create secure connections without exposing ports directly to the internet and are required for Cloud Workers.\n2. **Copy** the Cloudflare address\n3. **Launch** ComfyUI with `--enable-cors-header` launch argument.\n4. **Open** the Distributed GPU panel (sidebar on the left).\n5. **Edit** the Master's settings to change the host address to the Cloudflare address.\n6. **Click** \"Add Worker.\"\n7. **Choose** \"Cloud\".\n8. **Configure** your cloud worker:\n\t- **Host**: The ComfyUI Runpod address. For example: `wcegfo9tbbml9l-8188.proxy.runpod.net`\n\t- **Port**: 443\n9. 
**Save** the remote worker configuration.\n\n---\n\n### Deploy Cloud Worker on Other Platforms\n\n**On the Cloud Worker machine:**\n   - Your cloud worker container needs to have the same models and custom nodes as the workflow you want to run on your local machine.\n   - If your cloud platform doesn't provide a secure connection, use Cloudflare to create a tunnel for the worker. Each GPU needs their own tunnel for their respective port.\n\t- For example: `./cloudflared tunnel --url http://localhost:8188`\n1. **Launch** ComfyUI with the `--listen --enable-cors-header` arguments. ⚠️ **Required!**\n2. **Add** workers in the UI panel if the cloud machine has more than one GPU.\n   - Make sure that they also have `--listen` set in `Extra Args`.\n   - Then launch them.\n  \n**On the Main Machine:**\n1. **Launch** a Cloudflare tunnel on your local machine.\n   - Download from here: [https://github.com/cloudflare/cloudflared/releases](https://github.com/cloudflare/cloudflared/releases)\n   - Then run, for example: `cloudflared-windows-amd64.exe tunnel --url http://localhost:8188`\n2. **Copy** the Cloudflare address\n3. **Launch** ComfyUI with `--enable-cors-header` launch argument.\n4. **Open** the Distributed GPU panel (sidebar on the left).\n5. **Edit** the Master's host address and replace it with the Cloudflare address.\n6. **Click** \"Add Worker.\"\n7. **Choose** \"Cloud\".\n8. **Configure** your cloud worker:\n   - **Host**: The remote worker's IP address/domain\n   - **Port**: 443\n9. **Save** the remote worker configuration.\n"
  },
  {
    "path": "nodes/__init__.py",
    "content": "from .utilities import (\n    DistributedSeed,\n    DistributedModelName,\n    DistributedValue,\n    ImageBatchDivider,\n    AudioBatchDivider,\n    DistributedEmptyImage,\n    AnyType,\n    ByPassTypeTuple,\n    any_type,\n)\nfrom .collector import DistributedCollectorNode\n\nNODE_CLASS_MAPPINGS = {\n    \"DistributedCollector\": DistributedCollectorNode,\n    \"DistributedSeed\": DistributedSeed,\n    \"DistributedModelName\": DistributedModelName,\n    \"DistributedValue\": DistributedValue,\n    \"ImageBatchDivider\": ImageBatchDivider,\n    \"AudioBatchDivider\": AudioBatchDivider,\n    \"DistributedEmptyImage\": DistributedEmptyImage,\n}\nNODE_DISPLAY_NAME_MAPPINGS = {\n    \"DistributedCollector\": \"Distributed Collector\",\n    \"DistributedSeed\": \"Distributed Seed\",\n    \"DistributedModelName\": \"Distributed Model Name\",\n    \"DistributedValue\": \"Distributed Value\",\n    \"ImageBatchDivider\": \"Image Batch Divider\",\n    \"AudioBatchDivider\": \"Audio Batch Divider\",\n    \"DistributedEmptyImage\": \"Distributed Empty Image\",\n}\n"
  },
  {
    "path": "nodes/collector.py",
    "content": "import torch\nimport io\nimport json\nimport asyncio\nimport time\nimport base64\n\nimport aiohttp\nimport server as _server\nimport comfy.model_management\nfrom comfy.utils import ProgressBar\n\nfrom ..utils.logging import debug_log, log\nfrom ..utils.config import get_worker_timeout_seconds, load_config, is_master_delegate_only\nfrom ..utils.constants import HEARTBEAT_INTERVAL\nfrom ..utils.image import tensor_to_pil, pil_to_tensor, ensure_contiguous\nfrom ..utils.network import build_worker_url, get_client_session, probe_worker\nfrom ..utils.audio_payload import encode_audio_payload\nfrom ..utils.async_helpers import run_async_in_server_loop\n\nprompt_server = _server.PromptServer.instance\n\n\nclass DistributedCollectorNode:\n    EMPTY_AUDIO = {\"waveform\": torch.zeros(1, 2, 1), \"sample_rate\": 44100}\n\n    @classmethod\n    def INPUT_TYPES(s):\n        return {\n            \"required\": {\n                \"images\": (\"IMAGE\",),\n                \"load_balance\": (\n                    \"BOOLEAN\",\n                    {\n                        \"default\": False,\n                        \"tooltip\": \"Run this workflow on one least-busy participant (master included when participating).\",\n                    },\n                ),\n            },\n            \"optional\": { \"audio\": (\"AUDIO\",) },\n            \"hidden\": {\n                \"multi_job_id\": (\"STRING\", {\"default\": \"\"}),\n                \"is_worker\": (\"BOOLEAN\", {\"default\": False}),\n                \"master_url\": (\"STRING\", {\"default\": \"\"}),\n                \"enabled_worker_ids\": (\"STRING\", {\"default\": \"[]\"}),\n                \"worker_batch_size\": (\"INT\", {\"default\": 1, \"min\": 1, \"max\": 1024}),\n                \"worker_id\": (\"STRING\", {\"default\": \"\"}),\n                \"pass_through\": (\"BOOLEAN\", {\"default\": False}),\n                \"delegate_only\": (\"BOOLEAN\", {\"default\": False}),\n            },\n        
}\n\n    RETURN_TYPES = (\"IMAGE\", \"AUDIO\")\n    RETURN_NAMES = (\"images\", \"audio\")\n    FUNCTION = \"run\"\n    CATEGORY = \"image\"\n    \n    def run(self, images, load_balance=False, audio=None, multi_job_id=\"\", is_worker=False, master_url=\"\", enabled_worker_ids=\"[]\", worker_batch_size=1, worker_id=\"\", pass_through=False, delegate_only=False):\n        # Create empty audio if not provided\n        empty_audio = {\"waveform\": torch.zeros(1, 2, 1), \"sample_rate\": 44100}\n\n        if not multi_job_id or pass_through:\n            if pass_through:\n                debug_log(\"Collector: pass-through mode enabled, returning images unchanged\")\n            return (images, audio if audio is not None else empty_audio)\n\n        # Use async helper to run in server loop\n        result = run_async_in_server_loop(\n            self.execute(\n                images,\n                audio,\n                load_balance,\n                multi_job_id,\n                is_worker,\n                master_url,\n                enabled_worker_ids,\n                worker_batch_size,\n                worker_id,\n                delegate_only,\n            )\n        )\n        return result\n\n    async def send_batch_to_master(self, image_batch, audio, multi_job_id, master_url, worker_id):\n        \"\"\"Send image batch to master via canonical JSON envelopes.\"\"\"\n        batch_size = image_batch.shape[0]\n        if batch_size == 0:\n            return\n\n        encoded_audio = encode_audio_payload(audio)\n\n        session = await get_client_session()\n        url = f\"{master_url}/distributed/job_complete\"\n        for batch_idx in range(batch_size):\n            img = tensor_to_pil(image_batch[batch_idx:batch_idx+1], 0)\n            byte_io = io.BytesIO()\n            img.save(byte_io, format='PNG', compress_level=0)\n            encoded_image = base64.b64encode(byte_io.getvalue()).decode('utf-8')\n            payload = {\n                
\"job_id\": str(multi_job_id),\n                \"worker_id\": str(worker_id),\n                \"batch_idx\": int(batch_idx),\n                \"image\": f\"data:image/png;base64,{encoded_image}\",\n                \"is_last\": bool(batch_idx == batch_size - 1),\n            }\n            if payload[\"is_last\"] and encoded_audio is not None:\n                payload[\"audio\"] = encoded_audio\n\n            try:\n                async with session.post(\n                    url,\n                    json=payload,\n                    timeout=aiohttp.ClientTimeout(total=60),\n                ) as response:\n                    response.raise_for_status()\n            except Exception as e:\n                log(f\"Worker - Failed to send canonical image envelope to master: {e}\")\n                debug_log(f\"Worker - Full error details: URL={url}\")\n                raise  # Re-raise to handle at caller level\n\n    def _combine_audio(self, master_audio, worker_audio, empty_audio, worker_order=None):\n        \"\"\"Combine audio from master and workers into a single audio output.\n\n        Ordering: master first, then workers in `worker_order` (if provided),\n        then any unexpected worker ids in sorted order.\n        \"\"\"\n        audio_pieces = []\n        sample_rate = 44100\n\n        # Add master audio first if present\n        if master_audio is not None:\n            waveform = master_audio.get(\"waveform\")\n            if waveform is not None and waveform.numel() > 0:\n                audio_pieces.append(waveform)\n                sample_rate = master_audio.get(\"sample_rate\", 44100)\n\n        # Add worker audio in configured enabled-worker order first.\n        ordered_worker_ids = [str(worker_id) for worker_id in (worker_order or [])]\n        seen = set()\n        for worker_id_str in ordered_worker_ids:\n            seen.add(worker_id_str)\n            w_audio = worker_audio.get(worker_id_str)\n            if w_audio is not None:\n          
      waveform = w_audio.get(\"waveform\")\n                if waveform is not None and waveform.numel() > 0:\n                    audio_pieces.append(waveform)\n                    # Use first available sample rate\n                    if sample_rate == 44100:\n                        sample_rate = w_audio.get(\"sample_rate\", 44100)\n\n        # Append any audio from unexpected worker ids deterministically.\n        for worker_id_str in sorted(worker_audio.keys()):\n            if worker_id_str in seen:\n                continue\n            w_audio = worker_audio[worker_id_str]\n            if w_audio is not None:\n                waveform = w_audio.get(\"waveform\")\n                if waveform is not None and waveform.numel() > 0:\n                    audio_pieces.append(waveform)\n                    if sample_rate == 44100:\n                        sample_rate = w_audio.get(\"sample_rate\", 44100)\n\n        if not audio_pieces:\n            return empty_audio\n\n        try:\n            # Concatenate along the samples dimension (dim=-1)\n            # Ensure all pieces have same batch and channel dimensions\n            combined_waveform = torch.cat(audio_pieces, dim=-1)\n            debug_log(f\"Master - Combined audio: {len(audio_pieces)} pieces, final shape={combined_waveform.shape}\")\n            return {\"waveform\": combined_waveform, \"sample_rate\": sample_rate}\n        except Exception as e:\n            log(f\"[Distributed] Master - Audio combination failed, returning silence: {e}\")\n            return empty_audio\n\n    def _store_worker_result(self, worker_images: dict, item: dict) -> int:\n        \"\"\"Store one canonical queue item in worker_images in-place.\n\n        Canonical format:\n        - item has 'worker_id', 'image_index', and 'tensor'\n        Returns 1 when stored, otherwise 0.\n        \"\"\"\n        worker_id = item['worker_id']\n        tensor = item.get('tensor')\n        image_index = item.get('image_index')\n        if 
tensor is None or image_index is None:\n            return 0\n\n        worker_images.setdefault(worker_id, {})\n        worker_images[worker_id][image_index] = tensor\n        return 1\n\n    def _reorder_and_combine_tensors(\n        self,\n        worker_images: dict,\n        worker_order: list,\n        master_batch_size: int,\n        images_on_cpu,\n        delegate_mode: bool,\n        fallback_images,\n    ) -> torch.Tensor:\n        \"\"\"Assemble final tensor: master first, then workers in enabled order.\"\"\"\n        ordered_tensors = []\n        if not delegate_mode and images_on_cpu is not None:\n            for i in range(master_batch_size):\n                ordered_tensors.append(images_on_cpu[i:i+1])\n\n        ordered_worker_ids = [str(worker_id) for worker_id in (worker_order or [])]\n        seen = set()\n        for worker_id_str in ordered_worker_ids:\n            seen.add(worker_id_str)\n            if worker_id_str not in worker_images:\n                continue\n            for idx in sorted(worker_images[worker_id_str].keys()):\n                ordered_tensors.append(worker_images[worker_id_str][idx])\n\n        # Append any unexpected worker ids deterministically.\n        for worker_id_str in sorted(worker_images.keys()):\n            if worker_id_str in seen:\n                continue\n            for idx in sorted(worker_images[worker_id_str].keys()):\n                ordered_tensors.append(worker_images[worker_id_str][idx])\n\n        cpu_tensors = []\n        for t in ordered_tensors:\n            if t.is_cuda:\n                t = t.cpu()\n            t = ensure_contiguous(t)\n            cpu_tensors.append(t)\n\n        if cpu_tensors:\n            return ensure_contiguous(torch.cat(cpu_tensors, dim=0))\n        elif fallback_images is not None:\n            return ensure_contiguous(fallback_images)\n        else:\n            raise ValueError(\"No image data collected from master or workers\")\n\n    async def execute(self, 
images, audio, load_balance=False, multi_job_id=\"\", is_worker=False, master_url=\"\", enabled_worker_ids=\"[]\", worker_batch_size=1, worker_id=\"\", delegate_only=False):\n        if is_worker:\n            # Worker mode: send images and audio to master in a single batch\n            debug_log(f\"Worker - Job {multi_job_id} complete. Sending {images.shape[0]} image(s) to master\")\n            await self.send_batch_to_master(images, audio, multi_job_id, master_url, worker_id)\n            return (images, audio if audio is not None else self.EMPTY_AUDIO)\n        else:\n            delegate_mode = delegate_only or is_master_delegate_only()\n            # Master mode: collect images and audio from workers\n            enabled_workers_raw = json.loads(enabled_worker_ids)\n            enabled_workers = []\n            seen_enabled = set()\n            for worker_id in enabled_workers_raw:\n                worker_id_str = str(worker_id)\n                if worker_id_str in seen_enabled:\n                    continue\n                seen_enabled.add(worker_id_str)\n                enabled_workers.append(worker_id_str)\n            expected_workers = set(enabled_workers)\n            num_workers = len(expected_workers)\n            if num_workers == 0:\n                return (images, audio if audio is not None else self.EMPTY_AUDIO)\n\n            # Create the queue before any expensive local work to avoid job_complete race.\n            async with prompt_server.distributed_jobs_lock:\n                if multi_job_id not in prompt_server.distributed_pending_jobs:\n                    prompt_server.distributed_pending_jobs[multi_job_id] = asyncio.Queue()\n                    debug_log(f\"Master - Initialized queue early for job {multi_job_id}\")\n                else:\n                    existing_size = prompt_server.distributed_pending_jobs[multi_job_id].qsize()\n                    debug_log(f\"Master - Using existing queue for job {multi_job_id} (current size: 
{existing_size})\")\n\n            if delegate_mode:\n                master_batch_size = 0\n                images_on_cpu = None\n                master_audio = None\n                debug_log(f\"Master - Job {multi_job_id}: Delegate-only mode enabled, collecting exclusively from {num_workers} workers\")\n            else:\n                images_on_cpu = images.cpu()\n                master_batch_size = images.shape[0]\n                master_audio = audio  # Keep master's audio for later\n                debug_log(f\"Master - Job {multi_job_id}: Master has {master_batch_size} images, collecting from {num_workers} workers...\")\n\n                # Ensure master images are contiguous\n                images_on_cpu = ensure_contiguous(images_on_cpu)\n\n\n            # Initialize storage for collected images and audio\n            worker_images = {}  # Dict to store images by worker_id and index\n            worker_audio = {}   # Dict to store audio by worker_id\n            \n            # Collect images until all workers report they're done\n            collected_count = 0\n            workers_done = set()\n            \n            # Use unified worker timeout from config/UI with simple sliced waits\n            base_timeout = float(get_worker_timeout_seconds())\n            slice_timeout = min(max(0.1, HEARTBEAT_INTERVAL / 20.0), base_timeout)\n            last_activity = time.time()\n            \n            \n            # Get queue size before starting\n            async with prompt_server.distributed_jobs_lock:\n                q = prompt_server.distributed_pending_jobs[multi_job_id]\n                initial_size = q.qsize()\n\n            # NEW: Initialize progress bar for workers (total = num_workers)\n            p = ProgressBar(num_workers)\n\n            def mark_worker_done(done_worker_id):\n                done_worker_id = str(done_worker_id)\n                if done_worker_id not in expected_workers:\n                    debug_log(\n                
        f\"Master - Ignoring completion from unexpected worker {done_worker_id} for job {multi_job_id}\"\n                    )\n                    return\n                if done_worker_id in workers_done:\n                    debug_log(\n                        f\"Master - Ignoring duplicate completion from worker {done_worker_id} for job {multi_job_id}\"\n                    )\n                    return\n                workers_done.add(done_worker_id)\n                p.update(1)  # +1 per completed expected worker\n\n            try:\n                while len(workers_done) < num_workers:\n                    # Check for user interruption to abort collection promptly\n                    comfy.model_management.throw_exception_if_processing_interrupted()\n                    try:\n                        # Get the queue again each time to ensure we have the right reference\n                        async with prompt_server.distributed_jobs_lock:\n                            q = prompt_server.distributed_pending_jobs[multi_job_id]\n                            current_size = q.qsize()\n                        \n                        result = await asyncio.wait_for(q.get(), timeout=slice_timeout)\n                        worker_id = result['worker_id']\n                        is_last = result.get('is_last', False)\n                        count = self._store_worker_result(worker_images, result)\n                        collected_count += count\n                        debug_log(\n                            f\"Master - Got canonical result from worker {worker_id}, \"\n                            f\"image {result.get('image_index', 0)}, is_last={is_last}\"\n                        )\n\n                        # Collect audio data if present\n                        result_audio = result.get('audio')\n                        if result_audio is not None:\n                            worker_audio[worker_id] = result_audio\n                            
debug_log(f\"Master - Got audio from worker {worker_id}\")\n\n                        # Record activity and refresh timeout baseline\n                        last_activity = time.time()\n                        base_timeout = float(get_worker_timeout_seconds())\n\n                        if is_last:\n                            mark_worker_done(worker_id)\n                        \n                    except asyncio.TimeoutError:\n                        # If we still have time, continue polling; otherwise handle timeout\n                        if (time.time() - last_activity) < base_timeout:\n                            comfy.model_management.throw_exception_if_processing_interrupted()\n                            continue\n                        # Re-check for user interruption after timeout expiry\n                        comfy.model_management.throw_exception_if_processing_interrupted()\n                        missing_workers = set(str(w) for w in enabled_workers) - workers_done\n                        elapsed = time.time() - last_activity\n                        for missing_worker_id in sorted(missing_workers):\n                            log(\n                                \"Master - Heartbeat timeout: \"\n                                f\"worker={missing_worker_id}, elapsed={elapsed:.1f}s\"\n                            )\n                        log(\n                            f\"Master - Heartbeat timeout. 
Still waiting for workers: {list(missing_workers)} \"\n                            f\"(elapsed={elapsed:.1f}s)\"\n                        )\n\n                        # Probe missing workers' /prompt endpoints to check if they are actively processing\n                        any_busy = False\n                        try:\n                            cfg = load_config()\n                            cfg_workers = cfg.get('workers', [])\n                            for wid in list(missing_workers):\n                                wrec = next((w for w in cfg_workers if str(w.get('id')) == str(wid)), None)\n                                if not wrec:\n                                    debug_log(f\"Collector probe: worker {wid} not found in config\")\n                                    continue\n                                worker_url = build_worker_url(wrec)\n                                try:\n                                    payload = await probe_worker(worker_url, timeout=2.0)\n                                    queue_remaining = None\n                                    if payload is not None:\n                                        queue_remaining = int(payload.get('exec_info', {}).get('queue_remaining', 0))\n                                    debug_log(\n                                        \"Collector probe: worker \"\n                                        f\"{wid} online={payload is not None} queue_remaining={queue_remaining}\"\n                                    )\n                                    if payload is not None and queue_remaining and queue_remaining > 0:\n                                        any_busy = True\n                                        log(\n                                            f\"Master - Probe grace: worker {wid} appears busy \"\n                                            f\"(queue_remaining={queue_remaining}). 
Continuing to wait.\"\n                                        )\n                                        break\n                                except Exception as e:\n                                    debug_log(f\"Collector probe failed for worker {wid}: {e}\")\n                        except Exception as e:\n                            debug_log(f\"Collector probe setup error: {e}\")\n\n                        if any_busy:\n                            # Refresh last_activity and continue waiting\n                            last_activity = time.time()\n                            # Refresh base timeout in case the user changed it in UI\n                            base_timeout = float(get_worker_timeout_seconds())\n                            continue\n                        \n                        # Check queue size again with lock\n                        async with prompt_server.distributed_jobs_lock:\n                            if multi_job_id in prompt_server.distributed_pending_jobs:\n                                final_q = prompt_server.distributed_pending_jobs[multi_job_id]\n                                final_size = final_q.qsize()\n                                \n                                # Try to drain any remaining items\n                                remaining_items = []\n                                while not final_q.empty():\n                                    try:\n                                        item = final_q.get_nowait()\n                                        remaining_items.append(item)\n                                    except asyncio.QueueEmpty:\n                                        break\n                                \n                                if remaining_items:\n                                    # Process them\n                                    for item in remaining_items:\n                                        worker_id = item['worker_id']\n                                        
is_last = item.get('is_last', False)\n\n                                        collected_count += self._store_worker_result(worker_images, item)\n                                        \n                                        if is_last:\n                                            mark_worker_done(worker_id)\n                            else:\n                                log(f\"Master - Queue {multi_job_id} no longer exists!\")\n                        break\n            except comfy.model_management.InterruptProcessingException:\n                # Cleanup queue on interruption and re-raise to abort prompt cleanly\n                async with prompt_server.distributed_jobs_lock:\n                    if multi_job_id in prompt_server.distributed_pending_jobs:\n                        del prompt_server.distributed_pending_jobs[multi_job_id]\n                raise\n            \n            total_collected = sum(len(imgs) for imgs in worker_images.values())\n            \n            # Clean up job queue\n            async with prompt_server.distributed_jobs_lock:\n                if multi_job_id in prompt_server.distributed_pending_jobs:\n                    del prompt_server.distributed_pending_jobs[multi_job_id]\n\n            try:\n                combined = self._reorder_and_combine_tensors(\n                    worker_images, enabled_workers, master_batch_size, images_on_cpu, delegate_mode, images\n                )\n                debug_log(f\"Master - Job {multi_job_id} complete. 
Combined {combined.shape[0]} images total \"\n                          f\"(master: {master_batch_size}, workers: {combined.shape[0] - master_batch_size})\")\n\n                # Combine audio from master and workers\n                combined_audio = self._combine_audio(master_audio, worker_audio, self.EMPTY_AUDIO, enabled_workers)\n\n                return (combined, combined_audio)\n            except Exception as e:\n                log(f\"Master - Error combining images: {e}\")\n                # Return just the master images as fallback\n                return (images, audio if audio is not None else self.EMPTY_AUDIO)\n"
  },
  {
    "path": "nodes/distributed_upscale.py",
    "content": "import json\nimport math\nfrom functools import wraps\n\nimport comfy.samplers\n\nfrom ..utils.logging import debug_log, log\nfrom ..utils.async_helpers import run_async_in_server_loop\nfrom ..upscale.job_store import ensure_tile_jobs_initialized\n\nfrom ..upscale.tile_ops import TileOpsMixin\nfrom ..upscale.result_collector import ResultCollectorMixin\nfrom ..upscale.worker_comms import WorkerCommsMixin\nfrom ..upscale.job_state import JobStateMixin\nfrom ..upscale.modes.single_gpu import SingleGpuModeMixin\nfrom ..upscale.modes.static import StaticModeMixin\nfrom ..upscale.modes.dynamic import DynamicModeMixin\n\ndef sync_wrapper(async_func):\n    \"\"\"Decorator to wrap async methods for synchronous execution.\"\"\"\n    @wraps(async_func)\n    def sync_func(self, *args, **kwargs):\n        # Use run_async_in_server_loop for ComfyUI compatibility\n        return run_async_in_server_loop(\n            async_func(self, *args, **kwargs),\n            timeout=600.0  # 10 minute timeout for long operations\n        )\n    return sync_func\n\ndef _parse_enabled_worker_ids(enabled_worker_ids):\n    \"\"\"Parse enabled worker IDs from either JSON or list input.\"\"\"\n    if isinstance(enabled_worker_ids, list):\n        return [str(worker_id) for worker_id in enabled_worker_ids]\n    if not enabled_worker_ids:\n        return []\n    if isinstance(enabled_worker_ids, str):\n        try:\n            parsed = json.loads(enabled_worker_ids)\n        except json.JSONDecodeError:\n            log(\"USDU Dist: Invalid enabled_worker_ids JSON; defaulting to no workers.\")\n            return []\n        if isinstance(parsed, list):\n            return [str(wid) for wid in parsed]\n    return []\n\nclass UltimateSDUpscaleDistributed(\n    DynamicModeMixin,\n    StaticModeMixin,\n    SingleGpuModeMixin,\n    ResultCollectorMixin,\n    WorkerCommsMixin,\n    JobStateMixin,\n    TileOpsMixin,\n):\n\n    \"\"\"\n    Distributed version of Ultimate SD Upscale (No 
Upscale).\n    \n    Supports three processing modes:\n    1. Single GPU: No workers available, process everything locally\n    2. Static Mode: Small batches, distributes tiles across workers (flattened)\n    3. Dynamic Mode: Large batches, assigns whole images to workers dynamically\n    \n    Features:\n    - Multi-mode batch handling for efficient video/image upscaling\n    - Tiled VAE support for memory efficiency\n    - Dynamic load balancing for large batches\n    - Backward compatible with single-image workflows\n    \n    Environment Variables:\n    - COMFYUI_MAX_BATCH: Chunk size for tile sending (default 20)\n    - COMFYUI_MAX_PAYLOAD_SIZE: Max API payload bytes (default 50MB)\n    \n    Threshold: dynamic_threshold input controls mode switch (default 8)\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Initialize the node and ensure persistent storage exists.\"\"\"\n        # Pre-initialize the persistent storage on node creation\n        ensure_tile_jobs_initialized()\n        debug_log(\"UltimateSDUpscaleDistributed - Node initialized\")\n\n    @classmethod\n    def INPUT_TYPES(s):\n        return {\n            \"required\": {\n                \"upscaled_image\": (\"IMAGE\",),\n                \"model\": (\"MODEL\",),\n                \"positive\": (\"CONDITIONING\",),\n                \"negative\": (\"CONDITIONING\",),\n                \"vae\": (\"VAE\",),\n                \"seed\": (\"INT\", {\"default\": 0, \"min\": 0, \"max\": 0xffffffffffffffff}),\n                \"steps\": (\"INT\", {\"default\": 20, \"min\": 1, \"max\": 10000}),\n                \"cfg\": (\"FLOAT\", {\"default\": 8.0, \"min\": 0.0, \"max\": 100.0}),\n                \"sampler_name\": (comfy.samplers.KSampler.SAMPLERS,),\n                \"scheduler\": (comfy.samplers.KSampler.SCHEDULERS,),\n                \"denoise\": (\"FLOAT\", {\"default\": 0.5, \"min\": 0.0, \"max\": 1.0, \"step\": 0.01}),\n                \"tile_width\": (\"INT\", {\"default\": 512, \"min\": 64, 
\"max\": 2048, \"step\": 8}),\n                \"tile_height\": (\"INT\", {\"default\": 512, \"min\": 64, \"max\": 2048, \"step\": 8}),\n                \"padding\": (\"INT\", {\"default\": 32, \"min\": 0, \"max\": 256, \"step\": 8}),\n                \"mask_blur\": (\"INT\", {\"default\": 8, \"min\": 0, \"max\": 256}),\n                \"force_uniform_tiles\": (\"BOOLEAN\", {\"default\": True}),\n                \"tiled_decode\": (\"BOOLEAN\", {\"default\": False}),\n            },\n            \"hidden\": {\n                \"multi_job_id\": (\"STRING\", {\"default\": \"\"}),\n                \"is_worker\": (\"BOOLEAN\", {\"default\": False}),\n                \"master_url\": (\"STRING\", {\"default\": \"\"}),\n                \"enabled_worker_ids\": (\"STRING\", {\"default\": \"[]\"}),\n                \"worker_id\": (\"STRING\", {\"default\": \"\"}),\n                \"tile_indices\": (\"STRING\", {\"default\": \"\"}),  # Unused - kept for compatibility\n                \"dynamic_threshold\": (\"INT\", {\"default\": 8, \"min\": 1, \"max\": 64}),\n            },\n        }\n\n    RETURN_TYPES = (\"IMAGE\",)\n    FUNCTION = \"run\"\n    CATEGORY = \"image/upscaling\"\n\n    @classmethod\n    def IS_CHANGED(cls, **kwargs):\n        \"\"\"Force re-execution.\"\"\"\n        return float(\"nan\")  # Always re-execute\n\n    def run(self, upscaled_image, model, positive, negative, vae, seed, steps, cfg, \n            sampler_name, scheduler, denoise, tile_width, tile_height, padding, \n            mask_blur, force_uniform_tiles, tiled_decode,\n            multi_job_id=\"\", is_worker=False, master_url=\"\", enabled_worker_ids=\"[]\", \n            worker_id=\"\", tile_indices=\"\", dynamic_threshold=8):\n        \"\"\"Entry point - runs SYNCHRONOUSLY like Ultimate SD Upscaler.\"\"\"\n        # Strict WAN/FLOW batching: error if batch is not 4n+1 (except allow 1)\n        try:\n            batch_size = int(getattr(upscaled_image, 'shape', [1])[0])\n        except 
Exception:\n            batch_size = 1\n        # Enforce 4n+1 batches globally for any model when batch > 1 (master only)\n        if not is_worker and batch_size != 1 and (batch_size % 4 != 1):\n            raise ValueError(\n                f\"Batch size {batch_size} is not of the form 4n+1. \"\n                \"This node requires batch sizes of 1 or 4n+1 (1, 5, 9, 13, ...). \"\n                \"Please adjust the batch size.\"\n            )\n        if not multi_job_id:\n            # No distributed processing, run single GPU version\n            return self.process_single_gpu(upscaled_image, model, positive, negative, vae,\n                                          seed, steps, cfg, sampler_name, scheduler, denoise,\n                                          tile_width, tile_height, padding, mask_blur, force_uniform_tiles, tiled_decode)\n        \n        if is_worker:\n            # Worker mode: process tiles synchronously\n            return self.process_worker(upscaled_image, model, positive, negative, vae,\n                                      seed, steps, cfg, sampler_name, scheduler, denoise,\n                                      tile_width, tile_height, padding, mask_blur,\n                                      force_uniform_tiles, tiled_decode, multi_job_id, master_url,\n                                      worker_id, enabled_worker_ids, dynamic_threshold)\n        else:\n            # Master mode: distribute and collect synchronously\n            return self.process_master(upscaled_image, model, positive, negative, vae,\n                                     seed, steps, cfg, sampler_name, scheduler, denoise,\n                                     tile_width, tile_height, padding, mask_blur,\n                                     force_uniform_tiles, tiled_decode, multi_job_id, enabled_worker_ids, \n                                     dynamic_threshold)\n\n    def process_worker(self, upscaled_image, model, positive, negative, vae,\n                
      seed, steps, cfg, sampler_name, scheduler, denoise,\n                      tile_width, tile_height, padding, mask_blur,\n                      force_uniform_tiles, tiled_decode, multi_job_id, master_url,\n                      worker_id, enabled_worker_ids, dynamic_threshold):\n        \"\"\"Unified worker processing - handles both static and dynamic modes.\"\"\"\n        # Get batch size to determine mode\n        batch_size = upscaled_image.shape[0]\n        \n        # Ensure mode consistency across master/workers via shared threshold\n        # Determine mode (must match master's logic)\n        enabled_workers = json.loads(enabled_worker_ids)\n        num_workers = len(enabled_workers)\n        # Compute number of tiles for this image to decide if tile distribution makes sense\n        _, height, width, _ = upscaled_image.shape\n        all_tiles = self.calculate_tiles(width, height, self.round_to_multiple(tile_width), self.round_to_multiple(tile_height), force_uniform_tiles)\n        num_tiles_per_image = len(all_tiles)\n\n        mode = self._determine_processing_mode(batch_size, num_workers, dynamic_threshold)\n        # For USDU-style processing, we want tile distribution whenever workers are available\n        # and there is more than one tile to process, even if batch == 1.\n        if num_workers > 0 and num_tiles_per_image > 1:\n            mode = \"static\"\n            \n        debug_log(f\"USDU Dist Worker - Batch size {batch_size}\")\n        \n        if mode == \"dynamic\":\n            return self.process_worker_dynamic(upscaled_image, model, positive, negative, vae,\n                                             seed, steps, cfg, sampler_name, scheduler, denoise,\n                                             tile_width, tile_height, padding, mask_blur,\n                                             force_uniform_tiles, tiled_decode, multi_job_id, master_url,\n                                             worker_id, enabled_worker_ids, 
dynamic_threshold)\n        \n        # Static mode - enhanced with health monitoring and retry logic\n        return self._process_worker_static_sync(upscaled_image, model, positive, negative, vae,\n                                               seed, steps, cfg, sampler_name, scheduler, denoise,\n                                               tile_width, tile_height, padding, mask_blur,\n                                               force_uniform_tiles, tiled_decode, multi_job_id, master_url,\n                                               worker_id, enabled_workers)\n\n    def process_master(self, upscaled_image, model, positive, negative, vae,\n                      seed, steps, cfg, sampler_name, scheduler, denoise,\n                      tile_width, tile_height, padding, mask_blur,\n                      force_uniform_tiles, tiled_decode, multi_job_id, enabled_worker_ids, \n                      dynamic_threshold):\n        \"\"\"Unified master processing with enhanced monitoring and failure handling.\"\"\"\n        # Round tile dimensions\n        tile_width = self.round_to_multiple(tile_width)\n        tile_height = self.round_to_multiple(tile_height)\n        \n        # Get image dimensions and batch size\n        batch_size, height, width, _ = upscaled_image.shape\n        \n        # Calculate all tiles and grid\n        all_tiles = self.calculate_tiles(width, height, tile_width, tile_height, force_uniform_tiles)\n        num_tiles_per_image = len(all_tiles)\n        rows = math.ceil(height / tile_height)\n        cols = math.ceil(width / tile_width)\n        log(\n            f\"USDU Dist: Canvas {width}x{height} | Tile {tile_width}x{tile_height} | Grid {rows}x{cols} ({num_tiles_per_image} tiles/image) | Batch {batch_size}\"\n        )\n        \n        # Parse enabled workers\n        enabled_workers = json.loads(enabled_worker_ids)\n        num_workers = len(enabled_workers)\n        \n        # Determine processing mode\n        mode = 
self._determine_processing_mode(batch_size, num_workers, dynamic_threshold)\n        # Prefer tile-based static distribution when workers are available and there are multiple tiles,\n        # even for batch == 1, to spread tiles across GPUs like the legacy dynamic tile queue.\n        if num_workers > 0 and num_tiles_per_image > 1:\n            mode = \"static\"\n        \n        log(f\"USDU Dist: Workers {num_workers} | Mode {mode} | Threshold {dynamic_threshold}\")\n\n        if mode == \"single_gpu\":\n            # No workers, process all tiles locally\n            return self.process_single_gpu(upscaled_image, model, positive, negative, vae,\n                                         seed, steps, cfg, sampler_name, scheduler, denoise,\n                                         tile_width, tile_height, padding, mask_blur, force_uniform_tiles, tiled_decode)\n        \n        elif mode == \"dynamic\":\n            # Dynamic mode for large batches\n            return self.process_master_dynamic(upscaled_image, model, positive, negative, vae,\n                                             seed, steps, cfg, sampler_name, scheduler, denoise,\n                                             tile_width, tile_height, padding, mask_blur,\n                                             force_uniform_tiles, tiled_decode, multi_job_id, enabled_workers)\n        \n        # Static mode - enhanced with unified job management\n        return self._process_master_static_sync(upscaled_image, model, positive, negative, vae,\n                                               seed, steps, cfg, sampler_name, scheduler, denoise,\n                                               tile_width, tile_height, padding, mask_blur,\n                                               force_uniform_tiles, tiled_decode, multi_job_id, enabled_workers,\n                                               all_tiles, num_tiles_per_image)\n\n    def _determine_processing_mode(self, batch_size: int, num_workers: int, 
dynamic_threshold: int) -> str:\n        \"\"\"Determines processing mode per requested policy:\n        - any workers     => prefer static (tile-based) for USDU\n        - no workers      => single_gpu\n        \"\"\"\n        if num_workers == 0:\n            return \"single_gpu\"\n        # Default to static when distributed; master/worker may still override if special cases arise\n        return \"static\"\n\n# Ensure initialization before registering routes\nensure_tile_jobs_initialized()\n\n# Node registration\nNODE_CLASS_MAPPINGS = {\n    \"UltimateSDUpscaleDistributed\": UltimateSDUpscaleDistributed,\n}\n\nNODE_DISPLAY_NAME_MAPPINGS = {\n    \"UltimateSDUpscaleDistributed\": \"Ultimate SD Upscale Distributed (No Upscale)\",\n}\n"
  },
  {
    "path": "nodes/utilities.py",
    "content": "import torch\nimport json\n\nfrom ..utils.logging import debug_log, log\n\n\ndef _chunk_bounds(total_items: int, n_splits: int) -> list[tuple[int, int]]:\n    \"\"\"Return contiguous [start, end) bounds for n_splits chunks.\"\"\"\n    split_count = max(1, int(n_splits))\n    total = max(0, int(total_items))\n    base, remainder = divmod(total, split_count)\n\n    bounds: list[tuple[int, int]] = []\n    start = 0\n    for idx in range(split_count):\n        size = base + (1 if idx < remainder else 0)\n        end = start + size\n        bounds.append((start, end))\n        start = end\n    return bounds\n\n\nclass DistributedSeed:\n    \"\"\"\n    Distributes seed values across multiple GPUs.\n    On master: passes through the original seed.\n    On workers: adds offset based on worker ID.\n    \"\"\"\n    \n    @classmethod\n    def INPUT_TYPES(cls):\n        return {\n            \"required\": {\n                \"seed\": (\"INT\", {\n                    \"default\": 1125899906842, \n                    \"min\": 0,\n                    \"max\": 1125899906842624,\n                    \"forceInput\": False  # Widget by default, can be converted to input\n                }),\n            },\n            \"hidden\": {\n                \"is_worker\": (\"BOOLEAN\", {\"default\": False}),\n                \"worker_id\": (\"STRING\", {\"default\": \"\"}),\n            },\n        }\n    \n    RETURN_TYPES = (\"INT\",)\n    RETURN_NAMES = (\"seed\",)\n    FUNCTION = \"distribute\"\n    CATEGORY = \"utils\"\n    \n    def distribute(self, seed, is_worker=False, worker_id=\"\"):\n        if not is_worker:\n            # Master node: pass through original values\n            debug_log(f\"Distributor - Master: seed={seed}\")\n            return (seed,)\n        else:\n            # Worker node: apply offset based on worker index\n            # Find worker index from enabled_worker_ids\n            try:\n                # Worker IDs are passed as \"worker_0\", 
\"worker_1\", etc.\n                if worker_id.startswith(\"worker_\"):\n                    worker_index = int(worker_id.split(\"_\")[1])\n                else:\n                    # Fallback: try to parse as direct index\n                    worker_index = int(worker_id)\n                \n                offset = worker_index + 1\n                new_seed = seed + offset\n                debug_log(f\"Distributor - Worker {worker_index}: seed={seed} → {new_seed}\")\n                return (new_seed,)\n            except (ValueError, IndexError) as e:\n                debug_log(f\"Distributor - Error parsing worker_id '{worker_id}': {e}\")\n                # Fallback: return original seed\n                return (seed,)\n\n\n# Define ByPassTypeTuple for flexible return types\nclass AnyType(str):\n    def __ne__(self, __value: object) -> bool:\n        return False\n\nany_type = AnyType(\"*\")\n\n\nclass DistributedValue:\n    \"\"\"\n    Outputs a different value per worker.\n    On master: returns default_value.\n    On workers: looks up the worker-specific value from a JSON map,\n    falling back to default_value if not set.\n    \"\"\"\n\n    @classmethod\n    def INPUT_TYPES(cls):\n        return {\n            \"required\": {\n                \"default_value\": (\"STRING\", {\"default\": \"\"}),\n                \"worker_values\": (\"STRING\", {\"default\": \"{}\"}),\n            },\n            \"hidden\": {\n                \"is_worker\": (\"BOOLEAN\", {\"default\": False}),\n                \"worker_id\": (\"STRING\", {\"default\": \"\"}),\n            },\n        }\n\n    RETURN_TYPES = (any_type,)\n    RETURN_NAMES = (\"value\",)\n    FUNCTION = \"distribute\"\n    CATEGORY = \"utils\"\n\n    @staticmethod\n    def _coerce(value, value_type):\n        \"\"\"Convert a string value to the requested type.\"\"\"\n        if value_type == \"INT\":\n            return int(float(value))\n        if value_type == \"FLOAT\":\n            return float(value)\n  
      return value  # STRING and COMBO stay as strings\n\n    @staticmethod\n    def _coerce_safe(value, value_type):\n        \"\"\"Best-effort coercion with graceful fallback to original value.\"\"\"\n        try:\n            return DistributedValue._coerce(value, value_type)\n        except (TypeError, ValueError):\n            return value\n\n    def distribute(self, default_value, worker_values=\"{}\", is_worker=False, worker_id=\"\"):\n        values = {}\n        value_type = \"STRING\"\n\n        try:\n            values = json.loads(worker_values) if isinstance(worker_values, str) else worker_values\n            if not isinstance(values, dict):\n                values = {}\n        except json.JSONDecodeError as e:\n            debug_log(f\"DistributedValue - Error parsing worker_values: {e}\")\n            values = {}\n\n        value_type = values.get(\"_type\", \"STRING\")\n        coerced_default = self._coerce_safe(default_value, value_type)\n\n        if not is_worker:\n            debug_log(f\"DistributedValue - Master: returning default '{coerced_default}'\")\n            return (coerced_default,)\n\n        try:\n            if worker_id.startswith(\"worker_\"):\n                idx = int(worker_id.split(\"_\")[1])\n            else:\n                idx = int(worker_id)\n            key = str(idx + 1)  # worker_0 → key \"1\" (1-indexed)\n            raw = values.get(key, \"\")\n            if raw:\n                coerced = self._coerce(raw, value_type)\n                debug_log(f\"DistributedValue - Worker {idx}: returning '{coerced}'\")\n                return (coerced,)\n        except (ValueError, IndexError) as e:\n            debug_log(f\"DistributedValue - Error: {e}\")\n        debug_log(f\"DistributedValue - Worker fallback: returning default '{coerced_default}'\")\n        return (coerced_default,)\n\nclass DistributedModelName:\n    @classmethod\n    def INPUT_TYPES(cls):\n        return {\n            \"required\": {\n               
 \"text\": (\"STRING\", {\"default\": \"\"}),\n            },\n            \"hidden\": {\n                \"unique_id\": \"UNIQUE_ID\",\n                \"extra_pnginfo\": \"EXTRA_PNGINFO\",\n            },\n        }\n\n    RETURN_TYPES = (any_type,)\n    RETURN_NAMES = (\"output\",)\n    FUNCTION = \"log_input\"\n    OUTPUT_NODE = True\n    CATEGORY = \"utils\"\n\n    def _stringify(self, value):\n        if isinstance(value, str):\n            return value\n        if isinstance(value, (int, float, bool)):\n            return str(value)\n        try:\n            return json.dumps(value, indent=4)\n        except Exception:\n            return str(value)\n\n    def _update_workflow(self, extra_pnginfo, unique_id, values):\n        if not extra_pnginfo:\n            return\n        info = extra_pnginfo[0] if isinstance(extra_pnginfo, list) else extra_pnginfo\n        if not isinstance(info, dict) or \"workflow\" not in info:\n            return\n        node_id = None\n        if isinstance(unique_id, list) and unique_id:\n            node_id = str(unique_id[0])\n        elif unique_id is not None:\n            node_id = str(unique_id)\n        if not node_id:\n            return\n        workflow = info[\"workflow\"]\n        node = next((x for x in workflow[\"nodes\"] if str(x.get(\"id\")) == node_id), None)\n        if node:\n            node[\"widgets_values\"] = [values]\n\n    def log_input(self, text, unique_id=None, extra_pnginfo=None):\n        values = []\n        if isinstance(text, list):\n            for val in text:\n                values.append(self._stringify(val))\n        else:\n            values.append(self._stringify(text))\n\n        # Keep widget display in workflow metadata if available.\n        self._update_workflow(extra_pnginfo, unique_id, values)\n\n        if isinstance(values, list) and len(values) == 1:\n            return {\"ui\": {\"text\": values}, \"result\": (values[0],)}\n        return {\"ui\": {\"text\": values}, 
\"result\": (values,)}\n\nclass ByPassTypeTuple(tuple):\n    def __getitem__(self, index):\n        if index > 0:\n            index = 0\n        item = super().__getitem__(index)\n        if isinstance(item, str):\n            return any_type\n        return item\n\nclass ImageBatchDivider:\n    @classmethod\n    def INPUT_TYPES(s):\n        return {\n            \"required\": {\n                \"images\": (\"IMAGE\",),\n                \"divide_by\": (\"INT\", {\n                    \"default\": 2, \n                    \"min\": 1, \n                    \"max\": 10, \n                    \"step\": 1,\n                    \"display\": \"number\",\n                    \"tooltip\": \"Number of parts to divide the batch into\"\n                }),\n            }\n        }\n    \n    RETURN_TYPES = ByPassTypeTuple((\"IMAGE\", ))  # Flexible for variable outputs\n    RETURN_NAMES = ByPassTypeTuple(tuple([f\"batch_{i+1}\" for i in range(10)]))\n    FUNCTION = \"divide_batch\"\n    OUTPUT_NODE = True\n    CATEGORY = \"image\"\n    \n    def divide_batch(self, images, divide_by):\n        total_splits = max(1, min(int(divide_by), 10))\n        total_frames = images.shape[0]\n        empty_tensor = images[:0]\n        bounds = _chunk_bounds(total_frames, total_splits)\n        outputs = [images[start:end] if end > start else empty_tensor for start, end in bounds]\n\n        while len(outputs) < 10:\n            outputs.append(empty_tensor)\n\n        return tuple(outputs[:10])\n\n\nclass AudioBatchDivider:\n    \"\"\"Divides an audio waveform into multiple parts along the time/samples dimension.\"\"\"\n\n    @classmethod\n    def INPUT_TYPES(s):\n        return {\n            \"required\": {\n                \"audio\": (\"AUDIO\",),\n                \"divide_by\": (\"INT\", {\n                    \"default\": 2,\n                    \"min\": 1,\n                    \"max\": 10,\n                    \"step\": 1,\n                    \"display\": \"number\",\n              
      \"tooltip\": \"Number of parts to divide the audio into\"\n                }),\n            }\n        }\n\n    RETURN_TYPES = ByPassTypeTuple((\"AUDIO\",))  # Flexible for variable outputs\n    RETURN_NAMES = ByPassTypeTuple(tuple([f\"audio_{i+1}\" for i in range(10)]))\n    FUNCTION = \"divide_audio\"\n    OUTPUT_NODE = True\n    CATEGORY = \"audio\"\n\n    def divide_audio(self, audio, divide_by):\n        import torch\n\n        waveform = audio.get(\"waveform\")\n        sample_rate = audio.get(\"sample_rate\", 44100)\n\n        if waveform is None or waveform.numel() == 0:\n            # Return empty audio for all outputs\n            empty_audio = {\"waveform\": torch.zeros(1, 2, 1), \"sample_rate\": sample_rate}\n            return tuple([empty_audio] * 10)\n\n        total_splits = max(1, min(int(divide_by), 10))\n        total_samples = int(waveform.shape[-1])\n        bounds = _chunk_bounds(total_samples, total_splits)\n\n        outputs = []\n        empty_waveform = waveform[..., :0]\n        for start, end in bounds:\n            split_waveform = waveform[..., start:end] if end > start else empty_waveform\n            outputs.append({\n                \"waveform\": split_waveform,\n                \"sample_rate\": sample_rate\n            })\n\n        # Pad with empty audio up to max (10) to match RETURN_TYPES length\n        empty_audio = {\n            \"waveform\": empty_waveform,\n            \"sample_rate\": sample_rate\n        }\n\n        while len(outputs) < 10:\n            outputs.append(empty_audio)\n\n        return tuple(outputs)\n\n\nclass DistributedEmptyImage:\n    \"\"\"Produces an empty IMAGE batch used when the master delegates all work.\"\"\"\n\n    @classmethod\n    def INPUT_TYPES(cls):\n        return {\n            \"required\": {\n                \"height\": (\"INT\", {\"default\": 64, \"min\": 1, \"max\": 4096, \"step\": 1}),\n                \"width\": (\"INT\", {\"default\": 64, \"min\": 1, \"max\": 4096, \"step\": 
1}),\n                \"channels\": (\"INT\", {\"default\": 3, \"min\": 1, \"max\": 4, \"step\": 1}),\n            }\n        }\n\n    RETURN_TYPES = (\"IMAGE\",)\n    FUNCTION = \"create\"\n    CATEGORY = \"image\"\n\n    def create(self, height, width, channels):\n        import torch\n\n        shape = (0, height, width, channels)\n        tensor = torch.zeros(shape, dtype=torch.float32)\n        return (tensor,)\n"
  },
  {
    "path": "package.json",
    "content": "{\n  \"name\": \"comfyui-distributed-web-tests\",\n  \"private\": true,\n  \"type\": \"module\",\n  \"scripts\": {\n    \"test:web\": \"bash ./scripts/test-web.sh\",\n    \"test:web:watch\": \"bash ./scripts/test-web.sh --watch\"\n  },\n  \"devDependencies\": {\n    \"vitest\": \"^2.1.9\"\n  }\n}\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[project]\nname = \"ComfyUI-Distributed\"\ndescription = \"ComfyUI extension that enables multi-GPU processing locally, remotely and in the cloud\"\nversion = \"1.4.4\"\nlicense = {file = \"LICENSE\"}\ndependencies  = []\n\n[project.urls]\nRepository = \"https://github.com/robertvoy/ComfyUI-Distributed\"\n#  Used by Comfy Registry https://comfyregistry.org\n\n[tool.comfy]\nPublisherId = \"robertvoy\"\nDisplayName = \"ComfyUI-Distributed\"\nIcon = \"https://raw.githubusercontent.com/robertvoy/ComfyUI-Distributed/refs/heads/main/web/distributed-logo-icon.png\"\n\n[tool.pytest.ini_options]\ntestpaths = [\"tests\"]\npythonpath = [\".\"]\n"
  },
  {
    "path": "scripts/test-web.sh",
    "content": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd -- \"$(dirname -- \"${BASH_SOURCE[0]}\")\" && pwd)\"\nREPO_ROOT=\"$(cd -- \"${SCRIPT_DIR}/..\" && pwd)\"\n\nexport NVM_DIR=\"${NVM_DIR:-$HOME/.nvm}\"\nif [[ -s \"${NVM_DIR}/nvm.sh\" ]]; then\n    # shellcheck source=/dev/null\n    . \"${NVM_DIR}/nvm.sh\"\nfi\n\nif ! command -v node >/dev/null 2>&1 || ! command -v npm >/dev/null 2>&1; then\n    echo \"[test-web] node/npm are not available.\" >&2\n    echo \"[test-web] Install nvm and Node, or ensure node/npm are on PATH.\" >&2\n    exit 1\nfi\n\nif [[ -f \"${REPO_ROOT}/.nvmrc\" ]] && command -v nvm >/dev/null 2>&1; then\n    nvm use >/dev/null\nfi\n\ncd \"${REPO_ROOT}\"\n\nif [[ \"${1:-}\" == \"--watch\" ]]; then\n    exec npx vitest web/tests\nfi\n\nexec npx vitest run web/tests\n"
  },
  {
    "path": "tests/api/test_config_routes.py",
    "content": "import copy\nimport importlib.util\nimport sys\nimport types\nimport unittest\nfrom pathlib import Path\nfrom unittest.mock import patch\n\n\nclass _FakeResponse:\n    def __init__(self, payload, status=200):\n        self.payload = payload\n        self.status = status\n\n\nclass _FakeRequest:\n    def __init__(self, payload=None):\n        self._payload = payload\n\n    async def json(self):\n        return self._payload\n\n\ndef _load_config_routes_module():\n    module_path = Path(__file__).resolve().parents[2] / \"api\" / \"config_routes.py\"\n    package_name = \"dist_api_config_testpkg\"\n\n    for mod_name in list(sys.modules):\n        if mod_name == package_name or mod_name.startswith(f\"{package_name}.\"):\n            del sys.modules[mod_name]\n\n    root_pkg = types.ModuleType(package_name)\n    root_pkg.__path__ = []\n    sys.modules[package_name] = root_pkg\n\n    api_pkg = types.ModuleType(f\"{package_name}.api\")\n    api_pkg.__path__ = []\n    sys.modules[f\"{package_name}.api\"] = api_pkg\n\n    utils_pkg = types.ModuleType(f\"{package_name}.utils\")\n    utils_pkg.__path__ = []\n    sys.modules[f\"{package_name}.utils\"] = utils_pkg\n\n    created_aiohttp_stub = False\n    if \"aiohttp\" not in sys.modules:\n        created_aiohttp_stub = True\n        aiohttp_module = types.ModuleType(\"aiohttp\")\n        aiohttp_module.web = types.SimpleNamespace(\n            json_response=lambda payload, status=200: _FakeResponse(payload, status=status)\n        )\n        sys.modules[\"aiohttp\"] = aiohttp_module\n\n    class _Routes:\n        def get(self, _path):\n            def _decorator(fn):\n                return fn\n            return _decorator\n\n        def post(self, _path):\n            def _decorator(fn):\n                return fn\n            return _decorator\n\n    server_module = types.ModuleType(\"server\")\n    server_module.PromptServer = types.SimpleNamespace(instance=types.SimpleNamespace(routes=_Routes()))\n    
sys.modules[\"server\"] = server_module\n\n    logging_module = types.ModuleType(f\"{package_name}.utils.logging\")\n    logging_module.debug_log = lambda *_args, **_kwargs: None\n    logging_module.log = lambda *_args, **_kwargs: None\n    sys.modules[f\"{package_name}.utils.logging\"] = logging_module\n\n    network_module = types.ModuleType(f\"{package_name}.utils.network\")\n\n    async def _handle_api_error(_request, error, status=500):\n        return _FakeResponse({\"status\": \"error\", \"message\": str(error)}, status=status)\n\n    network_module.handle_api_error = _handle_api_error\n    network_module.normalize_host = lambda value: value\n    sys.modules[f\"{package_name}.utils.network\"] = network_module\n\n    default_config = {\n        \"workers\": [],\n        \"master\": {\"host\": \"\"},\n        \"settings\": {\"debug\": False},\n        \"tunnel\": {},\n    }\n\n    config_module = types.ModuleType(f\"{package_name}.utils.config\")\n    config_module.load_config = lambda: copy.deepcopy(default_config)\n    config_module.save_config = lambda _cfg: True\n    sys.modules[f\"{package_name}.utils.config\"] = config_module\n\n    spec = importlib.util.spec_from_file_location(f\"{package_name}.api.config_routes\", module_path)\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n\n    if created_aiohttp_stub:\n        sys.modules.pop(\"aiohttp\", None)\n\n    return module\n\n\nconfig_routes = _load_config_routes_module()\n\n\nclass ConfigRoutesTests(unittest.IsolatedAsyncioTestCase):\n    async def test_get_config_returns_core_sections(self):\n        cfg = {\"workers\": [], \"master\": {}, \"settings\": {}, \"tunnel\": {}}\n        with patch.object(config_routes, \"load_config\", return_value=cfg):\n            response = await config_routes.get_config_endpoint(_FakeRequest())\n\n        self.assertEqual(response.status, 200)\n        
self.assertIn(\"workers\", response.payload)\n        self.assertIn(\"master\", response.payload)\n        self.assertIn(\"settings\", response.payload)\n\n    async def test_update_config_valid_field_persists(self):\n        cfg = {\"workers\": [], \"master\": {}, \"settings\": {\"debug\": False}, \"tunnel\": {}}\n        with patch.object(config_routes, \"load_config\", return_value=cfg), patch.object(\n            config_routes, \"save_config\", return_value=True\n        ):\n            response = await config_routes.update_config_endpoint(_FakeRequest({\"debug\": True}))\n\n        self.assertEqual(response.status, 200)\n        self.assertEqual(response.payload[\"status\"], \"success\")\n        self.assertTrue(response.payload[\"config\"][\"settings\"][\"debug\"])\n\n    async def test_update_config_unknown_field_returns_400(self):\n        cfg = {\"workers\": [], \"master\": {}, \"settings\": {\"debug\": False}, \"tunnel\": {}}\n        with patch.object(config_routes, \"load_config\", return_value=cfg):\n            response = await config_routes.update_config_endpoint(_FakeRequest({\"unknown_field\": 1}))\n\n        self.assertEqual(response.status, 400)\n        self.assertIn(\"unknown_field\", \" \".join(response.payload.get(\"error\", [])).lower())\n\n    async def test_update_config_wrong_type_returns_400(self):\n        cfg = {\"workers\": [], \"master\": {}, \"settings\": {\"debug\": False}, \"tunnel\": {}}\n        with patch.object(config_routes, \"load_config\", return_value=cfg):\n            response = await config_routes.update_config_endpoint(_FakeRequest({\"debug\": \"true\"}))\n\n        self.assertEqual(response.status, 400)\n        self.assertIn(\"debug\", \" \".join(response.payload.get(\"error\", [])).lower())\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/api/test_distributed_queue.py",
    "content": "import importlib.util\nimport sys\nimport types\nimport unittest\nimport asyncio\nimport base64\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom unittest.mock import AsyncMock, patch\n\nimport numpy as np\nimport torch\n\n\nclass _FakeResponse:\n    def __init__(self, payload, status=200):\n        self.payload = payload\n        self.status = status\n\n\nclass _FakeRequest:\n    def __init__(self, payload):\n        self._payload = payload\n\n    async def json(self):\n        return self._payload\n\n\ndef _load_job_routes_module():\n    module_path = Path(__file__).resolve().parents[2] / \"api\" / \"job_routes.py\"\n    package_name = \"dist_api_queue_testpkg\"\n\n    # Reset package namespace to avoid stale module state across test runs.\n    for mod_name in list(sys.modules):\n        if mod_name == package_name or mod_name.startswith(f\"{package_name}.\"):\n            del sys.modules[mod_name]\n\n    root_pkg = types.ModuleType(package_name)\n    root_pkg.__path__ = []\n    sys.modules[package_name] = root_pkg\n\n    api_pkg = types.ModuleType(f\"{package_name}.api\")\n    api_pkg.__path__ = []\n    sys.modules[f\"{package_name}.api\"] = api_pkg\n\n    utils_pkg = types.ModuleType(f\"{package_name}.utils\")\n    utils_pkg.__path__ = []\n    sys.modules[f\"{package_name}.utils\"] = utils_pkg\n\n    # aiohttp.web stub\n    created_aiohttp_stub = False\n    if \"aiohttp\" not in sys.modules:\n        created_aiohttp_stub = True\n        aiohttp_module = types.ModuleType(\"aiohttp\")\n        aiohttp_module.web = types.SimpleNamespace(\n            json_response=lambda payload, status=200: _FakeResponse(payload, status=status)\n        )\n        sys.modules[\"aiohttp\"] = aiohttp_module\n\n    # server module stub with route decorators\n    class _Routes:\n        def get(self, _path):\n            def _decorator(fn):\n                return fn\n            return _decorator\n\n        def post(self, _path):\n            def 
_decorator(fn):\n                return fn\n            return _decorator\n\n    prompt_server_instance = types.SimpleNamespace(\n        routes=_Routes(),\n        distributed_jobs_lock=None,\n        distributed_pending_jobs={},\n    )\n    server_module = types.ModuleType(\"server\")\n    server_module.PromptServer = types.SimpleNamespace(instance=prompt_server_instance)\n    sys.modules[\"server\"] = server_module\n\n    # torch stub (only needed to satisfy import)\n    created_torch_stub = False\n    if \"torch\" not in sys.modules:\n        created_torch_stub = True\n        torch_module = types.ModuleType(\"torch\")\n        torch_module.cuda = types.SimpleNamespace(\n            is_available=lambda: False,\n            empty_cache=lambda: None,\n            ipc_collect=lambda: None,\n        )\n        sys.modules[\"torch\"] = torch_module\n\n    # PIL stub (only needed to satisfy import)\n    created_pil_stub = False\n    if \"PIL\" not in sys.modules:\n        created_pil_stub = True\n        pil_module = types.ModuleType(\"PIL\")\n        image_module = types.ModuleType(\"PIL.Image\")\n        pil_module.Image = image_module\n        sys.modules[\"PIL\"] = pil_module\n        sys.modules[\"PIL.Image\"] = image_module\n\n    logging_module = types.ModuleType(f\"{package_name}.utils.logging\")\n    logging_module.debug_log = lambda *_args, **_kwargs: None\n    logging_module.log = lambda *_args, **_kwargs: None\n    sys.modules[f\"{package_name}.utils.logging\"] = logging_module\n\n    image_module = types.ModuleType(f\"{package_name}.utils.image\")\n    image_module.pil_to_tensor = lambda *_args, **_kwargs: None\n    image_module.ensure_contiguous = lambda tensor: tensor\n    sys.modules[f\"{package_name}.utils.image\"] = image_module\n\n    audio_payload_module = types.ModuleType(f\"{package_name}.utils.audio_payload\")\n\n    def _decode_audio_payload(payload):\n        if payload is None:\n            return None\n        if not isinstance(payload, 
dict):\n            raise ValueError(\"Field 'audio' must be an object when provided.\")\n\n        encoded = payload.get(\"data\")\n        shape = payload.get(\"shape\")\n        dtype = payload.get(\"dtype\", \"float32\")\n        sample_rate = payload.get(\"sample_rate\", 44100)\n        if not isinstance(encoded, str) or not encoded.strip():\n            raise ValueError(\"Field 'audio.data' must be a non-empty base64 string.\")\n        if not isinstance(shape, list) or len(shape) != 3:\n            raise ValueError(\"Field 'audio.shape' must be a 3-item list.\")\n        if dtype != \"float32\":\n            raise ValueError(\"Field 'audio.dtype' must be 'float32'.\")\n        try:\n            shape_tuple = tuple(int(dim) for dim in shape)\n        except Exception as exc:\n            raise ValueError(\"Field 'audio.shape' must contain integers.\") from exc\n\n        raw = base64.b64decode(encoded, validate=True)\n        expected_bytes = int(np.prod(shape_tuple, dtype=np.int64)) * 4\n        if len(raw) != expected_bytes:\n            raise ValueError(\"Field 'audio.data' byte size mismatch.\")\n\n        waveform = torch.from_numpy(np.frombuffer(raw, dtype=np.float32).reshape(shape_tuple).copy())\n        return {\"waveform\": waveform, \"sample_rate\": int(sample_rate)}\n\n    audio_payload_module.decode_audio_payload = _decode_audio_payload\n    sys.modules[f\"{package_name}.utils.audio_payload\"] = audio_payload_module\n\n    network_module = types.ModuleType(f\"{package_name}.utils.network\")\n\n    async def _handle_api_error(_request, error, status=500):\n        return _FakeResponse({\"status\": \"error\", \"message\": str(error)}, status=status)\n\n    network_module.handle_api_error = _handle_api_error\n    sys.modules[f\"{package_name}.utils.network\"] = network_module\n\n    constants_module = types.ModuleType(f\"{package_name}.utils.constants\")\n    constants_module.MEMORY_CLEAR_DELAY = 0.0\n    constants_module.JOB_INIT_GRACE_PERIOD = 
10.0\n    sys.modules[f\"{package_name}.utils.constants\"] = constants_module\n\n    async_helpers_module = types.ModuleType(f\"{package_name}.utils.async_helpers\")\n    async_helpers_module.queue_prompt_payload = AsyncMock(return_value=\"prompt_local\")\n    sys.modules[f\"{package_name}.utils.async_helpers\"] = async_helpers_module\n\n    queue_orchestration_module = types.ModuleType(f\"{package_name}.api.queue_orchestration\")\n    queue_orchestration_module.orchestrate_distributed_execution = AsyncMock(return_value=(\"prompt_dist\", 7, 1, {}))\n    sys.modules[f\"{package_name}.api.queue_orchestration\"] = queue_orchestration_module\n\n    @dataclass(frozen=True)\n    class _QueuePayload:\n        prompt: dict\n        workflow_meta: object\n        client_id: str\n        delegate_master: object\n        enabled_worker_ids: list\n        auto_prepare: bool\n        trace_execution_id: object\n\n    def _parse_queue_request_payload(data):\n        if not isinstance(data, dict):\n            raise ValueError(\"Expected a JSON object body\")\n        prompt = data.get(\"prompt\")\n        if not isinstance(prompt, dict):\n            raise ValueError(\"Field 'prompt' must be an object\")\n        enabled = data.get(\"enabled_worker_ids\")\n        if not isinstance(enabled, list):\n            raise ValueError(\"enabled_worker_ids required\")\n        client_id = data.get(\"client_id\")\n        if not isinstance(client_id, str) or not client_id.strip():\n            raise ValueError(\"client_id required\")\n        return _QueuePayload(\n            prompt=prompt,\n            workflow_meta=data.get(\"workflow\"),\n            client_id=client_id,\n            delegate_master=data.get(\"delegate_master\"),\n            enabled_worker_ids=enabled,\n            auto_prepare=bool(data.get(\"auto_prepare\", True)),\n            trace_execution_id=data.get(\"trace_execution_id\"),\n        )\n\n    queue_request_module = 
types.ModuleType(f\"{package_name}.api.queue_request\")\n    queue_request_module.parse_queue_request_payload = _parse_queue_request_payload\n    sys.modules[f\"{package_name}.api.queue_request\"] = queue_request_module\n\n    spec = importlib.util.spec_from_file_location(f\"{package_name}.api.job_routes\", module_path)\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n\n    if created_aiohttp_stub:\n        sys.modules.pop(\"aiohttp\", None)\n    if created_torch_stub:\n        sys.modules.pop(\"torch\", None)\n    if created_pil_stub:\n        sys.modules.pop(\"PIL.Image\", None)\n        sys.modules.pop(\"PIL\", None)\n\n    return module\n\n\njob_routes = _load_job_routes_module()\n\n\nclass DistributedQueueEndpointTests(unittest.IsolatedAsyncioTestCase):\n    async def test_distributed_queue_happy_path_returns_prompt_metadata(self):\n        request = _FakeRequest(\n            {\n                \"prompt\": {\"1\": {\"class_type\": \"Node\"}},\n                \"enabled_worker_ids\": [\"w1\"],\n                \"client_id\": \"client-1\",\n                \"auto_prepare\": True,\n            }\n        )\n        with patch.object(\n            job_routes,\n            \"orchestrate_distributed_execution\",\n            new=AsyncMock(return_value=(\"prompt_123\", 42, 2, {})),\n        ):\n            response = await job_routes.distributed_queue_endpoint(request)\n\n        self.assertEqual(response.status, 200)\n        self.assertEqual(response.payload.get(\"prompt_id\"), \"prompt_123\")\n        self.assertEqual(response.payload.get(\"number\"), 42)\n        self.assertEqual(response.payload.get(\"node_errors\"), {})\n        self.assertTrue(response.payload.get(\"auto_prepare_supported\"))\n\n    async def test_distributed_queue_missing_prompt_returns_400(self):\n        request = _FakeRequest(\n            {\n                \"enabled_worker_ids\": 
[\"w1\"],\n                \"client_id\": \"client-1\",\n            }\n        )\n        response = await job_routes.distributed_queue_endpoint(request)\n        self.assertEqual(response.status, 400)\n        self.assertIn(\"prompt\", response.payload.get(\"message\", \"\").lower())\n\n    async def test_distributed_queue_missing_enabled_worker_ids_returns_400(self):\n        request = _FakeRequest(\n            {\n                \"prompt\": {\"1\": {\"class_type\": \"Node\"}},\n                \"client_id\": \"client-1\",\n            }\n        )\n        response = await job_routes.distributed_queue_endpoint(request)\n        self.assertEqual(response.status, 400)\n        self.assertIn(\"enabled_worker_ids\", response.payload.get(\"message\", \"\").lower())\n\n\nclass JobCompleteAudioPayloadTests(unittest.IsolatedAsyncioTestCase):\n    def _encoded_audio_payload(self):\n        waveform = np.arange(8, dtype=np.float32).reshape(1, 2, 4)\n        return {\n            \"sample_rate\": 44100,\n            \"shape\": [1, 2, 4],\n            \"dtype\": \"float32\",\n            \"data\": base64.b64encode(waveform.tobytes()).decode(\"ascii\"),\n        }\n\n    async def test_job_complete_accepts_audio_payload(self):\n        queue = asyncio.Queue()\n        job_routes.prompt_server.distributed_jobs_lock = asyncio.Lock()\n        job_routes.prompt_server.distributed_pending_jobs = {\"job-1\": queue}\n        request = _FakeRequest(\n            {\n                \"job_id\": \"job-1\",\n                \"worker_id\": \"worker-1\",\n                \"batch_idx\": 0,\n                \"image\": \"data:image/png;base64,AAAA\",\n                \"audio\": self._encoded_audio_payload(),\n                \"is_last\": True,\n            }\n        )\n\n        with patch.object(job_routes, \"_decode_canonical_png_tensor\", return_value=\"tensor-data\"):\n            response = await job_routes.job_complete_endpoint(request)\n\n        self.assertEqual(response.status, 
200)\n        queued = await queue.get()\n        self.assertEqual(queued[\"worker_id\"], \"worker-1\")\n        self.assertTrue(queued[\"is_last\"])\n        self.assertIsNotNone(queued[\"audio\"])\n        self.assertEqual(queued[\"audio\"][\"sample_rate\"], 44100)\n        self.assertEqual(tuple(queued[\"audio\"][\"waveform\"].shape), (1, 2, 4))\n\n    def test_decode_audio_payload_rejects_bad_shape(self):\n        bad = {\n            \"sample_rate\": 44100,\n            \"shape\": [1, 2],\n            \"dtype\": \"float32\",\n            \"data\": base64.b64encode(b\"\\x00\\x00\\x00\\x00\").decode(\"ascii\"),\n        }\n        with self.assertRaises(ValueError):\n            job_routes._decode_audio_payload(bad)\n\n    def test_decode_audio_payload_rejects_bad_dtype(self):\n        payload = {\n            \"sample_rate\": 44100,\n            \"shape\": [1, 2, 4],\n            \"dtype\": \"float16\",\n            \"data\": base64.b64encode((np.zeros((1, 2, 4), dtype=np.float32)).tobytes()).decode(\"ascii\"),\n        }\n        with self.assertRaises(ValueError):\n            job_routes._decode_audio_payload(payload)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/api/test_media_sync.py",
    "content": "import importlib.util\nimport sys\nimport types\nimport unittest\nfrom pathlib import Path\n\n\ndef _load_media_sync_module():\n    module_path = Path(__file__).resolve().parents[2] / \"api\" / \"orchestration\" / \"media_sync.py\"\n    package_name = \"dist_ms_testpkg\"\n\n    for mod_name in list(sys.modules):\n        if mod_name == package_name or mod_name.startswith(f\"{package_name}.\"):\n            del sys.modules[mod_name]\n\n    root_pkg = types.ModuleType(package_name)\n    root_pkg.__path__ = []\n    sys.modules[package_name] = root_pkg\n\n    api_pkg = types.ModuleType(f\"{package_name}.api\")\n    api_pkg.__path__ = []\n    sys.modules[f\"{package_name}.api\"] = api_pkg\n\n    orch_pkg = types.ModuleType(f\"{package_name}.api.orchestration\")\n    orch_pkg.__path__ = []\n    sys.modules[f\"{package_name}.api.orchestration\"] = orch_pkg\n\n    utils_pkg = types.ModuleType(f\"{package_name}.utils\")\n    utils_pkg.__path__ = []\n    sys.modules[f\"{package_name}.utils\"] = utils_pkg\n\n    logging_module = types.ModuleType(f\"{package_name}.utils.logging\")\n    logging_module.debug_log = lambda *_args, **_kwargs: None\n    logging_module.log = lambda *_args, **_kwargs: None\n    sys.modules[f\"{package_name}.utils.logging\"] = logging_module\n\n    network_module = types.ModuleType(f\"{package_name}.utils.network\")\n    network_module.build_worker_url = lambda worker, endpoint=\"\": f\"http://localhost{endpoint}\"\n\n    async def _fake_session():\n        raise RuntimeError(\"network calls not used in pure-function tests\")\n\n    network_module.get_client_session = _fake_session\n    sys.modules[f\"{package_name}.utils.network\"] = network_module\n\n    trace_module = types.ModuleType(f\"{package_name}.utils.trace_logger\")\n    trace_module.trace_debug = lambda *_args, **_kwargs: None\n    trace_module.trace_info = lambda *_args, **_kwargs: None\n    sys.modules[f\"{package_name}.utils.trace_logger\"] = trace_module\n\n    
created_aiohttp_stub = False\n    if \"aiohttp\" not in sys.modules:\n        created_aiohttp_stub = True\n        aiohttp_module = types.ModuleType(\"aiohttp\")\n\n        class _ClientTimeout:\n            def __init__(self, total=None):\n                pass\n\n        class _FormData:\n            def add_field(self, *args, **kwargs):\n                pass\n\n        aiohttp_module.ClientTimeout = _ClientTimeout\n        aiohttp_module.FormData = _FormData\n        sys.modules[\"aiohttp\"] = aiohttp_module\n\n    spec = importlib.util.spec_from_file_location(\n        f\"{package_name}.api.orchestration.media_sync\",\n        module_path,\n    )\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n\n    if created_aiohttp_stub:\n        sys.modules.pop(\"aiohttp\", None)\n\n    return module\n\n\nms = _load_media_sync_module()\n\n\n# ---------------------------------------------------------------------------\n# convert_paths_for_platform\n# ---------------------------------------------------------------------------\n\nclass ConvertPathsForPlatformTests(unittest.TestCase):\n    def test_forward_slash_target_normalises_backslashes(self):\n        obj = {\"ckpt_name\": \"C:\\\\Models\\\\model.safetensors\"}\n        result = ms.convert_paths_for_platform(obj, \"/\")\n        self.assertEqual(result[\"ckpt_name\"], \"C:/Models/model.safetensors\")\n\n    def test_backslash_target_normalises_forward_slashes(self):\n        obj = {\"ckpt_name\": \"/models/checkpoints/model.safetensors\"}\n        result = ms.convert_paths_for_platform(obj, \"\\\\\")\n        self.assertIn(\"\\\\\", result[\"ckpt_name\"])\n        self.assertNotIn(\"/\", result[\"ckpt_name\"])\n\n    def test_relative_media_paths_always_stay_forward_slash(self):\n        \"\"\"Relative image/video/audio paths (Comfy annotated style) must not be backslash-ified.\"\"\"\n        obj = {\"image\": 
\"subfolder/my_photo.png\"}\n        result = ms.convert_paths_for_platform(obj, \"\\\\\")\n        self.assertEqual(result[\"image\"], \"subfolder/my_photo.png\")\n\n    def test_relative_audio_paths_stay_forward_slash(self):\n        obj = {\"audio\": \"subfolder/my_track.wav\"}\n        result = ms.convert_paths_for_platform(obj, \"\\\\\")\n        self.assertEqual(result[\"audio\"], \"subfolder/my_track.wav\")\n\n    def test_annotated_relative_media_path_stays_forward_slash(self):\n        obj = {\"image\": \"input/frame.jpg [abc123]\"}\n        result = ms.convert_paths_for_platform(obj, \"\\\\\")\n        self.assertIn(\"/\", result[\"image\"])\n        self.assertNotIn(\"\\\\\", result[\"image\"].split(\"[\")[0])\n\n    def test_non_filename_strings_are_untouched(self):\n        obj = {\"prompt\": \"a beautiful cat\", \"count\": 5}\n        result = ms.convert_paths_for_platform(obj, \"\\\\\")\n        self.assertEqual(result[\"prompt\"], \"a beautiful cat\")\n        self.assertEqual(result[\"count\"], 5)\n\n    def test_url_strings_are_untouched(self):\n        obj = {\"url\": \"https://example.com/model.safetensors\"}\n        result = ms.convert_paths_for_platform(obj, \"\\\\\")\n        self.assertEqual(result[\"url\"], \"https://example.com/model.safetensors\")\n\n    def test_invalid_separator_returns_obj_unchanged(self):\n        obj = {\"ckpt_name\": \"/models/model.safetensors\"}\n        result = ms.convert_paths_for_platform(obj, \"|\")\n        self.assertEqual(result, obj)\n\n    def test_nested_dict_is_processed_recursively(self):\n        obj = {\"node\": {\"ckpt_name\": \"C:\\\\Models\\\\model.safetensors\"}}\n        result = ms.convert_paths_for_platform(obj, \"/\")\n        self.assertEqual(result[\"node\"][\"ckpt_name\"], \"C:/Models/model.safetensors\")\n\n    def test_list_items_are_processed_recursively(self):\n        obj = [{\"ckpt_name\": \"C:\\\\Models\\\\model.safetensors\"}, \"plain string\"]\n        result = 
ms.convert_paths_for_platform(obj, \"/\")\n        self.assertEqual(result[0][\"ckpt_name\"], \"C:/Models/model.safetensors\")\n        self.assertEqual(result[1], \"plain string\")\n\n    def test_non_string_scalar_values_are_untouched(self):\n        obj = {\"seed\": 42, \"enabled\": True, \"ratio\": 1.5}\n        result = ms.convert_paths_for_platform(obj, \"/\")\n        self.assertEqual(result[\"seed\"], 42)\n        self.assertTrue(result[\"enabled\"])\n\n    def test_absolute_unix_path_to_windows(self):\n        obj = {\"lora\": \"/home/user/loras/my_lora.safetensors\"}\n        result = ms.convert_paths_for_platform(obj, \"\\\\\")\n        self.assertNotIn(\"/\", result[\"lora\"])\n\n    def test_already_normalised_path_is_idempotent(self):\n        obj = {\"ckpt\": \"C:/Models/model.safetensors\"}\n        result = ms.convert_paths_for_platform(obj, \"/\")\n        self.assertEqual(result[\"ckpt\"], \"C:/Models/model.safetensors\")\n\n\n# ---------------------------------------------------------------------------\n# _find_media_references\n# ---------------------------------------------------------------------------\n\nclass FindMediaReferencesTests(unittest.TestCase):\n    def test_finds_image_input(self):\n        prompt = {\"1\": {\"class_type\": \"LoadImage\", \"inputs\": {\"image\": \"photo.png\"}}}\n        refs = ms._find_media_references(prompt)\n        self.assertIn(\"photo.png\", refs)\n\n    def test_finds_video_input(self):\n        prompt = {\"1\": {\"class_type\": \"LoadVideo\", \"inputs\": {\"video\": \"clip.mp4\"}}}\n        refs = ms._find_media_references(prompt)\n        self.assertIn(\"clip.mp4\", refs)\n\n    def test_finds_file_input_for_load_video(self):\n        prompt = {\"1\": {\"class_type\": \"LoadVideo\", \"inputs\": {\"file\": \"1 - Copy.mp4\"}}}\n        refs = ms._find_media_references(prompt)\n        self.assertIn(\"1 - Copy.mp4\", refs)\n\n    def test_finds_audio_input(self):\n        prompt = {\"1\": {\"class_type\": 
\"LoadAudio\", \"inputs\": {\"audio\": \"track.wav\"}}}\n        refs = ms._find_media_references(prompt)\n        self.assertIn(\"track.wav\", refs)\n\n    def test_strips_annotation_suffix(self):\n        prompt = {\"1\": {\"class_type\": \"LoadImage\", \"inputs\": {\"image\": \"photo.jpg [abc123]\"}}}\n        refs = ms._find_media_references(prompt)\n        self.assertIn(\"photo.jpg\", refs)\n        self.assertFalse(any(\"[\" in r for r in refs))\n\n    def test_normalises_backslashes_in_path(self):\n        prompt = {\"1\": {\"class_type\": \"LoadImage\", \"inputs\": {\"image\": \"sub\\\\img.png\"}}}\n        refs = ms._find_media_references(prompt)\n        self.assertIn(\"sub/img.png\", refs)\n\n    def test_ignores_non_media_text_inputs(self):\n        prompt = {\"1\": {\"class_type\": \"CLIPTextEncode\", \"inputs\": {\"text\": \"a cat\"}}}\n        refs = ms._find_media_references(prompt)\n        self.assertEqual(refs, [])\n\n    def test_ignores_node_link_values(self):\n        \"\"\"Inputs that are [node_id, slot] lists should be ignored.\"\"\"\n        prompt = {\"1\": {\"class_type\": \"Anything\", \"inputs\": {\"image\": [\"2\", 0]}}}\n        refs = ms._find_media_references(prompt)\n        self.assertEqual(refs, [])\n\n    def test_deduplicates_same_file_across_nodes(self):\n        prompt = {\n            \"1\": {\"class_type\": \"LoadImage\", \"inputs\": {\"image\": \"cat.png\"}},\n            \"2\": {\"class_type\": \"LoadImage\", \"inputs\": {\"image\": \"cat.png\"}},\n        }\n        refs = ms._find_media_references(prompt)\n        self.assertEqual(len(refs), 1)\n\n    def test_returns_sorted_list(self):\n        prompt = {\n            \"1\": {\"class_type\": \"LoadImage\", \"inputs\": {\"image\": \"z_image.png\"}},\n            \"2\": {\"class_type\": \"LoadImage\", \"inputs\": {\"image\": \"a_image.jpg\"}},\n        }\n        refs = ms._find_media_references(prompt)\n        self.assertEqual(refs, sorted(refs))\n\n    def 
test_ignores_non_dict_nodes(self):\n        prompt = {\"1\": \"not a node dict\", \"2\": {\"class_type\": \"LoadImage\", \"inputs\": {\"image\": \"img.png\"}}}\n        refs = ms._find_media_references(prompt)\n        self.assertIn(\"img.png\", refs)\n\n    def test_empty_prompt_returns_empty_list(self):\n        self.assertEqual(ms._find_media_references({}), [])\n\n    def test_multiple_media_types_all_found(self):\n        prompt = {\n            \"1\": {\"class_type\": \"LoadImage\", \"inputs\": {\"image\": \"frame.png\"}},\n            \"2\": {\"class_type\": \"LoadVideo\", \"inputs\": {\"video\": \"clip.mp4\"}},\n            \"3\": {\"class_type\": \"LoadAudio\", \"inputs\": {\"audio\": \"track.wav\"}},\n        }\n        refs = ms._find_media_references(prompt)\n        self.assertIn(\"frame.png\", refs)\n        self.assertIn(\"clip.mp4\", refs)\n        self.assertIn(\"track.wav\", refs)\n\n\nclass RewritePromptMediaInputsTests(unittest.TestCase):\n    def test_rewrites_video_file_input_to_worker_path(self):\n        prompt = {\n            \"79\": {\"class_type\": \"LoadVideo\", \"inputs\": {\"file\": \"1 - Copy.mp4\"}},\n        }\n        ms._rewrite_prompt_media_inputs(prompt, {\"1 - Copy.mp4\": \"videos/1 - Copy.mp4\"})\n        self.assertEqual(prompt[\"79\"][\"inputs\"][\"file\"], \"videos/1 - Copy.mp4\")\n\n    def test_rewrites_audio_input_and_strips_annotation_when_matching(self):\n        prompt = {\n            \"1\": {\"class_type\": \"LoadAudio\", \"inputs\": {\"audio\": \"song.wav [input]\"}},\n        }\n        ms._rewrite_prompt_media_inputs(prompt, {\"song.wav\": \"song.wav\"})\n        self.assertEqual(prompt[\"1\"][\"inputs\"][\"audio\"], \"song.wav\")\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/api/test_usdu_routes.py",
    "content": "import asyncio\nimport importlib.util\nimport io\nimport sys\nimport types\nimport unittest\nfrom dataclasses import dataclass, field\nfrom pathlib import Path\n\nfrom PIL import Image\n\n\nclass _FakeResponse:\n    def __init__(self, payload, status=200):\n        self.payload = payload\n        self.status = status\n\n\nclass _FakeRequest:\n    def __init__(self, json_payload=None, post_payload=None, headers=None, query=None):\n        self._json_payload = json_payload\n        self._post_payload = post_payload or {}\n        self.headers = headers or {}\n        self.query = query or {}\n\n    async def json(self):\n        return self._json_payload\n\n    async def post(self):\n        return self._post_payload\n\n\nclass _Routes:\n    def post(self, _path):\n        def _decorator(fn):\n            return fn\n\n        return _decorator\n\n    def get(self, _path):\n        def _decorator(fn):\n            return fn\n\n        return _decorator\n\n\ndef _load_usdu_routes_module():\n    module_path = Path(__file__).resolve().parents[2] / \"api\" / \"usdu_routes.py\"\n    package_name = \"dist_api_usdu_testpkg\"\n\n    for mod_name in list(sys.modules):\n        if mod_name == package_name or mod_name.startswith(f\"{package_name}.\"):\n            del sys.modules[mod_name]\n\n    root_pkg = types.ModuleType(package_name)\n    root_pkg.__path__ = []\n    sys.modules[package_name] = root_pkg\n\n    api_pkg = types.ModuleType(f\"{package_name}.api\")\n    api_pkg.__path__ = []\n    sys.modules[f\"{package_name}.api\"] = api_pkg\n\n    upscale_pkg = types.ModuleType(f\"{package_name}.upscale\")\n    upscale_pkg.__path__ = []\n    sys.modules[f\"{package_name}.upscale\"] = upscale_pkg\n\n    utils_pkg = types.ModuleType(f\"{package_name}.utils\")\n    utils_pkg.__path__ = []\n    sys.modules[f\"{package_name}.utils\"] = utils_pkg\n\n    prompt_server_holder = {\n        \"value\": types.SimpleNamespace(\n            
distributed_tile_jobs_lock=asyncio.Lock(),\n            distributed_pending_tile_jobs={},\n        )\n    }\n\n    created_aiohttp_stub = False\n    if \"aiohttp\" not in sys.modules:\n        created_aiohttp_stub = True\n        aiohttp_module = types.ModuleType(\"aiohttp\")\n        aiohttp_module.web = types.SimpleNamespace(\n            json_response=lambda payload, status=200: _FakeResponse(payload, status=status)\n        )\n        sys.modules[\"aiohttp\"] = aiohttp_module\n\n    server_module = types.ModuleType(\"server\")\n    server_module.PromptServer = types.SimpleNamespace(instance=types.SimpleNamespace(routes=_Routes()))\n    sys.modules[\"server\"] = server_module\n\n    logging_module = types.ModuleType(f\"{package_name}.utils.logging\")\n    logging_module.debug_log = lambda *_args, **_kwargs: None\n    sys.modules[f\"{package_name}.utils.logging\"] = logging_module\n\n    network_module = types.ModuleType(f\"{package_name}.utils.network\")\n\n    async def _handle_api_error(_request, error, status=500):\n        return _FakeResponse({\"status\": \"error\", \"message\": str(error)}, status=status)\n\n    network_module.handle_api_error = _handle_api_error\n    sys.modules[f\"{package_name}.utils.network\"] = network_module\n\n    job_store_module = types.ModuleType(f\"{package_name}.upscale.job_store\")\n    job_store_module.MAX_PAYLOAD_SIZE = 1024\n    job_store_module.ensure_tile_jobs_initialized = lambda: prompt_server_holder[\"value\"]\n    sys.modules[f\"{package_name}.upscale.job_store\"] = job_store_module\n\n    job_models_module = types.ModuleType(f\"{package_name}.upscale.job_models\")\n\n    class BaseJobState:\n        pass\n\n    @dataclass\n    class TileJobState(BaseJobState):\n        multi_job_id: str\n        mode: str = field(default=\"static\", init=False)\n        queue: asyncio.Queue = field(default_factory=asyncio.Queue)\n        pending_tasks: asyncio.Queue = field(default_factory=asyncio.Queue)\n        completed_tasks: 
dict = field(default_factory=dict)\n        worker_status: dict = field(default_factory=dict)\n        assigned_to_workers: dict = field(default_factory=dict)\n        batch_size: int = 0\n        num_tiles_per_image: int = 0\n        batched_static: bool = False\n\n    @dataclass\n    class ImageJobState(BaseJobState):\n        multi_job_id: str\n        mode: str = field(default=\"dynamic\", init=False)\n        queue: asyncio.Queue = field(default_factory=asyncio.Queue)\n        pending_images: asyncio.Queue = field(default_factory=asyncio.Queue)\n        completed_images: dict = field(default_factory=dict)\n        worker_status: dict = field(default_factory=dict)\n        assigned_to_workers: dict = field(default_factory=dict)\n        batch_size: int = 0\n        num_tiles_per_image: int = 0\n        batched_static: bool = False\n\n        @property\n        def pending_tasks(self):\n            return self.pending_images\n\n        @property\n        def completed_tasks(self):\n            return self.completed_images\n\n    job_models_module.BaseJobState = BaseJobState\n    job_models_module.TileJobState = TileJobState\n    job_models_module.ImageJobState = ImageJobState\n    sys.modules[f\"{package_name}.upscale.job_models\"] = job_models_module\n\n    parsers_module = types.ModuleType(f\"{package_name}.upscale.payload_parsers\")\n    parsers_module._parse_tiles_from_form = lambda _data: []\n    sys.modules[f\"{package_name}.upscale.payload_parsers\"] = parsers_module\n\n    spec = importlib.util.spec_from_file_location(f\"{package_name}.api.usdu_routes\", module_path)\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n\n    if created_aiohttp_stub:\n        sys.modules.pop(\"aiohttp\", None)\n\n    module.web = types.SimpleNamespace(\n        json_response=lambda payload, status=200: _FakeResponse(payload, status=status)\n    )\n\n    
module._prompt_server_holder = prompt_server_holder\n    module._TileJobState = TileJobState\n    module._ImageJobState = ImageJobState\n    return module\n\n\nusdu_routes = _load_usdu_routes_module()\n\n\nclass _UploadField:\n    def __init__(self, data):\n        self.file = io.BytesIO(data)\n\n\ndef _tiny_png_bytes():\n    image = Image.new(\"RGB\", (1, 1), (255, 0, 0))\n    buf = io.BytesIO()\n    image.save(buf, format=\"PNG\")\n    return buf.getvalue()\n\n\nclass USDURoutesTests(unittest.IsolatedAsyncioTestCase):\n    async def asyncSetUp(self):\n        usdu_routes._prompt_server_holder[\"value\"] = types.SimpleNamespace(\n            distributed_tile_jobs_lock=asyncio.Lock(),\n            distributed_pending_tile_jobs={},\n        )\n\n    async def test_heartbeat_updates_worker_status(self):\n        prompt_server = usdu_routes._prompt_server_holder[\"value\"]\n        job_data = usdu_routes._TileJobState(\"job-1\")\n        prompt_server.distributed_pending_tile_jobs[\"job-1\"] = job_data\n\n        request = _FakeRequest(json_payload={\"worker_id\": \"worker-a\", \"multi_job_id\": \"job-1\"})\n        response = await usdu_routes.heartbeat_endpoint(request)\n\n        self.assertEqual(response.status, 200)\n        self.assertEqual(response.payload.get(\"status\"), \"success\")\n        self.assertIn(\"worker-a\", job_data.worker_status)\n\n    async def test_heartbeat_missing_fields_returns_400(self):\n        request = _FakeRequest(json_payload={\"worker_id\": \"worker-a\"})\n        response = await usdu_routes.heartbeat_endpoint(request)\n\n        self.assertEqual(response.status, 400)\n        self.assertIn(\"missing\", response.payload.get(\"message\", \"\").lower())\n\n    async def test_request_image_dynamic_assigns_next_image(self):\n        prompt_server = usdu_routes._prompt_server_holder[\"value\"]\n        job_data = usdu_routes._ImageJobState(\"job-2\")\n        await job_data.pending_images.put(7)\n        
prompt_server.distributed_pending_tile_jobs[\"job-2\"] = job_data\n\n        request = _FakeRequest(json_payload={\"worker_id\": \"worker-a\", \"multi_job_id\": \"job-2\"})\n        response = await usdu_routes.request_image_endpoint(request)\n\n        self.assertEqual(response.status, 200)\n        self.assertEqual(response.payload.get(\"image_idx\"), 7)\n        self.assertEqual(response.payload.get(\"estimated_remaining\"), 0)\n        self.assertEqual(job_data.assigned_to_workers[\"worker-a\"], [7])\n        self.assertIn(\"worker-a\", job_data.worker_status)\n\n    async def test_request_image_static_assigns_tile_and_batched_flag(self):\n        prompt_server = usdu_routes._prompt_server_holder[\"value\"]\n        job_data = usdu_routes._TileJobState(\"job-3\")\n        job_data.batched_static = True\n        await job_data.pending_tasks.put(4)\n        prompt_server.distributed_pending_tile_jobs[\"job-3\"] = job_data\n\n        request = _FakeRequest(json_payload={\"worker_id\": \"worker-a\", \"multi_job_id\": \"job-3\"})\n        response = await usdu_routes.request_image_endpoint(request)\n\n        self.assertEqual(response.status, 200)\n        self.assertEqual(response.payload.get(\"tile_idx\"), 4)\n        self.assertTrue(response.payload.get(\"batched_static\"))\n        self.assertEqual(job_data.assigned_to_workers[\"worker-a\"], [4])\n\n    async def test_submit_tiles_completion_signal_enqueues_last_marker(self):\n        prompt_server = usdu_routes._prompt_server_holder[\"value\"]\n        job_data = usdu_routes._TileJobState(\"job-4\")\n        prompt_server.distributed_pending_tile_jobs[\"job-4\"] = job_data\n\n        request = _FakeRequest(\n            post_payload={\n                \"multi_job_id\": \"job-4\",\n                \"worker_id\": \"worker-a\",\n                \"batch_size\": \"0\",\n                \"is_last\": \"true\",\n            },\n            headers={\"content-length\": \"128\"},\n        )\n\n        response = await 
usdu_routes.submit_tiles_endpoint(request)\n\n        self.assertEqual(response.status, 200)\n        queued = await job_data.queue.get()\n        self.assertEqual(queued[\"worker_id\"], \"worker-a\")\n        self.assertTrue(queued[\"is_last\"])\n        self.assertEqual(queued[\"tiles\"], [])\n\n    async def test_submit_image_enqueues_processed_image_payload(self):\n        prompt_server = usdu_routes._prompt_server_holder[\"value\"]\n        job_data = usdu_routes._ImageJobState(\"job-5\")\n        prompt_server.distributed_pending_tile_jobs[\"job-5\"] = job_data\n\n        request = _FakeRequest(\n            post_payload={\n                \"multi_job_id\": \"job-5\",\n                \"worker_id\": \"worker-a\",\n                \"image_idx\": \"2\",\n                \"full_image\": _UploadField(_tiny_png_bytes()),\n                \"is_last\": \"false\",\n            },\n            headers={\"content-length\": \"256\"},\n        )\n\n        response = await usdu_routes.submit_image_endpoint(request)\n\n        self.assertEqual(response.status, 200)\n        queued = await job_data.queue.get()\n        self.assertEqual(queued[\"worker_id\"], \"worker-a\")\n        self.assertEqual(queued[\"image_idx\"], 2)\n        self.assertIn(\"image\", queued)\n\n    async def test_job_status_endpoint_reports_ready(self):\n        prompt_server = usdu_routes._prompt_server_holder[\"value\"]\n        prompt_server.distributed_pending_tile_jobs[\"job-6\"] = usdu_routes._TileJobState(\"job-6\")\n\n        request = _FakeRequest(query={\"multi_job_id\": \"job-6\"})\n        response = await usdu_routes.job_status_endpoint(request)\n\n        self.assertEqual(response.status, 200)\n        self.assertTrue(response.payload.get(\"ready\"))\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/api/test_worker_routes.py",
    "content": "import importlib.util\nimport os\nimport sys\nimport tempfile\nimport types\nimport unittest\nfrom collections import deque\nfrom pathlib import Path\nfrom unittest.mock import patch\n\n\nclass _FakeResponse:\n    def __init__(self, payload, status=200):\n        self.payload = payload\n        self.status = status\n\n\nclass _FakeRequest:\n    def __init__(self, payload=None, match_info=None, query=None):\n        self._payload = payload\n        self.match_info = match_info or {}\n        self.query = query or {}\n\n    async def json(self):\n        return self._payload\n\n\nclass _FakeHTTPClientResponse:\n    def __init__(self, payload, status=200):\n        self._payload = payload\n        self.status = status\n\n    async def __aenter__(self):\n        return self\n\n    async def __aexit__(self, _exc_type, _exc, _tb):\n        return False\n\n    async def json(self):\n        return self._payload\n\n    async def text(self):\n        return str(self._payload)\n\n\nclass _FakeHTTPClientSession:\n    def __init__(self, payload, status=200):\n        self._payload = payload\n        self._status = status\n        self.calls = []\n\n    def get(self, url, params=None, timeout=None):\n        self.calls.append({\"url\": url, \"params\": params, \"timeout\": timeout})\n        return _FakeHTTPClientResponse(self._payload, status=self._status)\n\n\nclass _DummyWorkerManager:\n    def __init__(self):\n        self.processes = {}\n\n    def launch_worker(self, worker):\n        worker_id = str(worker[\"id\"])\n        self.processes[worker_id] = {\n            \"pid\": 12345,\n            \"log_file\": f\"/tmp/distributed_worker_{worker_id}.log\",\n            \"process\": None,\n        }\n        return 12345\n\n    def _is_process_running(self, _pid):\n        return False\n\n    def save_processes(self):\n        return None\n\n    def stop_worker(self, _worker_id):\n        return True, \"Stopped\"\n\n    def get_managed_workers(self):\n        
return []\n\n\nclass _ImmediateLoop:\n    async def run_in_executor(self, _executor, func, *args):\n        return func(*args)\n\n\ndef _load_worker_routes_module():\n    module_path = Path(__file__).resolve().parents[2] / \"api\" / \"worker_routes.py\"\n    package_name = \"dist_api_worker_testpkg\"\n\n    for mod_name in list(sys.modules):\n        if mod_name == package_name or mod_name.startswith(f\"{package_name}.\"):\n            del sys.modules[mod_name]\n\n    root_pkg = types.ModuleType(package_name)\n    root_pkg.__path__ = []\n    sys.modules[package_name] = root_pkg\n\n    api_pkg = types.ModuleType(f\"{package_name}.api\")\n    api_pkg.__path__ = []\n    sys.modules[f\"{package_name}.api\"] = api_pkg\n\n    utils_pkg = types.ModuleType(f\"{package_name}.utils\")\n    utils_pkg.__path__ = []\n    sys.modules[f\"{package_name}.utils\"] = utils_pkg\n\n    workers_pkg = types.ModuleType(f\"{package_name}.workers\")\n    workers_pkg.__path__ = []\n    workers_pkg.get_worker_manager = lambda: _DummyWorkerManager()\n    sys.modules[f\"{package_name}.workers\"] = workers_pkg\n\n    detection_module = types.ModuleType(f\"{package_name}.workers.detection\")\n    detection_module.is_local_worker = lambda *_args, **_kwargs: True\n    detection_module.is_same_physical_host = lambda *_args, **_kwargs: True\n    detection_module.get_machine_id = lambda: \"machine-id\"\n    detection_module.is_docker_environment = lambda: False\n    detection_module.is_runpod_environment = lambda: False\n    detection_module.get_comms_channel = lambda *_args, **_kwargs: \"lan\"\n    sys.modules[f\"{package_name}.workers.detection\"] = detection_module\n\n    created_aiohttp_stub = False\n    if \"aiohttp\" not in sys.modules:\n        created_aiohttp_stub = True\n        aiohttp_module = types.ModuleType(\"aiohttp\")\n\n        class _ClientTimeout:\n            def __init__(self, total=None):\n                self.total = total\n\n        class _WSMsgType:\n            TEXT = 
\"TEXT\"\n            ERROR = \"ERROR\"\n            CLOSED = \"CLOSED\"\n\n        class _WebSocketResponse:\n            def __init__(self, *args, **kwargs):\n                self.args = args\n                self.kwargs = kwargs\n\n            async def prepare(self, _request):\n                return None\n\n            async def send_json(self, _payload):\n                return None\n\n            def __aiter__(self):\n                async def _empty():\n                    if False:\n                        yield None\n                return _empty()\n\n        aiohttp_module.ClientTimeout = _ClientTimeout\n        aiohttp_module.WSMsgType = _WSMsgType\n        aiohttp_module.web = types.SimpleNamespace(\n            json_response=lambda payload, status=200: _FakeResponse(payload, status=status),\n            WebSocketResponse=_WebSocketResponse,\n        )\n        sys.modules[\"aiohttp\"] = aiohttp_module\n\n    class _Routes:\n        def get(self, _path):\n            def _decorator(fn):\n                return fn\n            return _decorator\n\n        def post(self, _path):\n            def _decorator(fn):\n                return fn\n            return _decorator\n\n    server_module = types.ModuleType(\"server\")\n    server_module.PromptServer = types.SimpleNamespace(instance=types.SimpleNamespace(routes=_Routes()))\n    sys.modules[\"server\"] = server_module\n\n    created_torch_stub = False\n    if \"torch\" not in sys.modules:\n        created_torch_stub = True\n        torch_module = types.ModuleType(\"torch\")\n        torch_module.cuda = types.SimpleNamespace(\n            is_available=lambda: False,\n            empty_cache=lambda: None,\n            ipc_collect=lambda: None,\n            current_device=lambda: 0,\n            device_count=lambda: 0,\n        )\n        sys.modules[\"torch\"] = torch_module\n\n    logging_module = types.ModuleType(f\"{package_name}.utils.logging\")\n    logging_module.debug_log = lambda *_args, **_kwargs: 
None\n    logging_module.log = lambda *_args, **_kwargs: None\n    sys.modules[f\"{package_name}.utils.logging\"] = logging_module\n\n    config_module = types.ModuleType(f\"{package_name}.utils.config\")\n    config_module.load_config = lambda: {\"workers\": []}\n    sys.modules[f\"{package_name}.utils.config\"] = config_module\n\n    network_module = types.ModuleType(f\"{package_name}.utils.network\")\n\n    async def _handle_api_error(_request, error, status=500):\n        return _FakeResponse({\"status\": \"error\", \"message\": str(error)}, status=status)\n\n    network_module.handle_api_error = _handle_api_error\n    network_module.normalize_host = lambda value: value\n    network_module.build_worker_url = lambda worker, endpoint=\"\": f\"http://localhost:{worker.get('port', 8188)}{endpoint}\"\n\n    async def _probe_worker(*_args, **_kwargs):\n        return None\n\n    network_module.probe_worker = _probe_worker\n\n    async def _get_client_session():\n        raise RuntimeError(\"not used in these tests\")\n\n    network_module.get_client_session = _get_client_session\n    sys.modules[f\"{package_name}.utils.network\"] = network_module\n\n    constants_module = types.ModuleType(f\"{package_name}.utils.constants\")\n    constants_module.CHUNK_SIZE = 8192\n    sys.modules[f\"{package_name}.utils.constants\"] = constants_module\n\n    async_helpers_module = types.ModuleType(f\"{package_name}.utils.async_helpers\")\n\n    async def _queue_prompt_payload(*_args, **_kwargs):\n        return \"prompt-id\"\n\n    async_helpers_module.queue_prompt_payload = _queue_prompt_payload\n\n    class _PromptValidationError(RuntimeError):\n        def __init__(self, message=\"invalid prompt\", validation_error=None, node_errors=None):\n            super().__init__(message)\n            self.validation_error = validation_error if isinstance(validation_error, dict) else {}\n            self.node_errors = node_errors if isinstance(node_errors, dict) else {}\n\n    
async_helpers_module.PromptValidationError = _PromptValidationError\n    sys.modules[f\"{package_name}.utils.async_helpers\"] = async_helpers_module\n\n    schemas_module = types.ModuleType(f\"{package_name}.api.schemas\")\n\n    def _require_fields(data, *fields):\n        missing = []\n        for field in fields:\n            value = data.get(field) if isinstance(data, dict) else None\n            if value is None or (isinstance(value, str) and not value.strip()):\n                missing.append(field)\n        return missing\n\n    def _validate_worker_id(worker_id, config):\n        return any(str(worker.get(\"id\")) == str(worker_id) for worker in config.get(\"workers\", []))\n\n    schemas_module.require_fields = _require_fields\n    schemas_module.validate_worker_id = _validate_worker_id\n    sys.modules[f\"{package_name}.api.schemas\"] = schemas_module\n\n    spec = importlib.util.spec_from_file_location(f\"{package_name}.api.worker_routes\", module_path)\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n\n    if created_aiohttp_stub:\n        sys.modules.pop(\"aiohttp\", None)\n    if created_torch_stub:\n        sys.modules.pop(\"torch\", None)\n\n    return module\n\n\nworker_routes = _load_worker_routes_module()\n\n\nclass WorkerRoutesTests(unittest.IsolatedAsyncioTestCase):\n    async def test_launch_worker_valid_id_returns_200(self):\n        manager = _DummyWorkerManager()\n        config = {\"workers\": [{\"id\": \"worker-a\", \"name\": \"Worker A\", \"port\": 8188}]}\n        request = _FakeRequest({\"worker_id\": \"worker-a\"})\n\n        with patch.object(worker_routes, \"get_worker_manager\", return_value=manager), patch.object(\n            worker_routes, \"load_config\", return_value=config\n        ), patch.object(\n            worker_routes.asyncio, \"get_running_loop\", return_value=_ImmediateLoop()\n        ):\n            response = await 
worker_routes.launch_worker_endpoint(request)\n\n        self.assertEqual(response.status, 200)\n        self.assertEqual(response.payload.get(\"status\"), \"success\")\n        self.assertEqual(response.payload.get(\"pid\"), 12345)\n\n    async def test_launch_worker_unknown_id_returns_404(self):\n        manager = _DummyWorkerManager()\n        config = {\"workers\": [{\"id\": \"worker-a\", \"name\": \"Worker A\", \"port\": 8188}]}\n        request = _FakeRequest({\"worker_id\": \"missing-worker\"})\n\n        with patch.object(worker_routes, \"get_worker_manager\", return_value=manager), patch.object(\n            worker_routes, \"load_config\", return_value=config\n        ):\n            response = await worker_routes.launch_worker_endpoint(request)\n\n        self.assertEqual(response.status, 404)\n        self.assertIn(\"not found\", response.payload.get(\"message\", \"\").lower())\n\n    async def test_worker_log_returns_content_json(self):\n        manager = _DummyWorkerManager()\n        with tempfile.NamedTemporaryFile(\"w\", delete=False, encoding=\"utf-8\") as handle:\n            handle.write(\"line-1\\nline-2\\nline-3\\n\")\n            log_path = handle.name\n\n        manager.processes[\"worker-a\"] = {\n            \"pid\": 9999,\n            \"log_file\": log_path,\n            \"process\": None,\n        }\n\n        request = _FakeRequest(match_info={\"worker_id\": \"worker-a\"}, query={\"lines\": \"2\"})\n        try:\n            with patch.object(worker_routes, \"get_worker_manager\", return_value=manager), patch.object(\n                worker_routes.asyncio, \"get_running_loop\", return_value=_ImmediateLoop()\n            ):\n                response = await worker_routes.get_worker_log_endpoint(request)\n        finally:\n            if os.path.exists(log_path):\n                os.remove(log_path)\n\n        self.assertEqual(response.status, 200)\n        self.assertEqual(response.payload.get(\"status\"), \"success\")\n        
self.assertIn(\"content\", response.payload)\n        self.assertIn(\"line-3\", response.payload[\"content\"])\n\n    async def test_local_log_reads_memory_buffer(self):\n        request = _FakeRequest(query={\"lines\": \"2\"})\n        fake_logs = deque(\n            [\n                {\"m\": \"line-1\\n\"},\n                {\"m\": \"line-2\\n\"},\n                {\"m\": \"line-3\\n\"},\n            ],\n            maxlen=300,\n        )\n        app_module = types.ModuleType(\"app\")\n        app_module.__path__ = []\n        logger_module = types.ModuleType(\"app.logger\")\n        logger_module.get_logs = lambda: fake_logs\n        app_module.logger = logger_module\n\n        with patch.dict(sys.modules, {\"app\": app_module, \"app.logger\": logger_module}):\n            response = await worker_routes.get_local_log_endpoint(request)\n\n        self.assertEqual(response.status, 200)\n        self.assertEqual(response.payload.get(\"status\"), \"success\")\n        self.assertEqual(response.payload.get(\"source\"), \"memory\")\n        self.assertEqual(response.payload.get(\"entries\"), 2)\n        self.assertIn(\"line-3\", response.payload.get(\"content\", \"\"))\n\n    async def test_remote_worker_log_proxies_to_worker_local_log_endpoint(self):\n        config = {\n            \"workers\": [\n                {\n                    \"id\": \"worker-remote\",\n                    \"name\": \"Remote Worker\",\n                    \"host\": \"worker.example.com\",\n                    \"port\": 8188,\n                    \"type\": \"remote\",\n                }\n            ]\n        }\n        request = _FakeRequest(match_info={\"worker_id\": \"worker-remote\"}, query={\"lines\": \"120\"})\n        proxied_payload = {\n            \"status\": \"success\",\n            \"content\": \"remote-log-content\\n\",\n            \"entries\": 1,\n            \"source\": \"memory\",\n            \"truncated\": False,\n            \"lines_shown\": 1,\n        }\n        
fake_session = _FakeHTTPClientSession(proxied_payload)\n\n        async def _fake_get_client_session():\n            return fake_session\n\n        with patch.object(worker_routes, \"load_config\", return_value=config), patch.object(\n            worker_routes, \"get_client_session\", side_effect=_fake_get_client_session\n        ):\n            response = await worker_routes.get_remote_worker_log_endpoint(request)\n\n        self.assertEqual(response.status, 200)\n        self.assertEqual(response.payload.get(\"content\"), \"remote-log-content\\n\")\n        self.assertEqual(len(fake_session.calls), 1)\n        self.assertEqual(fake_session.calls[0][\"params\"], {\"lines\": \"120\"})\n        self.assertTrue(fake_session.calls[0][\"url\"].endswith(\"/distributed/local_log\"))\n\n    async def test_remote_worker_log_rejects_local_workers(self):\n        config = {\"workers\": [{\"id\": \"worker-local\", \"name\": \"Local Worker\", \"port\": 8188}]}\n        request = _FakeRequest(match_info={\"worker_id\": \"worker-local\"})\n\n        with patch.object(worker_routes, \"load_config\", return_value=config):\n            response = await worker_routes.get_remote_worker_log_endpoint(request)\n\n        self.assertEqual(response.status, 400)\n        self.assertIn(\"local\", response.payload.get(\"message\", \"\").lower())\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/conftest.py",
    "content": "# This conftest.py marks the tests/ directory as the pytest collection root,\n# preventing pytest from traversing into the parent package's __init__.py.\n"
  },
  {
    "path": "tests/test_async_helpers.py",
    "content": "import importlib.util\nimport sys\nimport types\nimport unittest\nfrom pathlib import Path\n\n\nclass _PromptQueue:\n    def __init__(self):\n        self.items = []\n\n    def put(self, item):\n        self.items.append(item)\n\n\ndef _load_async_helpers_module():\n    module_path = Path(__file__).resolve().parents[1] / \"utils\" / \"async_helpers.py\"\n    package_name = \"dist_async_helpers_testpkg\"\n\n    for mod_name in list(sys.modules):\n        if mod_name == package_name or mod_name.startswith(f\"{package_name}.\"):\n            del sys.modules[mod_name]\n\n    root_pkg = types.ModuleType(package_name)\n    root_pkg.__path__ = []\n    sys.modules[package_name] = root_pkg\n\n    utils_pkg = types.ModuleType(f\"{package_name}.utils\")\n    utils_pkg.__path__ = []\n    sys.modules[f\"{package_name}.utils\"] = utils_pkg\n\n    execution_module = types.ModuleType(\"execution\")\n\n    async def _validate_prompt(prompt_id, prompt, partial_execution_targets):\n        return (True, None, [\"9\"], {})\n\n    execution_module.validate_prompt = _validate_prompt\n    execution_module.SENSITIVE_EXTRA_DATA_KEYS = []\n    sys.modules[\"execution\"] = execution_module\n\n    prompt_server = types.SimpleNamespace(\n        trigger_on_prompt=lambda payload: payload,\n        number=12,\n        prompt_queue=_PromptQueue(),\n    )\n    server_module = types.ModuleType(\"server\")\n    server_module.PromptServer = types.SimpleNamespace(instance=prompt_server)\n    sys.modules[\"server\"] = server_module\n\n    network_module = types.ModuleType(f\"{package_name}.utils.network\")\n    network_module.get_server_loop = lambda: None\n    sys.modules[f\"{package_name}.utils.network\"] = network_module\n\n    spec = importlib.util.spec_from_file_location(f\"{package_name}.utils.async_helpers\", module_path)\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n    return 
module, prompt_server\n\n\nasync_helpers, prompt_server = _load_async_helpers_module()\n\n\nclass QueuePromptPayloadTests(unittest.IsolatedAsyncioTestCase):\n    async def test_queue_prompt_payload_includes_create_time_and_client_metadata(self):\n        result = await async_helpers.queue_prompt_payload(\n            {\"1\": {\"class_type\": \"Node\"}},\n            workflow_meta={\"id\": \"workflow-1\"},\n            client_id=\"client-1\",\n            include_queue_metadata=True,\n        )\n\n        self.assertIsInstance(result[\"prompt_id\"], str)\n        self.assertTrue(result[\"prompt_id\"])\n        self.assertEqual(result[\"number\"], 12)\n        self.assertEqual(result[\"node_errors\"], {})\n\n        self.assertEqual(prompt_server.number, 13)\n        self.assertEqual(len(prompt_server.prompt_queue.items), 1)\n        queued_item = prompt_server.prompt_queue.items[0]\n        self.assertEqual(queued_item[0], 12)\n        extra_data = queued_item[3]\n        self.assertEqual(extra_data[\"client_id\"], \"client-1\")\n        self.assertIn(\"create_time\", extra_data)\n        self.assertIsInstance(extra_data[\"create_time\"], int)\n        self.assertGreater(extra_data[\"create_time\"], 0)\n        self.assertEqual(extra_data[\"extra_pnginfo\"][\"workflow\"], {\"id\": \"workflow-1\"})\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_batch_dividers.py",
    "content": "import importlib.util\nimport sys\nimport types\nimport unittest\nfrom pathlib import Path\n\nimport torch\n\n\ndef _load_utilities_module():\n    module_path = Path(__file__).resolve().parents[1] / \"nodes\" / \"utilities.py\"\n    package_name = \"dist_divider_testpkg\"\n\n    for mod_name in list(sys.modules):\n        if mod_name == package_name or mod_name.startswith(f\"{package_name}.\"):\n            del sys.modules[mod_name]\n\n    root_pkg = types.ModuleType(package_name)\n    root_pkg.__path__ = []\n    sys.modules[package_name] = root_pkg\n\n    nodes_pkg = types.ModuleType(f\"{package_name}.nodes\")\n    nodes_pkg.__path__ = []\n    sys.modules[f\"{package_name}.nodes\"] = nodes_pkg\n\n    utils_pkg = types.ModuleType(f\"{package_name}.utils\")\n    utils_pkg.__path__ = []\n    sys.modules[f\"{package_name}.utils\"] = utils_pkg\n\n    logging_module = types.ModuleType(f\"{package_name}.utils.logging\")\n    logging_module.debug_log = lambda *_args, **_kwargs: None\n    logging_module.log = lambda *_args, **_kwargs: None\n    sys.modules[f\"{package_name}.utils.logging\"] = logging_module\n\n    spec = importlib.util.spec_from_file_location(\n        f\"{package_name}.nodes.utilities\",\n        module_path,\n    )\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n    return module\n\n\nutils = _load_utilities_module()\n\n\nclass ImageBatchDividerTests(unittest.TestCase):\n    def test_divides_images_into_contiguous_chunks(self):\n        divider = utils.ImageBatchDivider()\n        images = torch.arange(10, dtype=torch.float32).reshape(10, 1, 1, 1)\n\n        outputs = divider.divide_batch(images, 3)\n\n        self.assertEqual(outputs[0].shape[0], 4)\n        self.assertEqual(outputs[1].shape[0], 3)\n        self.assertEqual(outputs[2].shape[0], 3)\n        self.assertEqual(outputs[0][:, 0, 0, 0].tolist(), [0.0, 1.0, 2.0, 3.0])\n        
self.assertEqual(outputs[1][:, 0, 0, 0].tolist(), [4.0, 5.0, 6.0])\n        self.assertEqual(outputs[2][:, 0, 0, 0].tolist(), [7.0, 8.0, 9.0])\n\n    def test_unused_image_outputs_are_empty(self):\n        divider = utils.ImageBatchDivider()\n        images = torch.arange(4, dtype=torch.float32).reshape(4, 1, 1, 1)\n\n        outputs = divider.divide_batch(images, 2)\n\n        self.assertEqual(len(outputs), 10)\n        for idx in range(2, 10):\n            self.assertEqual(outputs[idx].shape[0], 0)\n\n\nclass AudioBatchDividerTests(unittest.TestCase):\n    def test_divides_audio_samples_into_contiguous_chunks(self):\n        divider = utils.AudioBatchDivider()\n        audio = {\n            \"waveform\": torch.arange(10, dtype=torch.float32).reshape(1, 1, 10),\n            \"sample_rate\": 24000,\n        }\n\n        outputs = divider.divide_audio(audio, 3)\n\n        self.assertEqual(outputs[0][\"waveform\"][0, 0].tolist(), [0.0, 1.0, 2.0, 3.0])\n        self.assertEqual(outputs[1][\"waveform\"][0, 0].tolist(), [4.0, 5.0, 6.0])\n        self.assertEqual(outputs[2][\"waveform\"][0, 0].tolist(), [7.0, 8.0, 9.0])\n\n    def test_unused_audio_outputs_are_empty(self):\n        divider = utils.AudioBatchDivider()\n        audio = {\n            \"waveform\": torch.arange(8, dtype=torch.float32).reshape(1, 1, 8),\n            \"sample_rate\": 24000,\n        }\n\n        outputs = divider.divide_audio(audio, 2)\n\n        self.assertEqual(len(outputs), 10)\n        for idx in range(2, 10):\n            self.assertEqual(outputs[idx][\"waveform\"].shape[-1], 0)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_config.py",
    "content": "import importlib.util\nimport json\nimport os\nimport sys\nimport tempfile\nimport types\nimport unittest\nfrom pathlib import Path\nfrom unittest.mock import patch\n\n\ndef _load_config_module():\n    module_path = Path(__file__).resolve().parents[1] / \"utils\" / \"config.py\"\n    package_name = \"dist_cfg_testpkg\"\n\n    for mod_name in list(sys.modules):\n        if mod_name == package_name or mod_name.startswith(f\"{package_name}.\"):\n            del sys.modules[mod_name]\n\n    root_pkg = types.ModuleType(package_name)\n    root_pkg.__path__ = []\n    sys.modules[package_name] = root_pkg\n\n    logging_module = types.ModuleType(f\"{package_name}.logging\")\n    logging_module.log = lambda *_args, **_kwargs: None\n    logging_module.debug_log = lambda *_args, **_kwargs: None\n    sys.modules[f\"{package_name}.logging\"] = logging_module\n\n    constants_module = types.ModuleType(f\"{package_name}.constants\")\n    constants_module.HEARTBEAT_TIMEOUT = 30\n    sys.modules[f\"{package_name}.constants\"] = constants_module\n\n    spec = importlib.util.spec_from_file_location(f\"{package_name}.config\", module_path)\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n    return module\n\n\nconfig = _load_config_module()\n\n\n# ---------------------------------------------------------------------------\n# _merge_with_defaults\n# ---------------------------------------------------------------------------\n\nclass MergeWithDefaultsTests(unittest.TestCase):\n    def test_non_dict_input_returns_defaults(self):\n        result = config._merge_with_defaults(\"not a dict\", {\"key\": \"default\"})\n        self.assertEqual(result, {\"key\": \"default\"})\n\n    def test_fills_missing_keys_with_defaults(self):\n        result = config._merge_with_defaults({}, {\"a\": 1, \"b\": 2})\n        self.assertEqual(result, {\"a\": 1, \"b\": 2})\n\n    def 
test_loaded_value_overrides_default(self):\n        result = config._merge_with_defaults({\"a\": 99}, {\"a\": 1, \"b\": 2})\n        self.assertEqual(result[\"a\"], 99)\n        self.assertEqual(result[\"b\"], 2)\n\n    def test_nested_dict_merges_recursively(self):\n        defaults = {\"settings\": {\"debug\": False, \"count\": 5}}\n        loaded = {\"settings\": {\"debug\": True}}\n        result = config._merge_with_defaults(loaded, defaults)\n        self.assertTrue(result[\"settings\"][\"debug\"])\n        self.assertEqual(result[\"settings\"][\"count\"], 5)\n\n    def test_preserves_unknown_keys_for_forward_compatibility(self):\n        result = config._merge_with_defaults({\"extra_key\": \"extra\"}, {\"a\": 1})\n        self.assertEqual(result[\"extra_key\"], \"extra\")\n\n    def test_none_loaded_value_overrides_default(self):\n        \"\"\"Explicitly set None in config should override non-None default.\"\"\"\n        result = config._merge_with_defaults({\"a\": None}, {\"a\": \"default\"})\n        self.assertIsNone(result[\"a\"])\n\n    def test_non_dict_nested_loaded_value_replaces_dict_default(self):\n        \"\"\"If loaded has a scalar where default has a dict, use the scalar.\"\"\"\n        defaults = {\"settings\": {\"debug\": False}}\n        loaded = {\"settings\": \"flat_string\"}\n        result = config._merge_with_defaults(loaded, defaults)\n        self.assertEqual(result[\"settings\"], \"flat_string\")\n\n\n# ---------------------------------------------------------------------------\n# load_config\n# ---------------------------------------------------------------------------\n\nclass LoadConfigTests(unittest.TestCase):\n    def setUp(self):\n        config.invalidate_config_cache()\n\n    def tearDown(self):\n        config.invalidate_config_cache()\n\n    def test_returns_defaults_when_file_missing(self):\n        with patch.object(config, \"CONFIG_FILE\", \"/nonexistent/path/config.json\"):\n            cfg = config.load_config()\n     
   defaults = config.get_default_config()\n        self.assertEqual(cfg[\"settings\"][\"debug\"], defaults[\"settings\"][\"debug\"])\n        self.assertIn(\"workers\", cfg)\n\n    def test_loads_valid_json_file(self):\n        data = {\n            \"workers\": [{\"id\": \"w1\"}],\n            \"master\": {\"host\": \"test.host\"},\n            \"settings\": {},\n            \"tunnel\": {},\n        }\n        with tempfile.NamedTemporaryFile(\"w\", suffix=\".json\", delete=False, encoding=\"utf-8\") as f:\n            json.dump(data, f)\n            tmp_path = f.name\n        try:\n            with patch.object(config, \"CONFIG_FILE\", tmp_path):\n                cfg = config.load_config()\n            self.assertEqual(cfg[\"master\"][\"host\"], \"test.host\")\n            self.assertEqual(len(cfg[\"workers\"]), 1)\n        finally:\n            os.unlink(tmp_path)\n\n    def test_merges_loaded_file_with_defaults(self):\n        \"\"\"Loaded file with partial settings should be filled in from defaults.\"\"\"\n        data = {\"master\": {\"host\": \"h\"}, \"workers\": [], \"settings\": {\"debug\": True}, \"tunnel\": {}}\n        with tempfile.NamedTemporaryFile(\"w\", suffix=\".json\", delete=False, encoding=\"utf-8\") as f:\n            json.dump(data, f)\n            tmp_path = f.name\n        try:\n            with patch.object(config, \"CONFIG_FILE\", tmp_path):\n                cfg = config.load_config()\n            # debug was set to True\n            self.assertTrue(cfg[\"settings\"][\"debug\"])\n            # auto_launch_workers is a default key and should be present\n            self.assertIn(\"auto_launch_workers\", cfg[\"settings\"])\n        finally:\n            os.unlink(tmp_path)\n\n    def test_falls_back_to_defaults_on_invalid_json(self):\n        with tempfile.NamedTemporaryFile(\"w\", suffix=\".json\", delete=False, encoding=\"utf-8\") as f:\n            f.write(\"{invalid json{{\")\n            tmp_path = f.name\n        try:\n            
with patch.object(config, \"CONFIG_FILE\", tmp_path):\n                cfg = config.load_config()\n            self.assertIn(\"settings\", cfg)\n            self.assertIn(\"workers\", cfg)\n        finally:\n            os.unlink(tmp_path)\n\n    def test_second_call_returns_cached_object(self):\n        with tempfile.NamedTemporaryFile(\"w\", suffix=\".json\", delete=False, encoding=\"utf-8\") as f:\n            json.dump(config.get_default_config(), f)\n            tmp_path = f.name\n        try:\n            with patch.object(config, \"CONFIG_FILE\", tmp_path):\n                cfg1 = config.load_config()\n                cfg2 = config.load_config()\n            self.assertIs(cfg1, cfg2)\n        finally:\n            os.unlink(tmp_path)\n\n    def test_invalidate_cache_forces_reload(self):\n        data = config.get_default_config()\n        data[\"master\"][\"host\"] = \"first\"\n        with tempfile.NamedTemporaryFile(\"w\", suffix=\".json\", delete=False, encoding=\"utf-8\") as f:\n            json.dump(data, f)\n            tmp_path = f.name\n        try:\n            with patch.object(config, \"CONFIG_FILE\", tmp_path):\n                cfg1 = config.load_config()\n                config.invalidate_config_cache()\n                data[\"master\"][\"host\"] = \"second\"\n                with open(tmp_path, \"w\", encoding=\"utf-8\") as fh:\n                    json.dump(data, fh)\n                cfg2 = config.load_config()\n            self.assertEqual(cfg1[\"master\"][\"host\"], \"first\")\n            self.assertEqual(cfg2[\"master\"][\"host\"], \"second\")\n        finally:\n            os.unlink(tmp_path)\n\n\n# ---------------------------------------------------------------------------\n# save_config\n# ---------------------------------------------------------------------------\n\nclass SaveConfigTests(unittest.TestCase):\n    def setUp(self):\n        config.invalidate_config_cache()\n\n    def tearDown(self):\n        
config.invalidate_config_cache()\n\n    def test_saves_and_reloads_correctly(self):\n        data = config.get_default_config()\n        data[\"master\"][\"host\"] = \"saved.host\"\n        with tempfile.TemporaryDirectory() as tmpdir:\n            tmp_path = os.path.join(tmpdir, \"config.json\")\n            with patch.object(config, \"CONFIG_FILE\", tmp_path):\n                result = config.save_config(data)\n                self.assertTrue(result)\n                loaded = config.load_config()\n        self.assertEqual(loaded[\"master\"][\"host\"], \"saved.host\")\n\n    def test_returns_false_when_path_unwritable(self):\n        with patch.object(config, \"CONFIG_FILE\", \"/nonexistent_dir/config.json\"):\n            result = config.save_config({})\n        self.assertFalse(result)\n\n    def test_save_invalidates_cache(self):\n        \"\"\"After saving, the cache should be cleared so next load re-reads.\"\"\"\n        with tempfile.TemporaryDirectory() as tmpdir:\n            tmp_path = os.path.join(tmpdir, \"config.json\")\n            with patch.object(config, \"CONFIG_FILE\", tmp_path):\n                data = config.get_default_config()\n                config.save_config(data)\n                # Cache is now None; load_config should re-read\n                self.assertIsNone(config._config_cache)\n\n    def test_written_file_is_valid_json(self):\n        data = config.get_default_config()\n        with tempfile.TemporaryDirectory() as tmpdir:\n            tmp_path = os.path.join(tmpdir, \"config.json\")\n            with patch.object(config, \"CONFIG_FILE\", tmp_path):\n                config.save_config(data)\n            with open(tmp_path, encoding=\"utf-8\") as fh:\n                parsed = json.load(fh)\n        self.assertEqual(parsed[\"master\"], data[\"master\"])\n\n\n# ---------------------------------------------------------------------------\n# get_worker_timeout_seconds\n# 
---------------------------------------------------------------------------\n\nclass GetWorkerTimeoutSecondsTests(unittest.TestCase):\n    def test_returns_configured_value(self):\n        cfg = config.get_default_config()\n        cfg[\"settings\"][\"worker_timeout_seconds\"] = 120\n        with patch.object(config, \"load_config\", return_value=cfg):\n            self.assertEqual(config.get_worker_timeout_seconds(), 120)\n\n    def test_clamps_zero_to_one(self):\n        cfg = config.get_default_config()\n        cfg[\"settings\"][\"worker_timeout_seconds\"] = 0\n        with patch.object(config, \"load_config\", return_value=cfg):\n            self.assertEqual(config.get_worker_timeout_seconds(), 1)\n\n    def test_clamps_negative_to_one(self):\n        cfg = config.get_default_config()\n        cfg[\"settings\"][\"worker_timeout_seconds\"] = -10\n        with patch.object(config, \"load_config\", return_value=cfg):\n            self.assertEqual(config.get_worker_timeout_seconds(), 1)\n\n    def test_falls_back_to_provided_default_when_key_missing(self):\n        cfg = config.get_default_config()\n        # worker_timeout_seconds is not present in default config\n        cfg[\"settings\"].pop(\"worker_timeout_seconds\", None)\n        with patch.object(config, \"load_config\", return_value=cfg):\n            result = config.get_worker_timeout_seconds(default=45)\n        self.assertEqual(result, 45)\n\n    def test_fallback_also_clamped_to_one(self):\n        cfg = config.get_default_config()\n        cfg[\"settings\"].pop(\"worker_timeout_seconds\", None)\n        with patch.object(config, \"load_config\", return_value=cfg):\n            result = config.get_worker_timeout_seconds(default=0)\n        self.assertEqual(result, 1)\n\n\n# ---------------------------------------------------------------------------\n# is_master_delegate_only\n# ---------------------------------------------------------------------------\n\nclass 
IsMasterDelegateOnlyTests(unittest.TestCase):\n    def test_returns_false_by_default(self):\n        cfg = config.get_default_config()\n        with patch.object(config, \"load_config\", return_value=cfg):\n            self.assertFalse(config.is_master_delegate_only())\n\n    def test_returns_true_when_enabled(self):\n        cfg = config.get_default_config()\n        cfg[\"settings\"][\"master_delegate_only\"] = True\n        with patch.object(config, \"load_config\", return_value=cfg):\n            self.assertTrue(config.is_master_delegate_only())\n\n    def test_returns_false_on_exception(self):\n        def _raise():\n            raise RuntimeError(\"config exploded\")\n\n        with patch.object(config, \"load_config\", side_effect=RuntimeError(\"boom\")):\n            self.assertFalse(config.is_master_delegate_only())\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_detection.py",
    "content": "import importlib.util\nimport sys\nimport types\nimport unittest\nfrom pathlib import Path\nfrom unittest.mock import patch\n\n\ndef _load_detection_module():\n    module_path = Path(__file__).resolve().parents[1] / \"workers\" / \"detection.py\"\n    package_name = \"dist_det_testpkg\"\n\n    for mod_name in list(sys.modules):\n        if mod_name == package_name or mod_name.startswith(f\"{package_name}.\"):\n            del sys.modules[mod_name]\n\n    root_pkg = types.ModuleType(package_name)\n    root_pkg.__path__ = []\n    sys.modules[package_name] = root_pkg\n\n    workers_pkg = types.ModuleType(f\"{package_name}.workers\")\n    workers_pkg.__path__ = []\n    sys.modules[f\"{package_name}.workers\"] = workers_pkg\n\n    utils_pkg = types.ModuleType(f\"{package_name}.utils\")\n    utils_pkg.__path__ = []\n    sys.modules[f\"{package_name}.utils\"] = utils_pkg\n\n    logging_module = types.ModuleType(f\"{package_name}.utils.logging\")\n    logging_module.debug_log = lambda *_args, **_kwargs: None\n    sys.modules[f\"{package_name}.utils.logging\"] = logging_module\n\n    network_module = types.ModuleType(f\"{package_name}.utils.network\")\n    network_module.normalize_host = lambda value: value\n\n    async def _fake_session():\n        raise RuntimeError(\"network calls not used in these tests\")\n\n    network_module.get_client_session = _fake_session\n    sys.modules[f\"{package_name}.utils.network\"] = network_module\n\n    created_aiohttp_stub = False\n    if \"aiohttp\" not in sys.modules:\n        created_aiohttp_stub = True\n        aiohttp_module = types.ModuleType(\"aiohttp\")\n\n        class _ClientTimeout:\n            def __init__(self, total=None):\n                pass\n\n        aiohttp_module.ClientTimeout = _ClientTimeout\n        sys.modules[\"aiohttp\"] = aiohttp_module\n\n    spec = importlib.util.spec_from_file_location(\n        f\"{package_name}.workers.detection\",\n        module_path,\n    )\n    module = 
importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n\n    if created_aiohttp_stub:\n        sys.modules.pop(\"aiohttp\", None)\n\n    return module\n\n\ndetection = _load_detection_module()\n\n\n# ---------------------------------------------------------------------------\n# is_docker_environment\n# ---------------------------------------------------------------------------\n\nclass IsDockerEnvironmentTests(unittest.TestCase):\n    def test_true_when_dockerenv_file_exists(self):\n        with patch.object(detection.os.path, \"exists\", return_value=True), \\\n             patch.dict(detection.os.environ, {}, clear=True), \\\n             patch.object(detection.platform, \"node\", return_value=\"my-laptop\"):\n            self.assertTrue(detection.is_docker_environment())\n\n    def test_true_when_docker_container_env_var_is_set(self):\n        with patch.object(detection.os.path, \"exists\", return_value=False), \\\n             patch.dict(detection.os.environ, {\"DOCKER_CONTAINER\": \"1\"}, clear=True), \\\n             patch.object(detection.platform, \"node\", return_value=\"my-laptop\"):\n            self.assertTrue(detection.is_docker_environment())\n\n    def test_true_when_platform_node_contains_docker(self):\n        with patch.object(detection.os.path, \"exists\", return_value=False), \\\n             patch.dict(detection.os.environ, {}, clear=True), \\\n             patch.object(detection.platform, \"node\", return_value=\"my-docker-host\"):\n            self.assertTrue(detection.is_docker_environment())\n\n    def test_false_when_none_of_the_signals_are_present(self):\n        with patch.object(detection.os.path, \"exists\", return_value=False), \\\n             patch.dict(detection.os.environ, {}, clear=True), \\\n             patch.object(detection.platform, \"node\", return_value=\"my-laptop\"):\n            self.assertFalse(detection.is_docker_environment())\n\n    def 
test_docker_node_name_is_case_insensitive(self):\n        with patch.object(detection.os.path, \"exists\", return_value=False), \\\n             patch.dict(detection.os.environ, {}, clear=True), \\\n             patch.object(detection.platform, \"node\", return_value=\"My-Docker-Box\"):\n            self.assertTrue(detection.is_docker_environment())\n\n    def test_docker_env_var_empty_string_is_falsy(self):\n        \"\"\"An empty DOCKER_CONTAINER env var should NOT trigger docker detection.\"\"\"\n        with patch.object(detection.os.path, \"exists\", return_value=False), \\\n             patch.dict(detection.os.environ, {\"DOCKER_CONTAINER\": \"\"}, clear=True), \\\n             patch.object(detection.platform, \"node\", return_value=\"my-laptop\"):\n            self.assertFalse(detection.is_docker_environment())\n\n\n# ---------------------------------------------------------------------------\n# is_runpod_environment\n# ---------------------------------------------------------------------------\n\nclass IsRunpodEnvironmentTests(unittest.TestCase):\n    def test_true_when_runpod_pod_id_is_set(self):\n        with patch.dict(detection.os.environ, {\"RUNPOD_POD_ID\": \"pod-abc\"}, clear=True):\n            self.assertTrue(detection.is_runpod_environment())\n\n    def test_true_when_runpod_api_key_is_set(self):\n        with patch.dict(detection.os.environ, {\"RUNPOD_API_KEY\": \"key-xyz\"}, clear=True):\n            self.assertTrue(detection.is_runpod_environment())\n\n    def test_true_when_both_vars_are_set(self):\n        with patch.dict(\n            detection.os.environ,\n            {\"RUNPOD_POD_ID\": \"pod-abc\", \"RUNPOD_API_KEY\": \"key-xyz\"},\n            clear=True,\n        ):\n            self.assertTrue(detection.is_runpod_environment())\n\n    def test_false_when_neither_var_is_set(self):\n        with patch.dict(detection.os.environ, {}, clear=True):\n            self.assertFalse(detection.is_runpod_environment())\n\n    def 
test_true_when_pod_id_is_empty_string(self):\n        \"\"\"The `is not None` check means even an empty string counts as detected.\"\"\"\n        with patch.dict(detection.os.environ, {\"RUNPOD_POD_ID\": \"\"}, clear=True):\n            self.assertTrue(detection.is_runpod_environment())\n\n\n# ---------------------------------------------------------------------------\n# is_local_worker (host-based checks only)\n# ---------------------------------------------------------------------------\n\nclass IsLocalWorkerTests(unittest.IsolatedAsyncioTestCase):\n    async def test_true_for_localhost_host(self):\n        result = await detection.is_local_worker({\"host\": \"localhost\", \"port\": 8188})\n        self.assertTrue(result)\n\n    async def test_true_for_127_0_0_1(self):\n        result = await detection.is_local_worker({\"host\": \"127.0.0.1\", \"port\": 8188})\n        self.assertTrue(result)\n\n    async def test_true_for_0_0_0_0(self):\n        result = await detection.is_local_worker({\"host\": \"0.0.0.0\", \"port\": 8188})\n        self.assertTrue(result)\n\n    async def test_true_when_type_is_local(self):\n        result = await detection.is_local_worker({\"type\": \"local\", \"host\": \"remote.example.com\"})\n        self.assertTrue(result)\n\n    async def test_false_for_remote_host(self):\n        result = await detection.is_local_worker({\"host\": \"remote.example.com\", \"port\": 8188})\n        self.assertFalse(result)\n\n    async def test_true_when_no_host_key(self):\n        \"\"\"Missing host defaults to 'localhost'.\"\"\"\n        result = await detection.is_local_worker({\"port\": 8188})\n        self.assertTrue(result)\n\n\n# ---------------------------------------------------------------------------\n# get_machine_id\n# ---------------------------------------------------------------------------\n\nclass GetMachineIdTests(unittest.TestCase):\n    def test_returns_a_string(self):\n        result = detection.get_machine_id()\n        
self.assertIsInstance(result, str)\n\n    def test_returns_non_empty_string(self):\n        result = detection.get_machine_id()\n        self.assertTrue(len(result) > 0)\n\n    def test_stable_across_calls(self):\n        r1 = detection.get_machine_id()\n        r2 = detection.get_machine_id()\n        self.assertEqual(r1, r2)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_dispatch_selection.py",
    "content": "import asyncio\nimport importlib.util\nimport sys\nimport types\nimport unittest\nfrom pathlib import Path\nfrom unittest.mock import patch\n\n\ndef _load_dispatch_module():\n    module_path = Path(__file__).resolve().parents[1] / \"api\" / \"orchestration\" / \"dispatch.py\"\n\n    package_name = \"dist_dispatch_testpkg\"\n    root_pkg = types.ModuleType(package_name)\n    root_pkg.__path__ = []\n    sys.modules[package_name] = root_pkg\n\n    api_pkg = types.ModuleType(f\"{package_name}.api\")\n    api_pkg.__path__ = []\n    sys.modules[f\"{package_name}.api\"] = api_pkg\n\n    orch_pkg = types.ModuleType(f\"{package_name}.api.orchestration\")\n    orch_pkg.__path__ = []\n    sys.modules[f\"{package_name}.api.orchestration\"] = orch_pkg\n\n    utils_pkg = types.ModuleType(f\"{package_name}.utils\")\n    utils_pkg.__path__ = []\n    sys.modules[f\"{package_name}.utils\"] = utils_pkg\n\n    logging_module = types.ModuleType(f\"{package_name}.utils.logging\")\n    logging_module.debug_log = lambda *_args, **_kwargs: None\n    logging_module.log = lambda *_args, **_kwargs: None\n    sys.modules[f\"{package_name}.utils.logging\"] = logging_module\n\n    network_module = types.ModuleType(f\"{package_name}.utils.network\")\n    network_module.build_worker_url = lambda *_args, **_kwargs: \"http://example.invalid\"\n\n    async def _probe_worker(*_args, **_kwargs):\n        return None\n\n    network_module.probe_worker = _probe_worker\n\n    async def _fake_session():\n        raise RuntimeError(\"get_client_session should be mocked in these tests\")\n\n    network_module.get_client_session = _fake_session\n    sys.modules[f\"{package_name}.utils.network\"] = network_module\n\n    created_aiohttp_stub = False\n    if \"aiohttp\" not in sys.modules:\n        created_aiohttp_stub = True\n        aiohttp_module = types.ModuleType(\"aiohttp\")\n\n        class _ClientTimeout:\n            def __init__(self, total=None):\n                self.total = total\n\n 
       class _ClientConnectorError(Exception):\n            pass\n\n        class _WSMsgType:\n            TEXT = \"TEXT\"\n            ERROR = \"ERROR\"\n            CLOSED = \"CLOSED\"\n\n        class _TCPConnector:\n            def __init__(self, *args, **kwargs):\n                self.args = args\n                self.kwargs = kwargs\n\n        class _ClientSession:\n            def __init__(self, *args, **kwargs):\n                self.closed = False\n\n            async def close(self):\n                self.closed = True\n\n        aiohttp_module.ClientTimeout = _ClientTimeout\n        aiohttp_module.ClientConnectorError = _ClientConnectorError\n        aiohttp_module.WSMsgType = _WSMsgType\n        aiohttp_module.TCPConnector = _TCPConnector\n        aiohttp_module.ClientSession = _ClientSession\n        aiohttp_module.web = types.SimpleNamespace(\n            json_response=lambda payload, status=200: {\"payload\": payload, \"status\": status}\n        )\n        sys.modules[\"aiohttp\"] = aiohttp_module\n\n    spec = importlib.util.spec_from_file_location(\n        f\"{package_name}.api.orchestration.dispatch\",\n        module_path,\n    )\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n    if created_aiohttp_stub:\n        sys.modules.pop(\"aiohttp\", None)\n    return module\n\n\ndispatch = _load_dispatch_module()\n\n\nclass DispatchSelectionTests(unittest.IsolatedAsyncioTestCase):\n    async def test_select_active_workers_filters_offline(self):\n        workers = [\n            {\"id\": \"w1\", \"name\": \"Worker 1\"},\n            {\"id\": \"w2\", \"name\": \"Worker 2\"},\n            {\"id\": \"w3\", \"name\": \"Worker 3\"},\n        ]\n\n        async def fake_probe(worker):\n            return worker[\"id\"] != \"w2\"\n\n        with patch.object(dispatch, \"worker_is_active\", side_effect=fake_probe):\n            active_workers, delegate_master = 
await dispatch.select_active_workers(\n                workers,\n                use_websocket=False,\n                delegate_master=False,\n                probe_concurrency=3,\n            )\n\n        self.assertEqual([w[\"id\"] for w in active_workers], [\"w1\", \"w3\"])\n        self.assertFalse(delegate_master)\n\n    async def test_select_active_workers_disables_delegate_when_all_offline(self):\n        workers = [{\"id\": \"w1\", \"name\": \"Worker 1\"}]\n\n        async def fake_probe(_worker):\n            return False\n\n        with patch.object(dispatch, \"worker_is_active\", side_effect=fake_probe):\n            active_workers, delegate_master = await dispatch.select_active_workers(\n                workers,\n                use_websocket=False,\n                delegate_master=True,\n                probe_concurrency=1,\n            )\n\n        self.assertEqual(active_workers, [])\n        self.assertFalse(delegate_master)\n\n    async def test_select_active_workers_uses_websocket_probe_when_enabled(self):\n        workers = [{\"id\": \"w1\", \"name\": \"Worker 1\"}, {\"id\": \"w2\", \"name\": \"Worker 2\"}]\n\n        async def fake_http_probe(_worker):\n            return False\n\n        async def fake_ws_probe(_worker):\n            return True\n\n        with patch.object(dispatch, \"worker_is_active\", side_effect=fake_http_probe) as http_probe, patch.object(\n            dispatch,\n            \"worker_ws_is_active\",\n            side_effect=fake_ws_probe,\n        ) as ws_probe:\n            active_workers, _ = await dispatch.select_active_workers(\n                workers,\n                use_websocket=True,\n                delegate_master=False,\n                probe_concurrency=4,\n            )\n\n        self.assertEqual([w[\"id\"] for w in active_workers], [\"w1\", \"w2\"])\n        self.assertEqual(ws_probe.call_count, 2)\n        self.assertEqual(http_probe.call_count, 0)\n\n    async def 
test_probe_concurrency_is_bounded(self):\n        workers = [{\"id\": f\"w{i}\", \"name\": f\"Worker {i}\"} for i in range(6)]\n        state = {\"in_flight\": 0, \"max_in_flight\": 0}\n\n        async def fake_probe(_worker):\n            state[\"in_flight\"] += 1\n            state[\"max_in_flight\"] = max(state[\"max_in_flight\"], state[\"in_flight\"])\n            await asyncio.sleep(0.01)\n            state[\"in_flight\"] -= 1\n            return True\n\n        with patch.object(dispatch, \"worker_is_active\", side_effect=fake_probe):\n            active_workers, _ = await dispatch.select_active_workers(\n                workers,\n                use_websocket=False,\n                delegate_master=False,\n                probe_concurrency=2,\n            )\n\n        self.assertEqual(len(active_workers), len(workers))\n        self.assertLessEqual(state[\"max_in_flight\"], 2)\n        self.assertGreaterEqual(state[\"max_in_flight\"], 2)\n\n    async def test_select_least_busy_worker_round_robins_idle_workers(self):\n        workers = [\n            {\"id\": \"w1\", \"name\": \"Worker 1\"},\n            {\"id\": \"w2\", \"name\": \"Worker 2\"},\n            {\"id\": \"w3\", \"name\": \"Worker 3\"},\n        ]\n        queue_map = {\"w1\": 0, \"w2\": 0, \"w3\": 2}\n\n        async def fake_probe(worker_url, timeout=3.0):\n            worker_id = worker_url.rsplit(\"/\", 1)[-1]\n            return {\"exec_info\": {\"queue_remaining\": queue_map[worker_id]}}\n\n        with patch.object(dispatch, \"build_worker_url\", side_effect=lambda worker: f\"http://host/{worker['id']}\"), patch.object(\n            dispatch,\n            \"probe_worker\",\n            side_effect=fake_probe,\n        ):\n            dispatch._least_busy_rr_index = 0\n            selected1 = await dispatch.select_least_busy_worker(workers, probe_concurrency=3)\n            selected2 = await dispatch.select_least_busy_worker(workers, probe_concurrency=3)\n            selected3 = await 
dispatch.select_least_busy_worker(workers, probe_concurrency=3)\n\n        self.assertEqual(selected1[\"id\"], \"w1\")\n        self.assertEqual(selected2[\"id\"], \"w2\")\n        self.assertEqual(selected3[\"id\"], \"w1\")\n\n    async def test_select_least_busy_worker_chooses_smallest_queue_when_all_busy(self):\n        workers = [\n            {\"id\": \"w1\", \"name\": \"Worker 1\"},\n            {\"id\": \"w2\", \"name\": \"Worker 2\"},\n            {\"id\": \"w3\", \"name\": \"Worker 3\"},\n        ]\n        queue_map = {\"w1\": 5, \"w2\": 2, \"w3\": 4}\n\n        async def fake_probe(worker_url, timeout=3.0):\n            worker_id = worker_url.rsplit(\"/\", 1)[-1]\n            return {\"exec_info\": {\"queue_remaining\": queue_map[worker_id]}}\n\n        with patch.object(dispatch, \"build_worker_url\", side_effect=lambda worker: f\"http://host/{worker['id']}\"), patch.object(\n            dispatch,\n            \"probe_worker\",\n            side_effect=fake_probe,\n        ):\n            selected = await dispatch.select_least_busy_worker(workers, probe_concurrency=2)\n\n        self.assertEqual(selected[\"id\"], \"w2\")\n\n    async def test_select_least_busy_worker_returns_none_when_all_probes_fail(self):\n        workers = [{\"id\": \"w1\", \"name\": \"Worker 1\"}]\n\n        async def fake_probe(_worker_url, timeout=3.0):\n            return None\n\n        with patch.object(dispatch, \"build_worker_url\", side_effect=lambda worker: f\"http://host/{worker['id']}\"), patch.object(\n            dispatch,\n            \"probe_worker\",\n            side_effect=fake_probe,\n        ):\n            selected = await dispatch.select_least_busy_worker(workers, probe_concurrency=1)\n\n        self.assertIsNone(selected)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_distributed_value.py",
    "content": "import json\nimport unittest\n\n\nclass DistributedValueTests(unittest.TestCase):\n    \"\"\"Unit tests for the DistributedValue node's distribute() method.\"\"\"\n\n    def _make_node(self):\n        # Import inline to avoid plugin-level imports\n        import importlib.util\n        import sys\n        import types\n        from pathlib import Path\n        from unittest.mock import MagicMock\n\n        module_path = Path(__file__).resolve().parents[1] / \"nodes\" / \"utilities.py\"\n        pkg_name = \"dv_test_pkg\"\n\n        for mod_name in list(sys.modules):\n            if mod_name == pkg_name or mod_name.startswith(f\"{pkg_name}.\"):\n                del sys.modules[mod_name]\n\n        # Mock torch if not available\n        if \"torch\" not in sys.modules:\n            sys.modules[\"torch\"] = MagicMock()\n\n        root_pkg = types.ModuleType(pkg_name)\n        root_pkg.__path__ = []\n        sys.modules[pkg_name] = root_pkg\n\n        utils_pkg = types.ModuleType(f\"{pkg_name}.utils\")\n        utils_pkg.__path__ = []\n        sys.modules[f\"{pkg_name}.utils\"] = utils_pkg\n\n        logging_mod = types.ModuleType(f\"{pkg_name}.utils.logging\")\n        logging_mod.debug_log = lambda *_a, **_k: None\n        logging_mod.log = lambda *_a, **_k: None\n        sys.modules[f\"{pkg_name}.utils.logging\"] = logging_mod\n\n        spec = importlib.util.spec_from_file_location(\n            f\"{pkg_name}.nodes.utilities\", module_path\n        )\n        mod = importlib.util.module_from_spec(spec)\n        spec.loader.exec_module(mod)\n        return mod.DistributedValue()\n\n    def setUp(self):\n        self.node = self._make_node()\n\n    def test_master_returns_default(self):\n        result = self.node.distribute(\n            default_value=\"model_a\",\n            worker_values=\"{}\",\n            is_worker=False,\n            worker_id=\"\",\n        )\n        self.assertEqual(result, (\"model_a\",))\n\n    def 
test_master_coerces_default_int(self):\n        values = json.dumps({\"_type\": \"INT\"})\n        result = self.node.distribute(\n            default_value=\"42\",\n            worker_values=values,\n            is_worker=False,\n            worker_id=\"\",\n        )\n        self.assertEqual(result, (42,))\n        self.assertIsInstance(result[0], int)\n\n    def test_master_coerces_default_float(self):\n        values = json.dumps({\"_type\": \"FLOAT\"})\n        result = self.node.distribute(\n            default_value=\"2.5\",\n            worker_values=values,\n            is_worker=False,\n            worker_id=\"\",\n        )\n        self.assertEqual(result, (2.5,))\n        self.assertIsInstance(result[0], float)\n\n    def test_worker_returns_specific_value(self):\n        values = json.dumps({\"1\": \"model_x\", \"2\": \"model_y\"})\n        result = self.node.distribute(\n            default_value=\"default\",\n            worker_values=values,\n            is_worker=True,\n            worker_id=\"worker_0\",\n        )\n        self.assertEqual(result, (\"model_x\",))\n\n    def test_worker_second_index(self):\n        values = json.dumps({\"1\": \"model_x\", \"2\": \"model_y\"})\n        result = self.node.distribute(\n            default_value=\"default\",\n            worker_values=values,\n            is_worker=True,\n            worker_id=\"worker_1\",\n        )\n        self.assertEqual(result, (\"model_y\",))\n\n    def test_worker_falls_back_to_default_when_key_missing(self):\n        values = json.dumps({\"_type\": \"INT\", \"1\": \"3\"})\n        result = self.node.distribute(\n            default_value=\"9\",\n            worker_values=values,\n            is_worker=True,\n            worker_id=\"worker_5\",\n        )\n        self.assertEqual(result, (9,))\n        self.assertIsInstance(result[0], int)\n\n    def test_worker_falls_back_to_default_on_empty_value(self):\n        values = json.dumps({\"1\": \"\"})\n        result = 
self.node.distribute(\n            default_value=\"fallback\",\n            worker_values=values,\n            is_worker=True,\n            worker_id=\"worker_0\",\n        )\n        self.assertEqual(result, (\"fallback\",))\n\n    def test_worker_falls_back_on_invalid_json(self):\n        result = self.node.distribute(\n            default_value=\"safe\",\n            worker_values=\"not-json\",\n            is_worker=True,\n            worker_id=\"worker_0\",\n        )\n        self.assertEqual(result, (\"safe\",))\n\n    def test_worker_falls_back_on_invalid_worker_id(self):\n        values = json.dumps({\"1\": \"model_x\"})\n        result = self.node.distribute(\n            default_value=\"safe\",\n            worker_values=values,\n            is_worker=True,\n            worker_id=\"bad_id\",\n        )\n        self.assertEqual(result, (\"safe\",))\n\n    def test_worker_id_as_direct_integer(self):\n        values = json.dumps({\"1\": \"model_x\"})\n        result = self.node.distribute(\n            default_value=\"default\",\n            worker_values=values,\n            is_worker=True,\n            worker_id=\"0\",\n        )\n        self.assertEqual(result, (\"model_x\",))\n\n    def test_type_int_coerces_value(self):\n        values = json.dumps({\"_type\": \"INT\", \"1\": \"42\"})\n        result = self.node.distribute(\n            default_value=\"0\",\n            worker_values=values,\n            is_worker=True,\n            worker_id=\"worker_0\",\n        )\n        self.assertEqual(result, (42,))\n        self.assertIsInstance(result[0], int)\n\n    def test_type_float_coerces_value(self):\n        values = json.dumps({\"_type\": \"FLOAT\", \"1\": \"3.14\"})\n        result = self.node.distribute(\n            default_value=\"0\",\n            worker_values=values,\n            is_worker=True,\n            worker_id=\"worker_0\",\n        )\n        self.assertAlmostEqual(result[0], 3.14)\n        self.assertIsInstance(result[0], 
float)\n\n    def test_type_combo_stays_string(self):\n        values = json.dumps({\"_type\": \"COMBO\", \"1\": \"model_v2\"})\n        result = self.node.distribute(\n            default_value=\"model_v1\",\n            worker_values=values,\n            is_worker=True,\n            worker_id=\"worker_0\",\n        )\n        self.assertEqual(result, (\"model_v2\",))\n        self.assertIsInstance(result[0], str)\n\n    def test_type_string_default_stays_string(self):\n        values = json.dumps({\"1\": \"hello\"})\n        result = self.node.distribute(\n            default_value=\"default\",\n            worker_values=values,\n            is_worker=True,\n            worker_id=\"worker_0\",\n        )\n        self.assertEqual(result, (\"hello\",))\n        self.assertIsInstance(result[0], str)\n\n    def test_int_coerce_handles_float_string(self):\n        \"\"\"INT coercion of '3.7' should truncate to 3.\"\"\"\n        values = json.dumps({\"_type\": \"INT\", \"1\": \"3.7\"})\n        result = self.node.distribute(\n            default_value=\"0\",\n            worker_values=values,\n            is_worker=True,\n            worker_id=\"worker_0\",\n        )\n        self.assertEqual(result, (3,))\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_job_timeout.py",
    "content": "import asyncio\nimport importlib.util\nimport sys\nimport time\nimport types\nimport unittest\nfrom dataclasses import dataclass, field\nfrom pathlib import Path\n\n\ndef _load_job_timeout_module():\n    module_path = Path(__file__).resolve().parents[1] / \"upscale\" / \"job_timeout.py\"\n    package_name = \"dist_job_timeout_testpkg\"\n\n    for mod_name in list(sys.modules):\n        if mod_name == package_name or mod_name.startswith(f\"{package_name}.\"):\n            del sys.modules[mod_name]\n\n    root_pkg = types.ModuleType(package_name)\n    root_pkg.__path__ = []\n    sys.modules[package_name] = root_pkg\n\n    upscale_pkg = types.ModuleType(f\"{package_name}.upscale\")\n    upscale_pkg.__path__ = []\n    sys.modules[f\"{package_name}.upscale\"] = upscale_pkg\n\n    utils_pkg = types.ModuleType(f\"{package_name}.utils\")\n    utils_pkg.__path__ = []\n    sys.modules[f\"{package_name}.utils\"] = utils_pkg\n\n    config_holder = {\"value\": {\"settings\": {}, \"workers\": []}}\n    probe_holder = {\"fn\": None}\n    prompt_server_holder = {\n        \"value\": types.SimpleNamespace(\n            distributed_tile_jobs_lock=asyncio.Lock(),\n            distributed_pending_tile_jobs={},\n        )\n    }\n\n    config_module = types.ModuleType(f\"{package_name}.utils.config\")\n    config_module.load_config = lambda: config_holder[\"value\"]\n    sys.modules[f\"{package_name}.utils.config\"] = config_module\n\n    constants_module = types.ModuleType(f\"{package_name}.utils.constants\")\n    constants_module.HEARTBEAT_TIMEOUT = 60\n    sys.modules[f\"{package_name}.utils.constants\"] = constants_module\n\n    logging_module = types.ModuleType(f\"{package_name}.utils.logging\")\n    logging_module.debug_log = lambda *_args, **_kwargs: None\n    logging_module.log = lambda *_args, **_kwargs: None\n    sys.modules[f\"{package_name}.utils.logging\"] = logging_module\n\n    network_module = types.ModuleType(f\"{package_name}.utils.network\")\n    
network_module.build_worker_url = lambda worker: f\"http://{worker.get('host', '127.0.0.1')}:{worker.get('port', 8188)}\"\n\n    async def _probe_worker(url, timeout=2.0):\n        fn = probe_holder[\"fn\"]\n        if fn is None:\n            return None\n        return await fn(url, timeout)\n\n    network_module.probe_worker = _probe_worker\n    sys.modules[f\"{package_name}.utils.network\"] = network_module\n\n    job_store_module = types.ModuleType(f\"{package_name}.upscale.job_store\")\n    job_store_module.ensure_tile_jobs_initialized = lambda: prompt_server_holder[\"value\"]\n    sys.modules[f\"{package_name}.upscale.job_store\"] = job_store_module\n\n    job_models_module = types.ModuleType(f\"{package_name}.upscale.job_models\")\n\n    class BaseJobState:\n        pass\n\n    @dataclass\n    class ImageJobState(BaseJobState):\n        multi_job_id: str\n        mode: str = field(default=\"dynamic\", init=False)\n        queue: asyncio.Queue = field(default_factory=asyncio.Queue)\n        pending_images: asyncio.Queue = field(default_factory=asyncio.Queue)\n        completed_images: dict = field(default_factory=dict)\n        worker_status: dict = field(default_factory=dict)\n        assigned_to_workers: dict = field(default_factory=dict)\n        batch_size: int = 0\n        num_tiles_per_image: int = 0\n        batched_static: bool = False\n\n        @property\n        def pending_tasks(self):\n            return self.pending_images\n\n        @property\n        def completed_tasks(self):\n            return self.completed_images\n\n    @dataclass\n    class TileJobState(BaseJobState):\n        multi_job_id: str\n        mode: str = field(default=\"static\", init=False)\n        queue: asyncio.Queue = field(default_factory=asyncio.Queue)\n        pending_tasks: asyncio.Queue = field(default_factory=asyncio.Queue)\n        completed_tasks: dict = field(default_factory=dict)\n        worker_status: dict = field(default_factory=dict)\n        
assigned_to_workers: dict = field(default_factory=dict)\n        batch_size: int = 0\n        num_tiles_per_image: int = 0\n        batched_static: bool = False\n\n    job_models_module.BaseJobState = BaseJobState\n    job_models_module.ImageJobState = ImageJobState\n    job_models_module.TileJobState = TileJobState\n    sys.modules[f\"{package_name}.upscale.job_models\"] = job_models_module\n\n    spec = importlib.util.spec_from_file_location(f\"{package_name}.upscale.job_timeout\", module_path)\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n\n    module._config_holder = config_holder\n    module._probe_holder = probe_holder\n    module._prompt_server_holder = prompt_server_holder\n    module._ImageJobState = ImageJobState\n    module._TileJobState = TileJobState\n    return module\n\n\njt = _load_job_timeout_module()\n\n\nclass JobTimeoutRequeueTests(unittest.IsolatedAsyncioTestCase):\n    async def asyncSetUp(self):\n        jt._prompt_server_holder[\"value\"] = types.SimpleNamespace(\n            distributed_tile_jobs_lock=asyncio.Lock(),\n            distributed_pending_tile_jobs={},\n        )\n        jt._config_holder[\"value\"] = {\n            \"settings\": {\"worker_timeout_seconds\": 5},\n            \"workers\": [{\"id\": \"worker-1\", \"host\": \"worker.local\", \"port\": 8188}],\n        }\n\n    async def test_requeues_only_incomplete_dynamic_tasks_for_timed_out_worker(self):\n        async def _offline_probe(_url, _timeout):\n            return None\n\n        jt._probe_holder[\"fn\"] = _offline_probe\n        prompt_server = jt._prompt_server_holder[\"value\"]\n        job_data = jt._ImageJobState(\"job-1\")\n        job_data.worker_status[\"worker-1\"] = time.time() - 60.0\n        job_data.assigned_to_workers[\"worker-1\"] = [0, 1]\n        job_data.completed_images[1] = \"done\"\n        prompt_server.distributed_pending_tile_jobs[\"job-1\"] = 
job_data\n\n        requeued = await jt._check_and_requeue_timed_out_workers(\"job-1\", total_tasks=2)\n\n        self.assertEqual(requeued, 1)\n        self.assertEqual(await job_data.pending_images.get(), 0)\n        self.assertNotIn(\"worker-1\", job_data.worker_status)\n        self.assertEqual(job_data.assigned_to_workers[\"worker-1\"], [])\n\n    async def test_busy_probe_graces_worker_and_skips_requeue(self):\n        async def _busy_probe(_url, _timeout):\n            return {\"exec_info\": {\"queue_remaining\": 3}}\n\n        jt._probe_holder[\"fn\"] = _busy_probe\n        prompt_server = jt._prompt_server_holder[\"value\"]\n        job_data = jt._ImageJobState(\"job-2\")\n        old_heartbeat = time.time() - 60.0\n        job_data.worker_status[\"worker-1\"] = old_heartbeat\n        job_data.assigned_to_workers[\"worker-1\"] = [0]\n        prompt_server.distributed_pending_tile_jobs[\"job-2\"] = job_data\n\n        requeued = await jt._check_and_requeue_timed_out_workers(\"job-2\", total_tasks=1)\n\n        self.assertEqual(requeued, 0)\n        self.assertIn(\"worker-1\", job_data.worker_status)\n        self.assertGreaterEqual(job_data.worker_status[\"worker-1\"], old_heartbeat)\n        self.assertTrue(job_data.pending_images.empty())\n\n    async def test_completed_dynamic_task_is_not_requeued(self):\n        async def _offline_probe(_url, _timeout):\n            return None\n\n        jt._probe_holder[\"fn\"] = _offline_probe\n        prompt_server = jt._prompt_server_holder[\"value\"]\n        job_data = jt._ImageJobState(\"job-3\")\n        job_data.worker_status[\"worker-1\"] = time.time() - 60.0\n        job_data.assigned_to_workers[\"worker-1\"] = [7]\n        job_data.completed_images[7] = \"complete\"\n        prompt_server.distributed_pending_tile_jobs[\"job-3\"] = job_data\n\n        requeued = await jt._check_and_requeue_timed_out_workers(\"job-3\", total_tasks=1)\n\n        self.assertEqual(requeued, 0)\n        
self.assertTrue(job_data.pending_images.empty())\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_network_helpers.py",
    "content": "import importlib.util\nimport sys\nimport types\nimport unittest\nfrom pathlib import Path\n\n\ndef _load_network_module():\n    module_path = Path(__file__).resolve().parents[1] / \"utils\" / \"network.py\"\n\n    package_name = \"dist_utils_testpkg\"\n    package_module = types.ModuleType(package_name)\n    package_module.__path__ = []  # mark as package\n    sys.modules[package_name] = package_module\n\n    logging_module = types.ModuleType(f\"{package_name}.logging\")\n    logging_module.debug_log = lambda *_args, **_kwargs: None\n    sys.modules[f\"{package_name}.logging\"] = logging_module\n\n    server_module = types.ModuleType(\"server\")\n    server_module.PromptServer = types.SimpleNamespace(\n        instance=types.SimpleNamespace(address=\"127.0.0.1\", port=8188, loop=None)\n    )\n    sys.modules[\"server\"] = server_module\n\n    if \"aiohttp\" not in sys.modules:\n        aiohttp_module = types.ModuleType(\"aiohttp\")\n\n        class _TCPConnector:\n            def __init__(self, *args, **kwargs):\n                self.args = args\n                self.kwargs = kwargs\n\n        class _ClientSession:\n            def __init__(self, *args, **kwargs):\n                self.closed = False\n\n            async def close(self):\n                self.closed = True\n\n        aiohttp_module.TCPConnector = _TCPConnector\n        aiohttp_module.ClientSession = _ClientSession\n        aiohttp_module.web = types.SimpleNamespace(\n            json_response=lambda payload, status=200: {\"payload\": payload, \"status\": status}\n        )\n        sys.modules[\"aiohttp\"] = aiohttp_module\n\n    spec = importlib.util.spec_from_file_location(f\"{package_name}.network\", module_path)\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n    return module\n\n\nnetwork = _load_network_module()\n\n\nclass NetworkHelpersTests(unittest.TestCase):\n    def 
test_normalize_host_strips_protocol_and_path(self):\n        self.assertEqual(network.normalize_host(\"  https://example.com/a/b  \"), \"example.com\")\n\n    def test_normalize_host_keeps_none(self):\n        self.assertIsNone(network.normalize_host(None))\n\n    def test_build_worker_url_defaults_to_server_address(self):\n        worker = {\"id\": \"w1\", \"port\": 8189}\n        self.assertEqual(network.build_worker_url(worker, \"/prompt\"), \"http://127.0.0.1:8189/prompt\")\n\n    def test_build_worker_url_cloud_defaults_to_https(self):\n        worker = {\"id\": \"w2\", \"host\": \"foo.proxy.runpod.net\", \"port\": 443}\n        self.assertEqual(network.build_worker_url(worker), \"https://foo.proxy.runpod.net\")\n\n    def test_build_worker_url_keeps_explicit_scheme(self):\n        worker = {\"id\": \"w3\", \"host\": \"https://worker.example.com\", \"port\": 1234}\n        self.assertEqual(network.build_worker_url(worker, \"/prompt\"), \"https://worker.example.com/prompt\")\n\n    def test_build_master_url_uses_https_for_cloud_host(self):\n        cfg = {\"master\": {\"host\": \"demo.proxy.runpod.net\"}}\n        prompt_server = types.SimpleNamespace(address=\"127.0.0.1\", port=8188)\n        self.assertEqual(\n            network.build_master_url(config=cfg, prompt_server_instance=prompt_server),\n            \"https://demo.proxy.runpod.net\",\n        )\n\n    def test_build_master_url_keeps_explicit_scheme(self):\n        cfg = {\"master\": {\"host\": \"https://master.example.com/\"}}\n        prompt_server = types.SimpleNamespace(address=\"127.0.0.1\", port=8188)\n        self.assertEqual(\n            network.build_master_url(config=cfg, prompt_server_instance=prompt_server),\n            \"https://master.example.com\",\n        )\n\n    def test_build_master_url_ignores_stale_saved_port_and_uses_runtime_port(self):\n        cfg = {\"master\": {\"host\": \"192.168.68.56\", \"port\": 8001}}\n        prompt_server = 
types.SimpleNamespace(address=\"127.0.0.1\", port=8188)\n        self.assertEqual(\n            network.build_master_url(config=cfg, prompt_server_instance=prompt_server),\n            \"http://192.168.68.56:8188\",\n        )\n\n    def test_build_master_url_keeps_explicit_port_in_host(self):\n        cfg = {\"master\": {\"host\": \"192.168.68.56:8001\"}}\n        prompt_server = types.SimpleNamespace(address=\"127.0.0.1\", port=8188)\n        self.assertEqual(\n            network.build_master_url(config=cfg, prompt_server_instance=prompt_server),\n            \"http://192.168.68.56:8001\",\n        )\n\n    def test_build_master_url_falls_back_to_server_address(self):\n        cfg = {\"master\": {\"host\": \"\", \"port\": 8001}}\n        prompt_server = types.SimpleNamespace(address=\"0.0.0.0\", port=8190)\n        self.assertEqual(\n            network.build_master_url(config=cfg, prompt_server_instance=prompt_server),\n            \"http://127.0.0.1:8190\",\n        )\n\n    def test_build_master_callback_url_uses_loopback_for_local_worker(self):\n        cfg = {\"master\": {\"host\": \"192.168.68.56\"}}\n        prompt_server = types.SimpleNamespace(address=\"127.0.0.1\", port=8001)\n        worker = {\"id\": \"w1\", \"type\": \"local\", \"host\": \"localhost\", \"port\": 8189}\n        self.assertEqual(\n            network.build_master_callback_url(worker, config=cfg, prompt_server_instance=prompt_server),\n            \"http://127.0.0.1:8001\",\n        )\n\n    def test_build_master_callback_url_keeps_public_master_url_for_remote_worker(self):\n        cfg = {\"master\": {\"host\": \"192.168.68.56\"}}\n        prompt_server = types.SimpleNamespace(address=\"127.0.0.1\", port=8001)\n        worker = {\"id\": \"w2\", \"type\": \"remote\", \"host\": \"192.168.68.99\", \"port\": 8189}\n        self.assertEqual(\n            network.build_master_callback_url(worker, config=cfg, prompt_server_instance=prompt_server),\n            
\"http://192.168.68.56:8001\",\n        )\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_payload_parsers.py",
    "content": "import importlib.util\nimport io\nimport json\nimport sys\nimport types\nimport unittest\nfrom pathlib import Path\n\ntry:\n    from PIL import Image as PILImage\n    PIL_AVAILABLE = True\nexcept ImportError:\n    PIL_AVAILABLE = False\n\n\ndef _load_payload_parsers_module():\n    # payload_parsers.py has no relative imports; only stdlib + PIL\n    module_path = Path(__file__).resolve().parents[1] / \"upscale\" / \"payload_parsers.py\"\n    spec = importlib.util.spec_from_file_location(\"upscale_payload_parsers\", module_path)\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n    return module\n\n\nif PIL_AVAILABLE:\n    pp = _load_payload_parsers_module()\n\n\n# ---------------------------------------------------------------------------\n# Helpers\n# ---------------------------------------------------------------------------\n\ndef _make_png_bytes(width=64, height=64, color=(128, 64, 32)):\n    \"\"\"Return raw PNG bytes for a solid-colour image.\"\"\"\n    img = PILImage.new(\"RGB\", (width, height), color=color)\n    buf = io.BytesIO()\n    img.save(buf, format=\"PNG\")\n    return buf.getvalue()\n\n\nclass _MockFileField:\n    \"\"\"Minimal multipart file-field stub.\"\"\"\n\n    class _MockFile:\n        def __init__(self, data: bytes):\n            self._buf = io.BytesIO(data)\n\n        def read(self) -> bytes:\n            return self._buf.read()\n\n    def __init__(self, data: bytes):\n        self.file = self._MockFile(data)\n\n\ndef _make_form(n_tiles, *, padding=None, extra_meta=None, image_color=(128, 64, 32)):\n    \"\"\"Build a minimal form-data dict with `n_tiles` tile entries.\"\"\"\n    image_bytes = _make_png_bytes(color=image_color)\n    metadata = []\n    for i in range(n_tiles):\n        entry = {\n            \"tile_idx\": i,\n            \"x\": i * 64,\n            \"y\": 0,\n            \"extracted_width\": 64,\n            
\"extracted_height\": 64,\n        }\n        if extra_meta and i < len(extra_meta):\n            entry.update(extra_meta[i])\n        metadata.append(entry)\n\n    form = {\"tiles_metadata\": json.dumps(metadata)}\n    if padding is not None:\n        form[\"padding\"] = str(padding)\n    for i in range(n_tiles):\n        form[f\"tile_{i}\"] = _MockFileField(image_bytes)\n    return form\n\n\n# ---------------------------------------------------------------------------\n# Tests\n# ---------------------------------------------------------------------------\n\n@unittest.skipUnless(PIL_AVAILABLE, \"PIL not installed\")\nclass ParseTilesFromFormTests(unittest.TestCase):\n\n    # --- happy paths ---\n\n    def test_single_tile_returns_one_entry(self):\n        tiles = pp._parse_tiles_from_form(_make_form(1))\n        self.assertEqual(len(tiles), 1)\n\n    def test_multiple_tiles_all_returned(self):\n        tiles = pp._parse_tiles_from_form(_make_form(3))\n        self.assertEqual(len(tiles), 3)\n\n    def test_tile_image_is_pil_image(self):\n        tiles = pp._parse_tiles_from_form(_make_form(1))\n        self.assertIsInstance(tiles[0][\"image\"], PILImage.Image)\n\n    def test_tile_metadata_fields_are_parsed(self):\n        tiles = pp._parse_tiles_from_form(_make_form(1))\n        tile = tiles[0]\n        self.assertEqual(tile[\"tile_idx\"], 0)\n        self.assertEqual(tile[\"x\"], 0)\n        self.assertEqual(tile[\"y\"], 0)\n        self.assertEqual(tile[\"extracted_width\"], 64)\n        self.assertEqual(tile[\"extracted_height\"], 64)\n\n    def test_padding_is_parsed_from_form(self):\n        tiles = pp._parse_tiles_from_form(_make_form(1, padding=16))\n        self.assertEqual(tiles[0][\"padding\"], 16)\n\n    def test_default_padding_is_zero(self):\n        form = _make_form(1)\n        form.pop(\"padding\", None)\n        tiles = pp._parse_tiles_from_form(form)\n        self.assertEqual(tiles[0][\"padding\"], 0)\n\n    def 
test_invalid_padding_string_falls_back_to_zero(self):\n        form = _make_form(1)\n        form[\"padding\"] = \"not_a_number\"\n        tiles = pp._parse_tiles_from_form(form)\n        self.assertEqual(tiles[0][\"padding\"], 0)\n\n    def test_optional_batch_idx_included_when_present(self):\n        extra = [{\"batch_idx\": 2}]\n        tiles = pp._parse_tiles_from_form(_make_form(1, extra_meta=extra))\n        self.assertEqual(tiles[0][\"batch_idx\"], 2)\n\n    def test_optional_global_idx_included_when_present(self):\n        extra = [{\"global_idx\": 5}]\n        tiles = pp._parse_tiles_from_form(_make_form(1, extra_meta=extra))\n        self.assertEqual(tiles[0][\"global_idx\"], 5)\n\n    def test_batch_idx_and_global_idx_absent_when_not_in_metadata(self):\n        tiles = pp._parse_tiles_from_form(_make_form(1))\n        self.assertNotIn(\"batch_idx\", tiles[0])\n        self.assertNotIn(\"global_idx\", tiles[0])\n\n    def test_tile_indices_match_metadata_order(self):\n        tiles = pp._parse_tiles_from_form(_make_form(3))\n        for i, tile in enumerate(tiles):\n            self.assertEqual(tile[\"tile_idx\"], i)\n\n    def test_x_coordinates_reflect_metadata(self):\n        tiles = pp._parse_tiles_from_form(_make_form(3))\n        self.assertEqual(tiles[1][\"x\"], 64)\n        self.assertEqual(tiles[2][\"x\"], 128)\n\n    # --- error cases ---\n\n    def test_missing_tiles_metadata_raises_value_error(self):\n        with self.assertRaises(ValueError, msg=\"Missing tiles_metadata\"):\n            pp._parse_tiles_from_form({})\n\n    def test_invalid_json_metadata_raises_value_error(self):\n        form = {\"tiles_metadata\": \"{not valid json}\"}\n        with self.assertRaises(ValueError):\n            pp._parse_tiles_from_form(form)\n\n    def test_non_list_metadata_raises_value_error(self):\n        form = {\"tiles_metadata\": json.dumps({\"not\": \"a list\"})}\n        with self.assertRaises(ValueError):\n            
pp._parse_tiles_from_form(form)\n\n    def test_missing_tile_file_field_raises_value_error(self):\n        form = {\n            \"tiles_metadata\": json.dumps([{\"tile_idx\": 0, \"x\": 0, \"y\": 0}]),\n            # tile_0 intentionally omitted\n        }\n        with self.assertRaises(ValueError):\n            pp._parse_tiles_from_form(form)\n\n    def test_tile_field_without_file_attr_raises_value_error(self):\n        form = {\n            \"tiles_metadata\": json.dumps([{\"tile_idx\": 0, \"x\": 0, \"y\": 0}]),\n            \"tile_0\": \"plain string without .file\",\n        }\n        with self.assertRaises(ValueError):\n            pp._parse_tiles_from_form(form)\n\n    def test_non_image_bytes_raises_value_error(self):\n        class _BadFileField:\n            class _BadFile:\n                def read(self):\n                    return b\"this is definitely not image data\"\n            file = _BadFile()\n\n        form = {\n            \"tiles_metadata\": json.dumps([{\"tile_idx\": 0, \"x\": 0, \"y\": 0}]),\n            \"tile_0\": _BadFileField(),\n        }\n        with self.assertRaises(ValueError):\n            pp._parse_tiles_from_form(form)\n\n    def test_invalid_metadata_value_type_raises_value_error(self):\n        \"\"\"Non-integer metadata fields (x, y, etc.) should raise ValueError.\"\"\"\n        form = {\n            \"tiles_metadata\": json.dumps([{\"tile_idx\": 0, \"x\": \"not_int\", \"y\": 0}]),\n            \"tile_0\": _MockFileField(_make_png_bytes()),\n        }\n        with self.assertRaises(ValueError):\n            pp._parse_tiles_from_form(form)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_prompt_transform.py",
    "content": "import importlib.util\nimport json\nimport sys\nimport types\nimport unittest\nfrom pathlib import Path\n\n\ndef _load_prompt_transform_module():\n    module_path = Path(__file__).resolve().parents[1] / \"api\" / \"orchestration\" / \"prompt_transform.py\"\n    package_name = \"dist_pt_testpkg\"\n\n    for mod_name in list(sys.modules):\n        if mod_name == package_name or mod_name.startswith(f\"{package_name}.\"):\n            del sys.modules[mod_name]\n\n    root_pkg = types.ModuleType(package_name)\n    root_pkg.__path__ = []\n    sys.modules[package_name] = root_pkg\n\n    api_pkg = types.ModuleType(f\"{package_name}.api\")\n    api_pkg.__path__ = []\n    sys.modules[f\"{package_name}.api\"] = api_pkg\n\n    orch_pkg = types.ModuleType(f\"{package_name}.api.orchestration\")\n    orch_pkg.__path__ = []\n    sys.modules[f\"{package_name}.api.orchestration\"] = orch_pkg\n\n    utils_pkg = types.ModuleType(f\"{package_name}.utils\")\n    utils_pkg.__path__ = []\n    sys.modules[f\"{package_name}.utils\"] = utils_pkg\n\n    logging_module = types.ModuleType(f\"{package_name}.utils.logging\")\n    logging_module.debug_log = lambda *_args, **_kwargs: None\n    logging_module.log = lambda *_args, **_kwargs: None\n    sys.modules[f\"{package_name}.utils.logging\"] = logging_module\n\n    spec = importlib.util.spec_from_file_location(\n        f\"{package_name}.api.orchestration.prompt_transform\",\n        module_path,\n    )\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n    return module\n\n\npt = _load_prompt_transform_module()\n\n\n# ---------------------------------------------------------------------------\n# Helpers\n# ---------------------------------------------------------------------------\n\ndef _linear_prompt():\n    \"\"\"1 → 2 → 3 → 4(DistributedCollector) → 5(SaveImage)\"\"\"\n    return {\n        \"1\": {\"class_type\": 
\"CheckpointLoaderSimple\", \"inputs\": {}},\n        \"2\": {\"class_type\": \"CLIPTextEncode\", \"inputs\": {\"clip\": [\"1\", 1]}},\n        \"3\": {\"class_type\": \"KSampler\", \"inputs\": {\"model\": [\"1\", 0], \"positive\": [\"2\", 0]}},\n        \"4\": {\"class_type\": \"DistributedCollector\", \"inputs\": {\"images\": [\"3\", 0]}},\n        \"5\": {\"class_type\": \"SaveImage\", \"inputs\": {\"images\": [\"4\", 0]}},\n    }\n\n\ndef _collector_only_prompt():\n    \"\"\"1(Checkpoint) → 2(DistributedCollector) [no downstream from 2]\"\"\"\n    return {\n        \"1\": {\"class_type\": \"CheckpointLoaderSimple\", \"inputs\": {}},\n        \"2\": {\"class_type\": \"DistributedCollector\", \"inputs\": {\"images\": [\"1\", 0]}},\n    }\n\n\ndef _delegate_prompt():\n    \"\"\"1 → 2 → 3(DistributedCollector) → 4(SaveImage)\"\"\"\n    return {\n        \"1\": {\"class_type\": \"CheckpointLoaderSimple\", \"inputs\": {}},\n        \"2\": {\"class_type\": \"KSampler\", \"inputs\": {\"model\": [\"1\", 0]}},\n        \"3\": {\"class_type\": \"DistributedCollector\", \"inputs\": {\"images\": [\"2\", 0]}},\n        \"4\": {\"class_type\": \"SaveImage\", \"inputs\": {\"images\": [\"3\", 0]}},\n    }\n\n\ndef _apply(prompt, participant_id, enabled_worker_ids=None, delegate_master=False):\n    if enabled_worker_ids is None:\n        enabled_worker_ids = [\"worker-a\", \"worker-b\"]\n    idx = pt.PromptIndex(prompt)\n    job_id_map = pt.generate_job_id_map(idx, \"run\")\n    return pt.apply_participant_overrides(\n        prompt,\n        participant_id=participant_id,\n        enabled_worker_ids=enabled_worker_ids,\n        job_id_map=job_id_map,\n        master_url=\"http://master.example.com\",\n        delegate_master=delegate_master,\n        prompt_index=idx,\n    )\n\n\n# ---------------------------------------------------------------------------\n# PromptIndex\n# ---------------------------------------------------------------------------\n\nclass 
PromptIndexTests(unittest.TestCase):\n    def test_nodes_by_class_groups_correctly(self):\n        prompt = {\n            \"1\": {\"class_type\": \"CheckpointLoaderSimple\", \"inputs\": {}},\n            \"2\": {\"class_type\": \"DistributedCollector\", \"inputs\": {}},\n            \"3\": {\"class_type\": \"DistributedCollector\", \"inputs\": {}},\n        }\n        idx = pt.PromptIndex(prompt)\n        self.assertCountEqual(idx.nodes_for_class(\"DistributedCollector\"), [\"2\", \"3\"])\n        self.assertEqual(idx.nodes_for_class(\"CheckpointLoaderSimple\"), [\"1\"])\n\n    def test_nodes_for_class_unknown_returns_empty(self):\n        idx = pt.PromptIndex({\"1\": {\"class_type\": \"KSampler\", \"inputs\": {}}})\n        self.assertEqual(idx.nodes_for_class(\"Nonexistent\"), [])\n\n    def test_nodes_without_class_type_are_indexed_under_none(self):\n        prompt = {\"1\": {\"inputs\": {}}}\n        idx = pt.PromptIndex(prompt)\n        # Should not raise; nodes_for_class with None key or missing class_type\n        self.assertEqual(idx.nodes_for_class(\"KSampler\"), [])\n\n    def test_copy_prompt_is_a_deep_copy(self):\n        prompt = {\"1\": {\"class_type\": \"KSampler\", \"inputs\": {\"seed\": 42}}}\n        idx = pt.PromptIndex(prompt)\n        copy = idx.copy_prompt()\n        copy[\"1\"][\"inputs\"][\"seed\"] = 999\n        self.assertEqual(prompt[\"1\"][\"inputs\"][\"seed\"], 42)\n\n    def test_has_upstream_direct_connection(self):\n        \"\"\"Node 4 reads directly from node 3 (KSampler).\"\"\"\n        idx = pt.PromptIndex(_linear_prompt())\n        self.assertTrue(idx.has_upstream(\"4\", \"KSampler\"))\n\n    def test_has_upstream_transitive_connection(self):\n        \"\"\"Node 4 → 3 → 2 → 1 (CheckpointLoaderSimple).\"\"\"\n        idx = pt.PromptIndex(_linear_prompt())\n        self.assertTrue(idx.has_upstream(\"4\", \"CheckpointLoaderSimple\"))\n\n    def test_has_upstream_returns_false_when_no_path(self):\n        idx = 
pt.PromptIndex(_linear_prompt())\n        # CheckpointLoaderSimple has no upstream nodes\n        self.assertFalse(idx.has_upstream(\"1\", \"DistributedCollector\"))\n\n    def test_has_upstream_result_is_cached(self):\n        idx = pt.PromptIndex(_linear_prompt())\n        r1 = idx.has_upstream(\"4\", \"KSampler\")\n        r2 = idx.has_upstream(\"4\", \"KSampler\")\n        self.assertEqual(r1, r2)\n        self.assertIn((\"4\", \"KSampler\"), idx._upstream_cache)\n\n    def test_has_upstream_does_not_infinite_loop_on_cycle(self):\n        \"\"\"Cyclic references in inputs should not cause infinite recursion.\"\"\"\n        prompt = {\n            \"1\": {\"class_type\": \"A\", \"inputs\": {\"x\": [\"2\", 0]}},\n            \"2\": {\"class_type\": \"B\", \"inputs\": {\"x\": [\"1\", 0]}},\n        }\n        idx = pt.PromptIndex(prompt)\n        # Should terminate without error\n        result = idx.has_upstream(\"1\", \"NonExistent\")\n        self.assertFalse(result)\n\n\n# ---------------------------------------------------------------------------\n# find_nodes_by_class\n# ---------------------------------------------------------------------------\n\nclass FindNodesByClassTests(unittest.TestCase):\n    def test_finds_matching_nodes(self):\n        prompt = {\n            \"1\": {\"class_type\": \"KSampler\", \"inputs\": {}},\n            \"2\": {\"class_type\": \"DistributedCollector\", \"inputs\": {}},\n        }\n        result = pt.find_nodes_by_class(prompt, \"KSampler\")\n        self.assertEqual(result, [\"1\"])\n\n    def test_returns_empty_when_no_match(self):\n        prompt = {\"1\": {\"class_type\": \"KSampler\", \"inputs\": {}}}\n        self.assertEqual(pt.find_nodes_by_class(prompt, \"DistributedCollector\"), [])\n\n    def test_skips_non_dict_nodes(self):\n        prompt = {\"1\": \"not a dict\", \"2\": {\"class_type\": \"KSampler\", \"inputs\": {}}}\n        result = pt.find_nodes_by_class(prompt, \"KSampler\")\n        self.assertEqual(result, 
[\"2\"])\n\n\n# ---------------------------------------------------------------------------\n# prune_prompt_for_worker\n# ---------------------------------------------------------------------------\n\nclass PrunePromptForWorkerTests(unittest.TestCase):\n    def test_no_distributed_nodes_returns_prompt_unchanged(self):\n        prompt = {\n            \"1\": {\"class_type\": \"CheckpointLoaderSimple\", \"inputs\": {}},\n            \"2\": {\"class_type\": \"SaveImage\", \"inputs\": {\"images\": [\"1\", 0]}},\n        }\n        result = pt.prune_prompt_for_worker(prompt)\n        self.assertCountEqual(result.keys(), [\"1\", \"2\"])\n\n    def test_keeps_collector_and_upstream(self):\n        prompt = _linear_prompt()\n        result = pt.prune_prompt_for_worker(prompt)\n        for node_id in (\"1\", \"2\", \"3\", \"4\"):\n            self.assertIn(node_id, result)\n\n    def test_removes_downstream_of_collector(self):\n        prompt = _linear_prompt()\n        result = pt.prune_prompt_for_worker(prompt)\n        self.assertNotIn(\"5\", result)\n\n    def test_injects_preview_image_when_downstream_exists(self):\n        prompt = _linear_prompt()\n        result = pt.prune_prompt_for_worker(prompt)\n        preview_nodes = [n for n in result.values() if n.get(\"class_type\") == \"PreviewImage\"]\n        self.assertEqual(len(preview_nodes), 1)\n        self.assertEqual(preview_nodes[0][\"inputs\"][\"images\"], [\"4\", 0])\n\n    def test_no_preview_image_when_no_downstream(self):\n        result = pt.prune_prompt_for_worker(_collector_only_prompt())\n        preview_nodes = [n for n in result.values() if n.get(\"class_type\") == \"PreviewImage\"]\n        self.assertEqual(len(preview_nodes), 0)\n\n    def test_unrelated_nodes_are_pruned(self):\n        prompt = {\n            \"1\": {\"class_type\": \"DistributedCollector\", \"inputs\": {}},\n            \"2\": {\"class_type\": \"UnrelatedNode\", \"inputs\": {}},  # no connection to 1\n        }\n        result = 
pt.prune_prompt_for_worker(prompt)\n        self.assertIn(\"1\", result)\n        self.assertNotIn(\"2\", result)\n\n    def test_result_is_a_copy_not_same_object(self):\n        prompt = _linear_prompt()\n        result = pt.prune_prompt_for_worker(prompt)\n        # Mutating the result should not affect the original\n        original_keys = set(prompt.keys())\n        result[\"NEW\"] = {\"class_type\": \"Test\", \"inputs\": {}}\n        self.assertEqual(set(prompt.keys()), original_keys)\n\n    def test_upscale_node_is_treated_as_distributed(self):\n        prompt = {\n            \"1\": {\"class_type\": \"KSampler\", \"inputs\": {}},\n            \"2\": {\"class_type\": \"UltimateSDUpscaleDistributed\", \"inputs\": {\"image\": [\"1\", 0]}},\n            \"3\": {\"class_type\": \"SaveImage\", \"inputs\": {\"images\": [\"2\", 0]}},\n        }\n        result = pt.prune_prompt_for_worker(prompt)\n        self.assertIn(\"1\", result)\n        self.assertIn(\"2\", result)\n        self.assertNotIn(\"3\", result)\n\n\n# ---------------------------------------------------------------------------\n# prepare_delegate_master_prompt\n# ---------------------------------------------------------------------------\n\nclass PrepareDelegateMasterPromptTests(unittest.TestCase):\n    def test_keeps_collector_and_downstream(self):\n        prompt = _delegate_prompt()\n        result = pt.prepare_delegate_master_prompt(prompt, [\"3\"])\n        self.assertIn(\"3\", result)\n        self.assertIn(\"4\", result)\n        self.assertNotIn(\"1\", result)\n        self.assertNotIn(\"2\", result)\n\n    def test_removes_dangling_upstream_refs(self):\n        \"\"\"Collector must not retain dangling refs to pruned upstream nodes.\"\"\"\n        prompt = _delegate_prompt()\n        result = pt.prepare_delegate_master_prompt(prompt, [\"3\"])\n        collector_inputs = result[\"3\"].get(\"inputs\", {})\n        # Original \"images\" pointed at node 2, which is pruned.\n        # It should 
now point at a newly injected placeholder node.\n        self.assertIn(\"images\", collector_inputs)\n        source_id = str(collector_inputs[\"images\"][0])\n        self.assertNotEqual(source_id, \"2\")\n        self.assertIn(source_id, result)\n        self.assertEqual(result[source_id].get(\"class_type\"), \"DistributedEmptyImage\")\n\n    def test_injects_empty_image_placeholder(self):\n        prompt = _delegate_prompt()\n        result = pt.prepare_delegate_master_prompt(prompt, [\"3\"])\n        empty_nodes = [(nid, n) for nid, n in result.items() if n.get(\"class_type\") == \"DistributedEmptyImage\"]\n        self.assertEqual(len(empty_nodes), 1)\n        placeholder_id = empty_nodes[0][0]\n        self.assertEqual(result[\"3\"][\"inputs\"][\"images\"], [placeholder_id, 0])\n\n    def test_one_placeholder_per_collector(self):\n        \"\"\"Two collectors → two placeholders.\"\"\"\n        prompt = {\n            \"1\": {\"class_type\": \"DistributedCollector\", \"inputs\": {}},\n            \"2\": {\"class_type\": \"DistributedCollector\", \"inputs\": {}},\n            \"3\": {\"class_type\": \"SaveImage\", \"inputs\": {\"images\": [\"1\", 0]}},\n        }\n        result = pt.prepare_delegate_master_prompt(prompt, [\"1\", \"2\"])\n        empty_nodes = [n for n in result.values() if n.get(\"class_type\") == \"DistributedEmptyImage\"]\n        self.assertEqual(len(empty_nodes), 2)\n\n    def test_result_is_independent_copy(self):\n        prompt = _delegate_prompt()\n        result = pt.prepare_delegate_master_prompt(prompt, [\"3\"])\n        result[\"3\"][\"inputs\"][\"NEW\"] = \"injected\"\n        # Original should be untouched\n        self.assertNotIn(\"NEW\", prompt[\"3\"].get(\"inputs\", {}))\n\n\n# ---------------------------------------------------------------------------\n# generate_job_id_map\n# ---------------------------------------------------------------------------\n\nclass GenerateJobIdMapTests(unittest.TestCase):\n    def 
test_maps_collector_nodes(self):\n        prompt = {\n            \"1\": {\"class_type\": \"DistributedCollector\", \"inputs\": {}},\n            \"2\": {\"class_type\": \"KSampler\", \"inputs\": {}},\n        }\n        idx = pt.PromptIndex(prompt)\n        job_map = pt.generate_job_id_map(idx, \"prefix\")\n        self.assertEqual(job_map[\"1\"], \"prefix_1\")\n        self.assertNotIn(\"2\", job_map)\n\n    def test_maps_upscale_nodes(self):\n        prompt = {\n            \"5\": {\"class_type\": \"UltimateSDUpscaleDistributed\", \"inputs\": {}},\n        }\n        idx = pt.PromptIndex(prompt)\n        job_map = pt.generate_job_id_map(idx, \"run\")\n        self.assertEqual(job_map[\"5\"], \"run_5\")\n\n    def test_empty_prompt_returns_empty_map(self):\n        idx = pt.PromptIndex({})\n        self.assertEqual(pt.generate_job_id_map(idx, \"prefix\"), {})\n\n    def test_stable_ids_across_calls(self):\n        prompt = {\"1\": {\"class_type\": \"DistributedCollector\", \"inputs\": {}}}\n        idx = pt.PromptIndex(prompt)\n        m1 = pt.generate_job_id_map(idx, \"run\")\n        m2 = pt.generate_job_id_map(idx, \"run\")\n        self.assertEqual(m1, m2)\n\n\n# ---------------------------------------------------------------------------\n# apply_participant_overrides – DistributedCollector\n# ---------------------------------------------------------------------------\n\nclass ApplyOverridesCollectorTests(unittest.TestCase):\n    def _collector_prompt(self):\n        return {\"1\": {\"class_type\": \"DistributedCollector\", \"inputs\": {}}}\n\n    def test_worker_sets_is_worker_true(self):\n        result = _apply(self._collector_prompt(), \"worker-a\")\n        self.assertTrue(result[\"1\"][\"inputs\"][\"is_worker\"])\n\n    def test_worker_sets_master_url(self):\n        result = _apply(self._collector_prompt(), \"worker-a\")\n        self.assertEqual(result[\"1\"][\"inputs\"][\"master_url\"], \"http://master.example.com\")\n\n    def 
test_worker_sets_worker_id(self):\n        result = _apply(self._collector_prompt(), \"worker-a\")\n        self.assertEqual(result[\"1\"][\"inputs\"][\"worker_id\"], \"worker-a\")\n\n    def test_worker_sets_delegate_only_false(self):\n        result = _apply(self._collector_prompt(), \"worker-a\")\n        self.assertFalse(result[\"1\"][\"inputs\"][\"delegate_only\"])\n\n    def test_master_sets_is_worker_false(self):\n        result = _apply(self._collector_prompt(), \"master\")\n        self.assertFalse(result[\"1\"][\"inputs\"][\"is_worker\"])\n\n    def test_master_clears_stale_master_url(self):\n        prompt = {\"1\": {\"class_type\": \"DistributedCollector\", \"inputs\": {\"master_url\": \"stale\"}}}\n        result = _apply(prompt, \"master\")\n        self.assertNotIn(\"master_url\", result[\"1\"][\"inputs\"])\n\n    def test_master_clears_stale_worker_id(self):\n        prompt = {\"1\": {\"class_type\": \"DistributedCollector\", \"inputs\": {\"worker_id\": \"stale\"}}}\n        result = _apply(prompt, \"master\")\n        self.assertNotIn(\"worker_id\", result[\"1\"][\"inputs\"])\n\n    def test_master_with_delegate_master_sets_delegate_only_true(self):\n        result = _apply(self._collector_prompt(), \"master\", delegate_master=True)\n        self.assertTrue(result[\"1\"][\"inputs\"][\"delegate_only\"])\n\n    def test_master_without_delegate_master_sets_delegate_only_false(self):\n        result = _apply(self._collector_prompt(), \"master\", delegate_master=False)\n        self.assertFalse(result[\"1\"][\"inputs\"][\"delegate_only\"])\n\n    def test_enabled_worker_ids_serialized_as_json(self):\n        enabled = [\"worker-a\", \"worker-b\"]\n        result = _apply(self._collector_prompt(), \"master\", enabled_worker_ids=enabled)\n        self.assertEqual(result[\"1\"][\"inputs\"][\"enabled_worker_ids\"], json.dumps(enabled))\n\n    def test_multi_job_id_is_set_from_job_map(self):\n        prompt = {\"1\": {\"class_type\": 
\"DistributedCollector\", \"inputs\": {}}}\n        idx = pt.PromptIndex(prompt)\n        job_id_map = {\"1\": \"run_abc_1\"}\n        result = pt.apply_participant_overrides(\n            prompt,\n            participant_id=\"worker-a\",\n            enabled_worker_ids=[\"worker-a\"],\n            job_id_map=job_id_map,\n            master_url=\"http://master\",\n            delegate_master=False,\n            prompt_index=idx,\n        )\n        self.assertEqual(result[\"1\"][\"inputs\"][\"multi_job_id\"], \"run_abc_1\")\n\n\n# ---------------------------------------------------------------------------\n# apply_participant_overrides – DistributedSeed\n# ---------------------------------------------------------------------------\n\nclass ApplyOverridesSeedTests(unittest.TestCase):\n    def _seed_prompt(self):\n        return {\"1\": {\"class_type\": \"DistributedSeed\", \"inputs\": {}}}\n\n    def test_worker_sets_is_worker_true(self):\n        result = _apply(self._seed_prompt(), \"worker-a\")\n        self.assertTrue(result[\"1\"][\"inputs\"][\"is_worker\"])\n\n    def test_worker_id_reflects_index_in_enabled_list(self):\n        result = _apply(self._seed_prompt(), \"worker-b\", enabled_worker_ids=[\"worker-a\", \"worker-b\"])\n        self.assertEqual(result[\"1\"][\"inputs\"][\"worker_id\"], \"worker_1\")\n\n    def test_master_sets_is_worker_false(self):\n        result = _apply(self._seed_prompt(), \"master\")\n        self.assertFalse(result[\"1\"][\"inputs\"][\"is_worker\"])\n\n    def test_master_sets_empty_worker_id(self):\n        result = _apply(self._seed_prompt(), \"master\")\n        self.assertEqual(result[\"1\"][\"inputs\"][\"worker_id\"], \"\")\n\n\n# ---------------------------------------------------------------------------\n# apply_participant_overrides – UltimateSDUpscaleDistributed\n# ---------------------------------------------------------------------------\n\nclass ApplyOverridesUpscaleTests(unittest.TestCase):\n    def 
_upscale_prompt(self):\n        return {\"1\": {\"class_type\": \"UltimateSDUpscaleDistributed\", \"inputs\": {}}}\n\n    def test_worker_sets_is_worker_true(self):\n        result = _apply(self._upscale_prompt(), \"worker-a\")\n        self.assertTrue(result[\"1\"][\"inputs\"][\"is_worker\"])\n\n    def test_worker_sets_master_url_and_worker_id(self):\n        result = _apply(self._upscale_prompt(), \"worker-a\")\n        self.assertEqual(result[\"1\"][\"inputs\"][\"master_url\"], \"http://master.example.com\")\n        self.assertEqual(result[\"1\"][\"inputs\"][\"worker_id\"], \"worker-a\")\n\n    def test_master_clears_master_url_and_worker_id(self):\n        prompt = {\"1\": {\"class_type\": \"UltimateSDUpscaleDistributed\", \"inputs\": {\"master_url\": \"x\", \"worker_id\": \"y\"}}}\n        result = _apply(prompt, \"master\")\n        self.assertNotIn(\"master_url\", result[\"1\"][\"inputs\"])\n        self.assertNotIn(\"worker_id\", result[\"1\"][\"inputs\"])\n\n    def test_collector_downstream_of_upscale_gets_pass_through(self):\n        \"\"\"A DistributedCollector that is downstream of UltimateSDUpscaleDistributed → pass_through=True.\"\"\"\n        prompt = {\n            \"1\": {\"class_type\": \"UltimateSDUpscaleDistributed\", \"inputs\": {}},\n            \"2\": {\"class_type\": \"DistributedCollector\", \"inputs\": {\"images\": [\"1\", 0]}},\n        }\n        result = _apply(prompt, \"worker-a\", enabled_worker_ids=[\"worker-a\"])\n        self.assertTrue(result[\"2\"][\"inputs\"].get(\"pass_through\"))\n\n\n# ---------------------------------------------------------------------------\n# apply_participant_overrides – DistributedValue\n# ---------------------------------------------------------------------------\n\nclass ApplyOverridesValueTests(unittest.TestCase):\n    def _value_prompt(self):\n        return {\"1\": {\"class_type\": \"DistributedValue\", \"inputs\": {}}}\n\n    def test_worker_sets_is_worker_true(self):\n        result = 
_apply(self._value_prompt(), \"worker-a\")\n        self.assertTrue(result[\"1\"][\"inputs\"][\"is_worker\"])\n\n    def test_worker_id_reflects_index_in_enabled_list(self):\n        result = _apply(self._value_prompt(), \"worker-b\", enabled_worker_ids=[\"worker-a\", \"worker-b\"])\n        self.assertEqual(result[\"1\"][\"inputs\"][\"worker_id\"], \"worker_1\")\n\n    def test_master_sets_is_worker_false(self):\n        result = _apply(self._value_prompt(), \"master\")\n        self.assertFalse(result[\"1\"][\"inputs\"][\"is_worker\"])\n\n    def test_master_sets_empty_worker_id(self):\n        result = _apply(self._value_prompt(), \"master\")\n        self.assertEqual(result[\"1\"][\"inputs\"][\"worker_id\"], \"\")\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_queue_request.py",
    "content": "import importlib.util\nimport unittest\nfrom pathlib import Path\n\n\ndef _load_queue_request_module():\n    module_path = Path(__file__).resolve().parents[1] / \"api\" / \"queue_request.py\"\n    spec = importlib.util.spec_from_file_location(\"queue_request\", module_path)\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n    return module\n\n\nqueue_request = _load_queue_request_module()\nparse_queue_request_payload = queue_request.parse_queue_request_payload\n\n\nclass QueueRequestPayloadTests(unittest.TestCase):\n    def _base_payload(self):\n        return {\n            \"prompt\": {\"1\": {\"class_type\": \"Anything\"}},\n            \"enabled_worker_ids\": [\"worker-1\"],\n            \"client_id\": \"client-1\",\n        }\n\n    def test_normalizes_enabled_worker_ids(self):\n        payload_data = self._base_payload()\n        payload_data[\"enabled_worker_ids\"] = [\"a\", 2, 3]\n        payload_data[\"delegate_master\"] = True\n        payload = parse_queue_request_payload(\n            payload_data\n        )\n        self.assertEqual(payload.enabled_worker_ids, [\"a\", \"2\", \"3\"])\n        self.assertTrue(payload.delegate_master)\n\n    def test_supports_legacy_workers_field(self):\n        payload_data = self._base_payload()\n        payload_data.pop(\"enabled_worker_ids\", None)\n        payload_data[\"workers\"] = [{\"id\": \"w1\"}, \"w2\", {\"id\": 3}, {\"name\": \"no-id\"}]\n        payload = parse_queue_request_payload(\n            payload_data\n        )\n        self.assertEqual(payload.enabled_worker_ids, [\"w1\", \"w2\", \"3\"])\n\n    def test_supports_auto_prepare_prompt_fallback(self):\n        payload_data = self._base_payload()\n        payload_data.pop(\"prompt\", None)\n        payload_data[\"auto_prepare\"] = True\n        payload_data[\"workflow\"] = {\n            \"prompt\": {\n                \"10\": 
{\"class_type\": \"DistributedCollector\"},\n            }\n        }\n        payload = parse_queue_request_payload(\n            payload_data\n        )\n        self.assertIn(\"10\", payload.prompt)\n        self.assertTrue(payload.auto_prepare)\n\n    def test_normalizes_trace_execution_id(self):\n        payload_data = self._base_payload()\n        payload_data[\"trace_execution_id\"] = \"  exec_123  \"\n        payload = parse_queue_request_payload(\n            payload_data\n        )\n        self.assertEqual(payload.trace_execution_id, \"exec_123\")\n\n    def test_blank_trace_execution_id_normalizes_to_none(self):\n        payload_data = self._base_payload()\n        payload_data[\"trace_execution_id\"] = \"   \"\n        payload = parse_queue_request_payload(\n            payload_data\n        )\n        self.assertIsNone(payload.trace_execution_id)\n\n    def test_auto_prepare_defaults_true(self):\n        payload = parse_queue_request_payload(self._base_payload())\n        self.assertTrue(payload.auto_prepare)\n\n    def test_workers_field_must_be_list(self):\n        payload_data = self._base_payload()\n        payload_data.pop(\"enabled_worker_ids\", None)\n        payload_data[\"workers\"] = \"worker-a\"\n        with self.assertRaisesRegex(ValueError, \"Field 'workers' must be a list\"):\n            parse_queue_request_payload(payload_data)\n\n    def test_trace_execution_id_must_be_string(self):\n        payload_data = self._base_payload()\n        payload_data[\"trace_execution_id\"] = 123\n        with self.assertRaisesRegex(ValueError, \"trace_execution_id must be a string\"):\n            parse_queue_request_payload(payload_data)\n\n    def test_auto_prepare_false_still_falls_back_to_workflow_prompt(self):\n        payload_data = self._base_payload()\n        payload_data.pop(\"prompt\", None)\n        payload_data[\"auto_prepare\"] = False\n        payload_data[\"workflow\"] = {\n            \"prompt\": {\"10\": {\"class_type\": 
\"DistributedCollector\"}},\n        }\n        payload = parse_queue_request_payload(payload_data)\n        self.assertIn(\"10\", payload.prompt)\n        self.assertFalse(payload.auto_prepare)\n\n    def test_auto_prepare_must_be_boolean(self):\n        payload_data = self._base_payload()\n        payload_data[\"auto_prepare\"] = \"true\"\n        with self.assertRaisesRegex(ValueError, \"auto_prepare must be a boolean\"):\n            parse_queue_request_payload(payload_data)\n\n    def test_invalid_delegate_master_type_raises(self):\n        payload_data = self._base_payload()\n        payload_data[\"delegate_master\"] = \"yes\"\n        with self.assertRaisesRegex(ValueError, \"delegate_master must be a boolean\"):\n            parse_queue_request_payload(payload_data)\n\n    def test_invalid_enabled_worker_ids_type_raises(self):\n        payload_data = self._base_payload()\n        payload_data[\"enabled_worker_ids\"] = \"worker-a\"\n        with self.assertRaisesRegex(ValueError, \"enabled_worker_ids must be a list\"):\n            parse_queue_request_payload(payload_data)\n\n    def test_invalid_top_level_payload_raises(self):\n        with self.assertRaisesRegex(ValueError, \"Expected a JSON object body\"):\n            parse_queue_request_payload([\"not\", \"an\", \"object\"])\n\n    def test_missing_prompt_raises(self):\n        with self.assertRaisesRegex(ValueError, \"Field 'prompt' must be an object\"):\n            parse_queue_request_payload(\n                {\n                    \"workflow\": {},\n                    \"enabled_worker_ids\": [\"worker-1\"],\n                    \"client_id\": \"client-1\",\n                }\n            )\n\n    def test_missing_enabled_worker_ids_raises(self):\n        payload_data = self._base_payload()\n        payload_data.pop(\"enabled_worker_ids\", None)\n        with self.assertRaisesRegex(ValueError, \"enabled_worker_ids required\"):\n            parse_queue_request_payload(payload_data)\n\n    def 
test_missing_client_id_raises(self):\n        payload_data = self._base_payload()\n        payload_data.pop(\"client_id\", None)\n        with self.assertRaisesRegex(ValueError, \"client_id required\"):\n            parse_queue_request_payload(payload_data)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_static_mode.py",
    "content": "import asyncio\nimport importlib.util\nimport sys\nimport types\nimport unittest\nfrom pathlib import Path\n\nimport torch\n\n\ndef _load_static_mode_module():\n    module_path = Path(__file__).resolve().parents[1] / \"upscale\" / \"modes\" / \"static.py\"\n    package_name = \"dist_static_mode_testpkg\"\n\n    for mod_name in list(sys.modules):\n        if mod_name == package_name or mod_name.startswith(f\"{package_name}.\"):\n            del sys.modules[mod_name]\n\n    root_pkg = types.ModuleType(package_name)\n    root_pkg.__path__ = []\n    sys.modules[package_name] = root_pkg\n\n    upscale_pkg = types.ModuleType(f\"{package_name}.upscale\")\n    upscale_pkg.__path__ = []\n    sys.modules[f\"{package_name}.upscale\"] = upscale_pkg\n\n    modes_pkg = types.ModuleType(f\"{package_name}.upscale.modes\")\n    modes_pkg.__path__ = []\n    sys.modules[f\"{package_name}.upscale.modes\"] = modes_pkg\n\n    utils_pkg = types.ModuleType(f\"{package_name}.utils\")\n    utils_pkg.__path__ = []\n    sys.modules[f\"{package_name}.utils\"] = utils_pkg\n\n    created_comfy_stub = False\n    if \"comfy\" not in sys.modules:\n        created_comfy_stub = True\n        comfy_module = types.ModuleType(\"comfy\")\n        model_mgmt = types.ModuleType(\"comfy.model_management\")\n\n        class _InterruptProcessingException(Exception):\n            pass\n\n        model_mgmt.processing_interrupted = lambda: False\n        model_mgmt.throw_exception_if_processing_interrupted = lambda: None\n        model_mgmt.InterruptProcessingException = _InterruptProcessingException\n\n        comfy_module.model_management = model_mgmt\n        sys.modules[\"comfy\"] = comfy_module\n        sys.modules[\"comfy.model_management\"] = model_mgmt\n\n    logging_module = types.ModuleType(f\"{package_name}.utils.logging\")\n    logging_module.debug_log = lambda *_args, **_kwargs: None\n    logging_module.log = lambda *_args, **_kwargs: None\n    
sys.modules[f\"{package_name}.utils.logging\"] = logging_module\n\n    image_module = types.ModuleType(f\"{package_name}.utils.image\")\n    from PIL import Image as PILImage\n    import numpy as np\n\n    def _tensor_to_pil(img_tensor, batch_index=0):\n        return PILImage.fromarray((255 * img_tensor[batch_index].cpu().numpy()).astype(np.uint8))\n\n    def _pil_to_tensor(image):\n        arr = np.array(image).astype(np.float32) / 255.0\n        return torch.from_numpy(arr).unsqueeze(0)\n\n    image_module.tensor_to_pil = _tensor_to_pil\n    image_module.pil_to_tensor = _pil_to_tensor\n    sys.modules[f\"{package_name}.utils.image\"] = image_module\n\n    async_helpers_module = types.ModuleType(f\"{package_name}.utils.async_helpers\")\n\n    def _run_async_in_server_loop(coro, timeout=None):\n        if timeout is not None:\n            return asyncio.run(asyncio.wait_for(coro, timeout=timeout))\n        return asyncio.run(coro)\n\n    async_helpers_module.run_async_in_server_loop = _run_async_in_server_loop\n    sys.modules[f\"{package_name}.utils.async_helpers\"] = async_helpers_module\n\n    config_module = types.ModuleType(f\"{package_name}.utils.config\")\n    config_module.get_worker_timeout_seconds = lambda: 60\n    sys.modules[f\"{package_name}.utils.config\"] = config_module\n\n    constants_module = types.ModuleType(f\"{package_name}.utils.constants\")\n    constants_module.HEARTBEAT_INTERVAL = 10.0\n    constants_module.JOB_POLL_INTERVAL = 0.0\n    constants_module.JOB_POLL_MAX_ATTEMPTS = 3\n    constants_module.MAX_BATCH = 20\n    constants_module.TILE_SEND_TIMEOUT = 1.0\n    constants_module.TILE_WAIT_TIMEOUT = 1.0\n    sys.modules[f\"{package_name}.utils.constants\"] = constants_module\n\n    job_store_module = types.ModuleType(f\"{package_name}.upscale.job_store\")\n\n    async def _noop(*_args, **_kwargs):\n        return None\n\n    job_store_module.ensure_tile_jobs_initialized = lambda: types.SimpleNamespace(\n        
distributed_tile_jobs_lock=asyncio.Lock(),\n        distributed_pending_tile_jobs={},\n    )\n    job_store_module.init_static_job_batched = _noop\n    job_store_module._mark_task_completed = _noop\n    job_store_module._cleanup_job = _noop\n    job_store_module._drain_results_queue = _noop\n    job_store_module._get_completed_count = _noop\n    sys.modules[f\"{package_name}.upscale.job_store\"] = job_store_module\n\n    job_models_module = types.ModuleType(f\"{package_name}.upscale.job_models\")\n\n    class _TileJobState:\n        pass\n\n    job_models_module.TileJobState = _TileJobState\n    sys.modules[f\"{package_name}.upscale.job_models\"] = job_models_module\n\n    spec = importlib.util.spec_from_file_location(f\"{package_name}.upscale.modes.static\", module_path)\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n\n    if created_comfy_stub:\n        sys.modules.pop(\"comfy.model_management\", None)\n        sys.modules.pop(\"comfy\", None)\n\n    return module\n\n\nstatic_mode = _load_static_mode_module()\n\n\nclass _FakeStaticWorker(static_mode.StaticModeMixin):\n    def __init__(self):\n        self.sent_batches = []\n        self.request_calls = 0\n        self.heartbeat_calls = 0\n        self.job_ready = True\n        self.tile_sequence = [(0, 0, True), (None, 0, True)]\n\n    def round_to_multiple(self, value):\n        return value\n\n    def calculate_tiles(self, _width, _height, _tile_width, _tile_height, _force_uniform_tiles):\n        return [(0, 0)]\n\n    def _poll_job_ready(self, *_args, **_kwargs):\n        return self.job_ready\n\n    async def _request_tile_from_master(self, *_args, **_kwargs):\n        self.request_calls += 1\n        return self.tile_sequence.pop(0)\n\n    async def _send_heartbeat_to_master(self, *_args, **_kwargs):\n        self.heartbeat_calls += 1\n\n    async def send_tiles_batch_to_master(\n        self,\n        
processed_tiles,\n        _multi_job_id,\n        _master_url,\n        _padding,\n        _worker_id,\n        is_final_flush=False,\n    ):\n        self.sent_batches.append(\n            {\n                \"tiles\": list(processed_tiles),\n                \"is_final_flush\": bool(is_final_flush),\n            }\n        )\n\n    def _extract_and_process_tile(self, upscaled_image, *_args, **_kwargs):\n        batch_size = upscaled_image.shape[0]\n        processed_batch = torch.zeros((batch_size, 2, 2, 3), dtype=torch.float32)\n        return processed_batch, 0, 0, 2, 2\n\n    def create_tile_mask(self, *_args, **_kwargs):\n        from PIL import Image\n        return Image.new(\"L\", (4, 4), 255)\n\n    def blend_tile(self, base_image, *_args, **_kwargs):\n        return base_image\n\n\ndef _call_worker_static(fake_worker):\n    image = torch.zeros((1, 4, 4, 3), dtype=torch.float32)\n    return fake_worker._process_worker_static_sync(\n        image,\n        model=None,\n        positive=None,\n        negative=None,\n        vae=None,\n        seed=1,\n        steps=1,\n        cfg=1.0,\n        sampler_name=\"euler\",\n        scheduler=\"normal\",\n        denoise=0.5,\n        tile_width=4,\n        tile_height=4,\n        padding=8,\n        mask_blur=4,\n        force_uniform_tiles=True,\n        tiled_decode=False,\n        multi_job_id=\"job-1\",\n        master_url=\"http://master:8188\",\n        worker_id=\"worker-1\",\n        enabled_workers=[\"worker-1\"],\n    )\n\n\nclass StaticModeWorkerFlowTests(unittest.TestCase):\n    def test_worker_static_aborts_when_job_not_ready(self):\n        worker = _FakeStaticWorker()\n        worker.job_ready = False\n\n        result = _call_worker_static(worker)\n\n        self.assertEqual(result[0].shape[0], 1)\n        self.assertEqual(worker.request_calls, 0)\n        self.assertEqual(worker.heartbeat_calls, 0)\n        self.assertEqual(worker.sent_batches, [])\n\n    def 
test_worker_static_requests_tiles_and_flushes_final_batch(self):\n        worker = _FakeStaticWorker()\n\n        _call_worker_static(worker)\n\n        self.assertEqual(worker.request_calls, 2)  # one tile, then sentinel\n        self.assertEqual(worker.heartbeat_calls, 1)\n        self.assertEqual(len(worker.sent_batches), 1)\n        self.assertTrue(worker.sent_batches[0][\"is_final_flush\"])\n        tiles = worker.sent_batches[0][\"tiles\"]\n        self.assertEqual(len(tiles), 1)\n        self.assertEqual(tiles[0][\"tile_idx\"], 0)\n        self.assertEqual(tiles[0][\"global_idx\"], 0)\n        self.assertEqual(tiles[0][\"batch_idx\"], 0)\n\n    def test_flush_empty_final_still_sends_completion_signal(self):\n        worker = _FakeStaticWorker()\n\n        returned = worker._flush_tiles_to_master(\n            [],\n            \"job-1\",\n            \"http://master:8188\",\n            8,\n            \"worker-1\",\n            is_final_flush=True,\n        )\n\n        self.assertEqual(returned, [])\n        self.assertEqual(len(worker.sent_batches), 1)\n        self.assertEqual(worker.sent_batches[0][\"tiles\"], [])\n        self.assertTrue(worker.sent_batches[0][\"is_final_flush\"])\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_worker_process_runtime.py",
    "content": "import importlib.util\nimport sys\nimport types\nimport unittest\nfrom argparse import Namespace\nfrom pathlib import Path\nfrom unittest.mock import patch\n\n\ndef _load_process_module(module_filename: str):\n    module_path = Path(__file__).resolve().parents[1] / \"workers\" / \"process\" / module_filename\n    package_name = \"dist_proc_testpkg\"\n    module_name = module_filename[:-3]\n\n    for mod_name in list(sys.modules):\n        if mod_name == package_name or mod_name.startswith(f\"{package_name}.\"):\n            del sys.modules[mod_name]\n\n    root_pkg = types.ModuleType(package_name)\n    root_pkg.__path__ = []\n    sys.modules[package_name] = root_pkg\n\n    workers_pkg = types.ModuleType(f\"{package_name}.workers\")\n    workers_pkg.__path__ = []\n    sys.modules[f\"{package_name}.workers\"] = workers_pkg\n\n    process_pkg = types.ModuleType(f\"{package_name}.workers.process\")\n    process_pkg.__path__ = []\n    sys.modules[f\"{package_name}.workers.process\"] = process_pkg\n\n    utils_pkg = types.ModuleType(f\"{package_name}.utils\")\n    utils_pkg.__path__ = []\n    sys.modules[f\"{package_name}.utils\"] = utils_pkg\n\n    logging_module = types.ModuleType(f\"{package_name}.utils.logging\")\n    logging_module.debug_log = lambda *_args, **_kwargs: None\n    logging_module.log = lambda *_args, **_kwargs: None\n    sys.modules[f\"{package_name}.utils.logging\"] = logging_module\n\n    process_module = types.ModuleType(f\"{package_name}.utils.process\")\n    process_module.get_python_executable = lambda: \"/usr/bin/test-python\"\n    sys.modules[f\"{package_name}.utils.process\"] = process_module\n\n    spec = importlib.util.spec_from_file_location(\n        f\"{package_name}.workers.process.{module_name}\",\n        module_path,\n    )\n    module = importlib.util.module_from_spec(spec)\n    assert spec is not None and spec.loader is not None\n    spec.loader.exec_module(module)\n    return module\n\n\nroot_discovery_module = 
_load_process_module(\"root_discovery.py\")\nlaunch_builder_module = _load_process_module(\"launch_builder.py\")\n\n\nclass ComfyRootDiscoveryTests(unittest.TestCase):\n    def test_prefers_loaded_comfyui_module_path(self):\n        discovery = root_discovery_module.ComfyRootDiscovery()\n        server_module = types.SimpleNamespace(__file__=\"/opt/ComfyUI/server.py\")\n\n        def fake_exists(path):\n            return path == \"/opt/ComfyUI/main.py\"\n\n        with patch.dict(sys.modules, {\"server\": server_module}, clear=False), \\\n             patch.object(root_discovery_module.os.path, \"exists\", side_effect=fake_exists), \\\n             patch.dict(root_discovery_module.os.environ, {}, clear=True):\n            self.assertEqual(discovery.find_comfy_root(), \"/opt/ComfyUI\")\n\n\nclass LaunchCommandBuilderTests(unittest.TestCase):\n    def test_inherits_runtime_layout_args_for_desktop(self):\n        builder = launch_builder_module.LaunchCommandBuilder()\n        runtime_args = Namespace(\n            listen=\"127.0.0.1\",\n            base_directory=\"C:/Users/test/ComfyUI\",\n            temp_directory=None,\n            input_directory=\"C:/Users/test/ComfyUI/input\",\n            output_directory=\"C:/Users/test/ComfyUI/output\",\n            user_directory=\"C:/Users/test/ComfyUI/user\",\n            front_end_root=\"C:/Program Files/ComfyUI/web_custom_versions/desktop_app\",\n            extra_model_paths_config=[[\"C:/Users/test/AppData/Roaming/ComfyUI/extra_models_config.yaml\"]],\n            enable_manager=True,\n            disable_manager_ui=False,\n            enable_manager_legacy_ui=False,\n            windows_standalone_build=True,\n            log_stdout=True,\n            verbose=\"INFO\",\n            enable_cors_header=\"*\",\n        )\n        comfy_module = types.ModuleType(\"comfy\")\n        comfy_cli_args = types.ModuleType(\"comfy.cli_args\")\n        comfy_cli_args.args = runtime_args\n\n        worker_config = {\n            
\"port\": 9001,\n            \"extra_args\": \"--preview-method auto\",\n        }\n\n        def fake_exists(path):\n            return path == \"/desktop/ComfyUI/main.py\"\n\n        with patch.dict(\n            sys.modules,\n            {\"comfy\": comfy_module, \"comfy.cli_args\": comfy_cli_args},\n            clear=False,\n        ), patch.object(launch_builder_module.os.path, \"exists\", side_effect=fake_exists):\n            cmd = builder.build_launch_command(worker_config, \"/desktop/ComfyUI\")\n\n        self.assertEqual(cmd[:2], [\"/usr/bin/test-python\", \"/desktop/ComfyUI/main.py\"])\n        self.assertIn(\"--listen\", cmd)\n        self.assertIn(\"127.0.0.1\", cmd)\n        self.assertIn(\"--base-directory\", cmd)\n        self.assertIn(\"C:/Users/test/ComfyUI\", cmd)\n        self.assertIn(\"--input-directory\", cmd)\n        self.assertIn(\"--output-directory\", cmd)\n        self.assertIn(\"--user-directory\", cmd)\n        self.assertIn(\"--front-end-root\", cmd)\n        self.assertIn(\"--extra-model-paths-config\", cmd)\n        self.assertIn(\"C:/Users/test/AppData/Roaming/ComfyUI/extra_models_config.yaml\", cmd)\n        self.assertIn(\"--enable-manager\", cmd)\n        self.assertIn(\"--windows-standalone-build\", cmd)\n        self.assertIn(\"--log-stdout\", cmd)\n        self.assertIn(\"--disable-auto-launch\", cmd)\n        self.assertIn(\"--enable-cors-header\", cmd)\n        self.assertIn(\"*\", cmd)\n        self.assertIn(\"--port\", cmd)\n        self.assertIn(\"9001\", cmd)\n        self.assertNotIn(\"--auto-launch\", cmd)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "upscale/__init__.py",
    "content": ""
  },
  {
    "path": "upscale/conditioning.py",
    "content": "import copy\n\n\ndef clone_control_chain(control, clone_hint=True):\n    \"\"\"Shallow copy the ControlNet chain, optionally cloning hints but sharing models.\"\"\"\n    if control is None:\n        return None\n    new_control = copy.copy(control)\n    if clone_hint and hasattr(control, 'cond_hint_original'):\n        hint = getattr(control, 'cond_hint_original', None)\n        new_control.cond_hint_original = hint.clone() if hint is not None else None\n    if hasattr(control, 'previous_controlnet'):\n        new_control.previous_controlnet = clone_control_chain(control.previous_controlnet, clone_hint)\n    return new_control\n\n\ndef clone_conditioning(cond_list, clone_hints=True):\n    \"\"\"Clone conditioning without duplicating ControlNet models.\"\"\"\n    new_cond = []\n    for emb, cond_dict in cond_list:\n        new_emb = emb.clone() if emb is not None else None\n        new_dict = cond_dict.copy()\n        if 'control' in new_dict:\n            new_dict['control'] = clone_control_chain(new_dict['control'], clone_hints)\n        if 'mask' in new_dict and new_dict['mask'] is not None:\n            new_dict['mask'] = new_dict['mask'].clone()\n        if 'pooled_output' in new_dict and new_dict['pooled_output'] is not None:\n            new_dict['pooled_output'] = new_dict['pooled_output'].clone()\n        if 'area' in new_dict:\n            new_dict['area'] = new_dict['area'][:]\n        new_cond.append([new_emb, new_dict])\n    return new_cond\n"
  },
  {
    "path": "upscale/job_models.py",
    "content": "from dataclasses import dataclass, field\nimport asyncio\nimport time\n\n\nclass BaseJobState:\n    \"\"\"Marker base class for typed USDU job state containers.\"\"\"\n\n\n@dataclass\nclass TileJobState(BaseJobState):\n    \"\"\"Typed state container for static (tile) USDU jobs.\"\"\"\n\n    multi_job_id: str\n    mode: str = field(default=\"static\", init=False)\n    queue: asyncio.Queue = field(default_factory=asyncio.Queue)\n    pending_tasks: asyncio.Queue = field(default_factory=asyncio.Queue)\n    completed_tasks: dict = field(default_factory=dict)\n    worker_status: dict = field(default_factory=dict)\n    assigned_to_workers: dict = field(default_factory=dict)\n    batch_size: int = 0\n    num_tiles_per_image: int = 0\n    batched_static: bool = False\n    created_at: float = field(default_factory=time.monotonic)\n\n\n@dataclass\nclass ImageJobState(BaseJobState):\n    \"\"\"Typed state container for dynamic (per-image) USDU jobs.\"\"\"\n\n    multi_job_id: str\n    mode: str = field(default=\"dynamic\", init=False)\n    queue: asyncio.Queue = field(default_factory=asyncio.Queue)\n    pending_images: asyncio.Queue = field(default_factory=asyncio.Queue)\n    completed_images: dict = field(default_factory=dict)\n    worker_status: dict = field(default_factory=dict)\n    assigned_to_workers: dict = field(default_factory=dict)\n    batch_size: int = 0\n    num_tiles_per_image: int = 0\n    batched_static: bool = False\n    created_at: float = field(default_factory=time.monotonic)\n\n    @property\n    def pending_tasks(self):\n        return self.pending_images\n\n    @property\n    def completed_tasks(self):\n        return self.completed_images\n"
  },
  {
    "path": "upscale/job_state.py",
    "content": "import asyncio\n\nfrom ..utils.logging import debug_log\nfrom .job_store import ensure_tile_jobs_initialized\nfrom .job_timeout import _check_and_requeue_timed_out_workers as _requeue_usdu\nfrom .job_models import ImageJobState, TileJobState\n\n\nclass JobStateMixin:\n    async def _get_job_data(self, multi_job_id):\n        \"\"\"Return current job data reference while holding lock briefly.\"\"\"\n        prompt_server = ensure_tile_jobs_initialized()\n        async with prompt_server.distributed_tile_jobs_lock:\n            return prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n\n    async def _get_all_completed_tasks(self, multi_job_id):\n        \"\"\"Helper to retrieve all completed tasks from the job data.\"\"\"\n        job_data = await self._get_job_data(multi_job_id)\n        if isinstance(job_data, TileJobState):\n            return dict(job_data.completed_tasks)\n        if isinstance(job_data, ImageJobState):\n            return dict(job_data.completed_images)\n        return {}\n\n    async def _get_next_image_index(self, multi_job_id):\n        \"\"\"Get next image index from pending queue for master.\"\"\"\n        prompt_server = ensure_tile_jobs_initialized()\n        pending_queue = None\n        async with prompt_server.distributed_tile_jobs_lock:\n            job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n            if isinstance(job_data, ImageJobState):\n                pending_queue = job_data.pending_images\n\n        if pending_queue is None:\n            return None\n\n        try:\n            return await asyncio.wait_for(pending_queue.get(), timeout=1.0)\n        except asyncio.TimeoutError:\n            return None\n\n    async def _get_next_tile_index(self, multi_job_id):\n        \"\"\"Get next tile index from pending queue for master in static mode.\"\"\"\n        prompt_server = ensure_tile_jobs_initialized()\n        pending_queue = None\n        async with 
prompt_server.distributed_tile_jobs_lock:\n            job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n            if isinstance(job_data, TileJobState):\n                pending_queue = job_data.pending_tasks\n\n        if pending_queue is None:\n            return None\n\n        try:\n            return await asyncio.wait_for(pending_queue.get(), timeout=0.1)\n        except asyncio.TimeoutError:\n            return None\n\n    async def _get_total_completed_count(self, multi_job_id):\n        \"\"\"Get total count of all completed images (master + workers).\"\"\"\n        prompt_server = ensure_tile_jobs_initialized()\n        async with prompt_server.distributed_tile_jobs_lock:\n            job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n            if isinstance(job_data, ImageJobState):\n                return len(job_data.completed_images)\n            if isinstance(job_data, TileJobState):\n                return len(job_data.completed_tasks)\n            return 0\n\n    async def _get_all_completed_images(self, multi_job_id):\n        \"\"\"Get all completed images.\"\"\"\n        prompt_server = ensure_tile_jobs_initialized()\n        async with prompt_server.distributed_tile_jobs_lock:\n            job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n            if isinstance(job_data, ImageJobState):\n                return job_data.completed_images.copy()\n            return {}\n\n    async def _get_pending_count(self, multi_job_id):\n        \"\"\"Get count of pending images in the queue.\"\"\"\n        prompt_server = ensure_tile_jobs_initialized()\n        async with prompt_server.distributed_tile_jobs_lock:\n            job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n            if isinstance(job_data, ImageJobState):\n                return job_data.pending_images.qsize()\n            if isinstance(job_data, TileJobState):\n                return 
job_data.pending_tasks.qsize()\n            return 0\n\n    async def _drain_worker_results_queue(self, multi_job_id):\n        \"\"\"Drain pending worker results from queue and update completed images.\"\"\"\n        prompt_server = ensure_tile_jobs_initialized()\n        worker_queue = None\n        async with prompt_server.distributed_tile_jobs_lock:\n            job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n            if isinstance(job_data, ImageJobState):\n                worker_queue = job_data.queue\n\n        if worker_queue is None:\n            return 0\n\n        drained_results = []\n        while True:\n            try:\n                drained_results.append(worker_queue.get_nowait())\n            except asyncio.QueueEmpty:\n                break\n\n        if not drained_results:\n            return 0\n\n        collected = 0\n        async with prompt_server.distributed_tile_jobs_lock:\n            job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n            if not isinstance(job_data, ImageJobState):\n                return 0\n\n            for result in drained_results:\n                worker_id = result.get(\"worker_id\")\n                if \"image_idx\" in result and \"image\" in result:\n                    image_idx = result[\"image_idx\"]\n                    image_pil = result[\"image\"]\n                    if image_idx not in job_data.completed_images:\n                        job_data.completed_images[image_idx] = image_pil\n                        collected += 1\n                        debug_log(f\"Drained image {image_idx} from worker {worker_id}\")\n\n        if collected > 0:\n            debug_log(f\"Drained {collected} worker images during retry\")\n\n        return collected\n\n    async def _check_and_requeue_timed_out_workers(self, multi_job_id, batch_size):\n        \"\"\"Check for timed out workers and requeue their assigned images.\"\"\"\n        return await 
_requeue_usdu(multi_job_id, batch_size)\n"
  },
  {
    "path": "upscale/job_store.py",
    "content": "import asyncio\nimport os\nimport time\nfrom typing import List, Optional\n\nimport server\n\nfrom ..utils.logging import debug_log\nfrom .job_models import BaseJobState, ImageJobState, TileJobState\n\n# Configure maximum payload size (50MB default, configurable via environment variable)\nMAX_PAYLOAD_SIZE = int(os.environ.get('COMFYUI_MAX_PAYLOAD_SIZE', str(50 * 1024 * 1024)))\n\n\ndef ensure_tile_jobs_initialized():\n    \"\"\"Ensure tile job storage is initialized on the server instance.\"\"\"\n    prompt_server = server.PromptServer.instance\n    if not hasattr(prompt_server, 'distributed_pending_tile_jobs'):\n        debug_log(\"Initializing persistent tile job queue on server instance.\")\n        prompt_server.distributed_pending_tile_jobs = {}\n        prompt_server.distributed_tile_jobs_lock = asyncio.Lock()\n    else:\n        invalid_job_ids = [\n            job_id\n            for job_id, job_data in prompt_server.distributed_pending_tile_jobs.items()\n            if not isinstance(job_data, BaseJobState)\n        ]\n        for job_id in invalid_job_ids:\n            debug_log(f\"Removing invalid job state for {job_id}\")\n            del prompt_server.distributed_pending_tile_jobs[job_id]\n    return prompt_server\n\n\nasync def _init_job_queue(\n    multi_job_id,\n    mode,\n    batch_size=None,\n    num_tiles_per_image=None,\n    all_indices=None,\n    enabled_workers=None,\n    batched_static: bool = False,\n):\n    \"\"\"Unified initialization for job queues in static and dynamic modes.\"\"\"\n    prompt_server = ensure_tile_jobs_initialized()\n    async with prompt_server.distributed_tile_jobs_lock:\n        if multi_job_id in prompt_server.distributed_pending_tile_jobs:\n            debug_log(f\"Queue already exists for {multi_job_id}\")\n            return\n\n        if mode == 'dynamic':\n            job_data = ImageJobState(multi_job_id=multi_job_id)\n        elif mode == 'static':\n            job_data = 
TileJobState(multi_job_id=multi_job_id)\n        else:\n            raise ValueError(f\"Unknown mode: {mode}\")\n\n        job_data.worker_status = {w: time.time() for w in enabled_workers or []}\n        job_data.assigned_to_workers = {w: [] for w in enabled_workers or []}\n\n        if mode == 'dynamic':\n            job_data.batch_size = int(batch_size or 0)\n            pending_queue = job_data.pending_images\n            for i in (all_indices or range(int(batch_size or 0))):\n                await pending_queue.put(i)\n            debug_log(f\"Initialized image queue with {batch_size} pending items\")\n        elif mode == 'static':\n            job_data.num_tiles_per_image = int(num_tiles_per_image or 0)\n            job_data.batch_size = int(batch_size or 0)\n            job_data.batched_static = bool(batched_static)\n            # For batched static distribution, populate only tile ids [0..num_tiles_per_image-1]\n            pending_queue = job_data.pending_tasks\n            if batched_static and num_tiles_per_image is not None:\n                for i in range(num_tiles_per_image):\n                    await pending_queue.put(i)\n            else:\n                total_tiles = int(batch_size or 0) * int(num_tiles_per_image or 0)\n                for i in range(total_tiles):\n                    await pending_queue.put(i)\n\n        prompt_server.distributed_pending_tile_jobs[multi_job_id] = job_data\n\n\nasync def init_dynamic_job(\n    multi_job_id: str,\n    batch_size: int,\n    enabled_workers: List[str],\n    all_indices: Optional[List[int]] = None,\n):\n    \"\"\"Initialize queue for dynamic mode (per-image), with collector fields.\"\"\"\n    await _init_job_queue(\n        multi_job_id,\n        'dynamic',\n        batch_size=batch_size,\n        all_indices=all_indices or list(range(batch_size)),\n        enabled_workers=enabled_workers,\n    )\n    debug_log(f\"Job {multi_job_id} initialized with {batch_size} images\")\n\n\nasync def 
init_static_job_batched(\n    multi_job_id: str,\n    batch_size: int,\n    num_tiles_per_image: int,\n    enabled_workers: List[str],\n):\n    \"\"\"Initialize queue for static mode (batched-per-tile).\"\"\"\n    await _init_job_queue(\n        multi_job_id,\n        'static',\n        batch_size=batch_size,\n        num_tiles_per_image=num_tiles_per_image,\n        enabled_workers=enabled_workers,\n        batched_static=True,\n    )\n\n\nasync def _drain_results_queue(multi_job_id):\n    \"\"\"Drain pending results from queue and update completed_tasks. Returns count drained.\"\"\"\n    prompt_server = ensure_tile_jobs_initialized()\n    async with prompt_server.distributed_tile_jobs_lock:\n        job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n        if not isinstance(job_data, BaseJobState):\n            return 0\n        q = job_data.queue\n        completed_tasks = job_data.completed_tasks\n\n        collected = 0\n        while True:\n            try:\n                result = q.get_nowait()\n            except asyncio.QueueEmpty:\n                break\n\n            worker_id = result['worker_id']\n            is_last = result.get('is_last', False)\n\n            if 'image_idx' in result and 'image' in result:\n                task_id = result['image_idx']\n                if task_id not in completed_tasks:\n                    completed_tasks[task_id] = result['image']\n                    collected += 1\n            elif 'tiles' in result:\n                for tile_data in result['tiles']:\n                    task_id = tile_data.get('global_idx', tile_data['tile_idx'])\n                    if task_id not in completed_tasks:\n                        completed_tasks[task_id] = tile_data\n                        collected += 1\n            if is_last:\n                if worker_id in job_data.worker_status:\n                    del job_data.worker_status[worker_id]\n\n        return collected\n\n\nasync def 
_get_completed_count(multi_job_id):\n    \"\"\"Get count of completed tasks.\"\"\"\n    prompt_server = ensure_tile_jobs_initialized()\n    async with prompt_server.distributed_tile_jobs_lock:\n        job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n        if isinstance(job_data, BaseJobState):\n            return len(job_data.completed_tasks)\n        return 0\n\n\nasync def _mark_task_completed(multi_job_id, task_id, result):\n    \"\"\"Mark a task as completed.\"\"\"\n    prompt_server = ensure_tile_jobs_initialized()\n    async with prompt_server.distributed_tile_jobs_lock:\n        job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n        if isinstance(job_data, BaseJobState):\n            job_data.completed_tasks[task_id] = result\n\n\nasync def _cleanup_job(multi_job_id):\n    \"\"\"Cleanup the job data.\"\"\"\n    prompt_server = ensure_tile_jobs_initialized()\n    async with prompt_server.distributed_tile_jobs_lock:\n        if multi_job_id in prompt_server.distributed_pending_tile_jobs:\n            del prompt_server.distributed_pending_tile_jobs[multi_job_id]\n            debug_log(f\"Cleaned up job {multi_job_id}\")\n"
  },
  {
    "path": "upscale/job_timeout.py",
    "content": "import time\n\nfrom ..utils.config import load_config\nfrom ..utils.constants import HEARTBEAT_TIMEOUT\nfrom ..utils.logging import debug_log, log\nfrom ..utils.network import build_worker_url, probe_worker\nfrom .job_models import BaseJobState\nfrom .job_store import ensure_tile_jobs_initialized\n\n\ndef _find_worker_record(worker_id):\n    \"\"\"Return worker config entry by id, or None when missing.\"\"\"\n    workers = load_config().get(\"workers\", [])\n    return next((w for w in workers if str(w.get(\"id\")) == str(worker_id)), None)\n\n\nasync def _check_and_requeue_timed_out_workers(multi_job_id, total_tasks):\n    \"\"\"Check timed out workers and requeue their tasks. Returns requeued count.\"\"\"\n    prompt_server = ensure_tile_jobs_initialized()\n    current_time = time.time()\n\n    # Allow override via config setting 'worker_timeout_seconds'\n    cfg = load_config()\n    hb_timeout = int(cfg.get(\"settings\", {}).get(\"worker_timeout_seconds\", HEARTBEAT_TIMEOUT))\n\n    # Snapshot timed-out workers and job details under lock.\n    async with prompt_server.distributed_tile_jobs_lock:\n        job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n        if not isinstance(job_data, BaseJobState):\n            return 0\n\n        completed_tasks_snapshot = set(job_data.completed_tasks.keys())\n        batched_static_snapshot = bool(job_data.batched_static)\n        num_tiles_per_image_snapshot = int(job_data.num_tiles_per_image or 1)\n        batch_size_snapshot = int(job_data.batch_size or 1)\n\n        timed_out_workers = []\n        for worker, last_heartbeat in list(job_data.worker_status.items()):\n            age = current_time - float(last_heartbeat)\n            debug_log(f\"Timeout check: worker={worker} age={age:.1f}s threshold={hb_timeout}s\")\n            if age > hb_timeout:\n                timed_out_workers.append(\n                    {\n                        \"worker_id\": worker,\n                 
       \"last_heartbeat\": float(last_heartbeat),\n                        \"assigned_tasks\": list(job_data.assigned_to_workers.get(worker, [])),\n                    }\n                )\n\n    if not timed_out_workers:\n        return 0\n\n    # Probe outside lock to avoid lock contention on network latency.\n    workers_to_requeue = []\n    workers_graced = []\n    for worker_info in timed_out_workers:\n        worker = worker_info[\"worker_id\"]\n        assigned = worker_info[\"assigned_tasks\"]\n        age = current_time - worker_info[\"last_heartbeat\"]\n\n        incomplete_assigned = 0\n        try:\n            if assigned:\n                if batched_static_snapshot:\n                    for task_id in assigned:\n                        for b in range(batch_size_snapshot):\n                            gidx = b * num_tiles_per_image_snapshot + task_id\n                            if gidx not in completed_tasks_snapshot:\n                                incomplete_assigned += 1\n                                break\n                else:\n                    for task_id in assigned:\n                        if task_id not in completed_tasks_snapshot:\n                            incomplete_assigned += 1\n            debug_log(\n                f\"Assigned diagnostics: total_assigned={len(assigned)} \"\n                f\"incomplete_assigned={incomplete_assigned}\"\n            )\n        except Exception as e:\n            debug_log(f\"Assigned diagnostics failed for worker {worker}: {e}\")\n\n        busy = False\n        probe_queue = None\n        try:\n            worker_record = _find_worker_record(worker)\n            if worker_record:\n                worker_url = build_worker_url(worker_record)\n                debug_log(f\"Probing worker {worker} at {worker_url}/prompt\")\n                payload = await probe_worker(worker_url, timeout=2.0)\n                if payload is not None:\n                    probe_queue = 
int(payload.get(\"exec_info\", {}).get(\"queue_remaining\", 0))\n                    busy = probe_queue is not None and probe_queue > 0\n            else:\n                debug_log(f\"Probe skipped; worker {worker} not found in config\")\n        except Exception as e:\n            debug_log(f\"Probe failed for worker {worker}: {e}\")\n        finally:\n            debug_log(\n                f\"Probe diagnostics: online={probe_queue is not None} queue_remaining={probe_queue}\"\n            )\n\n        if busy:\n            workers_graced.append(worker)\n            debug_log(f\"Heartbeat grace: worker {worker} busy via probe; skipping requeue\")\n            continue\n\n        log(f\"Worker {worker} heartbeat timed out after {age:.1f}s\")\n        workers_to_requeue.append((worker, assigned))\n\n    # Re-acquire lock and apply requeue/cleanup decisions.\n    async with prompt_server.distributed_tile_jobs_lock:\n        job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n        if not isinstance(job_data, BaseJobState):\n            return 0\n\n        # Refresh heartbeat for workers that we proved are still busy.\n        for worker in workers_graced:\n            if worker in job_data.worker_status:\n                job_data.worker_status[worker] = current_time\n\n        requeued_count = 0\n        completed_tasks = job_data.completed_tasks\n        batched_static = bool(job_data.batched_static)\n        num_tiles_per_image = int(job_data.num_tiles_per_image or 1)\n        batch_size = int(job_data.batch_size or 1)\n        for worker, assigned_snapshot in workers_to_requeue:\n            # Use current assignments if present, falling back to the snapshot.\n            assigned_tasks = list(job_data.assigned_to_workers.get(worker, assigned_snapshot))\n            for task_id in assigned_tasks:\n                # If batched_static, task_id is a tile_idx; consider it complete only if\n                # all corresponding global_idx entries 
are present in completed_tasks.\n                if batched_static:\n                    all_done = True\n                    for b in range(batch_size):\n                        gidx = b * num_tiles_per_image + task_id\n                        if gidx not in completed_tasks:\n                            all_done = False\n                            break\n                    if not all_done:\n                        await job_data.pending_tasks.put(task_id)\n                        requeued_count += 1\n                else:\n                    if task_id not in completed_tasks:\n                        await job_data.pending_tasks.put(task_id)\n                        requeued_count += 1\n            job_data.worker_status.pop(worker, None)\n            if worker in job_data.assigned_to_workers:\n                job_data.assigned_to_workers[worker] = []\n\n        return requeued_count\n"
  },
  {
    "path": "upscale/modes/__init__.py",
    "content": ""
  },
  {
    "path": "upscale/modes/dynamic.py",
    "content": "import asyncio, torch\nfrom PIL import Image\nimport comfy.model_management\nfrom ...utils.logging import debug_log, log\nfrom ...utils.image import tensor_to_pil, pil_to_tensor\nfrom ...utils.async_helpers import run_async_in_server_loop\nfrom ...utils.config import get_worker_timeout_seconds\nfrom ...utils.constants import TILE_WAIT_TIMEOUT, TILE_SEND_TIMEOUT\nfrom ..job_store import ensure_tile_jobs_initialized, init_dynamic_job\n\n\nclass DynamicModeMixin:\n    \"\"\"\n    Dynamic (per-image queue) USDU mode behaviors for master and worker roles.\n\n    Expected co-mixins on `self`:\n    - TileOpsMixin (`calculate_tiles`, `_slice_conditioning`, `_process_and_blend_tile`).\n    - JobStateMixin (image queue/task completion helpers).\n    - WorkerCommsMixin (`_request_image_from_master`, `_send_full_image_to_master`, `_send_heartbeat_to_master`).\n    \"\"\"\n\n    def process_master_dynamic(self, upscaled_image, model, positive, negative, vae,\n                              seed, steps, cfg, sampler_name, scheduler, denoise,\n                              tile_width, tile_height, padding, mask_blur,\n                              force_uniform_tiles, tiled_decode, multi_job_id, enabled_workers):\n        \"\"\"Dynamic mode for large batches - assigns whole images to workers dynamically, including master.\"\"\"\n        # Get batch size and dimensions\n        batch_size, height, width, _ = upscaled_image.shape\n        num_workers = len(enabled_workers)\n        \n        log(f\"USDU Dist: Image queue distribution | Batch {batch_size} | Workers {num_workers} | Canvas {width}x{height} | Tile {tile_width}x{tile_height}\")\n\n        # No fixed share - all images are dynamic\n        all_indices = list(range(batch_size))\n        \n        debug_log(f\"Processing {batch_size} images dynamically across master + {num_workers} workers.\")\n        \n        # Calculate tiles for processing\n        all_tiles = self.calculate_tiles(width, height, 
tile_width, tile_height, force_uniform_tiles)\n        \n        # Initialize job queue for communication\n        try:\n            run_async_in_server_loop(\n                init_dynamic_job(multi_job_id, batch_size, enabled_workers, all_indices),\n                timeout=2.0\n            )\n        except Exception as e:\n            debug_log(f\"UltimateSDUpscale Master - Queue initialization error: {e}\")\n            raise RuntimeError(f\"Failed to initialize dynamic mode queue: {e}\")\n        \n        # Convert batch to PIL list\n        result_images = [tensor_to_pil(upscaled_image[b:b+1], 0).convert('RGB').copy() for b in range(batch_size)]\n        \n        # Process images dynamically with master participating\n        prompt_server = ensure_tile_jobs_initialized()\n        processed_count = 0\n        consecutive_retries = 0\n        max_consecutive_retries = 10\n        \n        # Process loop - master pulls from queue and processes synchronously\n        while processed_count < batch_size:\n            # Try to get an image to process\n            image_idx = run_async_in_server_loop(\n                self._get_next_image_index(multi_job_id),\n                timeout=5.0  # Short timeout to allow frequent checks\n            )\n\n            if image_idx is not None:\n                # Reset retry counter and process locally\n                consecutive_retries = 0\n                debug_log(f\"Master processing image {image_idx} dynamically\")\n                processed_count += 1\n\n                # Process locally\n                single_tensor = upscaled_image[image_idx:image_idx+1]\n                local_image = result_images[image_idx]\n                image_seed = seed\n                \n                # Pre-slice conditioning once per image (not per tile)\n                positive_sliced, negative_sliced = self._slice_conditioning(positive, negative, image_idx)\n                \n                for tile_idx, pos in 
enumerate(all_tiles):\n                    source_tensor = pil_to_tensor(local_image)\n                    if single_tensor.is_cuda:\n                        source_tensor = source_tensor.cuda()\n                    local_image = self._process_and_blend_tile(\n                        tile_idx, pos, source_tensor, local_image,\n                        model, positive_sliced, negative_sliced, vae, image_seed, steps, cfg,\n                        sampler_name, scheduler, denoise, tile_width, tile_height,\n                        padding, mask_blur, width, height, force_uniform_tiles,\n                        tiled_decode, batch_idx=image_idx\n                    )\n                    \n                    # Yield after each tile to minimize worker downtime\n                    run_async_in_server_loop(self._async_yield(), timeout=0.1)\n                    # Note: No per-tile drain here – that's what makes this \"per-image\"\n                \n                result_images[image_idx] = local_image\n                \n                # Mark as completed\n                run_async_in_server_loop(\n                    self._mark_image_completed(multi_job_id, image_idx, local_image),\n                    timeout=5.0\n                )\n                \n                # NEW: Drain after the full image is marked complete (catches workers who finished during master's processing)\n                drained_count = run_async_in_server_loop(\n                    self._drain_worker_results_queue(multi_job_id),\n                    timeout=5.0\n                )\n                \n                if drained_count > 0:\n                    debug_log(f\"Drained {drained_count} worker images after master's image {image_idx}\")\n                \n                # NEW: Log overall progress (includes master's image + any drained workers)\n                completed_now = run_async_in_server_loop(\n                    self._get_total_completed_count(multi_job_id),\n                    
timeout=1.0\n                )\n                log(f\"USDU Dist: Images progress {completed_now}/{batch_size}\")\n                \n                # Yield to allow workers to get new images after completing one\n                run_async_in_server_loop(self._async_yield(), timeout=0.1)\n            else:\n                # Queue empty: collect any queued worker results to update progress\n                drained_count = run_async_in_server_loop(\n                    self._drain_worker_results_queue(multi_job_id),\n                    timeout=5.0\n                )\n                run_async_in_server_loop(self._async_yield(), timeout=0.1)  # Yield after drain\n                \n                # Check for timed out workers and requeue their images\n                requeued_count = run_async_in_server_loop(\n                    self._check_and_requeue_timed_out_workers(multi_job_id, batch_size),\n                    timeout=5.0\n                )\n                run_async_in_server_loop(self._async_yield(), timeout=0.1)  # Yield after requeue\n                \n                if requeued_count > 0:\n                    log(f\"Requeued {requeued_count} images from timed out workers\")\n                    consecutive_retries = 0  # Reset since we have work to do\n                    continue\n\n                # Now check total completed (includes newly collected)\n                completed_now = run_async_in_server_loop(\n                    self._get_total_completed_count(multi_job_id),\n                    timeout=1.0\n                )\n                \n                log(f\"USDU Dist: Images progress {completed_now}/{batch_size}\")\n                \n                if completed_now >= batch_size:\n                    break\n\n                run_async_in_server_loop(self._async_yield(), timeout=0.1)  # Yield before pending check\n                \n                # Check if there are pending images in the queue (could be requeued)\n                
pending_count = run_async_in_server_loop(\n                    self._get_pending_count(multi_job_id),\n                    timeout=1.0\n                )\n                \n                if pending_count > 0:\n                    consecutive_retries = 0  # Reset retries since there's work to do\n                    continue\n\n                consecutive_retries += 1\n                if consecutive_retries >= max_consecutive_retries:\n                    log(f\"Max retries ({max_consecutive_retries}) reached. Forcing collection of remaining results.\")\n                    break  # Force exit to collection phase\n\n                debug_log(\"Waiting for workers\")\n                # Use async sleep to allow event loop to process worker requests\n                run_async_in_server_loop(asyncio.sleep(2), timeout=3.0)\n        \n        debug_log(f\"Master processed {processed_count} images locally\")\n        \n        # Get all completed images to check what needs to be collected\n        all_completed = run_async_in_server_loop(\n            self._get_all_completed_images(multi_job_id),\n            timeout=5.0\n        )\n        \n        # Calculate how many we still need to collect\n        remaining_to_collect = batch_size - len(all_completed)\n        \n        if remaining_to_collect > 0:\n            debug_log(f\"Waiting for {remaining_to_collect} more images from workers\")\n            # Use the unified worker timeout for the collection phase\n            collection_timeout = float(get_worker_timeout_seconds())\n            collected_images = run_async_in_server_loop(\n                self._async_collect_dynamic_images(multi_job_id, remaining_to_collect, num_workers, batch_size, processed_count),\n                timeout=collection_timeout\n            )\n            \n            # Merge collected with already completed\n            all_completed.update(collected_images)\n        \n        # Update result images with all completed images\n        for 
idx, processed_img in all_completed.items():\n            if idx < batch_size:\n                result_images[idx] = processed_img\n        \n        # Convert back to tensor\n        result_tensor = torch.cat([pil_to_tensor(img) for img in result_images], dim=0) if batch_size > 1 else pil_to_tensor(result_images[0])\n        if upscaled_image.is_cuda:\n            result_tensor = result_tensor.cuda()\n        \n        debug_log(f\"UltimateSDUpscale Master - Job {multi_job_id} complete\")\n        log(f\"Completed processing all {batch_size} images\")\n        return (result_tensor,)\n\n    def process_worker_dynamic(self, upscaled_image, model, positive, negative, vae,\n                               seed, steps, cfg, sampler_name, scheduler, denoise,\n                               tile_width, tile_height, padding, mask_blur,\n                               force_uniform_tiles, tiled_decode, multi_job_id, master_url,\n                               worker_id, enabled_worker_ids, dynamic_threshold):\n        \"\"\"Worker processing in dynamic mode - processes whole images.\"\"\"\n        # Round tile dimensions\n        tile_width = self.round_to_multiple(tile_width)\n        tile_height = self.round_to_multiple(tile_height)\n\n        # Get dimensions and tile grid\n        batch_size, height, width, _ = upscaled_image.shape\n        all_tiles = self.calculate_tiles(width, height, tile_width, tile_height, force_uniform_tiles)\n        log(f\"USDU Dist Worker[{worker_id[:8]}]: Processing image queue | Batch {batch_size}\")\n\n        # Keep track of processed images for is_last detection\n        processed_count = 0\n\n        # Poll for job readiness to avoid races during master init\n        max_poll_attempts = 20  # ~20s at 1s sleep\n        if not self._poll_job_ready(multi_job_id, master_url, worker_id=worker_id, max_attempts=max_poll_attempts):\n            log(f\"Job {multi_job_id} not ready after {max_poll_attempts} attempts, aborting\")\n            
return (upscaled_image,)\n\n        # Loop to request and process images\n        while True:\n            # Request an image to process\n            image_idx, estimated_remaining = run_async_in_server_loop(\n                self._request_image_from_master(multi_job_id, master_url, worker_id),\n                timeout=TILE_WAIT_TIMEOUT\n            )\n\n            if image_idx is None:\n                debug_log(f\"USDU Dist Worker - No more images to process\")\n                break\n\n            debug_log(f\"Worker[{worker_id[:8]}] - Assigned image {image_idx}\")\n            processed_count += 1\n\n            # Determine if this should be marked as last for this worker\n            is_last_for_worker = (estimated_remaining == 0)\n\n            # Extract single image tensor\n            single_tensor = upscaled_image[image_idx:image_idx+1]\n\n            # Convert to PIL for processing\n            local_image = tensor_to_pil(single_tensor, 0).copy()\n\n            # Process all tiles for this image\n            image_seed = seed\n\n            # Pre-slice conditioning once per image (not per tile)\n            positive_sliced, negative_sliced = self._slice_conditioning(positive, negative, image_idx)\n\n            for tile_idx, pos in enumerate(all_tiles):\n                source_tensor = pil_to_tensor(local_image)\n                if single_tensor.is_cuda:\n                    source_tensor = source_tensor.cuda()\n                local_image = self._process_and_blend_tile(\n                    tile_idx, pos, source_tensor, local_image,\n                    model, positive_sliced, negative_sliced, vae, image_seed, steps, cfg,\n                    sampler_name, scheduler, denoise, tile_width, tile_height,\n                    padding, mask_blur, width, height, force_uniform_tiles,\n                    tiled_decode, batch_idx=image_idx\n                )\n                run_async_in_server_loop(\n                    
self._send_heartbeat_to_master(multi_job_id, master_url, worker_id),\n                    timeout=5.0\n                )\n\n            # Send processed image back to master\n            try:\n                # Use the estimated remaining to determine if this is the last image\n                is_last = is_last_for_worker\n                run_async_in_server_loop(\n                    self._send_full_image_to_master(local_image, image_idx, multi_job_id,\n                                                    master_url, worker_id, is_last),\n                    timeout=TILE_SEND_TIMEOUT\n                )\n                # Send heartbeat after processing\n                run_async_in_server_loop(\n                    self._send_heartbeat_to_master(multi_job_id, master_url, worker_id),\n                    timeout=5.0\n                )\n                if is_last:\n                    break\n            except Exception as e:\n                log(f\"USDU Dist Worker[{worker_id[:8]}] - Error sending image {image_idx}: {e}\")\n                # Continue processing other images\n\n        # Send final is_last signal\n        debug_log(f\"Worker[{worker_id[:8]}] processed {processed_count} images, sending completion signal\")\n        try:\n            run_async_in_server_loop(\n                self._send_worker_complete_signal(multi_job_id, master_url, worker_id),\n                timeout=TILE_SEND_TIMEOUT\n            )\n        except Exception as e:\n            log(f\"USDU Dist Worker[{worker_id[:8]}] - Error sending completion signal: {e}\")\n\n        return (upscaled_image,)\n"
  },
  {
    "path": "upscale/modes/single_gpu.py",
    "content": "import math, torch\nfrom PIL import Image\nfrom ...utils.logging import debug_log, log\nfrom ...utils.image import tensor_to_pil, pil_to_tensor\n\n\nclass SingleGpuModeMixin:\n    def process_single_gpu(self, upscaled_image, model, positive, negative, vae,\n                          seed, steps, cfg, sampler_name, scheduler, denoise,\n                          tile_width, tile_height, padding, mask_blur, force_uniform_tiles, tiled_decode):\n        \"\"\"Process all tiles on a single GPU (no distribution), batching per tile like USDU.\"\"\"\n        # Round tile dimensions\n        tile_width = self.round_to_multiple(tile_width)\n        tile_height = self.round_to_multiple(tile_height)\n\n        # Get image dimensions and batch size\n        batch_size, height, width, _ = upscaled_image.shape\n\n        # Calculate all tiles\n        all_tiles = self.calculate_tiles(width, height, tile_width, tile_height, force_uniform_tiles)\n\n        rows = math.ceil(height / tile_height)\n        cols = math.ceil(width / tile_width)\n        log(\n            f\"USDU Dist: Single GPU | Canvas {width}x{height} | Tile {tile_width}x{tile_height} | Grid {rows}x{cols} ({len(all_tiles)} tiles/image) | Batch {batch_size}\"\n        )\n\n        # Prepare result images list\n        result_images = []\n        for b in range(batch_size):\n            image_pil = tensor_to_pil(upscaled_image[b:b+1], 0).convert('RGB')\n            result_images.append(image_pil.copy())\n\n        # Precompute tile masks once\n        tile_masks = []\n        for tx, ty in all_tiles:\n            tile_masks.append(self.create_tile_mask(width, height, tx, ty, tile_width, tile_height, mask_blur))\n\n        # Process tiles batched across images\n        for tile_idx, (tx, ty) in enumerate(all_tiles):\n            # Progressive state parity: extract each tile from the current updated image batch.\n            source_batch = torch.cat([pil_to_tensor(img) for img in result_images], dim=0)\n   
         if upscaled_image.is_cuda:\n                source_batch = source_batch.cuda()\n\n            # Extract batched tile\n            tile_batch, x1, y1, ew, eh = self.extract_batch_tile_with_padding(\n                source_batch, tx, ty, tile_width, tile_height, padding, force_uniform_tiles\n            )\n\n            # Process batch\n            region = (x1, y1, x1 + ew, y1 + eh)\n            processed_batch = self.process_tiles_batch(tile_batch, model, positive, negative, vae,\n                                                       seed, steps, cfg, sampler_name, scheduler, denoise,\n                                                       tiled_decode, region, (width, height))\n\n            # Blend results back into each image using cached mask\n            tile_mask = tile_masks[tile_idx]\n            for b in range(batch_size):\n                tile_pil = tensor_to_pil(processed_batch, b)\n                # Resize back to extracted size\n                if tile_pil.size != (ew, eh):\n                    tile_pil = tile_pil.resize((ew, eh), Image.LANCZOS)\n                result_images[b] = self.blend_tile(result_images[b], tile_pil, x1, y1, (ew, eh), tile_mask, padding)\n\n        # Convert back to tensor\n        result_tensors = [pil_to_tensor(img) for img in result_images]\n        result_tensor = torch.cat(result_tensors, dim=0)\n        if upscaled_image.is_cuda:\n            result_tensor = result_tensor.cuda()\n\n        return (result_tensor,)\n"
  },
  {
    "path": "upscale/modes/static.py",
    "content": "import asyncio, time, torch\nfrom PIL import Image\nimport comfy.model_management\nfrom ...utils.logging import debug_log, log\nfrom ...utils.image import tensor_to_pil, pil_to_tensor\nfrom ...utils.async_helpers import run_async_in_server_loop\nfrom ...utils.config import get_worker_timeout_seconds\nfrom ...utils.constants import (\n    HEARTBEAT_INTERVAL,\n    JOB_POLL_INTERVAL,\n    JOB_POLL_MAX_ATTEMPTS,\n    MAX_BATCH,\n    TILE_SEND_TIMEOUT,\n    TILE_WAIT_TIMEOUT,\n)\nfrom ..job_store import (\n    ensure_tile_jobs_initialized, init_static_job_batched,\n    _mark_task_completed, _cleanup_job, _drain_results_queue, _get_completed_count,\n)\nfrom ..job_models import TileJobState\n\n\nclass StaticModeMixin:\n    \"\"\"\n    Static (tile-queue) USDU mode behaviors for master and worker roles.\n\n    Expected co-mixins on `self`:\n    - TileOpsMixin (`calculate_tiles`, tile extract/blend helpers).\n    - JobStateMixin (`_get_next_tile_index`, `_get_all_completed_tasks`, requeue checks).\n    - WorkerCommsMixin (`send_tiles_batch_to_master`, `_request_tile_from_master`, `_send_heartbeat_to_master`).\n    \"\"\"\n\n    def _poll_job_ready(self, multi_job_id, master_url, worker_id=None, max_attempts=JOB_POLL_MAX_ATTEMPTS):\n        \"\"\"Poll master for job readiness to avoid worker/master initialization race.\"\"\"\n        for attempt in range(max_attempts):\n            ready = run_async_in_server_loop(\n                self._check_job_status(multi_job_id, master_url),\n                timeout=5.0\n            )\n            if ready:\n                if worker_id:\n                    debug_log(f\"Worker[{worker_id[:8]}] job {multi_job_id} ready after {attempt} attempts\")\n                else:\n                    debug_log(f\"Job {multi_job_id} ready after {attempt} attempts\")\n                return True\n            time.sleep(JOB_POLL_INTERVAL)\n        return False\n\n    def _extract_and_process_tile(\n        self,\n        
upscaled_image,\n        tile_id,\n        all_tiles,\n        tile_width,\n        tile_height,\n        padding,\n        force_uniform_tiles,\n        model,\n        positive,\n        negative,\n        vae,\n        seed,\n        steps,\n        cfg,\n        sampler_name,\n        scheduler,\n        denoise,\n        tiled_decode,\n        width,\n        height,\n    ):\n        \"\"\"Extract one tile position for the whole batch and process it.\"\"\"\n        tx, ty = all_tiles[tile_id]\n        tile_batch, x1, y1, ew, eh = self.extract_batch_tile_with_padding(\n            upscaled_image, tx, ty, tile_width, tile_height, padding, force_uniform_tiles\n        )\n        region = (x1, y1, x1 + ew, y1 + eh)\n        processed_batch = self.process_tiles_batch(\n            tile_batch, model, positive, negative, vae,\n            seed, steps, cfg, sampler_name, scheduler, denoise, tiled_decode,\n            region, (width, height)\n        )\n        return processed_batch, x1, y1, ew, eh\n\n    def _flush_tiles_to_master(\n        self,\n        processed_tiles,\n        multi_job_id,\n        master_url,\n        padding,\n        worker_id,\n        is_final_flush=False,\n    ):\n        \"\"\"Send accumulated tile payloads to master and return a fresh accumulator.\"\"\"\n        if not processed_tiles:\n            if is_final_flush:\n                run_async_in_server_loop(\n                    self.send_tiles_batch_to_master(\n                        [],\n                        multi_job_id,\n                        master_url,\n                        padding,\n                        worker_id,\n                        is_final_flush=True,\n                    ),\n                    timeout=TILE_SEND_TIMEOUT,\n                )\n            return processed_tiles\n        run_async_in_server_loop(\n            self.send_tiles_batch_to_master(\n                processed_tiles,\n                multi_job_id,\n                master_url,\n            
    padding,\n                worker_id,\n                is_final_flush=is_final_flush,\n            ),\n            timeout=TILE_SEND_TIMEOUT\n        )\n        return []\n\n    def _master_process_one_tile(\n        self,\n        tile_id,\n        all_tiles,\n        upscaled_image,\n        result_images,\n        tile_masks,\n        multi_job_id,\n        batch_size,\n        num_tiles_per_image,\n        tile_width,\n        tile_height,\n        padding,\n        force_uniform_tiles,\n        model,\n        positive,\n        negative,\n        vae,\n        seed,\n        steps,\n        cfg,\n        sampler_name,\n        scheduler,\n        denoise,\n        tiled_decode,\n        width,\n        height,\n    ):\n        \"\"\"Process one tile_id across the batch and blend into result_images.\"\"\"\n        source_batch = torch.cat([pil_to_tensor(img) for img in result_images], dim=0)\n        if upscaled_image.is_cuda:\n            source_batch = source_batch.cuda()\n        processed_batch, x1, y1, ew, eh = self._extract_and_process_tile(\n            source_batch,\n            tile_id,\n            all_tiles,\n            tile_width,\n            tile_height,\n            padding,\n            force_uniform_tiles,\n            model,\n            positive,\n            negative,\n            vae,\n            seed,\n            steps,\n            cfg,\n            sampler_name,\n            scheduler,\n            denoise,\n            tiled_decode,\n            width,\n            height,\n        )\n        tile_mask = tile_masks[tile_id]\n        out_bs = processed_batch.shape[0] if hasattr(processed_batch, \"shape\") else batch_size\n        processed_items = min(batch_size, out_bs)\n        for b in range(processed_items):\n            tile_pil = tensor_to_pil(processed_batch, b)\n            if tile_pil.size != (ew, eh):\n                tile_pil = tile_pil.resize((ew, eh), Image.LANCZOS)\n            result_images[b] = 
self.blend_tile(result_images[b], tile_pil, x1, y1, (ew, eh), tile_mask, padding)\n            global_idx = b * num_tiles_per_image + tile_id\n            run_async_in_server_loop(\n                _mark_task_completed(multi_job_id, global_idx, {'batch_idx': b, 'tile_idx': tile_id}),\n                timeout=5.0\n            )\n        return processed_items\n\n    def _process_worker_static_sync(self, upscaled_image, model, positive, negative, vae,\n                                    seed, steps, cfg, sampler_name, scheduler, denoise,\n                                    tile_width, tile_height, padding, mask_blur,\n                                    force_uniform_tiles, tiled_decode, multi_job_id, master_url,\n                                    worker_id, enabled_workers):\n        \"\"\"Worker static mode processing with optional dynamic queue pulling.\"\"\"\n        # Round tile dimensions\n        tile_width = self.round_to_multiple(tile_width)\n        tile_height = self.round_to_multiple(tile_height)\n        \n        # Get dimensions and calculate tiles\n        _, height, width, _ = upscaled_image.shape\n        all_tiles = self.calculate_tiles(width, height, tile_width, tile_height, force_uniform_tiles)\n        num_tiles_per_image = len(all_tiles)\n        batch_size = upscaled_image.shape[0]\n        total_tiles = batch_size * num_tiles_per_image\n        \n        processed_tiles = []\n        working_images = []\n        for b in range(batch_size):\n            image_pil = tensor_to_pil(upscaled_image[b:b+1], 0)\n            working_images.append(image_pil.copy())\n        tile_masks = []\n        for tx, ty in all_tiles:\n            tile_masks.append(self.create_tile_mask(width, height, tx, ty, tile_width, tile_height, mask_blur))\n        \n        # Dynamic queue mode (static processing): process batched-per-tile\n        log(f\"USDU Dist Worker[{worker_id[:8]}]: Canvas {width}x{height} | Tile {tile_width}x{tile_height} | Tiles/image 
{num_tiles_per_image} | Batch {batch_size}\")\n        processed_count = 0\n\n        max_poll_attempts = JOB_POLL_MAX_ATTEMPTS\n        if not self._poll_job_ready(multi_job_id, master_url, worker_id=worker_id, max_attempts=max_poll_attempts):\n            log(f\"Job {multi_job_id} not ready after {max_poll_attempts} attempts, aborting\")\n            return (upscaled_image,)\n\n        # Main processing loop - pull tile ids from queue\n        while True:\n            # Request a tile to process\n            tile_idx, estimated_remaining, batched_static = run_async_in_server_loop(\n                self._request_tile_from_master(multi_job_id, master_url, worker_id),\n                timeout=TILE_WAIT_TIMEOUT\n            )\n\n            if tile_idx is None:\n                debug_log(f\"Worker[{worker_id[:8]}] - No more tiles to process\")\n                break\n\n            # Always batched-per-tile in static mode\n            debug_log(f\"Worker[{worker_id[:8]}] - Assigned tile_id {tile_idx}\")\n            processed_count += batch_size\n            tile_id = tile_idx\n            source_batch = torch.cat([pil_to_tensor(img) for img in working_images], dim=0)\n            if upscaled_image.is_cuda:\n                source_batch = source_batch.cuda()\n            processed_batch, x1, y1, ew, eh = self._extract_and_process_tile(\n                source_batch,\n                tile_id,\n                all_tiles,\n                tile_width,\n                tile_height,\n                padding,\n                force_uniform_tiles,\n                model,\n                positive,\n                negative,\n                vae,\n                seed,\n                steps,\n                cfg,\n                sampler_name,\n                scheduler,\n                denoise,\n                tiled_decode,\n                width,\n                height,\n            )\n            # Queue results\n            for b in range(batch_size):\n                
tile_pil = tensor_to_pil(processed_batch, b)\n                if tile_pil.size != (ew, eh):\n                    tile_pil = tile_pil.resize((ew, eh), Image.LANCZOS)\n                working_images[b] = self.blend_tile(\n                    working_images[b],\n                    tile_pil,\n                    x1,\n                    y1,\n                    (ew, eh),\n                    tile_masks[tile_id],\n                    padding,\n                )\n                processed_tiles.append({\n                    'tile': processed_batch[b:b+1],\n                    'tile_idx': tile_id,\n                    'x': x1,\n                    'y': y1,\n                    'extracted_width': ew,\n                    'extracted_height': eh,\n                    'padding': padding,\n                    'batch_idx': b,\n                    'global_idx': b * num_tiles_per_image + tile_id\n                })\n\n            # Send heartbeat\n            try:\n                run_async_in_server_loop(\n                    self._send_heartbeat_to_master(multi_job_id, master_url, worker_id),\n                    timeout=5.0\n                )\n            except Exception as e:\n                debug_log(f\"Worker[{worker_id[:8]}] heartbeat failed: {e}\")\n\n            # Send tiles in batches within loop\n            if len(processed_tiles) >= MAX_BATCH:\n                processed_tiles = self._flush_tiles_to_master(\n                    processed_tiles, multi_job_id, master_url, padding, worker_id, is_final_flush=False\n                )\n\n        # Send any remaining tiles\n        processed_tiles = self._flush_tiles_to_master(\n            processed_tiles, multi_job_id, master_url, padding, worker_id, is_final_flush=True\n        )\n        \n        debug_log(f\"Worker {worker_id} completed all assigned and requeued tiles\")\n        return (upscaled_image,)\n\n    async def _async_collect_and_monitor_static(self, multi_job_id, total_tiles, expected_total):\n        
\"\"\"Async helper for collection and monitoring in static mode.\n        Returns collected tasks dict. Caller should check if all tasks are complete.\"\"\"\n        last_progress_log = time.time()\n        progress_interval = 5.0\n        last_heartbeat_check = time.time()\n        last_completed_count = 0\n        \n        while True:\n            # Check for user interruption\n            if comfy.model_management.processing_interrupted():\n                log(\"Processing interrupted by user\")\n                raise comfy.model_management.InterruptProcessingException()\n            \n            # Drain any pending results\n            collected_count = await _drain_results_queue(multi_job_id)\n            \n            # Check and requeue timed-out workers periodically\n            current_time = time.time()\n            if current_time - last_heartbeat_check >= HEARTBEAT_INTERVAL:\n                requeued_count = await self._check_and_requeue_timed_out_workers(multi_job_id, expected_total)\n                if requeued_count > 0:\n                    log(f\"Requeued {requeued_count} tasks from timed-out workers\")\n                last_heartbeat_check = current_time\n            \n            # Get current completion count\n            completed_count = await _get_completed_count(multi_job_id)\n            \n            # Progress logging\n            if current_time - last_progress_log >= progress_interval:\n                log(f\"Progress: {completed_count}/{expected_total} tasks completed\")\n                last_progress_log = current_time\n            \n            # Check if all tasks are completed\n            if completed_count >= expected_total:\n                debug_log(f\"All {expected_total} tasks completed\")\n                break\n            \n            # If no active workers remain and there are pending tasks, return for local processing\n            prompt_server = ensure_tile_jobs_initialized()\n            async with 
prompt_server.distributed_tile_jobs_lock:\n                job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n                if isinstance(job_data, TileJobState):\n                    pending_queue = job_data.pending_tasks\n                    active_workers = list(job_data.worker_status.keys())\n                    if pending_queue and not pending_queue.empty() and len(active_workers) == 0:\n                        log(f\"No active workers remaining with {expected_total - completed_count} tasks pending. Returning for local processing.\")\n                        break\n            \n            # Wait a bit before next check\n            await asyncio.sleep(0.1)\n        \n        # Get all completed tasks for return\n        return await self._get_all_completed_tasks(multi_job_id)\n\n    def _process_master_static_sync(self, upscaled_image, model, positive, negative, vae,\n                                    seed, steps, cfg, sampler_name, scheduler, denoise,\n                                    tile_width, tile_height, padding, mask_blur,\n                                    force_uniform_tiles, tiled_decode, multi_job_id, enabled_workers,\n                                    all_tiles, num_tiles_per_image):\n        \"\"\"Static mode master processing with optional dynamic queue pulling.\"\"\"\n        batch_size = upscaled_image.shape[0]\n        _, height, width, _ = upscaled_image.shape\n        total_tiles = batch_size * num_tiles_per_image\n        \n        # Convert batch to PIL list for processing\n        result_images = []\n        for b in range(batch_size):\n            image_pil = tensor_to_pil(upscaled_image[b:b+1], 0)\n            result_images.append(image_pil.copy())\n        \n        # Initialize queue: pending queue holds tile ids (batched per tile)\n        log(\"USDU Dist: Using tile queue distribution\")\n        run_async_in_server_loop(\n            init_static_job_batched(multi_job_id, batch_size, 
num_tiles_per_image, enabled_workers),\n            timeout=10.0\n        )\n        debug_log(\n            f\"Initialized tile-id queue with {num_tiles_per_image} ids for batch {batch_size}\"\n        )\n\n        # Precompute masks for all tile positions to avoid repeated Gaussian blur work during blending\n        tile_masks = []\n        for idx, (tx, ty) in enumerate(all_tiles):\n            tile_masks.append(self.create_tile_mask(width, height, tx, ty, tile_width, tile_height, mask_blur))\n\n        processed_count = 0\n        consecutive_no_tile = 0\n        max_consecutive_no_tile = 2\n\n        while processed_count < total_tiles:\n            comfy.model_management.throw_exception_if_processing_interrupted()\n            tile_idx = run_async_in_server_loop(\n                self._get_next_tile_index(multi_job_id),\n                timeout=5.0\n            )\n            if tile_idx is not None:\n                consecutive_no_tile = 0\n                tile_id = tile_idx\n                processed_count += self._master_process_one_tile(\n                    tile_id,\n                    all_tiles,\n                    upscaled_image,\n                    result_images,\n                    tile_masks,\n                    multi_job_id,\n                    batch_size,\n                    num_tiles_per_image,\n                    tile_width,\n                    tile_height,\n                    padding,\n                    force_uniform_tiles,\n                    model,\n                    positive,\n                    negative,\n                    vae,\n                    seed,\n                    steps,\n                    cfg,\n                    sampler_name,\n                    scheduler,\n                    denoise,\n                    tiled_decode,\n                    width,\n                    height,\n                )\n                log(f\"USDU Dist: Tiles progress {processed_count}/{total_tiles} (tile {tile_id})\")\n           
 else:\n                consecutive_no_tile += 1\n                if consecutive_no_tile >= max_consecutive_no_tile:\n                    debug_log(f\"Master processed {processed_count} tiles, moving to collection phase\")\n                    break\n                time.sleep(0.1)\n        master_processed_count = processed_count\n        \n        # Continue processing any remaining tiles while collecting worker results\n        remaining_tiles = total_tiles - master_processed_count\n        if remaining_tiles > 0:\n            debug_log(f\"Master waiting for {remaining_tiles} tiles from workers\")\n            \n            # Collect worker results using async operations\n            try:\n                # Wait until either all tasks are collected or there are no active workers left\n                collected_tasks = run_async_in_server_loop(\n                    self._async_collect_and_monitor_static(multi_job_id, total_tiles, expected_total=total_tiles),\n                    timeout=None\n                )\n            except comfy.model_management.InterruptProcessingException:\n                # Clean up job on interruption\n                run_async_in_server_loop(_cleanup_job(multi_job_id), timeout=5.0)\n                raise\n            \n            # Check if we need to process any remaining tasks locally after collection\n            completed_count = len(collected_tasks)\n            if completed_count < total_tiles:\n                log(f\"Processing remaining {total_tiles - completed_count} tasks locally after worker failures\")\n                \n                # Process any remaining pending tasks (batched-per-tile)\n                while True:\n                    # Check for user interruption\n                    comfy.model_management.throw_exception_if_processing_interrupted()\n\n                    # Get next tile_id from pending queue\n                    tile_id = run_async_in_server_loop(\n                        
self._get_next_tile_index(multi_job_id),\n                        timeout=5.0\n                    )\n\n                    if tile_id is None:\n                        break\n\n                    self._master_process_one_tile(\n                        tile_id,\n                        all_tiles,\n                        upscaled_image,\n                        result_images,\n                        tile_masks,\n                        multi_job_id,\n                        batch_size,\n                        num_tiles_per_image,\n                        tile_width,\n                        tile_height,\n                        padding,\n                        force_uniform_tiles,\n                        model,\n                        positive,\n                        negative,\n                        vae,\n                        seed,\n                        steps,\n                        cfg,\n                        sampler_name,\n                        scheduler,\n                        denoise,\n                        tiled_decode,\n                        width,\n                        height,\n                    )\n        else:\n            # Master processed all tiles\n            collected_tasks = run_async_in_server_loop(\n                self._get_all_completed_tasks(multi_job_id),\n                timeout=5.0\n            )\n        \n        # Blend worker tiles synchronously in deterministic tile order.\n        def _sort_key(item):\n            global_idx, tile_data = item\n            batch_idx = tile_data.get('batch_idx', global_idx // num_tiles_per_image)\n            tile_idx = tile_data.get('tile_idx', global_idx % num_tiles_per_image)\n            return (tile_idx, batch_idx, global_idx)\n\n        for global_idx, tile_data in sorted(collected_tasks.items(), key=_sort_key):\n            # Skip tiles that don't have tensor data (already processed)\n            if 'tensor' not in tile_data and 'image' not in tile_data:\n          
      continue\n            \n            batch_idx = tile_data.get('batch_idx', global_idx // num_tiles_per_image)\n            tile_idx = tile_data.get('tile_idx', global_idx % num_tiles_per_image)\n            \n            if batch_idx >= batch_size:\n                continue\n            \n            # Blend tile synchronously\n            x = tile_data.get('x', 0)\n            y = tile_data.get('y', 0)\n            # Prefer PIL image if present to avoid reconversion\n            if 'image' in tile_data:\n                tile_pil = tile_data['image']\n            else:\n                tile_tensor = tile_data['tensor']\n                tile_pil = tensor_to_pil(tile_tensor, 0)\n            orig_x, orig_y = all_tiles[tile_idx]\n            tile_mask = tile_masks[tile_idx]\n            extracted_width = tile_data.get('extracted_width', tile_width + 2 * padding)\n            extracted_height = tile_data.get('extracted_height', tile_height + 2 * padding)\n            result_images[batch_idx] = self.blend_tile(result_images[batch_idx], tile_pil,\n                                                      x, y, (extracted_width, extracted_height), tile_mask, padding)\n        \n        try:\n            # Convert back to tensor\n            if batch_size == 1:\n                result_tensor = pil_to_tensor(result_images[0])\n            else:\n                result_tensors = [pil_to_tensor(img) for img in result_images]\n                result_tensor = torch.cat(result_tensors, dim=0)\n            \n            if upscaled_image.is_cuda:\n                result_tensor = result_tensor.cuda()\n            \n            log(f\"UltimateSDUpscale Master - Job {multi_job_id} complete\")\n            return (result_tensor,)\n        finally:\n            # Cleanup (async operation) - always execute\n            run_async_in_server_loop(_cleanup_job(multi_job_id), timeout=5.0)\n"
  },
  {
    "path": "upscale/payload_parsers.py",
    "content": "import io\nimport json\n\nfrom PIL import Image\n\n\ndef _parse_tiles_from_form(data):\n    \"\"\"Parse tiles submitted via multipart/form-data into a list of tile dicts.\"\"\"\n    try:\n        padding = int(data.get('padding', 0)) if data.get('padding') is not None else 0\n    except Exception:\n        padding = 0\n\n    meta_raw = data.get('tiles_metadata')\n    if meta_raw is None:\n        raise ValueError(\"Missing tiles_metadata\")\n\n    try:\n        metadata = json.loads(meta_raw)\n    except Exception as e:\n        raise ValueError(f\"Invalid tiles_metadata JSON: {e}\")\n\n    if not isinstance(metadata, list):\n        raise ValueError(\"tiles_metadata must be a list\")\n\n    tiles = []\n    for i, meta in enumerate(metadata):\n        file_field = data.get(f'tile_{i}')\n        if file_field is None or not hasattr(file_field, 'file'):\n            raise ValueError(f\"Missing tile data for index {i}\")\n\n        raw = file_field.file.read()\n        try:\n            img = Image.open(io.BytesIO(raw)).convert(\"RGB\")\n        except Exception as e:\n            raise ValueError(f\"Invalid image data for tile {i}: {e}\")\n\n        try:\n            tile_info = {\n                'image': img,\n                'tile_idx': int(meta.get('tile_idx', i)),\n                'x': int(meta.get('x', 0)),\n                'y': int(meta.get('y', 0)),\n                'extracted_width': int(meta.get('extracted_width', img.width)),\n                'extracted_height': int(meta.get('extracted_height', img.height)),\n                'padding': int(padding),\n            }\n        except Exception as e:\n            raise ValueError(f\"Invalid metadata values for tile {i}: {e}\")\n\n        if 'batch_idx' in meta:\n            try:\n                tile_info['batch_idx'] = int(meta['batch_idx'])\n            except Exception:\n                pass\n        if 'global_idx' in meta:\n            try:\n                tile_info['global_idx'] = 
int(meta['global_idx'])\n            except Exception:\n                pass\n\n        tiles.append(tile_info)\n\n    return tiles\n"
  },
  {
    "path": "upscale/result_collector.py",
    "content": "import asyncio, time\nimport comfy.model_management\nimport server\nfrom ..utils.constants import DYNAMIC_MODE_MAX_POLL_TIMEOUT, HEARTBEAT_INTERVAL\nfrom ..utils.logging import debug_log, log\nfrom ..utils.config import get_worker_timeout_seconds\nfrom .job_store import ensure_tile_jobs_initialized, _mark_task_completed\nfrom .job_timeout import _check_and_requeue_timed_out_workers\nfrom .job_models import BaseJobState, ImageJobState, TileJobState\n\n\nclass ResultCollectorMixin:\n    \"\"\"\n    Mixin for master-side result collection in USDU distributed jobs.\n\n    Expected co-mixins/attributes:\n    - JobStateMixin methods for queue/task access.\n    - `self._check_and_requeue_timed_out_workers(...)` coroutine.\n    - `self._async_yield(...)` optional helper from WorkerCommsMixin.\n    \"\"\"\n\n    def _log_worker_timeout_status(self, job_data, current_time: float, multi_job_id: str) -> list[str]:\n        \"\"\"Log timeout elapsed seconds for each tracked worker and return worker ids.\"\"\"\n        if not isinstance(job_data, BaseJobState):\n            return []\n\n        worker_status = dict(job_data.worker_status)\n        for worker_id, last_seen in worker_status.items():\n            elapsed = max(0.0, current_time - float(last_seen))\n            log(\n                \"UltimateSDUpscale Master - Heartbeat timeout: \"\n                f\"job={multi_job_id}, worker={worker_id}, elapsed={elapsed:.1f}s\"\n            )\n        return list(worker_status.keys())\n\n    async def _async_collect_results(self, multi_job_id, num_workers, mode='static', \n                                   remaining_to_collect=None, batch_size=None):\n        \"\"\"Unified async helper to collect results from workers (tiles or images).\"\"\"\n        # Get the already initialized queue\n        prompt_server = ensure_tile_jobs_initialized()\n        async with prompt_server.distributed_tile_jobs_lock:\n            if multi_job_id not in 
prompt_server.distributed_pending_tile_jobs:\n                raise RuntimeError(f\"Job queue not initialized for {multi_job_id}\")\n            job_data = prompt_server.distributed_pending_tile_jobs[multi_job_id]\n            if mode == 'dynamic':\n                if not isinstance(job_data, ImageJobState):\n                    raise RuntimeError(\n                        f\"Mode mismatch: expected dynamic, got {getattr(job_data, 'mode', 'unknown')}\"\n                    )\n                q = job_data.queue\n                completed_images = job_data.completed_images\n                expected_count = remaining_to_collect or batch_size\n            elif mode == 'static':\n                if not isinstance(job_data, TileJobState):\n                    raise RuntimeError(\n                        f\"Mode mismatch: expected static, got {getattr(job_data, 'mode', 'unknown')}\"\n                    )\n                q = job_data.queue\n                expected_count = len(job_data.completed_tasks) + job_data.pending_tasks.qsize()\n            else:\n                raise RuntimeError(f\"Unsupported mode: {mode}\")\n        \n        item_type = \"images\" if mode == 'dynamic' else \"tiles\"\n        debug_log(f\"UltimateSDUpscale Master - Starting collection, expecting {expected_count} {item_type} from {num_workers} workers\")\n        \n        collected_results = {}\n        workers_done = set()\n        # Unify collector/upscaler wait behavior with the UI worker timeout\n        timeout = float(get_worker_timeout_seconds())\n        last_heartbeat_check = time.time()\n        wait_started_at = time.time()\n        collected_count = 0\n        \n        while len(workers_done) < num_workers:\n            # Check for user interruption\n            if comfy.model_management.processing_interrupted():\n                log(\"Processing interrupted by user\")\n                raise comfy.model_management.InterruptProcessingException()\n                \n            # 
For dynamic mode with remaining_to_collect, check if we've collected enough\n            if mode == 'dynamic' and remaining_to_collect and collected_count >= remaining_to_collect:\n                break\n\n            job_data_snapshot = None\n            async with prompt_server.distributed_tile_jobs_lock:\n                current_job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n                if isinstance(current_job_data, BaseJobState):\n                    job_data_snapshot = current_job_data\n\n            try:\n                # Shorter poll for dynamic mode, but never exceed the configured timeout\n                wait_timeout = (min(DYNAMIC_MODE_MAX_POLL_TIMEOUT, timeout) if mode == 'dynamic' else timeout)\n                result = await asyncio.wait_for(q.get(), timeout=wait_timeout)\n                worker_id = result['worker_id']\n                is_last = result.get('is_last', False)\n                \n                if mode == 'static':\n                    # Handle tiles\n                    tiles = result.get('tiles', [])\n                    debug_log(\n                        f\"UltimateSDUpscale Master - Received batch of {len(tiles)} tiles from worker \"\n                        f\"'{worker_id}' (is_last={is_last})\"\n                    )\n\n                    for tile_data in tiles:\n                        if 'batch_idx' not in tile_data:\n                            log(\"UltimateSDUpscale Master - Missing batch_idx in tile data, skipping\")\n                            continue\n\n                        tile_idx = tile_data['tile_idx']\n                        key = tile_data.get('global_idx', tile_idx)\n                        entry = {\n                            'tile_idx': tile_idx,\n                            'x': tile_data['x'],\n                            'y': tile_data['y'],\n                            'extracted_width': tile_data['extracted_width'],\n                            'extracted_height': 
tile_data['extracted_height'],\n                            'padding': tile_data['padding'],\n                            'worker_id': worker_id,\n                            'batch_idx': tile_data.get('batch_idx', 0),\n                            'global_idx': tile_data.get('global_idx', tile_idx),\n                        }\n                        if 'image' in tile_data:\n                            entry['image'] = tile_data['image']\n                        elif 'tensor' in tile_data:\n                            entry['tensor'] = tile_data['tensor']\n                        collected_results[key] = entry\n                \n                elif mode == 'dynamic':\n                    # Handle full images\n                    if 'image_idx' in result and 'image' in result:\n                        image_idx = result['image_idx']\n                        image_pil = result['image']\n                        completed_images[image_idx] = image_pil\n                        collected_results[image_idx] = image_pil\n                        collected_count += 1\n                        debug_log(f\"UltimateSDUpscale Master - Received image {image_idx} from worker {worker_id}\")\n                \n                if is_last:\n                    workers_done.add(worker_id)\n                    debug_log(f\"UltimateSDUpscale Master - Worker {worker_id} completed\")\n                    \n            except asyncio.TimeoutError:\n                current_time = time.time()\n                waiting_workers = self._log_worker_timeout_status(job_data_snapshot, current_time, multi_job_id)\n                if mode == 'dynamic':\n                    # Check for worker timeouts periodically\n                    if current_time - last_heartbeat_check >= HEARTBEAT_INTERVAL:\n                        # Use the class method to check and requeue\n                        requeued = await self._check_and_requeue_timed_out_workers(multi_job_id, batch_size)\n                        if 
requeued > 0:\n                            log(f\"UltimateSDUpscale Master - Requeued {requeued} images from timed out workers\")\n                        last_heartbeat_check = current_time\n                    \n                    # Check if we've been waiting too long overall\n                    if current_time - wait_started_at > timeout:\n                        elapsed = current_time - wait_started_at\n                        log(\n                            \"UltimateSDUpscale Master - Heartbeat timeout while waiting for images; \"\n                            f\"workers={waiting_workers}, elapsed={elapsed:.1f}s\"\n                        )\n                        break\n                else:\n                    elapsed = current_time - wait_started_at\n                    log(\n                        f\"UltimateSDUpscale Master - Heartbeat timeout waiting for {item_type}; \"\n                        f\"workers={waiting_workers}, elapsed={elapsed:.1f}s\"\n                    )\n                    break\n        \n        debug_log(f\"UltimateSDUpscale Master - Collection complete. 
Got {len(collected_results)} {item_type} from {len(workers_done)} workers\")\n        \n        # Clean up job queue\n        async with prompt_server.distributed_tile_jobs_lock:\n            if multi_job_id in prompt_server.distributed_pending_tile_jobs:\n                del prompt_server.distributed_pending_tile_jobs[multi_job_id]\n        \n        return collected_results if mode == 'static' else completed_images\n\n    async def _async_collect_worker_tiles(self, multi_job_id, num_workers):\n        \"\"\"Async helper to collect tiles from workers.\"\"\"\n        return await self._async_collect_results(multi_job_id, num_workers, mode='static')\n\n    async def _mark_image_completed(self, multi_job_id, image_idx, image_pil):\n        \"\"\"Mark an image as completed in the job data.\"\"\"\n        # Mark the image as completed with the image data\n        await _mark_task_completed(multi_job_id, image_idx, {'image': image_pil})\n        prompt_server = ensure_tile_jobs_initialized()\n        async with prompt_server.distributed_tile_jobs_lock:\n            job_data = prompt_server.distributed_pending_tile_jobs.get(multi_job_id)\n            if isinstance(job_data, ImageJobState):\n                job_data.completed_images[image_idx] = image_pil\n\n    async def _async_collect_dynamic_images(self, multi_job_id, remaining_to_collect, num_workers, batch_size, master_processed_count):\n        \"\"\"Collect remaining processed images from workers.\"\"\"\n        return await self._async_collect_results(multi_job_id, num_workers, mode='dynamic', \n                                               remaining_to_collect=remaining_to_collect, \n                                               batch_size=batch_size)\n"
  },
  {
    "path": "upscale/tile_ops.py",
    "content": "import math, torch\nfrom contextlib import nullcontext\nfrom PIL import Image, ImageFilter, ImageDraw\nfrom typing import List, Tuple\nimport comfy.samplers, comfy.model_management\nfrom ..utils.logging import debug_log, log\nfrom ..utils.image import tensor_to_pil, pil_to_tensor\nfrom ..utils.usdu_utils import crop_cond, get_crop_region, expand_crop\nfrom ..utils.crop_model_patch import crop_model_cond\nfrom .conditioning import clone_conditioning\n\n\nclass TileOpsMixin:\n    def round_to_multiple(self, value: int, multiple: int = 8) -> int:\n        \"\"\"Round value to nearest multiple.\"\"\"\n        return round(value / multiple) * multiple\n\n    def calculate_tiles(self, image_width: int, image_height: int,\n                       tile_width: int, tile_height: int, force_uniform_tiles: bool = True) -> List[Tuple[int, int]]:\n        \"\"\"Calculate tile positions to match Ultimate SD Upscale.\n\n        Positions are a simple grid starting at (0,0) with steps of\n        `tile_width` and `tile_height`, using ceil(rows/cols) to cover edges.\n        Uniform vs non-uniform affects only crop/resize, not positions.\n        \"\"\"\n        rows = math.ceil(image_height / tile_height)\n        cols = math.ceil(image_width / tile_width)\n        tiles: List[Tuple[int, int]] = []\n        for yi in range(rows):\n            for xi in range(cols):\n                tiles.append((xi * tile_width, yi * tile_height))\n        return tiles\n\n    def extract_tile_with_padding(self, image: torch.Tensor, x: int, y: int,\n                                 tile_width: int, tile_height: int, padding: int,\n                                 force_uniform_tiles: bool) -> Tuple[torch.Tensor, int, int, int, int]:\n        \"\"\"Extract a tile region and resize to match USDU cropping logic.\n\n        Mirrors ComfyUI_UltimateSDUpscale processing:\n        - Build a mask with a white rectangle at the tile rect\n        - Compute crop_region via get_crop_region(mask, 
padding)\n        - If force_uniform_tiles: expand by crop/aspect ratio, then resize to\n          fixed processing size of round_to_multiple(tile + padding)\n        - Else: target is ceil(crop_size/8)*8 per dimension\n        - Extract the crop and resize to target tile_size\n        Returns the resized tensor and crop origin/size for blending.\n        \"\"\"\n        _, h, w, _ = image.shape\n\n        # Create mask and compute initial padded crop region\n        mask = Image.new('L', (w, h), 0)\n        draw = ImageDraw.Draw(mask)\n        draw.rectangle([x, y, x + tile_width, y + tile_height], fill=255)\n        x1, y1, x2, y2 = get_crop_region(mask, padding)\n\n        # Determine crop + processing size\n        if force_uniform_tiles:\n            process_w = self.round_to_multiple(tile_width + padding, 8)\n            process_h = self.round_to_multiple(tile_height + padding, 8)\n            crop_w = x2 - x1\n            crop_h = y2 - y1\n            crop_ratio = crop_w / crop_h if crop_h != 0 else 1.0\n            process_ratio = process_w / process_h if process_h != 0 else 1.0\n            if crop_ratio > process_ratio:\n                target_w = crop_w\n                target_h = round(crop_w / process_ratio) if process_ratio != 0 else crop_h\n            else:\n                target_w = round(crop_h * process_ratio)\n                target_h = crop_h\n            (x1, y1, x2, y2), _ = expand_crop((x1, y1, x2, y2), w, h, target_w, target_h)\n            target_w = process_w\n            target_h = process_h\n        else:\n            crop_w = x2 - x1\n            crop_h = y2 - y1\n            target_w = max(8, math.ceil(crop_w / 8) * 8)\n            target_h = max(8, math.ceil(crop_h / 8) * 8)\n            (x1, y1, x2, y2), (target_w, target_h) = expand_crop((x1, y1, x2, y2), w, h, target_w, target_h)\n\n        # Actual extracted size before resizing (for blending)\n        extracted_width = x2 - x1\n        extracted_height = y2 - y1\n\n        # 
Extract tile and resize to processing size\n        tile = image[:, y1:y2, x1:x2, :]\n        tile_pil = tensor_to_pil(tile, 0)\n        if tile_pil.size != (target_w, target_h):\n            tile_pil = tile_pil.resize((target_w, target_h), Image.LANCZOS)\n\n        tile_tensor = pil_to_tensor(tile_pil)\n        if image.is_cuda:\n            tile_tensor = tile_tensor.cuda()\n\n        return tile_tensor, x1, y1, extracted_width, extracted_height\n\n    def extract_batch_tile_with_padding(self, images: torch.Tensor, x: int, y: int,\n                                        tile_width: int, tile_height: int, padding: int,\n                                        force_uniform_tiles: bool) -> Tuple[torch.Tensor, int, int, int, int]:\n        \"\"\"Extract a tile region for the entire batch and resize to USDU logic.\n\n        - Computes a single crop region from a mask at (x,y,w,h) with padding\n        - force_uniform_tiles controls target processing size logic\n        - Returns a batched tensor [B,H',W',C] and crop origin/size for blending\n        \"\"\"\n        batch, h, w, _ = images.shape\n\n        # Create mask and compute initial padded crop region (same for all images)\n        mask = Image.new('L', (w, h), 0)\n        draw = ImageDraw.Draw(mask)\n        draw.rectangle([x, y, x + tile_width, y + tile_height], fill=255)\n        x1, y1, x2, y2 = get_crop_region(mask, padding)\n\n        # Determine crop + processing size\n        if force_uniform_tiles:\n            process_w = self.round_to_multiple(tile_width + padding, 8)\n            process_h = self.round_to_multiple(tile_height + padding, 8)\n            crop_w = x2 - x1\n            crop_h = y2 - y1\n            crop_ratio = crop_w / crop_h if crop_h != 0 else 1.0\n            process_ratio = process_w / process_h if process_h != 0 else 1.0\n            if crop_ratio > process_ratio:\n                target_w = crop_w\n                target_h = round(crop_w / process_ratio) if process_ratio != 0 
else crop_h\n            else:\n                target_w = round(crop_h * process_ratio)\n                target_h = crop_h\n            (x1, y1, x2, y2), _ = expand_crop((x1, y1, x2, y2), w, h, target_w, target_h)\n            target_w = process_w\n            target_h = process_h\n        else:\n            crop_w = x2 - x1\n            crop_h = y2 - y1\n            target_w = max(8, math.ceil(crop_w / 8) * 8)\n            target_h = max(8, math.ceil(crop_h / 8) * 8)\n            (x1, y1, x2, y2), (target_w, target_h) = expand_crop((x1, y1, x2, y2), w, h, target_w, target_h)\n\n        extracted_width = x2 - x1\n        extracted_height = y2 - y1\n\n        # Slice batch region\n        tiles = images[:, y1:y2, x1:x2, :]\n\n        # Resize each tile to target size\n        resized_tiles = []\n        for i in range(batch):\n            tile_pil = tensor_to_pil(tiles, i)\n            if tile_pil.size != (target_w, target_h):\n                tile_pil = tile_pil.resize((target_w, target_h), Image.LANCZOS)\n            resized_tiles.append(pil_to_tensor(tile_pil))\n        tile_batch = torch.cat(resized_tiles, dim=0)\n\n        if images.is_cuda:\n            tile_batch = tile_batch.cuda()\n\n        return tile_batch, x1, y1, extracted_width, extracted_height\n\n    def process_tile(self, tile_tensor: torch.Tensor, model, positive, negative, vae,\n                     seed: int, steps: int, cfg: float, sampler_name: str, \n                     scheduler: str, denoise: float, tiled_decode: bool = False,\n                     batch_idx: int = 0, region: Tuple[int, int, int, int] = None,\n                     image_size: Tuple[int, int] = None) -> torch.Tensor:\n        \"\"\"Process a single tile through SD sampling. 
\n        Note: positive and negative should already be pre-sliced for the current batch_idx.\"\"\"\n        debug_log(f\"[process_tile] Processing tile for batch_idx={batch_idx}, seed={seed}, region={region}\")\n        \n        \n        # Import here to avoid circular dependencies\n        from nodes import common_ksampler, VAEEncode, VAEDecode\n        \n        # Try to import tiled VAE nodes if available\n        try:\n            from nodes import VAEEncodeTiled, VAEDecodeTiled\n            tiled_vae_available = True\n        except ImportError:\n            tiled_vae_available = False\n            if tiled_decode:\n                debug_log(\"Tiled VAE nodes not available, falling back to standard VAE\")\n        \n        # Convert to PIL and back to ensure clean tensor without gradient tracking\n        tile_pil = tensor_to_pil(tile_tensor, 0)\n        clean_tensor = pil_to_tensor(tile_pil)\n        \n        # Ensure tensor is detached and doesn't require gradients\n        clean_tensor = clean_tensor.detach()\n        if hasattr(clean_tensor, 'requires_grad_'):\n            clean_tensor.requires_grad_(False)\n        \n        # Move to correct device\n        if tile_tensor.is_cuda:\n            clean_tensor = clean_tensor.cuda()\n            clean_tensor = clean_tensor.detach()  # Detach again after device transfer\n        \n        # Clone conditioning per tile (shares models, clones hints for cropping)\n        positive_tile = clone_conditioning(positive, clone_hints=True)\n        negative_tile = clone_conditioning(negative, clone_hints=True)\n        \n        # Crop conditioning to tile region if provided (assumes hints at image resolution)\n        if region is not None and image_size is not None:\n            init_size = image_size  # (width, height) of full image\n            canvas_size = image_size\n            tile_size = (tile_tensor.shape[2], tile_tensor.shape[1])  # (width, height)\n            w_pad = 0  # No extra pad needed; region 
already includes padding\n            h_pad = 0\n            positive_cropped = crop_cond(positive_tile, region, init_size, canvas_size, tile_size, w_pad, h_pad)\n            negative_cropped = crop_cond(negative_tile, region, init_size, canvas_size, tile_size, w_pad, h_pad)\n        else:\n            # No region cropping needed, use cloned conditioning as-is\n            positive_cropped = positive_tile\n            negative_cropped = negative_tile\n        \n        # Encode to latent (always non-tiled, matching original node)\n        latent = VAEEncode().encode(vae, clean_tensor)[0]\n        \n        # Sample with model patch cropping parity (ControlNet patch hints)\n        if region is not None and image_size is not None:\n            model_ctx = crop_model_cond(\n                model,\n                region,\n                image_size,\n                image_size,\n                (clean_tensor.shape[2], clean_tensor.shape[1]),\n            )\n        else:\n            model_ctx = nullcontext(model)\n        with model_ctx as model_for_sampling:\n            samples = common_ksampler(\n                model_for_sampling, seed, steps, cfg, sampler_name, scheduler,\n                positive_cropped, negative_cropped, latent, denoise=denoise\n            )[0]\n        \n        # Decode back to image\n        if tiled_decode and tiled_vae_available:\n            image = VAEDecodeTiled().decode(vae, samples, tile_size=512)[0]\n        else:\n            image = VAEDecode().decode(vae, samples)[0]\n        \n        return image\n\n    def process_tiles_batch(self, tile_batch: torch.Tensor, model, positive, negative, vae,\n                            seed: int, steps: int, cfg: float, sampler_name: str,\n                            scheduler: str, denoise: float, tiled_decode: bool,\n                            region: Tuple[int, int, int, int], image_size: Tuple[int, int]) -> torch.Tensor:\n        \"\"\"Process a batch of tiles together (USDU 
behavior).\n\n        tile_batch: [B, H, W, C]\n        Returns image batch tensor [B, H, W, C]\n        \"\"\"\n        # Import locally to avoid circular deps\n        from nodes import common_ksampler, VAEEncode, VAEDecode\n        try:\n            from nodes import VAEEncodeTiled, VAEDecodeTiled\n            tiled_vae_available = True\n        except ImportError:\n            tiled_vae_available = False\n\n        # Detach and move device\n        clean = tile_batch.detach()\n        if hasattr(clean, 'requires_grad_'):\n            clean.requires_grad_(False)\n        if tile_batch.is_cuda:\n            clean = clean.cuda().detach()\n\n        # Clone/crop conditioning once for the region\n        positive_tile = clone_conditioning(positive, clone_hints=True)\n        negative_tile = clone_conditioning(negative, clone_hints=True)\n\n        init_size = image_size\n        canvas_size = image_size\n        tile_size = (clean.shape[2], clean.shape[1])  # (W,H)\n        w_pad = 0\n        h_pad = 0\n        positive_cropped = crop_cond(positive_tile, region, init_size, canvas_size, tile_size, w_pad, h_pad)\n        negative_cropped = crop_cond(negative_tile, region, init_size, canvas_size, tile_size, w_pad, h_pad)\n\n        # Encode -> Sample -> Decode\n        latent = VAEEncode().encode(vae, clean)[0]\n        with crop_model_cond(model, region, image_size, image_size, tile_size) as model_for_sampling:\n            samples = common_ksampler(\n                model_for_sampling, seed, steps, cfg, sampler_name, scheduler,\n                positive_cropped, negative_cropped, latent, denoise=denoise\n            )[0]\n        if tiled_decode and tiled_vae_available:\n            image = VAEDecodeTiled().decode(vae, samples, tile_size=512)[0]\n        else:\n            image = VAEDecode().decode(vae, samples)[0]\n\n        return image\n\n    def create_tile_mask(self, image_width: int, image_height: int,\n                        x: int, y: int, tile_width: int, 
tile_height: int, \n                        mask_blur: int) -> Image.Image:\n        \"\"\"Create a mask for blending tiles - matches Ultimate SD Upscale approach.\n        \n        Creates a black image with a white rectangle at the tile position,\n        then applies blur to create soft edges.\n        \"\"\"\n        # Create a full-size mask matching the image dimensions\n        mask = Image.new('L', (image_width, image_height), 0)  # Black background\n        \n        # Draw white rectangle at tile position\n        draw = ImageDraw.Draw(mask)\n        draw.rectangle([x, y, x + tile_width, y + tile_height], fill=255)\n        \n        # Apply blur to soften edges\n        if mask_blur > 0:\n            mask = mask.filter(ImageFilter.GaussianBlur(mask_blur))\n        \n        return mask\n\n    def blend_tile(self, base_image: Image.Image, tile_image: Image.Image,\n                  x: int, y: int, extracted_size: Tuple[int, int], \n                  mask: Image.Image, padding: int) -> Image.Image:\n        \"\"\"Blend a processed tile back into the base image using Ultimate SD Upscale's exact approach.\n        \n        This follows the exact method from ComfyUI_UltimateSDUpscale/modules/processing.py\n        \"\"\"\n        extracted_width, extracted_height = extracted_size\n        \n        # Debug logging (uncomment if needed)\n        # debug_log(f\"[Blend] Placing tile at ({x}, {y}), size: {extracted_width}x{extracted_height}\")\n        \n        # Calculate the crop region that was used for extraction\n        crop_region = (x, y, x + extracted_width, y + extracted_height)\n        \n        # The mask is already full-size, no need to crop\n        \n        # Resize the processed tile back to the extracted size\n        if tile_image.size != (extracted_width, extracted_height):\n            tile_resized = tile_image.resize((extracted_width, extracted_height), Image.LANCZOS)\n        else:\n            tile_resized = tile_image\n        \n      
  # Follow Ultimate SD Upscale blending approach:\n        # Put the tile into position\n        image_tile_only = Image.new('RGBA', base_image.size)\n        image_tile_only.paste(tile_resized, crop_region[:2])\n        \n        # Add the mask as an alpha channel\n        # Must make a copy due to the possibility of an edge becoming black\n        temp = image_tile_only.copy()\n        temp.putalpha(mask)  # Use the full image mask\n        image_tile_only.paste(temp, image_tile_only)\n        \n        # Add back the tile to the initial image according to the mask in the alpha channel\n        result = base_image.convert('RGBA')\n        result.alpha_composite(image_tile_only)\n        \n        # Convert back to RGB\n        return result.convert('RGB')\n\n    def _slice_conditioning(self, positive, negative, batch_idx):\n        \"\"\"Helper to slice conditioning for a specific batch index.\"\"\"\n        # Clone and slice conditioning properly, including ControlNet hints\n        positive_sliced = clone_conditioning(positive)\n        negative_sliced = clone_conditioning(negative)\n        \n        for cond_list in [positive_sliced, negative_sliced]:\n            for i in range(len(cond_list)):\n                emb, cond_dict = cond_list[i]\n                if emb.shape[0] > 1:\n                    cond_list[i][0] = emb[batch_idx:batch_idx+1]\n                if 'control' in cond_dict:\n                    control = cond_dict['control']\n                    while control is not None:\n                        hint = control.cond_hint_original\n                        if hint.shape[0] > 1:\n                            control.cond_hint_original = hint[batch_idx:batch_idx+1]\n                        control = control.previous_controlnet\n                if 'mask' in cond_dict and cond_dict['mask'].shape[0] > 1:\n                    cond_dict['mask'] = cond_dict['mask'][batch_idx:batch_idx+1]\n        \n        return positive_sliced, negative_sliced\n\n    def 
_process_and_blend_tile(self, tile_idx, tile_pos, upscaled_image, result_image,\n                               model, positive, negative, vae, seed, steps, cfg,\n                               sampler_name, scheduler, denoise, tile_width, tile_height,\n                               padding, mask_blur, image_width, image_height, force_uniform_tiles,\n                               tiled_decode, batch_idx: int = 0):\n        \"\"\"Process a single tile and blend it into the result image.\"\"\"\n        x, y = tile_pos\n        \n        # Extract and process tile\n        tile_tensor, x1, y1, ew, eh = self.extract_tile_with_padding(\n            upscaled_image, x, y, tile_width, tile_height, padding, force_uniform_tiles\n        )\n        \n        processed_tile = self.process_tile(tile_tensor, model, positive, negative, vae,\n                                         seed, steps, cfg, sampler_name, \n                                         scheduler, denoise, tiled_decode, batch_idx=batch_idx,\n                                         region=(x1, y1, x1 + ew, y1 + eh), image_size=(image_width, image_height))\n        \n        # Convert and blend\n        processed_pil = tensor_to_pil(processed_tile, 0)\n        # Create mask for this specific tile (no cache here; only used in single-tile path)\n        tile_mask = self.create_tile_mask(image_width, image_height, x, y, tile_width, tile_height, mask_blur)\n        # Use extraction position and size for blending\n        result_image = self.blend_tile(result_image, processed_pil, \n                                     x1, y1, (ew, eh), tile_mask, padding)\n        \n        return result_image\n\n    def _process_single_tile(self, global_idx, num_tiles_per_image, upscaled_image, all_tiles,\n                                  model, positive, negative, vae, seed, steps, cfg, sampler_name,\n                                  scheduler, denoise, tiled_decode, tile_width, tile_height, padding,\n                          
        width, height, force_uniform_tiles, sliced_conditioning_cache):\n        \"\"\"Process a single tile.\"\"\"\n        # Calculate which image and tile this corresponds to\n        batch_idx = global_idx // num_tiles_per_image\n        tile_idx = global_idx % num_tiles_per_image\n        \n        # Skip if batch_idx is out of range\n        if batch_idx >= upscaled_image.shape[0]:\n            debug_log(f\"Warning: Calculated batch_idx {batch_idx} exceeds batch size {upscaled_image.shape[0]}\")\n            return None\n        \n        # Get or create sliced conditioning for this batch index\n        if batch_idx not in sliced_conditioning_cache:\n            positive_sliced, negative_sliced = self._slice_conditioning(positive, negative, batch_idx)\n            sliced_conditioning_cache[batch_idx] = (positive_sliced, negative_sliced)\n        else:\n            positive_sliced, negative_sliced = sliced_conditioning_cache[batch_idx]\n        \n        x, y = all_tiles[tile_idx]\n        \n        # Extract tile from the specific image in the batch\n        tile_tensor, x1, y1, ew, eh = self.extract_tile_with_padding(\n            upscaled_image[batch_idx:batch_idx+1], x, y, tile_width, tile_height, padding, force_uniform_tiles\n        )\n        \n        # Process tile through SD with the exact seed (USDU parity)\n        image_seed = seed\n        processed_tile = self.process_tile(tile_tensor, model, positive_sliced, negative_sliced, vae,\n                                         image_seed, steps, cfg, sampler_name,\n                                         scheduler, denoise, tiled_decode, batch_idx=batch_idx,\n                                         region=(x1, y1, x1 + ew, y1 + eh), image_size=(width, height))\n        \n        return {\n            'tile': processed_tile,\n            'global_idx': global_idx,\n            'batch_idx': batch_idx,\n            'tile_idx': tile_idx,\n            'x': x1,\n            'y': y1,\n            
'extracted_width': ew,\n            'extracted_height': eh\n        }\n"
  },
  {
    "path": "upscale/worker_comms.py",
    "content": "import asyncio, io, json, time\nimport aiohttp\nfrom PIL import Image\nfrom ..utils.logging import debug_log, log\nfrom ..utils.network import get_client_session\nfrom ..utils.constants import TILE_SEND_TIMEOUT\nfrom ..utils.usdu_managment import MAX_PAYLOAD_SIZE, _send_heartbeat_to_master\nfrom ..utils.image import tensor_to_pil\n\n\nclass WorkerCommsMixin:\n    async def _send_heartbeat_to_master(self, multi_job_id, master_url, worker_id):\n        \"\"\"Proxy heartbeat helper used by worker processing mixins.\"\"\"\n        await _send_heartbeat_to_master(multi_job_id, master_url, worker_id)\n\n    async def send_tiles_batch_to_master(self, processed_tiles, multi_job_id, master_url, \n                                       padding, worker_id, is_final_flush=False):\n        \"\"\"Send all processed tiles to master, chunked if large.\"\"\"\n        if not processed_tiles:\n            if is_final_flush:\n                await self._send_tiles_completion_signal(multi_job_id, master_url, worker_id)\n            return  # Early exit if empty\n\n        total_tiles = len(processed_tiles)\n        debug_log(f\"Worker[{worker_id[:8]}] - Preparing to send {total_tiles} tiles (size-aware chunks)\")\n\n        # Prepare encoded images and sizes to enable size-aware chunking\n        encoded = []\n        for idx, tile_data in enumerate(processed_tiles):\n            img = tensor_to_pil(tile_data['tile'], 0)\n            bio = io.BytesIO()\n            # Keep compression low to balance speed and size; adjust if needed\n            img.save(bio, format='PNG', compress_level=0)\n            raw = bio.getvalue()\n            encoded.append({\n                'bytes': raw,\n                'meta': {\n                    'tile_idx': tile_data['tile_idx'],\n                    'x': tile_data['x'],\n                    'y': tile_data['y'],\n                    'extracted_width': tile_data['extracted_width'],\n                    'extracted_height': 
tile_data['extracted_height'],\n                    **({'batch_idx': tile_data['batch_idx']} if 'batch_idx' in tile_data else {}),\n                    **({'global_idx': tile_data['global_idx']} if 'global_idx' in tile_data else {}),\n                }\n            })\n\n        # Size-aware chunking\n        max_bytes = int(MAX_PAYLOAD_SIZE) - (1024 * 1024)  # 1MB headroom\n        i = 0\n        chunk_index = 0\n        while i < total_tiles:\n            data = aiohttp.FormData()\n            data.add_field('multi_job_id', multi_job_id)\n            data.add_field('worker_id', str(worker_id))\n            data.add_field('padding', str(padding))\n\n            metadata = []\n            used = 0\n            j = i\n            while j < total_tiles:\n                img_bytes = encoded[j]['bytes']\n                meta = encoded[j]['meta']\n                # Rough overhead for fields + JSON\n                overhead = 1024\n                if used + len(img_bytes) + overhead > max_bytes and j > i:\n                    break\n                # Accept this tile in this chunk\n                metadata.append(meta)\n                data.add_field(f'tile_{j - i}', io.BytesIO(img_bytes), filename=f'tile_{j}.png', content_type='image/png')\n                used += len(img_bytes) + overhead\n                j += 1\n\n            # Ensure at least one tile per chunk\n            if j == i:\n                # Single oversized tile, send anyway\n                meta = encoded[j]['meta']\n                metadata.append(meta)\n                data.add_field('tile_0', io.BytesIO(encoded[j]['bytes']), filename=f'tile_{j}.png', content_type='image/png')\n                j += 1\n\n            chunk_size = j - i\n            is_chunk_last = (j >= total_tiles)\n            data.add_field('is_last', str(bool(is_final_flush and is_chunk_last)))\n            data.add_field('batch_size', str(chunk_size))\n            data.add_field('tiles_metadata', json.dumps(metadata), 
content_type='application/json')\n\n            # Retry logic with exponential backoff\n            max_retries = 5\n            retry_delay = 0.5\n            for attempt in range(max_retries):\n                try:\n                    session = await get_client_session()\n                    url = f\"{master_url}/distributed/submit_tiles\"\n                    async with session.post(url, data=data) as response:\n                        response.raise_for_status()\n                        break\n                except Exception as e:\n                    if attempt < max_retries - 1:\n                        await asyncio.sleep(retry_delay)\n                        retry_delay = min(retry_delay * 2, 5.0)\n                    else:\n                        log(f\"UltimateSDUpscale Worker - Failed to send chunk {chunk_index} after {max_retries} attempts: {e}\")\n                        raise\n\n            debug_log(f\"Worker[{worker_id[:8]}] - Sent chunk {chunk_index} ({chunk_size} tiles, ~{used/1e6:.2f} MB)\")\n            chunk_index += 1\n            i = j\n\n    async def _send_tiles_completion_signal(self, multi_job_id, master_url, worker_id):\n        \"\"\"Send completion signal to master in static mode when no tiles are left.\"\"\"\n        data = aiohttp.FormData()\n        data.add_field('multi_job_id', multi_job_id)\n        data.add_field('worker_id', str(worker_id))\n        data.add_field('is_last', 'true')\n        data.add_field('batch_size', '0')\n\n        session = await get_client_session()\n        url = f\"{master_url}/distributed/submit_tiles\"\n        async with session.post(url, data=data) as response:\n            response.raise_for_status()\n            debug_log(f\"Worker {worker_id} sent static completion signal\")\n\n    async def _request_work_item_from_master(\n        self,\n        multi_job_id,\n        master_url,\n        worker_id,\n        endpoint=\"/distributed/request_image\",\n    ):\n        \"\"\"Request one work item 
from master with retry/backoff and total timeout.\"\"\"\n        max_retries = 10\n        retry_delay = 0.5\n        start_time = time.monotonic()\n        url = f\"{master_url}{endpoint}\"\n\n        for attempt in range(max_retries):\n            if time.monotonic() - start_time > 30:\n                log(f\"Total request timeout after 30s for worker {worker_id}\")\n                return None\n\n            try:\n                session = await get_client_session()\n                async with session.post(url, json={\n                    'worker_id': str(worker_id),\n                    'multi_job_id': multi_job_id\n                }) as response:\n                    if response.status == 200:\n                        return await response.json()\n                    if response.status == 404:\n                        text = await response.text()\n                        debug_log(f\"Job not found (404), will retry: {text}\")\n                        await asyncio.sleep(1.0)\n                    else:\n                        text = await response.text()\n                        debug_log(\n                            f\"Request work item failed ({response.status}) for worker {worker_id}: {text}\"\n                        )\n\n            except Exception as exc:\n                if attempt < max_retries - 1:\n                    debug_log(f\"Retry {attempt + 1}/{max_retries} after error: {exc}\")\n                    await asyncio.sleep(retry_delay)\n                    retry_delay = min(retry_delay * 2, 5.0)\n                else:\n                    log(f\"Failed to request work item after {max_retries} attempts: {exc}\")\n                    raise\n\n        return None\n\n    async def _request_image_from_master(self, multi_job_id, master_url, worker_id):\n        \"\"\"Request an image index to process from master in dynamic mode.\"\"\"\n        data = await self._request_work_item_from_master(multi_job_id, master_url, worker_id)\n        if not data:\n 
           return None, 0\n        image_idx = data.get('image_idx')\n        estimated_remaining = data.get('estimated_remaining', 0)\n        return image_idx, estimated_remaining\n\n    async def _request_tile_from_master(self, multi_job_id, master_url, worker_id):\n        \"\"\"Request a tile index to process from master in static mode (reusing dynamic infrastructure).\"\"\"\n        data = await self._request_work_item_from_master(multi_job_id, master_url, worker_id)\n        if not data:\n            return None, 0, False\n        tile_idx = data.get('tile_idx')\n        estimated_remaining = data.get('estimated_remaining', 0)\n        batched_static = data.get('batched_static', False)\n        return tile_idx, estimated_remaining, batched_static\n\n    async def _send_full_image_to_master(self, image_pil, image_idx, multi_job_id, \n                                        master_url, worker_id, is_last):\n        \"\"\"Send a processed full image back to master in dynamic mode.\"\"\"\n        # Serialize image to PNG\n        byte_io = io.BytesIO()\n        image_pil.save(byte_io, format='PNG', compress_level=0)\n        byte_io.seek(0)\n        \n        # Prepare form data\n        data = aiohttp.FormData()\n        data.add_field('multi_job_id', multi_job_id)\n        data.add_field('worker_id', str(worker_id))\n        data.add_field('image_idx', str(image_idx))\n        data.add_field('is_last', str(is_last))\n        data.add_field('full_image', byte_io, filename=f'image_{image_idx}.png', \n                      content_type='image/png')\n        \n        # Retry logic\n        max_retries = 5\n        retry_delay = 0.5\n        \n        for attempt in range(max_retries):\n            try:\n                session = await get_client_session()\n                url = f\"{master_url}/distributed/submit_image\"\n                \n                async with session.post(url, data=data) as response:\n                    response.raise_for_status()\n        
            debug_log(f\"Successfully sent image {image_idx} to master\")\n                    return\n                    \n            except Exception as e:\n                if attempt < max_retries - 1:\n                    debug_log(f\"Retry {attempt + 1}/{max_retries} after error: {e}\")\n                    await asyncio.sleep(retry_delay)\n                    retry_delay *= 2\n                else:\n                    log(f\"Failed to send image {image_idx} after {max_retries} attempts: {e}\")\n                    raise\n\n    async def _send_worker_complete_signal(self, multi_job_id, master_url, worker_id):\n        \"\"\"Send completion signal to master in dynamic mode.\"\"\"\n        # Send a dummy request with is_last=True\n        data = aiohttp.FormData()\n        data.add_field('multi_job_id', multi_job_id)\n        data.add_field('worker_id', str(worker_id))\n        data.add_field('is_last', 'true')\n        # No image data - just completion signal\n        \n        session = await get_client_session()\n        url = f\"{master_url}/distributed/submit_image\"\n        \n        async with session.post(url, data=data) as response:\n            response.raise_for_status()\n            debug_log(f\"Worker {worker_id} sent completion signal\")\n\n    async def _check_job_status(self, multi_job_id, master_url):\n        \"\"\"Check if job is ready on the master.\"\"\"\n        try:\n            session = await get_client_session()\n            url = f\"{master_url}/distributed/job_status?multi_job_id={multi_job_id}\"\n            async with session.get(url) as response:\n                if response.status == 200:\n                    data = await response.json()\n                    return data.get('ready', False)\n                return False\n        except Exception as e:\n            debug_log(f\"Job status check failed: {e}\")\n            return False\n\n    async def _async_yield(self):\n        \"\"\"Simple async yield to allow event loop 
processing.\"\"\"\n        await asyncio.sleep(0)\n"
  },
  {
    "path": "utils/__init__.py",
    "content": "\"\"\"\nUtility modules for ComfyUI-Distributed extension.\n\"\"\"\n\n# Make utils importable as a package"
  },
  {
    "path": "utils/async_helpers.py",
    "content": "\"\"\"\nAsync helper utilities for ComfyUI-Distributed.\n\"\"\"\nimport asyncio\nimport threading\nimport time\nimport uuid\nimport execution\nimport server\nfrom typing import Optional, Any, Coroutine\nfrom .network import get_server_loop\n\ndef run_async_in_server_loop(coro: Coroutine, timeout: Optional[float] = None) -> Any:\n    \"\"\"\n    Run async coroutine in server's event loop and wait for result.\n    \n    This is useful when you need to run async code from a synchronous context\n    but want to use the server's existing event loop instead of creating a new one.\n    \n    Args:\n        coro: The coroutine to run\n        timeout: Optional timeout in seconds\n        \n    Returns:\n        The result of the coroutine\n        \n    Raises:\n        TimeoutError: If the operation times out\n        Exception: Any exception raised by the coroutine\n    \"\"\"\n    event = threading.Event()\n    result = None\n    error = None\n    \n    async def wrapper():\n        nonlocal result, error\n        try:\n            result = await coro\n        except Exception as e:\n            error = e\n        finally:\n            event.set()\n    \n    # Schedule on server's event loop\n    loop = get_server_loop()\n    asyncio.run_coroutine_threadsafe(wrapper(), loop)\n    \n    # Wait for completion\n    if not event.wait(timeout):\n        raise TimeoutError(f\"Async operation timed out after {timeout} seconds\")\n    \n    if error:\n        raise error\n    return result\n\n\nprompt_server = server.PromptServer.instance\n\n\ndef _summarize_node_errors(node_errors: dict) -> str:\n    if not isinstance(node_errors, dict) or not node_errors:\n        return \"\"\n\n    parts = []\n    for node_id, entry in node_errors.items():\n        if not isinstance(entry, dict):\n            continue\n        class_type = str(entry.get(\"class_type\") or \"UnknownNode\")\n        for err in entry.get(\"errors\", []):\n            if not isinstance(err, 
dict):\n                continue\n            message = str(err.get(\"message\") or \"validation error\")\n            details = str(err.get(\"details\") or \"\").strip()\n            parts.append(\n                f\"{class_type}#{node_id}: {message}{f' ({details})' if details else ''}\"\n            )\n            if len(parts) >= 5:\n                return \" | \".join(parts)\n    return \" | \".join(parts)\n\n\nclass PromptValidationError(RuntimeError):\n    \"\"\"Raised when a prompt fails ComfyUI validation with structured context.\"\"\"\n\n    def __init__(self, error_payload, node_errors=None):\n        payload = error_payload if isinstance(error_payload, dict) else {\n            \"type\": \"prompt_validation_failed\",\n            \"message\": str(error_payload),\n            \"details\": \"\",\n            \"extra_info\": {},\n        }\n        self.validation_error = dict(payload)\n        self.node_errors = node_errors if isinstance(node_errors, dict) else {}\n\n        if self.node_errors:\n            details = str(self.validation_error.get(\"details\") or \"\").strip()\n            if not details:\n                summary = _summarize_node_errors(self.node_errors)\n                if summary:\n                    self.validation_error[\"details\"] = summary\n\n        merged = dict(self.validation_error)\n        if self.node_errors:\n            merged[\"node_errors\"] = self.node_errors\n        super().__init__(f\"Invalid prompt: {merged}\")\n\n\nasync def queue_prompt_payload(\n    prompt_obj,\n    workflow_meta=None,\n    client_id=None,\n    include_queue_metadata=False,\n):\n    \"\"\"Validate and queue a prompt via ComfyUI's prompt queue.\"\"\"\n    payload = {\"prompt\": prompt_obj}\n    payload = prompt_server.trigger_on_prompt(payload)\n    prompt = payload[\"prompt\"]\n\n    prompt_id = str(uuid.uuid4())\n    valid = await execution.validate_prompt(prompt_id, prompt, None)\n    if not valid[0]:\n        error_payload = valid[1] if 
len(valid) > 1 else \"Prompt outputs failed validation\"\n        node_errors = valid[3] if len(valid) > 3 else {}\n        raise PromptValidationError(error_payload, node_errors)\n\n    extra_data = {\"create_time\": int(time.time() * 1000)}\n    if workflow_meta:\n        extra_data.setdefault(\"extra_pnginfo\", {})[\"workflow\"] = workflow_meta\n    if client_id:\n        extra_data[\"client_id\"] = client_id\n\n    sensitive = {}\n    for key in getattr(execution, \"SENSITIVE_EXTRA_DATA_KEYS\", []):\n        if key in extra_data:\n            sensitive[key] = extra_data.pop(key)\n\n    number = getattr(prompt_server, \"number\", 0)\n    prompt_server.number = number + 1\n    prompt_queue_item = (number, prompt_id, prompt, extra_data, valid[2], sensitive)\n    prompt_server.prompt_queue.put(prompt_queue_item)\n\n    if include_queue_metadata:\n        return {\n            \"prompt_id\": prompt_id,\n            \"number\": number,\n            \"node_errors\": {},\n        }\n\n    return prompt_id\n"
  },
  {
    "path": "utils/audio_payload.py",
    "content": "import base64\nimport binascii\nimport os\n\nimport numpy as np\nimport torch\n\nfrom .image import ensure_contiguous\n\n\nMAX_AUDIO_PAYLOAD_BYTES = int(\n    os.environ.get(\"COMFYUI_MAX_AUDIO_PAYLOAD_BYTES\", str(256 * 1024 * 1024))\n)\n\n\ndef encode_audio_payload(audio_payload):\n    \"\"\"Serialize an AUDIO dict into JSON-safe canonical envelope payload.\"\"\"\n    if not isinstance(audio_payload, dict):\n        return None\n\n    waveform = audio_payload.get(\"waveform\")\n    if waveform is None or not isinstance(waveform, torch.Tensor) or waveform.numel() == 0:\n        return None\n\n    sample_rate = audio_payload.get(\"sample_rate\", 44100)\n    try:\n        sample_rate = int(sample_rate)\n    except (TypeError, ValueError):\n        sample_rate = 44100\n\n    waveform_cpu = waveform.detach().to(device=\"cpu\", dtype=torch.float32).contiguous()\n    data_bytes = waveform_cpu.numpy().tobytes()\n    if len(data_bytes) > MAX_AUDIO_PAYLOAD_BYTES:\n        raise ValueError(\n            f\"Audio payload too large: {len(data_bytes)} bytes exceeds {MAX_AUDIO_PAYLOAD_BYTES}.\"\n        )\n\n    return {\n        \"sample_rate\": sample_rate,\n        \"shape\": [int(dim) for dim in waveform_cpu.shape],\n        \"dtype\": \"float32\",\n        \"data\": base64.b64encode(data_bytes).decode(\"ascii\"),\n    }\n\n\ndef decode_audio_payload(audio_payload):\n    \"\"\"Decode canonical envelope audio payload into an AUDIO dict.\"\"\"\n    if audio_payload is None:\n        return None\n    if not isinstance(audio_payload, dict):\n        raise ValueError(\"Field 'audio' must be an object when provided.\")\n\n    encoded = audio_payload.get(\"data\")\n    shape = audio_payload.get(\"shape\")\n    sample_rate = audio_payload.get(\"sample_rate\", 44100)\n    dtype = audio_payload.get(\"dtype\", \"float32\")\n\n    if not isinstance(encoded, str) or not encoded.strip():\n        raise ValueError(\"Field 'audio.data' must be a non-empty base64 
string.\")\n    if not isinstance(shape, list) or len(shape) != 3:\n        raise ValueError(\"Field 'audio.shape' must be a 3-item list [batch, channels, samples].\")\n    if dtype != \"float32\":\n        raise ValueError(\"Field 'audio.dtype' must be 'float32'.\")\n\n    try:\n        shape_tuple = tuple(int(dim) for dim in shape)\n    except (TypeError, ValueError) as exc:\n        raise ValueError(\"Field 'audio.shape' must contain integers.\") from exc\n\n    if shape_tuple[0] <= 0 or shape_tuple[1] <= 0 or shape_tuple[2] < 0:\n        raise ValueError(\n            \"Field 'audio.shape' must be [batch>0, channels>0, samples>=0].\"\n        )\n\n    try:\n        sample_rate = int(sample_rate)\n    except (TypeError, ValueError) as exc:\n        raise ValueError(\"Field 'audio.sample_rate' must be an integer.\") from exc\n    if sample_rate <= 0:\n        raise ValueError(\"Field 'audio.sample_rate' must be positive.\")\n\n    try:\n        raw = base64.b64decode(encoded, validate=True)\n    except (binascii.Error, ValueError) as exc:\n        raise ValueError(\"Field 'audio.data' is not valid base64.\") from exc\n\n    if len(raw) > MAX_AUDIO_PAYLOAD_BYTES:\n        raise ValueError(\n            f\"Field 'audio.data' too large: {len(raw)} bytes exceeds {MAX_AUDIO_PAYLOAD_BYTES}.\"\n        )\n\n    expected_bytes = int(np.prod(shape_tuple, dtype=np.int64)) * 4\n    if len(raw) != expected_bytes:\n        raise ValueError(\n            f\"Field 'audio.data' byte size mismatch: expected {expected_bytes}, got {len(raw)}.\"\n        )\n\n    array = np.frombuffer(raw, dtype=np.float32).reshape(shape_tuple)\n    waveform = torch.from_numpy(array.copy())\n    return {\n        \"waveform\": ensure_contiguous(waveform),\n        \"sample_rate\": sample_rate,\n    }\n"
  },
  {
    "path": "utils/cloudflare/__init__.py",
    "content": "from .tunnel import CloudflareTunnelManager\n\ncloudflare_tunnel_manager = CloudflareTunnelManager()\n\n__all__ = [\"CloudflareTunnelManager\", \"cloudflare_tunnel_manager\"]\n"
  },
  {
    "path": "utils/cloudflare/binary.py",
    "content": "\"\"\"Cloudflared binary discovery and download helpers.\"\"\"\n\nimport os\nimport platform\nimport shutil\nimport stat\nfrom urllib import error as urlerror\nfrom urllib import request\n\nfrom ..logging import debug_log\n\n\ndef _get_project_root():\n    return os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n\n\ndef _get_cloudflared_dir():\n    return os.path.join(_get_project_root(), \"bin\")\n\n\ndef _get_platform_binary_name():\n    system = platform.system().lower()\n    machine = platform.machine().lower()\n\n    if system == \"windows\":\n        if \"arm\" in machine:\n            return \"cloudflared-windows-arm64.exe\"\n        return \"cloudflared-windows-amd64.exe\"\n    if system == \"darwin\":\n        if machine in (\"arm64\", \"aarch64\"):\n            return \"cloudflared-darwin-arm64\"\n        return \"cloudflared-darwin-amd64\"\n    if system == \"linux\":\n        if machine in (\"arm64\", \"aarch64\"):\n            return \"cloudflared-linux-arm64\"\n        return \"cloudflared-linux-amd64\"\n\n    raise RuntimeError(f\"Unsupported platform for cloudflared: {system}/{machine}\")\n\n\ndef _get_binary_path(bin_dir=None):\n    bin_dir = bin_dir or _get_cloudflared_dir()\n    binary_name = \"cloudflared.exe\" if platform.system().lower() == \"windows\" else \"cloudflared\"\n    return os.path.join(bin_dir, binary_name)\n\n\ndef _download_cloudflared():\n    asset = _get_platform_binary_name()\n    url = f\"https://github.com/cloudflare/cloudflared/releases/latest/download/{asset}\"\n\n    bin_dir = _get_cloudflared_dir()\n    os.makedirs(bin_dir, exist_ok=True)\n    target_path = _get_binary_path(bin_dir)\n\n    debug_log(f\"Downloading cloudflared from {url}\")\n    try:\n        with request.urlopen(url, timeout=30) as resp:\n            with open(target_path, \"wb\") as f:\n                shutil.copyfileobj(resp, f)\n    except urlerror.URLError as exc:\n        raise RuntimeError(f\"Failed to download 
cloudflared: {exc}\") from exc\n\n    st = os.stat(target_path)\n    os.chmod(target_path, st.st_mode | stat.S_IEXEC)\n    debug_log(f\"Downloaded cloudflared to {target_path}\")\n    return target_path\n\n\ndef ensure_binary() -> str:\n    \"\"\"Return a usable cloudflared binary path, downloading if necessary.\"\"\"\n    env_path = os.environ.get(\"CLOUDFLARED_PATH\")\n    if env_path and os.path.exists(env_path):\n        return env_path\n\n    local_candidate = _get_binary_path()\n    if os.path.exists(local_candidate):\n        return local_candidate\n\n    path_binary = shutil.which(\"cloudflared\")\n    if path_binary:\n        return path_binary\n\n    return _download_cloudflared()\n"
  },
  {
    "path": "utils/cloudflare/process_reader.py",
    "content": "\"\"\"Background cloudflared process output reader.\"\"\"\n\nimport asyncio\nimport re\nimport threading\n\nfrom ..constants import CLOUDFLARE_LOG_BUFFER_SIZE\nfrom ..logging import debug_log\n\nPUBLIC_URL_PATTERN = re.compile(\n    r\"(https?://[\\w.-]+\\.(?:trycloudflare\\.com|cloudflare\\.dev))\",\n    re.IGNORECASE,\n)\nclass ProcessReader:\n    def __init__(self, log_file=None):\n        self._process = None\n        self._thread = None\n        self._loop = None\n        self._url_event = None\n        self._public_url = None\n        self._last_error = None\n        self._recent_logs = []\n        self._log_file = log_file\n\n    def set_log_file(self, log_file):\n        self._log_file = log_file\n\n    def _append_log(self, line):\n        if self._log_file:\n            try:\n                with open(self._log_file, \"a\", encoding=\"utf-8\", errors=\"replace\") as f:\n                    f.write(line + \"\\n\")\n            except Exception as exc:  # pragma: no cover\n                debug_log(f\"Failed to write tunnel log: {exc}\")\n\n        self._recent_logs.append(line)\n        if len(self._recent_logs) > CLOUDFLARE_LOG_BUFFER_SIZE:\n            self._recent_logs = self._recent_logs[-CLOUDFLARE_LOG_BUFFER_SIZE:]\n\n    def _reader(self):\n        process = self._process\n        if process is None:\n            return\n\n        loop = self._loop\n        for raw_line in iter(process.stdout.readline, \"\"):\n            line = raw_line.strip()\n            if not line:\n                continue\n\n            self._append_log(line)\n            match = PUBLIC_URL_PATTERN.search(line)\n            if match and not self._public_url:\n                self._public_url = match.group(1).rstrip(\"/\")\n                if self._url_event and loop:\n                    loop.call_soon_threadsafe(self._url_event.set)\n\n            if \"error\" in line.lower() and not self._last_error:\n                self._last_error = line\n\n        if 
self._url_event and loop:\n            if not self._last_error and not self._public_url:\n                self._last_error = \"Cloudflare tunnel exited before becoming ready\"\n            loop.call_soon_threadsafe(self._url_event.set)\n\n    def start(self, process, loop):\n        self._process = process\n        self._loop = loop\n        self._url_event = asyncio.Event()\n        self._public_url = None\n        self._last_error = None\n        self._recent_logs = []\n        self._thread = threading.Thread(target=self._reader, daemon=True)\n        self._thread.start()\n\n    async def wait_for_url(self, timeout):\n        if not self._url_event:\n            return None\n        await asyncio.wait_for(self._url_event.wait(), timeout=timeout)\n        return self._public_url\n\n    def stop(self):\n        if self._thread and self._thread.is_alive():\n            self._thread.join(timeout=1)\n        self._thread = None\n        self._process = None\n        self._loop = None\n        self._url_event = None\n\n    def get_url(self):\n        return self._public_url\n\n    def get_last_error(self):\n        return self._last_error\n\n    def get_recent_logs(self):\n        return list(self._recent_logs)\n"
  },
  {
    "path": "utils/cloudflare/state.py",
    "content": "\"\"\"Cloudflare tunnel state persistence helpers.\"\"\"\n\nfrom ..config import load_config, save_config\nfrom ..network import normalize_host\n\n\ndef _get_tunnel_config(cfg):\n    tunnel_cfg = cfg.get(\"tunnel\", {})\n    if isinstance(tunnel_cfg, dict):\n        return tunnel_cfg\n    return {}\n\n\ndef load_tunnel_state():\n    cfg = load_config()\n    tunnel_cfg = _get_tunnel_config(cfg)\n    master_cfg = cfg.get(\"master\", {}) if isinstance(cfg.get(\"master\", {}), dict) else {}\n    return {\n        \"status\": tunnel_cfg.get(\"status\", \"stopped\"),\n        \"public_url\": tunnel_cfg.get(\"public_url\") or None,\n        \"pid\": tunnel_cfg.get(\"pid\"),\n        \"log_file\": tunnel_cfg.get(\"log_file\"),\n        \"previous_master_host\": tunnel_cfg.get(\"previous_master_host\"),\n        \"master_host\": master_cfg.get(\"host\"),\n    }\n\n\ndef persist_tunnel_state(\n    status=None,\n    public_url=None,\n    pid=None,\n    log_file=None,\n    previous_host=None,\n    master_host=None,\n):\n    cfg = load_config()\n    tunnel_cfg = _get_tunnel_config(cfg)\n\n    if status is not None:\n        tunnel_cfg[\"status\"] = status\n    if public_url is not None:\n        tunnel_cfg[\"public_url\"] = public_url\n    if pid is not None:\n        tunnel_cfg[\"pid\"] = pid\n    if log_file is not None:\n        tunnel_cfg[\"log_file\"] = log_file\n    if previous_host is not None:\n        tunnel_cfg[\"previous_master_host\"] = previous_host\n    if master_host is not None:\n        cfg.setdefault(\"master\", {})[\"host\"] = master_host\n\n    cfg[\"tunnel\"] = tunnel_cfg\n    save_config(cfg)\n\n\ndef clear_tunnel_state(log_file=None, previous_host=None, master_host=None):\n    persist_tunnel_state(\n        status=\"stopped\",\n        public_url=\"\",\n        pid=None,\n        log_file=log_file,\n        previous_host=previous_host,\n        master_host=master_host,\n    )\n\n\ndef resolve_restore_master_host(previous_master_host):\n    
\"\"\"Determine whether master host should be restored after tunnel stop.\"\"\"\n    cfg = load_config()\n    tunnel_cfg = _get_tunnel_config(cfg)\n    active_url = tunnel_cfg.get(\"public_url\")\n    current_master_host = (cfg.get(\"master\") or {}).get(\"host\")\n\n    if not active_url:\n        return None\n\n    active_host = normalize_host(active_url)\n    current_host = normalize_host(current_master_host)\n    if current_host == active_host:\n        return previous_master_host or \"\"\n    return None\n"
  },
  {
    "path": "utils/cloudflare/tunnel.py",
    "content": "\"\"\"Cloudflare tunnel lifecycle manager.\"\"\"\n\nimport asyncio\nimport os\nimport shutil\nimport signal\nimport subprocess\nimport time\n\nfrom ..constants import TUNNEL_START_TIMEOUT\nfrom ..logging import debug_log\nfrom ..network import get_server_port, normalize_host\nfrom ..process import is_process_alive, terminate_process\nfrom .binary import ensure_binary\nfrom .process_reader import ProcessReader\nfrom .state import clear_tunnel_state, load_tunnel_state, persist_tunnel_state, resolve_restore_master_host\n\n\nclass CloudflareTunnelManager:\n    def __init__(self):\n        self.process = None\n        self.pid = None\n        self.public_url = None\n        self.last_error = None\n        self.log_file = None\n        self.status = \"stopped\"\n        self.previous_master_host = None\n\n        self._lock = asyncio.Lock()\n        self._reader = ProcessReader()\n        self.binary_path = None\n\n        self._restore_state()\n\n    @property\n    def base_dir(self):\n        return os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n\n    def _restore_state(self):\n        state = load_tunnel_state()\n\n        self.public_url = state.get(\"public_url\") or None\n        self.previous_master_host = state.get(\"previous_master_host\")\n        self.log_file = state.get(\"log_file\")\n        pid = state.get(\"pid\")\n\n        if pid and is_process_alive(pid):\n            self.pid = pid\n            self.status = state.get(\"status\") or \"running\"\n            debug_log(f\"Detected existing cloudflared process (pid={pid})\")\n        else:\n            clear_tunnel_state(log_file=self.log_file, previous_host=self.previous_master_host)\n            self.status = \"stopped\"\n            self.pid = None\n\n    async def start_tunnel(self):\n        async with self._lock:\n            if self.process and self.process.poll() is None:\n                return {\n                    \"status\": self.status,\n                    
\"public_url\": self.public_url,\n                    \"pid\": self.process.pid,\n                    \"log_file\": self.log_file,\n                }\n\n            if self.pid and is_process_alive(self.pid):\n                debug_log(f\"Stopping stale cloudflared pid {self.pid} before starting a new one\")\n                await self.stop_tunnel()\n\n            binary = await asyncio.to_thread(ensure_binary)\n            self.binary_path = binary\n            port = get_server_port()\n            self.status = \"starting\"\n            self.last_error = None\n            self.public_url = None\n\n            state = load_tunnel_state()\n            master_host = state.get(\"master_host\") or \"\"\n            if state.get(\"previous_master_host\"):\n                self.previous_master_host = state.get(\"previous_master_host\")\n            else:\n                self.previous_master_host = master_host\n\n            os.makedirs(os.path.join(self.base_dir, \"logs\"), exist_ok=True)\n            timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n            self.log_file = os.path.join(self.base_dir, \"logs\", f\"cloudflare-{timestamp}.log\")\n\n            cmd = [\n                binary,\n                \"tunnel\",\n                \"--no-autoupdate\",\n                \"--url\",\n                f\"http://127.0.0.1:{port}\",\n            ]\n\n            debug_log(f\"Starting cloudflared: {' '.join(cmd)}\")\n            try:\n                self.process = subprocess.Popen(\n                    cmd,\n                    stdout=subprocess.PIPE,\n                    stderr=subprocess.STDOUT,\n                    text=True,\n                    bufsize=1,\n                )\n            except FileNotFoundError:\n                self.status = \"error\"\n                raise RuntimeError(\"cloudflared binary not found\")\n            except Exception as exc:\n                self.status = \"error\"\n                raise RuntimeError(f\"Failed to start cloudflared: 
{exc}\") from exc\n\n            self.pid = self.process.pid\n            persist_tunnel_state(\n                status=\"starting\",\n                pid=self.pid,\n                log_file=self.log_file,\n                previous_host=self.previous_master_host,\n            )\n\n            loop = asyncio.get_running_loop()\n            self._reader.set_log_file(self.log_file)\n            self._reader.start(self.process, loop)\n\n            try:\n                await self._reader.wait_for_url(timeout=TUNNEL_START_TIMEOUT)\n            except asyncio.TimeoutError:\n                self.last_error = \"Timed out waiting for Cloudflare to assign a URL\"\n                await self.stop_tunnel()\n                raise RuntimeError(self.last_error)\n\n            public_url = self._reader.get_url()\n            if not public_url:\n                self.last_error = self._reader.get_last_error() or \"Cloudflare tunnel failed to start\"\n                await self.stop_tunnel()\n                raise RuntimeError(self.last_error)\n\n            self.public_url = public_url\n            self.status = \"running\"\n            debug_log(f\"Cloudflare tunnel ready at {self.public_url}\")\n\n            persist_tunnel_state(\n                status=\"running\",\n                public_url=self.public_url,\n                pid=self.pid,\n                log_file=self.log_file,\n                previous_host=self.previous_master_host or \"\",\n                master_host=normalize_host(self.public_url),\n            )\n            return {\n                \"status\": self.status,\n                \"public_url\": self.public_url,\n                \"pid\": self.pid,\n                \"log_file\": self.log_file,\n            }\n\n    async def stop_tunnel(self):\n        async with self._lock:\n            pid = self.process.pid if self.process else self.pid\n            if not pid:\n                clear_tunnel_state(log_file=self.log_file, 
previous_host=self.previous_master_host)\n                self.status = \"stopped\"\n                return {\"status\": \"stopped\"}\n\n            debug_log(f\"Stopping cloudflared (pid={pid})\")\n            if self.process:\n                terminate_process(self.process, timeout=5)\n            else:\n                try:\n                    os.kill(pid, signal.SIGTERM)\n                    time.sleep(0.5)\n                except Exception as exc:  # pragma: no cover\n                    debug_log(f\"Error stopping cloudflared pid {pid}: {exc}\")\n\n            restore_host = resolve_restore_master_host(self.previous_master_host)\n\n            self.status = \"stopped\"\n            self.public_url = None\n            self.pid = None\n            self.process = None\n            self.last_error = None\n            self._reader.stop()\n\n            clear_tunnel_state(\n                log_file=self.log_file,\n                previous_host=self.previous_master_host,\n                master_host=restore_host,\n            )\n            return {\"status\": \"stopped\"}\n\n    def get_status(self):\n        alive = False\n        pid = self.process.pid if self.process else self.pid\n        if pid:\n            alive = is_process_alive(pid)\n            if not alive and self.status == \"running\":\n                self.status = \"stopped\"\n\n        return {\n            \"status\": self.status,\n            \"public_url\": self.public_url,\n            \"pid\": pid,\n            \"log_file\": self.log_file,\n            \"last_error\": self.last_error or self._reader.get_last_error(),\n            \"binary_path\": self.binary_path or shutil.which(\"cloudflared\"),\n            \"recent_logs\": self._reader.get_recent_logs()[-20:],\n            \"previous_master_host\": self.previous_master_host,\n        }\n"
  },
  {
    "path": "utils/config.py",
    "content": "\"\"\"\nConfiguration management for ComfyUI-Distributed.\n\"\"\"\nimport asyncio\nimport os\nimport json\nfrom contextlib import asynccontextmanager\nfrom .logging import log\n\n# Import defaults for timeout fallbacks\nfrom .constants import HEARTBEAT_TIMEOUT\n\nCONFIG_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"gpu_config.json\")\n_config_cache = None\n_config_mtime = 0.0\n_config_lock = asyncio.Lock()\n\n\ndef _config_path():\n    return CONFIG_FILE\n\ndef get_default_config():\n    \"\"\"Returns the default configuration dictionary. Single source of truth.\"\"\"\n    return {\n        \"master\": {\"host\": \"\"},\n        \"workers\": [],\n        \"settings\": {\n            \"debug\": False,\n            \"auto_launch_workers\": False,\n            \"stop_workers_on_master_exit\": True,\n            \"master_delegate_only\": False,\n            \"websocket_orchestration\": True,\n            \"worker_probe_concurrency\": 8,\n            \"worker_prep_concurrency\": 4,\n            \"media_sync_concurrency\": 2,\n            \"media_sync_timeout_seconds\": 120\n        },\n        \"tunnel\": {\n            \"status\": \"stopped\",\n            \"public_url\": \"\",\n            \"pid\": None,\n            \"log_file\": \"\",\n            \"previous_master_host\": \"\"\n        }\n    }\n\ndef _merge_with_defaults(data, defaults):\n    \"\"\"Recursively merge loaded config data with default keys.\"\"\"\n    if not isinstance(data, dict):\n        return defaults\n\n    merged = {}\n    for key, default_value in defaults.items():\n        loaded_value = data.get(key, default_value)\n        if isinstance(default_value, dict) and isinstance(loaded_value, dict):\n            merged[key] = _merge_with_defaults(loaded_value, default_value)\n        else:\n            merged[key] = loaded_value\n\n    # Preserve unknown keys for forward compatibility.\n    for key, value in data.items():\n        if key not in merged:\n         
   merged[key] = value\n\n    return merged\n\n\ndef invalidate_config_cache():\n    \"\"\"Invalidate in-memory config cache so next load reads from disk.\"\"\"\n    global _config_cache, _config_mtime\n    _config_cache = None\n    _config_mtime = 0.0\n\n\ndef load_config():\n    \"\"\"Loads the config, falling back to defaults if the file is missing or invalid.\"\"\"\n    global _config_cache, _config_mtime\n    path = _config_path()\n\n    try:\n        mtime = os.path.getmtime(path)\n    except OSError:\n        if _config_cache is None:\n            _config_cache = get_default_config()\n        return _config_cache\n\n    if _config_cache is None or mtime != _config_mtime:\n        try:\n            with open(path, 'r', encoding='utf-8') as f:\n                loaded = json.load(f)\n            _config_cache = _merge_with_defaults(loaded, get_default_config())\n        except Exception as e:\n            log(f\"Error loading config, using defaults: {e}\")\n            _config_cache = get_default_config()\n        _config_mtime = mtime\n\n    return _config_cache\n\ndef save_config(config):\n    \"\"\"Saves the configuration to file.\"\"\"\n    tmp_path = f\"{_config_path()}.tmp\"\n    try:\n        with open(tmp_path, 'w', encoding='utf-8') as f:\n            json.dump(config, f, indent=2)\n            f.flush()\n            os.fsync(f.fileno())\n        os.replace(tmp_path, _config_path())\n        invalidate_config_cache()\n        return True\n    except Exception as e:\n        try:\n            os.unlink(tmp_path)\n        except OSError:\n            pass\n        log(f\"Error saving config: {e}\")\n        return False\n\n\n@asynccontextmanager\nasync def config_transaction():\n    \"\"\"Acquire config lock, yield loaded config, and save if changed.\"\"\"\n    async with _config_lock:\n        config = load_config()\n        original_snapshot = json.dumps(config, sort_keys=True)\n        yield config\n        updated_snapshot = json.dumps(config, 
sort_keys=True)\n        if updated_snapshot != original_snapshot:\n            if not save_config(config):\n                raise RuntimeError(\"Failed to save config\")\n\ndef ensure_config_exists():\n    \"\"\"Creates default config file if it doesn't exist. Used by __init__.py\"\"\"\n    if not os.path.exists(_config_path()):\n        default_config = get_default_config()\n        if save_config(default_config):\n            from .logging import debug_log\n            debug_log(\"Created default config file\")\n        else:\n            log(\"Could not create default config file\")\n\ndef get_worker_timeout_seconds(default: int = HEARTBEAT_TIMEOUT) -> int:\n    \"\"\"Return the unified worker timeout (seconds).\n\n    Priority:\n    1) UI-configured setting `settings.worker_timeout_seconds`\n    2) Fallback to provided `default` (defaults to HEARTBEAT_TIMEOUT which itself\n       can be overridden via the COMFYUI_HEARTBEAT_TIMEOUT env var)\n\n    This value should be used anywhere we consider a worker \"timed out\" from the\n    master's perspective (e.g., collector waits, upscaler result collection).\n    \"\"\"\n    try:\n        cfg = load_config()\n        val = int(cfg.get('settings', {}).get('worker_timeout_seconds', default))\n        return max(1, val)\n    except Exception:\n        return max(1, int(default))\n\n\ndef is_master_delegate_only() -> bool:\n    \"\"\"Returns True when master should skip local workload and act as orchestrator only.\"\"\"\n    try:\n        cfg = load_config()\n        return bool(cfg.get('settings', {}).get('master_delegate_only', False))\n    except Exception:\n        return False\n"
  },
  {
    "path": "utils/constants.py",
    "content": "\"\"\"\nShared constants for ComfyUI-Distributed.\n\"\"\"\nimport os\n\n# Timeouts (in seconds)\nWORKER_JOB_TIMEOUT = 30.0\nTILE_COLLECTION_TIMEOUT = 30.0\nTILE_WAIT_TIMEOUT = 30.0\nPROCESS_TERMINATION_TIMEOUT = 5.0\n\n# Process monitoring\nWORKER_CHECK_INTERVAL = 2.0\nSTATUS_CHECK_INTERVAL = 5.0\n\n# Cloudflare tunnel\nTUNNEL_START_TIMEOUT = float(os.environ.get(\"TUNNEL_START_TIMEOUT\", \"25\"))\nCLOUDFLARE_LOG_BUFFER_SIZE = 200\n\n# Network\nCHUNK_SIZE = 8192\nLOG_TAIL_BYTES = 65536  # 64KB\n\n# File paths\nWORKER_LOG_PATTERN = \"distributed_worker_*.log\"\n\n# Worker management\nWORKER_STARTUP_DELAY = 2.0\n\n# Tile transfer\nTILE_TRANSFER_TIMEOUT = 30.0\n\n# Process cleanup\nPROCESS_WAIT_TIMEOUT = 3.0\nQUEUE_INIT_TIMEOUT = 5.0\nTILE_SEND_TIMEOUT = 60.0\nJOB_INIT_GRACE_PERIOD = 10.0\n\n# Memory operations  \nMEMORY_CLEAR_DELAY = 0.5\n\n# Batch processing\nMAX_BATCH = int(os.environ.get('COMFYUI_MAX_BATCH', '20'))  # Maximum items per batch to prevent timeouts/OOM (~100MB chunks for 512x512 PNGs)\n\n# Heartbeat monitoring\nHEARTBEAT_INTERVAL = float(os.environ.get('COMFYUI_HEARTBEAT_INTERVAL', '10'))  # Heartbeat/check interval in seconds\nHEARTBEAT_TIMEOUT = int(os.environ.get('COMFYUI_HEARTBEAT_TIMEOUT', '60'))  # Worker heartbeat timeout in seconds (default 60s)\n\n# USDU result collection\nDYNAMIC_MODE_MAX_POLL_TIMEOUT = 10.0\n\n# Static mode job poll loop\nJOB_POLL_INTERVAL = 1.0\nJOB_POLL_MAX_ATTEMPTS = 20\n\n# Orchestration pipeline\nORCHESTRATION_WORKER_PROBE_CONCURRENCY = int(\n    os.environ.get('COMFYUI_ORCHESTRATION_WORKER_PROBE_CONCURRENCY', '8')\n)\nORCHESTRATION_WORKER_PREP_CONCURRENCY = int(\n    os.environ.get('COMFYUI_ORCHESTRATION_WORKER_PREP_CONCURRENCY', '4')\n)\nORCHESTRATION_MEDIA_SYNC_CONCURRENCY = int(\n    os.environ.get('COMFYUI_ORCHESTRATION_MEDIA_SYNC_CONCURRENCY', '2')\n)\nORCHESTRATION_MEDIA_SYNC_TIMEOUT = float(\n    os.environ.get('COMFYUI_ORCHESTRATION_MEDIA_SYNC_TIMEOUT', '120')\n)\n"
  },
  {
    "path": "utils/crop_model_patch.py",
    "content": "from contextlib import contextmanager\n\nimport torch\n\nfrom .logging import debug_log\nfrom .usdu_utils import resize_region\n\n\n@contextmanager\ndef crop_model_cond(model, crop_regions, init_size, canvas_size, tile_size, latent_crop=False):\n    \"\"\"Clone model and crop compatible model patches for tile-local sampling.\"\"\"\n    try:\n        patched_model = model.clone()\n    except Exception:\n        # Fallback to original model when clone/patch access is unavailable.\n        yield model\n        return\n\n    patches = (\n        patched_model\n        .model_options\n        .get(\"transformer_options\", {})\n        .get(\"patches\", {})\n    )\n    applied_croppers = {}\n    for _module, module_patches in patches.items():\n        for patch in module_patches:\n            if id(patch) in applied_croppers:\n                continue\n            if type(patch).__name__ not in (\"DiffSynthCnetPatch\", \"ZImageControlPatch\"):\n                continue\n            try:\n                cropper = ModelPatchCropper(patch).crop(crop_regions, canvas_size, latent_crop)\n                applied_croppers[id(patch)] = cropper\n            except Exception as exc:\n                debug_log(f\"crop_model_cond: patch crop skipped for {type(patch).__name__}: {exc}\")\n    try:\n        yield patched_model\n    finally:\n        for cropper in applied_croppers.values():\n            del cropper\n\n\nclass ModelPatchCropper:\n    \"\"\"Stateful crop helper that restores model patch tensors on cleanup.\"\"\"\n\n    def __init__(self, patch):\n        self.patch = patch\n        self.original_state = {\n            \"image\": patch.image.clone() if isinstance(patch.image, torch.Tensor) else patch.image,\n            \"encoded_image\": patch.encoded_image.clone() if isinstance(patch.encoded_image, torch.Tensor) else patch.encoded_image,\n            \"encoded_image_size\": patch.encoded_image_size,\n        }\n        self.patch_class = 
type(patch).__name__\n        required_attrs = (\n            \"image\",\n            \"model_patch\",\n            \"vae\",\n            \"strength\",\n            \"encoded_image\",\n            \"encoded_image_size\",\n        )\n        missing = [attr for attr in required_attrs if not hasattr(patch, attr)]\n        if missing:\n            raise AttributeError(\n                f\"{self.patch_class} missing required attrs: {', '.join(missing)}\"\n            )\n\n    def __del__(self):\n        self.patch.image = self.original_state[\"image\"]\n        self.patch.encoded_image = self.original_state[\"encoded_image\"]\n        self.patch.encoded_image_size = self.original_state[\"encoded_image_size\"]\n\n    def crop(self, crop_regions, canvas_size, latent_crop=True):\n        patch = self.patch\n\n        if not isinstance(crop_regions, list):\n            crop_regions = [crop_regions]\n\n        image_size = (patch.image.shape[2], patch.image.shape[1])  # (W,H)\n\n        cropped_images = []\n        for crop_region in crop_regions:\n            resized_crop = resize_region(crop_region, canvas_size, image_size)\n            x1, y1, x2, y2 = resized_crop\n            cropped_image = patch.image[:, y1:y2, x1:x2, :]\n            cropped_images.append(cropped_image)\n\n        concatenated_image = torch.cat(cropped_images, dim=0)\n        patch.image = concatenated_image\n        patch.encoded_image_size = (\n            concatenated_image.shape[1],\n            concatenated_image.shape[2],\n        )\n\n        if latent_crop:\n            downscale_ratio = patch.vae.spacial_compression_encode()\n            cropped_latents = []\n            for crop_region in crop_regions:\n                resized_crop = resize_region(crop_region, canvas_size, image_size)\n                x1, y1, x2, y2 = tuple(x // downscale_ratio for x in resized_crop)\n                cropped_latent = patch.encoded_image[:, :, y1:y2, x1:x2]\n                
cropped_latents.append(cropped_latent)\n            patch.encoded_image = torch.cat(cropped_latents, dim=0)\n        else:\n            patch.__init__(\n                patch.model_patch,\n                patch.vae,\n                concatenated_image,\n                patch.strength,\n                inpaint_image=patch.inpaint_image,\n                mask=patch.mask,\n            )\n        return self\n"
  },
  {
    "path": "utils/exceptions.py",
    "content": "\"\"\"Custom exceptions for ComfyUI-Distributed.\"\"\"\n\n\nclass DistributedError(Exception):\n    \"\"\"Base exception for all ComfyUI-Distributed errors.\"\"\"\n\n\nclass WorkerError(DistributedError):\n    \"\"\"Error related to a specific distributed worker.\"\"\"\n\n    def __init__(self, message, worker_id=None):\n        super().__init__(message)\n        self.worker_id = worker_id\n\n\nclass WorkerTimeoutError(WorkerError):\n    \"\"\"Worker did not respond within the expected timeout.\"\"\"\n\n\nclass WorkerNotAvailableError(WorkerError):\n    \"\"\"Worker is unreachable or not running.\"\"\"\n\n\nclass JobQueueError(DistributedError):\n    \"\"\"Error in distributed job queue management.\"\"\"\n\n\nclass TileCollectionError(DistributedError):\n    \"\"\"Error collecting processed tiles from workers.\"\"\"\n\n\nclass ProcessError(DistributedError):\n    \"\"\"Error managing a worker subprocess.\"\"\"\n\n    def __init__(self, message, pid=None, worker_id=None):\n        super().__init__(message)\n        self.pid = pid\n        self.worker_id = worker_id\n\n\nclass TunnelError(DistributedError):\n    \"\"\"Error managing the Cloudflare tunnel.\"\"\"\n"
  },
  {
    "path": "utils/image.py",
    "content": "\"\"\"\nImage and tensor conversion utilities for ComfyUI-Distributed.\n\"\"\"\nimport torch\nimport numpy as np\nfrom PIL import Image\n\ndef tensor_to_pil(img_tensor, batch_index=0):\n    \"\"\"Takes a batch of images in tensor form [B, H, W, C] and returns an RGB PIL Image.\"\"\"\n    return Image.fromarray((255 * img_tensor[batch_index].cpu().numpy()).astype(np.uint8))\n\ndef pil_to_tensor(image):\n    \"\"\"Takes a PIL image and returns a tensor of shape [1, H, W, C].\"\"\"\n    image = np.array(image).astype(np.float32) / 255.0\n    image = torch.from_numpy(image).unsqueeze(0)\n    if len(image.shape) == 3:  # If grayscale, add channel dimension\n        image = image.unsqueeze(-1)\n    return image\n\ndef ensure_contiguous(tensor):\n    \"\"\"Ensure tensor is contiguous in memory.\"\"\"\n    if not tensor.is_contiguous():\n        return tensor.contiguous()\n    return tensor\n\n"
  },
  {
    "path": "utils/logging.py",
    "content": "\"\"\"\nShared logging utilities for ComfyUI-Distributed.\n\"\"\"\nimport os\nimport json\nimport time\n\n# Config file is in parent directory\nCONFIG_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"gpu_config.json\")\n\n_debug_cache: bool | None = None\n_debug_cache_time: float = 0.0\n_DEBUG_TTL: float = 5.0\n\ndef is_debug_enabled():\n    \"\"\"Check if debug is enabled.\"\"\"\n    global _debug_cache, _debug_cache_time\n\n    now = time.monotonic()\n    if _debug_cache is not None and (now - _debug_cache_time) < _DEBUG_TTL:\n        return _debug_cache\n\n    enabled = False\n    if os.path.exists(CONFIG_FILE):\n        try:\n            with open(CONFIG_FILE, 'r') as f:\n                config = json.load(f)\n                enabled = config.get(\"settings\", {}).get(\"debug\", False)\n        except (OSError, json.JSONDecodeError, ValueError):\n            pass\n\n    _debug_cache = enabled\n    _debug_cache_time = now\n    return enabled\n\ndef debug_log(message):\n    \"\"\"Log debug messages only if debug is enabled in config.\"\"\"\n    if is_debug_enabled():\n        print(f\"[Distributed] {message}\")\n\ndef log(message):\n    \"\"\"Always log important messages.\"\"\"\n    print(f\"[Distributed] {message}\")\n"
  },
  {
    "path": "utils/network.py",
    "content": "\"\"\"\nNetwork and API utilities for ComfyUI-Distributed.\n\"\"\"\nimport asyncio\nimport aiohttp\nimport re\nimport server\nfrom aiohttp import web\nfrom .logging import debug_log\n\n# Shared session for connection pooling\n_client_session = None\n\nasync def get_client_session():\n    \"\"\"Get or create a shared aiohttp client session.\"\"\"\n    global _client_session\n    try:\n        asyncio.get_running_loop()\n    except RuntimeError as exc:\n        raise RuntimeError(\"get_client_session() requires an active asyncio event loop.\") from exc\n\n    if _client_session is None or _client_session.closed:\n        connector = aiohttp.TCPConnector(limit=100, limit_per_host=30)\n        # Don't set timeout here - set it per request\n        _client_session = aiohttp.ClientSession(connector=connector)\n    return _client_session\n\nasync def cleanup_client_session():\n    \"\"\"Clean up the shared client session.\"\"\"\n    global _client_session\n    if _client_session and not _client_session.closed:\n        await _client_session.close()\n        _client_session = None\n\nasync def handle_api_error(request, error, status=500):\n    \"\"\"Standardized error response handler.\"\"\"\n    if isinstance(error, list):\n        messages = [str(item) for item in error]\n        debug_log(f\"API Error [{status}]: {messages}\")\n        return web.json_response({\"errors\": messages}, status=status)\n\n    message = str(error)\n    debug_log(f\"API Error [{status}]: {message}\")\n    return web.json_response({\"error\": message}, status=status)\n\ndef get_server_port():\n    \"\"\"Get the ComfyUI server port.\"\"\"\n    import server\n    return server.PromptServer.instance.port\n\ndef get_server_loop():\n    \"\"\"Get the ComfyUI server event loop.\"\"\"\n    import server\n    return server.PromptServer.instance.loop\n\n\ndef normalize_host(value):\n    if value is None:\n        return None\n    if not isinstance(value, str):\n        return value\n    
host = value.strip()\n    if not host:\n        return host\n    host = re.sub(r\"^https?://\", \"\", host, flags=re.IGNORECASE)\n    return host.split(\"/\")[0]\n\n\ndef _split_host_and_port(host):\n    if not host:\n        return host, None\n\n    if host.startswith(\"[\"):\n        match = re.match(r\"^(\\[[^\\]]+\\])(?::(\\d+))?$\", host)\n        if match:\n            parsed_port = int(match.group(2)) if match.group(2) else None\n            return match.group(1), parsed_port\n        return host, None\n\n    if host.count(\":\") == 1:\n        candidate_host, candidate_port = host.rsplit(\":\", 1)\n        if candidate_port.isdigit():\n            return candidate_host, int(candidate_port)\n\n    return host, None\n\n\ndef build_worker_url(worker, endpoint=\"\"):\n    \"\"\"Construct the worker base URL with optional endpoint.\"\"\"\n    host = (worker.get(\"host\") or \"\").strip()\n    port = int(worker.get(\"port\", worker.get(\"listen_port\", 8188)) or 8188)\n\n    if not host:\n        host = getattr(server.PromptServer.instance, \"address\", \"127.0.0.1\") or \"127.0.0.1\"\n\n    if host.startswith((\"http://\", \"https://\")):\n        base = host.rstrip(\"/\")\n    else:\n        is_cloud = worker.get(\"type\") == \"cloud\" or host.endswith(\".proxy.runpod.net\") or port == 443\n        scheme = \"https\" if is_cloud else \"http\"\n        default_port = 443 if scheme == \"https\" else 80\n        port_part = \"\" if port == default_port else f\":{port}\"\n        base = f\"{scheme}://{host}{port_part}\"\n\n    return f\"{base}{endpoint}\"\n\n\nasync def probe_worker(worker_url: str, timeout: float = 5.0) -> dict | None:\n    \"\"\"GET {worker_url}/prompt. 
Returns parsed JSON or None on any failure.\"\"\"\n    base_url = (worker_url or \"\").strip().rstrip(\"/\")\n    if not base_url:\n        return None\n    probe_url = base_url if base_url.endswith(\"/prompt\") else f\"{base_url}/prompt\"\n    session = await get_client_session()\n    try:\n        async with session.get(\n            probe_url,\n            timeout=aiohttp.ClientTimeout(total=float(timeout)),\n        ) as response:\n            if response.status != 200:\n                debug_log(f\"[Distributed] Worker probe non-200 status: {response.status} ({probe_url})\")\n                return None\n            payload = await response.json()\n            if isinstance(payload, dict):\n                return payload\n            debug_log(f\"[Distributed] Worker probe returned non-object JSON: {probe_url}\")\n            return None\n    except asyncio.TimeoutError:\n        debug_log(f\"[Distributed] Worker probe timed out: {probe_url}\")\n        return None\n    except aiohttp.ClientConnectorError:\n        debug_log(f\"[Distributed] Worker unreachable: {probe_url}\")\n        return None\n    except Exception as exc:\n        debug_log(f\"[Distributed] Worker probe error ({probe_url}): {exc}\")\n        return None\n\n\ndef build_master_url(config=None, prompt_server_instance=None):\n    \"\"\"Build the best public URL workers should use to reach the master.\"\"\"\n    if config is None:\n        from .config import load_config\n        config = load_config()\n\n    prompt_server_instance = prompt_server_instance or server.PromptServer.instance\n    master_cfg = (config or {}).get(\"master\", {}) or {}\n    configured_host = (master_cfg.get(\"host\") or \"\").strip()\n    runtime_port = getattr(prompt_server_instance, \"port\", 8188) or 8188\n\n    def _needs_https(hostname):\n        hostname = hostname.lower()\n        https_domains = (\n            \".proxy.runpod.net\",\n            \".ngrok-free.app\",\n            \".ngrok-free.dev\",\n          
  \".ngrok.io\",\n            \".trycloudflare.com\",\n            \".cloudflare.dev\",\n        )\n        return any(hostname.endswith(suffix) for suffix in https_domains)\n\n    if configured_host:\n        if configured_host.startswith((\"http://\", \"https://\")):\n            return configured_host.rstrip(\"/\")\n\n        host, explicit_port = _split_host_and_port(configured_host)\n        port = explicit_port if explicit_port is not None else int(runtime_port)\n        scheme = \"https\" if _needs_https(host) or port == 443 else \"http\"\n        default_port_for_scheme = 443 if scheme == \"https\" else 80\n        if explicit_port is None and scheme == \"https\" and _needs_https(host):\n            port = default_port_for_scheme\n        port_part = \"\" if port == default_port_for_scheme else f\":{port}\"\n        return f\"{scheme}://{host}{port_part}\"\n\n    address = getattr(prompt_server_instance, \"address\", \"127.0.0.1\") or \"127.0.0.1\"\n    if address in (\"0.0.0.0\", \"::\"):\n        address = \"127.0.0.1\"\n    port = int(runtime_port)\n    scheme = \"https\" if port == 443 else \"http\"\n    default_port_for_scheme = 443 if scheme == \"https\" else 80\n    port_part = \"\" if port == default_port_for_scheme else f\":{port}\"\n    return f\"{scheme}://{address}{port_part}\"\n\n\ndef build_master_callback_url(worker, config=None, prompt_server_instance=None):\n    \"\"\"Build the callback URL a specific worker should use to reach the master.\"\"\"\n    prompt_server_instance = prompt_server_instance or server.PromptServer.instance\n\n    worker_type = str((worker or {}).get(\"type\") or \"\").strip().lower()\n    worker_host = normalize_host((worker or {}).get(\"host\"))\n    local_hosts = {\"\", \"localhost\", \"127.0.0.1\", \"::1\", \"[::1]\", \"0.0.0.0\"}\n\n    is_local_worker = worker_type == \"local\" or worker_host in local_hosts\n    if is_local_worker:\n        port = int(getattr(prompt_server_instance, \"port\", 8188) or 8188)\n     
   scheme = \"https\" if port == 443 else \"http\"\n        default_port_for_scheme = 443 if scheme == \"https\" else 80\n        port_part = \"\" if port == default_port_for_scheme else f\":{port}\"\n        return f\"{scheme}://127.0.0.1{port_part}\"\n\n    return build_master_url(config=config, prompt_server_instance=prompt_server_instance)\n"
  },
  {
    "path": "utils/process.py",
    "content": "\"\"\"\nProcess management utilities for ComfyUI-Distributed.\n\"\"\"\nimport os\nimport subprocess\nimport platform\nimport signal\n\ndef is_process_alive(pid):\n    \"\"\"Check if a process with given PID is still alive.\"\"\"\n    try:\n        if platform.system() == \"Windows\":\n            # Windows: use tasklist\n            result = subprocess.run(['tasklist', '/FI', f'PID eq {pid}'], \n                                  capture_output=True, text=True)\n            return str(pid) in result.stdout\n        else:\n            # Unix: send signal 0\n            os.kill(pid, 0)\n            return True\n    except (OSError, subprocess.SubprocessError):\n        return False\n\ndef terminate_process(process, timeout=5):\n    \"\"\"Gracefully terminate a process with timeout.\"\"\"\n    if process.poll() is None:  # Still running\n        process.terminate()\n        try:\n            process.wait(timeout=timeout)\n        except subprocess.TimeoutExpired:\n            process.kill()\n            process.wait()\n\ndef get_python_executable():\n    \"\"\"Get the Python executable path.\"\"\"\n    import sys\n    return sys.executable\n\n"
  },
  {
    "path": "utils/trace_logger.py",
    "content": "from .logging import debug_log, log\n\n\ndef trace_prefix(trace_execution_id: str) -> str:\n    return f\"[Distributed][exec:{trace_execution_id}]\"\n\n\ndef trace_debug(trace_execution_id: str, message: str) -> None:\n    debug_log(f\"{trace_prefix(trace_execution_id)} {message}\")\n\n\ndef trace_info(trace_execution_id: str, message: str) -> None:\n    log(f\"{trace_prefix(trace_execution_id)} {message}\")\n"
  },
  {
    "path": "utils/usdu_managment.py",
    "content": "\"\"\"Backward-compatibility shim for USDU helpers.\n\nRoute handlers and job logic now live in:\n- upscale.job_store\n- upscale.job_timeout\n- upscale.payload_parsers\n- api.usdu_routes\n\"\"\"\n\nfrom ..upscale.conditioning import clone_conditioning, clone_control_chain\nfrom ..upscale.job_store import (\n    MAX_PAYLOAD_SIZE,\n    _cleanup_job,\n    _drain_results_queue,\n    _get_completed_count,\n    _init_job_queue,\n    _mark_task_completed,\n    ensure_tile_jobs_initialized,\n    init_dynamic_job,\n    init_static_job_batched,\n)\nfrom ..upscale.job_timeout import _check_and_requeue_timed_out_workers\nfrom ..upscale.payload_parsers import _parse_tiles_from_form\nfrom .logging import debug_log\nfrom .network import get_client_session\n\n\nasync def _send_heartbeat_to_master(multi_job_id, master_url, worker_id):\n    \"\"\"Send heartbeat to master from worker-side processing loops.\"\"\"\n    try:\n        data = {'multi_job_id': multi_job_id, 'worker_id': str(worker_id)}\n        session = await get_client_session()\n        url = f\"{master_url}/distributed/heartbeat\"\n        async with session.post(url, json=data) as response:\n            response.raise_for_status()\n    except Exception as e:\n        debug_log(f\"Heartbeat failed: {e}\")\n\n\n__all__ = [\n    \"MAX_PAYLOAD_SIZE\",\n    \"_check_and_requeue_timed_out_workers\",\n    \"_cleanup_job\",\n    \"_drain_results_queue\",\n    \"_get_completed_count\",\n    \"_init_job_queue\",\n    \"_mark_task_completed\",\n    \"_parse_tiles_from_form\",\n    \"_send_heartbeat_to_master\",\n    \"clone_conditioning\",\n    \"clone_control_chain\",\n    \"ensure_tile_jobs_initialized\",\n    \"init_dynamic_job\",\n    \"init_static_job_batched\",\n]\n"
  },
  {
    "path": "utils/usdu_utils.py",
    "content": "import numpy as np\nfrom PIL import Image, ImageFilter\nimport torch\nimport torch.nn.functional as F\nfrom torchvision.transforms import GaussianBlur\nimport math\n\nif (not hasattr(Image, 'Resampling')):  # For older versions of Pillow\n    Image.Resampling = Image\n\nBLUR_KERNEL_SIZE = 15\n\n\ndef tensor_to_pil(img_tensor, batch_index=0):\n    # Takes a batch of images in the form of a tensor of shape [batch_size, height, width, channels]\n    # and returns an RGB PIL Image. Assumes channels=3\n    return Image.fromarray((255 * img_tensor[batch_index].cpu().numpy()).astype(np.uint8))\n\n\ndef pil_to_tensor(image):\n    # Takes a PIL image and returns a tensor of shape [1, height, width, channels]\n    image = np.array(image).astype(np.float32) / 255.0\n    image = torch.from_numpy(image).unsqueeze(0)\n    if len(image.shape) == 3:  # If the image is grayscale, add a channel dimension\n        image = image.unsqueeze(-1)\n    return image\n\n\ndef controlnet_hint_to_pil(tensor, batch_index=0):\n    return tensor_to_pil(tensor.movedim(1, -1), batch_index)\n\n\ndef pil_to_controlnet_hint(img):\n    return pil_to_tensor(img).movedim(-1, 1)\n\n\ndef crop_tensor(tensor, region):\n    # Takes a tensor of shape [batch_size, height, width, channels] and crops it to the given region\n    x1, y1, x2, y2 = region\n    return tensor[:, y1:y2, x1:x2, :]\n\n\ndef resize_tensor(tensor, size, mode=\"nearest-exact\"):\n    # Takes a tensor of shape [B, C, H, W] and resizes\n    # it to a shape of [B, C, size[0], size[1]] using the given mode\n    return torch.nn.functional.interpolate(tensor, size=size, mode=mode)\n\n\ndef get_crop_region(mask, pad=0):\n    # Takes a black and white PIL image in 'L' mode and returns the coordinates of the white rectangular mask region\n    # Should be equivalent to the get_crop_region function from https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/master/modules/masking.py\n    coordinates = mask.getbbox()\n    if 
coordinates is not None:\n        x1, y1, x2, y2 = coordinates\n    else:\n        x1, y1, x2, y2 = mask.width, mask.height, 0, 0\n    # Apply padding\n    x1 = max(x1 - pad, 0)\n    y1 = max(y1 - pad, 0)\n    x2 = min(x2 + pad, mask.width)\n    y2 = min(y2 + pad, mask.height)\n    return fix_crop_region((x1, y1, x2, y2), (mask.width, mask.height))\n\n\ndef fix_crop_region(region, image_size):\n    # Remove the extra pixel added by the get_crop_region function\n    image_width, image_height = image_size\n    x1, y1, x2, y2 = region\n    if x2 < image_width:\n        x2 -= 1\n    if y2 < image_height:\n        y2 -= 1\n    return x1, y1, x2, y2\n\n\ndef expand_crop(region, width, height, target_width, target_height):\n    '''\n    Expands a crop region to a specified target size.\n    :param region: A tuple of the form (x1, y1, x2, y2) denoting the upper left and the lower right points\n        of the rectangular region. Expected to have x2 > x1 and y2 > y1.\n    :param width: The width of the image the crop region is from.\n    :param height: The height of the image the crop region is from.\n    :param target_width: The desired width of the crop region.\n    :param target_height: The desired height of the crop region.\n    '''\n    x1, y1, x2, y2 = region\n    actual_width = x2 - x1\n    actual_height = y2 - y1\n    # target_width = math.ceil(actual_width / 8) * 8\n    # target_height = math.ceil(actual_height / 8) * 8\n\n    # Try to expand region to the right of half the difference\n    width_diff = target_width - actual_width\n    x2 = min(x2 + width_diff // 2, width)\n    # Expand region to the left of the difference including the pixels that could not be expanded to the right\n    width_diff = target_width - (x2 - x1)\n    x1 = max(x1 - width_diff, 0)\n    # Try the right again\n    width_diff = target_width - (x2 - x1)\n    x2 = min(x2 + width_diff, width)\n\n    # Try to expand region to the bottom of half the difference\n    height_diff = target_height - 
actual_height\n    y2 = min(y2 + height_diff // 2, height)\n    # Expand region to the top of the difference including the pixels that could not be expanded to the bottom\n    height_diff = target_height - (y2 - y1)\n    y1 = max(y1 - height_diff, 0)\n    # Try the bottom again\n    height_diff = target_height - (y2 - y1)\n    y2 = min(y2 + height_diff, height)\n\n    return (x1, y1, x2, y2), (target_width, target_height)\n\n\ndef resize_region(region, init_size, resize_size):\n    # Resize a crop so that it fits an image that was resized to the given width and height\n    x1, y1, x2, y2 = region\n    init_width, init_height = init_size\n    resize_width, resize_height = resize_size\n    x1 = math.floor(x1 * resize_width / init_width)\n    x2 = math.ceil(x2 * resize_width / init_width)\n    y1 = math.floor(y1 * resize_height / init_height)\n    y2 = math.ceil(y2 * resize_height / init_height)\n    return (x1, y1, x2, y2)\n\n\ndef pad_image(image, left_pad, right_pad, top_pad, bottom_pad, fill=False, blur=False):\n    '''\n    Pads an image with the given number of pixels on each side and fills the padding with data from the edges.\n    :param image: A PIL image\n    :param left_pad: The number of pixels to pad on the left side\n    :param right_pad: The number of pixels to pad on the right side\n    :param top_pad: The number of pixels to pad on the top side\n    :param bottom_pad: The number of pixels to pad on the bottom side\n    :param blur: Whether to blur the padded edges\n    :return: A PIL image with size (image.width + left_pad + right_pad, image.height + top_pad + bottom_pad)\n    '''\n    left_edge = image.crop((0, 1, 1, image.height - 1))\n    right_edge = image.crop((image.width - 1, 1, image.width, image.height - 1))\n    top_edge = image.crop((1, 0, image.width - 1, 1))\n    bottom_edge = image.crop((1, image.height - 1, image.width - 1, image.height))\n    new_width = image.width + left_pad + right_pad\n    new_height = image.height + top_pad + 
bottom_pad\n    padded_image = Image.new(image.mode, (new_width, new_height))\n    padded_image.paste(image, (left_pad, top_pad))\n    if fill:\n        for i in range(left_pad):\n            edge = left_edge.resize(\n                (1, new_height - i * (top_pad + bottom_pad) // left_pad), resample=Image.Resampling.NEAREST)\n            padded_image.paste(edge, (i, i * top_pad // left_pad))\n        for i in range(right_pad):\n            edge = right_edge.resize(\n                (1, new_height - i * (top_pad + bottom_pad) // right_pad), resample=Image.Resampling.NEAREST)\n            padded_image.paste(edge, (new_width - 1 - i, i * top_pad // right_pad))\n        for i in range(top_pad):\n            edge = top_edge.resize(\n                (new_width - i * (left_pad + right_pad) // top_pad, 1), resample=Image.Resampling.NEAREST)\n            padded_image.paste(edge, (i * left_pad // top_pad, i))\n        for i in range(bottom_pad):\n            edge = bottom_edge.resize(\n                (new_width - i * (left_pad + right_pad) // bottom_pad, 1), resample=Image.Resampling.NEAREST)\n            padded_image.paste(edge, (i * left_pad // bottom_pad, new_height - 1 - i))\n        if blur and not (left_pad == right_pad == top_pad == bottom_pad == 0):\n            padded_image = padded_image.filter(ImageFilter.GaussianBlur(BLUR_KERNEL_SIZE))\n            padded_image.paste(image, (left_pad, top_pad))\n    return padded_image\n\n\ndef pad_image2(image, left_pad, right_pad, top_pad, bottom_pad, fill=False, blur=False):\n    '''\n    Pads an image with the given number of pixels on each side and fills the padding with data from the edges. 
\n    Faster than pad_image, but only pads with edge data in straight lines.\n    :param image: A PIL image\n    :param left_pad: The number of pixels to pad on the left side\n    :param right_pad: The number of pixels to pad on the right side\n    :param top_pad: The number of pixels to pad on the top side\n    :param bottom_pad: The number of pixels to pad on the bottom side\n    :param blur: Whether to blur the padded edges\n    :return: A PIL image with size (image.width + left_pad + right_pad, image.height + top_pad + bottom_pad)\n    '''\n    left_edge = image.crop((0, 1, 1, image.height - 1))\n    right_edge = image.crop((image.width - 1, 1, image.width, image.height - 1))\n    top_edge = image.crop((1, 0, image.width - 1, 1))\n    bottom_edge = image.crop((1, image.height - 1, image.width - 1, image.height))\n    new_width = image.width + left_pad + right_pad\n    new_height = image.height + top_pad + bottom_pad\n    padded_image = Image.new(image.mode, (new_width, new_height))\n    padded_image.paste(image, (left_pad, top_pad))\n    if fill:\n        if left_pad > 0:\n            padded_image.paste(left_edge.resize((left_pad, new_height), resample=Image.Resampling.NEAREST), (0, 0))\n        if right_pad > 0:\n            padded_image.paste(right_edge.resize((right_pad, new_height),\n                               resample=Image.Resampling.NEAREST), (new_width - right_pad, 0))\n        if top_pad > 0:\n            padded_image.paste(top_edge.resize((new_width, top_pad), resample=Image.Resampling.NEAREST), (0, 0))\n        if bottom_pad > 0:\n            padded_image.paste(bottom_edge.resize((new_width, bottom_pad),\n                               resample=Image.Resampling.NEAREST), (0, new_height - bottom_pad))\n        if blur and not (left_pad == right_pad == top_pad == bottom_pad == 0):\n            padded_image = padded_image.filter(ImageFilter.GaussianBlur(BLUR_KERNEL_SIZE))\n            padded_image.paste(image, (left_pad, top_pad))\n    return 
padded_image\n\n\ndef pad_tensor(tensor, left_pad, right_pad, top_pad, bottom_pad, fill=False, blur=False):\n    '''\n    Pads an image tensor with the given number of pixels on each side and fills the padding with data from the edges.\n    :param tensor: A tensor of shape [B, C, H, W]\n    :param left_pad: The number of pixels to pad on the left side\n    :param right_pad: The number of pixels to pad on the right side\n    :param top_pad: The number of pixels to pad on the top side\n    :param bottom_pad: The number of pixels to pad on the bottom side\n    :param blur: Whether to blur the padded edges\n    :return: A tensor of shape [B, C, H + top_pad + bottom_pad, W + left_pad + right_pad]\n    '''\n    batch_size, channels, height, width = tensor.shape\n    h_pad = left_pad + right_pad\n    v_pad = top_pad + bottom_pad\n    new_width = width + h_pad\n    new_height = height + v_pad\n\n    # Create empty image\n    padded = torch.zeros((batch_size, channels, new_height, new_width), dtype=tensor.dtype)\n\n    # Copy the original image into the center of the padded tensor\n    padded[:, :, top_pad:top_pad + height, left_pad:left_pad + width] = tensor\n\n    # Duplicate the edges of the original image into the padding\n    if top_pad > 0:\n        padded[:, :, :top_pad, :] = padded[:, :, top_pad:top_pad + 1, :]  # Top edge\n    if bottom_pad > 0:\n        padded[:, :, -bottom_pad:, :] = padded[:, :, -bottom_pad - 1:-bottom_pad, :]  # Bottom edge\n    if left_pad > 0:\n        padded[:, :, :, :left_pad] = padded[:, :, :, left_pad:left_pad + 1]  # Left edge\n    if right_pad > 0:\n        padded[:, :, :, -right_pad:] = padded[:, :, :, -right_pad - 1:-right_pad]  # Right edge\n\n    return padded\n\n\ndef resize_and_pad_image(image, width, height, fill=False, blur=False):\n    '''\n    Resizes an image to the given width and height and pads it to the given width and height.\n    :param image: A PIL image\n    :param width: The width of the resized image\n    :param 
height: The height of the resized image\n    :param fill: Whether to fill the padding with data from the edges\n    :param blur: Whether to blur the padded edges\n    :return: A PIL image of size (width, height)\n    '''\n    width_ratio = width / image.width\n    height_ratio = height / image.height\n    if height_ratio > width_ratio:\n        resize_ratio = width_ratio\n    else:\n        resize_ratio = height_ratio\n    resize_width = round(image.width * resize_ratio)\n    resize_height = round(image.height * resize_ratio)\n    resized = image.resize((resize_width, resize_height), resample=Image.Resampling.LANCZOS)\n    # Pad the sides of the image to get the image to the desired size that wasn't covered by the resize\n    horizontal_pad = (width - resize_width) // 2\n    vertical_pad = (height - resize_height) // 2\n    result = pad_image2(resized, horizontal_pad, horizontal_pad, vertical_pad, vertical_pad, fill, blur)\n    result = result.resize((width, height), resample=Image.Resampling.LANCZOS)\n    return result, (horizontal_pad, vertical_pad)\n\n\ndef resize_and_pad_tensor(tensor, width, height, fill=False, blur=False):\n    '''\n    Resizes an image tensor to the given width and height and pads it to the given width and height.\n    :param tensor: A tensor of shape [B, C, H, W]\n    :param width: The width of the resized image\n    :param height: The height of the resized image\n    :param fill: Whether to fill the padding with data from the edges\n    :param blur: Whether to blur the padded edges\n    :return: A tensor of shape [B, C, height, width]\n    '''\n    # Resize the image to the closest size that maintains the aspect ratio\n    width_ratio = width / tensor.shape[3]\n    height_ratio = height / tensor.shape[2]\n    if height_ratio > width_ratio:\n        resize_ratio = width_ratio\n    else:\n        resize_ratio = height_ratio\n    resize_width = round(tensor.shape[3] * resize_ratio)\n    resize_height = round(tensor.shape[2] * resize_ratio)\n  
  resized = F.interpolate(tensor, size=(resize_height, resize_width), mode='nearest-exact')\n    # Pad the sides of the image to get the image to the desired size that wasn't covered by the resize\n    horizontal_pad = (width - resize_width) // 2\n    vertical_pad = (height - resize_height) // 2\n    result = pad_tensor(resized, horizontal_pad, horizontal_pad, vertical_pad, vertical_pad, fill, blur)\n    result = F.interpolate(result, size=(height, width), mode='nearest-exact')\n    return result\n\n\ndef crop_controlnet(cond_dict, region, init_size, canvas_size, tile_size, w_pad, h_pad):\n    if \"control\" not in cond_dict:\n        return\n    c = cond_dict[\"control\"]\n    controlnet = c.copy()\n    cond_dict[\"control\"] = controlnet\n    while c is not None:\n        # hint is shape (B, C, H, W)\n        hint = controlnet.cond_hint_original\n        resized_crop = resize_region(region, canvas_size, hint.shape[:-3:-1])\n        hint = crop_tensor(hint.movedim(1, -1), resized_crop).movedim(-1, 1)\n        hint = resize_tensor(hint, tile_size[::-1])\n        controlnet.cond_hint_original = hint\n        c = c.previous_controlnet\n        controlnet.set_previous_controlnet(c.copy() if c is not None else None)\n        controlnet = controlnet.previous_controlnet\n\n\ndef region_intersection(region1, region2):\n    \"\"\"\n    Returns the coordinates of the intersection of two rectangular regions.\n    :param region1: A tuple of the form (x1, y1, x2, y2) denoting the upper left and the lower right points \n        of the first rectangular region. Expected to have x2 > x1 and y2 > y1.\n    :param region2: The second rectangular region with the same format as the first.\n    :return: A tuple of the form (x1, y1, x2, y2) denoting the rectangular intersection. 
\n        None if there is no intersection.\n    \"\"\"\n    x1, y1, x2, y2 = region1\n    x1_, y1_, x2_, y2_ = region2\n    x1 = max(x1, x1_)\n    y1 = max(y1, y1_)\n    x2 = min(x2, x2_)\n    y2 = min(y2, y2_)\n    if x1 >= x2 or y1 >= y2:\n        return None\n    return (x1, y1, x2, y2)\n\n\ndef crop_gligen(cond_dict, region, init_size, canvas_size, tile_size, w_pad, h_pad):\n    if \"gligen\" not in cond_dict:\n        return\n    type, model, cond = cond_dict[\"gligen\"]\n    if type != \"position\":\n        from warnings import warn\n        warn(f\"Unknown gligen type {type}\")\n        return\n    cropped = []\n    for c in cond:\n        emb, h, w, y, x = c\n        # Get the coordinates of the box in the upscaled image\n        x1 = x * 8\n        y1 = y * 8\n        x2 = x1 + w * 8\n        y2 = y1 + h * 8\n        gligen_upscaled_box = resize_region((x1, y1, x2, y2), init_size, canvas_size)\n\n        # Calculate the intersection of the gligen box and the region\n        intersection = region_intersection(gligen_upscaled_box, region)\n        if intersection is None:\n            continue\n        x1, y1, x2, y2 = intersection\n\n        # Offset the gligen box so that the origin is at the top left of the tile region\n        x1 -= region[0]\n        y1 -= region[1]\n        x2 -= region[0]\n        y2 -= region[1]\n\n        # Add the padding\n        x1 += w_pad\n        y1 += h_pad\n        x2 += w_pad\n        y2 += h_pad\n\n        # Set the new position params\n        h = (y2 - y1) // 8\n        w = (x2 - x1) // 8\n        x = x1 // 8\n        y = y1 // 8\n        cropped.append((emb, h, w, y, x))\n\n    cond_dict[\"gligen\"] = (type, model, cropped)\n\n\ndef crop_area(cond_dict, region, init_size, canvas_size, tile_size, w_pad, h_pad):\n    if \"area\" not in cond_dict:\n        return\n\n    # Resize the area conditioning to the canvas size and confine it to the tile region\n    h, w, y, x = cond_dict[\"area\"]\n    w, h, x, y = 8 * w, 8 * h, 
8 * x, 8 * y\n    x1, y1, x2, y2 = resize_region((x, y, x + w, y + h), init_size, canvas_size)\n    intersection = region_intersection((x1, y1, x2, y2), region)\n    if intersection is None:\n        del cond_dict[\"area\"]\n        del cond_dict[\"strength\"]\n        return\n    x1, y1, x2, y2 = intersection\n\n    # Offset origin to the top left of the tile\n    x1 -= region[0]\n    y1 -= region[1]\n    x2 -= region[0]\n    y2 -= region[1]\n\n    # Add the padding\n    x1 += w_pad\n    y1 += h_pad\n    x2 += w_pad\n    y2 += h_pad\n\n    # Set the params for tile\n    w, h = (x2 - x1) // 8, (y2 - y1) // 8\n    x, y = x1 // 8, y1 // 8\n\n    cond_dict[\"area\"] = (h, w, y, x)\n\n\ndef crop_mask(cond_dict, region, init_size, canvas_size, tile_size, w_pad, h_pad):\n    if \"mask\" not in cond_dict:\n        return\n    mask_tensor = cond_dict[\"mask\"]  # (B, H, W)\n    masks = []\n    for i in range(mask_tensor.shape[0]):\n        # Convert to PIL image\n        mask = tensor_to_pil(mask_tensor, i)  # W x H\n\n        # Resize the mask to the canvas size\n        mask = mask.resize(canvas_size, Image.Resampling.BICUBIC)\n\n        # Crop the mask to the region\n        mask = mask.crop(region)\n\n        # Add padding\n        mask, _ = resize_and_pad_image(mask, tile_size[0], tile_size[1], fill=True)\n\n        # Resize the mask to the tile size\n        if tile_size != mask.size:\n            mask = mask.resize(tile_size, Image.Resampling.BICUBIC)\n\n        # Convert back to tensor\n        mask = pil_to_tensor(mask)  # (1, H, W, 1)\n        mask = mask.squeeze(-1)  # (1, H, W)\n        masks.append(mask)\n\n    cond_dict[\"mask\"] = torch.cat(masks, dim=0)  # (B, H, W)\n\n# Added Flux-Kontext Support crop_reference_latents by TBG ETUR\ndef crop_reference_latents(cond_dict, region, init_size, canvas_size, tile_size, w_pad, h_pad):\n    \"\"\"\n    1. Resize each latent to `canvas_size` in latent units.\n    2. 
Crop the rectangle `region` (pixel coordinates).\n    3. Down-sample the crop to latent-space `tile_size`.\n    Expects a list of BCHW tensors under \"reference_latents\".\n    \"\"\"\n\n    latents = cond_dict.get(\"reference_latents\")\n    if not isinstance(latents, list):\n        return  # nothing to do\n\n    k = 8  # down-sample factor from pixel space → latent space (SD-type models)\n\n    W_can_px, H_can_px = canvas_size\n    # canvas size expressed in latent units\n    W_can_lat, H_can_lat = W_can_px // k, H_can_px // k\n\n    W_tile_px, H_tile_px = tile_size\n    W_tile_lat, H_tile_lat = max(1, W_tile_px // k), max(1, H_tile_px // k)\n\n    x1_px, y1_px, x2_px, y2_px = region\n\n    new_latents = []\n    for t in latents:  # (B,C,H_lat_in,W_lat_in) or (B,C,1,H_lat_in,W_lat_in)\n        has_5d = False\n        if t.ndim == 5:\n            has_5d = True\n            t = t.squeeze(2)\n        if t.ndim != 4:\n            raise ValueError(f\"expected BCHW or BC1HW, got {t.shape}\")\n\n        # 1. Resize to canvas resolution in latent units only if needed\n        if t.shape[-2:] != (H_can_lat, W_can_lat):\n            t = F.interpolate(t,\n                              size=(H_can_lat, W_can_lat),\n                              mode=\"bilinear\",\n                              align_corners=False)\n\n        # 2. Convert pixel crop → latent slice\n        w0_lat = int(round(x1_px / k))\n        w1_lat = int(round(x2_px / k))\n        h0_lat = int(round(y1_px / k))\n        h1_lat = int(round(y2_px / k))\n\n        cropped = t[:, :, h0_lat:h1_lat, w0_lat:w1_lat]  # view\n\n        # 3. 
Down-sample to latent-tile size\n        cropped = F.interpolate(cropped,\n                                size=(H_tile_lat, W_tile_lat),\n                                mode=\"bilinear\",\n                                align_corners=False)\n\n        if has_5d:\n            cropped = cropped.unsqueeze(2)\n        new_latents.append(cropped)\n\n    cond_dict[\"reference_latents\"] = new_latents\n\n\n\ndef crop_cond(cond, region, init_size, canvas_size, tile_size, w_pad=0, h_pad=0):\n    cropped = []\n    for emb, x in cond:\n        cond_dict = x.copy()\n        n = [emb, cond_dict]\n        crop_controlnet(cond_dict, region, init_size, canvas_size, tile_size, w_pad, h_pad)\n        crop_gligen(cond_dict, region, init_size, canvas_size, tile_size, w_pad, h_pad)\n        crop_area(cond_dict, region, init_size, canvas_size, tile_size, w_pad, h_pad)\n        crop_mask(cond_dict, region, init_size, canvas_size, tile_size, w_pad, h_pad)\n        crop_reference_latents(cond_dict, region, init_size, canvas_size, tile_size, w_pad, h_pad)\n        cropped.append(n)\n    return cropped\n"
  },
  {
    "path": "vitest.config.js",
    "content": "import { defineConfig } from \"vitest/config\";\n\nexport default defineConfig({\n  test: {\n    include: [\"web/tests/**/*.test.js\"],\n    environment: \"node\",\n  },\n});\n"
  },
  {
    "path": "web/apiClient.js",
    "content": "import { TIMEOUTS } from './constants.js';\nimport { normalizeWorkerUrl } from './urlUtils.js';\n\nexport function createApiClient(baseUrl) {\n    const normalizedBaseUrl = normalizeWorkerUrl(baseUrl);\n\n    const request = async (\n        endpoint,\n        options = {},\n        { retries = TIMEOUTS.MAX_RETRIES, retry = true } = {},\n    ) => {\n        const maxAttempts = retry ? retries : 1;\n        let lastError;\n        let delay = TIMEOUTS.RETRY_DELAY; // Initial delay for exponential backoff\n\n        for (let attempt = 0; attempt < maxAttempts; attempt++) {\n            try {\n                const headers = {\n                    'Content-Type': 'application/json',\n                    ...(options.headers || {}),\n                };\n                const response = await fetch(`${normalizedBaseUrl}${endpoint}`, {\n                    ...options,\n                    headers,\n                });\n                \n                if (!response.ok) {\n                    const error = await response.json().catch(() => ({}));\n                    const message = error.message\n                        || error.error\n                        || (Array.isArray(error.errors) ? 
error.errors.join('; ') : null)\n                        || `HTTP ${response.status}`;\n                    throw new Error(message);\n                }\n                \n                return await response.json();\n            } catch (error) {\n                lastError = error;\n                console.log(`API Error (attempt ${attempt + 1}/${maxAttempts}): ${endpoint} - ${error.message}`);\n                if (attempt < maxAttempts - 1) {\n                    await new Promise(resolve => setTimeout(resolve, delay));\n                    delay *= 2; // Exponential backoff\n                }\n            }\n        }\n        throw lastError;\n    };\n\n    const requestUrl = async (\n        url,\n        options = {},\n        { retries = TIMEOUTS.MAX_RETRIES, retry = true } = {},\n    ) => {\n        const maxAttempts = retry ? retries : 1;\n        let lastError;\n        let delay = TIMEOUTS.RETRY_DELAY;\n\n        for (let attempt = 0; attempt < maxAttempts; attempt++) {\n            try {\n                const response = await fetch(url, options);\n                if (!response.ok) {\n                    const error = await response.json().catch(() => ({}));\n                    const message = error.message\n                        || error.error\n                        || (Array.isArray(error.errors) ? 
error.errors.join('; ') : null)\n                        || `HTTP ${response.status}`;\n                    throw new Error(message);\n                }\n                return await response.json();\n            } catch (error) {\n                lastError = error;\n                console.log(`API Error (attempt ${attempt + 1}/${maxAttempts}): ${url} - ${error.message}`);\n                if (attempt < maxAttempts - 1) {\n                    await new Promise(resolve => setTimeout(resolve, delay));\n                    delay *= 2;\n                }\n            }\n        }\n        throw lastError;\n    };\n    \n    return {\n        // Config endpoints\n        async getConfig() {\n            return request('/distributed/config');\n        },\n        \n        async updateWorker(workerId, data) {\n            return request('/distributed/config/update_worker', {\n                method: 'POST',\n                body: JSON.stringify({ worker_id: workerId, ...data })\n            }, { retry: false });\n        },\n        \n        async deleteWorker(workerId) {\n            return request('/distributed/config/delete_worker', {\n                method: 'POST',\n                body: JSON.stringify({ worker_id: workerId })\n            }, { retry: false });\n        },\n        \n        async updateSetting(key, value) {\n            return request('/distributed/config/update_setting', {\n                method: 'POST',\n                body: JSON.stringify({ key, value })\n            }, { retry: false });\n        },\n        \n        async updateMaster(data) {\n            return request('/distributed/config/update_master', {\n                method: 'POST',\n                body: JSON.stringify(data)\n            }, { retry: false });\n        },\n        \n        // Worker management endpoints\n        async launchWorker(workerId) {\n            return request('/distributed/launch_worker', {\n                method: 'POST',\n                body: 
JSON.stringify({ worker_id: workerId })\n            }, { retry: false });\n        },\n        \n        async stopWorker(workerId) {\n            return request('/distributed/stop_worker', {\n                method: 'POST',\n                body: JSON.stringify({ worker_id: workerId })\n            }, { retry: false });\n        },\n        \n        async getManagedWorkers() {\n            return request('/distributed/managed_workers');\n        },\n        \n        async getWorkerLog(workerId, lines = 1000) {\n            return request(`/distributed/worker_log/${workerId}?lines=${lines}`);\n        },\n\n        async getRemoteWorkerLog(workerId, lines = 300) {\n            return request(`/distributed/remote_worker_log/${workerId}?lines=${lines}`);\n        },\n        \n        async clearLaunchingFlag(workerId) {\n            return request('/distributed/worker/clear_launching', {\n                method: 'POST',\n                body: JSON.stringify({ worker_id: workerId })\n            }, { retry: false });\n        },\n        \n        async queueDistributed(payload) {\n            return request('/distributed/queue', {\n                method: 'POST',\n                headers: {\n                    ...(payload?.trace_execution_id\n                        ? { 'X-Idempotency-Key': payload.trace_execution_id }\n                        : {}),\n                },\n                body: JSON.stringify(payload)\n            }, { retry: false });\n        },\n\n        async probeWorker(workerUrl, timeoutMs = TIMEOUTS.STATUS_CHECK, signal = null) {\n            const normalizedWorkerUrl = normalizeWorkerUrl(workerUrl);\n            const controller = new AbortController();\n            const timeoutId = setTimeout(() => controller.abort(), timeoutMs);\n            const effectiveSignal = signal\n                ? 
AbortSignal.any([controller.signal, signal])\n                : controller.signal;\n            try {\n                const response = await fetch(`${normalizedWorkerUrl}/prompt`, {\n                    method: 'GET',\n                    mode: 'cors',\n                    cache: 'no-store',\n                    signal: effectiveSignal,\n                });\n\n                if (!response.ok) {\n                    return { ok: false, status: response.status, queueRemaining: null };\n                }\n\n                let data;\n                try {\n                    data = await response.json();\n                } catch {\n                    return { ok: false, status: response.status, queueRemaining: null };\n                }\n\n                if (!data || typeof data !== \"object\" || Array.isArray(data)) {\n                    return { ok: false, status: response.status, queueRemaining: null };\n                }\n\n                const execInfo = data.exec_info;\n                if (!execInfo || typeof execInfo !== \"object\" || Array.isArray(execInfo)) {\n                    return { ok: false, status: response.status, queueRemaining: null };\n                }\n\n                const rawQueueRemaining = execInfo.queue_remaining;\n                const queueRemaining = Number(rawQueueRemaining);\n                if (!Number.isFinite(queueRemaining)) {\n                    return { ok: false, status: response.status, queueRemaining: null };\n                }\n\n                return {\n                    ok: true,\n                    status: response.status,\n                    queueRemaining: Math.max(0, queueRemaining),\n                };\n            } finally {\n                clearTimeout(timeoutId);\n            }\n        },\n\n        async dispatchToWorker(workerUrl, promptPayload) {\n            const normalizedWorkerUrl = normalizeWorkerUrl(workerUrl);\n            return requestUrl(`${normalizedWorkerUrl}/prompt`, {\n            
    method: 'POST',\n                headers: { 'Content-Type': 'application/json' },\n                mode: 'cors',\n                body: JSON.stringify(promptPayload),\n            }, { retry: false });\n        },\n        \n        // Network info\n        async getNetworkInfo() {\n            return request('/distributed/network_info');\n        },\n        \n        // Status checking (with timeout)\n        async checkStatus(url, timeout = TIMEOUTS.DEFAULT_FETCH) {\n            const controller = new AbortController();\n            const timeoutId = setTimeout(() => controller.abort(), timeout);\n            \n            try {\n                const response = await fetch(url, {\n                    method: 'GET',\n                    mode: 'cors',\n                    signal: controller.signal\n                });\n                clearTimeout(timeoutId);\n                \n                if (!response.ok) throw new Error(`HTTP ${response.status}`);\n                return await response.json();\n            } catch (error) {\n                clearTimeout(timeoutId);\n                throw error;\n            }\n        },\n        \n        // Batch status checking\n        async checkMultipleStatuses(urls) {\n            return Promise.allSettled(\n                urls.map(url => this.checkStatus(url))\n            );\n        },\n\n        // Cloudflare tunnel management\n        async startTunnel() {\n            return request('/distributed/tunnel/start', {\n                method: 'POST',\n                body: JSON.stringify({})\n            }, { retry: false });\n        },\n\n        async stopTunnel() {\n            return request('/distributed/tunnel/stop', {\n                method: 'POST',\n                body: JSON.stringify({})\n            }, { retry: false });\n        },\n\n        async getTunnelStatus() {\n            return request('/distributed/tunnel/status');\n        }\n    };\n}\n"
  },
  {
    "path": "web/constants.js",
    "content": "export const BUTTON_STYLES = {\n    // Base styles with unified padding\n    base: \"width: 100%; padding: 4px 14px; color: white; border: none; border-radius: 4px; cursor: pointer; transition: all 0.2s; font-size: 12px; font-weight: 500;\",\n    \n    // Context-specific combined styles\n    workerControl: \"flex: 1; font-size: 11px;\",\n    \n    // Layout modifiers\n    hidden: \"display: none;\",\n    marginLeftAuto: \"margin-left: auto;\",\n    \n    // Color variants\n    cancel: \"background-color: #555;\",\n    info: \"background-color: #333;\",\n    success: \"background-color: #4a7c4a;\",\n    error: \"background-color: #7c4a4a;\",\n    launch: \"background-color: #4a7c4a;\",\n    stop: \"background-color: #7c4a4a;\",\n    log: \"background-color: #685434;\",\n    working: \"background-color: #666;\",\n    clearMemory: \"background-color: #555; padding: 6px 14px;\",\n    interrupt: \"background-color: #555; padding: 6px 14px;\",\n};\n\nexport const STATUS_COLORS = {\n    DISABLED_GRAY: \"#666\",\n    OFFLINE_RED: \"#c04c4c\",\n    ONLINE_GREEN: \"#3ca03c\",\n    PROCESSING_YELLOW: \"#f0ad4e\"\n};\n\nexport const UI_COLORS = {\n    MUTED_TEXT: \"#888\",\n    SECONDARY_TEXT: \"#ccc\",\n    BORDER_LIGHT: \"#555\",\n    BORDER_DARK: \"#444\",\n    BORDER_DARKER: \"#3a3a3a\",\n    BACKGROUND_DARK: \"#2a2a2a\",\n    BACKGROUND_DARKER: \"#1e1e1e\",\n    ICON_COLOR: \"#666\",\n    ACCENT_COLOR: \"#777\"\n};\n\nexport const PULSE_ANIMATION_CSS = `\n    @keyframes pulse {\n        0% {\n            opacity: 1;\n            transform: scale(0.8);\n            box-shadow: 0 0 0 0 rgba(240, 173, 78, 0.7);\n        }\n        50% {\n            opacity: 0.3;\n            transform: scale(1.1);\n            box-shadow: 0 0 0 6px rgba(240, 173, 78, 0);\n        }\n        100% {\n            opacity: 1;\n            transform: scale(0.8);\n            box-shadow: 0 0 0 0 rgba(240, 173, 78, 0);\n        }\n    }\n    .status-pulsing {\n        animation: 
pulse 1.2s ease-in-out infinite;\n        transform-origin: center;\n    }\n\n    .worker-status--online {\n        background: var(--status-online, #3ca03c) !important;\n    }\n\n    .worker-status--offline {\n        background: var(--status-offline, #c04c4c) !important;\n    }\n\n    .worker-status--unknown {\n        background: var(--status-unknown, #888) !important;\n    }\n\n    .worker-status--processing {\n        background: var(--status-processing, #f0ad4e) !important;\n    }\n    \n    /* Button hover effects */\n    .distributed-button:hover:not(:disabled) {\n        filter: brightness(1.2);\n        transition: filter 0.2s ease;\n    }\n    .distributed-button:disabled {\n        opacity: 0.6;\n        cursor: not-allowed;\n    }\n    \n    /* Settings button animation */\n    .settings-btn {\n        transition: transform 0.2s ease;\n    }\n    \n    \n    /* Expanded settings panel */\n    .worker-settings {\n        max-height: 0;\n        overflow: hidden;\n        opacity: 0;\n        transition: max-height 0.3s ease, opacity 0.3s ease, padding 0.3s ease, margin 0.3s ease;\n    }\n    .worker-settings.expanded {\n        max-height: 500px;\n        opacity: 1;\n        padding: 12px;\n        margin-top: 8px;\n        margin-bottom: 8px;\n    }\n\n    /* Cloudflare tunnel spinner */\n    @keyframes tunnel-spin {\n        from { transform: rotate(0deg); }\n        to { transform: rotate(360deg); }\n    }\n    .tunnel-spinner {\n        width: 14px;\n        height: 14px;\n        border: 2px solid rgba(255, 255, 255, 0.35);\n        border-top-color: #fff;\n        border-radius: 50%;\n        display: inline-block;\n        animation: tunnel-spin 0.9s linear infinite;\n        margin-right: 8px;\n        vertical-align: middle;\n    }\n`;\n\nexport const UI_STYLES = {\n    statusDot: \"display: inline-block; width: 10px; height: 10px; border-radius: 50%; margin-right: 10px;\",\n    controlsDiv: \"padding: 0 12px 12px 12px; display: flex; gap: 
6px;\",\n    formGroup: \"display: flex; flex-direction: column; gap: 5px;\",\n    formLabel: \"font-size: 12px; color: var(--dist-label-text, #ccc); font-weight: 500;\",\n    formInput:\n        \"padding: 6px 10px; color: var(--dist-input-text, white); background: var(--dist-input-bg, transparent); font-size: 12px; transition: border-color 0.2s;\",\n    \n    // Card styles\n    cardBase: \"margin-bottom: 12px; overflow: hidden; display: flex;\",\n    workerCard: \"margin-bottom: 12px; overflow: hidden; display: flex;\",\n    cardBlueprint: \"cursor: pointer; transition: all 0.2s ease;\",\n    cardAdd: \"cursor: pointer; transition: all 0.2s ease;\",\n\n    // Column styles\n    columnBase: \"display: flex; align-items: center; justify-content: center;\",\n    checkboxColumn: \"flex: 0 0 44px; display: flex; align-items: center; justify-content: center; cursor: default;\",\n    contentColumn: \"flex: 1; display: flex; flex-direction: column; transition: background-color 0.2s ease;\",\n    iconColumn: \"width: 44px; flex-shrink: 0; font-size: 20px; color: var(--dist-placeholder-add-color, #666);\",\n    \n    // Row and content styles\n    infoRow: \"display: flex; align-items: center; padding: 12px; cursor: pointer; min-height: 64px;\",\n    workerContent: \"display: flex; align-items: center; gap: 10px; flex: 1;\",\n    \n    // Form and controls styles\n    buttonGroup: \"display: flex; gap: 4px; margin-top: 10px;\",\n    settingsForm: \"display: flex; flex-direction: column; gap: 10px;\",\n    checkboxGroup: \"display: flex; align-items: center; gap: 8px; margin: 5px 0;\",\n    formLabelClickable: \"font-size: 12px; color: var(--dist-label-text, #ccc); cursor: pointer;\",\n    settingsToggle: \"display: flex; align-items: center; gap: 6px; padding: 4px 0; cursor: pointer; user-select: none;\",\n    controlsWrapper: \"display: flex; gap: 6px; align-items: stretch; width: 100%;\",\n    \n    // Existing styles\n    settingsArrow:\n        \"font-size: 12px; 
color: var(--dist-settings-arrow, #888); transition: all 0.2s ease; margin-left: auto; padding: 4px;\",\n    infoBox:\n        \"color: var(--dist-info-box-text, #999); padding: 5px 14px; font-size: 11px; text-align: center; flex: 1; font-weight: 500;\",\n    workerSettings: \"margin: 0 12px; padding: 0 12px;\"\n};\n\nexport const TIMEOUTS = {\n    DEFAULT_FETCH: 5000, // ms for general API calls\n    STATUS_CHECK: 1200, // ms for status checks\n    LAUNCH: 90000, // ms for worker launch (longer for model loading)\n    RETRY_DELAY: 1000, // initial delay for exponential backoff\n    MAX_RETRIES: 3, // max retry attempts\n    \n    // UI feedback delays\n    BUTTON_RESET: 3000, // button text/state reset after actions\n    FLASH_SHORT: 1000, // brief success feedback\n    FLASH_MEDIUM: 1500, // medium error feedback  \n    FLASH_LONG: 2000, // longer error feedback\n    \n    // Operational delays\n    POST_ACTION_DELAY: 500, // delay after operations before status checks\n    STATUS_CHECK_DELAY: 100, // brief delay before status checks\n    \n    // Background tasks\n    LOG_REFRESH: 2000, // log auto-refresh interval\n    IMAGE_CACHE_CLEAR: 30000 // delay before clearing image cache\n};\n\nexport const ENDPOINTS = {\n    // ComfyUI core\n    PROMPT: '/prompt',\n    INTERRUPT: '/interrupt',\n    UPLOAD_IMAGE: '/upload/image',\n    SYSTEM_INFO: '/system_stats',\n\n    // Distributed API\n    CONFIG: '/distributed/config',\n    UPDATE_WORKER: '/distributed/config/update_worker',\n    DELETE_WORKER: '/distributed/config/delete_worker',\n    UPDATE_SETTING: '/distributed/config/update_setting',\n    UPDATE_MASTER: '/distributed/config/update_master',\n    LAUNCH_WORKER: '/distributed/launch_worker',\n    STOP_WORKER: '/distributed/stop_worker',\n    MANAGED_WORKERS: '/distributed/managed_workers',\n    WORKER_LOG: '/distributed/worker_log',\n    REMOTE_WORKER_LOG: '/distributed/remote_worker_log',\n    LOCAL_LOG: '/distributed/local_log',\n    CLEAR_LAUNCHING: 
'/distributed/worker/clear_launching',\n    PREPARE_JOB: '/distributed/prepare_job',\n    LOAD_IMAGE: '/distributed/load_image',\n    NETWORK_INFO: '/distributed/network_info',\n    CHECK_FILE: '/distributed/check_file',\n    CLEAR_MEMORY: '/distributed/clear_memory',\n    SYSTEM_INFO_DIST: '/distributed/system_info',\n    TUNNEL_START: '/distributed/tunnel/start',\n    TUNNEL_STOP: '/distributed/tunnel/stop',\n    TUNNEL_STATUS: '/distributed/tunnel/status',\n};\n\nexport const NODE_CLASSES = {\n    DISTRIBUTED_COLLECTOR: 'DistributedCollector',\n    DISTRIBUTED_SEED: 'DistributedSeed',\n    DISTRIBUTED_EMPTY_IMAGE: 'DistributedEmptyImage',\n    UPSCALE_DISTRIBUTED: 'UltimateSDUpscaleDistributed',\n    PREVIEW_IMAGE: 'PreviewImage',\n};\n\nexport function generateUUID() {\n    if (crypto.randomUUID) return crypto.randomUUID();\n    return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, c => {\n        const r = Math.random() * 16 | 0;\n        return (c === 'x' ? r : (r & 0x3 | 0x8)).toString(16);\n    });\n}\n"
  },
  {
    "path": "web/distributed.css",
    "content": ":root {\n    --btn-stop: #7c4a4a;\n    --btn-launch: #4a7c4a;\n    --btn-log: #685434;\n    --btn-working: #666;\n    --btn-success: #3a6a3a;\n    --btn-error: #6a3a3a;\n    --tunnel-enable: #665533;\n    --tunnel-disable: #7c4a4a;\n    --master-badge-fallback-bg: #243024;\n    --master-badge-fallback-text: #6bd06b;\n    --master-badge-fallback-border: #335533;\n    --master-badge-delegate-bg: #3a3a3a;\n    --master-badge-delegate-text: #ffcc66;\n    --dist-divider: #444;\n    --dist-muted-text: #888;\n    --dist-label-text: #ccc;\n    --dist-settings-arrow: #888;\n    --dist-settings-arrow-hover: #fff;\n    --dist-card-bg: #2a2a2a;\n    --dist-card-title: #f4f5f7;\n    --dist-card-subtext: #a9afb9;\n    --dist-card-placeholder-title: #aaa;\n    --dist-left-col-border: #3a3a3a;\n    --dist-left-col-bg: rgba(0, 0, 0, 0.1);\n    --dist-info-box-bg: #333;\n    --dist-info-box-text: #999;\n    --dist-input-bg: #2a2a2a;\n    --dist-input-border: #444;\n    --dist-input-text: #fff;\n    --dist-settings-bg: #1e1e1e;\n    --dist-settings-border: #2a2a2a;\n    --dist-hover-bg: #333;\n    --dist-placeholder-blueprint-border: #555;\n    --dist-placeholder-blueprint-hover-border: #777;\n    --dist-placeholder-add-border: #444;\n    --dist-placeholder-add-hover-border: #666;\n    --dist-placeholder-blueprint-bg: rgba(255, 255, 255, 0.02);\n    --dist-placeholder-blueprint-hover-bg: rgba(255, 255, 255, 0.05);\n    --dist-placeholder-add-hover-bg: rgba(255, 255, 255, 0.02);\n    --dist-placeholder-blueprint-color: #777;\n    --dist-placeholder-blueprint-hover-color: #999;\n    --dist-placeholder-add-color: #555;\n    --dist-placeholder-add-hover-color: #888;\n    --dist-log-modal-bg: #1e1e1e;\n    --dist-log-modal-border: #444;\n    --dist-log-modal-header-border: #444;\n    --dist-log-modal-title: #fff;\n    --dist-log-modal-label: #ccc;\n    --dist-log-modal-body-bg: #0d0d0d;\n    --dist-log-modal-body-text: #ddd;\n    --dist-log-modal-status: 
#888;\n}\n\n.distributed-panel--light {\n    --dist-divider: #c7ced9;\n    --dist-muted-text: #5b6472;\n    --dist-label-text: #2f3a4a;\n    --dist-settings-arrow: #6b7483;\n    --dist-settings-arrow-hover: #253040;\n    --dist-card-bg: #f6f8fb;\n    --dist-card-title: #1e293b;\n    --dist-card-subtext: #4b5565;\n    --dist-card-placeholder-title: #3f4a5a;\n    --dist-left-col-border: #d8deea;\n    --dist-left-col-bg: #eef2f8;\n    --dist-info-box-bg: #e8edf5;\n    --dist-info-box-text: #4f5b6c;\n    --dist-input-bg: #ffffff;\n    --dist-input-border: #b8c2d3;\n    --dist-input-text: #1f2937;\n    --dist-settings-bg: #eef2f8;\n    --dist-settings-border: #d5dcea;\n    --dist-hover-bg: #e6ebf3;\n    --dist-placeholder-blueprint-border: #aeb8c9;\n    --dist-placeholder-blueprint-hover-border: #8e9ab0;\n    --dist-placeholder-add-border: #b7c1d3;\n    --dist-placeholder-add-hover-border: #919eb4;\n    --dist-placeholder-blueprint-bg: rgba(80, 100, 140, 0.05);\n    --dist-placeholder-blueprint-hover-bg: rgba(80, 100, 140, 0.08);\n    --dist-placeholder-add-hover-bg: rgba(80, 100, 140, 0.05);\n    --dist-placeholder-blueprint-color: #5f6c82;\n    --dist-placeholder-blueprint-hover-color: #47556e;\n    --dist-placeholder-add-color: #6a7588;\n    --dist-placeholder-add-hover-color: #4c586f;\n    --master-badge-delegate-bg: #efe7ce;\n    --master-badge-delegate-text: #6a4f00;\n    --master-badge-fallback-bg: #dff1df;\n    --master-badge-fallback-text: #1f5f1f;\n    --master-badge-fallback-border: #9fc79f;\n    --dist-log-modal-bg: #f9fbff;\n    --dist-log-modal-border: #bcc7db;\n    --dist-log-modal-header-border: #c7d1e3;\n    --dist-log-modal-title: #1f2937;\n    --dist-log-modal-label: #4a5568;\n    --dist-log-modal-body-bg: #f2f5fb;\n    --dist-log-modal-body-text: #1f2937;\n    --dist-log-modal-status: #5c6779;\n}\n\n.btn--stop {\n    background-color: var(--btn-stop) !important;\n}\n\n.btn--launch {\n    background-color: var(--btn-launch) !important;\n}\n\n.btn--log 
{\n    background-color: var(--btn-log) !important;\n}\n\n.btn--working {\n    background-color: var(--btn-working) !important;\n}\n\n.btn--success {\n    background-color: var(--btn-success) !important;\n}\n\n.btn--error {\n    background-color: var(--btn-error) !important;\n}\n\n.master-info-badge--fallback {\n    background-color: var(--master-badge-fallback-bg) !important;\n    color: var(--master-badge-fallback-text) !important;\n    border: 1px solid var(--master-badge-fallback-border) !important;\n}\n\n.master-info-badge--delegate {\n    background-color: var(--master-badge-delegate-bg) !important;\n    color: var(--master-badge-delegate-text) !important;\n}\n\n.entity-card-content--hovered {\n    background-color: var(--dist-hover-bg) !important;\n}\n\n.placeholder-card--blueprint {\n    border-color: var(--dist-placeholder-blueprint-border) !important;\n    background-color: var(--dist-placeholder-blueprint-bg) !important;\n}\n\n.placeholder-card--blueprint.is-hovered {\n    border-color: var(--dist-placeholder-blueprint-hover-border) !important;\n    background-color: var(--dist-placeholder-blueprint-hover-bg) !important;\n}\n\n.placeholder-card--add {\n    border-color: var(--dist-placeholder-add-border) !important;\n    background-color: transparent !important;\n}\n\n.placeholder-card--add.is-hovered {\n    border-color: var(--dist-placeholder-add-hover-border) !important;\n    background-color: var(--dist-placeholder-add-hover-bg) !important;\n}\n\n.placeholder-column--blueprint {\n    color: var(--dist-placeholder-blueprint-color) !important;\n    border-right-color: var(--dist-placeholder-blueprint-border) !important;\n}\n\n.placeholder-column--blueprint.is-hovered {\n    color: var(--dist-placeholder-blueprint-hover-color) !important;\n}\n\n.placeholder-column--add {\n    color: var(--dist-placeholder-add-color) !important;\n    border-color: var(--dist-placeholder-add-border) !important;\n    border-right-color: var(--dist-placeholder-add-border) 
!important;\n}\n\n.placeholder-column--add.is-hovered {\n    color: var(--dist-placeholder-add-hover-color) !important;\n    border-color: var(--dist-placeholder-add-hover-border) !important;\n}\n\n.tunnel-button--enable {\n    background-color: var(--tunnel-enable) !important;\n}\n\n.tunnel-button--disable {\n    background-color: var(--tunnel-disable) !important;\n}\n\n.tunnel-status--enable {\n    background-color: var(--tunnel-enable) !important;\n}\n\n.tunnel-status--disable {\n    background-color: var(--tunnel-disable) !important;\n}\n\n/* ---- Themeable card/column/input defaults (Classic) ---- */\n\n.dist-card {\n    background: var(--dist-card-bg);\n    color: var(--dist-card-title);\n    border-radius: 6px;\n}\n\n.dist-card--blueprint {\n    border: 2px dashed var(--dist-placeholder-blueprint-border);\n    background: var(--dist-placeholder-blueprint-bg);\n}\n\n.dist-card--add {\n    border: 1px dashed var(--dist-placeholder-add-border);\n    background: transparent;\n}\n\n.dist-card__left-col {\n    border-right: 1px solid var(--dist-left-col-border);\n    background: var(--dist-left-col-bg);\n}\n\n.dist-info-box {\n    background-color: var(--dist-info-box-bg);\n    color: var(--dist-info-box-text);\n    border-radius: 4px;\n}\n\n.dist-form-input {\n    background: var(--dist-input-bg);\n    border: 1px solid var(--dist-input-border);\n    color: var(--dist-input-text);\n    border-radius: 4px;\n}\n\n.worker-settings {\n    background: var(--dist-settings-bg);\n    border: 1px solid var(--dist-settings-border);\n    border-radius: 4px;\n}\n\n.dist-worker-info__title {\n    color: var(--dist-card-title) !important;\n    font-size: 1.03em;\n    font-weight: 700;\n}\n\n.dist-worker-info__meta {\n    color: var(--dist-card-subtext) !important;\n    font-size: 0.88em;\n}\n\n.dist-worker-info__fallback {\n    color: var(--master-badge-fallback-text) !important;\n    font-size: 0.86em;\n    font-weight: 600;\n}\n\n/* ---- Nodes 2.0 Theme ---- 
*/\n\n.distributed-panel--nodes2 .dist-card {\n    background: var(--p-surface-800, #1e1e1e);\n    border: 1px solid var(--p-surface-700, #2c2c2c);\n    border-radius: 8px;\n    box-shadow: 0 1px 3px rgba(0, 0, 0, 0.3);\n}\n\n.distributed-panel--nodes2 .dist-card--blueprint {\n    border: 2px dashed var(--p-surface-600, #404040);\n    background: rgba(255, 255, 255, 0.015);\n    box-shadow: none;\n}\n\n.distributed-panel--nodes2 .dist-card--add {\n    border: 1px dashed var(--p-surface-600, #404040);\n    background: transparent;\n    box-shadow: none;\n}\n\n.distributed-panel--nodes2 .dist-card__left-col {\n    border-right-color: var(--p-surface-700, #2c2c2c);\n    background: var(--p-surface-900, #131313);\n}\n\n.distributed-panel--nodes2 .dist-info-box {\n    background-color: var(--p-surface-700, #2c2c2c);\n    color: var(--p-text-muted-color, #9e9e9e);\n    border-radius: 6px;\n}\n\n.distributed-panel--nodes2 .dist-form-input {\n    background: var(--p-surface-800, #1e1e1e);\n    border-color: var(--p-surface-600, #404040);\n    border-radius: 6px;\n    color: var(--p-text-color, #ffffff);\n}\n\n.distributed-panel--nodes2 .worker-settings {\n    background: var(--p-surface-900, #131313);\n    border-color: var(--p-surface-700, #2c2c2c);\n    border-radius: 6px;\n}\n\n.distributed-panel--nodes2 .entity-card-content--hovered {\n    background-color: var(--p-surface-700, #2c2c2c) !important;\n}\n\n.distributed-panel--nodes2 .placeholder-card--blueprint {\n    border-color: var(--p-surface-600, #404040) !important;\n}\n\n.distributed-panel--nodes2 .placeholder-card--blueprint.is-hovered {\n    border-color: var(--p-surface-500, #555555) !important;\n}\n\n.distributed-panel--nodes2 .placeholder-card--add {\n    border-color: var(--p-surface-600, #404040) !important;\n}\n\n.distributed-panel--nodes2 .placeholder-card--add.is-hovered {\n    border-color: var(--p-surface-500, #555555) !important;\n}\n\n/* ---- End Nodes 2.0 Theme ---- */\n\n.is-hidden {\n    display: 
none !important;\n}\n\n.settings-arrow--expanded {\n    transform: rotate(90deg) !important;\n}\n\n.log-modal {\n    position: fixed;\n    top: 0;\n    left: 0;\n    width: 100%;\n    height: 100%;\n    background: rgba(0, 0, 0, 0.8);\n    display: flex;\n    align-items: center;\n    justify-content: center;\n    z-index: 10000;\n}\n\n.log-modal__content {\n    background: var(--dist-log-modal-bg);\n    border-radius: 8px;\n    width: 90%;\n    max-width: 1200px;\n    height: 80%;\n    display: flex;\n    flex-direction: column;\n    border: 1px solid var(--dist-log-modal-border);\n}\n\n.log-modal__header {\n    padding: 15px 20px;\n    border-bottom: 1px solid var(--dist-log-modal-header-border);\n    display: flex;\n    justify-content: space-between;\n    align-items: center;\n}\n\n.log-modal__title {\n    margin: 0;\n    color: var(--dist-log-modal-title);\n}\n\n.log-modal__header-buttons {\n    display: flex;\n    gap: 20px;\n    align-items: center;\n}\n\n.log-modal__refresh {\n    display: flex;\n    align-items: center;\n    gap: 4px;\n}\n\n.log-modal__refresh-input {\n    cursor: pointer;\n}\n\n.log-modal__refresh-label {\n    font-size: 12px;\n    color: var(--dist-log-modal-label);\n    cursor: pointer;\n    white-space: nowrap;\n}\n\n.log-modal__close {\n    background-color: #c04c4c !important;\n    padding: 5px 10px !important;\n    font-size: 14px !important;\n    font-weight: bold !important;\n    border-radius: 6px !important;\n}\n\n.log-modal__body {\n    flex: 1;\n    overflow: auto;\n    padding: 15px;\n    font-family: 'Consolas', 'Monaco', 'Courier New', monospace;\n    font-size: 12px;\n    line-height: 1.4;\n    color: var(--dist-log-modal-body-text);\n    background: var(--dist-log-modal-body-bg);\n    white-space: pre-wrap;\n    word-wrap: break-word;\n}\n\n.log-modal__status {\n    padding: 10px 20px;\n    border-top: 1px solid var(--dist-log-modal-header-border);\n    font-size: 11px;\n    color: var(--dist-log-modal-status);\n}\n"
  },
  {
    "path": "web/distributedValue.js",
    "content": "import { app } from \"/scripts/app.js\";\nimport { ENDPOINTS } from \"./constants.js\";\n\nconst NODE_CLASS = \"DistributedValue\";\nconst CONVERTED_WIDGET = \"converted-widget\";\nconst DYNAMIC_DEFAULT_WIDGET = \"_dv_default\";\nconst DYNAMIC_WORKER_WIDGET_PREFIX = \"_dv_worker_\";\nconst WORKERS_CHANGED_EVENT = \"distributed:workers-changed\";\n\nconst trackedNodes = new Set();\nlet workersChangedListenerAttached = false;\n\nfunction filterEnabledWorkers(workers) {\n    if (!Array.isArray(workers)) return [];\n    return workers.filter((worker) => Boolean(worker?.enabled));\n}\n\nasync function fetchWorkers() {\n    try {\n        const resp = await fetch(ENDPOINTS.CONFIG);\n        if (!resp.ok) return [];\n        const config = await resp.json();\n        return filterEnabledWorkers(config.workers);\n    } catch {\n        return [];\n    }\n}\n\nfunction getRawDefaultWidget(node) {\n    return node.widgets?.find((w) => w.name === \"default_value\");\n}\n\nfunction getRawWorkerValuesWidget(node) {\n    return node.widgets?.find((w) => w.name === \"worker_values\");\n}\n\nfunction getDynamicDefaultWidget(node) {\n    return node.widgets?.find((w) => w.name === DYNAMIC_DEFAULT_WIDGET);\n}\n\nfunction getDynamicWorkerWidgets(node) {\n    return (node.widgets || []).filter((w) => w.name.startsWith(DYNAMIC_WORKER_WIDGET_PREFIX));\n}\n\nfunction hideWidgetForGood(node, widget, suffix = \"\") {\n    if (!widget) return;\n    if (typeof widget.type === \"string\" && widget.type.startsWith(CONVERTED_WIDGET)) return;\n\n    widget.origType = widget.type;\n    widget.origComputeSize = widget.computeSize;\n    widget.origSerializeValue = widget.serializeValue;\n    widget.computeSize = () => [0, -4];\n    widget.type = `${CONVERTED_WIDGET}${suffix}`;\n\n    // Hide any attached DOM element (multiline widgets).\n    if (widget.element) widget.element.style.display = \"none\";\n    if (widget.inputEl) widget.inputEl.style.display = \"none\";\n\n    if 
(widget.linkedWidgets) {\n        for (const linked of widget.linkedWidgets) {\n            hideWidgetForGood(node, linked, `:${widget.name}`);\n        }\n    }\n}\n\nfunction hideRawWidgets(node) {\n    hideWidgetForGood(node, getRawDefaultWidget(node), \":default_value\");\n    hideWidgetForGood(node, getRawWorkerValuesWidget(node), \":worker_values\");\n}\n\nfunction removeDynamicDefaultWidget(node) {\n    const idx = node.widgets?.findIndex((w) => w.name === DYNAMIC_DEFAULT_WIDGET);\n    if (idx != null && idx >= 0) {\n        node.widgets.splice(idx, 1);\n    }\n}\n\nfunction removeDynamicWorkerWidgets(node) {\n    if (!node.widgets) return;\n    for (let i = node.widgets.length - 1; i >= 0; i--) {\n        if (node.widgets[i].name.startsWith(DYNAMIC_WORKER_WIDGET_PREFIX)) {\n            node.widgets.splice(i, 1);\n        }\n    }\n}\n\nfunction readWorkerStore(node) {\n    const raw = getRawWorkerValuesWidget(node);\n    if (!raw) return {};\n    try {\n        const parsed = JSON.parse(raw.value || \"{}\");\n        return typeof parsed === \"object\" && parsed !== null ? parsed : {};\n    } catch {\n        return {};\n    }\n}\n\nfunction writeWorkerStore(node, store) {\n    const raw = getRawWorkerValuesWidget(node);\n    if (!raw) return;\n    raw.value = JSON.stringify(store);\n}\n\nfunction normalizeComboOptions(options) {\n    if (!options) return null;\n    if (Array.isArray(options)) return options;\n    if (Array.isArray(options.values)) return options.values;\n    return null;\n}\n\nfunction resolveGraphLink(graph, linkId) {\n    const links = graph.links || graph._links;\n    if (!links) return null;\n    const link = links[linkId] ?? (typeof links.get === \"function\" ? 
links.get(linkId) : null);\n    if (!link) return null;\n    if (Array.isArray(link)) {\n        return {\n            target_id: link[2],\n            target_slot: link[3],\n        };\n    }\n    return link;\n}\n\nfunction detectTargetType(node) {\n    const out = node.outputs?.[0];\n    const linkIds = out?.links || [];\n    if (!linkIds.length) {\n        return { connected: false, type: \"STRING\", options: null };\n    }\n\n    const graph = node.graph || app.graph;\n    if (!graph) {\n        return { connected: false, type: \"STRING\", options: null };\n    }\n\n    const link = resolveGraphLink(graph, linkIds[0]);\n    if (!link) {\n        return { connected: false, type: \"STRING\", options: null };\n    }\n\n    const targetNode = graph.getNodeById(link.target_id);\n    if (!targetNode) {\n        return { connected: false, type: \"STRING\", options: null };\n    }\n\n    const targetInputName = targetNode.inputs?.[link.target_slot]?.name;\n    if (!targetInputName) {\n        return { connected: false, type: \"STRING\", options: null };\n    }\n\n    const targetWidget = targetNode.widgets?.find((w) => w.name === targetInputName);\n    if (targetWidget) {\n        if (targetWidget.type === \"combo\") {\n            const comboOptions = normalizeComboOptions(targetWidget.options);\n            return { connected: true, type: \"COMBO\", options: comboOptions };\n        }\n        if (targetWidget.type === \"number\") {\n            const step = targetWidget.options?.step;\n            const precision = targetWidget.options?.precision;\n            const isInt = Number.isInteger(step) && (precision === 0 || precision == null);\n            return { connected: true, type: isInt ? 
\"INT\" : \"FLOAT\", options: null };\n        }\n    }\n\n    const nodeDef = targetNode.constructor?.nodeData;\n    const inputDef = nodeDef?.input?.required?.[targetInputName] || nodeDef?.input?.optional?.[targetInputName];\n    if (inputDef) {\n        const defType = inputDef[0];\n        if (Array.isArray(defType)) {\n            return { connected: true, type: \"COMBO\", options: defType };\n        }\n        if (defType === \"INT\") return { connected: true, type: \"INT\", options: null };\n        if (defType === \"FLOAT\") return { connected: true, type: \"FLOAT\", options: null };\n    }\n\n    return { connected: true, type: \"STRING\", options: null };\n}\n\nfunction normalizeNumber(value, fallback) {\n    const parsed = Number(value);\n    return Number.isFinite(parsed) ? parsed : fallback;\n}\n\nfunction getDefaultInitialValue(node, inputType, comboOptions) {\n    const rawDefault = getRawDefaultWidget(node);\n    const current = rawDefault?.value;\n\n    if (inputType === \"INT\") {\n        return Math.trunc(normalizeNumber(current, 0));\n    }\n    if (inputType === \"FLOAT\") {\n        return normalizeNumber(current, 0);\n    }\n    if (inputType === \"COMBO\" && Array.isArray(comboOptions) && comboOptions.length) {\n        const currentText = current == null ? \"\" : String(current);\n        return comboOptions.includes(currentText) ? currentText : comboOptions[0];\n    }\n    return current == null ? 
\"\" : String(current);\n}\n\nfunction setRawDefaultValue(node, value) {\n    const rawDefault = getRawDefaultWidget(node);\n    if (!rawDefault) return;\n    rawDefault.value = value;\n}\n\nfunction serializeWorkerStoreFromWidgets(node, inputType, comboOptions) {\n    const nextStore = { _type: inputType };\n    if (inputType === \"COMBO\" && Array.isArray(comboOptions)) {\n        nextStore._options = comboOptions;\n    }\n    const valuesByWorkerId = {};\n\n    for (const widget of getDynamicWorkerWidgets(node)) {\n        const key = widget.name.slice(DYNAMIC_WORKER_WIDGET_PREFIX.length);\n        if (widget.value !== \"\" && widget.value !== null && widget.value !== undefined) {\n            const value = String(widget.value);\n            nextStore[key] = value;\n            if (widget._dvWorkerId) {\n                valuesByWorkerId[widget._dvWorkerId] = value;\n            }\n        }\n    }\n    if (Object.keys(valuesByWorkerId).length) {\n        nextStore._by_worker_id = valuesByWorkerId;\n    }\n\n    writeWorkerStore(node, nextStore);\n}\n\nfunction updateWorkerStoreTypeMetadata(node, inputType, comboOptions) {\n    const store = readWorkerStore(node);\n    store._type = inputType;\n    if (inputType === \"COMBO\" && Array.isArray(comboOptions)) {\n        store._options = comboOptions;\n    } else {\n        delete store._options;\n    }\n    writeWorkerStore(node, store);\n}\n\nfunction createDynamicDefaultWidget(node, inputType, comboOptions) {\n    removeDynamicDefaultWidget(node);\n    const initial = getDefaultInitialValue(node, inputType, comboOptions);\n    let widget;\n\n    if (inputType === \"COMBO\" && Array.isArray(comboOptions) && comboOptions.length) {\n        widget = node.addWidget(\n            \"combo\",\n            DYNAMIC_DEFAULT_WIDGET,\n            initial,\n            (value) => {\n                widget.value = value;\n                setRawDefaultValue(node, String(value));\n            },\n            { values: 
comboOptions }\n        );\n    } else if (inputType === \"INT\") {\n        widget = node.addWidget(\n            \"number\",\n            DYNAMIC_DEFAULT_WIDGET,\n            initial,\n            (value) => {\n                widget.value = Math.trunc(normalizeNumber(value, 0));\n                setRawDefaultValue(node, widget.value);\n            },\n            { min: -Infinity, max: Infinity, step: 1, precision: 0 }\n        );\n    } else if (inputType === \"FLOAT\") {\n        widget = node.addWidget(\n            \"number\",\n            DYNAMIC_DEFAULT_WIDGET,\n            initial,\n            (value) => {\n                widget.value = normalizeNumber(value, 0);\n                setRawDefaultValue(node, widget.value);\n            },\n            { min: -Infinity, max: Infinity, step: 0.1, precision: 3 }\n        );\n    } else {\n        widget = node.addWidget(\n            \"string\",\n            DYNAMIC_DEFAULT_WIDGET,\n            initial,\n            (value) => {\n                widget.value = value ?? \"\";\n                setRawDefaultValue(node, widget.value);\n            },\n            {}\n        );\n    }\n\n    widget.label = \"default_value\";\n}\n\nfunction getWorkerInitialValue(store, key, workerId, inputType, comboOptions) {\n    const byWorkerId = store?._by_worker_id;\n    const saved = (byWorkerId && workerId && byWorkerId[workerId] != null)\n        ? 
byWorkerId[workerId]\n        : store[key];\n    if (saved == null) {\n        if (inputType === \"INT\" || inputType === \"FLOAT\") return 0;\n        if (inputType === \"COMBO\" && Array.isArray(comboOptions) && comboOptions.length) {\n            return comboOptions[0];\n        }\n        return \"\";\n    }\n\n    if (inputType === \"INT\") return Math.trunc(normalizeNumber(saved, 0));\n    if (inputType === \"FLOAT\") return normalizeNumber(saved, 0);\n    if (inputType === \"COMBO\" && Array.isArray(comboOptions) && comboOptions.length) {\n        const savedText = String(saved);\n        return comboOptions.includes(savedText) ? savedText : comboOptions[0];\n    }\n    return String(saved);\n}\n\nfunction createWorkerWidgets(node, workers, inputType, comboOptions) {\n    removeDynamicWorkerWidgets(node);\n    const store = readWorkerStore(node);\n\n    for (let i = 0; i < workers.length; i++) {\n        const key = String(i + 1);\n        const worker = workers[i];\n        const label = worker.name || worker.id || `Worker ${key}`;\n        const widgetName = `${DYNAMIC_WORKER_WIDGET_PREFIX}${key}`;\n        const initial = getWorkerInitialValue(store, key, worker.id, inputType, comboOptions);\n        let widget;\n\n        if (inputType === \"COMBO\" && Array.isArray(comboOptions) && comboOptions.length) {\n            widget = node.addWidget(\n                \"combo\",\n                widgetName,\n                initial,\n                (value) => {\n                    widget.value = value;\n                    serializeWorkerStoreFromWidgets(node, inputType, comboOptions);\n                },\n                { values: comboOptions }\n            );\n        } else if (inputType === \"INT\") {\n            widget = node.addWidget(\n                \"number\",\n                widgetName,\n                initial,\n                (value) => {\n                    widget.value = Math.trunc(normalizeNumber(value, 0));\n                    
serializeWorkerStoreFromWidgets(node, inputType, comboOptions);\n                },\n                { min: -Infinity, max: Infinity, step: 1, precision: 0 }\n            );\n        } else if (inputType === \"FLOAT\") {\n            widget = node.addWidget(\n                \"number\",\n                widgetName,\n                initial,\n                (value) => {\n                    widget.value = normalizeNumber(value, 0);\n                    serializeWorkerStoreFromWidgets(node, inputType, comboOptions);\n                },\n                { min: -Infinity, max: Infinity, step: 0.1, precision: 3 }\n            );\n        } else {\n            widget = node.addWidget(\n                \"string\",\n                widgetName,\n                initial,\n                (value) => {\n                    widget.value = value ?? \"\";\n                    serializeWorkerStoreFromWidgets(node, inputType, comboOptions);\n                },\n                {}\n            );\n        }\n\n        widget.label = label;\n        widget._dvWorkerId = worker.id;\n    }\n\n    serializeWorkerStoreFromWidgets(node, inputType, comboOptions);\n}\n\nfunction rebuildWidgets(node) {\n    hideRawWidgets(node);\n    const workers = node._dvWorkers || [];\n    const store = readWorkerStore(node);\n    const detected = detectTargetType(node);\n    const disconnected = !detected.connected;\n    const inputType = disconnected ? \"STRING\" : detected.type;\n    const comboOptions = disconnected ? 
null : detected.options;\n\n    if (disconnected) {\n        // Reset disconnected node back to the neutral default state.\n        setRawDefaultValue(node, \"\");\n        writeWorkerStore(node, { _type: \"STRING\" });\n    }\n\n    createDynamicDefaultWidget(node, inputType, comboOptions);\n    if (workers.length > 0) {\n        createWorkerWidgets(node, workers, inputType, comboOptions);\n    } else {\n        removeDynamicWorkerWidgets(node);\n        updateWorkerStoreTypeMetadata(node, inputType, comboOptions);\n    }\n\n    const size = node.computeSize();\n    size[0] = Math.max(size[0], node.size?.[0] || 0);\n    node.setSize(size);\n    if (node.setDirtyCanvas) node.setDirtyCanvas(true, true);\n}\n\nfunction refreshNodeWorkers(node, workers) {\n    if (!node || !node.graph) return;\n    node._dvWorkers = workers;\n    rebuildWidgets(node);\n}\n\nasync function refreshTrackedNodes(workers = null) {\n    const nextWorkers = workers || (await fetchWorkers());\n    for (const node of trackedNodes) {\n        refreshNodeWorkers(node, nextWorkers);\n    }\n}\n\nfunction attachWorkersChangedListener() {\n    if (workersChangedListenerAttached) return;\n    if (typeof window === \"undefined\" || typeof window.addEventListener !== \"function\") return;\n\n    window.addEventListener(WORKERS_CHANGED_EVENT, (event) => {\n        const changedWorkers = filterEnabledWorkers(event?.detail?.workers);\n        if (changedWorkers.length > 0 || Array.isArray(event?.detail?.workers)) {\n            void refreshTrackedNodes(changedWorkers);\n            return;\n        }\n        void refreshTrackedNodes();\n    });\n\n    workersChangedListenerAttached = true;\n}\n\napp.registerExtension({\n    name: \"Distributed.DistributedValue\",\n    async nodeCreated(node) {\n        if (node.comfyClass !== NODE_CLASS) return;\n\n        try {\n            attachWorkersChangedListener();\n            trackedNodes.add(node);\n            node._dvWorkers = await fetchWorkers();\n        
    rebuildWidgets(node);\n\n            const originalOnConnectionsChange = node.onConnectionsChange;\n            node.onConnectionsChange = function (type, index, connected, linkInfo, ioSlot) {\n                if (originalOnConnectionsChange) {\n                    originalOnConnectionsChange.call(this, type, index, connected, linkInfo, ioSlot);\n                }\n                if (type === 2 && index === 0) {\n                    setTimeout(() => rebuildWidgets(this), 20);\n                }\n            };\n\n            const originalConfigure = node.configure;\n            node.configure = function (data) {\n                const result = originalConfigure ? originalConfigure.call(this, data) : undefined;\n                setTimeout(() => rebuildWidgets(this), 20);\n                return result;\n            };\n\n            const originalOnRemoved = node.onRemoved;\n            node.onRemoved = function () {\n                trackedNodes.delete(this);\n                if (originalOnRemoved) {\n                    return originalOnRemoved.call(this);\n                }\n            };\n        } catch (error) {\n            console.error(\"Error in DistributedValue extension:\", error);\n        }\n    },\n});\n"
  },
  {
    "path": "web/executionUtils.js",
    "content": "import { api } from \"../../scripts/api.js\";\nimport { applyProbeResultToWorkerDot, findNodesByClass } from './workerUtils.js';\nimport { TIMEOUTS, NODE_CLASSES, generateUUID } from './constants.js';\nimport { checkAllWorkerStatuses, getWorkerUrl } from './workerLifecycle.js';\n\nexport function setupInterceptor(extension) {\n    api.queuePrompt = async (number, prompt, ...rest) => {\n        if (extension.isEnabled) {\n            const hasCollector = findNodesByClass(prompt.output, NODE_CLASSES.DISTRIBUTED_COLLECTOR).length > 0;\n            const hasDistUpscale = findNodesByClass(prompt.output, NODE_CLASSES.UPSCALE_DISTRIBUTED).length > 0;\n\n            if (hasCollector || hasDistUpscale) {\n                const result = await executeParallelDistributed(extension, prompt);\n                // Immediate status check for instant feedback\n                checkAllWorkerStatuses(extension);\n                // Another check after a short delay to catch state changes\n                setTimeout(() => checkAllWorkerStatuses(extension), TIMEOUTS.POST_ACTION_DELAY);\n                return result;\n            }\n        }\n        return extension.originalQueuePrompt(number, prompt, ...rest);\n    };\n}\n\nexport async function executeParallelDistributed(extension, promptWrapper) {\n    const traceExecutionId = `exec_${Date.now()}_${generateUUID().slice(0, 6)}`;\n    try {\n        const enabledWorkers = extension.enabledWorkers;\n        extension.log(`[exec:${traceExecutionId}] Starting distributed execution`, \"debug\");\n        \n        // Pre-flight health check on all enabled workers\n        const activeWorkers = await performPreflightCheck(extension, enabledWorkers);\n        \n        // Case: Enabled workers but all offline\n        if (activeWorkers.length === 0 && enabledWorkers.length > 0) {\n            extension.log(\"No active workers found. 
All enabled workers are offline.\");\n            if (extension.ui?.showToast) {\n                extension.ui.showToast(extension.app, \"error\", \"All Workers Offline\", \n                    `${enabledWorkers.length} worker(s) enabled but all are offline or unreachable. Check worker connections and try again.`, 5000);\n            }\n            // Fall back to master-only execution\n            return extension.originalQueuePrompt(0, promptWrapper);\n        }\n        \n        extension.log(`Pre-flight check: ${activeWorkers.length} of ${enabledWorkers.length} workers are active`, \"debug\");\n\n        // Check if master host might be unreachable by workers (cloudflare tunnel down)\n        const masterHost = extension.config?.master?.host || '';\n        const isCloudflareHost = /\\.(trycloudflare\\.com|cloudflare\\.dev)$/i.test(masterHost);\n\n        if (isCloudflareHost && activeWorkers.length > 0) {\n            // Try to verify if the cloudflare tunnel is actually up\n            try {\n                const testUrl = `${window.location.protocol}//${masterHost}/prompt`;\n                const response = await fetch(testUrl, {\n                    method: 'GET',\n                    mode: 'cors',\n                    cache: 'no-cache',\n                    signal: AbortSignal.timeout(3000) // 3 second timeout\n                });\n\n                if (!response.ok) {\n                    throw new Error('Master not reachable');\n                }\n            } catch (error) {\n                // Cloudflare tunnel appears to be down\n                extension.log(`Master host ${masterHost} is not reachable - cloudflare tunnel may be down`, \"error\");\n\n                if (extension.ui?.showCloudflareWarning) {\n                    extension.ui.showCloudflareWarning(extension, masterHost);\n                }\n\n                // Stop execution - workers won't be able to send results back\n                extension.log(\"Blocking execution - workers 
cannot reach master at cloudflare domain\", \"error\");\n                return null; // This will prevent the workflow from running\n            }\n        }\n\n        const queueResponse = await extension.api.queueDistributed({\n            prompt: promptWrapper.output,\n            workflow: promptWrapper.workflow,\n            enabled_worker_ids: activeWorkers.map((worker) => worker.id),\n            workers: activeWorkers.map((worker) => ({ id: worker.id })),\n            client_id: api.clientId,\n            delegate_master: Boolean(extension.config?.settings?.master_delegate_only),\n            auto_prepare: true,\n            trace_execution_id: traceExecutionId,\n        });\n        if (queueResponse?.prompt_id) {\n            extension.log(\n                `[exec:${traceExecutionId}] Distributed queue accepted by backend (prompt_id=${queueResponse.prompt_id}, workers=${queueResponse.worker_count ?? activeWorkers.length})`,\n                \"debug\"\n            );\n            return queueResponse;\n        }\n        throw new Error(\n            `[exec:${traceExecutionId}] Backend did not return a prompt_id for distributed queue.`\n        );\n    } catch (error) {\n        extension.log(`[exec:${traceExecutionId}] Distributed execution failed: ${error.message}`, \"error\");\n        if (extension.ui?.showToast) {\n            extension.ui.showToast(extension.app, \"error\", \"Distributed Failed\", error.message, 5000);\n        }\n        return null;\n    }\n}\n\nexport async function performPreflightCheck(extension, workers) {\n    if (workers.length === 0) return [];\n    \n    extension.log(`Performing pre-flight health check on ${workers.length} workers...`, \"debug\");\n    const startTime = Date.now();\n    \n    const checkPromises = workers.map(async (worker) => {\n        const workerUrl = getWorkerUrl(extension, worker);\n        \n        extension.log(`Pre-flight checking ${worker.name} at: ${workerUrl}`, \"debug\");\n        \n        
try {\n            const probeResult = await extension.api.probeWorker(workerUrl, TIMEOUTS.STATUS_CHECK);\n\n            if (probeResult.ok) {\n                extension.log(`Worker ${worker.name} is active`, \"debug\");\n                return { worker, active: true };\n            } else {\n                extension.log(`Worker ${worker.name} returned ${probeResult.status}`, \"debug\");\n                return { worker, active: false };\n            }\n        } catch (error) {\n            if (error?.name === 'AbortError') {\n                extension.log(`Worker ${worker.name} pre-flight check timed out; assuming active`, \"debug\");\n                return { worker, active: true, uncertain: true };\n            }\n            extension.log(`Worker ${worker.name} is offline or unreachable: ${error.message}`, \"debug\");\n            return { worker, active: false };\n        }\n    });\n    \n    const results = await Promise.all(checkPromises);\n    const activeWorkers = results.filter(r => r.active).map(r => r.worker);\n    \n    const elapsed = Date.now() - startTime;\n    extension.log(`Pre-flight check completed in ${elapsed}ms. Active workers: ${activeWorkers.length}/${workers.length}`, \"debug\");\n    \n    // Update UI status indicators for inactive workers\n    results.filter(r => !r.active).forEach(r => {\n        applyProbeResultToWorkerDot(r.worker.id, { ok: false });\n    });\n    \n    return activeWorkers;\n}\n"
  },
  {
    "path": "web/image_batch_divider.js",
    "content": "import { app } from \"/scripts/app.js\";\n\n// Configuration for each batch divider node type\nconst BATCH_DIVIDER_NODES = {\n    \"ImageBatchDivider\": { outputPrefix: \"batch_\", outputType: \"IMAGE\" },\n    \"AudioBatchDivider\": { outputPrefix: \"audio_\", outputType: \"AUDIO\" }\n};\n\napp.registerExtension({\n    name: \"Distributed.BatchDividers\",\n    async nodeCreated(node) {\n        const config = BATCH_DIVIDER_NODES[node.comfyClass];\n        if (!config) return;\n\n        try {\n            const updateOutputs = () => {\n                if (!node.widgets) return;\n\n                const divideByWidget = node.widgets.find(w => w.name === \"divide_by\");\n                if (!divideByWidget) return;\n\n                const divideBy = parseInt(divideByWidget.value, 10) || 1;\n                const totalOutputs = divideBy;\n\n                // Ensure outputs array exists\n                if (!node.outputs) node.outputs = [];\n\n                // Remove excess outputs\n                while (node.outputs.length > totalOutputs) {\n                    node.removeOutput(node.outputs.length - 1);\n                }\n\n                // Add missing outputs\n                while (node.outputs.length < totalOutputs) {\n                    const outputIndex = node.outputs.length + 1;\n                    node.addOutput(`${config.outputPrefix}${outputIndex}`, config.outputType);\n                }\n\n                if (node.setDirty) node.setDirty(true);\n            };\n\n            // Initial update with delay to allow workflow loading\n            setTimeout(updateOutputs, 200);\n\n            // Find the widget and set up responsive handlers\n            const divideByWidget = node.widgets.find(w => w.name === \"divide_by\");\n            if (divideByWidget) {\n                const originalCallback = divideByWidget.callback;\n                divideByWidget.callback = (value) => {\n                    updateOutputs();\n                 
   if (originalCallback) originalCallback.call(divideByWidget, value);\n                };\n\n                if (divideByWidget.inputEl) {\n                    divideByWidget.inputEl.addEventListener('input', updateOutputs);\n                }\n\n                const observer = new MutationObserver(updateOutputs);\n                if (divideByWidget.element) {\n                    observer.observe(divideByWidget.element, { attributes: true, childList: true, subtree: true });\n                }\n\n                node._batchDividerCleanup = () => {\n                    observer.disconnect();\n                    if (divideByWidget.inputEl) {\n                        divideByWidget.inputEl.removeEventListener('input', updateOutputs);\n                    }\n                    divideByWidget.callback = originalCallback;\n                };\n            }\n\n            const originalConfigure = node.configure;\n            node.configure = function(data) {\n                const result = originalConfigure ? originalConfigure.call(this, data) : undefined;\n                updateOutputs();\n                return result;\n            };\n        } catch (error) {\n            console.error(`Error in ${node.comfyClass} extension:`, error);\n        }\n    },\n\n    nodeBeforeRemove(node) {\n        if (BATCH_DIVIDER_NODES[node.comfyClass] && node._batchDividerCleanup) {\n            node._batchDividerCleanup();\n        }\n    }\n});\n"
  },
  {
    "path": "web/main.js",
    "content": "import { app } from \"../../scripts/app.js\";\nimport { api } from \"../../scripts/api.js\";\nimport { DistributedUI } from './ui.js';\n\nimport { createStateManager } from './stateManager.js';\nimport { createApiClient } from './apiClient.js';\nimport { renderSidebarContent, updateWorkerCard } from './sidebarRenderer.js';\nimport { handleInterruptWorkers, handleClearMemory } from './workerUtils.js';\nimport { setupInterceptor } from './executionUtils.js';\nimport { PULSE_ANIMATION_CSS, TIMEOUTS, STATUS_COLORS } from './constants.js';\nimport { updateTunnelUIElements, refreshTunnelStatus, handleTunnelToggle } from './tunnelManager.js';\nimport { checkAllWorkerStatuses, checkWorkerStatus, loadManagedWorkers } from './workerLifecycle.js';\nimport { detectMasterIP } from './masterDetection.js';\nimport { parseHostInput, getMasterUrl as buildMasterUrl } from './urlUtils.js';\n\nconst WORKERS_CHANGED_EVENT = \"distributed:workers-changed\";\n\nclass DistributedExtension {\n    constructor() {\n        this.config = null;\n        this.originalQueuePrompt = api.queuePrompt.bind(api);\n        this.logAutoRefreshInterval = null;\n        this.masterSettingsExpanded = false;\n        this.app = app; // Store app reference for toast notifications\n        this.tunnelStatus = { status: \"unknown\" };\n        this.tunnelElements = {};\n        \n        // Initialize centralized state\n        this.state = createStateManager();\n        \n        // Initialize UI component factory\n        this.ui = new DistributedUI();\n        \n        // Initialize API client\n        this.api = createApiClient(window.location.origin);\n        \n        // Initialize status check timeout reference\n        this.statusCheckTimeout = null;\n        \n        // Initialize abort controller for status checks\n        this.statusCheckAbortController = null;\n        this.themeMutationObserver = null;\n\n        // Inject CSS for pulsing animation\n        
this.injectStyles();\n\n        this.loadConfig().then(async () => {\n            this.registerSidebarTab();\n            this.setupInterceptor();\n            // Don't start polling until panel opens\n            // this.startStatusChecking();\n            loadManagedWorkers(this);\n            // Detect master IP after everything is set up\n            this.detectMasterIP();\n            // Listen for Nodes 2.0 setting changes (once, for the lifetime of the extension)\n            this._setupNodes2Listener();\n        });\n    }\n\n    // Debug logging helpers\n    log(message, level = \"info\") {\n        if (level === \"debug\" && !this.config?.settings?.debug) return;\n        if (level === \"error\") {\n            console.error(`[Distributed] ${message}`);\n        } else {\n            console.log(`[Distributed] ${message}`);\n        }\n    }\n\n    injectStyles() {\n        const styleId = 'distributed-styles';\n        if (!document.getElementById(styleId)) {\n            const style = document.createElement('style');\n            style.id = styleId;\n            style.textContent = PULSE_ANIMATION_CSS;\n            document.head.appendChild(style);\n        }\n\n        const fileStyleId = 'distributed-file-styles';\n        if (!document.getElementById(fileStyleId)) {\n            const style = document.createElement('style');\n            style.id = fileStyleId;\n            fetch(new URL('./distributed.css', import.meta.url))\n                .then((response) => response.text())\n                .then((cssText) => {\n                    style.textContent = cssText;\n                })\n                .catch((error) => {\n                    this.log(`Failed to load distributed.css: ${error.message}`, \"error\");\n                });\n            document.head.appendChild(style);\n        }\n    }\n\n    // --- State & Config Management (Single Source of Truth) ---\n\n    get enabledWorkers() {\n        return this.config?.workers?.filter(w => 
w.enabled) || [];\n    }\n\n    get isEnabled() {\n        return this.enabledWorkers.length > 0;\n    }\n\n    isMasterParticipationEnabled() {\n        return !Boolean(this.config?.settings?.master_delegate_only);\n    }\n\n    isMasterFallbackActive() {\n        return Boolean(this.config?.settings?.master_delegate_only) && this.enabledWorkers.length === 0;\n    }\n\n    isMasterParticipating() {\n        return this.isMasterParticipationEnabled() || this.isMasterFallbackActive();\n    }\n\n    async updateMasterParticipation(enabled) {\n        if (!this.config?.settings) {\n            this.config.settings = {};\n        }\n        const delegateOnly = !enabled;\n        if (this.config.settings.master_delegate_only === delegateOnly) {\n            return;\n        }\n\n        await this._updateSetting('master_delegate_only', delegateOnly);\n\n        if (this.panelElement) {\n            renderSidebarContent(this, this.panelElement);\n        }\n    }\n\n    async loadConfig() {\n        try {\n            this.config = await this.api.getConfig();\n            this.log(\"Loaded config: \" + JSON.stringify(this.config), \"debug\");\n            \n            // Ensure default flag values\n            if (!this.config.settings) {\n                this.config.settings = {};\n            }\n            if (this.config.settings.has_auto_populated_workers === undefined) {\n                this.config.settings.has_auto_populated_workers = false;\n            }\n            \n            // Load stored master CUDA device\n            this.masterCudaDevice = this.config?.master?.cuda_device ?? 
undefined;\n            \n            // Sync to state\n            if (this.config.workers) {\n                this.config.workers.forEach(w => {\n                    this.state.updateWorker(w.id, { enabled: w.enabled });\n                });\n            }\n            this._emitWorkersChanged();\n        } catch (error) {\n            this.log(\"Failed to load config: \" + error.message, \"error\");\n            this.config = { workers: [], settings: { has_auto_populated_workers: false } };\n        }\n    }\n\n    _emitWorkersChanged() {\n        if (typeof window === \"undefined\" || typeof window.dispatchEvent !== \"function\") {\n            return;\n        }\n        window.dispatchEvent(new CustomEvent(WORKERS_CHANGED_EVENT, {\n            detail: { workers: this.config?.workers || [] },\n        }));\n    }\n\n    _applyMasterHost(host) {\n        if (!host || !this.config) return;\n        if (!this.config.master) this.config.master = {};\n        this.config.master.host = host;\n        const hostInput = document.getElementById('master-host');\n        if (hostInput) {\n            hostInput.value = host;\n        }\n    }\n\n    _parseHostInput(value) {\n        return parseHostInput(value);\n    }\n\n    updateTunnelUIElements(isRunning, isStarting) {\n        return updateTunnelUIElements(this, isRunning, isStarting);\n    }\n\n    async refreshTunnelStatus() {\n        return refreshTunnelStatus(this);\n    }\n\n    async handleTunnelToggle(button) {\n        return handleTunnelToggle(this, button);\n    }\n\n    async updateWorkerEnabled(workerId, enabled) {\n        const worker = this.config.workers.find(w => w.id === workerId);\n        if (worker) {\n            worker.enabled = enabled;\n            this.state.updateWorker(workerId, { enabled });\n            this._emitWorkersChanged();\n\n            // Immediately update status dot based on enabled state\n            const statusDot = document.getElementById(`status-${workerId}`);\n         
   if (statusDot) {\n                if (enabled) {\n                    // Enabled: Start with checking state and trigger check\n                    this.ui.updateStatusDot(workerId, STATUS_COLORS.OFFLINE_RED, \"Checking status...\", false);\n                    setTimeout(() => checkWorkerStatus(this, worker), TIMEOUTS.STATUS_CHECK_DELAY);\n                } else {\n                    // Disabled: Set to gray\n                    this.ui.updateStatusDot(workerId, STATUS_COLORS.DISABLED_GRAY, \"Disabled\", false);\n                }\n            }\n        }\n        \n        try {\n            await this.api.updateWorker(workerId, { enabled });\n        } catch (error) {\n            this.log(\"Error updating worker: \" + error.message, \"error\");\n        }\n\n        if (this.panelElement) {\n            await renderSidebarContent(this, this.panelElement);\n        }\n    }\n\n    async _updateSetting(key, value) {\n        // Update local config\n        if (!this.config.settings) {\n            this.config.settings = {};\n        }\n        this.config.settings[key] = value;\n        \n        try {\n            await this.api.updateSetting(key, value);\n\n            const prettyKey = key.replace(/_/g, ' ').replace(/\\b\\w/g, l => l.toUpperCase());\n            let detail;\n            if (key === 'worker_timeout_seconds') {\n                const secs = parseInt(value, 10);\n                detail = `Worker Timeout set to ${Number.isFinite(secs) ? secs : value}s`;\n            } else if (typeof value === 'boolean') {\n                detail = `${prettyKey} ${value ? 
'enabled' : 'disabled'}`;\n            } else {\n                detail = `${prettyKey} set to ${value}`;\n            }\n\n            app.extensionManager.toast.add({\n                severity: \"success\",\n                summary: \"Setting Updated\",\n                detail,\n                life: 2000\n            });\n        } catch (error) {\n            this.log(`Error updating setting '${key}': ${error.message}`, \"error\");\n            app.extensionManager.toast.add({\n                severity: \"error\",\n                summary: \"Setting Update Failed\",\n                detail: error.message,\n                life: 3000\n            });\n        }\n    }\n\n    // --- UI Rendering ---\n\n    registerSidebarTab() {\n        app.extensionManager.registerSidebarTab({\n            id: \"distributed\",\n            icon: \"pi pi-server\",\n            title: \"Distributed\",\n            tooltip: \"Distributed Control Panel\",\n            type: \"custom\",\n            render: (el) => {\n                this.panelElement = el;\n                this.onPanelOpen();\n                renderSidebarContent(this, el);\n                this._applyNodes2Style();\n                this._applyThemeToneClass();\n            },\n            destroy: () => {\n                this.onPanelClose();\n            }\n        });\n    }\n    \n    onPanelOpen() {\n        this.log(\"Panel opened - starting status polling\", \"debug\");\n        if (!this.statusCheckTimeout) {\n            checkAllWorkerStatuses(this);\n        }\n        this._startThemeObserver();\n        this._applyThemeToneClass();\n    }\n\n    onPanelClose() {\n        this.log(\"Panel closed - stopping status polling\", \"debug\");\n\n        // Cancel any pending status checks\n        if (this.statusCheckAbortController) {\n            this.statusCheckAbortController.abort();\n            this.statusCheckAbortController = null;\n        }\n\n        // Clear the timeout\n        if 
(this.statusCheckTimeout) {\n            clearTimeout(this.statusCheckTimeout);\n            this.statusCheckTimeout = null;\n        }\n        this._stopThemeObserver();\n\n        this.panelElement = null;\n    }\n\n    _applyNodes2Style() {\n        if (!this.panelElement) return;\n        const enabled = app.ui.settings.getSettingValue(\"Comfy.VueNodes.Enabled\") ?? false;\n        this.panelElement.classList.toggle('distributed-panel--nodes2', Boolean(enabled));\n    }\n\n    _parseColorToRgba(colorValue) {\n        if (!colorValue || typeof colorValue !== \"string\") {\n            return null;\n        }\n\n        const color = colorValue.trim().toLowerCase();\n        if (!color || color === \"transparent\") {\n            return null;\n        }\n\n        const rgbMatch = color.match(/^rgba?\\(([^)]+)\\)$/);\n        if (rgbMatch) {\n            const parts = rgbMatch[1].split(\",\").map((part) => Number(part.trim()));\n            if (parts.length < 3 || parts.slice(0, 3).some((part) => Number.isNaN(part))) {\n                return null;\n            }\n            const alpha = parts.length >= 4 && Number.isFinite(parts[3]) ? parts[3] : 1;\n            return {\n                r: Math.max(0, Math.min(255, parts[0])),\n                g: Math.max(0, Math.min(255, parts[1])),\n                b: Math.max(0, Math.min(255, parts[2])),\n                a: Math.max(0, Math.min(1, alpha)),\n            };\n        }\n\n        const hexMatch = color.match(/^#([0-9a-f]{3}|[0-9a-f]{6})$/i);\n        if (hexMatch) {\n            const value = hexMatch[1];\n            const expanded = value.length === 3\n                ? 
value.split(\"\").map((c) => `${c}${c}`).join(\"\")\n                : value;\n            const r = parseInt(expanded.slice(0, 2), 16);\n            const g = parseInt(expanded.slice(2, 4), 16);\n            const b = parseInt(expanded.slice(4, 6), 16);\n            return { r, g, b, a: 1 };\n        }\n\n        return null;\n    }\n\n    _isPanelLightTheme() {\n        const fallbackLight = window.matchMedia?.(\"(prefers-color-scheme: light)\")?.matches || false;\n        if (!this.panelElement) {\n            return fallbackLight;\n        }\n\n        let current = this.panelElement;\n        while (current) {\n            const bg = getComputedStyle(current).backgroundColor;\n            const rgba = this._parseColorToRgba(bg);\n            if (rgba && rgba.a > 0.02) {\n                // Relative luminance approximation (0..1)\n                const luminance = (0.2126 * rgba.r + 0.7152 * rgba.g + 0.0722 * rgba.b) / 255;\n                return luminance > 0.58;\n            }\n            current = current.parentElement;\n        }\n\n        return fallbackLight;\n    }\n\n    _applyThemeToneClass() {\n        if (!this.panelElement) {\n            return;\n        }\n        this.panelElement.classList.toggle(\"distributed-panel--light\", this._isPanelLightTheme());\n    }\n\n    _startThemeObserver() {\n        if (this.themeMutationObserver) {\n            return;\n        }\n\n        this.themeMutationObserver = new MutationObserver(() => {\n            this._applyThemeToneClass();\n        });\n        this.themeMutationObserver.observe(document.documentElement, {\n            attributes: true,\n            attributeFilter: [\"class\", \"style\"],\n        });\n        if (document.body) {\n            this.themeMutationObserver.observe(document.body, {\n                attributes: true,\n                attributeFilter: [\"class\", \"style\"],\n            });\n        }\n    }\n\n    _stopThemeObserver() {\n        if (!this.themeMutationObserver) 
{\n            return;\n        }\n        this.themeMutationObserver.disconnect();\n        this.themeMutationObserver = null;\n    }\n\n    _setupNodes2Listener() {\n        app.ui.settings.addEventListener(\"Comfy.VueNodes.Enabled.change\", (e) => {\n            const enabled = e.detail?.value ?? false;\n            if (this.panelElement) {\n                this.panelElement.classList.toggle('distributed-panel--nodes2', Boolean(enabled));\n                this._applyThemeToneClass();\n            }\n        });\n    }\n\n    // --- Core Logic & Execution ---\n\n    setupInterceptor() {\n        setupInterceptor(this);\n    }\n\n    updateWorkerCard(workerId, newStatus) {\n        return updateWorkerCard(this, workerId, newStatus);\n    }\n\n    /**\n     * Cleanup method to stop intervals and listeners\n     */\n    cleanup() {\n        if (this.logAutoRefreshInterval) {\n            clearInterval(this.logAutoRefreshInterval);\n            this.logAutoRefreshInterval = null;\n        }\n\n        if (this.statusCheckTimeout) {\n            clearTimeout(this.statusCheckTimeout);\n            this.statusCheckTimeout = null;\n        }\n\n        this.log(\"Cleaned up intervals\", \"debug\");\n    }\n\n    getMasterUrl() {\n        return buildMasterUrl(this.config, window.location, (message, level) => this.log(message, level));\n    }\n\n    async detectMasterIP() {\n        return detectMasterIP(this);\n    }\n\n    _handleInterruptWorkers(button) {\n        return handleInterruptWorkers(this, button);\n    }\n\n    _handleClearMemory(button) {\n        return handleClearMemory(this, button);\n    }\n}\n\napp.registerExtension({\n    name: \"Distributed.Panel\",\n    async setup() {\n        new DistributedExtension();\n    }\n});\n"
  },
  {
    "path": "web/masterDetection.js",
    "content": "import { generateUUID } from './constants.js';\n\nexport async function detectMasterIP(extension) {\n    try {\n        const isRunpod = window.location.hostname.endsWith('.proxy.runpod.net');\n        if (isRunpod) {\n            extension.log(\"Detected Runpod environment\", \"info\");\n        }\n\n        const data = await extension.api.getNetworkInfo();\n        extension.log(\"Network info: \" + JSON.stringify(data), \"debug\");\n\n        if (data.cuda_device !== null && data.cuda_device !== undefined) {\n            extension.masterCudaDevice = data.cuda_device;\n\n            if (!extension.config.master) {\n                extension.config.master = {};\n            }\n            if (extension.config.master.cuda_device === undefined || extension.config.master.cuda_device !== data.cuda_device) {\n                extension.config.master.cuda_device = data.cuda_device;\n                try {\n                    await extension.api.updateMaster({ cuda_device: data.cuda_device });\n                    extension.log(`Stored master CUDA device: ${data.cuda_device}`, \"debug\");\n                } catch (error) {\n                    extension.log(`Error storing master CUDA device: ${error.message}`, \"error\");\n                }\n            }\n\n            extension.ui.updateMasterDisplay(extension);\n        }\n\n        if (data.cuda_device_count > 0) {\n            extension.cudaDeviceCount = data.cuda_device_count;\n            extension.log(`Detected ${extension.cudaDeviceCount} CUDA devices`, \"info\");\n\n            const shouldAutoPopulate =\n                !extension.config.settings.has_auto_populated_workers &&\n                (!extension.config.workers || extension.config.workers.length === 0);\n\n            extension.log(`Auto-population check: has_populated=${extension.config.settings.has_auto_populated_workers}, workers=${extension.config.workers ? 
extension.config.workers.length : 'null'}, should_populate=${shouldAutoPopulate}`, \"debug\");\n\n            if (shouldAutoPopulate) {\n                extension.log(`Auto-populating workers based on ${extension.cudaDeviceCount} CUDA devices (excluding master on CUDA ${extension.masterCudaDevice})`, \"info\");\n\n                const newWorkers = [];\n                let workerNum = 1;\n                let portOffset = 0;\n\n                for (let i = 0; i < extension.cudaDeviceCount; i++) {\n                    if (i === extension.masterCudaDevice) {\n                        extension.log(`Skipping CUDA ${i} (used by master)`, \"debug\");\n                        continue;\n                    }\n\n                    const worker = {\n                        id: generateUUID(),\n                        name: `Worker ${workerNum}`,\n                        host: isRunpod ? null : \"localhost\",\n                        port: 8189 + portOffset,\n                        cuda_device: i,\n                        enabled: true,\n                        extra_args: isRunpod ? 
\"--listen\" : \"\",\n                    };\n                    newWorkers.push(worker);\n                    workerNum += 1;\n                    portOffset += 1;\n                }\n\n                if (newWorkers.length > 0) {\n                    extension.log(`Auto-populating ${newWorkers.length} workers`, \"info\");\n\n                    extension.config.workers = newWorkers;\n                    extension.config.settings.has_auto_populated_workers = true;\n\n                    for (const worker of newWorkers) {\n                        try {\n                            await extension.api.updateWorker(worker.id, worker);\n                        } catch (error) {\n                            extension.log(`Error saving worker ${worker.name}: ${error.message}`, \"error\");\n                        }\n                    }\n\n                    try {\n                        await extension.api.updateSetting('has_auto_populated_workers', true);\n                    } catch (error) {\n                        extension.log(`Error saving auto-population flag: ${error.message}`, \"error\");\n                    }\n\n                    extension.log(`Auto-populated ${newWorkers.length} workers and saved config`, \"info\");\n\n                    if (extension.app.extensionManager?.toast) {\n                        extension.app.extensionManager.toast.add({\n                            severity: \"success\",\n                            summary: \"Workers Auto-populated\",\n                            detail: `Automatically created ${newWorkers.length} workers based on detected CUDA devices`,\n                            life: 5000,\n                        });\n                    }\n\n                    await extension.loadConfig();\n                } else {\n                    extension.log(\"No additional CUDA devices available for workers (all used by master)\", \"debug\");\n                }\n            }\n        }\n\n        if 
(extension.config?.master?.host) {\n            extension.log(`Master host already configured: ${extension.config.master.host}`, \"debug\");\n            return;\n        }\n\n        if (isRunpod) {\n            const runpodHost = window.location.hostname;\n            extension.log(`Setting Runpod master host: ${runpodHost}`, \"info\");\n\n            await extension.api.updateMaster({ host: runpodHost });\n\n            if (!extension.config.master) {\n                extension.config.master = {};\n            }\n            extension.config.master.host = runpodHost;\n\n            if (extension.app.extensionManager?.toast) {\n                extension.app.extensionManager.toast.add({\n                    severity: \"info\",\n                    summary: \"Runpod Auto-Configuration\",\n                    detail: `Master host set to ${runpodHost} with --listen flag for workers`,\n                    life: 5000,\n                });\n            }\n            return;\n        }\n\n        if (data.recommended_ip && data.recommended_ip !== '127.0.0.1') {\n            extension.log(`Auto-detected master IP: ${data.recommended_ip}`, \"info\");\n\n            await extension.api.updateMaster({ host: data.recommended_ip });\n\n            if (!extension.config.master) {\n                extension.config.master = {};\n            }\n            extension.config.master.host = data.recommended_ip;\n        }\n    } catch (error) {\n        extension.log(\"Error detecting master IP: \" + error.message, \"error\");\n    }\n}\n"
  },
  {
    "path": "web/sidebar/actionsSection.js",
    "content": "import { BUTTON_STYLES } from \"../constants.js\";\n\nexport function renderActionsSection(extension) {\n    const actionsSection = document.createElement(\"div\");\n    actionsSection.style.cssText =\n        \"padding-top: 10px; margin-bottom: 15px; border-top: 1px solid var(--dist-divider, #444);\";\n\n    const buttonRow = document.createElement(\"div\");\n    buttonRow.style.cssText = \"display: flex; gap: 8px;\";\n\n    const clearMemButton = extension.ui.createButtonHelper(\n        \"Clear Worker VRAM\",\n        (event) => extension._handleClearMemory(event.target),\n        BUTTON_STYLES.clearMemory\n    );\n    clearMemButton.title = \"Clear VRAM on all enabled worker GPUs (not master)\";\n    clearMemButton.style.cssText = BUTTON_STYLES.base + \" flex: 1;\" + BUTTON_STYLES.clearMemory;\n\n    const interruptButton = extension.ui.createButtonHelper(\n        \"Interrupt Workers\",\n        (event) => extension._handleInterruptWorkers(event.target),\n        BUTTON_STYLES.interrupt\n    );\n    interruptButton.title = \"Cancel/interrupt execution on all enabled worker GPUs\";\n    interruptButton.style.cssText = BUTTON_STYLES.base + \" flex: 1;\" + BUTTON_STYLES.interrupt;\n\n    buttonRow.appendChild(clearMemButton);\n    buttonRow.appendChild(interruptButton);\n    actionsSection.appendChild(buttonRow);\n    return actionsSection;\n}\n"
  },
  {
    "path": "web/sidebar/settingsSection.js",
    "content": "import { createCheckboxSetting, createNumberSetting } from \"../ui/buttonHelpers.js\";\n\nexport function renderSettingsSection(extension) {\n    const settingsSection = document.createElement(\"div\");\n    settingsSection.style.cssText = \"border-top: 1px solid var(--dist-divider, #444); margin-bottom: 10px;\";\n\n    const settingsToggleArea = document.createElement(\"div\");\n    settingsToggleArea.style.cssText = \"padding: 16.5px 0; cursor: pointer; user-select: none;\";\n\n    const settingsHeader = document.createElement(\"div\");\n    settingsHeader.style.cssText = \"display: flex; align-items: center; justify-content: space-between;\";\n\n    const workerSettingsTitle = document.createElement(\"h4\");\n    workerSettingsTitle.textContent = \"Settings\";\n    workerSettingsTitle.style.cssText = \"margin: 0; font-size: 14px;\";\n\n    const workerSettingsToggle = document.createElement(\"span\");\n    workerSettingsToggle.textContent = \"▶\";\n    workerSettingsToggle.style.cssText =\n        \"font-size: 12px; color: var(--dist-settings-arrow, #888); transition: all 0.2s ease;\";\n\n    settingsHeader.appendChild(workerSettingsTitle);\n    settingsHeader.appendChild(workerSettingsToggle);\n    settingsToggleArea.appendChild(settingsHeader);\n\n    settingsToggleArea.onmouseover = () => {\n        workerSettingsToggle.style.color = \"var(--dist-settings-arrow-hover, #fff)\";\n    };\n    settingsToggleArea.onmouseout = () => {\n        workerSettingsToggle.style.color = \"var(--dist-settings-arrow, #888)\";\n    };\n\n    const settingsSeparator = document.createElement(\"div\");\n    settingsSeparator.style.cssText = \"border-bottom: 1px solid var(--dist-divider, #444); margin: 0;\";\n\n    const settingsContent = document.createElement(\"div\");\n    settingsContent.style.cssText =\n        \"max-height: 0; overflow: hidden; opacity: 0; transition: max-height 0.3s ease, opacity 0.3s ease;\";\n\n    const settingsDiv = 
document.createElement(\"div\");\n    settingsDiv.style.cssText =\n        \"display: grid; grid-template-columns: 1fr auto; row-gap: 10px; column-gap: 10px; padding-top: 10px; align-items: center;\";\n\n    let settingsExpanded = false;\n    settingsToggleArea.onclick = () => {\n        settingsExpanded = !settingsExpanded;\n        if (settingsExpanded) {\n            settingsContent.style.maxHeight = \"200px\";\n            settingsContent.style.opacity = \"1\";\n            workerSettingsToggle.style.transform = \"rotate(90deg)\";\n            settingsSeparator.style.display = \"none\";\n        } else {\n            settingsContent.style.maxHeight = \"0\";\n            settingsContent.style.opacity = \"0\";\n            workerSettingsToggle.style.transform = \"rotate(0deg)\";\n            settingsSeparator.style.display = \"block\";\n        }\n    };\n\n    const generalLabel = document.createElement(\"div\");\n    generalLabel.textContent = \"GENERAL\";\n    generalLabel.style.cssText =\n        \"grid-column: 1 / span 2; font-size: 11px; color: var(--dist-muted-text, #888); letter-spacing: 0.06em; padding-top: 2px;\";\n\n    const timeoutsLabel = document.createElement(\"div\");\n    timeoutsLabel.textContent = \"TIMEOUTS\";\n    timeoutsLabel.style.cssText =\n        \"grid-column: 1 / span 2; font-size: 11px; color: var(--dist-muted-text, #888); letter-spacing: 0.06em; padding-top: 4px;\";\n\n    settingsDiv.appendChild(generalLabel);\n    settingsDiv.appendChild(\n        createCheckboxSetting(\n            \"setting-debug\",\n            \"Debug Mode\",\n            \"Enable verbose logging in the browser console.\",\n            extension.config?.settings?.debug || false,\n            (event) => extension._updateSetting(\"debug\", event.target.checked)\n        )\n    );\n    settingsDiv.appendChild(\n        createCheckboxSetting(\n            \"setting-auto-launch\",\n            \"Auto-launch Local Workers on Startup\",\n            \"Start local 
worker processes automatically when the master starts.\",\n            extension.config?.settings?.auto_launch_workers || false,\n            (event) => extension._updateSetting(\"auto_launch_workers\", event.target.checked)\n        )\n    );\n    settingsDiv.appendChild(\n        createCheckboxSetting(\n            \"setting-stop-on-exit\",\n            \"Stop Local Workers on Master Exit\",\n            \"Stop local worker processes automatically when the master exits.\",\n            extension.config?.settings?.stop_workers_on_master_exit !== false,\n            (event) => extension._updateSetting(\"stop_workers_on_master_exit\", event.target.checked)\n        )\n    );\n    settingsDiv.appendChild(timeoutsLabel);\n    settingsDiv.appendChild(\n        createNumberSetting(\n            \"setting-worker-timeout\",\n            \"Worker Timeout\",\n            \"Seconds without a heartbeat before a worker is considered timed out. Default 60.\",\n            extension.config?.settings?.worker_timeout_seconds ?? 60,\n            10,\n            1,\n            (event) => {\n                const value = parseInt(event.target.value, 10);\n                if (!Number.isFinite(value) || value <= 0) {\n                    return;\n                }\n                extension._updateSetting(\"worker_timeout_seconds\", value);\n            }\n        )\n    );\n\n    settingsContent.appendChild(settingsDiv);\n    settingsSection.appendChild(settingsToggleArea);\n    settingsSection.appendChild(settingsSeparator);\n    settingsSection.appendChild(settingsContent);\n    return settingsSection;\n}\n"
  },
  {
    "path": "web/sidebar/workersSection.js",
    "content": "import { addNewWorker } from \"../workerSettings.js\";\n\nexport function renderWorkersSection(extension) {\n    const workersSection = document.createElement(\"div\");\n    workersSection.style.cssText = \"flex: 1; overflow-y: auto; margin-bottom: 15px;\";\n\n    const workersList = document.createElement(\"div\");\n    const workers = extension.config?.workers || [];\n\n    if (workers.length === 0) {\n        const blueprintDiv = extension.ui.renderEntityCard(\n            \"blueprint\",\n            { onClick: () => addNewWorker(extension) },\n            extension\n        );\n        workersList.appendChild(blueprintDiv);\n    }\n\n    workers.forEach((worker) => {\n        const workerCard = extension.ui.renderEntityCard(\"worker\", worker, extension);\n        workersList.appendChild(workerCard);\n    });\n\n    workersSection.appendChild(workersList);\n\n    if (workers.length > 0) {\n        const addWorkerDiv = extension.ui.renderEntityCard(\n            \"add\",\n            { onClick: () => addNewWorker(extension) },\n            extension\n        );\n        workersSection.appendChild(addWorkerDiv);\n    }\n\n    return workersSection;\n}\n"
  },
  {
    "path": "web/sidebarRenderer.js",
    "content": "import { STATUS_COLORS } from './constants.js';\nimport { checkAllWorkerStatuses, loadManagedWorkers, updateWorkerControls } from './workerLifecycle.js';\nimport { renderActionsSection } from './sidebar/actionsSection.js';\nimport { renderSettingsSection } from './sidebar/settingsSection.js';\nimport { renderWorkersSection } from './sidebar/workersSection.js';\n\nexport function updateWorkerCard(extension, workerId, newStatus = {}) {\n    const card = document.querySelector(`[data-worker-id=\"${workerId}\"]`);\n    if (!card) {\n        return false;\n    }\n\n    const worker = extension.config?.workers?.find((w) => w.id === workerId);\n    if (!worker) {\n        return false;\n    }\n\n    const workerState = extension.state.getWorker(workerId);\n    const isLaunching = Boolean(workerState?.launching);\n\n    if (isLaunching && !newStatus.online) {\n        extension.ui.updateStatusDot(workerId, STATUS_COLORS.PROCESSING_YELLOW, \"Launching...\", true);\n    } else if (newStatus.online && newStatus.processing) {\n        const queue = newStatus.queueCount || 0;\n        extension.ui.updateStatusDot(workerId, STATUS_COLORS.PROCESSING_YELLOW, `Online - Processing (${queue} in queue)`, false);\n    } else if (newStatus.online) {\n        extension.ui.updateStatusDot(workerId, STATUS_COLORS.ONLINE_GREEN, \"Online - Idle\", false);\n    } else if (worker.enabled) {\n        extension.ui.updateStatusDot(workerId, STATUS_COLORS.OFFLINE_RED, \"Offline - Cannot connect\", false);\n    }\n\n    updateWorkerControls(extension, workerId);\n    return true;\n}\n\nexport async function renderSidebarContent(extension, el) {\n    // Panel is being opened/rendered\n    extension.log(\"Panel opened\", \"debug\");\n    if (!el) {\n        extension.log(\"No element provided to renderSidebarContent\", \"debug\");\n        return;\n    }\n    // Prevent infinite recursion\n    if (extension._isRendering) {\n        extension.log(\"Already rendering, skipping\", 
\"debug\");\n        return;\n    }\n    extension._isRendering = true;\n    try {\n        // Store reference to the panel element\n        extension.panelElement = el;\n        // Show loading indicator\n        el.innerHTML = '';\n        const loadingDiv = document.createElement(\"div\");\n        loadingDiv.style.cssText =\n            \"display: flex; align-items: center; justify-content: center; height: calc(100vh - 100px); color: var(--dist-muted-text, #888);\";\n        loadingDiv.innerHTML = `<svg width=\"24\" height=\"24\" viewBox=\"0 0 24 24\" style=\"color: var(--dist-muted-text, #888);\">\n            <circle cx=\"12\" cy=\"12\" r=\"10\" fill=\"none\" stroke=\"currentColor\" stroke-width=\"2\" stroke-linecap=\"round\" stroke-dasharray=\"40 40\"/>\n        </svg>`;\n        el.appendChild(loadingDiv);\n        // Add rotation animation\n        const style = document.createElement('style');\n        style.textContent = `\n            @keyframes rotate {\n                from { transform: rotate(0deg); }\n                to { transform: rotate(360deg); }\n            }\n        `;\n        document.head.appendChild(style);\n        loadingDiv.querySelector('svg').style.animation = 'rotate 1s linear infinite';\n        // Preload data outside render\n        await Promise.all([extension.loadConfig(), loadManagedWorkers(extension), extension.refreshTunnelStatus()]);\n        extension.tunnelElements = {};\n        el.innerHTML = '';\n        // Create toolbar header to match ComfyUI style\n        const toolbar = document.createElement(\"div\");\n        toolbar.className = \"p-toolbar p-component border-x-0 border-t-0 rounded-none px-2 py-1 min-h-8\";\n        toolbar.style.cssText =\n            \"border-bottom: 1px solid var(--dist-divider, #444); background: transparent; display: flex; align-items: center;\";\n        const toolbarStart = document.createElement(\"div\");\n        toolbarStart.className = \"p-toolbar-start\";\n        
toolbarStart.style.cssText = \"display: flex; align-items: center;\";\n        const titleSpan = document.createElement(\"span\");\n        titleSpan.className = \"text-xs 2xl:text-sm truncate\";\n        titleSpan.textContent = \"COMFYUI DISTRIBUTED\";\n        titleSpan.title = \"ComfyUI Distributed\";\n        toolbarStart.appendChild(titleSpan);\n        toolbar.appendChild(toolbarStart);\n        const toolbarCenter = document.createElement(\"div\");\n        toolbarCenter.className = \"p-toolbar-center\";\n        toolbar.appendChild(toolbarCenter);\n        const toolbarEnd = document.createElement(\"div\");\n        toolbarEnd.className = \"p-toolbar-end\";\n        toolbar.appendChild(toolbarEnd);\n        el.appendChild(toolbar);\n        // Main container with adjusted padding\n        const container = document.createElement(\"div\");\n        container.style.cssText = \"padding: 15px; display: flex; flex-direction: column; height: calc(100% - 32px);\";\n        // Detect master info on panel open (in case CUDA info wasn't available at startup)\n        extension.log(`Panel opened. 
CUDA device count: ${extension.cudaDeviceCount}, Workers: ${extension.config?.workers?.length || 0}`, \"debug\");\n        if (!extension.cudaDeviceCount) {\n            await extension.detectMasterIP();\n        }\n        // Now render with guaranteed up-to-date config\n        // Master Node Section\n        const masterDiv = extension.ui.renderEntityCard('master', extension.config?.master, extension);\n        container.appendChild(masterDiv);\n        container.appendChild(renderWorkersSection(extension));\n        container.appendChild(renderActionsSection(extension));\n        container.appendChild(renderSettingsSection(extension));\n        el.appendChild(container);\n        extension._applyThemeToneClass?.();\n        // Start checking worker statuses immediately in parallel\n        setTimeout(() => checkAllWorkerStatuses(extension), 0);\n    } finally {\n        // Always reset the rendering flag\n        extension._isRendering = false;\n    }\n}\n"
  },
  {
    "path": "web/stateManager.js",
    "content": "export function createStateManager() {\n    const state = {\n        workers: new Map(), // Unified worker state: { status, managed, launching, expanded, ... }\n        masterStatus: 'online',\n    };\n    \n    return {\n        // Worker state management\n        getWorker(workerId) {\n            return state.workers.get(String(workerId)) || {};\n        },\n        \n        updateWorker(workerId, updates) {\n            const id = String(workerId);\n            const current = state.workers.get(id) || {};\n            state.workers.set(id, { ...current, ...updates });\n            return state.workers.get(id);\n        },\n        \n        setWorkerStatus(workerId, status) {\n            return this.updateWorker(workerId, { status });\n        },\n        \n        setWorkerManaged(workerId, info) {\n            return this.updateWorker(workerId, { managed: info });\n        },\n        \n        setWorkerLaunching(workerId, launching) {\n            return this.updateWorker(workerId, { launching });\n        },\n        \n        setWorkerExpanded(workerId, expanded) {\n            return this.updateWorker(workerId, { expanded });\n        },\n        \n        isWorkerLaunching(workerId) {\n            return this.getWorker(workerId).launching || false;\n        },\n        \n        isWorkerExpanded(workerId) {\n            return this.getWorker(workerId).expanded || false;\n        },\n        \n        isWorkerManaged(workerId) {\n            return !!this.getWorker(workerId).managed;\n        },\n        \n        getWorkerStatus(workerId) {\n            return this.getWorker(workerId).status || {};\n        },\n        \n        // Master state\n        setMasterStatus(status) {\n            state.masterStatus = status;\n        },\n        \n        getMasterStatus() {\n            return state.masterStatus;\n        }\n    };\n}"
  },
  {
    "path": "web/tests/apiClient.test.js",
    "content": "import { afterEach, beforeEach, describe, expect, it, vi } from \"vitest\";\n\nimport { createApiClient } from \"../apiClient.js\";\n\ndescribe(\"apiClient probeWorker\", () => {\n    let originalFetch;\n\n    beforeEach(() => {\n        originalFetch = globalThis.fetch;\n        globalThis.fetch = vi.fn();\n    });\n\n    afterEach(() => {\n        globalThis.fetch = originalFetch;\n        vi.restoreAllMocks();\n    });\n\n    it(\"returns ok=true when /prompt returns valid exec_info payload\", async () => {\n        globalThis.fetch.mockResolvedValue({\n            ok: true,\n            status: 200,\n            json: vi.fn().mockResolvedValue({ exec_info: { queue_remaining: 2 } }),\n        });\n\n        const client = createApiClient(\"http://127.0.0.1:8188\");\n        const result = await client.probeWorker(\"http://worker.local:8190\", 1000);\n\n        expect(result).toEqual({ ok: true, status: 200, queueRemaining: 2 });\n    });\n\n    it(\"returns ok=false on non-200 responses\", async () => {\n        globalThis.fetch.mockResolvedValue({\n            ok: false,\n            status: 503,\n            json: vi.fn(),\n        });\n\n        const client = createApiClient(\"http://127.0.0.1:8188\");\n        const result = await client.probeWorker(\"http://worker.local:8190\", 1000);\n\n        expect(result).toEqual({ ok: false, status: 503, queueRemaining: null });\n    });\n\n    it(\"returns ok=false when response JSON is invalid\", async () => {\n        globalThis.fetch.mockResolvedValue({\n            ok: true,\n            status: 200,\n            json: vi.fn().mockRejectedValue(new Error(\"invalid json\")),\n        });\n\n        const client = createApiClient(\"http://127.0.0.1:8188\");\n        const result = await client.probeWorker(\"http://worker.local:8190\", 1000);\n\n        expect(result).toEqual({ ok: false, status: 200, queueRemaining: null });\n    });\n\n    it(\"returns ok=false when exec_info is missing\", async 
() => {\n        globalThis.fetch.mockResolvedValue({\n            ok: true,\n            status: 200,\n            json: vi.fn().mockResolvedValue({}),\n        });\n\n        const client = createApiClient(\"http://127.0.0.1:8188\");\n        const result = await client.probeWorker(\"http://worker.local:8190\", 1000);\n\n        expect(result).toEqual({ ok: false, status: 200, queueRemaining: null });\n    });\n\n    it(\"returns ok=false when queue_remaining is not numeric\", async () => {\n        globalThis.fetch.mockResolvedValue({\n            ok: true,\n            status: 200,\n            json: vi.fn().mockResolvedValue({ exec_info: { queue_remaining: \"n/a\" } }),\n        });\n\n        const client = createApiClient(\"http://127.0.0.1:8188\");\n        const result = await client.probeWorker(\"http://worker.local:8190\", 1000);\n\n        expect(result).toEqual({ ok: false, status: 200, queueRemaining: null });\n    });\n\n    it(\"clamps negative queue_remaining to zero\", async () => {\n        globalThis.fetch.mockResolvedValue({\n            ok: true,\n            status: 200,\n            json: vi.fn().mockResolvedValue({ exec_info: { queue_remaining: -5 } }),\n        });\n\n        const client = createApiClient(\"http://127.0.0.1:8188\");\n        const result = await client.probeWorker(\"http://worker.local:8190\", 1000);\n\n        expect(result).toEqual({ ok: true, status: 200, queueRemaining: 0 });\n    });\n});\n"
  },
  {
    "path": "web/tests/executionUtils.test.js",
    "content": "import { describe, expect, it } from \"vitest\";\n\nimport { buildWorkerWebSocketUrl } from \"../urlUtils.js\";\n\n\ndescribe(\"execution decision helpers\", () => {\n    it(\"buildWorkerWebSocketUrl converts http/https to ws/wss\", () => {\n        expect(buildWorkerWebSocketUrl(\"http://worker.local:8188\")).toBe(\n            \"ws://worker.local:8188/distributed/worker_ws\"\n        );\n        expect(buildWorkerWebSocketUrl(\"https://worker.example.com\")).toBe(\n            \"wss://worker.example.com/distributed/worker_ws\"\n        );\n    });\n});\n"
  },
  {
    "path": "web/tests/urlUtils.test.js",
    "content": "import { afterEach, beforeEach, describe, expect, it } from \"vitest\";\n\nimport {\n    buildWorkerUrl,\n    buildWorkerWebSocketUrl,\n    getMasterUrl,\n    normalizeWorkerUrl,\n    parseHostInput,\n} from \"../urlUtils.js\";\n\n\n// ---------------------------------------------------------------------------\n// normalizeWorkerUrl\n// ---------------------------------------------------------------------------\n\ndescribe(\"normalizeWorkerUrl\", () => {\n    it(\"returns empty string for empty input\", () => {\n        expect(normalizeWorkerUrl(\"\")).toBe(\"\");\n    });\n\n    it(\"returns empty string for null input\", () => {\n        expect(normalizeWorkerUrl(null)).toBe(\"\");\n    });\n\n    it(\"returns empty string for non-string input\", () => {\n        expect(normalizeWorkerUrl(42)).toBe(\"\");\n    });\n\n    it(\"preserves https protocol\", () => {\n        expect(normalizeWorkerUrl(\"https://example.com\")).toBe(\"https://example.com\");\n    });\n\n    it(\"strips trailing slash\", () => {\n        expect(normalizeWorkerUrl(\"http://example.com/\")).toBe(\"http://example.com\");\n    });\n\n    it(\"prepends http when protocol is missing\", () => {\n        const result = normalizeWorkerUrl(\"worker.local:8188\");\n        expect(result).toMatch(/^http:\\/\\//);\n    });\n\n    it(\"trims leading and trailing whitespace\", () => {\n        expect(normalizeWorkerUrl(\"  http://example.com  \")).toBe(\"http://example.com\");\n    });\n});\n\n\n// ---------------------------------------------------------------------------\n// parseHostInput\n// ---------------------------------------------------------------------------\n\ndescribe(\"parseHostInput\", () => {\n    it(\"returns empty host and null port for null\", () => {\n        expect(parseHostInput(null)).toEqual({ host: \"\", port: null });\n    });\n\n    it(\"returns empty host and null port for empty string\", () => {\n        expect(parseHostInput(\"\")).toEqual({ host: \"\", 
port: null });\n    });\n\n    it(\"parses hostname without port\", () => {\n        const result = parseHostInput(\"worker.example.com\");\n        expect(result.host).toBe(\"worker.example.com\");\n        expect(result.port).toBeNull();\n    });\n\n    it(\"parses hostname with port\", () => {\n        const result = parseHostInput(\"worker.example.com:9000\");\n        expect(result.host).toBe(\"worker.example.com\");\n        expect(result.port).toBe(9000);\n    });\n\n    it(\"strips http:// protocol prefix\", () => {\n        const result = parseHostInput(\"http://worker.example.com:8188\");\n        expect(result.host).toBe(\"worker.example.com\");\n        expect(result.port).toBe(8188);\n    });\n\n    it(\"strips https:// protocol prefix\", () => {\n        const result = parseHostInput(\"https://worker.example.com\");\n        expect(result.host).toBe(\"worker.example.com\");\n        expect(result.port).toBeNull();\n    });\n\n    it(\"ignores path after host:port\", () => {\n        const result = parseHostInput(\"worker.example.com:8188/some/path\");\n        expect(result.host).toBe(\"worker.example.com\");\n        expect(result.port).toBe(8188);\n    });\n});\n\n\n// ---------------------------------------------------------------------------\n// buildWorkerWebSocketUrl\n// ---------------------------------------------------------------------------\n\ndescribe(\"buildWorkerWebSocketUrl\", () => {\n    it(\"converts http to ws\", () => {\n        expect(buildWorkerWebSocketUrl(\"http://worker.local:8188\")).toBe(\n            \"ws://worker.local:8188/distributed/worker_ws\"\n        );\n    });\n\n    it(\"converts https to wss\", () => {\n        expect(buildWorkerWebSocketUrl(\"https://worker.example.com\")).toBe(\n            \"wss://worker.example.com/distributed/worker_ws\"\n        );\n    });\n\n    it(\"always appends /distributed/worker_ws\", () => {\n        const url = buildWorkerWebSocketUrl(\"http://worker.local:8188\");\n        
expect(url.endsWith(\"/distributed/worker_ws\")).toBe(true);\n    });\n});\n\n\n// ---------------------------------------------------------------------------\n// buildWorkerUrl (requires window.location stub)\n// ---------------------------------------------------------------------------\n\ndescribe(\"buildWorkerUrl\", () => {\n    let originalWindow;\n\n    beforeEach(() => {\n        originalWindow = globalThis.window;\n        globalThis.window = {\n            location: {\n                hostname: \"127.0.0.1\",\n                protocol: \"http:\",\n                port: \"8188\",\n                origin: \"http://127.0.0.1:8188\",\n            },\n        };\n    });\n\n    afterEach(() => {\n        globalThis.window = originalWindow;\n    });\n\n    it(\"builds local worker URL using window hostname when no host set\", () => {\n        const worker = { id: \"w1\", port: 8189 };\n        expect(buildWorkerUrl(worker, \"/prompt\")).toBe(\"http://127.0.0.1:8189/prompt\");\n    });\n\n    it(\"builds remote worker URL using explicit host\", () => {\n        const worker = { id: \"w2\", host: \"worker.example.com\", port: 9000 };\n        expect(buildWorkerUrl(worker, \"/prompt\")).toBe(\"http://worker.example.com:9000/prompt\");\n    });\n\n    it(\"builds cloud worker URL with https when type=cloud\", () => {\n        const worker = { id: \"w3\", host: \"cloud.example.com\", port: 443, type: \"cloud\" };\n        expect(buildWorkerUrl(worker, \"/prompt\")).toBe(\"https://cloud.example.com/prompt\");\n    });\n\n    it(\"uses https for port 443 even without type=cloud\", () => {\n        const worker = { id: \"w4\", host: \"worker.example.com\", port: 443 };\n        const result = buildWorkerUrl(worker, \"\");\n        expect(result.startsWith(\"https://\")).toBe(true);\n    });\n\n    it(\"rewrites runpod proxy hostname for local port\", () => {\n        globalThis.window = {\n            location: {\n                hostname: \"podabc.proxy.runpod.net\",\n 
               protocol: \"https:\",\n                port: \"\",\n                origin: \"https://podabc.proxy.runpod.net\",\n            },\n        };\n        const worker = { id: \"w5\", port: 8189 };\n        expect(buildWorkerUrl(worker, \"/prompt\")).toBe(\n            \"https://podabc-8189.proxy.runpod.net/prompt\"\n        );\n    });\n\n    it(\"returns URL without trailing slash when no endpoint given\", () => {\n        const worker = { id: \"w6\", host: \"worker.example.com\", port: 8188 };\n        const result = buildWorkerUrl(worker, \"\");\n        expect(result.endsWith(\"/\")).toBe(false);\n    });\n});\n\n\n// ---------------------------------------------------------------------------\n// getMasterUrl\n// ---------------------------------------------------------------------------\n\ndescribe(\"getMasterUrl\", () => {\n    const _loc = (hostname, protocol = \"http:\", port = \"8188\") => ({\n        hostname,\n        protocol,\n        port,\n        origin: `${protocol}//${hostname}${port ? 
`:${port}` : \"\"}`,\n    });\n\n    it(\"returns origin when master host not configured and hostname is non-localhost\", () => {\n        const loc = _loc(\"192.168.1.10\");\n        const result = getMasterUrl({}, loc);\n        expect(result).toBe(loc.origin);\n    });\n\n    it(\"returns origin for localhost when master host not configured\", () => {\n        const loc = _loc(\"127.0.0.1\");\n        const result = getMasterUrl({}, loc);\n        expect(result).toBe(loc.origin);\n    });\n\n    it(\"uses configured master host as-is when it includes http://\", () => {\n        const config = { master: { host: \"http://master.example.com\" } };\n        const result = getMasterUrl(config, _loc(\"127.0.0.1\"));\n        expect(result).toBe(\"http://master.example.com\");\n    });\n\n    it(\"uses configured master host as-is when it includes https://\", () => {\n        const config = { master: { host: \"https://secure.master.com\" } };\n        const result = getMasterUrl(config, _loc(\"127.0.0.1\"));\n        expect(result).toBe(\"https://secure.master.com\");\n    });\n\n    it(\"defaults to https for domain-name master hosts\", () => {\n        const config = { master: { host: \"master.example.com\" } };\n        const result = getMasterUrl(config, _loc(\"127.0.0.1\"));\n        expect(result).toBe(\"https://master.example.com\");\n    });\n\n    it(\"does not force https for IP-address master hosts\", () => {\n        const config = { master: { host: \"192.168.1.100\" } };\n        const result = getMasterUrl(config, _loc(\"127.0.0.1\"));\n        expect(result.startsWith(\"https://\")).toBe(false);\n    });\n\n    it(\"does not force https for localhost master host\", () => {\n        const config = { master: { host: \"localhost\" } };\n        const result = getMasterUrl(config, _loc(\"127.0.0.1\"));\n        expect(result.startsWith(\"https://\")).toBe(false);\n    });\n\n    it(\"accepts null log parameter without throwing\", () => {\n        const loc = 
_loc(\"127.0.0.1\");\n        expect(() => getMasterUrl({}, loc, null)).not.toThrow();\n    });\n});\n"
  },
  {
    "path": "web/tests/workerLifecycle.test.js",
    "content": "import { afterEach, beforeEach, describe, expect, it } from \"vitest\";\n\nimport { getWorkerUrl } from \"../workerLifecycle.js\";\n\n\ndescribe(\"workerLifecycle URL construction\", () => {\n    let originalWindow;\n\n    beforeEach(() => {\n        originalWindow = globalThis.window;\n        globalThis.window = {\n            location: {\n                hostname: \"127.0.0.1\",\n                protocol: \"http:\",\n                port: \"8190\",\n                origin: \"http://127.0.0.1:8190\",\n            },\n        };\n    });\n\n    afterEach(() => {\n        globalThis.window = originalWindow;\n    });\n\n    it(\"builds local worker URL with explicit local port\", () => {\n        const worker = { id: \"w1\", port: 8189, type: \"local\" };\n        expect(getWorkerUrl({}, worker, \"/prompt\")).toBe(\"http://127.0.0.1:8189/prompt\");\n    });\n\n    it(\"builds remote worker URL with host:port\", () => {\n        const worker = { id: \"w2\", host: \"worker.example.com\", port: 9000, type: \"remote\" };\n        expect(getWorkerUrl({}, worker, \"/prompt\")).toBe(\"http://worker.example.com:9000/prompt\");\n    });\n\n    it(\"builds cloud worker URL as https\", () => {\n        const worker = { id: \"w3\", host: \"cloud.example.com\", port: 443, type: \"cloud\" };\n        expect(getWorkerUrl({}, worker, \"/prompt\")).toBe(\"https://cloud.example.com/prompt\");\n    });\n\n    it(\"rewrites runpod proxy hostname for local worker ports\", () => {\n        globalThis.window = {\n            location: {\n                hostname: \"podabc.proxy.runpod.net\",\n                protocol: \"https:\",\n                port: \"\",\n                origin: \"https://podabc.proxy.runpod.net\",\n            },\n        };\n        const worker = { id: \"w4\", port: 8189, type: \"local\" };\n        expect(getWorkerUrl({}, worker, \"/prompt\")).toBe(\"https://podabc-8189.proxy.runpod.net/prompt\");\n    });\n});\n"
  },
  {
    "path": "web/tests/workerSettings.test.js",
    "content": "import { afterEach, beforeEach, describe, expect, it, vi } from \"vitest\";\n\nimport { addNewWorker, isRemoteWorker } from \"../workerSettings.js\";\n\n\ndescribe(\"workerSettings remote classification\", () => {\n    let originalWindow;\n\n    beforeEach(() => {\n        originalWindow = globalThis.window;\n        globalThis.window = {\n            location: {\n                hostname: \"127.0.0.1\",\n            },\n        };\n    });\n\n    afterEach(() => {\n        globalThis.window = originalWindow;\n    });\n\n    it(\"treats explicit local worker type as local even with non-local host\", () => {\n        const worker = { type: \"local\", host: \"192.168.1.50\" };\n        expect(isRemoteWorker({}, worker)).toBe(false);\n    });\n\n    it(\"treats explicit remote worker type as remote\", () => {\n        const worker = { type: \"remote\", host: \"127.0.0.1\" };\n        expect(isRemoteWorker({}, worker)).toBe(true);\n    });\n\n    it(\"treats cloud worker type as remote\", () => {\n        const worker = { type: \"cloud\", host: \"worker.example.com\" };\n        expect(isRemoteWorker({}, worker)).toBe(true);\n    });\n\n    it(\"falls back to host heuristic for legacy workers\", () => {\n        expect(isRemoteWorker({}, { host: \"127.0.0.1\" })).toBe(false);\n        expect(isRemoteWorker({}, { host: \"worker.example.com\" })).toBe(true);\n    });\n});\n\ndescribe(\"addNewWorker GPU availability guard\", () => {\n    it(\"falls back to a disabled remote worker when no local CUDA device is available\", async () => {\n        const toastAdd = vi.fn();\n        const updateWorker = vi.fn().mockResolvedValue({});\n        const stateUpdateWorker = vi.fn();\n        const extension = {\n            cudaDeviceCount: 1,\n            masterCudaDevice: 0,\n            panelElement: null,\n            config: {\n                workers: [],\n                master: { cuda_device: 0 },\n            },\n            api: { updateWorker },\n         
   state: { updateWorker: stateUpdateWorker, setWorkerExpanded: vi.fn() },\n            app: { extensionManager: { toast: { add: toastAdd } } },\n        };\n\n        await addNewWorker(extension);\n\n        expect(updateWorker).toHaveBeenCalledTimes(1);\n        expect(updateWorker.mock.calls[0][1]).toEqual(\n            expect.objectContaining({\n                type: \"remote\",\n                enabled: false,\n                cuda_device: null,\n                host: \"\",\n            })\n        );\n        expect(extension.config.workers).toHaveLength(1);\n        expect(extension.config.workers[0]).toEqual(\n            expect.objectContaining({\n                type: \"remote\",\n                enabled: false,\n                cuda_device: null,\n                host: \"\",\n            })\n        );\n        expect(stateUpdateWorker).toHaveBeenCalledWith(\n            expect.any(String),\n            expect.objectContaining({ enabled: false })\n        );\n        expect(toastAdd).toHaveBeenCalledWith(\n            expect.objectContaining({\n                severity: \"warn\",\n                summary: \"Remote Worker Added\",\n            })\n        );\n    });\n\n    it(\"assigns the first free local CUDA device when adding a worker\", async () => {\n        const toastAdd = vi.fn();\n        const updateWorker = vi.fn().mockResolvedValue({});\n        const stateUpdateWorker = vi.fn();\n        const setWorkerExpanded = vi.fn();\n        const extension = {\n            cudaDeviceCount: 3,\n            masterCudaDevice: 0,\n            panelElement: null,\n            config: {\n                workers: [\n                    { id: \"w-existing\", type: \"local\", port: 8189, cuda_device: 1, enabled: true },\n                ],\n                master: { cuda_device: 0 },\n            },\n            api: { updateWorker },\n            state: { updateWorker: stateUpdateWorker, setWorkerExpanded },\n            app: { extensionManager: { toast: { 
add: toastAdd } } },\n        };\n\n        await addNewWorker(extension);\n\n        expect(updateWorker).toHaveBeenCalledTimes(1);\n        expect(updateWorker.mock.calls[0][1]).toEqual(\n            expect.objectContaining({\n                cuda_device: 2,\n            })\n        );\n    });\n});\n"
  },
  {
    "path": "web/tunnelManager.js",
    "content": "export function updateTunnelUIElements(extension, isRunning, isStarting) {\n    void isRunning;\n    void isStarting;\n\n    const elements = extension.tunnelElements || {};\n    const status = (extension.tunnelStatus?.status || \"stopped\").toLowerCase();\n    const tunnelButtonColorClasses = [\"tunnel-button--enable\", \"tunnel-button--disable\"];\n    const tunnelStatusColorClasses = [\"tunnel-status--enable\", \"tunnel-status--disable\"];\n\n    if (elements.button) {\n        elements.button.disabled = status === \"starting\" || status === \"stopping\";\n        elements.button.classList.remove(...tunnelButtonColorClasses);\n\n        if (status === \"starting\") {\n            elements.button.innerHTML = `<span class=\"tunnel-spinner\"></span> Starting...`;\n            elements.button.classList.add(\"tunnel-button--enable\");\n        } else if (status === \"stopping\") {\n            elements.button.innerHTML = `<span class=\"tunnel-spinner\"></span> Stopping...`;\n            elements.button.classList.add(\"tunnel-button--disable\");\n        } else if (status === \"running\") {\n            elements.button.textContent = \"Disable Cloudflare Tunnel\";\n            elements.button.classList.add(\"tunnel-button--disable\");\n        } else if (status === \"error\") {\n            elements.button.textContent = \"Retry Cloudflare Tunnel\";\n            elements.button.classList.add(\"tunnel-button--disable\");\n        } else {\n            elements.button.textContent = \"Enable Cloudflare Tunnel\";\n            elements.button.classList.add(\"tunnel-button--enable\");\n        }\n    }\n\n    if (elements.status) {\n        elements.status.textContent = status.toUpperCase();\n        elements.status.classList.remove(...tunnelStatusColorClasses);\n        if (status === \"running\" || status === \"error\" || status === \"stopping\") {\n            elements.status.classList.add(\"tunnel-status--disable\");\n        } else {\n            
elements.status.classList.add(\"tunnel-status--enable\");\n        }\n    }\n\n    if (elements.url) {\n        const url = extension.tunnelStatus?.public_url;\n        if (url) {\n            elements.url.innerHTML = `<a href=\"${url}\" target=\"_blank\" style=\"color: #eee; text-decoration: none;\">${url}</a>`;\n        } else {\n            elements.url.textContent = status === \"starting\" ? \"Requesting public URL...\" : \"No tunnel active\";\n        }\n    }\n\n    if (elements.copyBtn) {\n        const hasUrl = Boolean(extension.tunnelStatus?.public_url);\n        elements.copyBtn.disabled = !hasUrl;\n        elements.copyBtn.style.opacity = hasUrl ? \"1\" : \"0.5\";\n    }\n}\n\nexport async function refreshTunnelStatus(extension) {\n    try {\n        const data = await extension.api.getTunnelStatus();\n        extension.tunnelStatus = data.tunnel || { status: \"stopped\" };\n        if (data.master_host !== undefined) {\n            extension._applyMasterHost(data.master_host);\n        }\n        return extension.tunnelStatus;\n    } catch (error) {\n        extension.tunnelStatus = { status: \"error\", last_error: error.message };\n        extension.log(\"Failed to fetch tunnel status: \" + error.message, \"error\");\n        return extension.tunnelStatus;\n    } finally {\n        updateTunnelUIElements(extension);\n    }\n}\n\nexport async function handleTunnelToggle(extension, button) {\n    const currentStatus = (extension.tunnelStatus?.status || \"stopped\").toLowerCase();\n    if (currentStatus === \"starting\" || currentStatus === \"stopping\") {\n        return;\n    }\n\n    const setStatus = (status) => {\n        extension.tunnelStatus = { ...(extension.tunnelStatus || {}), status };\n        updateTunnelUIElements(extension);\n    };\n\n    if (currentStatus === \"running\") {\n        setStatus(\"stopping\");\n        try {\n            if (button) {\n                button.innerHTML = `<span class=\"tunnel-spinner\"></span> 
Stopping...`;\n                button.disabled = true;\n            }\n            const data = await extension.api.stopTunnel();\n            extension.tunnelStatus = data.tunnel || { status: \"stopped\" };\n            if (data.master_host !== undefined) {\n                extension._applyMasterHost(data.master_host);\n            }\n            updateTunnelUIElements(extension);\n            extension.ui.showToast(extension.app, \"info\", \"Cloudflare Tunnel Disabled\", \"Master address restored\", 4000);\n        } catch (error) {\n            extension.tunnelStatus = { status: \"error\", last_error: error.message };\n            updateTunnelUIElements(extension);\n            extension.ui.showToast(extension.app, \"error\", \"Failed to stop tunnel\", error.message, 5000);\n        } finally {\n            if (button) {\n                button.disabled = false;\n            }\n        }\n        return;\n    }\n\n    // Start tunnel\n    setStatus(\"starting\");\n    if (button) {\n        button.innerHTML = `<span class=\"tunnel-spinner\"></span> Starting...`;\n        button.disabled = true;\n    }\n    try {\n        const data = await extension.api.startTunnel();\n        extension.tunnelStatus = data.tunnel || { status: \"running\" };\n        if (data.master_host !== undefined) {\n            extension._applyMasterHost(data.master_host);\n        }\n        updateTunnelUIElements(extension);\n        const url = data.tunnel?.public_url || data.master_host;\n        extension.ui.showToast(extension.app, \"success\", \"Cloudflare Tunnel Ready\", url || \"Public URL created\", 4500);\n    } catch (error) {\n        extension.tunnelStatus = { status: \"error\", last_error: error.message };\n        updateTunnelUIElements(extension);\n        extension.ui.showToast(extension.app, \"error\", \"Failed to start tunnel\", error.message, 5000);\n    } finally {\n        if (button) {\n            button.disabled = false;\n        }\n    }\n}\n"
  },
  {
    "path": "web/ui/buttonHelpers.js",
    "content": "export function createButtonHelper(ui, text, onClick, style) {\n    return ui.createButton(text, onClick, style);\n}\n\nexport function createCheckboxSetting(id, label, tooltip, checked, onChange) {\n    const group = document.createElement(\"div\");\n    group.style.cssText = \"grid-column: 1 / span 2; display: flex; align-items: center; gap: 8px;\";\n\n    const checkbox = document.createElement(\"input\");\n    checkbox.type = \"checkbox\";\n    checkbox.id = id;\n    checkbox.checked = checked;\n    checkbox.onchange = onChange;\n\n    const lbl = document.createElement(\"label\");\n    lbl.htmlFor = id;\n    lbl.textContent = label;\n    lbl.style.cssText = \"font-size: 12px; color: var(--dist-label-text, #ccc); cursor: pointer;\";\n    if (tooltip) {\n        lbl.title = tooltip;\n    }\n\n    group.appendChild(checkbox);\n    group.appendChild(lbl);\n    return group;\n}\n\nexport function createNumberSetting(id, label, tooltip, value, min, step, onChange) {\n    const group = document.createElement(\"div\");\n    group.style.cssText = \"grid-column: 1 / span 2; display: flex; align-items: center; gap: 6px;\";\n\n    const lbl = document.createElement(\"label\");\n    lbl.htmlFor = id;\n    lbl.textContent = label;\n    lbl.style.cssText = \"font-size: 12px; color: var(--dist-label-text, #ccc);\";\n    if (tooltip) {\n        lbl.title = tooltip;\n    }\n\n    const input = document.createElement(\"input\");\n    input.type = \"number\";\n    input.id = id;\n    input.min = String(min);\n    input.step = String(step);\n    input.style.cssText =\n        \"width: 80px; padding: 2px 6px; background: var(--dist-input-bg, #222); color: var(--dist-input-text, #ddd); border: 1px solid var(--dist-input-border, #333); border-radius: 3px;\";\n    input.value = value;\n    input.onchange = onChange;\n\n    group.appendChild(lbl);\n    group.appendChild(input);\n    return group;\n}\n"
  },
  {
    "path": "web/ui/cloudflareWarning.js",
    "content": "export function showCloudflareWarning(extension, masterHost) {\n    const existingBanner = document.getElementById('cloudflare-warning-banner');\n    if (existingBanner) {\n        existingBanner.remove();\n    }\n\n    const banner = document.createElement('div');\n    banner.id = 'cloudflare-warning-banner';\n    banner.style.cssText = `\n        position: fixed;\n        top: 0;\n        left: 0;\n        right: 0;\n        background: #ff9800;\n        color: #333;\n        padding: 8px 16px;\n        text-align: center;\n        z-index: 10000;\n        display: flex;\n        align-items: center;\n        justify-content: center;\n        gap: 16px;\n        box-shadow: 0 2px 5px rgba(0,0,0,0.2);\n        font-family: -apple-system, BlinkMacSystemFont, \"Segoe UI\", Roboto, sans-serif;\n    `;\n\n    const messageSpan = document.createElement('span');\n    messageSpan.innerHTML = `Connection issue: Master address <strong>${masterHost}</strong> is not reachable. The cloudflare tunnel may be offline.`;\n    messageSpan.style.fontSize = '13px';\n\n    const resetButton = document.createElement('button');\n    resetButton.textContent = 'Reset Master Address';\n    resetButton.style.cssText = `\n        background: #333;\n        color: white;\n        border: none;\n        padding: 6px 14px;\n        border-radius: 4px;\n        cursor: pointer;\n        font-weight: 500;\n        font-size: 13px;\n        transition: background 0.2s;\n    `;\n    resetButton.onmouseover = () => {\n        resetButton.style.background = '#555';\n    };\n    resetButton.onmouseout = () => {\n        resetButton.style.background = '#333';\n    };\n\n    const dismissButton = document.createElement('button');\n    dismissButton.textContent = 'Dismiss';\n    dismissButton.style.cssText = `\n        background: transparent;\n        color: #333;\n        border: 1px solid #333;\n        padding: 6px 14px;\n        border-radius: 4px;\n        cursor: pointer;\n        
font-size: 13px;\n        transition: opacity 0.2s;\n    `;\n    dismissButton.onmouseover = () => {\n        dismissButton.style.opacity = '0.7';\n    };\n    dismissButton.onmouseout = () => {\n        dismissButton.style.opacity = '1';\n    };\n\n    resetButton.onclick = async () => {\n        resetButton.disabled = true;\n        resetButton.textContent = 'Resetting...';\n\n        try {\n            await extension.api.updateMaster({\n                name: extension.config?.master?.name || \"Master\",\n                host: \"\",\n            });\n\n            if (extension.config?.master) {\n                extension.config.master.host = \"\";\n            }\n\n            await extension.detectMasterIP();\n            await extension.loadConfig();\n\n            const newMasterUrl = extension.getMasterUrl();\n            extension.log(`Master host reset. New URL: ${newMasterUrl}`, \"info\");\n\n            if (extension.panelElement) {\n                const hostInput = document.getElementById('master-host');\n                if (hostInput) {\n                    hostInput.value = extension.config?.master?.host || \"\";\n                }\n            }\n\n            extension.app.extensionManager.toast.add({\n                severity: \"success\",\n                summary: \"Master Host Reset\",\n                detail: `New address: ${newMasterUrl}`,\n                life: 4000,\n            });\n\n            banner.remove();\n        } catch (error) {\n            resetButton.disabled = false;\n            resetButton.textContent = 'Reset Master Host';\n            extension.log(`Failed to reset master host: ${error.message}`, \"error\");\n        }\n    };\n\n    dismissButton.onclick = () => {\n        banner.remove();\n    };\n\n    banner.appendChild(messageSpan);\n    banner.appendChild(resetButton);\n    banner.appendChild(dismissButton);\n\n    document.body.prepend(banner);\n\n    setTimeout(() => {\n        if 
(document.getElementById('cloudflare-warning-banner')) {\n            banner.style.transition = 'opacity 0.5s';\n            banner.style.opacity = '0';\n            setTimeout(() => {\n                banner.remove();\n            }, 500);\n        }\n    }, 30000);\n}\n"
  },
  {
    "path": "web/ui/entityCard.js",
    "content": "import { updateWorkerControls, toggleWorkerExpanded } from \"../workerLifecycle.js\";\nimport { isRemoteWorker } from \"../workerSettings.js\";\n\nexport function renderEntityCard(ui, cardConfigs, entityType, data, extension) {\n    const config = cardConfigs[entityType] || {};\n    const isPlaceholder = entityType === 'blueprint' || entityType === 'add';\n    const isWorker = entityType === 'worker';\n    const isMaster = entityType === 'master';\n    const isRemote = isWorker && isRemoteWorker(extension, data);\n\n    const cardOptions = {\n        onClick: isPlaceholder ? data?.onClick : null,\n    };\n    if (isPlaceholder) {\n        cardOptions.title = entityType === 'blueprint' ? \"Click to add your first worker\" : \"Click to add a new worker\";\n    }\n    const card = ui.createCard(entityType, cardOptions);\n    if (isWorker && data?.id) {\n        card.dataset.workerId = String(data.id);\n    }\n\n    const leftColumn = ui.createCheckboxOrIconColumn(config.checkbox, data, extension);\n    card.appendChild(leftColumn);\n\n    const rightColumn = ui.createCardColumn('content');\n    rightColumn.classList.add(\"entity-card-content\");\n\n    const infoRow = ui.createInfoRow();\n    if (config.infoRowPadding) {\n        infoRow.style.padding = config.infoRowPadding;\n    }\n    if (config.minHeight === 'auto') {\n        infoRow.style.minHeight = 'auto';\n    } else if (config.minHeight) {\n        infoRow.style.minHeight = config.minHeight;\n    }\n    if (config.expand) {\n        infoRow.title = \"Click to expand settings\";\n        infoRow.onclick = () => {\n            if (isMaster) {\n                const masterSettingsExpanded = !extension.masterSettingsExpanded;\n                extension.masterSettingsExpanded = masterSettingsExpanded;\n                const masterSettingsDiv = document.getElementById(\"master-settings\");\n                const arrow = infoRow.querySelector('.settings-arrow');\n                if 
(masterSettingsExpanded) {\n                    masterSettingsDiv.classList.add(\"expanded\");\n                    masterSettingsDiv.style.padding = \"12px\";\n                    masterSettingsDiv.style.marginTop = \"8px\";\n                    masterSettingsDiv.style.marginBottom = \"8px\";\n                    arrow.style.transform = \"rotate(90deg)\";\n                } else {\n                    masterSettingsDiv.classList.remove(\"expanded\");\n                    masterSettingsDiv.style.padding = \"0 12px\";\n                    masterSettingsDiv.style.marginTop = \"0\";\n                    masterSettingsDiv.style.marginBottom = \"0\";\n                    arrow.style.transform = \"rotate(0deg)\";\n                }\n            } else {\n                toggleWorkerExpanded(extension, data.id);\n            }\n        };\n    }\n\n    const workerContent = ui.createWorkerContent();\n    if (entityType === 'add') {\n        workerContent.style.alignItems = \"center\";\n    }\n\n    const statusDot = ui.createStatusDotHelper(config.statusDot, data, extension);\n    workerContent.appendChild(statusDot);\n\n    const infoSpan = document.createElement(\"span\");\n    infoSpan.classList.add(\"dist-worker-info\");\n    infoSpan.innerHTML = config.infoText(data, extension);\n    workerContent.appendChild(infoSpan);\n\n    infoRow.appendChild(workerContent);\n\n    let settingsArrow;\n    if (config.expand) {\n        const expandedId = config.settings?.expandedId || (isMaster ? 
'master' : data?.id);\n        settingsArrow = ui.createSettingsToggleHelper(expandedId, extension);\n        if (isMaster && !extension.masterSettingsExpanded) {\n            settingsArrow.style.transform = \"rotate(0deg)\";\n        }\n        infoRow.appendChild(settingsArrow);\n    }\n\n    rightColumn.appendChild(infoRow);\n\n    if (config.hover === true) {\n        rightColumn.classList.add(\"entity-card-content--hoverable\");\n        rightColumn.onmouseover = () => {\n            rightColumn.classList.add(\"entity-card-content--hovered\");\n            if (settingsArrow) {\n                settingsArrow.style.color = \"var(--dist-settings-arrow-hover, #fff)\";\n            }\n        };\n        rightColumn.onmouseout = () => {\n            rightColumn.classList.remove(\"entity-card-content--hovered\");\n            if (settingsArrow) {\n                settingsArrow.style.color = \"var(--dist-settings-arrow, #888)\";\n            }\n        };\n    }\n\n    const controlsDiv = ui.createControlsSection(config.controls, data, extension, isRemote);\n    if (controlsDiv) {\n        rightColumn.appendChild(controlsDiv);\n    }\n\n    if (config.settings) {\n        const settingsDiv = ui.createSettingsSection(config.settings, data, extension);\n        rightColumn.appendChild(settingsDiv);\n    }\n\n    card.appendChild(rightColumn);\n\n    if (config.hover === 'placeholder') {\n        ui.addPlaceholderHover(card, leftColumn, entityType);\n    }\n\n    if (isWorker && !isRemote) {\n        updateWorkerControls(extension, data.id);\n    }\n\n    return card;\n}\n"
  },
  {
    "path": "web/ui/logModal.js",
    "content": "import { TIMEOUTS } from '../constants.js';\n\nfunction formatFileSize(bytes) {\n    if (bytes < 1024) return bytes + ' B';\n    if (bytes < 1024 * 1024) return (bytes / 1024).toFixed(1) + ' KB';\n    return (bytes / (1024 * 1024)).toFixed(1) + ' MB';\n}\n\nexport function createLogModal() {\n    let _modalEl = null;\n    let _keydownHandler = null;\n    let _refreshTimer = null;\n    let _fetching = false;\n    let _fetchLog = null;\n    let _onClose = null;\n    let _logContentEl = null;\n    let _statusBarEl = null;\n    let _refreshCheckbox = null;\n\n    const updateLogView = (logData) => {\n        if (!_logContentEl || !_statusBarEl || !logData) {\n            return;\n        }\n\n        const shouldAutoScroll =\n            _logContentEl.scrollTop + _logContentEl.clientHeight >= _logContentEl.scrollHeight - 50;\n        _logContentEl.textContent = logData.content || '';\n        if (shouldAutoScroll) {\n            _logContentEl.scrollTop = _logContentEl.scrollHeight;\n        }\n\n        let statusText;\n        if (logData.source === \"memory\") {\n            statusText = \"Remote worker log (in-memory buffer)\";\n            if (logData.truncated) {\n                statusText += ` (showing last ${logData.lines_shown || 0} lines)`;\n            }\n        } else {\n            statusText = `Log file: ${logData.log_file || 'unknown'}`;\n            if (logData.truncated) {\n                statusText += ` (showing last ${logData.lines_shown} lines of ${formatFileSize(logData.file_size || 0)})`;\n            }\n        }\n        _statusBarEl.textContent = statusText;\n    };\n\n    const refreshLog = async () => {\n        if (_fetching || !_fetchLog || !_refreshCheckbox?.checked) {\n            return;\n        }\n        _fetching = true;\n        try {\n            const data = await _fetchLog();\n            if (data) {\n                updateLogView(data);\n            }\n        } catch (_error) {\n            // Keep the modal 
open and continue retrying on next interval.\n        } finally {\n            _fetching = false;\n        }\n    };\n\n    const stopRefresh = () => {\n        if (_refreshTimer) {\n            clearInterval(_refreshTimer);\n            _refreshTimer = null;\n        }\n    };\n\n    const startRefresh = () => {\n        stopRefresh();\n        _refreshTimer = setInterval(() => {\n            refreshLog();\n        }, TIMEOUTS.LOG_REFRESH);\n    };\n\n    const unmount = () => {\n        stopRefresh();\n\n        if (_keydownHandler) {\n            document.removeEventListener('keydown', _keydownHandler);\n            _keydownHandler = null;\n        }\n\n        if (_modalEl) {\n            _modalEl.remove();\n            _modalEl = null;\n        }\n\n        const onClose = _onClose;\n        _onClose = null;\n        if (onClose) {\n            onClose();\n        }\n    };\n\n    const mount = (container, { workerName, logData, onClose, fetchLog, themeClass = \"\" }) => {\n        _onClose = onClose;\n        _fetchLog = fetchLog;\n\n        const modal = document.createElement('div');\n        modal.id = 'distributed-log-modal';\n        modal.className = 'log-modal';\n        if (themeClass) {\n            modal.classList.add(themeClass);\n        }\n\n        const content = document.createElement('div');\n        content.className = 'log-modal__content';\n\n        const header = document.createElement('div');\n        header.className = 'log-modal__header';\n\n        const title = document.createElement('h3');\n        title.className = 'log-modal__title';\n        title.textContent = `${workerName} - Log Viewer`;\n\n        const headerButtons = document.createElement('div');\n        headerButtons.className = 'log-modal__header-buttons';\n\n        const refreshContainer = document.createElement('div');\n        refreshContainer.className = 'log-modal__refresh';\n\n        const refreshCheckbox = document.createElement('input');\n        
refreshCheckbox.type = 'checkbox';\n        refreshCheckbox.id = 'log-auto-refresh';\n        refreshCheckbox.className = 'log-modal__refresh-input';\n        refreshCheckbox.checked = true;\n\n        const refreshLabel = document.createElement('label');\n        refreshLabel.htmlFor = 'log-auto-refresh';\n        refreshLabel.className = 'log-modal__refresh-label';\n        refreshLabel.textContent = 'Auto-refresh';\n\n        refreshContainer.appendChild(refreshCheckbox);\n        refreshContainer.appendChild(refreshLabel);\n\n        const closeBtn = document.createElement('button');\n        closeBtn.className = 'distributed-button log-modal__close';\n        closeBtn.textContent = '✕';\n\n        headerButtons.appendChild(refreshContainer);\n        headerButtons.appendChild(closeBtn);\n\n        header.appendChild(title);\n        header.appendChild(headerButtons);\n\n        const logContainer = document.createElement('div');\n        logContainer.className = 'log-modal__body';\n        logContainer.id = 'distributed-log-content';\n\n        const statusBar = document.createElement('div');\n        statusBar.className = 'log-modal__status';\n\n        content.appendChild(header);\n        content.appendChild(logContainer);\n        content.appendChild(statusBar);\n        modal.appendChild(content);\n\n        closeBtn.addEventListener('click', unmount);\n        modal.addEventListener('click', (event) => {\n            if (event.target === modal) {\n                unmount();\n            }\n        });\n\n        _keydownHandler = (event) => {\n            if (event.key === 'Escape') {\n                unmount();\n            }\n        };\n        document.addEventListener('keydown', _keydownHandler);\n\n        _modalEl = modal;\n        _logContentEl = logContainer;\n        _statusBarEl = statusBar;\n        _refreshCheckbox = refreshCheckbox;\n\n        refreshCheckbox.addEventListener('change', () => {\n            if (refreshCheckbox.checked) {\n   
             refreshLog();\n            }\n        });\n\n        container.appendChild(modal);\n        updateLogView(logData);\n\n        requestAnimationFrame(() => {\n            if (_logContentEl) {\n                _logContentEl.scrollTop = _logContentEl.scrollHeight;\n            }\n        });\n\n        startRefresh();\n    };\n\n    return {\n        mount,\n        unmount,\n        update: updateLogView,\n    };\n}\n"
  },
  {
    "path": "web/ui/settingsForm.js",
    "content": "import { BUTTON_STYLES } from '../constants.js';\nimport { cancelWorkerSettings, deleteWorker, isRemoteWorker, saveWorkerSettings } from '../workerSettings.js';\n\nexport function createWorkerSettingsForm(ui, extension, worker) {\n    const form = document.createElement(\"div\");\n    form.style.cssText = \"display: flex; flex-direction: column; gap: 8px;\";\n\n    const nameGroup = ui.createFormGroup(\"Name:\", worker.name, `name-${worker.id}`);\n    form.appendChild(nameGroup.group);\n\n    const typeGroup = document.createElement(\"div\");\n    typeGroup.style.cssText = \"display: flex; flex-direction: column; gap: 4px; margin: 5px 0;\";\n\n    const typeLabel = document.createElement(\"label\");\n    typeLabel.htmlFor = `worker-type-${worker.id}`;\n    typeLabel.textContent = \"Worker Type:\";\n    typeLabel.style.cssText = \"font-size: 12px; color: var(--dist-label-text, #ccc);\";\n\n    const typeSelect = document.createElement(\"select\");\n    typeSelect.id = `worker-type-${worker.id}`;\n    typeSelect.style.cssText =\n        \"padding: 4px 8px; background: var(--dist-input-bg, #333); color: var(--dist-input-text, #fff); border: 1px solid var(--dist-input-border, #555); border-radius: 4px; font-size: 12px;\";\n\n    const localOption = document.createElement(\"option\");\n    localOption.value = \"local\";\n    localOption.textContent = \"Local\";\n\n    const remoteOption = document.createElement(\"option\");\n    remoteOption.value = \"remote\";\n    remoteOption.textContent = \"Remote\";\n\n    const cloudOption = document.createElement(\"option\");\n    cloudOption.value = \"cloud\";\n    cloudOption.textContent = \"Cloud\";\n\n    typeSelect.appendChild(localOption);\n    typeSelect.appendChild(remoteOption);\n    typeSelect.appendChild(cloudOption);\n\n    const runpodText = document.createElement(\"a\");\n    runpodText.id = `runpod-text-${worker.id}`;\n    runpodText.href = 
\"https://github.com/robertvoy/ComfyUI-Distributed/blob/main/docs/worker-setup-guides.md#cloud-workers\";\n    runpodText.target = \"_blank\";\n    runpodText.textContent = \"Deploy Cloud Worker with Runpod\";\n    runpodText.style.cssText = \"font-size: 12px; color: #4a90e2; text-decoration: none; margin-top: 4px; display: none; cursor: pointer;\";\n\n    const createOnChangeHandler = () => {\n        return (e) => {\n            const workerType = e.target.value;\n            const hostGroup = document.getElementById(`host-group-${worker.id}`);\n            const hostInput = document.getElementById(`host-${worker.id}`);\n            const portGroup = document.getElementById(`port-group-${worker.id}`);\n            const portInput = document.getElementById(`port-${worker.id}`);\n            const cudaGroup = document.getElementById(`cuda-group-${worker.id}`);\n            const argsGroup = document.getElementById(`args-group-${worker.id}`);\n            const runpodTextElem = document.getElementById(`runpod-text-${worker.id}`);\n\n            if (!hostGroup || !portGroup || !cudaGroup || !argsGroup || !runpodTextElem || !hostInput || !portInput) {\n                return;\n            }\n\n            if (workerType === \"local\") {\n                hostGroup.style.display = \"none\";\n                portGroup.style.display = \"flex\";\n                cudaGroup.style.display = \"flex\";\n                argsGroup.style.display = \"flex\";\n                runpodTextElem.style.display = \"none\";\n            } else if (workerType === \"remote\") {\n                hostGroup.style.display = \"flex\";\n                portGroup.style.display = \"flex\";\n                cudaGroup.style.display = \"none\";\n                argsGroup.style.display = \"none\";\n                runpodTextElem.style.display = \"none\";\n                hostInput.placeholder = \"e.g., 192.168.1.100\";\n                if (hostInput.value === \"localhost\" || hostInput.value === 
\"127.0.0.1\") {\n                    hostInput.value = \"\";\n                }\n            } else if (workerType === \"cloud\") {\n                hostGroup.style.display = \"flex\";\n                portGroup.style.display = \"flex\";\n                cudaGroup.style.display = \"none\";\n                argsGroup.style.display = \"none\";\n                runpodTextElem.style.display = \"block\";\n                hostInput.placeholder = \"e.g., your-cloud-worker.trycloudflare.com\";\n                portInput.value = \"443\";\n                if (hostInput.value === \"localhost\" || hostInput.value === \"127.0.0.1\") {\n                    hostInput.value = \"\";\n                }\n            }\n        };\n    };\n\n    typeGroup.appendChild(typeLabel);\n    typeGroup.appendChild(typeSelect);\n    typeGroup.appendChild(runpodText);\n    form.appendChild(typeGroup);\n\n    const hostGroup = ui.createFormGroup(\"Host:\", worker.host || \"\", `host-${worker.id}`, \"text\", \"e.g., 192.168.1.100\");\n    hostGroup.group.id = `host-group-${worker.id}`;\n    hostGroup.group.style.display = (isRemoteWorker(extension, worker) || worker.type === \"cloud\") ? \"flex\" : \"none\";\n    form.appendChild(hostGroup.group);\n\n    const portGroup = ui.createFormGroup(\"Port:\", worker.port, `port-${worker.id}`, \"number\");\n    portGroup.group.id = `port-group-${worker.id}`;\n    form.appendChild(portGroup.group);\n\n    const cudaGroup = ui.createFormGroup(\"CUDA Device:\", worker.cuda_device || 0, `cuda-${worker.id}`, \"number\");\n    cudaGroup.group.id = `cuda-group-${worker.id}`;\n    cudaGroup.group.style.display = (isRemoteWorker(extension, worker) || worker.type === \"cloud\") ? 
\"none\" : \"flex\";\n    form.appendChild(cudaGroup.group);\n\n    const argsGroup = ui.createFormGroup(\"Extra Args:\", worker.extra_args || \"\", `args-${worker.id}`);\n    argsGroup.group.id = `args-group-${worker.id}`;\n    argsGroup.group.style.display = (isRemoteWorker(extension, worker) || worker.type === \"cloud\") ? \"none\" : \"flex\";\n    form.appendChild(argsGroup.group);\n\n    const saveBtn = ui.createButton(\"Save\", () => saveWorkerSettings(extension, worker.id), \"background-color: #4a7c4a;\");\n    saveBtn.style.cssText = BUTTON_STYLES.base + BUTTON_STYLES.success;\n\n    const cancelBtn = ui.createButton(\"Cancel\", () => cancelWorkerSettings(extension, worker.id), \"background-color: #555;\");\n    cancelBtn.style.cssText = BUTTON_STYLES.base + BUTTON_STYLES.cancel;\n\n    const deleteBtn = ui.createButton(\"Delete\", () => deleteWorker(extension, worker.id), \"background-color: #7c4a4a;\");\n    deleteBtn.style.cssText = BUTTON_STYLES.base + BUTTON_STYLES.error + BUTTON_STYLES.marginLeftAuto;\n\n    const buttonGroup = ui.createButtonGroup([saveBtn, cancelBtn, deleteBtn], \" margin-top: 8px;\");\n    form.appendChild(buttonGroup);\n\n    typeSelect.onchange = createOnChangeHandler();\n\n    if (worker.type === \"cloud\") {\n        typeSelect.value = \"cloud\";\n        runpodText.style.display = \"block\";\n    } else if (isRemoteWorker(extension, worker)) {\n        typeSelect.value = \"remote\";\n    } else {\n        typeSelect.value = \"local\";\n    }\n\n    typeSelect.dispatchEvent(new Event('change'));\n\n    return form;\n}\n"
  },
  {
    "path": "web/ui.js",
    "content": "import { BUTTON_STYLES, UI_STYLES, STATUS_COLORS, UI_COLORS, TIMEOUTS } from './constants.js';\nimport { createButtonHelper as createButtonHelperFn } from './ui/buttonHelpers.js';\nimport { showCloudflareWarning as showCloudflareWarningFn } from './ui/cloudflareWarning.js';\nimport { createWorkerSettingsForm as createWorkerSettingsFormFn } from './ui/settingsForm.js';\nimport { renderEntityCard as renderEntityCardFn } from './ui/entityCard.js';\nimport { createLogModal } from './ui/logModal.js';\nimport { launchWorker, stopWorker, updateWorkerControls, viewWorkerLog } from './workerLifecycle.js';\nimport { isRemoteWorker } from './workerSettings.js';\n\nconst cardConfigs = {\n    master: {\n        checkbox: { \n            enabled: true,\n            masterToggle: true,\n            title: \"Toggle master participation in workloads\"\n        },\n        statusDot: { \n            id: 'master-status',\n            initialColor: (_, extension) => extension.isMasterParticipating() ? STATUS_COLORS.ONLINE_GREEN : STATUS_COLORS.DISABLED_GRAY,\n            initialTitle: (_, extension) => extension.isMasterParticipating() ? 'Master participating' : 'Master orchestrator only',\n            dynamic: true\n        },\n        infoText: (data, extension) => {\n            const cudaDevice = extension.config?.master?.cuda_device ?? extension.masterCudaDevice;\n            const cudaInfo = cudaDevice !== undefined ? `CUDA ${cudaDevice} • ` : '';\n            const port = window.location.port || (window.location.protocol === 'https:' ? 
'443' : '80');\n            const participationEnabled = extension.isMasterParticipationEnabled();\n            const fallbackActive = extension.isMasterFallbackActive();\n            let delegateBadge = '';\n            if (!participationEnabled && fallbackActive) {\n                delegateBadge = `<br><span class=\"dist-worker-info__fallback\">Fallback active • Master executing</span>`;\n            }\n            return `<span class=\"dist-worker-info__title\" id=\"master-name-display\">${data?.name || extension.config?.master?.name || \"Master\"}</span><br><span class=\"dist-worker-info__meta\"><span id=\"master-cuda-info\">${cudaInfo}Port ${port}</span></span>${delegateBadge}`;\n        },\n        controls: { \n            type: 'master'\n        },\n        settings: { \n            formType: 'master', \n            id: 'master-settings',\n            expandedTracker: 'masterSettingsExpanded'\n        },\n        hover: true,\n        expand: true,\n        border: 'solid'\n    },\n    worker: {\n        checkbox: { \n            enabled: true, \n            title: \"Enable/disable this worker\" \n        },\n        statusDot: { \n            dynamic: true,\n            initialColor: (data) => data.enabled ? STATUS_COLORS.OFFLINE_RED : STATUS_COLORS.DISABLED_GRAY,\n            initialTitle: (data) => data.enabled ? \"Checking status...\" : \"Disabled\",\n            id: (data) => `status-${data.id}`\n        },\n        infoText: (data, extension) => {\n            const isRemote = isRemoteWorker(extension, data);\n            const isCloud = data.type === 'cloud';\n            \n            if (isCloud) {\n                // For cloud workers, don't show port (it's always 443)\n                return `<span class=\"dist-worker-info__title\">${data.name}</span><br><span class=\"dist-worker-info__meta\">${data.host}</span>`;\n            } else if (isRemote) {\n                const hostLabel = data.host\n                    ? 
`${data.host}:${data.port}`\n                    : `Unconfigured remote worker • Port ${data.port}`;\n                return `<span class=\"dist-worker-info__title\">${data.name}</span><br><span class=\"dist-worker-info__meta\">${hostLabel}</span>`;\n            } else {\n                const cudaInfo = data.cuda_device !== undefined ? `CUDA ${data.cuda_device} • ` : '';\n                return `<span class=\"dist-worker-info__title\">${data.name}</span><br><span class=\"dist-worker-info__meta\">${cudaInfo}Port ${data.port}</span>`;\n            }\n        },\n        controls: { \n            dynamic: true \n        },\n        settings: { \n            formType: 'worker',\n            id: (data) => `settings-${data.id}`,\n            expandedId: (data) => data?.id\n        },\n        hover: true,\n        expand: true,\n        border: 'solid'\n    },\n    blueprint: {\n        checkbox: { \n            type: 'icon', \n            content: '+', \n            width: 42,\n            style: `border-right: 2px dashed ${UI_COLORS.BORDER_LIGHT}; color: ${UI_COLORS.ACCENT_COLOR}; font-size: 24px; font-weight: 500;` \n        },\n        statusDot: { \n            color: 'transparent', \n            border: `1px solid ${UI_COLORS.BORDER_LIGHT}` \n        },\n        infoText: () => `<strong style=\"color: #aaa; font-size: 16px;\">Add New Worker</strong><br><small style=\"color: ${UI_COLORS.BORDER_LIGHT};\">[CUDA] • [Port]</small>`,\n        controls: { \n            type: 'ghost', \n            text: 'Configure', \n            style: `border: 1px solid ${UI_COLORS.BORDER_DARK}; background: transparent; color: ${UI_COLORS.BORDER_LIGHT};` \n        },\n        hover: 'placeholder',\n        expand: false,\n        border: 'dashed'\n    },\n    add: {\n        checkbox: { \n            type: 'icon', \n            content: '+',\n            width: 43,\n            style: `border-right: 1px dashed ${UI_COLORS.BORDER_DARK}; color: ${UI_COLORS.BORDER_LIGHT}; font-size: 
18px;` \n        },\n        statusDot: { \n            color: 'transparent', \n            border: `1px solid ${UI_COLORS.BORDER_LIGHT}` \n        },\n        infoText: () => `<span style=\"color: ${UI_COLORS.ICON_COLOR}; font-weight: bold; font-size: 13px;\">Add New Worker</span>`,\n        controls: null,\n        hover: 'placeholder',\n        expand: false,\n        border: 'dashed',\n        minHeight: '48px'\n    }\n};\n\nexport class DistributedUI {\n    constructor() {\n        // UI element styles\n        this.styles = UI_STYLES;\n    }\n\n    createStatusDot(id, color = \"#666\", title = \"Status\") {\n        const dot = document.createElement(\"span\");\n        if (id) dot.id = id;\n        dot.style.cssText = this.styles.statusDot + ` background-color: ${color};`;\n        dot.title = title;\n        return dot;\n    }\n\n    createButton(text, onClick, customStyle = \"\") {\n        const button = document.createElement(\"button\");\n        button.textContent = text;\n        button.className = \"distributed-button\";\n        button.style.cssText = BUTTON_STYLES.base + customStyle;\n        if (onClick) button.onclick = onClick;\n        return button;\n    }\n\n    createButtonGroup(buttons, style = \"\") {\n        const group = document.createElement(\"div\");\n        group.style.cssText = this.styles.buttonGroup + style;\n        buttons.forEach(button => group.appendChild(button));\n        return group;\n    }\n\n    createWorkerControls(workerId, handlers = {}) {\n        const controlsDiv = document.createElement(\"div\");\n        controlsDiv.id = `controls-${workerId}`;\n        controlsDiv.style.cssText = this.styles.controlsDiv;\n        \n        const buttons = [];\n        \n        if (handlers.launch) {\n            const launchBtn = this.createButton('Launch', handlers.launch);\n            launchBtn.id = `launch-${workerId}`;\n            launchBtn.title = \"Launch this worker instance\";\n            
buttons.push(launchBtn);\n        }\n        \n        if (handlers.stop) {\n            const stopBtn = this.createButton('Stop', handlers.stop);\n            stopBtn.id = `stop-${workerId}`;\n            stopBtn.title = \"Stop this worker instance\";\n            buttons.push(stopBtn);\n        }\n        \n        if (handlers.viewLog) {\n            const logBtn = this.createButton('View Log', handlers.viewLog);\n            logBtn.id = `log-${workerId}`;\n            logBtn.title = \"View worker log file\";\n            buttons.push(logBtn);\n        }\n        \n        buttons.forEach(btn => controlsDiv.appendChild(btn));\n        return controlsDiv;\n    }\n\n    createFormGroup(label, value, id, type = \"text\", placeholder = \"\") {\n        const group = document.createElement(\"div\");\n        group.style.cssText = this.styles.formGroup;\n        \n        const labelEl = document.createElement(\"label\");\n        labelEl.textContent = label;\n        labelEl.htmlFor = id;\n        labelEl.style.cssText = this.styles.formLabel;\n        \n        const input = document.createElement(\"input\");\n        input.type = type;\n        input.id = id;\n        input.value = value;\n        input.placeholder = placeholder;\n        input.classList.add('dist-form-input');\n        input.style.cssText = this.styles.formInput;\n        \n        group.appendChild(labelEl);\n        group.appendChild(input);\n        return { group, input };\n    }\n\n\n    createInfoBox(text) {\n        const box = document.createElement(\"div\");\n        box.classList.add('dist-info-box');\n        box.style.cssText = this.styles.infoBox;\n        box.textContent = text;\n        return box;\n    }\n\n    addHoverEffect(element, onHover, onLeave) {\n        element.onmouseover = onHover;\n        element.onmouseout = onLeave;\n    }\n\n    createCard(type = 'worker', options = {}) {\n        const card = document.createElement(\"div\");\n        
card.classList.add('dist-card');\n\n        switch(type) {\n            case 'master':\n            case 'worker':\n                card.style.cssText = this.styles.workerCard;\n                break;\n            case 'blueprint':\n                card.classList.add('dist-card--blueprint');\n                card.style.cssText = this.styles.cardBase + this.styles.cardBlueprint;\n                if (options.onClick) card.onclick = options.onClick;\n                if (options.title) card.title = options.title;\n                break;\n            case 'add':\n                card.classList.add('dist-card--add');\n                card.style.cssText = this.styles.cardBase + this.styles.cardAdd;\n                if (options.onClick) card.onclick = options.onClick;\n                if (options.title) card.title = options.title;\n                break;\n        }\n        \n        if (options.onMouseEnter) {\n            card.addEventListener('mouseenter', options.onMouseEnter);\n        }\n        if (options.onMouseLeave) {\n            card.addEventListener('mouseleave', options.onMouseLeave);\n        }\n        \n        return card;\n    }\n\n    createCardColumn(type = 'checkbox', options = {}) {\n        const column = document.createElement(\"div\");\n\n        switch(type) {\n            case 'checkbox':\n                column.classList.add('dist-card__left-col');\n                column.style.cssText = this.styles.checkboxColumn;\n                if (options.title) column.title = options.title;\n                break;\n            case 'icon':\n                column.style.cssText = this.styles.columnBase + this.styles.iconColumn;\n                break;\n            case 'content':\n                column.style.cssText = this.styles.contentColumn;\n                break;\n        }\n        \n        return column;\n    }\n\n    createInfoRow(options = {}) {\n        const row = document.createElement(\"div\");\n        row.style.cssText = 
this.styles.infoRow;\n        if (options.onClick) row.onclick = options.onClick;\n        return row;\n    }\n\n    createWorkerContent() {\n        const content = document.createElement(\"div\");\n        content.style.cssText = this.styles.workerContent;\n        return content;\n    }\n\n    createSettingsForm(fields = [], options = {}) {\n        const form = document.createElement(\"div\");\n        form.style.cssText = this.styles.settingsForm;\n        \n        fields.forEach(field => {\n            if (field.type === 'checkbox') {\n                const group = document.createElement(\"div\");\n                group.style.cssText = this.styles.checkboxGroup;\n                \n                const checkbox = document.createElement(\"input\");\n                checkbox.type = \"checkbox\";\n                checkbox.id = field.id;\n                checkbox.checked = field.checked || false;\n                if (field.onChange) checkbox.onchange = field.onChange;\n                \n                const label = document.createElement(\"label\");\n                label.htmlFor = field.id;\n                label.textContent = field.label;\n                label.style.cssText = this.styles.formLabelClickable;\n                \n                group.appendChild(checkbox);\n                group.appendChild(label);\n                form.appendChild(group);\n            } else {\n                const result = this.createFormGroup(field.label, field.value, field.id, field.type, field.placeholder);\n                if (field.groupId) result.group.id = field.groupId;\n                if (field.display) result.group.style.display = field.display;\n                form.appendChild(result.group);\n            }\n        });\n        \n        if (options.buttons) {\n            const buttonGroup = this.createButtonGroup(options.buttons, options.buttonStyle || \" margin-top: 8px;\");\n            form.appendChild(buttonGroup);\n        }\n        \n        return 
form;\n    }\n\n\n    createButtonHelper(text, onClick, style) {\n        return createButtonHelperFn(this, text, onClick, style);\n    }\n\n    updateMasterDisplay(extension) {\n        // Use persistent config value as fallback\n        const cudaDevice = extension?.config?.master?.cuda_device ?? extension?.masterCudaDevice;\n        \n        // Update CUDA info if element exists\n        const cudaInfo = document.getElementById('master-cuda-info');\n        if (cudaInfo) {\n            const port = window.location.port || (window.location.protocol === 'https:' ? '443' : '80');\n            if (cudaDevice !== undefined && cudaDevice !== null) {\n                cudaInfo.textContent = `CUDA ${cudaDevice} • Port ${port}`;\n            } else {\n                cudaInfo.textContent = `Port ${port}`;\n            }\n        }\n        \n        // Update name if changed\n        const nameDisplay = document.getElementById('master-name-display');\n        if (nameDisplay && extension?.config?.master?.name) {\n            nameDisplay.textContent = extension.config.master.name;\n        }\n    }\n\n    showToast(app, severity, summary, detail, life = 3000) {\n        if (app.extensionManager?.toast?.add) {\n            app.extensionManager.toast.add({ severity, summary, detail, life });\n        }\n    }\n\n    showCloudflareWarning(extension, masterHost) {\n        return showCloudflareWarningFn(extension, masterHost);\n    }\n\n    updateStatusDot(workerId, color, title, pulsing = false) {\n        const statusDot = document.getElementById(`status-${workerId}`);\n        if (!statusDot) return;\n\n        const statusClasses = [\n            \"worker-status--online\",\n            \"worker-status--offline\",\n            \"worker-status--unknown\",\n            \"worker-status--processing\",\n        ];\n        statusDot.classList.remove(...statusClasses);\n\n        const colorClassMap = {\n            [STATUS_COLORS.ONLINE_GREEN]: \"worker-status--online\",\n      
      [STATUS_COLORS.OFFLINE_RED]: \"worker-status--offline\",\n            [STATUS_COLORS.DISABLED_GRAY]: \"worker-status--unknown\",\n            [STATUS_COLORS.PROCESSING_YELLOW]: \"worker-status--processing\",\n        };\n\n        const statusClass = colorClassMap[color] || \"worker-status--unknown\";\n        statusDot.classList.add(statusClass);\n        statusDot.style.backgroundColor = \"\";\n        statusDot.title = title;\n        statusDot.classList.toggle('status-pulsing', pulsing);\n    }\n\n    showLogModal(extension, workerId, logData, fetchLog = null) {\n        if (this._logModal) {\n            this._logModal.unmount();\n            this._logModal = null;\n        }\n\n        const worker = extension.config.workers.find(w => w.id === workerId);\n        const workerName = worker?.name || `Worker ${workerId}`;\n\n        const modal = createLogModal();\n        this._logModal = modal;\n        const themeClass =\n            extension.panelElement?.classList.contains(\"distributed-panel--light\")\n                ? 
\"distributed-panel--light\"\n                : \"\";\n        modal.mount(document.body, {\n            workerName,\n            logData,\n            fetchLog: fetchLog || (async () => extension.api.getWorkerLog(workerId, 1000)),\n            themeClass,\n            onClose: () => {\n                if (this._logModal === modal) {\n                    this._logModal = null;\n                }\n            },\n        });\n    }\n\n    createWorkerSettingsForm(extension, worker) {\n        return createWorkerSettingsFormFn(this, extension, worker);\n    }\n\n    createSettingsToggle() {\n        const settingsRow = document.createElement(\"div\");\n        settingsRow.style.cssText = this.styles.settingsToggle;\n        \n        const settingsTitle = document.createElement(\"h4\");\n        settingsTitle.textContent = \"Settings\";\n        settingsTitle.style.cssText = \"margin: 0; font-size: 14px;\";\n        \n        const settingsToggle = document.createElement(\"span\");\n        settingsToggle.textContent = \"▶\"; // Right arrow when collapsed\n        settingsToggle.style.cssText =\n            \"font-size: 12px; color: var(--dist-settings-arrow, #888); transition: all 0.2s ease;\";\n        \n        settingsRow.appendChild(settingsToggle);\n        settingsRow.appendChild(settingsTitle);\n        \n        return { settingsRow, settingsToggle };\n    }\n\n\n    createCheckboxOrIconColumn(config, data, extension) {\n        const column = this.createCardColumn('checkbox');\n        \n        if (config?.type === 'icon') {\n            column.style.flex = `0 0 ${config.width || 44}px`;\n            column.innerHTML = config.content || '+';\n            if (config.style) {\n                const styles = config.style.split(';').filter(s => s.trim());\n                styles.forEach(style => {\n                    const [prop, value] = style.split(':').map(s => s.trim());\n                    if (prop && value) {\n                        
column.style[prop.replace(/-([a-z])/g, (g) => g[1].toUpperCase())] = value;\n                    }\n                });\n            }\n        } else {\n            const checkbox = document.createElement(\"input\");\n            checkbox.type = \"checkbox\";\n            checkbox.id = `gpu-${data?.id || 'master'}`;\n            checkbox.checked = config?.checked !== undefined ? config.checked : data?.enabled;\n            checkbox.disabled = config?.disabled || false;\n            checkbox.style.cssText = `cursor: ${config?.disabled ? 'default' : 'pointer'}; width: 16px; height: 16px;`;\n            \n            if (config?.opacity) checkbox.style.opacity = config.opacity;\n            if (config?.title) column.title = config.title;\n            \n            const isMasterToggle = config?.masterToggle && typeof extension.isMasterParticipating === 'function';\n            if (isMasterToggle) {\n                const participationEnabled = extension.isMasterParticipationEnabled();\n                const fallbackActive = extension.isMasterFallbackActive();\n                const buildTitle = (enabled, fallback) => {\n                    if (enabled) {\n                        return \"Master participating • Click to switch to orchestrator-only\";\n                    }\n                    if (fallback) {\n                        return \"No workers selected • Master fallback execution active\";\n                    }\n                    return \"Master orchestrator-only • Click to re-enable participation\";\n                };\n\n                checkbox.checked = participationEnabled;\n                checkbox.style.pointerEvents = \"none\";\n                column.style.cursor = \"pointer\";\n                column.title = buildTitle(participationEnabled, fallbackActive);\n                column.onclick = async (event) => {\n                    if (event) {\n                        event.stopPropagation();\n                        event.preventDefault();\n     
               }\n                    const nextState = !extension.isMasterParticipationEnabled();\n                    const nextFallback = !nextState && extension.enabledWorkers.length === 0;\n                    checkbox.checked = nextState;\n                    column.title = buildTitle(nextState, nextFallback);\n                    await extension.updateMasterParticipation(nextState);\n                };\n            } else if (config?.enabled && !config?.disabled && data?.id) {\n                checkbox.style.pointerEvents = \"none\";\n                column.style.cursor = \"pointer\";\n                column.onclick = async () => {\n                    checkbox.checked = !checkbox.checked;\n                    await extension.updateWorkerEnabled(data.id, checkbox.checked);\n                };\n            }\n            \n            column.appendChild(checkbox);\n        }\n        \n        return column;\n    }\n\n    createStatusDotHelper(config, data, extension) {\n        let color = config.color || \"#666\";\n        let title = config.title || \"Status\";\n        let id = config.id;\n        \n        if (typeof config.initialColor === 'function') {\n            color = config.initialColor(data, extension);\n        }\n        if (typeof config.initialTitle === 'function') {\n            title = config.initialTitle(data, extension);\n        }\n        if (typeof config.id === 'function') {\n            id = config.id(data);\n        }\n        \n        const dot = this.createStatusDot(id, color, title);\n        \n        if (config.border) {\n            dot.style.border = config.border;\n        }\n        \n        if (config.pulsing && (typeof config.pulsing !== 'function' || config.pulsing(data))) {\n            dot.classList.add('status-pulsing');\n        }\n        \n        return dot;\n    }\n\n    createSettingsToggleHelper(expandedId, extension) {\n        const arrow = document.createElement(\"span\");\n        arrow.className = 
\"settings-arrow\";\n        arrow.innerHTML = \"▶\";\n        arrow.style.cssText = this.styles.settingsArrow;\n        \n        const isExpanded = typeof expandedId === 'function' ? \n            extension.state.isWorkerExpanded(expandedId(extension)) : \n            (expandedId === 'master' ? false : extension.state.isWorkerExpanded(expandedId));\n            \n        if (isExpanded) {\n            arrow.style.transform = \"rotate(90deg)\";\n        }\n        \n        return arrow;\n    }\n\n    createControlsSection(config, data, extension, isRemote) {\n        if (!config) return null;\n        \n        const controlsDiv = document.createElement(\"div\");\n        controlsDiv.id = `controls-${data?.id || 'master'}`;\n        controlsDiv.style.cssText = this.styles.controlsDiv;\n        \n        // Always create a wrapper div for consistent layout\n        const controlsWrapper = document.createElement(\"div\");\n        controlsWrapper.style.cssText = this.styles.controlsWrapper;\n        \n        if (config.type === 'master') {\n            const participationEnabled = extension.isMasterParticipationEnabled();\n            const fallbackActive = extension.isMasterFallbackActive();\n            let message;\n            const badge = document.createElement(\"div\");\n            badge.classList.add(\"dist-info-box\", \"master-info-badge\");\n            badge.style.cssText = this.styles.infoBox;\n            if (fallbackActive) {\n                message = \"No workers selected. 
Master fallback execution active.\";\n                badge.textContent = message;\n                badge.classList.add(\"master-info-badge--fallback\");\n            } else if (!participationEnabled) {\n                message = \"Master disabled: running as orchestrator only\";\n                badge.textContent = message;\n                badge.classList.add(\"master-info-badge--delegate\");\n            } else {\n                message = \"Master participating in workflows\";\n                badge.textContent = message;\n            }\n            controlsWrapper.appendChild(badge);\n        } else if (config.dynamic && data) {\n            if (isRemote) {\n                const isCloud = data.type === 'cloud';\n                const workerTypeText = isCloud ? \"Cloud worker\" : \"Remote worker\";\n                const workerTypeBadge = this.createInfoBox(workerTypeText);\n                workerTypeBadge.title = \"Worker is externally hosted\";\n                controlsWrapper.appendChild(workerTypeBadge);\n\n                const logBtn = this.createButton('View Log', () => viewWorkerLog(extension, data.id, true));\n                logBtn.id = `log-${data.id}`;\n                logBtn.style.cssText = BUTTON_STYLES.base + BUTTON_STYLES.workerControl;\n                logBtn.classList.add(\"btn--log\");\n                logBtn.title = \"View remote worker log\";\n                controlsWrapper.appendChild(logBtn);\n            } else {\n                const controls = this.createWorkerControls(data.id, {\n                    launch: () => launchWorker(extension, data.id),\n                    stop: () => stopWorker(extension, data.id),\n                    viewLog: () => viewWorkerLog(extension, data.id)\n                });\n                \n                const launchBtn = controls.querySelector(`#launch-${data.id}`);\n                const stopBtn = controls.querySelector(`#stop-${data.id}`);\n                const logBtn = 
controls.querySelector(`#log-${data.id}`);\n                \n                launchBtn.style.cssText = BUTTON_STYLES.base + BUTTON_STYLES.workerControl;\n                launchBtn.classList.add(\"btn--launch\");\n                launchBtn.title = \"Launch worker (runs in background with logging)\";\n                \n                stopBtn.style.cssText = BUTTON_STYLES.base + BUTTON_STYLES.workerControl + BUTTON_STYLES.hidden;\n                stopBtn.classList.add(\"btn--stop\");\n                stopBtn.title = \"Stop worker\";\n                \n                logBtn.style.cssText = BUTTON_STYLES.base + BUTTON_STYLES.workerControl + BUTTON_STYLES.hidden;\n                logBtn.classList.add(\"btn--log\");\n                \n                while (controls.firstChild) {\n                    controlsWrapper.appendChild(controls.firstChild);\n                }\n            }\n        } else if (config.type === 'info') {\n            const infoBtn = this.createButton(config.text, null, config.style || \"\");\n            infoBtn.style.cssText = BUTTON_STYLES.base + BUTTON_STYLES.workerControl + (config.style || BUTTON_STYLES.info) + \" cursor: default;\";\n            infoBtn.disabled = true;\n            controlsWrapper.appendChild(infoBtn);\n        } else if (config.type === 'ghost') {\n            const ghostBtn = document.createElement(\"button\");\n            ghostBtn.style.cssText = `flex: 1; padding: 5px 14px; font-size: 11px; font-weight: 500; border-radius: 4px; cursor: default; ${config.style || \"\"}`;\n            ghostBtn.textContent = config.text;\n            ghostBtn.disabled = true;\n            controlsWrapper.appendChild(ghostBtn);\n        }\n        \n        controlsDiv.appendChild(controlsWrapper);\n        return controlsDiv;\n    }\n\n    createSettingsSection(config, data, extension) {\n        const settingsDiv = document.createElement(\"div\");\n        const settingsId = typeof config.id === 'function' ? 
config.id(data) : config.id;\n        settingsDiv.id = settingsId;\n        settingsDiv.className = \"worker-settings\";\n        \n        const expandedId = typeof config.expandedId === 'function' ? config.expandedId(data) : config.expandedId;\n        const isExpanded = expandedId === 'master-settings' ? false : extension.state.isWorkerExpanded(expandedId);\n        \n        settingsDiv.style.cssText = this.styles.workerSettings;\n        \n        if (isExpanded) {\n            settingsDiv.classList.add(\"expanded\");\n            settingsDiv.style.padding = \"12px\";\n            settingsDiv.style.marginTop = \"8px\";\n            settingsDiv.style.marginBottom = \"8px\";\n        }\n        \n        let settingsForm;\n        if (config.formType === 'master') {\n            settingsForm = this.createMasterSettingsForm(extension, data);\n        } else if (config.formType === 'worker') {\n            settingsForm = this.createWorkerSettingsForm(extension, data);\n        }\n        \n        if (settingsForm) {\n            settingsDiv.appendChild(settingsForm);\n        }\n        \n        return settingsDiv;\n    }\n\n    createMasterSettingsForm(extension, data) {\n        const settingsForm = document.createElement(\"div\");\n        settingsForm.style.cssText = \"display: flex; flex-direction: column; gap: 8px;\";\n        \n        const nameResult = this.createFormGroup(\"Name:\", extension.config?.master?.name || \"Master\", \"master-name\");\n        settingsForm.appendChild(nameResult.group);\n        \n        const hostResult = this.createFormGroup(\"Host:\", extension.config?.master?.host || \"\", \"master-host\", \"text\", \"Auto-detect if empty\");\n        settingsForm.appendChild(hostResult.group);\n\n        // Cloudflare tunnel toggle (simple button inside master settings)\n        const tunnelBtn = this.createButton(\"Enable Cloudflare Tunnel\", (e) => extension.handleTunnelToggle(e.target));\n        tunnelBtn.id = 
\"cloudflare-tunnel-button\";\n        tunnelBtn.style.cssText = BUTTON_STYLES.base + \" margin: 4px 0 -5px 0;\";\n        tunnelBtn.classList.add(\"tunnel-button\", \"tunnel-button--enable\");\n        settingsForm.appendChild(tunnelBtn);\n        extension.tunnelElements = { button: tunnelBtn };\n        extension.updateTunnelUIElements();\n        \n        const saveBtn = this.createButton(\"Save\", async () => {\n            const nameInput = document.getElementById('master-name');\n            const hostInput = document.getElementById('master-host');\n            \n            if (!extension.config.master) extension.config.master = {};\n            extension.config.master.name = nameInput.value.trim() || \"Master\";\n            \n            const hostValue = hostInput.value.trim();\n            \n            await extension.api.updateMaster({\n                host: hostValue,\n                name: extension.config.master.name\n            });\n            \n            // Reload config to refresh any updated values\n            await extension.loadConfig();\n            \n            // If host was emptied, trigger auto-detection\n            if (!hostValue) {\n                extension.log(\"Host field cleared, triggering IP auto-detection\", \"debug\");\n                await extension.detectMasterIP();\n                // Reload config again to get the auto-detected IP\n                await extension.loadConfig();\n                // Update the input field with the detected IP\n                document.getElementById('master-host').value = extension.config?.master?.host || \"\";\n            }\n            \n            document.getElementById('master-name-display').textContent = extension.config.master.name;\n            this.updateMasterDisplay(extension);\n            \n            // Show toast notification\n            if (extension.app?.extensionManager?.toast) {\n                const message = !hostValue ? 
\n                    \"Master settings saved and IP auto-detected\" : \n                    \"Master settings saved successfully\";\n                extension.app.extensionManager.toast.add({\n                    severity: \"success\",\n                    summary: \"Master Updated\",\n                    detail: message,\n                    life: 3000\n                });\n            }\n            \n            saveBtn.textContent = \"Saved!\";\n            setTimeout(() => { saveBtn.textContent = \"Save\"; }, TIMEOUTS.FLASH_LONG);\n        }, \"background-color: #4a7c4a;\");\n        saveBtn.style.cssText = BUTTON_STYLES.base + BUTTON_STYLES.success;\n        \n        const cancelBtn = this.createButton(\"Cancel\", () => {\n            document.getElementById('master-name').value = extension.config?.master?.name || \"Master\";\n            document.getElementById('master-host').value = extension.config?.master?.host || \"\";\n        }, \"background-color: #555;\");\n        cancelBtn.style.cssText = BUTTON_STYLES.base + BUTTON_STYLES.cancel;\n        \n        const buttonGroup = this.createButtonGroup([saveBtn, cancelBtn], \" margin-top: 8px;\");\n        settingsForm.appendChild(buttonGroup);\n        \n        return settingsForm;\n    }\n\n    addPlaceholderHover(card, leftColumn, entityType) {\n        const cardTypeClass = entityType === 'blueprint' ? 'placeholder-card--blueprint' : 'placeholder-card--add';\n        const columnTypeClass = entityType === 'blueprint' ? 
'placeholder-column--blueprint' : 'placeholder-column--add';\n        card.classList.add('placeholder-card', cardTypeClass);\n        leftColumn.classList.add('placeholder-column', columnTypeClass);\n\n        card.onmouseover = () => {\n            card.classList.add('is-hovered');\n            leftColumn.classList.add('is-hovered');\n        };\n        \n        card.onmouseout = () => {\n            card.classList.remove('is-hovered');\n            leftColumn.classList.remove('is-hovered');\n        };\n    }\n\n    renderEntityCard(entityType, data, extension) {\n        return renderEntityCardFn(this, cardConfigs, entityType, data, extension);\n    }\n}\n"
  },
  {
    "path": "web/urlUtils.js",
    "content": "export function normalizeWorkerUrl(rawUrl) {\n    if (!rawUrl || typeof rawUrl !== \"string\") {\n        return \"\";\n    }\n\n    const trimmed = rawUrl.trim();\n    if (!trimmed) {\n        return \"\";\n    }\n\n    const withProtocol = /^https?:\\/\\//i.test(trimmed) ? trimmed : `http://${trimmed}`;\n\n    try {\n        const parsed = new URL(withProtocol);\n        if (parsed.pathname === \"/\") {\n            parsed.pathname = \"\";\n        }\n        return parsed.toString().replace(/\\/$/, \"\");\n    } catch (error) {\n        return withProtocol.replace(/\\/$/, \"\");\n    }\n}\n\nexport function parseHostInput(value) {\n    if (!value) {\n        return { host: \"\", port: null };\n    }\n\n    let cleaned = value.trim().replace(/^https?:\\/\\//i, \"\");\n    cleaned = cleaned.split(\"/\")[0];\n    try {\n        const url = new URL(`http://${cleaned}`);\n        const port = url.port ? parseInt(url.port, 10) : null;\n        return {\n            host: url.hostname || cleaned,\n            port: Number.isFinite(port) ? 
port : null,\n        };\n    } catch (error) {\n        return { host: cleaned, port: null };\n    }\n}\n\nexport function buildWorkerUrl(worker, endpoint = \"\", windowLocation = window.location) {\n    const parsed = parseHostInput(worker?.host || windowLocation.hostname);\n    const host = parsed.host || windowLocation.hostname;\n    const resolvedPort = parsed.port || worker?.port || 8188;\n\n    const isCloud = worker?.type === \"cloud\";\n    const isRunpodProxy = host.endsWith(\".proxy.runpod.net\");\n\n    let finalHost = host;\n    if (!worker?.host && isRunpodProxy) {\n        const match = host.match(/^(.*)\\.proxy\\.runpod\\.net$/);\n        if (match) {\n            finalHost = `${match[1]}-${resolvedPort}.proxy.runpod.net`;\n        } else {\n            console.error(`[Distributed] Failed to parse Runpod proxy host: ${host}`);\n        }\n    }\n\n    const useHttps = isCloud || isRunpodProxy || resolvedPort === 443;\n    const protocol = useHttps ? \"https\" : \"http\";\n    const defaultPort = useHttps ? 443 : 80;\n    const needsPort = !isRunpodProxy && resolvedPort !== defaultPort;\n    const portPart = needsPort ? 
`:${resolvedPort}` : \"\";\n\n    return normalizeWorkerUrl(`${protocol}://${finalHost}${portPart}${endpoint}`);\n}\n\nexport function buildWorkerWebSocketUrl(workerUrl) {\n    const normalized = normalizeWorkerUrl(workerUrl);\n    const wsBase = normalized.replace(/^http:\\/\\//i, \"ws://\").replace(/^https:\\/\\//i, \"wss://\");\n    return `${wsBase}/distributed/worker_ws`;\n}\n\nexport function getMasterUrl(config, windowLocation = window.location, log = null) {\n    const masterHost = config?.master?.host;\n    if (masterHost) {\n        const configuredHost = masterHost;\n\n        // If the configured host already includes protocol, use as-is.\n        if (configuredHost.startsWith(\"http://\") || configuredHost.startsWith(\"https://\")) {\n            return configuredHost;\n        }\n\n        // For domain names (not IPs), default to HTTPS.\n        const isIP = /^(\\d{1,3}\\.){3}\\d{1,3}$/.test(configuredHost);\n        const isLocalhost = configuredHost === \"localhost\" || configuredHost === \"127.0.0.1\";\n\n        if (!isIP && !isLocalhost && configuredHost.includes(\".\")) {\n            return `https://${configuredHost}`;\n        }\n\n        const protocol = windowLocation.protocol || \"http:\";\n        const port = windowLocation.port || (protocol === \"https:\" ? \"443\" : \"80\");\n        if ((protocol === \"https:\" && port === \"443\") || (protocol === \"http:\" && port === \"80\")) {\n            return `${protocol}//${configuredHost}`;\n        }\n        return `${protocol}//${configuredHost}:${port}`;\n    }\n\n    const hostname = windowLocation.hostname;\n    if (hostname !== \"localhost\" && hostname !== \"127.0.0.1\") {\n        return windowLocation.origin;\n    }\n\n    if (typeof log === \"function\") {\n        log(\n            \"No master host configured - remote workers won't be able to connect. 
Master host should be auto-detected on startup.\",\n            \"debug\",\n        );\n    }\n    return windowLocation.origin;\n}\n"
  },
  {
    "path": "web/workerLifecycle.js",
    "content": "import { TIMEOUTS, STATUS_COLORS } from './constants.js';\nimport { buildWorkerUrl, normalizeWorkerUrl } from './urlUtils.js';\nimport { isRemoteWorker } from './workerSettings.js';\nimport { applyProbeResultToWorkerDot } from './workerUtils.js';\n\nexport { normalizeWorkerUrl };\n\nlet _statusCheckRunning = false;\n\nfunction setStatusDotClass(dot, statusClass) {\n    if (!dot) {\n        return;\n    }\n    const classes = [\n        \"worker-status--online\",\n        \"worker-status--offline\",\n        \"worker-status--unknown\",\n        \"worker-status--processing\",\n    ];\n    dot.classList.remove(...classes);\n    if (statusClass) {\n        dot.classList.add(statusClass);\n    }\n}\n\nfunction setButtonClass(button, className) {\n    if (!button) {\n        return;\n    }\n    button.classList.remove(\"btn--stop\", \"btn--launch\", \"btn--log\", \"btn--working\", \"btn--success\", \"btn--error\");\n    if (className) {\n        button.classList.add(className);\n    }\n}\n\nfunction setButtonVisibility(button, visible) {\n    if (!button) {\n        return;\n    }\n    button.classList.toggle(\"is-hidden\", !visible);\n    button.style.display = visible ? 
\"\" : \"none\";\n}\n\nexport async function checkAllWorkerStatuses(extension) {\n    if (_statusCheckRunning || !extension.panelElement) {\n        return;\n    }\n    _statusCheckRunning = true;\n    let nextInterval = 5000;\n\n    try {\n        // Create a fresh AbortController for this poll cycle.\n        extension.statusCheckAbortController = new AbortController();\n\n        await checkMasterStatus(extension);\n\n        if (extension.config?.workers) {\n            await Promise.all(\n                extension.config.workers.map(async (worker) => {\n                    if (worker.enabled || extension.state.isWorkerLaunching(worker.id)) {\n                        await checkWorkerStatus(extension, worker);\n                    }\n                })\n            );\n        }\n\n        let isActive = extension.state.getMasterStatus() === \"processing\";\n        extension.config?.workers?.forEach((worker) => {\n            const workerState = extension.state.getWorker(worker.id);\n            if (workerState.launching || workerState.status?.processing) {\n                isActive = true;\n            }\n        });\n\n        nextInterval = isActive ? 
1000 : 5000;\n    } finally {\n        _statusCheckRunning = false;\n        if (extension.panelElement) {\n            extension.statusCheckTimeout = setTimeout(() => checkAllWorkerStatuses(extension), nextInterval);\n        }\n    }\n}\n\nexport async function checkMasterStatus(extension) {\n    try {\n        const signal = extension.statusCheckAbortController?.signal || null;\n        const probeResult = await extension.api.probeWorker(\n            window.location.origin,\n            TIMEOUTS.STATUS_CHECK,\n            signal,\n        );\n        if (!probeResult.ok) {\n            throw new Error(`HTTP ${probeResult.status}`);\n        }\n\n        const queueRemaining = probeResult.queueRemaining || 0;\n        const isProcessing = queueRemaining > 0;\n\n        // Update master status in state\n        extension.state.setMasterStatus(isProcessing ? \"processing\" : \"online\");\n\n        // Update master status dot\n        const statusDot = document.getElementById(\"master-status\");\n        if (statusDot) {\n            if (!extension.isMasterParticipating()) {\n                if (isProcessing) {\n                    setStatusDotClass(statusDot, \"worker-status--processing\");\n                    statusDot.title = `Orchestrating (${queueRemaining} in queue)`;\n                } else {\n                    setStatusDotClass(statusDot, \"worker-status--unknown\");\n                    statusDot.title = \"Master orchestrator only\";\n                }\n            } else if (isProcessing) {\n                setStatusDotClass(statusDot, \"worker-status--processing\");\n                statusDot.title = `Processing (${queueRemaining} in queue)`;\n            } else {\n                setStatusDotClass(statusDot, \"worker-status--online\");\n                statusDot.title = \"Online\";\n            }\n        }\n    } catch (error) {\n        if (error?.name === \"AbortError\") {\n            return;\n        }\n        // Master is always online (we're 
running on it), so keep it green\n        const statusDot = document.getElementById(\"master-status\");\n        if (statusDot) {\n            setStatusDotClass(\n                statusDot,\n                extension.isMasterParticipating() ? \"worker-status--online\" : \"worker-status--unknown\"\n            );\n            statusDot.title = extension.isMasterParticipating() ? \"Online\" : \"Master orchestrator only\";\n        }\n    }\n}\n\n// Helper to build worker URL\nexport function getWorkerUrl(extension, worker, endpoint = \"\") {\n    return buildWorkerUrl(worker, endpoint, window.location);\n}\n\nexport async function checkWorkerStatus(extension, worker) {\n    // Assume caller ensured enabled; proceed with check\n    const workerUrl = getWorkerUrl(extension, worker);\n\n    try {\n        const signal = extension.statusCheckAbortController?.signal || null;\n        const probeResult = await extension.api.probeWorker(\n            workerUrl,\n            TIMEOUTS.STATUS_CHECK,\n            signal,\n        );\n        if (!probeResult.ok) {\n            throw new Error(`HTTP ${probeResult.status}`);\n        }\n\n        const queueRemaining = probeResult.queueRemaining || 0;\n        const isProcessing = queueRemaining > 0;\n\n        // Update status\n        extension.state.setWorkerStatus(worker.id, {\n            online: true,\n            processing: isProcessing,\n            queueCount: queueRemaining,\n        });\n\n        // Update status dot based on probe result\n        applyProbeResultToWorkerDot(worker.id, probeResult);\n\n        // Clear launching state since worker is now online\n        if (extension.state.isWorkerLaunching(worker.id)) {\n            extension.state.setWorkerLaunching(worker.id, false);\n            clearLaunchingFlag(extension, worker.id);\n        }\n    } catch (error) {\n        // Don't process aborted requests\n        if (error.name === \"AbortError\") {\n            return;\n        }\n\n        // Worker is 
offline or unreachable\n        extension.state.setWorkerStatus(worker.id, {\n            online: false,\n            processing: false,\n            queueCount: 0,\n        });\n\n        // Check if worker is launching\n        if (extension.state.isWorkerLaunching(worker.id)) {\n            extension.ui.updateStatusDot(worker.id, STATUS_COLORS.PROCESSING_YELLOW, \"Launching...\", true);\n        } else if (worker.enabled) {\n            // Only update to red if not currently launching AND still enabled.\n            applyProbeResultToWorkerDot(worker.id, { ok: false });\n        }\n        // If disabled, don't update the dot (leave it gray)\n\n        extension.log(`Worker ${worker.id} status check failed: ${error.message}`, \"debug\");\n    }\n\n    // Update control buttons based on new status\n    const updatedInPlace = extension.updateWorkerCard?.(worker.id, extension.state.getWorkerStatus(worker.id));\n    if (!updatedInPlace) {\n        updateWorkerControls(extension, worker.id);\n    }\n}\n\nexport async function launchWorker(extension, workerId) {\n    const worker = extension.config.workers.find((w) => w.id === workerId);\n\n    // If worker is disabled, enable it first\n    if (!worker.enabled) {\n        await extension.updateWorkerEnabled(workerId, true);\n\n        // Update the checkbox UI\n        const checkbox = document.getElementById(`gpu-${workerId}`);\n        if (checkbox) {\n            checkbox.checked = true;\n        }\n    }\n\n    // Re-query button AFTER updateWorkerEnabled (which may re-render sidebar)\n    const launchBtn = document.querySelector(`#controls-${workerId} button`);\n\n    extension.ui.updateStatusDot(workerId, STATUS_COLORS.PROCESSING_YELLOW, \"Launching...\", true);\n    extension.state.setWorkerLaunching(workerId, true);\n\n    // Allow 90 seconds for worker to launch (model loading can take time)\n    setTimeout(() => {\n        extension.state.setWorkerLaunching(workerId, false);\n    }, TIMEOUTS.LAUNCH);\n\n    
if (!launchBtn) {\n        return;\n    }\n\n    try {\n        // Disable button immediately\n        launchBtn.disabled = true;\n\n        const result = await extension.api.launchWorker(workerId);\n        if (result) {\n            extension.log(`Launched ${worker.name} (PID: ${result.pid})`, \"info\");\n            if (result.log_file) {\n                extension.log(`Log file: ${result.log_file}`, \"debug\");\n            }\n\n            extension.state.setWorkerManaged(workerId, {\n                pid: result.pid,\n                log_file: result.log_file,\n                started_at: Date.now(),\n            });\n\n            // Update controls immediately to hide launch button and show stop/log buttons\n            updateWorkerControls(extension, workerId);\n            setTimeout(() => checkWorkerStatus(extension, worker), TIMEOUTS.STATUS_CHECK);\n        }\n    } catch (error) {\n        // Check if worker was already running\n        if (error.message && error.message.includes(\"already running\")) {\n            extension.log(`Worker ${worker.name} is already running`, \"info\");\n            updateWorkerControls(extension, workerId);\n            setTimeout(() => checkWorkerStatus(extension, worker), TIMEOUTS.STATUS_CHECK_DELAY);\n        } else {\n            extension.log(`Error launching worker: ${error.message || error}`, \"error\");\n\n            // Re-enable button on error\n            if (launchBtn) {\n                launchBtn.disabled = false;\n            }\n        }\n    }\n}\n\nexport async function stopWorker(extension, workerId) {\n    const worker = extension.config.workers.find((w) => w.id === workerId);\n    const stopBtn = document.querySelectorAll(`#controls-${workerId} button`)[1];\n\n    // Provide immediate feedback\n    if (stopBtn) {\n        stopBtn.disabled = true;\n        stopBtn.textContent = \"Stopping...\";\n        setButtonClass(stopBtn, \"btn--working\");\n    }\n\n    try {\n        const result = await 
extension.api.stopWorker(workerId);\n        if (result) {\n            extension.log(`Stopped worker: ${result.message}`, \"info\");\n            extension.state.setWorkerManaged(workerId, null);\n\n            // Immediately update status to offline\n            extension.ui.updateStatusDot(workerId, STATUS_COLORS.OFFLINE_RED, \"Offline\");\n            extension.state.setWorkerStatus(workerId, { online: false });\n\n            // Flash success feedback\n            if (stopBtn) {\n                setButtonClass(stopBtn, \"btn--success\");\n                stopBtn.textContent = \"Stopped!\";\n                setTimeout(() => {\n                    updateWorkerControls(extension, workerId);\n                }, TIMEOUTS.FLASH_SHORT);\n            }\n\n            // Verify status after a short delay\n            setTimeout(() => checkWorkerStatus(extension, worker), TIMEOUTS.STATUS_CHECK);\n        } else {\n            extension.log(`Failed to stop worker: ${result?.message || \"unknown error\"}`, \"error\");\n\n            // Flash error feedback\n            if (stopBtn) {\n                setButtonClass(stopBtn, \"btn--error\");\n                stopBtn.textContent = result?.message?.includes(\"already stopped\") ? 
\"Not Running\" : \"Failed\";\n\n                // If already stopped, update status immediately\n                if (result?.message?.includes(\"already stopped\")) {\n                    extension.ui.updateStatusDot(workerId, STATUS_COLORS.OFFLINE_RED, \"Offline\");\n                    extension.state.setWorkerStatus(workerId, { online: false });\n                }\n\n                setTimeout(() => {\n                    updateWorkerControls(extension, workerId);\n                }, TIMEOUTS.FLASH_MEDIUM);\n            }\n        }\n    } catch (error) {\n        extension.log(`Error stopping worker: ${error}`, \"error\");\n\n        // Reset button on error\n        if (stopBtn) {\n            setButtonClass(stopBtn, \"btn--error\");\n            stopBtn.textContent = \"Error\";\n            setTimeout(() => {\n                updateWorkerControls(extension, workerId);\n            }, TIMEOUTS.FLASH_MEDIUM);\n        }\n    }\n}\n\nexport async function clearLaunchingFlag(extension, workerId) {\n    try {\n        await extension.api.clearLaunchingFlag(workerId);\n        extension.log(`Cleared launching flag for worker ${workerId}`, \"debug\");\n    } catch (error) {\n        extension.log(`Error clearing launching flag: ${error.message || error}`, \"error\");\n    }\n}\n\nexport async function loadManagedWorkers(extension) {\n    try {\n        const result = await extension.api.getManagedWorkers();\n\n        // Check for launching workers\n        for (const [workerId, info] of Object.entries(result.managed_workers)) {\n            extension.state.setWorkerManaged(workerId, info);\n\n            // If worker is marked as launching, add to launchingWorkers set\n            if (info.launching) {\n                extension.state.setWorkerLaunching(workerId, true);\n                extension.log(`Worker ${workerId} is in launching state`, \"debug\");\n            }\n        }\n\n        // Update UI for all workers\n        if (extension.config?.workers) {\n   
         extension.config.workers.forEach((w) => updateWorkerControls(extension, w.id));\n        }\n    } catch (error) {\n        extension.log(`Error loading managed workers: ${error}`, \"error\");\n    }\n}\n\nexport function updateWorkerControls(extension, workerId) {\n    const controlsDiv = document.getElementById(`controls-${workerId}`);\n\n    if (!controlsDiv) {\n        return;\n    }\n\n    const worker = extension.config.workers.find((w) => w.id === workerId);\n    if (!worker) {\n        return;\n    }\n\n    // Update button states - buttons are now inside a wrapper div\n    const launchBtn = document.getElementById(`launch-${workerId}`);\n    const stopBtn = document.getElementById(`stop-${workerId}`);\n    const logBtn = document.getElementById(`log-${workerId}`);\n\n    if (isRemoteWorker(extension, worker)) {\n        setButtonVisibility(launchBtn, false);\n        setButtonVisibility(stopBtn, false);\n        if (logBtn) {\n            setButtonVisibility(logBtn, true);\n            logBtn.disabled = false;\n            logBtn.textContent = \"View Log\";\n            setButtonClass(logBtn, \"btn--log\");\n        }\n        return;\n    }\n\n    // Ensure we check for string ID\n    const managedInfo = extension.state.getWorker(workerId).managed;\n    const status = extension.state.getWorkerStatus(workerId);\n\n    // Show log button immediately if we have log file info (even if worker is still starting)\n    if (logBtn) {\n        const showLog = Boolean(managedInfo?.log_file);\n        setButtonVisibility(logBtn, showLog);\n        if (showLog) {\n            setButtonClass(logBtn, \"btn--log\");\n        }\n    }\n\n    if (status?.online || managedInfo) {\n        // Worker is running or we just launched it\n        setButtonVisibility(launchBtn, false);\n\n        if (managedInfo) {\n            // Only show stop button if we manage this worker\n            setButtonVisibility(stopBtn, true);\n            stopBtn.disabled = false;\n         
   stopBtn.textContent = \"Stop\";\n            setButtonClass(stopBtn, \"btn--stop\");\n        } else {\n            // Hide stop button for workers launched outside UI\n            setButtonVisibility(stopBtn, false);\n        }\n    } else {\n        // Worker is not running\n        setButtonVisibility(launchBtn, true);\n        launchBtn.disabled = false;\n        launchBtn.textContent = \"Launch\";\n        setButtonClass(launchBtn, \"btn--launch\");\n\n        setButtonVisibility(stopBtn, false);\n    }\n}\n\nexport async function viewWorkerLog(extension, workerId, isRemote = false) {\n    const worker = extension.config.workers.find((w) => w.id === workerId);\n    const isRemoteLog = isRemote || (worker ? isRemoteWorker(extension, worker) : false);\n    const managedInfo = extension.state.getWorker(workerId).managed;\n    if (!isRemoteLog && !managedInfo?.log_file) {\n        return;\n    }\n\n    const logBtn = document.getElementById(`log-${workerId}`);\n\n    // Provide immediate feedback\n    if (logBtn) {\n        logBtn.disabled = true;\n        logBtn.textContent = \"Loading...\";\n        setButtonClass(logBtn, \"btn--working\");\n    }\n\n    try {\n        const fetchLog = isRemoteLog\n            ? 
async () => extension.api.getRemoteWorkerLog(workerId, 300)\n            : async () => extension.api.getWorkerLog(workerId, 1000);\n        const data = await fetchLog();\n\n        // Create modal dialog\n        extension.ui.showLogModal(extension, workerId, data, fetchLog);\n\n        // Restore button\n        if (logBtn) {\n            logBtn.disabled = false;\n            logBtn.textContent = \"View Log\";\n            setButtonClass(logBtn, \"btn--log\");\n        }\n    } catch (error) {\n        extension.log(\"Error viewing log: \" + error.message, \"error\");\n        extension.app.extensionManager.toast.add({\n            severity: \"error\",\n            summary: \"Error\",\n            detail: `Failed to load log: ${error.message}`,\n            life: 5000,\n        });\n\n        // Flash error and restore button\n        if (logBtn) {\n            setButtonClass(logBtn, \"btn--error\");\n            logBtn.textContent = \"Error\";\n            setTimeout(() => {\n                logBtn.disabled = false;\n                logBtn.textContent = \"View Log\";\n                setButtonClass(logBtn, \"btn--log\");\n            }, TIMEOUTS.FLASH_LONG);\n        }\n    }\n}\n\nexport async function refreshLog(extension, workerId, silent = false) {\n    const logContent = document.getElementById(\"distributed-log-content\");\n    if (!logContent) {\n        return;\n    }\n\n    try {\n        const worker = extension.config.workers.find((w) => w.id === workerId);\n        const isRemoteLog = worker ? isRemoteWorker(extension, worker) : false;\n        const data = isRemoteLog\n            ? 
await extension.api.getRemoteWorkerLog(workerId, 300)\n            : await extension.api.getWorkerLog(workerId, 1000);\n\n        // Update content\n        const shouldAutoScroll = logContent.scrollTop + logContent.clientHeight >= logContent.scrollHeight - 50;\n        logContent.textContent = data.content;\n\n        // Auto-scroll if was at bottom\n        if (shouldAutoScroll) {\n            logContent.scrollTop = logContent.scrollHeight;\n        }\n\n        // Only show toast if not in silent mode (manual refresh)\n        if (!silent) {\n            extension.app.extensionManager.toast.add({\n                severity: \"success\",\n                summary: \"Log Refreshed\",\n                detail: \"Log content updated\",\n                life: 2000,\n            });\n        }\n    } catch (error) {\n        // Only show error toast if not in silent mode\n        if (!silent) {\n            extension.app.extensionManager.toast.add({\n                severity: \"error\",\n                summary: \"Refresh Failed\",\n                detail: error.message,\n                life: 3000,\n            });\n        }\n    }\n}\n\nexport function startLogAutoRefresh(extension, workerId) {\n    // Stop any existing auto-refresh\n    stopLogAutoRefresh(extension);\n\n    // Refresh every 2 seconds\n    extension.logAutoRefreshInterval = setInterval(() => {\n        refreshLog(extension, workerId, true); // silent mode\n    }, TIMEOUTS.LOG_REFRESH);\n}\n\nexport function stopLogAutoRefresh(extension) {\n    if (extension.logAutoRefreshInterval) {\n        clearInterval(extension.logAutoRefreshInterval);\n        extension.logAutoRefreshInterval = null;\n    }\n}\n\nexport function toggleWorkerExpanded(extension, workerId) {\n    const gpuDiv = document.querySelector(`[data-worker-id=\"${workerId}\"]`);\n    const settingsDiv = gpuDiv?.querySelector(`#settings-${workerId}`) || document.getElementById(`settings-${workerId}`);\n    const settingsArrow = 
gpuDiv?.querySelector(\".settings-arrow\");\n\n    if (!settingsDiv) {\n        return;\n    }\n\n    if (extension.state.isWorkerExpanded(workerId)) {\n        extension.state.setWorkerExpanded(workerId, false);\n        settingsDiv.classList.remove(\"expanded\");\n        settingsDiv.style.padding = \"0 12px\";\n        settingsDiv.style.marginTop = \"0\";\n        settingsDiv.style.marginBottom = \"0\";\n        if (settingsArrow) {\n            settingsArrow.classList.remove(\"settings-arrow--expanded\");\n        }\n    } else {\n        extension.state.setWorkerExpanded(workerId, true);\n        settingsDiv.classList.add(\"expanded\");\n        settingsDiv.style.padding = \"12px\";\n        settingsDiv.style.marginTop = \"8px\";\n        settingsDiv.style.marginBottom = \"8px\";\n        if (settingsArrow) {\n            settingsArrow.classList.add(\"settings-arrow--expanded\");\n        }\n    }\n}\n"
  },
  {
    "path": "web/workerSettings.js",
    "content": "import { renderSidebarContent } from './sidebarRenderer.js';\nimport { generateUUID } from './constants.js';\nimport { parseHostInput } from './urlUtils.js';\nimport { toggleWorkerExpanded } from './workerLifecycle.js';\n\nconst WORKERS_CHANGED_EVENT = \"distributed:workers-changed\";\n\nfunction emitWorkersChanged(extension) {\n    if (typeof window === \"undefined\" || typeof window.dispatchEvent !== \"function\") {\n        return;\n    }\n    window.dispatchEvent(new CustomEvent(WORKERS_CHANGED_EVENT, {\n        detail: { workers: extension.config?.workers || [] },\n    }));\n}\n\nexport function isRemoteWorker(extension, worker) {\n    const workerType = String(worker?.type || \"\").toLowerCase();\n\n    // Explicit type always wins over host heuristics.\n    if (workerType === \"cloud\" || workerType === \"remote\") {\n        return true;\n    }\n    if (workerType === \"local\") {\n        return false;\n    }\n\n    // Otherwise check by host (backward compatibility)\n    const parsed = parseHostInput(worker?.host || window.location.hostname);\n    const host = String(parsed.host || window.location.hostname || \"\").toLowerCase();\n    const currentHost = String(parseHostInput(window.location.hostname).host || window.location.hostname || \"\").toLowerCase();\n    const localHosts = new Set([\"\", \"localhost\", \"127.0.0.1\", \"::1\", \"[::1]\", \"0.0.0.0\"]);\n    return !(localHosts.has(host) || host === currentHost);\n}\n\nexport function isCloudWorker(extension, worker) {\n    return worker.type === \"cloud\";\n}\n\nexport async function saveWorkerSettings(extension, workerId) {\n    const worker = extension.config.workers.find((w) => w.id === workerId);\n    if (!worker) {\n        return;\n    }\n\n    // Get form values\n    const name = document.getElementById(`name-${workerId}`).value;\n    const workerType = document.getElementById(`worker-type-${workerId}`).value;\n    const isRemote = workerType === \"remote\" || workerType === 
\"cloud\";\n    const isCloud = workerType === \"cloud\";\n    const rawHost = isRemote ? document.getElementById(`host-${workerId}`).value : window.location.hostname;\n    const parsedHost = isRemote ? parseHostInput(rawHost) : { host: window.location.hostname, port: null };\n    const host = isRemote ? parsedHost.host : window.location.hostname;\n    const hostTrimmed = (host || \"\").trim();\n    let port = parseInt(document.getElementById(`port-${workerId}`).value);\n    const cudaDevice = isRemote ? undefined : parseInt(document.getElementById(`cuda-${workerId}`).value);\n    const extraArgs = isRemote ? undefined : document.getElementById(`args-${workerId}`).value;\n\n    if (isRemote && Number.isFinite(parsedHost.port)) {\n        port = parsedHost.port;\n    }\n\n    // Validate\n    if (!name.trim()) {\n        extension.app.extensionManager.toast.add({\n            severity: \"error\",\n            summary: \"Validation Error\",\n            detail: \"Worker name is required\",\n            life: 3000,\n        });\n        return;\n    }\n\n    if ((workerType === \"remote\" || workerType === \"cloud\") && !hostTrimmed) {\n        extension.app.extensionManager.toast.add({\n            severity: \"error\",\n            summary: \"Validation Error\",\n            detail: \"Host is required for remote workers\",\n            life: 3000,\n        });\n        return;\n    }\n\n    if (!isCloud && (isNaN(port) || port < 1 || port > 65535)) {\n        extension.app.extensionManager.toast.add({\n            severity: \"error\",\n            summary: \"Validation Error\",\n            detail: \"Port must be between 1 and 65535\",\n            life: 3000,\n        });\n        return;\n    }\n\n    // Check for port conflicts\n    // Remote workers can reuse ports, but local workers cannot share ports with each other or master\n    if (!isRemote) {\n        // Check if port conflicts with master\n        const masterPort = parseInt(window.location.port) || 
(window.location.protocol === \"https:\" ? 443 : 80);\n        if (port === masterPort) {\n            extension.app.extensionManager.toast.add({\n                severity: \"error\",\n                summary: \"Port Conflict\",\n                detail: `Port ${port} is already in use by the master server`,\n                life: 3000,\n            });\n            return;\n        }\n\n        // Check if port conflicts with other local workers\n        const localPortConflict = extension.config.workers.some(\n            (w) => w.id !== workerId && w.port === port && !w.host // local workers have no host or host is null\n        );\n\n        if (localPortConflict) {\n            extension.app.extensionManager.toast.add({\n                severity: \"error\",\n                summary: \"Port Conflict\",\n                detail: `Port ${port} is already in use by another local worker`,\n                life: 3000,\n            });\n            return;\n        }\n    } else {\n        // For remote workers, only check conflicts with other workers on the same host\n        const sameHostConflict = extension.config.workers.some((w) => w.id !== workerId && w.port === port && w.host === hostTrimmed);\n\n        if (sameHostConflict) {\n            extension.app.extensionManager.toast.add({\n                severity: \"error\",\n                summary: \"Port Conflict\",\n                detail: `Port ${port} is already in use by another worker on ${host}`,\n                life: 3000,\n            });\n            return;\n        }\n    }\n\n    const wasUnconfiguredRemote =\n        (worker.type === \"remote\" || worker.type === \"cloud\") &&\n        (!String(worker.host || \"\").trim()) &&\n        !worker.enabled;\n    const nextEnabled = isRemote && hostTrimmed && wasUnconfiguredRemote ? 
true : worker.enabled;\n\n    try {\n        await extension.api.updateWorker(workerId, {\n            name: name.trim(),\n            type: workerType,\n            host: isRemote ? hostTrimmed : null,\n            port,\n            cuda_device: isRemote ? null : cudaDevice,\n            extra_args: isRemote ? null : extraArgs ? extraArgs.trim() : \"\",\n            enabled: nextEnabled,\n        });\n\n        // Update local config\n        worker.name = name.trim();\n        worker.type = workerType;\n        if (isRemote) {\n            worker.host = hostTrimmed;\n            delete worker.cuda_device;\n            delete worker.extra_args;\n        } else {\n            delete worker.host;\n            worker.cuda_device = cudaDevice;\n            worker.extra_args = extraArgs ? extraArgs.trim() : \"\";\n        }\n        worker.port = port;\n        worker.enabled = nextEnabled;\n\n        // Sync to state\n        extension.state.updateWorker(workerId, { enabled: nextEnabled });\n        emitWorkersChanged(extension);\n\n        extension.app.extensionManager.toast.add({\n            severity: \"success\",\n            summary: \"Settings Saved\",\n            detail: nextEnabled && wasUnconfiguredRemote\n                ? 
`Worker ${name} configured and enabled`\n                : `Worker ${name} settings updated`,\n            life: 3000,\n        });\n\n        // Keep post-save card height consistent with the default collapsed layout.\n        extension.state.setWorkerExpanded(workerId, false);\n\n        // Refresh the UI\n        if (extension.panelElement) {\n            renderSidebarContent(extension, extension.panelElement);\n        }\n    } catch (error) {\n        extension.app.extensionManager.toast.add({\n            severity: \"error\",\n            summary: \"Save Failed\",\n            detail: error.message,\n            life: 5000,\n        });\n    }\n}\n\nexport function cancelWorkerSettings(extension, workerId) {\n    // Collapse the settings panel\n    toggleWorkerExpanded(extension, workerId);\n\n    // Reset form values to original\n    const worker = extension.config.workers.find((w) => w.id === workerId);\n    if (worker) {\n        document.getElementById(`name-${workerId}`).value = worker.name;\n        document.getElementById(`host-${workerId}`).value = worker.host || \"\";\n        document.getElementById(`port-${workerId}`).value = worker.port;\n        document.getElementById(`cuda-${workerId}`).value = worker.cuda_device || 0;\n        document.getElementById(`args-${workerId}`).value = worker.extra_args || \"\";\n\n        // Reset remote checkbox\n        const remoteCheckbox = document.getElementById(`remote-${workerId}`);\n        if (remoteCheckbox) {\n            remoteCheckbox.checked = isRemoteWorker(extension, worker);\n        }\n    }\n}\n\nexport async function deleteWorker(extension, workerId) {\n    const worker = extension.config.workers.find((w) => w.id === workerId);\n    if (!worker) {\n        return;\n    }\n\n    // Confirm deletion\n    if (!confirm(`Are you sure you want to delete worker \"${worker.name}\"?`)) {\n        return;\n    }\n\n    try {\n        await extension.api.deleteWorker(workerId);\n\n        // Remove from 
local config\n        const index = extension.config.workers.findIndex((w) => w.id === workerId);\n        if (index !== -1) {\n            extension.config.workers.splice(index, 1);\n        }\n        emitWorkersChanged(extension);\n\n        extension.app.extensionManager.toast.add({\n            severity: \"success\",\n            summary: \"Worker Deleted\",\n            detail: `Worker ${worker.name} has been removed`,\n            life: 3000,\n        });\n\n        // Refresh the UI\n        if (extension.panelElement) {\n            renderSidebarContent(extension, extension.panelElement);\n        }\n    } catch (error) {\n        extension.app.extensionManager.toast.add({\n            severity: \"error\",\n            summary: \"Delete Failed\",\n            detail: error.message,\n            life: 5000,\n        });\n    }\n}\n\nexport async function addNewWorker(extension) {\n    const toInt = (value) => {\n        const parsed = Number.parseInt(value, 10);\n        return Number.isFinite(parsed) ? parsed : null;\n    };\n\n    const totalCudaDevices = toInt(extension.cudaDeviceCount);\n    const masterCudaDevice = toInt(extension.masterCudaDevice ?? 
extension.config?.master?.cuda_device);\n    const localWorkers = (extension.config?.workers || []).filter((w) => !isRemoteWorker(extension, w));\n\n    let selectedCudaDevice = extension.config.workers.length;\n    let fallbackToRemote = false;\n    if (totalCudaDevices !== null && totalCudaDevices > 0) {\n        const usedCudaDevices = new Set();\n        for (const worker of localWorkers) {\n            const cudaIdx = toInt(worker.cuda_device);\n            if (cudaIdx !== null) {\n                usedCudaDevices.add(cudaIdx);\n            }\n        }\n        if (masterCudaDevice !== null) {\n            usedCudaDevices.add(masterCudaDevice);\n        }\n\n        const availableCudaDevices = [];\n        for (let i = 0; i < totalCudaDevices; i++) {\n            if (!usedCudaDevices.has(i)) {\n                availableCudaDevices.push(i);\n            }\n        }\n\n        if (availableCudaDevices.length === 0) {\n            fallbackToRemote = true;\n            selectedCudaDevice = null;\n        } else {\n            selectedCudaDevice = availableCudaDevices[0];\n        }\n    }\n\n    // Generate new worker ID using UUID (fallback for non-secure contexts)\n    const newId = generateUUID();\n\n    // Find next available port\n    const usedPorts = extension.config.workers.map((w) => w.port);\n    let nextPort = 8189;\n    while (usedPorts.includes(nextPort)) {\n        nextPort++;\n    }\n\n    // Create new worker object\n    const newWorker = {\n        id: newId,\n        name: `Worker ${extension.config.workers.length + 1}`,\n        port: nextPort,\n        type: fallbackToRemote ? \"remote\" : \"local\",\n        host: fallbackToRemote ? \"\" : null,\n        cuda_device: selectedCudaDevice,\n        enabled: fallbackToRemote ? 
false : true, // Remote fallback starts disabled until configured\n        extra_args: \"\",\n    };\n\n    // Add to config\n    extension.config.workers.push(newWorker);\n\n    // Save immediately\n    try {\n        await extension.api.updateWorker(newId, {\n            name: newWorker.name,\n            port: newWorker.port,\n            cuda_device: newWorker.cuda_device,\n            extra_args: newWorker.extra_args,\n            enabled: newWorker.enabled,\n            host: newWorker.host,\n            type: newWorker.type,\n        });\n\n        // Sync to state\n        extension.state.updateWorker(newId, { enabled: newWorker.enabled });\n        emitWorkersChanged(extension);\n\n        extension.app.extensionManager.toast.add({\n            severity: fallbackToRemote ? \"warn\" : \"success\",\n            summary: fallbackToRemote ? \"Remote Worker Added\" : \"Worker Added\",\n            detail: fallbackToRemote\n                ? `No local GPU available, so a disabled remote worker was added on port ${nextPort}. Configure host and enable it.`\n                : `New worker created on port ${nextPort}`,\n            life: fallbackToRemote ? 5000 : 3000,\n        });\n\n        // Refresh UI and expand the new worker\n        extension.state.setWorkerExpanded(newId, true);\n        if (extension.panelElement) {\n            renderSidebarContent(extension, extension.panelElement);\n        }\n    } catch (error) {\n        extension.app.extensionManager.toast.add({\n            severity: \"error\",\n            summary: \"Failed to Add Worker\",\n            detail: error.message,\n            life: 5000,\n        });\n    }\n}\n"
  },
  {
    "path": "web/workerUtils.js",
    "content": "import { TIMEOUTS, ENDPOINTS } from './constants.js';\nimport { checkAllWorkerStatuses, getWorkerUrl } from './workerLifecycle.js';\n\nexport async function handleWorkerOperation(extension, button, operation, successText, errorText) {\n    const originalText = button.textContent;\n    const originalStyle = button.style.cssText;\n    const originalClasses = Array.from(button.classList);\n    const stateClasses = [\"btn--working\", \"btn--success\", \"btn--error\"];\n\n    const setButtonStateClass = (className) => {\n        button.classList.remove(...stateClasses);\n        if (className) {\n            button.classList.add(className);\n        }\n    };\n    \n    button.textContent = operation.loadingText;\n    button.disabled = true;\n    setButtonStateClass(\"btn--working\");\n    \n    try {\n        const urlsToProcess = extension.enabledWorkers.map(w => ({ \n            name: w.name, \n            url: getWorkerUrl(extension, w)\n        }));\n        \n        if (urlsToProcess.length === 0) {\n            button.textContent = \"No Workers\";\n            setButtonStateClass(\"btn--error\");\n            setTimeout(() => {\n                button.textContent = originalText;\n                button.style.cssText = originalStyle;\n                button.classList.remove(...stateClasses);\n                button.classList.add(...originalClasses);\n                button.disabled = false;\n            }, TIMEOUTS.BUTTON_RESET);\n            return;\n        }\n        \n        const promises = urlsToProcess.map(target =>\n            fetch(`${target.url}${operation.endpoint}`, { \n                method: 'POST', \n                mode: 'cors'\n            })\n                .then(response => ({ ok: response.ok, name: target.name }))\n                .catch(() => ({ ok: false, name: target.name }))\n        );\n        \n        const results = await Promise.all(promises);\n        const failures = results.filter(r => !r.ok);\n        \n        
if (failures.length === 0) {\n            button.textContent = successText;\n            setButtonStateClass(\"btn--success\");\n            if (operation.onSuccess) operation.onSuccess();\n        } else {\n            button.textContent = errorText;\n            setButtonStateClass(\"btn--error\");\n            extension.log(`${operation.name} failed on: ${failures.map(f => f.name).join(\", \")}`, \"error\");\n        }\n        \n        setTimeout(() => {\n            button.textContent = originalText;\n            button.style.cssText = originalStyle;\n            button.classList.remove(...stateClasses);\n            button.classList.add(...originalClasses);\n        }, TIMEOUTS.BUTTON_RESET);\n    } finally {\n        button.disabled = false;\n    }\n}\n\nexport async function handleInterruptWorkers(extension, button) {\n    return handleWorkerOperation(extension, button, {\n        name: \"Interrupt\",\n        endpoint: ENDPOINTS.INTERRUPT,\n        loadingText: \"Interrupting...\",\n        onSuccess: () => setTimeout(() => checkAllWorkerStatuses(extension), TIMEOUTS.POST_ACTION_DELAY)\n    }, \"Interrupted!\", \"Error! See Console\");\n}\n\nexport async function handleClearMemory(extension, button) {\n    return handleWorkerOperation(extension, button, {\n        name: \"Clear memory\",\n        endpoint: ENDPOINTS.CLEAR_MEMORY,\n        loadingText: \"Clearing...\"\n    }, \"Success!\", \"Error! 
See Console\");\n}\n\nexport function findNodesByClass(apiPrompt, className) {\n    return Object.entries(apiPrompt)\n        .filter(([, nodeData]) => nodeData.class_type === className)\n        .map(([nodeId, nodeData]) => ({ id: nodeId, data: nodeData }));\n}\n\n\nexport function applyProbeResultToWorkerDot(workerId, probeResult) {\n    const dot = document.getElementById(`status-${workerId}`);\n    if (!dot) {\n        return;\n    }\n\n    dot.classList.remove(\n        'worker-status--online',\n        'worker-status--offline',\n        'worker-status--processing',\n        'worker-status--unknown',\n        'status-pulsing',\n    );\n\n    if (!probeResult || !probeResult.ok) {\n        dot.classList.add('worker-status--offline');\n        dot.title = 'Offline - Cannot connect';\n        return;\n    }\n\n    if ((probeResult.queueRemaining || 0) > 0) {\n        dot.classList.add('worker-status--processing');\n        dot.title = `Processing (${probeResult.queueRemaining} queued)`;\n        return;\n    }\n\n    dot.classList.add('worker-status--online');\n    dot.title = 'Online - Idle';\n}\n"
  },
  {
    "path": "workers/__init__.py",
    "content": "from .process_manager import WorkerProcessManager\n\n_worker_manager: WorkerProcessManager | None = None\n\n\ndef get_worker_manager() -> WorkerProcessManager:\n    global _worker_manager\n    if _worker_manager is None:\n        _worker_manager = WorkerProcessManager()\n        _worker_manager.queues = {}\n    return _worker_manager\n"
  },
  {
    "path": "workers/detection.py",
    "content": "import os\nimport platform\nimport uuid\n\nimport aiohttp\n\nfrom ..utils.network import normalize_host, get_client_session\nfrom ..utils.logging import debug_log\n\n\nasync def is_local_worker(worker_config):\n    \"\"\"Check if a worker is running on the same machine as the master.\"\"\"\n    host = normalize_host(worker_config.get('host', 'localhost')) or 'localhost'\n    if host in ['localhost', '127.0.0.1', '0.0.0.0', ''] or worker_config.get('type') == 'local':\n        return True\n    \n    # For cloud workers, check if on same physical host\n    if worker_config.get('type') == 'cloud':\n        return await is_same_physical_host(worker_config)\n    \n    return False\n\nasync def is_same_physical_host(worker_config):\n    \"\"\"Compare machine IDs to determine if worker is on same physical host.\"\"\"\n    try:\n        # Get master machine ID\n        master_machine_id = get_machine_id()\n        \n        # Fetch worker's machine ID via API\n        host = normalize_host(worker_config.get('host', 'localhost')) or 'localhost'\n        port = worker_config.get('port', 8188)\n        \n        session = await get_client_session()\n        async with session.get(\n            f\"http://{host}:{port}/distributed/system_info\",\n            timeout=aiohttp.ClientTimeout(total=5)\n        ) as resp:\n            if resp.status == 200:\n                data = await resp.json()\n                worker_machine_id = data.get('machine_id')\n                return worker_machine_id == master_machine_id\n            else:\n                debug_log(f\"Failed to get system info from worker: HTTP {resp.status}\")\n                return False\n    except Exception as e:\n        debug_log(f\"Error checking same physical host: {e}\")\n        return False\n\ndef get_machine_id():\n    \"\"\"Get a unique identifier for this machine.\"\"\"\n    # Try multiple methods to get a stable machine ID\n    try:\n        # Method 1: MAC address-based UUID\n        
return str(uuid.getnode())\n    except Exception:\n        try:\n            # Method 2: Platform + hostname\n            import socket\n            return f\"{platform.machine()}_{socket.gethostname()}\"\n        except Exception:\n            # Fallback\n            return platform.machine()\n\ndef is_docker_environment():\n    \"\"\"Check if running inside Docker container.\"\"\"\n    return (os.path.exists('/.dockerenv') or \n            os.environ.get('DOCKER_CONTAINER', False) or\n            'docker' in platform.node().lower())\n\ndef is_runpod_environment():\n    \"\"\"Check if running in Runpod environment.\"\"\"\n    return (os.environ.get('RUNPOD_POD_ID') is not None or\n            os.environ.get('RUNPOD_API_KEY') is not None)\n"
  },
  {
    "path": "workers/process/__init__.py",
    "content": "from .launch_builder import LaunchCommandBuilder\nfrom .lifecycle import ProcessLifecycle\nfrom .persistence import ProcessPersistence\nfrom .root_discovery import ComfyRootDiscovery\n\n__all__ = [\n    \"ComfyRootDiscovery\",\n    \"LaunchCommandBuilder\",\n    \"ProcessLifecycle\",\n    \"ProcessPersistence\",\n]\n"
  },
  {
    "path": "workers/process/launch_builder.py",
    "content": "import glob\nimport os\nimport shlex\nimport shutil\n\nfrom ...utils.logging import debug_log\nfrom ...utils.process import get_python_executable\n\n\nclass LaunchCommandBuilder:\n    \"\"\"Build command-lines for launching worker ComfyUI processes.\"\"\"\n\n    def _extend_arg(self, cmd, flag, value):\n        if value in (None, \"\", [], ()):\n            return\n        cmd.extend([flag, str(value)])\n\n    def _extend_grouped_args(self, cmd, flag, values):\n        for group in values or []:\n            flattened = [str(item) for item in group if item]\n            if flattened:\n                cmd.append(flag)\n                cmd.extend(flattened)\n\n    def _get_runtime_args(self):\n        try:\n            from comfy.cli_args import args\n            return args\n        except Exception as exc:\n            debug_log(f\"Could not read current ComfyUI CLI args for worker launch: {exc}\")\n            return None\n\n    def _build_runtime_launch_args(self):\n        args = self._get_runtime_args()\n        if args is None:\n            return []\n\n        inherited = []\n        self._extend_arg(inherited, \"--listen\", getattr(args, \"listen\", None))\n        self._extend_arg(inherited, \"--base-directory\", getattr(args, \"base_directory\", None))\n        self._extend_arg(inherited, \"--temp-directory\", getattr(args, \"temp_directory\", None))\n        self._extend_arg(inherited, \"--input-directory\", getattr(args, \"input_directory\", None))\n        self._extend_arg(inherited, \"--output-directory\", getattr(args, \"output_directory\", None))\n        self._extend_arg(inherited, \"--user-directory\", getattr(args, \"user_directory\", None))\n        self._extend_arg(inherited, \"--front-end-root\", getattr(args, \"front_end_root\", None))\n        self._extend_grouped_args(\n            inherited,\n            \"--extra-model-paths-config\",\n            getattr(args, \"extra_model_paths_config\", None),\n        )\n\n        if 
getattr(args, \"enable_manager\", False):\n            inherited.append(\"--enable-manager\")\n        if getattr(args, \"disable_manager_ui\", False):\n            inherited.append(\"--disable-manager-ui\")\n        if getattr(args, \"enable_manager_legacy_ui\", False):\n            inherited.append(\"--enable-manager-legacy-ui\")\n        if getattr(args, \"windows_standalone_build\", False):\n            inherited.append(\"--windows-standalone-build\")\n        if getattr(args, \"log_stdout\", False):\n            inherited.append(\"--log-stdout\")\n\n        verbose = getattr(args, \"verbose\", None)\n        if verbose and verbose != \"INFO\":\n            inherited.extend([\"--verbose\", str(verbose)])\n\n        return inherited\n\n    def _find_windows_terminal(self):\n        \"\"\"Find Windows Terminal executable.\"\"\"\n        possible_paths = [\n            os.path.expandvars(r\"%LOCALAPPDATA%\\Microsoft\\WindowsApps\\wt.exe\"),\n            os.path.expandvars(r\"%PROGRAMFILES%\\WindowsApps\\Microsoft.WindowsTerminal_*\\wt.exe\"),\n            \"wt.exe\",\n        ]\n\n        for path in possible_paths:\n            if os.path.exists(path):\n                return path\n            if \"*\" in path:\n                matches = glob.glob(path)\n                if matches:\n                    return matches[0]\n\n        wt_path = shutil.which(\"wt\")\n        if wt_path:\n            return wt_path\n        return None\n\n    def build_launch_command(self, worker_config, comfy_root):\n        \"\"\"Build the command to launch a worker.\"\"\"\n        main_py = os.path.join(comfy_root, \"main.py\")\n\n        if os.path.exists(main_py):\n            cmd = [\n                get_python_executable(),\n                main_py,\n            ]\n            cmd.extend(self._build_runtime_launch_args())\n            cmd.extend([\"--port\", str(worker_config[\"port\"])])\n\n            current_args = self._get_runtime_args()\n            current_cors = 
getattr(current_args, \"enable_cors_header\", None) if current_args else None\n            cmd.append(\"--enable-cors-header\")\n            if current_cors is not None:\n                cmd.append(str(current_cors))\n\n            if \"--disable-auto-launch\" not in cmd:\n                cmd.append(\"--disable-auto-launch\")\n\n            debug_log(f\"Using main.py: {main_py}\")\n        else:\n            error_msg = f\"Could not find main.py in {comfy_root}\\n\"\n            error_msg += f\"Searched for: {main_py}\\n\"\n            error_msg += f\"Directory contents of {comfy_root}:\\n\"\n            try:\n                if os.path.exists(comfy_root):\n                    files = os.listdir(comfy_root)[:20]\n                    error_msg += \"  \" + \"\\n  \".join(files)\n                    if len(os.listdir(comfy_root)) > 20:\n                        error_msg += f\"\\n  ... and {len(os.listdir(comfy_root)) - 20} more files\"\n                else:\n                    error_msg += f\"  Directory {comfy_root} does not exist!\"\n            except Exception as exc:\n                error_msg += f\"  Error listing directory: {exc}\"\n\n            error_msg += \"\\n\\nPossible solutions:\\n\"\n            error_msg += \"1. Check if ComfyUI is installed in a different location\\n\"\n            error_msg += \"2. For Docker: ComfyUI might be in /ComfyUI or /app\\n\"\n            error_msg += \"3. 
Ensure the custom node is installed in the correct location\\n\"\n            raise RuntimeError(error_msg)\n\n        if worker_config.get(\"extra_args\"):\n            raw_args = worker_config[\"extra_args\"].strip()\n            if raw_args:\n                extra_args_list = shlex.split(raw_args)\n                forbidden_chars = set(\";|>&<`$()[]{}*!?\")\n                for arg in extra_args_list:\n                    if any(char in forbidden_chars for char in arg):\n                        forbidden = \"\".join(forbidden_chars)\n                        raise ValueError(f\"Invalid characters in extra_args: {arg}. Forbidden: {forbidden}\")\n                cmd.extend(extra_args_list)\n\n        return cmd\n"
  },
  {
    "path": "workers/process/lifecycle.py",
    "content": "import os\nimport platform\nimport signal\nimport subprocess\nimport time\n\nfrom ...utils.config import load_config, save_config\nfrom ...utils.constants import PROCESS_TERMINATION_TIMEOUT, PROCESS_WAIT_TIMEOUT, WORKER_CHECK_INTERVAL\nfrom ...utils.logging import debug_log, log\nfrom ...utils.process import get_python_executable, is_process_alive, terminate_process\n\ntry:\n    import psutil\n\n    PSUTIL_AVAILABLE = True\nexcept ImportError:\n    log(\"psutil not available, using fallback process management\")\n    PSUTIL_AVAILABLE = False\n\n\nclass ProcessLifecycle:\n    \"\"\"Worker process lifecycle operations operating on manager-owned state.\"\"\"\n\n    def __init__(self, manager):\n        self._manager = manager\n\n    def launch_worker(self, worker_config, show_window=False):\n        \"\"\"Launch a worker process with logging.\"\"\"\n        _ = show_window  # Kept for API compatibility.\n        comfy_root = self._manager.find_comfy_root()\n\n        env = os.environ.copy()\n        env[\"CUDA_VISIBLE_DEVICES\"] = str(worker_config.get(\"cuda_device\", 0))\n        env[\"PYTORCH_CUDA_ALLOC_CONF\"] = \"expandable_segments:True\"\n        env[\"COMFYUI_MASTER_PID\"] = str(os.getpid())\n        env[\"COMFYUI_IS_WORKER\"] = \"1\"\n\n        cmd = self._manager.build_launch_command(worker_config, comfy_root)\n        cwd = comfy_root\n\n        log_dir = os.path.join(comfy_root, \"logs\", \"workers\")\n        os.makedirs(log_dir, exist_ok=True)\n\n        date_stamp = time.strftime(\"%Y%m%d\")\n        worker_name = worker_config.get(\"name\", f\"Worker{worker_config['id']}\")\n        safe_name = \"\".join(char if char.isalnum() or char in (\"-\", \"_\") else \"_\" for char in worker_name)\n        log_file = os.path.join(log_dir, f\"{safe_name}_{date_stamp}.log\")\n\n        with open(log_file, \"a\", encoding=\"utf-8\") as log_handle:\n            log_handle.write(f\"\\n\\n{'=' * 50}\\n\")\n            log_handle.write(\"=== ComfyUI 
Worker Session Started ===\\n\")\n            log_handle.write(f\"Worker: {worker_name}\\n\")\n            log_handle.write(f\"Port: {worker_config['port']}\\n\")\n            log_handle.write(f\"CUDA Device: {worker_config.get('cuda_device', 0)}\\n\")\n            log_handle.write(f\"Started: {time.strftime('%Y-%m-%d %H:%M:%S')}\\n\")\n            log_handle.write(f\"Command: {' '.join(cmd)}\\n\")\n\n            config = load_config()\n            stop_on_master_exit = config.get(\"settings\", {}).get(\"stop_workers_on_master_exit\", True)\n            if stop_on_master_exit:\n                log_handle.write(\"Note: Worker will stop when master shuts down\\n\")\n            else:\n                log_handle.write(\"Note: Worker will continue running after master shuts down\\n\")\n            log_handle.write(\"=\" * 30 + \"\\n\\n\")\n            log_handle.flush()\n\n            if stop_on_master_exit and env.get(\"COMFYUI_MASTER_PID\"):\n                monitor_script = os.path.join(\n                    os.path.dirname(os.path.dirname(__file__)),\n                    \"worker_monitor.py\",\n                )\n                monitored_cmd = [get_python_executable(), monitor_script] + cmd\n                log_handle.write(f\"[Worker Monitor] Monitoring master PID: {env['COMFYUI_MASTER_PID']}\\n\")\n                log_handle.flush()\n            else:\n                monitored_cmd = cmd\n\n            if platform.system() == \"Windows\":\n                create_no_window = 0x08000000\n                process = subprocess.Popen(\n                    monitored_cmd,\n                    env=env,\n                    cwd=cwd,\n                    stdout=log_handle,\n                    stderr=subprocess.STDOUT,\n                    creationflags=create_no_window,\n                )\n            else:\n                process = subprocess.Popen(\n                    monitored_cmd,\n                    env=env,\n                    cwd=cwd,\n                    
stdout=log_handle,\n                    stderr=subprocess.STDOUT,\n                    start_new_session=True,\n                )\n\n        worker_id = str(worker_config[\"id\"])\n        self._manager.processes[worker_id] = {\n            \"pid\": process.pid,\n            \"process\": process,\n            \"started_at\": time.time(),\n            \"config\": worker_config,\n            \"log_file\": log_file,\n            \"is_monitor\": stop_on_master_exit and env.get(\"COMFYUI_MASTER_PID\"),\n            \"launching\": True,\n        }\n\n        self._manager.save_processes()\n\n        if stop_on_master_exit and env.get(\"COMFYUI_MASTER_PID\"):\n            debug_log(f\"Launched worker {worker_name} via monitor (Monitor PID: {process.pid})\")\n        else:\n            log(f\"Launched worker {worker_name} directly (PID: {process.pid})\")\n        debug_log(f\"Log file: {log_file}\")\n        return process.pid\n\n    def stop_worker(self, worker_id):\n        \"\"\"Stop a worker process.\"\"\"\n        worker_id = str(worker_id)\n        if worker_id not in self._manager.processes:\n            return False, \"Worker not managed by UI\"\n\n        proc_info = self._manager.processes[worker_id]\n        process = proc_info.get(\"process\")\n        pid = proc_info[\"pid\"]\n        debug_log(f\"Attempting to stop worker {worker_id} (PID: {pid})\")\n\n        if not process:\n            try:\n                debug_log(\"[Distributed] Stopping restored process (no subprocess object)\")\n                if self._kill_process_tree(pid):\n                    del self._manager.processes[worker_id]\n                    self._manager.save_processes()\n                    debug_log(f\"Successfully stopped worker {worker_id} and all child processes\")\n                    return True, \"Worker stopped\"\n                return False, \"Failed to stop worker process\"\n            except Exception as exc:\n                log(f\"[Distributed] Exception during stop: 
{exc}\")\n                return False, f\"Error stopping worker: {str(exc)}\"\n\n        if process.poll() is not None:\n            log(f\"[Distributed] Worker {worker_id} already stopped\")\n            del self._manager.processes[worker_id]\n            self._manager.save_processes()\n            return False, \"Worker already stopped\"\n\n        try:\n            debug_log(f\"Using process tree kill for worker {worker_id}\")\n            if self._kill_process_tree(pid):\n                del self._manager.processes[worker_id]\n                self._manager.save_processes()\n                debug_log(f\"Successfully stopped worker {worker_id} and all child processes\")\n                return True, \"Worker stopped\"\n\n            log(\"[Distributed] Process tree kill failed, trying normal termination\")\n            terminate_process(process, timeout=PROCESS_TERMINATION_TIMEOUT)\n            del self._manager.processes[worker_id]\n            self._manager.save_processes()\n            return True, \"Worker stopped (fallback)\"\n        except Exception as exc:\n            log(f\"[Distributed] Exception during stop: {exc}\")\n            return False, f\"Error stopping worker: {str(exc)}\"\n\n    def get_managed_workers(self):\n        \"\"\"Get list of workers managed by this process.\"\"\"\n        managed = {}\n        for worker_id, proc_info in list(self._manager.processes.items()):\n            is_running, _ = self._check_worker_process(worker_id, proc_info)\n            if is_running:\n                managed[worker_id] = {\n                    \"pid\": proc_info[\"pid\"],\n                    \"started_at\": proc_info[\"started_at\"],\n                    \"log_file\": proc_info.get(\"log_file\"),\n                    \"launching\": proc_info.get(\"launching\", False),\n                }\n            else:\n                del self._manager.processes[worker_id]\n\n        return managed\n\n    def cleanup_all(self):\n        \"\"\"Stop all managed 
workers (called on shutdown).\"\"\"\n        for worker_id in list(self._manager.processes.keys()):\n            try:\n                self.stop_worker(worker_id)\n            except Exception as exc:\n                log(f\"[Distributed] Error stopping worker {worker_id}: {exc}\")\n\n        config = load_config()\n        config[\"managed_processes\"] = {}\n        save_config(config)\n\n    def _is_process_running(self, pid):\n        \"\"\"Check if a process with given PID is running.\"\"\"\n        return is_process_alive(pid)\n\n    def _check_worker_process(self, worker_id, proc_info):\n        \"\"\"Check if a worker process is still running and return status.\"\"\"\n        _ = worker_id  # Signature retained for compatibility with existing callers.\n        process = proc_info.get(\"process\")\n        pid = proc_info.get(\"pid\")\n\n        if process:\n            return process.poll() is None, True\n        if pid:\n            return self._is_process_running(pid), False\n        return False, False\n\n    def _kill_process_tree(self, pid):\n        \"\"\"Kill a process and all its children.\"\"\"\n        if PSUTIL_AVAILABLE:\n            try:\n                parent = psutil.Process(pid)\n                children = parent.children(recursive=True)\n\n                debug_log(f\"Killing process tree for PID {pid} ({parent.name()})\")\n                for child in children:\n                    debug_log(f\"  - Child PID {child.pid} ({child.name()})\")\n\n                for child in children:\n                    try:\n                        debug_log(f\"Terminating child {child.pid}\")\n                        child.terminate()\n                    except psutil.NoSuchProcess:\n                        pass\n\n                _, alive = psutil.wait_procs(children, timeout=PROCESS_WAIT_TIMEOUT)\n                for child in alive:\n                    try:\n                        debug_log(f\"Force killing child {child.pid}\")\n                       
 child.kill()\n                    except psutil.NoSuchProcess:\n                        pass\n\n                try:\n                    debug_log(f\"Terminating parent {pid}\")\n                    parent.terminate()\n                    parent.wait(timeout=PROCESS_WAIT_TIMEOUT)\n                except psutil.TimeoutExpired:\n                    debug_log(f\"Force killing parent {pid}\")\n                    parent.kill()\n                except psutil.NoSuchProcess:\n                    debug_log(f\"Parent process {pid} already gone\")\n                return True\n            except psutil.NoSuchProcess:\n                debug_log(f\"Process {pid} does not exist\")\n                return False\n            except Exception as exc:\n                debug_log(f\"Error killing process tree: {exc}\")\n\n        debug_log(\"[Distributed] Using OS commands to kill process tree\")\n        if platform.system() == \"Windows\":\n            try:\n                result = subprocess.run(\n                    [\"wmic\", \"process\", \"where\", f\"ParentProcessId={pid}\", \"get\", \"ProcessId\"],\n                    capture_output=True,\n                    text=True,\n                )\n                if result.returncode == 0:\n                    lines = result.stdout.strip().split(\"\\n\")[1:]\n                    child_pids = [line.strip() for line in lines if line.strip().isdigit()]\n                    debug_log(f\"[Distributed] Found child processes: {child_pids}\")\n                    for child_pid in child_pids:\n                        try:\n                            subprocess.run(\n                                [\"taskkill\", \"/F\", \"/PID\", child_pid],\n                                capture_output=True,\n                                check=False,\n                            )\n                        except (FileNotFoundError, OSError) as exc:\n                            debug_log(f\"[Distributed] Warning: taskkill failed for PID {child_pid}: 
{exc}\")\n\n                result = subprocess.run(\n                    [\"taskkill\", \"/F\", \"/PID\", str(pid), \"/T\"],\n                    capture_output=True,\n                    text=True,\n                )\n                debug_log(f\"[Distributed] Taskkill result: {result.stdout.strip()}\")\n                return result.returncode == 0\n            except Exception as exc:\n                log(f\"[Distributed] Error with taskkill: {exc}\")\n                return False\n\n        try:\n            subprocess.run([\"pkill\", \"-TERM\", \"-P\", str(pid)], check=False)\n            time.sleep(WORKER_CHECK_INTERVAL)\n            subprocess.run([\"pkill\", \"-KILL\", \"-P\", str(pid)], check=False)\n            os.kill(pid, signal.SIGKILL)\n            return True\n        except Exception as exc:\n            log(f\"[Distributed] Error killing process tree for PID {pid}: {exc}\")\n            return False\n"
  },
  {
    "path": "workers/process/persistence.py",
    "content": "from ...utils.config import load_config, save_config\nfrom ...utils.logging import debug_log\n\n\nclass ProcessPersistence:\n    \"\"\"Persist and restore manager-owned worker process metadata.\"\"\"\n\n    def __init__(self, manager):\n        self._manager = manager\n\n    def load_processes(self):\n        \"\"\"Load persisted process information from config.\"\"\"\n        config = load_config()\n        managed_processes = config.get(\"managed_processes\", {})\n\n        for worker_id, proc_info in managed_processes.items():\n            pid = proc_info.get(\"pid\")\n            if pid and self._manager._is_process_running(pid):\n                self._manager.processes[worker_id] = {\n                    \"pid\": pid,\n                    \"process\": None,\n                    \"started_at\": proc_info.get(\"started_at\"),\n                    \"config\": proc_info.get(\"config\"),\n                    \"log_file\": proc_info.get(\"log_file\"),\n                }\n                debug_log(f\"[Distributed] Restored worker {worker_id} (PID: {pid})\")\n            elif pid:\n                debug_log(f\"[Distributed] Worker {worker_id} (PID: {pid}) is no longer running\")\n\n    def save_processes(self):\n        \"\"\"Save process information to config.\"\"\"\n        config = load_config()\n        managed_processes = {}\n\n        for worker_id, proc_info in self._manager.processes.items():\n            is_running, _ = self._manager._check_worker_process(worker_id, proc_info)\n            if not is_running:\n                continue\n            managed_processes[worker_id] = {\n                \"pid\": proc_info[\"pid\"],\n                \"started_at\": proc_info[\"started_at\"],\n                \"config\": proc_info[\"config\"],\n                \"log_file\": proc_info.get(\"log_file\"),\n                \"launching\": proc_info.get(\"launching\", False),\n            }\n\n        config[\"managed_processes\"] = managed_processes\n        
save_config(config)\n"
  },
  {
    "path": "workers/process/root_discovery.py",
    "content": "import os\nimport sys\n\nfrom ...utils.logging import debug_log, log\n\n\nclass ComfyRootDiscovery:\n    \"\"\"Resolve the ComfyUI root directory across local and container layouts.\"\"\"\n\n    def _find_root_from_loaded_modules(self):\n        \"\"\"Use already-imported ComfyUI modules to locate the runtime root.\"\"\"\n        for module_name in (\"server\", \"folder_paths\", \"main\"):\n            module = sys.modules.get(module_name)\n            module_file = getattr(module, \"__file__\", None)\n            if not module_file:\n                continue\n\n            candidate = os.path.dirname(os.path.abspath(module_file))\n            if os.path.exists(os.path.join(candidate, \"main.py\")):\n                debug_log(f\"Found ComfyUI root via loaded module {module_name}: {candidate}\")\n                return candidate\n\n        return None\n\n    def find_comfy_root(self):\n        # Start from current file location.\n        current_dir = os.path.dirname(os.path.abspath(__file__))\n        potential_root = os.path.dirname(os.path.dirname(current_dir))\n\n        # Method 1: Check for environment variable override.\n        env_root = os.environ.get(\"COMFYUI_ROOT\")\n        if env_root and os.path.exists(os.path.join(env_root, \"main.py\")):\n            debug_log(f\"Found ComfyUI root via COMFYUI_ROOT environment variable: {env_root}\")\n            return env_root\n\n        # Method 2: Inspect the already-loaded ComfyUI runtime modules.\n        runtime_root = self._find_root_from_loaded_modules()\n        if runtime_root:\n            return runtime_root\n\n        # Method 3: Try going up from custom_nodes directory.\n        if os.path.exists(os.path.join(potential_root, \"main.py\")):\n            debug_log(f\"Found ComfyUI root via directory traversal: {potential_root}\")\n            return potential_root\n\n        # Method 4: Look for common Docker paths.\n        docker_paths = [\n            \"/basedir\",\n            
\"/ComfyUI\",\n            \"/app\",\n            \"/workspace/ComfyUI\",\n            \"/comfyui\",\n            \"/opt/ComfyUI\",\n            \"/workspace\",\n        ]\n        for path in docker_paths:\n            if os.path.exists(path) and os.path.exists(os.path.join(path, \"main.py\")):\n                debug_log(f\"Found ComfyUI root in Docker path: {path}\")\n                return path\n\n        # Method 5: Search upwards for main.py.\n        search_dir = current_dir\n        for _ in range(5):\n            if os.path.exists(os.path.join(search_dir, \"main.py\")):\n                debug_log(f\"Found ComfyUI root via upward search: {search_dir}\")\n                return search_dir\n            parent = os.path.dirname(search_dir)\n            if parent == search_dir:\n                break\n            search_dir = parent\n\n        # Method 6: Try to import and use folder_paths.\n        try:\n            import folder_paths\n\n            if hasattr(folder_paths, \"base_path\") and os.path.exists(\n                os.path.join(folder_paths.base_path, \"main.py\")\n            ):\n                debug_log(f\"Found ComfyUI root via folder_paths: {folder_paths.base_path}\")\n                return folder_paths.base_path\n        except Exception as exc:\n            debug_log(f\"folder_paths root detection failed: {exc}\")\n\n        log(\"Warning: Could not reliably determine ComfyUI root directory\")\n        log(f\"Current directory: {current_dir}\")\n        log(f\"Initial guess was: {potential_root}\")\n        return potential_root\n"
  },
  {
    "path": "workers/process_manager.py",
    "content": "from .process import ComfyRootDiscovery, LaunchCommandBuilder, ProcessLifecycle, ProcessPersistence\n\n\nclass WorkerProcessManager:\n    \"\"\"Thin composition wrapper around worker process subsystems.\"\"\"\n\n    def __init__(self):\n        self.processes = {}\n        self._root_discovery = ComfyRootDiscovery()\n        self._launch_builder = LaunchCommandBuilder()\n        self._lifecycle = ProcessLifecycle(self)\n        self._persistence = ProcessPersistence(self)\n        self.load_processes()\n\n    def find_comfy_root(self):\n        return self._root_discovery.find_comfy_root()\n\n    def _find_windows_terminal(self):\n        return self._launch_builder._find_windows_terminal()\n\n    def build_launch_command(self, worker_config, comfy_root):\n        return self._launch_builder.build_launch_command(worker_config, comfy_root)\n\n    def launch_worker(self, worker_config, show_window=False):\n        return self._lifecycle.launch_worker(worker_config, show_window=show_window)\n\n    def stop_worker(self, worker_id):\n        return self._lifecycle.stop_worker(worker_id)\n\n    def get_managed_workers(self):\n        return self._lifecycle.get_managed_workers()\n\n    def cleanup_all(self):\n        return self._lifecycle.cleanup_all()\n\n    def load_processes(self):\n        return self._persistence.load_processes()\n\n    def save_processes(self):\n        return self._persistence.save_processes()\n\n    def _is_process_running(self, pid):\n        return self._lifecycle._is_process_running(pid)\n\n    def _check_worker_process(self, worker_id, proc_info):\n        return self._lifecycle._check_worker_process(worker_id, proc_info)\n\n    def _kill_process_tree(self, pid):\n        return self._lifecycle._kill_process_tree(pid)\n"
  },
  {
    "path": "workers/startup.py",
    "content": "import asyncio\nimport threading\nimport time\nimport atexit\nimport signal\nimport sys\nimport platform\n\nimport server\n\nfrom ..utils.config import load_config, save_config\nfrom ..utils.logging import debug_log, log\nfrom ..utils.network import normalize_host\nfrom ..utils.cloudflare import cloudflare_tunnel_manager\nfrom ..utils.constants import WORKER_STARTUP_DELAY\nfrom . import get_worker_manager\n\n\ndef auto_launch_workers():\n    \"\"\"Launch enabled workers if auto_launch_workers is set to true.\"\"\"\n    wm = get_worker_manager()\n    try:\n        config = load_config()\n        if config.get('settings', {}).get('auto_launch_workers', False):\n            log(\"Auto-launch workers is enabled, checking for workers to start...\")\n            \n            # Clear managed_processes before launching new workers\n            # This handles cases where the master was killed without proper cleanup\n            if config.get('managed_processes'):\n                log(\"Clearing old managed_processes before auto-launch...\")\n                config['managed_processes'] = {}\n                save_config(config)\n            \n            workers = config.get('workers', [])\n            launched_count = 0\n            \n            for worker in workers:\n                if worker.get('enabled', False):\n                    worker_id = worker.get('id')\n                    worker_name = worker.get('name', f'Worker {worker_id}')\n                    \n                    # Skip remote workers\n                    host = (normalize_host(worker.get('host', 'localhost')) or 'localhost').lower()\n                    if host not in ['localhost', '127.0.0.1', '', None]:\n                        debug_log(f\"Skipping remote worker {worker_name} (host: {host})\")\n                        continue\n                    \n                    # Check if already running\n                    if str(worker_id) in wm.processes:\n                        
proc_info = wm.processes[str(worker_id)]\n                        if wm._is_process_running(proc_info['pid']):\n                            debug_log(f\"Worker {worker_name} already running, skipping\")\n                            continue\n                    \n                    # Launch the worker\n                    try:\n                        pid = wm.launch_worker(worker)\n                        log(f\"Auto-launched worker {worker_name} (PID: {pid})\")\n                        \n                        # Mark as launching in managed processes\n                        if str(worker_id) in wm.processes:\n                            wm.processes[str(worker_id)]['launching'] = True\n                            wm.save_processes()\n                        \n                        launched_count += 1\n                    except Exception as e:\n                        log(f\"Failed to auto-launch worker {worker_name}: {e}\")\n            \n            if launched_count > 0:\n                log(f\"Auto-launched {launched_count} worker(s)\")\n            else:\n                debug_log(\"No workers to auto-launch\")\n        else:\n            debug_log(\"Auto-launch workers is disabled\")\n    except Exception as e:\n        log(f\"Error during auto-launch: {e}\")\n\n# Schedule auto-launch after a short delay to ensure server is ready\ndef delayed_auto_launch():\n    \"\"\"Delay auto-launch to ensure server is fully initialized.\"\"\"\n    import threading\n    timer = threading.Timer(WORKER_STARTUP_DELAY, auto_launch_workers)\n    timer.daemon = True\n    timer.start()\n\n# Async cleanup function for proper shutdown\nasync def async_cleanup_and_exit(signum=None):\n    \"\"\"Async-friendly cleanup and exit.\"\"\"\n    wm = get_worker_manager()\n    try:\n        config = load_config()\n        if config.get('settings', {}).get('stop_workers_on_master_exit', True):\n            print(\"\\n[Distributed] Master shutting down, stopping all managed 
workers...\")\n            wm.cleanup_all()\n        else:\n            print(\"\\n[Distributed] Master shutting down, workers will continue running\")\n            wm.save_processes()\n        try:\n            await cloudflare_tunnel_manager.stop_tunnel()\n        except Exception as tunnel_error:\n            log(f\"[Distributed] Warning: Cloudflare tunnel did not stop cleanly during shutdown: {tunnel_error}\")\n    except Exception as e:\n        print(f\"[Distributed] Error during cleanup: {e}\")\n    \n    # On Windows, we need to exit differently\n    if platform.system() == \"Windows\":\n        # Force exit on Windows\n        sys.exit(0)\n    else:\n        # On Unix, stop the event loop gracefully\n        loop = asyncio.get_running_loop()\n        loop.stop()\n\ndef register_async_signals():\n    \"\"\"Register async signal handlers for graceful shutdown.\"\"\"\n    wm = get_worker_manager()\n    if platform.system() == \"Windows\":\n        # Windows doesn't support add_signal_handler, use traditional signal handling\n        def signal_handler(signum, frame):\n            # Schedule the async cleanup in the event loop\n            loop = server.PromptServer.instance.loop\n            if loop and loop.is_running():\n                asyncio.run_coroutine_threadsafe(async_cleanup_and_exit(signum), loop)\n            else:\n                # Fallback to sync cleanup if loop isn't running\n                try:\n                    config = load_config()\n                    if config.get('settings', {}).get('stop_workers_on_master_exit', True):\n                        print(\"\\n[Distributed] Master shutting down, stopping all managed workers...\")\n                        wm.cleanup_all()\n                    else:\n                        print(\"\\n[Distributed] Master shutting down, workers will continue running\")\n                        wm.save_processes()\n                except Exception as e:\n                    print(f\"[Distributed] Error 
during cleanup: {e}\")\n                sys.exit(0)\n        \n        signal.signal(signal.SIGINT, signal_handler)\n        signal.signal(signal.SIGTERM, signal_handler)\n    else:\n        # Unix-like systems support add_signal_handler\n        loop = server.PromptServer.instance.loop\n        \n        for sig in (signal.SIGINT, signal.SIGTERM):\n            loop.add_signal_handler(sig, lambda s=sig: asyncio.create_task(async_cleanup_and_exit(s)))\n        \n        # SIGHUP is Unix-only\n        loop.add_signal_handler(signal.SIGHUP, lambda: asyncio.create_task(async_cleanup_and_exit(signal.SIGHUP)))\n\ndef sync_cleanup():\n    \"\"\"Synchronous wrapper for atexit.\"\"\"\n    wm = get_worker_manager()\n    try:\n        # For atexit, we don't want to stop the loop or exit\n        config = load_config()\n        if config.get('settings', {}).get('stop_workers_on_master_exit', True):\n            print(\"\\n[Distributed] Master shutting down, stopping all managed workers...\")\n            wm.cleanup_all()\n        else:\n            print(\"\\n[Distributed] Master shutting down, workers will continue running\")\n            wm.save_processes()\n        try:\n            loop = asyncio.get_event_loop()\n            if loop.is_running():\n                loop.create_task(cloudflare_tunnel_manager.stop_tunnel())\n            else:\n                loop.run_until_complete(cloudflare_tunnel_manager.stop_tunnel())\n        except RuntimeError:\n            # No running loop; create a temporary one\n            asyncio.run(cloudflare_tunnel_manager.stop_tunnel())\n        except Exception as tunnel_error:\n            log(f\"[Distributed] Warning: Cloudflare tunnel did not stop cleanly during sync cleanup: {tunnel_error}\")\n    except Exception as e:\n        print(f\"[Distributed] Error during cleanup: {e}\")\n"
  },
  {
    "path": "workers/worker_monitor.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nWorker process monitor - monitors if the master process is still alive\nand terminates the worker if the master dies.\n\"\"\"\nimport os\nimport sys\nimport time\nimport subprocess\nimport platform\nimport signal\n\n# Add package root to path so this script works when launched by file path.\nNODE_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nif NODE_ROOT not in sys.path:\n    sys.path.insert(0, NODE_ROOT)\n\ntry:\n    from utils.process import is_process_alive, terminate_process\n    from utils.constants import WORKER_CHECK_INTERVAL, PROCESS_TERMINATION_TIMEOUT\nexcept ImportError:\n    # Fallback if running from different context\n    def is_process_alive(pid):\n        \"\"\"Check if a process with given PID is still alive.\"\"\"\n        try:\n            if platform.system() == \"Windows\":\n                # Windows: use tasklist\n                result = subprocess.run(['tasklist', '/FI', f'PID eq {pid}'], \n                                      capture_output=True, text=True)\n                return str(pid) in result.stdout\n            else:\n                # Unix: send signal 0\n                os.kill(pid, 0)\n                return True\n        except (OSError, subprocess.SubprocessError):\n            return False\n    \n    WORKER_CHECK_INTERVAL = 2.0\n    PROCESS_TERMINATION_TIMEOUT = 5.0\n\ndef monitor_and_run(master_pid, command):\n    \"\"\"Run command and monitor master process.\"\"\"\n    # Start the actual worker process\n    print(f\"[Distributed] Launching worker command: {' '.join(command)}\")\n    worker_process = subprocess.Popen(command)\n    \n    print(f\"[Distributed] Started worker PID: {worker_process.pid}\")\n    print(f\"[Distributed] Monitoring master PID: {master_pid}\")\n    \n    # Write worker PID to a file so parent can track it\n    monitor_pid = os.getpid()\n    pid_info_file = os.environ.get('WORKER_PID_FILE')\n    if pid_info_file:\n        try:\n     
       with open(pid_info_file, 'w') as f:\n                f.write(f\"{monitor_pid},{worker_process.pid}\")\n            print(f\"[Distributed] Wrote PID info to {pid_info_file}\")\n        except Exception as e:\n            print(f\"[Distributed] Could not write PID file: {e}\")\n    \n    # Define cleanup function\n    def cleanup_worker(signum=None, frame=None):\n        \"\"\"Clean up worker process when monitor is terminated.\"\"\"\n        if signum:\n            print(f\"\\n[Distributed] Received signal {signum}, terminating worker...\")\n        else:\n            print(\"\\n[Distributed] Terminating worker...\")\n        \n        if worker_process.poll() is None:  # Still running\n            try:\n                terminate_process(worker_process, timeout=PROCESS_TERMINATION_TIMEOUT)\n            except NameError:\n                # Fallback if terminate_process wasn't imported\n                worker_process.terminate()\n                try:\n                    worker_process.wait(timeout=PROCESS_TERMINATION_TIMEOUT)\n                except subprocess.TimeoutExpired:\n                    print(\"[Distributed] Worker didn't terminate gracefully, forcing kill...\")\n                    worker_process.kill()\n                    worker_process.wait()\n        \n        print(\"[Distributed] Worker terminated.\")\n        sys.exit(0)\n    \n    # Register signal handlers for graceful shutdown\n    signal.signal(signal.SIGTERM, cleanup_worker)\n    signal.signal(signal.SIGINT, cleanup_worker)\n    if platform.system() != \"Windows\":\n        signal.signal(signal.SIGHUP, cleanup_worker)\n    \n    # Monitor loop\n    check_interval = WORKER_CHECK_INTERVAL\n    \n    try:\n        while True:\n            # Check if worker is still running\n            if worker_process.poll() is not None:\n                print(f\"[Distributed] Worker process exited with code: {worker_process.returncode}\")\n                sys.exit(worker_process.returncode)\n            
\n            # Check if master is still alive\n            if not is_process_alive(master_pid):\n                print(f\"[Distributed] Master process {master_pid} is no longer running. Terminating worker...\")\n                cleanup_worker()\n            \n            time.sleep(check_interval)\n            \n    except KeyboardInterrupt:\n        cleanup_worker()\n\nif __name__ == \"__main__\":\n    # Get master PID from environment\n    master_pid = os.environ.get('COMFYUI_MASTER_PID')\n    if not master_pid:\n        print(\"[Distributed] Error: COMFYUI_MASTER_PID not set\")\n        sys.exit(1)\n    \n    try:\n        master_pid = int(master_pid)\n    except ValueError:\n        print(f\"[Distributed] Error: Invalid master PID: {master_pid}\")\n        sys.exit(1)\n    \n    # Get the actual command to run (all remaining arguments)\n    if len(sys.argv) < 2:\n        print(\"[Distributed] Error: No command specified\")\n        sys.exit(1)\n    \n    command = sys.argv[1:]\n    \n    # Start monitoring\n    monitor_and_run(master_pid, command)\n"
  },
  {
    "path": "workflows/distributed-txt2img.json",
    "content": "{\n  \"id\": \"c9a4d248-9b83-408f-b45e-3ef61dd56ef5\",\n  \"revision\": 0,\n  \"last_node_id\": 13,\n  \"last_link_id\": 19,\n  \"nodes\": [\n    {\n      \"id\": 8,\n      \"type\": \"KSampler\",\n      \"pos\": [\n        2190,\n        770\n      ],\n      \"size\": [\n        315,\n        262\n      ],\n      \"flags\": {},\n      \"order\": 5,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"model\",\n          \"type\": \"MODEL\",\n          \"link\": 7\n        },\n        {\n          \"name\": \"positive\",\n          \"type\": \"CONDITIONING\",\n          \"link\": 8\n        },\n        {\n          \"name\": \"negative\",\n          \"type\": \"CONDITIONING\",\n          \"link\": 9\n        },\n        {\n          \"name\": \"latent_image\",\n          \"type\": \"LATENT\",\n          \"link\": 10\n        },\n        {\n          \"name\": \"seed\",\n          \"type\": \"INT\",\n          \"widget\": {\n            \"name\": \"seed\"\n          },\n          \"link\": 11\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"LATENT\",\n          \"type\": \"LATENT\",\n          \"slot_index\": 0,\n          \"links\": [\n            1\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.41\",\n        \"Node name for S&R\": \"KSampler\"\n      },\n      \"widgets_values\": [\n        361252850620022,\n        \"randomize\",\n        20,\n        6,\n        \"euler\",\n        \"normal\",\n        1\n      ]\n    },\n    {\n      \"id\": 9,\n      \"type\": \"EmptyLatentImage\",\n      \"pos\": [\n        2220,\n        1080\n      ],\n      \"size\": [\n        270,\n        106\n      ],\n  
    \"flags\": {},\n      \"order\": 0,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"LATENT\",\n          \"type\": \"LATENT\",\n          \"links\": [\n            10\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"EmptyLatentImage\"\n      },\n      \"widgets_values\": [\n        512,\n        512,\n        1\n      ]\n    },\n    {\n      \"id\": 6,\n      \"type\": \"CLIPTextEncode\",\n      \"pos\": [\n        1846.3876953125,\n        968.6343994140625\n      ],\n      \"size\": [\n        310,\n        180\n      ],\n      \"flags\": {},\n      \"order\": 4,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"clip\",\n          \"type\": \"CLIP\",\n          \"link\": 6\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"CONDITIONING\",\n          \"type\": \"CONDITIONING\",\n          \"slot_index\": 0,\n          \"links\": [\n            9\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.41\",\n        \"Node name for S&R\": \"CLIPTextEncode\"\n      },\n      \"widgets_values\": [\n        \"text, watermark\"\n      ]\n    },\n    {\n      \"id\": 5,\n      \"type\": \"CLIPTextEncode\",\n      \"pos\": [\n        1856.3876953125,\n        768.6343994140625\n      ],\n      \"size\": [\n        300,\n        160\n      
],\n      \"flags\": {},\n      \"order\": 3,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"clip\",\n          \"type\": \"CLIP\",\n          \"link\": 5\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"CONDITIONING\",\n          \"type\": \"CONDITIONING\",\n          \"slot_index\": 0,\n          \"links\": [\n            8\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.41\",\n        \"Node name for S&R\": \"CLIPTextEncode\"\n      },\n      \"widgets_values\": [\n        \"beautiful scenery nature glass bottle landscape, , purple galaxy bottle,\"\n      ]\n    },\n    {\n      \"id\": 7,\n      \"type\": \"CheckpointLoaderSimple\",\n      \"pos\": [\n        1500,\n        780\n      ],\n      \"size\": [\n        315,\n        98\n      ],\n      \"flags\": {},\n      \"order\": 1,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"MODEL\",\n          \"type\": \"MODEL\",\n          \"slot_index\": 0,\n          \"links\": [\n            7\n          ]\n        },\n        {\n          \"name\": \"CLIP\",\n          \"type\": \"CLIP\",\n          \"slot_index\": 1,\n          \"links\": [\n            5,\n            6\n          ]\n        },\n        {\n          \"name\": \"VAE\",\n          \"type\": \"VAE\",\n          \"slot_index\": 2,\n          \"links\": [\n            2\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        
\"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.41\",\n        \"Node name for S&R\": \"CheckpointLoaderSimple\",\n        \"models\": [\n          {\n            \"name\": \"v1-5-pruned-emaonly-fp16.safetensors\",\n            \"url\": \"https://huggingface.co/Comfy-Org/stable-diffusion-v1-5-archive/resolve/main/v1-5-pruned-emaonly-fp16.safetensors?download=true\",\n            \"directory\": \"checkpoints\"\n          }\n        ]\n      },\n      \"widgets_values\": [\n        \"SDXL\\\\juggernautXL_ragnarokBy.safetensors\"\n      ]\n    },\n    {\n      \"id\": 1,\n      \"type\": \"VAEDecode\",\n      \"pos\": [\n        2530,\n        790\n      ],\n      \"size\": [\n        210,\n        46\n      ],\n      \"flags\": {\n        \"collapsed\": true\n      },\n      \"order\": 6,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"samples\",\n          \"type\": \"LATENT\",\n          \"link\": 1\n        },\n        {\n          \"name\": \"vae\",\n          \"type\": \"VAE\",\n          \"link\": 2\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"slot_index\": 0,\n          \"links\": [\n            3\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.41\",\n        \"Node name for S&R\": \"VAEDecode\"\n      }\n    },\n    {\n      \"id\": 2,\n      \"type\": \"DistributedCollector\",\n      \"pos\": [\n        2690,\n        770\n      ],\n      \"size\": [\n        166.50416564941406,\n        26\n      ],\n      \"flags\": {},\n      \"order\": 7,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          
\"name\": \"images\",\n          \"type\": \"IMAGE\",\n          \"link\": 3\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            4\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"aux_id\": \"robertvoy/ComfyUI-Distributed\",\n        \"ver\": \"99021363d65cc2b2f0f3a0f12a76a358f0fb330f\",\n        \"Node name for S&R\": \"DistributedCollector\"\n      }\n    },\n    {\n      \"id\": 3,\n      \"type\": \"PreviewImage\",\n      \"pos\": [\n        2880,\n        760\n      ],\n      \"size\": [\n        410,\n        480\n      ],\n      \"flags\": {},\n      \"order\": 8,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"images\",\n          \"type\": \"IMAGE\",\n          \"link\": 4\n        }\n      ],\n      \"outputs\": [],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.41\",\n        \"Node name for S&R\": \"PreviewImage\"\n      },\n      \"widgets_values\": []\n    },\n    {\n      \"id\": 4,\n      \"type\": \"DistributedSeed\",\n      \"pos\": [\n        1890,\n        1220\n      ],\n      \"size\": [\n        270,\n        82\n      ],\n      \"flags\": {},\n      \"order\": 2,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"seed\",\n          \"type\": \"INT\",\n          \"links\": [\n            11\n          ]\n        }\n      ],\n      \"properties\": {\n     
   \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"aux_id\": \"robertvoy/ComfyUI-Distributed\",\n        \"ver\": \"99021363d65cc2b2f0f3a0f12a76a358f0fb330f\",\n        \"Node name for S&R\": \"DistributedSeed\"\n      },\n      \"widgets_values\": [\n        504373561407102,\n        \"randomize\"\n      ]\n    }\n  ],\n  \"links\": [\n    [\n      1,\n      8,\n      0,\n      1,\n      0,\n      \"LATENT\"\n    ],\n    [\n      2,\n      7,\n      2,\n      1,\n      1,\n      \"VAE\"\n    ],\n    [\n      3,\n      1,\n      0,\n      2,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      4,\n      2,\n      0,\n      3,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      5,\n      7,\n      1,\n      5,\n      0,\n      \"CLIP\"\n    ],\n    [\n      6,\n      7,\n      1,\n      6,\n      0,\n      \"CLIP\"\n    ],\n    [\n      7,\n      7,\n      0,\n      8,\n      0,\n      \"MODEL\"\n    ],\n    [\n      8,\n      5,\n      0,\n      8,\n      1,\n      \"CONDITIONING\"\n    ],\n    [\n      9,\n      6,\n      0,\n      8,\n      2,\n      \"CONDITIONING\"\n    ],\n    [\n      10,\n      9,\n      0,\n      8,\n      3,\n      \"LATENT\"\n    ],\n    [\n      11,\n      4,\n      0,\n      8,\n      4,\n      \"INT\"\n    ]\n  ],\n  \"groups\": [],\n  \"config\": {},\n  \"extra\": {\n    \"ds\": {\n      \"scale\": 0.6649272177973091,\n      \"offset\": [\n        -903.8525468054443,\n        -478.58804363769354\n      ]\n    },\n    \"frontendVersion\": \"1.23.4\"\n  },\n  \"version\": 0.4\n}"
  },
  {
    "path": "workflows/distributed-upscale-video.json",
    "content": "{\n  \"id\": \"707da2be-c7d6-481f-b3b0-3ec8207924a1\",\n  \"revision\": 0,\n  \"last_node_id\": 76,\n  \"last_link_id\": 122,\n  \"nodes\": [\n    {\n      \"id\": 18,\n      \"type\": \"UltimateSDUpscaleDistributed\",\n      \"pos\": [\n        3800,\n        1400\n      ],\n      \"size\": [\n        380,\n        430\n      ],\n      \"flags\": {},\n      \"order\": 15,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"upscaled_image\",\n          \"type\": \"IMAGE\",\n          \"link\": 118\n        },\n        {\n          \"name\": \"model\",\n          \"type\": \"MODEL\",\n          \"link\": 32\n        },\n        {\n          \"name\": \"positive\",\n          \"type\": \"CONDITIONING\",\n          \"link\": 33\n        },\n        {\n          \"name\": \"negative\",\n          \"type\": \"CONDITIONING\",\n          \"link\": 104\n        },\n        {\n          \"name\": \"vae\",\n          \"type\": \"VAE\",\n          \"link\": 35\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            95\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"ComfyUI-Distributed\",\n        \"ver\": \"e076cf3455df72383d17f1d5f5b5aa5e709f2e4a\",\n        \"Node name for S&R\": \"UltimateSDUpscaleDistributed\"\n      },\n      \"widgets_values\": [\n        157027921504581,\n        \"fixed\",\n        5,\n        1,\n        \"res_2s\",\n        \"bong_tangent\",\n        0.35,\n        1024,\n        1024,\n        32,\n        16,\n        true,\n        false\n      ]\n    },\n    {\n      \"id\": 57,\n      \"type\": \"VHS_VideoCombine\",\n      \"pos\": [\n        
4230,\n        1400\n      ],\n      \"size\": [\n        550,\n        639.5\n      ],\n      \"flags\": {},\n      \"order\": 16,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"images\",\n          \"type\": \"IMAGE\",\n          \"link\": 95\n        },\n        {\n          \"name\": \"audio\",\n          \"shape\": 7,\n          \"type\": \"AUDIO\",\n          \"link\": null\n        },\n        {\n          \"name\": \"meta_batch\",\n          \"shape\": 7,\n          \"type\": \"VHS_BatchManager\",\n          \"link\": null\n        },\n        {\n          \"name\": \"vae\",\n          \"shape\": 7,\n          \"type\": \"VAE\",\n          \"link\": null\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"Filenames\",\n          \"type\": \"VHS_FILENAMES\",\n          \"links\": null\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui-videohelpersuite\",\n        \"ver\": \"a7ce59e381934733bfae03b1be029756d6ce936d\",\n        \"Node name for S&R\": \"VHS_VideoCombine\"\n      },\n      \"widgets_values\": {\n        \"frame_rate\": 16,\n        \"loop_count\": 0,\n        \"filename_prefix\": \"WAN\",\n        \"format\": \"video/h264-mp4\",\n        \"pix_fmt\": \"yuv420p\",\n        \"crf\": 20,\n        \"save_metadata\": true,\n        \"trim_to_audio\": false,\n        \"pingpong\": false,\n        \"save_output\": true,\n        \"videopreview\": {\n          \"hidden\": false,\n          \"paused\": false,\n          \"params\": {\n            \"filename\": \"WAN_00065.mp4\",\n            \"subfolder\": \"\",\n            \"type\": \"output\",\n            \"format\": \"video/h264-mp4\",\n            \"frame_rate\": 16,\n            \"workflow\": 
\"WAN_00065.png\",\n            \"fullpath\": \"C:\\\\venvs\\\\ComfyUI\\\\ComfyUI\\\\output\\\\WAN_00065.mp4\"\n          }\n        }\n      }\n    },\n    {\n      \"id\": 12,\n      \"type\": \"CLIPTextEncode\",\n      \"pos\": [\n        2980,\n        1610\n      ],\n      \"size\": [\n        370,\n        160\n      ],\n      \"flags\": {\n        \"collapsed\": false\n      },\n      \"order\": 5,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"clip\",\n          \"type\": \"CLIP\",\n          \"link\": 115\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"CONDITIONING\",\n          \"type\": \"CONDITIONING\",\n          \"links\": [\n            33,\n            56\n          ]\n        }\n      ],\n      \"title\": \"Positive Prompt\",\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"CLIPTextEncode\",\n        \"ue_properties\": {\n          \"version\": \"7.0.1\",\n          \"widget_ue_connectable\": {}\n        }\n      },\n      \"widgets_values\": [\n        \"A sea turtle swims amidst vibrant coral reefs, two sergeant major fish nearby.  The water is clear and blue, showcasing the intricate details of the coral and turtle's shell.  
Photorealistic, underwater scene, 8k resolution.\"\n      ],\n      \"color\": \"#232\",\n      \"bgcolor\": \"#353\"\n    },\n    {\n      \"id\": 11,\n      \"type\": \"CLIPLoader\",\n      \"pos\": [\n        2520,\n        1610\n      ],\n      \"size\": [\n        387.5943603515625,\n        106\n      ],\n      \"flags\": {\n        \"collapsed\": false\n      },\n      \"order\": 0,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"CLIP\",\n          \"type\": \"CLIP\",\n          \"links\": [\n            115\n          ]\n        }\n      ],\n      \"title\": \"CLIP\",\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"CLIPLoader\",\n        \"ue_properties\": {\n          \"version\": \"7.0.1\",\n          \"widget_ue_connectable\": {}\n        }\n      },\n      \"widgets_values\": [\n        \"umt5_xxl_fp16.safetensors\",\n        \"wan\",\n        \"default\"\n      ]\n    },\n    {\n      \"id\": 74,\n      \"type\": \"ImageFromBatch\",\n      \"pos\": [\n        2280,\n        1970\n      ],\n      \"size\": [\n        270,\n        82\n      ],\n      \"flags\": {},\n      \"order\": 6,\n      \"mode\": 4,\n      \"inputs\": [\n        {\n          \"name\": \"image\",\n          \"type\": \"IMAGE\",\n          \"link\": 119\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            120\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send 
Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.59\",\n        \"Node name for S&R\": \"ImageFromBatch\"\n      },\n      \"widgets_values\": [\n        0,\n        1\n      ]\n    },\n    {\n      \"id\": 63,\n      \"type\": \"easy imageInterrogator\",\n      \"pos\": [\n        2570,\n        1970\n      ],\n      \"size\": [\n        280,\n        82\n      ],\n      \"flags\": {},\n      \"order\": 10,\n      \"mode\": 4,\n      \"inputs\": [\n        {\n          \"name\": \"image\",\n          \"type\": \"IMAGE\",\n          \"link\": 120\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"prompt\",\n          \"shape\": 6,\n          \"type\": \"STRING\",\n          \"links\": [\n            108\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui-easy-use\",\n        \"ver\": \"1.3.3\",\n        \"Node name for S&R\": \"easy imageInterrogator\"\n      },\n      \"widgets_values\": [\n        \"fast\",\n        true\n      ]\n    },\n    {\n      \"id\": 8,\n      \"type\": \"ModelSamplingSD3\",\n      \"pos\": [\n        3410,\n        1020\n      ],\n      \"size\": [\n        221.14166259765625,\n        88.32342529296875\n      ],\n      \"flags\": {\n        \"collapsed\": false\n      },\n      \"order\": 12,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"model\",\n          \"type\": \"MODEL\",\n          \"link\": 121\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"MODEL\",\n          \"type\": \"MODEL\",\n          \"slot_index\": 0,\n          \"links\": [\n            32\n          ]\n        }\n      ],\n      
\"title\": \"Shift\",\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.36\",\n        \"Node name for S&R\": \"ModelSamplingSD3\",\n        \"ue_properties\": {\n          \"version\": \"7.0.1\",\n          \"widget_ue_connectable\": {}\n        }\n      },\n      \"widgets_values\": [\n        8.000000000000002\n      ],\n      \"color\": \"#223\",\n      \"bgcolor\": \"#335\"\n    },\n    {\n      \"id\": 26,\n      \"type\": \"ConditioningZeroOut\",\n      \"pos\": [\n        3440,\n        1710\n      ],\n      \"size\": [\n        198.16665649414062,\n        26\n      ],\n      \"flags\": {},\n      \"order\": 9,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"conditioning\",\n          \"type\": \"CONDITIONING\",\n          \"link\": 56\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"CONDITIONING\",\n          \"type\": \"CONDITIONING\",\n          \"links\": [\n            104\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.51\",\n        \"Node name for S&R\": \"ConditioningZeroOut\"\n      },\n      \"widgets_values\": []\n    },\n    {\n      \"id\": 70,\n      \"type\": \"ImageUpscaleWithModel\",\n      \"pos\": [\n        2590,\n        1280\n      ],\n      \"size\": [\n        310,\n        46\n      ],\n      \"flags\": {},\n      \"order\": 7,\n      \"mode\": 4,\n      \"inputs\": [\n        {\n          \"name\": 
\"upscale_model\",\n          \"type\": \"UPSCALE_MODEL\",\n          \"link\": 111\n        },\n        {\n          \"name\": \"image\",\n          \"type\": \"IMAGE\",\n          \"link\": 112\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            113\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.59\",\n        \"Node name for S&R\": \"ImageUpscaleWithModel\"\n      },\n      \"widgets_values\": []\n    },\n    {\n      \"id\": 54,\n      \"type\": \"VHS_LoadVideo\",\n      \"pos\": [\n        1480,\n        1300\n      ],\n      \"size\": [\n        620,\n        654\n      ],\n      \"flags\": {},\n      \"order\": 1,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"meta_batch\",\n          \"shape\": 7,\n          \"type\": \"VHS_BatchManager\",\n          \"link\": null\n        },\n        {\n          \"name\": \"vae\",\n          \"shape\": 7,\n          \"type\": \"VAE\",\n          \"link\": null\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            112,\n            119\n          ]\n        },\n        {\n          \"name\": \"frame_count\",\n          \"type\": \"INT\",\n          \"links\": []\n        },\n        {\n          \"name\": \"audio\",\n          \"type\": \"AUDIO\",\n          \"links\": null\n        },\n        {\n          \"name\": \"video_info\",\n          \"type\": \"VHS_VIDEOINFO\",\n          \"links\": null\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        
\"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui-videohelpersuite\",\n        \"ver\": \"a7ce59e381934733bfae03b1be029756d6ce936d\",\n        \"Node name for S&R\": \"VHS_LoadVideo\"\n      },\n      \"widgets_values\": {\n        \"video\": \"ComfyUI_00005_.mp4\",\n        \"force_rate\": 0,\n        \"custom_width\": 0,\n        \"custom_height\": 0,\n        \"frame_load_cap\": 5,\n        \"skip_first_frames\": 0,\n        \"select_every_nth\": 1,\n        \"format\": \"Wan\",\n        \"choose video to upload\": \"image\",\n        \"videopreview\": {\n          \"hidden\": false,\n          \"paused\": true,\n          \"params\": {\n            \"filename\": \"ComfyUI_00005_.mp4\",\n            \"type\": \"input\",\n            \"format\": \"video/mp4\",\n            \"force_rate\": 0,\n            \"custom_width\": 0,\n            \"custom_height\": 0,\n            \"frame_load_cap\": 5,\n            \"skip_first_frames\": 0,\n            \"select_every_nth\": 1\n          }\n        }\n      }\n    },\n    {\n      \"id\": 71,\n      \"type\": \"ImageResize+\",\n      \"pos\": [\n        3030,\n        1270\n      ],\n      \"size\": [\n        270,\n        218\n      ],\n      \"flags\": {},\n      \"order\": 11,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"image\",\n          \"type\": \"IMAGE\",\n          \"link\": 113\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            117\n          ]\n        },\n        {\n          \"name\": \"width\",\n          \"type\": \"INT\",\n          \"links\": null\n        },\n        {\n          \"name\": \"height\",\n          \"type\": \"INT\",\n          \"links\": null\n        }\n      ],\n      \"properties\": 
{\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui_essentials\",\n        \"ver\": \"9d9f4bedfc9f0321c19faf71855e228c93bd0dc9\",\n        \"Node name for S&R\": \"ImageResize+\"\n      },\n      \"widgets_values\": [\n        1920,\n        1080,\n        \"lanczos\",\n        \"keep proportion\",\n        \"always\",\n        0\n      ]\n    },\n    {\n      \"id\": 14,\n      \"type\": \"VAELoader\",\n      \"pos\": [\n        3410,\n        1870\n      ],\n      \"size\": [\n        270,\n        58\n      ],\n      \"flags\": {\n        \"collapsed\": false\n      },\n      \"order\": 2,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"VAE\",\n          \"type\": \"VAE\",\n          \"links\": [\n            35\n          ]\n        }\n      ],\n      \"title\": \"VAE\",\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"VAELoader\",\n        \"ue_properties\": {\n          \"version\": \"7.0.1\",\n          \"widget_ue_connectable\": {}\n        }\n      },\n      \"widgets_values\": [\n        \"wan_2.1_vae.safetensors\"\n      ]\n    },\n    {\n      \"id\": 67,\n      \"type\": \"DisplayAny\",\n      \"pos\": [\n        2880,\n        1970\n      ],\n      \"size\": [\n        360,\n        160\n      ],\n      \"flags\": {},\n      \"order\": 13,\n      \"mode\": 4,\n      \"inputs\": [\n        {\n          \"name\": \"input\",\n          \"type\": \"*\",\n          \"link\": 108\n        }\n    
  ],\n      \"outputs\": [\n        {\n          \"name\": \"STRING\",\n          \"type\": \"STRING\",\n          \"links\": null\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui_essentials\",\n        \"ver\": \"9d9f4bedfc9f0321c19faf71855e228c93bd0dc9\",\n        \"Node name for S&R\": \"DisplayAny\"\n      },\n      \"widgets_values\": [\n        \"raw value\"\n      ]\n    },\n    {\n      \"id\": 73,\n      \"type\": \"Reroute\",\n      \"pos\": [\n        3500,\n        1270\n      ],\n      \"size\": [\n        75,\n        26\n      ],\n      \"flags\": {},\n      \"order\": 14,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"*\",\n          \"link\": 117\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            118\n          ]\n        }\n      ],\n      \"properties\": {\n        \"showOutputText\": false,\n        \"horizontal\": false\n      }\n    },\n    {\n      \"id\": 72,\n      \"type\": \"LoraLoaderModelOnly\",\n      \"pos\": [\n        2940,\n        1020\n      ],\n      \"size\": [\n        420,\n        82\n      ],\n      \"flags\": {},\n      \"order\": 8,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"model\",\n          \"type\": \"MODEL\",\n          \"link\": 122\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"MODEL\",\n          \"type\": \"MODEL\",\n          \"links\": [\n            121\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        
\"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.59\",\n        \"Node name for S&R\": \"LoraLoaderModelOnly\"\n      },\n      \"widgets_values\": [\n        \"Wan2.2-Lightning_T2V-v1.1-A14B-4steps-lora_LOW_fp16.safetensors\",\n        1\n      ]\n    },\n    {\n      \"id\": 69,\n      \"type\": \"UpscaleModelLoader\",\n      \"pos\": [\n        2250,\n        1280\n      ],\n      \"size\": [\n        300,\n        60\n      ],\n      \"flags\": {},\n      \"order\": 3,\n      \"mode\": 4,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"UPSCALE_MODEL\",\n          \"type\": \"UPSCALE_MODEL\",\n          \"links\": [\n            111\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.59\",\n        \"Node name for S&R\": \"UpscaleModelLoader\"\n      },\n      \"widgets_values\": [\n        \"RealESRGAN_x2.pth\"\n      ]\n    },\n    {\n      \"id\": 76,\n      \"type\": \"UNETLoader\",\n      \"pos\": [\n        2520,\n        1020\n      ],\n      \"size\": [\n        380,\n        82\n      ],\n      \"flags\": {},\n      \"order\": 4,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"MODEL\",\n          \"type\": \"MODEL\",\n          \"links\": [\n            122\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": 
\"comfy-core\",\n        \"ver\": \"0.3.59\",\n        \"Node name for S&R\": \"UNETLoader\"\n      },\n      \"widgets_values\": [\n        \"wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors\",\n        \"default\"\n      ]\n    }\n  ],\n  \"links\": [\n    [\n      32,\n      8,\n      0,\n      18,\n      1,\n      \"MODEL\"\n    ],\n    [\n      33,\n      12,\n      0,\n      18,\n      2,\n      \"CONDITIONING\"\n    ],\n    [\n      35,\n      14,\n      0,\n      18,\n      4,\n      \"VAE\"\n    ],\n    [\n      56,\n      12,\n      0,\n      26,\n      0,\n      \"CONDITIONING\"\n    ],\n    [\n      95,\n      18,\n      0,\n      57,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      104,\n      26,\n      0,\n      18,\n      3,\n      \"CONDITIONING\"\n    ],\n    [\n      108,\n      63,\n      0,\n      67,\n      0,\n      \"*\"\n    ],\n    [\n      111,\n      69,\n      0,\n      70,\n      0,\n      \"UPSCALE_MODEL\"\n    ],\n    [\n      112,\n      54,\n      0,\n      70,\n      1,\n      \"IMAGE\"\n    ],\n    [\n      113,\n      70,\n      0,\n      71,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      115,\n      11,\n      0,\n      12,\n      0,\n      \"CLIP\"\n    ],\n    [\n      117,\n      71,\n      0,\n      73,\n      0,\n      \"*\"\n    ],\n    [\n      118,\n      73,\n      0,\n      18,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      119,\n      54,\n      0,\n      74,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      120,\n      74,\n      0,\n      63,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      121,\n      72,\n      0,\n      8,\n      0,\n      \"MODEL\"\n    ],\n    [\n      122,\n      76,\n      0,\n      72,\n      0,\n      \"MODEL\"\n    ]\n  ],\n  \"groups\": [\n    {\n      \"id\": 1,\n      \"title\": \"Optional Upscale\",\n      \"bounding\": [\n        2240,\n        1200,\n        690,\n        180\n      ],\n      \"color\": \"#3f789e\",\n      \"font_size\": 24,\n      \"flags\": {}\n    },\n    {\n   
   \"id\": 2,\n      \"title\": \"Prompt Generator\",\n      \"bounding\": [\n        2260,\n        1880,\n        1000,\n        280\n      ],\n      \"color\": \"#3f789e\",\n      \"font_size\": 24,\n      \"flags\": {}\n    }\n  ],\n  \"config\": {},\n  \"extra\": {\n    \"ds\": {\n      \"scale\": 0.7360065561459117,\n      \"offset\": [\n        -1580.220925243604,\n        -519.0718505701544\n      ]\n    },\n    \"frontendVersion\": \"1.25.11\",\n    \"VHS_latentpreview\": false,\n    \"VHS_latentpreviewrate\": 0,\n    \"VHS_MetadataImage\": true,\n    \"VHS_KeepIntermediate\": true\n  },\n  \"version\": 0.4\n}"
  },
  {
    "path": "workflows/distributed-upscale.json",
    "content": "{\n  \"id\": \"817bbfe2-06b8-44c8-8c14-b82b63b335d5\",\n  \"revision\": 0,\n  \"last_node_id\": 137,\n  \"last_link_id\": 211,\n  \"nodes\": [\n    {\n      \"id\": 86,\n      \"type\": \"Reroute\",\n      \"pos\": [\n        2130,\n        1050\n      ],\n      \"size\": [\n        75,\n        26\n      ],\n      \"flags\": {},\n      \"order\": 11,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"*\",\n          \"link\": 121\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"VAE\",\n          \"links\": [\n            127\n          ]\n        }\n      ],\n      \"properties\": {\n        \"showOutputText\": false,\n        \"horizontal\": false\n      }\n    },\n    {\n      \"id\": 88,\n      \"type\": \"Reroute\",\n      \"pos\": [\n        2130,\n        1000\n      ],\n      \"size\": [\n        75,\n        26\n      ],\n      \"flags\": {},\n      \"order\": 8,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"*\",\n          \"link\": 124\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"MODEL\",\n          \"links\": [\n            137\n          ]\n        }\n      ],\n      \"properties\": {\n        \"showOutputText\": false,\n        \"horizontal\": false\n      }\n    },\n    {\n      \"id\": 107,\n      \"type\": \"Reroute\",\n      \"pos\": [\n        2810,\n        1050\n      ],\n      \"size\": [\n        75,\n        26\n      ],\n      \"flags\": {},\n      \"order\": 18,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"*\",\n          \"link\": 171\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"VAE\",\n          \"links\": [\n            172\n          ]\n        }\n      ],\n      \"properties\": {\n        
\"showOutputText\": false,\n        \"horizontal\": false\n      }\n    },\n    {\n      \"id\": 89,\n      \"type\": \"Reroute\",\n      \"pos\": [\n        2810,\n        1000\n      ],\n      \"size\": [\n        75,\n        26\n      ],\n      \"flags\": {},\n      \"order\": 14,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"*\",\n          \"link\": 137\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"MODEL\",\n          \"links\": [\n            170\n          ]\n        }\n      ],\n      \"properties\": {\n        \"showOutputText\": false,\n        \"horizontal\": false\n      }\n    },\n    {\n      \"id\": 110,\n      \"type\": \"Reroute\",\n      \"pos\": [\n        2810,\n        970\n      ],\n      \"size\": [\n        75,\n        26\n      ],\n      \"flags\": {},\n      \"order\": 21,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"*\",\n          \"link\": 211\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            176\n          ]\n        }\n      ],\n      \"properties\": {\n        \"showOutputText\": false,\n        \"horizontal\": false\n      }\n    },\n    {\n      \"id\": 7,\n      \"type\": \"CheckpointLoaderSimple\",\n      \"pos\": [\n        1700,\n        1270\n      ],\n      \"size\": [\n        370,\n        98\n      ],\n      \"flags\": {},\n      \"order\": 0,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"MODEL\",\n          \"type\": \"MODEL\",\n          \"slot_index\": 0,\n          \"links\": [\n            124\n          ]\n        },\n        {\n          \"name\": \"CLIP\",\n          \"type\": \"CLIP\",\n          \"slot_index\": 1,\n          \"links\": [\n            5,\n            6\n          ]\n        
},\n        {\n          \"name\": \"VAE\",\n          \"type\": \"VAE\",\n          \"slot_index\": 2,\n          \"links\": [\n            121\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.41\",\n        \"Node name for S&R\": \"CheckpointLoaderSimple\",\n        \"models\": [\n          {\n            \"name\": \"v1-5-pruned-emaonly-fp16.safetensors\",\n            \"url\": \"https://huggingface.co/Comfy-Org/stable-diffusion-v1-5-archive/resolve/main/v1-5-pruned-emaonly-fp16.safetensors?download=true\",\n            \"directory\": \"checkpoints\"\n          }\n        ]\n      },\n      \"widgets_values\": [\n        \"SDXL\\\\juggernautXL_ragnarokBy.safetensors\"\n      ]\n    },\n    {\n      \"id\": 90,\n      \"type\": \"Reroute\",\n      \"pos\": [\n        2460,\n        1050\n      ],\n      \"size\": [\n        75,\n        26\n      ],\n      \"flags\": {},\n      \"order\": 15,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"*\",\n          \"link\": 127\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"VAE\",\n          \"links\": [\n            129,\n            171\n          ]\n        }\n      ],\n      \"properties\": {\n        \"showOutputText\": false,\n        \"horizontal\": false\n      }\n    },\n    {\n      \"id\": 44,\n      \"type\": \"ControlNetLoader\",\n      \"pos\": [\n        1910,\n        1510\n      ],\n      \"size\": [\n        390,\n        58\n      ],\n      \"flags\": {},\n      \"order\": 1,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"CONTROL_NET\",\n          
\"type\": \"CONTROL_NET\",\n          \"links\": [\n            81\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"ControlNetLoader\"\n      },\n      \"widgets_values\": [\n        \"SDXL\\\\xinsir-controlnet-union-sdxl-1.0-promax.safetensors\"\n      ]\n    },\n    {\n      \"id\": 6,\n      \"type\": \"CLIPTextEncode\",\n      \"pos\": [\n        2130,\n        1370\n      ],\n      \"size\": [\n        420,\n        88\n      ],\n      \"flags\": {},\n      \"order\": 10,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"clip\",\n          \"type\": \"CLIP\",\n          \"link\": 6\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"CONDITIONING\",\n          \"type\": \"CONDITIONING\",\n          \"slot_index\": 0,\n          \"links\": [\n            64\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.41\",\n        \"Node name for S&R\": \"CLIPTextEncode\"\n      },\n      \"widgets_values\": [\n        \"text, watermark\"\n      ]\n    },\n    {\n      \"id\": 52,\n      \"type\": \"SetUnionControlNetType\",\n      \"pos\": [\n        2340,\n        1510\n      ],\n      \"size\": [\n        210,\n        58\n      ],\n      \"flags\": {},\n      \"order\": 12,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"control_net\",\n          \"type\": 
\"CONTROL_NET\",\n          \"link\": 81\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"CONTROL_NET\",\n          \"type\": \"CONTROL_NET\",\n          \"links\": [\n            192\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"SetUnionControlNetType\"\n      },\n      \"widgets_values\": [\n        \"tile\"\n      ]\n    },\n    {\n      \"id\": 125,\n      \"type\": \"Reroute\",\n      \"pos\": [\n        2450,\n        880\n      ],\n      \"size\": [\n        75,\n        26\n      ],\n      \"flags\": {},\n      \"order\": 16,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"*\",\n          \"link\": 200\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            206\n          ]\n        }\n      ],\n      \"properties\": {\n        \"showOutputText\": false,\n        \"horizontal\": false\n      }\n    },\n    {\n      \"id\": 34,\n      \"type\": \"ImageResize+\",\n      \"pos\": [\n        1700,\n        970\n      ],\n      \"size\": [\n        270,\n        218\n      ],\n      \"flags\": {},\n      \"order\": 20,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"image\",\n          \"type\": \"IMAGE\",\n          \"link\": 197\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            211\n          ]\n        },\n        {\n          \"name\": \"width\",\n          \"type\": \"INT\",\n          \"links\": null\n        },\n  
      {\n          \"name\": \"height\",\n          \"type\": \"INT\",\n          \"links\": null\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui_essentials\",\n        \"ver\": \"9d9f4bedfc9f0321c19faf71855e228c93bd0dc9\",\n        \"Node name for S&R\": \"ImageResize+\"\n      },\n      \"widgets_values\": [\n        2048,\n        2048,\n        \"lanczos\",\n        \"keep proportion\",\n        \"always\",\n        8\n      ]\n    },\n    {\n      \"id\": 123,\n      \"type\": \"ImageUpscaleWithModel\",\n      \"pos\": [\n        1410,\n        970\n      ],\n      \"size\": [\n        222.75416564941406,\n        46\n      ],\n      \"flags\": {},\n      \"order\": 17,\n      \"mode\": 4,\n      \"inputs\": [\n        {\n          \"name\": \"upscale_model\",\n          \"type\": \"UPSCALE_MODEL\",\n          \"link\": 195\n        },\n        {\n          \"name\": \"image\",\n          \"type\": \"IMAGE\",\n          \"link\": 210\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            197\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"ImageUpscaleWithModel\"\n      },\n      \"widgets_values\": []\n    },\n    {\n      \"id\": 134,\n      \"type\": \"MarkdownNote\",\n      \"pos\": [\n        1920,\n        1610\n      ],\n      \"size\": [\n        370,\n        
88\n      ],\n      \"flags\": {},\n      \"order\": 2,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [],\n      \"properties\": {},\n      \"widgets_values\": [\n        \"[https://huggingface.co/xinsir/controlnet-union-sdxl-1.0/tree/main](https://huggingface.co/xinsir/controlnet-union-sdxl-1.0/tree/main)\"\n      ],\n      \"color\": \"#432\",\n      \"bgcolor\": \"#653\"\n    },\n    {\n      \"id\": 135,\n      \"type\": \"LoadImage\",\n      \"pos\": [\n        780,\n        880\n      ],\n      \"size\": [\n        274.375,\n        314.00006103515625\n      ],\n      \"flags\": {},\n      \"order\": 3,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            207\n          ]\n        },\n        {\n          \"name\": \"MASK\",\n          \"type\": \"MASK\",\n          \"links\": null\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"LoadImage\"\n      },\n      \"widgets_values\": [\n        \"example.png\",\n        \"image\"\n      ]\n    },\n    {\n      \"id\": 132,\n      \"type\": \"Note\",\n      \"pos\": [\n        1460,\n        1280\n      ],\n      \"size\": [\n        210,\n        88\n      ],\n      \"flags\": {},\n      \"order\": 4,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [],\n      \"properties\": {},\n      \"widgets_values\": [\n        \"Choose an SDXL model\"\n      ],\n      \"color\": \"#432\",\n      \"bgcolor\": \"#653\"\n    },\n    {\n      \"id\": 5,\n      \"type\": \"CLIPTextEncode\",\n      \"pos\": [\n        2130,\n        1160\n      ],\n      
\"size\": [\n        420,\n        160\n      ],\n      \"flags\": {},\n      \"order\": 9,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"clip\",\n          \"type\": \"CLIP\",\n          \"link\": 5\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"CONDITIONING\",\n          \"type\": \"CONDITIONING\",\n          \"slot_index\": 0,\n          \"links\": [\n            63\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.41\",\n        \"Node name for S&R\": \"CLIPTextEncode\"\n      },\n      \"widgets_values\": [\n        \"beautiful scenery nature glass bottle landscape, , purple galaxy bottle,\"\n      ]\n    },\n    {\n      \"id\": 124,\n      \"type\": \"Reroute\",\n      \"pos\": [\n        1230,\n        880\n      ],\n      \"size\": [\n        75,\n        26\n      ],\n      \"flags\": {},\n      \"order\": 13,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"*\",\n          \"link\": 207\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            200,\n            210\n          ]\n        }\n      ],\n      \"properties\": {\n        \"showOutputText\": false,\n        \"horizontal\": false\n      }\n    },\n    {\n      \"id\": 122,\n      \"type\": \"UpscaleModelLoader\",\n      \"pos\": [\n        1130,\n        1070\n      ],\n      \"size\": [\n        270,\n        58\n      ],\n      \"flags\": {},\n      \"order\": 5,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"UPSCALE_MODEL\",\n          \"type\": 
\"UPSCALE_MODEL\",\n          \"links\": [\n            195\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"UpscaleModelLoader\"\n      },\n      \"widgets_values\": [\n        \"4xNomos8kDAT.pth\"\n      ]\n    },\n    {\n      \"id\": 131,\n      \"type\": \"Note\",\n      \"pos\": [\n        1420,\n        1070\n      ],\n      \"size\": [\n        210,\n        88\n      ],\n      \"flags\": {},\n      \"order\": 6,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [],\n      \"properties\": {},\n      \"widgets_values\": [\n        \"Optional\"\n      ],\n      \"color\": \"#432\",\n      \"bgcolor\": \"#653\"\n    },\n    {\n      \"id\": 137,\n      \"type\": \"Note\",\n      \"pos\": [\n        3000,\n        1600\n      ],\n      \"size\": [\n        330,\n        90\n      ],\n      \"flags\": {},\n      \"order\": 7,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [],\n      \"properties\": {},\n      \"widgets_values\": [\n        \"If all your GPUs are the same/similar, set static_distribution to true\\n\"\n      ],\n      \"color\": \"#432\",\n      \"bgcolor\": \"#653\"\n    },\n    {\n      \"id\": 136,\n      \"type\": \"PreviewImage\",\n      \"pos\": [\n        3380,\n        1100\n      ],\n      \"size\": [\n        490,\n        550\n      ],\n      \"flags\": {},\n      \"order\": 23,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"images\",\n          \"type\": \"IMAGE\",\n          \"link\": 209\n        }\n      ],\n      \"outputs\": [],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 
10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"PreviewImage\"\n      },\n      \"widgets_values\": []\n    },\n    {\n      \"id\": 43,\n      \"type\": \"ControlNetApplyAdvanced\",\n      \"pos\": [\n        2640,\n        1150\n      ],\n      \"size\": [\n        270,\n        186\n      ],\n      \"flags\": {},\n      \"order\": 19,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"positive\",\n          \"type\": \"CONDITIONING\",\n          \"link\": 63\n        },\n        {\n          \"name\": \"negative\",\n          \"type\": \"CONDITIONING\",\n          \"link\": 64\n        },\n        {\n          \"name\": \"control_net\",\n          \"type\": \"CONTROL_NET\",\n          \"link\": 192\n        },\n        {\n          \"name\": \"image\",\n          \"type\": \"IMAGE\",\n          \"link\": 206\n        },\n        {\n          \"name\": \"vae\",\n          \"shape\": 7,\n          \"type\": \"VAE\",\n          \"link\": 129\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"positive\",\n          \"type\": \"CONDITIONING\",\n          \"links\": [\n            190\n          ]\n        },\n        {\n          \"name\": \"negative\",\n          \"type\": \"CONDITIONING\",\n          \"links\": [\n            191\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"ControlNetApplyAdvanced\"\n      },\n      \"widgets_values\": [\n        1.0000000000000002,\n        
0,\n        0.8000000000000002\n      ]\n    },\n    {\n      \"id\": 30,\n      \"type\": \"UltimateSDUpscaleDistributed\",\n      \"pos\": [\n        3000,\n        1110\n      ],\n      \"size\": [\n        326.691650390625,\n        450\n      ],\n      \"flags\": {},\n      \"order\": 22,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"upscaled_image\",\n          \"type\": \"IMAGE\",\n          \"link\": 176\n        },\n        {\n          \"name\": \"model\",\n          \"type\": \"MODEL\",\n          \"link\": 170\n        },\n        {\n          \"name\": \"positive\",\n          \"type\": \"CONDITIONING\",\n          \"link\": 190\n        },\n        {\n          \"name\": \"negative\",\n          \"type\": \"CONDITIONING\",\n          \"link\": 191\n        },\n        {\n          \"name\": \"vae\",\n          \"type\": \"VAE\",\n          \"link\": 172\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            209\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"ComfyUI-Distributed\",\n        \"ver\": \"dd23503883fdf319e8beb6e7a190445ecf89973c\",\n        \"Node name for S&R\": \"UltimateSDUpscaleDistributed\"\n      },\n      \"widgets_values\": [\n        269777990474642,\n        \"randomize\",\n        20,\n        7,\n        \"dpmpp_2m_sde\",\n        \"karras\",\n        0.6000000000000001,\n        1024,\n        1024,\n        32,\n        16,\n        true,\n        false,\n        true\n      ]\n    }\n  ],\n  \"links\": [\n    [\n      5,\n      7,\n      1,\n      5,\n      0,\n      \"CLIP\"\n    ],\n    [\n      6,\n      7,\n      1,\n      6,\n      
0,\n      \"CLIP\"\n    ],\n    [\n      63,\n      5,\n      0,\n      43,\n      0,\n      \"CONDITIONING\"\n    ],\n    [\n      64,\n      6,\n      0,\n      43,\n      1,\n      \"CONDITIONING\"\n    ],\n    [\n      81,\n      44,\n      0,\n      52,\n      0,\n      \"CONTROL_NET\"\n    ],\n    [\n      121,\n      7,\n      2,\n      86,\n      0,\n      \"*\"\n    ],\n    [\n      124,\n      7,\n      0,\n      88,\n      0,\n      \"*\"\n    ],\n    [\n      127,\n      86,\n      0,\n      90,\n      0,\n      \"*\"\n    ],\n    [\n      129,\n      90,\n      0,\n      43,\n      4,\n      \"VAE\"\n    ],\n    [\n      137,\n      88,\n      0,\n      89,\n      0,\n      \"*\"\n    ],\n    [\n      170,\n      89,\n      0,\n      30,\n      1,\n      \"MODEL\"\n    ],\n    [\n      171,\n      90,\n      0,\n      107,\n      0,\n      \"*\"\n    ],\n    [\n      172,\n      107,\n      0,\n      30,\n      4,\n      \"VAE\"\n    ],\n    [\n      176,\n      110,\n      0,\n      30,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      190,\n      43,\n      0,\n      30,\n      2,\n      \"CONDITIONING\"\n    ],\n    [\n      191,\n      43,\n      1,\n      30,\n      3,\n      \"CONDITIONING\"\n    ],\n    [\n      192,\n      52,\n      0,\n      43,\n      2,\n      \"CONTROL_NET\"\n    ],\n    [\n      195,\n      122,\n      0,\n      123,\n      0,\n      \"UPSCALE_MODEL\"\n    ],\n    [\n      197,\n      123,\n      0,\n      34,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      200,\n      124,\n      0,\n      125,\n      0,\n      \"*\"\n    ],\n    [\n      206,\n      125,\n      0,\n      43,\n      3,\n      \"IMAGE\"\n    ],\n    [\n      207,\n      135,\n      0,\n      124,\n      0,\n      \"*\"\n    ],\n    [\n      209,\n      30,\n      0,\n      136,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      210,\n      124,\n      0,\n      123,\n      1,\n      \"IMAGE\"\n    ],\n    [\n      211,\n      34,\n      0,\n      110,\n     
 0,\n      \"*\"\n    ]\n  ],\n  \"groups\": [],\n  \"config\": {},\n  \"extra\": {\n    \"ds\": {\n      \"scale\": 1.0152559799477252,\n      \"offset\": [\n        -2260.53316345765,\n        -499.7179536588252\n      ]\n    },\n    \"frontendVersion\": \"1.23.4\",\n    \"VHS_latentpreview\": false,\n    \"VHS_latentpreviewrate\": 0,\n    \"VHS_MetadataImage\": true,\n    \"VHS_KeepIntermediate\": true\n  },\n  \"version\": 0.4\n}"
  },
  {
    "path": "workflows/distributed-wan-2.2_14b_t2v.json",
    "content": "{\n  \"id\": \"8968d33f-abd1-4e8a-8e55-5d87a104afb8\",\n  \"revision\": 0,\n  \"last_node_id\": 92,\n  \"last_link_id\": 187,\n  \"nodes\": [\n    {\n      \"id\": 82,\n      \"type\": \"CreateVideo\",\n      \"pos\": [\n        640,\n        1460\n      ],\n      \"size\": [\n        270,\n        78\n      ],\n      \"flags\": {\n        \"collapsed\": true\n      },\n      \"order\": 16,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"images\",\n          \"type\": \"IMAGE\",\n          \"link\": 172\n        },\n        {\n          \"name\": \"audio\",\n          \"shape\": 7,\n          \"type\": \"AUDIO\",\n          \"link\": null\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"VIDEO\",\n          \"type\": \"VIDEO\",\n          \"links\": [\n            187\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.45\",\n        \"Node name for S&R\": \"CreateVideo\"\n      },\n      \"widgets_values\": [\n        16\n      ]\n    },\n    {\n      \"id\": 80,\n      \"type\": \"CreateVideo\",\n      \"pos\": [\n        20,\n        1450\n      ],\n      \"size\": [\n        270,\n        78\n      ],\n      \"flags\": {\n        \"collapsed\": true\n      },\n      \"order\": 15,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"images\",\n          \"type\": \"IMAGE\",\n          \"link\": 170\n        },\n        {\n          \"name\": \"audio\",\n          \"shape\": 7,\n          \"type\": \"AUDIO\",\n          \"link\": null\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"VIDEO\",\n          \"type\": \"VIDEO\",\n          \"links\": [\n   
         186\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.45\",\n        \"Node name for S&R\": \"CreateVideo\"\n      },\n      \"widgets_values\": [\n        16\n      ]\n    },\n    {\n      \"id\": 78,\n      \"type\": \"CreateVideo\",\n      \"pos\": [\n        970,\n        480\n      ],\n      \"size\": [\n        270,\n        78\n      ],\n      \"flags\": {\n        \"collapsed\": true\n      },\n      \"order\": 14,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"images\",\n          \"type\": \"IMAGE\",\n          \"link\": 168\n        },\n        {\n          \"name\": \"audio\",\n          \"shape\": 7,\n          \"type\": \"AUDIO\",\n          \"link\": null\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"VIDEO\",\n          \"type\": \"VIDEO\",\n          \"links\": [\n            184\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.45\",\n        \"Node name for S&R\": \"CreateVideo\"\n      },\n      \"widgets_values\": [\n        16\n      ]\n    },\n    {\n      \"id\": 60,\n      \"type\": \"CreateVideo\",\n      \"pos\": [\n        80,\n        610\n      ],\n      \"size\": [\n        270,\n        78\n      ],\n      \"flags\": {\n        \"collapsed\": true\n      },\n      \"order\": 13,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"images\",\n          
\"type\": \"IMAGE\",\n          \"link\": 166\n        },\n        {\n          \"name\": \"audio\",\n          \"shape\": 7,\n          \"type\": \"AUDIO\",\n          \"link\": null\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"VIDEO\",\n          \"type\": \"VIDEO\",\n          \"links\": [\n            185\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.45\",\n        \"Node name for S&R\": \"CreateVideo\"\n      },\n      \"widgets_values\": [\n        16\n      ]\n    },\n    {\n      \"id\": 77,\n      \"type\": \"ImageBatchDivider\",\n      \"pos\": [\n        930,\n        380\n      ],\n      \"size\": [\n        270,\n        118\n      ],\n      \"flags\": {\n        \"collapsed\": true\n      },\n      \"order\": 12,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"images\",\n          \"type\": \"IMAGE\",\n          \"link\": 165\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"batch_1\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            166\n          ]\n        },\n        {\n          \"name\": \"batch_2\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            168\n          ]\n        },\n        {\n          \"name\": \"batch_3\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            170\n          ]\n        },\n        {\n          \"name\": \"batch_4\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            172\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        
\"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"ComfyUI-Distributed\",\n        \"ver\": \"ae3201accb0161040bfd7c5705b08874726b1853\",\n        \"Node name for S&R\": \"ImageBatchDivider\"\n      },\n      \"widgets_values\": [\n        4\n      ]\n    },\n    {\n      \"id\": 67,\n      \"type\": \"DistributedCollector\",\n      \"pos\": [\n        700,\n        380\n      ],\n      \"size\": [\n        166.50416564941406,\n        26\n      ],\n      \"flags\": {\n        \"collapsed\": true\n      },\n      \"order\": 11,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"images\",\n          \"type\": \"IMAGE\",\n          \"link\": 141\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            165\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"ComfyUI-Distributed\",\n        \"ver\": \"b54f2be19cab29543068d9c4355d9c5b773bee0d\",\n        \"Node name for S&R\": \"DistributedCollector\"\n      },\n      \"widgets_values\": []\n    },\n    {\n      \"id\": 8,\n      \"type\": \"VAEDecode\",\n      \"pos\": [\n        520,\n        380\n      ],\n      \"size\": [\n        210,\n        46\n      ],\n      \"flags\": {\n        \"collapsed\": true\n      },\n      \"order\": 10,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"samples\",\n          \"type\": \"LATENT\",\n          \"link\": 178\n        },\n        {\n          \"name\": \"vae\",\n          \"type\": \"VAE\",\n          \"link\": 76\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": 
\"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"slot_index\": 0,\n          \"links\": [\n            141\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.45\",\n        \"Node name for S&R\": \"VAEDecode\"\n      },\n      \"widgets_values\": []\n    },\n    {\n      \"id\": 7,\n      \"type\": \"CLIPTextEncode\",\n      \"pos\": [\n        580,\n        340\n      ],\n      \"size\": [\n        430,\n        180\n      ],\n      \"flags\": {\n        \"collapsed\": true\n      },\n      \"order\": 5,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"clip\",\n          \"type\": \"CLIP\",\n          \"link\": 148\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"CONDITIONING\",\n          \"type\": \"CONDITIONING\",\n          \"slot_index\": 0,\n          \"links\": [\n            176\n          ]\n        }\n      ],\n      \"title\": \"CLIP Text Encode (Negative Prompt)\",\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.45\",\n        \"Node name for S&R\": \"CLIPTextEncode\"\n      },\n      \"widgets_values\": [\n        \"色调艳丽，过曝，静态，细节模糊不清，字幕，风格，作品，画作，画面，静止，整体发灰，最差质量，低质量，JPEG压缩残留，丑陋的，残缺的，多余的手指，画得不好的手部，画得不好的脸部，畸形的，毁容的，形态畸形的肢体，手指融合，静止不动的画面，杂乱的背景，三条腿，背景人很多，倒着走\"\n      ],\n      \"color\": \"#322\",\n      \"bgcolor\": \"#533\"\n    },\n    {\n      \"id\": 57,\n      \"type\": \"KSamplerAdvanced\",\n      \"pos\": [\n        890,\n        
10\n      ],\n      \"size\": [\n        304.748046875,\n        334\n      ],\n      \"flags\": {},\n      \"order\": 9,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"model\",\n          \"type\": \"MODEL\",\n          \"link\": 139\n        },\n        {\n          \"name\": \"positive\",\n          \"type\": \"CONDITIONING\",\n          \"link\": 174\n        },\n        {\n          \"name\": \"negative\",\n          \"type\": \"CONDITIONING\",\n          \"link\": 176\n        },\n        {\n          \"name\": \"latent_image\",\n          \"type\": \"LATENT\",\n          \"link\": 179\n        },\n        {\n          \"name\": \"noise_seed\",\n          \"type\": \"INT\",\n          \"widget\": {\n            \"name\": \"noise_seed\"\n          },\n          \"link\": 143\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"LATENT\",\n          \"type\": \"LATENT\",\n          \"links\": [\n            178\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.45\",\n        \"Node name for S&R\": \"KSamplerAdvanced\"\n      },\n      \"widgets_values\": [\n        \"enable\",\n        1070591872081175,\n        \"randomize\",\n        4,\n        1,\n        \"euler\",\n        \"simple\",\n        0,\n        1000,\n        \"disable\"\n      ]\n    },\n    {\n      \"id\": 68,\n      \"type\": \"DistributedSeed\",\n      \"pos\": [\n        560,\n        100\n      ],\n      \"size\": [\n        270,\n        82\n      ],\n      \"flags\": {},\n      \"order\": 0,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"seed\",\n          \"type\": \"INT\",\n          
\"links\": [\n            143\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"ComfyUI-Distributed\",\n        \"ver\": \"b54f2be19cab29543068d9c4355d9c5b773bee0d\",\n        \"Node name for S&R\": \"DistributedSeed\"\n      },\n      \"widgets_values\": [\n        349924686792776,\n        \"randomize\"\n      ]\n    },\n    {\n      \"id\": 66,\n      \"type\": \"LoraLoaderModelOnly\",\n      \"pos\": [\n        320,\n        0\n      ],\n      \"size\": [\n        240,\n        82\n      ],\n      \"flags\": {},\n      \"order\": 7,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"model\",\n          \"type\": \"MODEL\",\n          \"link\": 134\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"MODEL\",\n          \"type\": \"MODEL\",\n          \"links\": [\n            135\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"LoraLoaderModelOnly\"\n      },\n      \"widgets_values\": [\n        \"WAN\\\\Wan21_T2V_14B_lightx2v_cfg_step_distill_lora_rank32.safetensors\",\n        0.8000000000000002\n      ]\n    },\n    {\n      \"id\": 71,\n      \"type\": \"CLIPLoaderGGUF\",\n      \"pos\": [\n        30,\n        70\n      ],\n      \"size\": [\n        270,\n        82\n      ],\n      \"flags\": {},\n      \"order\": 1,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          
\"name\": \"CLIP\",\n          \"type\": \"CLIP\",\n          \"links\": [\n            148,\n            149\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui-gguf\",\n        \"ver\": \"1.1.1\",\n        \"Node name for S&R\": \"CLIPLoaderGGUF\"\n      },\n      \"widgets_values\": [\n        \"umt5-xxl-encoder-Q8_0.gguf\",\n        \"wan\"\n      ]\n    },\n    {\n      \"id\": 63,\n      \"type\": \"UnetLoaderGGUF\",\n      \"pos\": [\n        30,\n        0\n      ],\n      \"size\": [\n        270,\n        58\n      ],\n      \"flags\": {},\n      \"order\": 2,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"MODEL\",\n          \"type\": \"MODEL\",\n          \"links\": [\n            134\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui-gguf\",\n        \"ver\": \"1.1.1\",\n        \"Node name for S&R\": \"UnetLoaderGGUF\"\n      },\n      \"widgets_values\": [\n        \"Wan2.2-T2V-A14B-LowNoise-Q8_0.gguf\"\n      ]\n    },\n    {\n      \"id\": 54,\n      \"type\": \"ModelSamplingSD3\",\n      \"pos\": [\n        610,\n        0\n      ],\n      \"size\": [\n        210,\n        60\n      ],\n      \"flags\": {},\n      \"order\": 8,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"model\",\n          \"type\": \"MODEL\",\n          \"link\": 135\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"MODEL\",\n          
\"type\": \"MODEL\",\n          \"slot_index\": 0,\n          \"links\": [\n            139\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.45\",\n        \"Node name for S&R\": \"ModelSamplingSD3\"\n      },\n      \"widgets_values\": [\n        8.000000000000002\n      ]\n    },\n    {\n      \"id\": 90,\n      \"type\": \"SaveVideo\",\n      \"pos\": [\n        20,\n        420\n      ],\n      \"size\": [\n        580,\n        678\n      ],\n      \"flags\": {},\n      \"order\": 17,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"video\",\n          \"type\": \"VIDEO\",\n          \"link\": 185\n        }\n      ],\n      \"outputs\": [],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.47\",\n        \"Node name for S&R\": \"SaveVideo\"\n      },\n      \"widgets_values\": [\n        \"video/ComfyUI\",\n        \"auto\",\n        \"auto\"\n      ]\n    },\n    {\n      \"id\": 91,\n      \"type\": \"SaveVideo\",\n      \"pos\": [\n        20,\n        1130\n      ],\n      \"size\": [\n        580,\n        678\n      ],\n      \"flags\": {},\n      \"order\": 19,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"video\",\n          \"type\": \"VIDEO\",\n          \"link\": 186\n        }\n      ],\n      \"outputs\": [],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        
\"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.47\",\n        \"Node name for S&R\": \"SaveVideo\"\n      },\n      \"widgets_values\": [\n        \"video/ComfyUI\",\n        \"auto\",\n        \"auto\"\n      ]\n    },\n    {\n      \"id\": 89,\n      \"type\": \"SaveVideo\",\n      \"pos\": [\n        610,\n        420\n      ],\n      \"size\": [\n        580,\n        678\n      ],\n      \"flags\": {},\n      \"order\": 18,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"video\",\n          \"type\": \"VIDEO\",\n          \"link\": 184\n        }\n      ],\n      \"outputs\": [],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.47\",\n        \"Node name for S&R\": \"SaveVideo\"\n      },\n      \"widgets_values\": [\n        \"video/ComfyUI\",\n        \"auto\",\n        \"auto\"\n      ]\n    },\n    {\n      \"id\": 92,\n      \"type\": \"SaveVideo\",\n      \"pos\": [\n        610,\n        1130\n      ],\n      \"size\": [\n        580,\n        678\n      ],\n      \"flags\": {},\n      \"order\": 20,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"video\",\n          \"type\": \"VIDEO\",\n          \"link\": 187\n        }\n      ],\n      \"outputs\": [],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.47\",\n        \"Node 
name for S&R\": \"SaveVideo\"\n      },\n      \"widgets_values\": [\n        \"video/ComfyUI\",\n        \"auto\",\n        \"auto\"\n      ]\n    },\n    {\n      \"id\": 39,\n      \"type\": \"VAELoader\",\n      \"pos\": [\n        30,\n        340\n      ],\n      \"size\": [\n        320,\n        58\n      ],\n      \"flags\": {},\n      \"order\": 3,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"VAE\",\n          \"type\": \"VAE\",\n          \"slot_index\": 0,\n          \"links\": [\n            76\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.45\",\n        \"Node name for S&R\": \"VAELoader\",\n        \"models\": [\n          {\n            \"name\": \"wan_2.1_vae.safetensors\",\n            \"url\": \"https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors\",\n            \"directory\": \"vae\"\n          }\n        ]\n      },\n      \"widgets_values\": [\n        \"wan_2.1_vae.safetensors\"\n      ]\n    },\n    {\n      \"id\": 59,\n      \"type\": \"EmptyHunyuanLatentVideo\",\n      \"pos\": [\n        40,\n        180\n      ],\n      \"size\": [\n        315,\n        130\n      ],\n      \"flags\": {},\n      \"order\": 4,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"LATENT\",\n          \"type\": \"LATENT\",\n          \"slot_index\": 0,\n          \"links\": [\n            179\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": 
\"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.45\",\n        \"Node name for S&R\": \"EmptyHunyuanLatentVideo\"\n      },\n      \"widgets_values\": [\n        704,\n        704,\n        33,\n        1\n      ]\n    },\n    {\n      \"id\": 6,\n      \"type\": \"CLIPTextEncode\",\n      \"pos\": [\n        380,\n        210\n      ],\n      \"size\": [\n        460,\n        88\n      ],\n      \"flags\": {},\n      \"order\": 6,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"clip\",\n          \"type\": \"CLIP\",\n          \"link\": 149\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"CONDITIONING\",\n          \"type\": \"CONDITIONING\",\n          \"slot_index\": 0,\n          \"links\": [\n            174\n          ]\n        }\n      ],\n      \"title\": \"CLIP Text Encode (Positive Prompt)\",\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.45\",\n        \"Node name for S&R\": \"CLIPTextEncode\"\n      },\n      \"widgets_values\": [\n        \"A beautiful woman holding a GPU at a photoshoot\"\n      ],\n      \"color\": \"#232\",\n      \"bgcolor\": \"#353\"\n    }\n  ],\n  \"links\": [\n    [\n      76,\n      39,\n      0,\n      8,\n      1,\n      \"VAE\"\n    ],\n    [\n      134,\n      63,\n      0,\n      66,\n      0,\n      \"MODEL\"\n    ],\n    [\n      135,\n      66,\n      0,\n      54,\n      0,\n      \"MODEL\"\n    ],\n    [\n      139,\n      54,\n      0,\n      57,\n      0,\n      \"MODEL\"\n    ],\n    [\n      141,\n      8,\n      0,\n      67,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      143,\n      68,\n      
0,\n      57,\n      4,\n      \"INT\"\n    ],\n    [\n      148,\n      71,\n      0,\n      7,\n      0,\n      \"CLIP\"\n    ],\n    [\n      149,\n      71,\n      0,\n      6,\n      0,\n      \"CLIP\"\n    ],\n    [\n      165,\n      67,\n      0,\n      77,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      166,\n      77,\n      0,\n      60,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      168,\n      77,\n      1,\n      78,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      170,\n      77,\n      2,\n      80,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      172,\n      77,\n      3,\n      82,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      174,\n      6,\n      0,\n      57,\n      1,\n      \"CONDITIONING\"\n    ],\n    [\n      176,\n      7,\n      0,\n      57,\n      2,\n      \"CONDITIONING\"\n    ],\n    [\n      178,\n      57,\n      0,\n      8,\n      0,\n      \"LATENT\"\n    ],\n    [\n      179,\n      59,\n      0,\n      57,\n      3,\n      \"LATENT\"\n    ],\n    [\n      184,\n      78,\n      0,\n      89,\n      0,\n      \"VIDEO\"\n    ],\n    [\n      185,\n      60,\n      0,\n      90,\n      0,\n      \"VIDEO\"\n    ],\n    [\n      186,\n      80,\n      0,\n      91,\n      0,\n      \"VIDEO\"\n    ],\n    [\n      187,\n      82,\n      0,\n      92,\n      0,\n      \"VIDEO\"\n    ]\n  ],\n  \"groups\": [],\n  \"config\": {},\n  \"extra\": {\n    \"ds\": {\n      \"scale\": 0.693433494944177,\n      \"offset\": [\n        630.9170235538304,\n        115.03441263315318\n      ]\n    },\n    \"frontendVersion\": \"1.23.4\",\n    \"VHS_latentpreview\": false,\n    \"VHS_latentpreviewrate\": 0,\n    \"VHS_MetadataImage\": true,\n    \"VHS_KeepIntermediate\": true\n  },\n  \"version\": 0.4\n}"
  },
  {
    "path": "workflows/distributed-wan.json",
    "content": "{\n  \"id\": \"00000000-0000-0000-0000-000000000000\",\n  \"revision\": 0,\n  \"last_node_id\": 234,\n  \"last_link_id\": 79,\n  \"nodes\": [\n    {\n      \"id\": 67,\n      \"type\": \"ModelSamplingSD3\",\n      \"pos\": [\n        1700,\n        550\n      ],\n      \"size\": [\n        270,\n        58\n      ],\n      \"flags\": {},\n      \"order\": 14,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"model\",\n          \"type\": \"MODEL\",\n          \"link\": 23\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"MODEL\",\n          \"type\": \"MODEL\",\n          \"links\": [\n            17,\n            26\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"ModelSamplingSD3\"\n      },\n      \"widgets_values\": [\n        8.000000000000002\n      ]\n    },\n    {\n      \"id\": 50,\n      \"type\": \"WanImageToVideo\",\n      \"pos\": [\n        1590,\n        230\n      ],\n      \"size\": [\n        270,\n        210\n      ],\n      \"flags\": {},\n      \"order\": 18,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"positive\",\n          \"type\": \"CONDITIONING\",\n          \"link\": 7\n        },\n        {\n          \"name\": \"negative\",\n          \"type\": \"CONDITIONING\",\n          \"link\": 8\n        },\n        {\n          \"name\": \"vae\",\n          \"type\": \"VAE\",\n          \"link\": 9\n        },\n        {\n          \"name\": \"clip_vision_output\",\n          \"shape\": 7,\n          \"type\": \"CLIP_VISION_OUTPUT\",\n          \"link\": 10\n        },\n        {\n          \"name\": 
\"start_image\",\n          \"shape\": 7,\n          \"type\": \"IMAGE\",\n          \"link\": 11\n        },\n        {\n          \"name\": \"width\",\n          \"type\": \"INT\",\n          \"widget\": {\n            \"name\": \"width\"\n          },\n          \"link\": 5\n        },\n        {\n          \"name\": \"height\",\n          \"type\": \"INT\",\n          \"widget\": {\n            \"name\": \"height\"\n          },\n          \"link\": 6\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"positive\",\n          \"type\": \"CONDITIONING\",\n          \"links\": [\n            27\n          ]\n        },\n        {\n          \"name\": \"negative\",\n          \"type\": \"CONDITIONING\",\n          \"links\": null\n        },\n        {\n          \"name\": \"latent\",\n          \"type\": \"LATENT\",\n          \"links\": [\n            22\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"WanImageToVideo\"\n      },\n      \"widgets_values\": [\n        832,\n        480,\n        17,\n        1\n      ]\n    },\n    {\n      \"id\": 60,\n      \"type\": \"RandomNoise\",\n      \"pos\": [\n        1850,\n        10\n      ],\n      \"size\": [\n        270,\n        82\n      ],\n      \"flags\": {},\n      \"order\": 9,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"noise_seed\",\n          \"type\": \"INT\",\n          \"widget\": {\n            \"name\": \"noise_seed\"\n          },\n          \"link\": 16\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"NOISE\",\n          \"type\": \"NOISE\",\n          \"links\": [\n            
18\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"RandomNoise\"\n      },\n      \"widgets_values\": [\n        200526686850175,\n        \"randomize\"\n      ]\n    },\n    {\n      \"id\": 150,\n      \"type\": \"DistributedSeed\",\n      \"pos\": [\n        1540,\n        0\n      ],\n      \"size\": [\n        270,\n        82\n      ],\n      \"flags\": {},\n      \"order\": 0,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"seed\",\n          \"type\": \"INT\",\n          \"links\": [\n            16\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"ComfyUI-Distributed\",\n        \"ver\": \"9650280c7f50898720bf9773e8874d75e7c20846\",\n        \"Node name for S&R\": \"DistributedSeed\"\n      },\n      \"widgets_values\": [\n        177896884005433,\n        \"randomize\"\n      ]\n    },\n    {\n      \"id\": 61,\n      \"type\": \"KSamplerSelect\",\n      \"pos\": [\n        2140,\n        350\n      ],\n      \"size\": [\n        260,\n        60\n      ],\n      \"flags\": {},\n      \"order\": 1,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"SAMPLER\",\n          \"type\": \"SAMPLER\",\n          \"links\": [\n            20\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        
\"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"KSamplerSelect\"\n      },\n      \"widgets_values\": [\n        \"lcm\"\n      ]\n    },\n    {\n      \"id\": 221,\n      \"type\": \"DistributedCollector\",\n      \"pos\": [\n        2660,\n        200\n      ],\n      \"size\": [\n        166.50416564941406,\n        26\n      ],\n      \"flags\": {\n        \"collapsed\": false\n      },\n      \"order\": 22,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"images\",\n          \"type\": \"IMAGE\",\n          \"link\": 67\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            78\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"ComfyUI-Distributed\",\n        \"ver\": \"b4ed92a363fab14d944b87c610aa9a0b4c87c085\",\n        \"Node name for S&R\": \"DistributedCollector\"\n      },\n      \"widgets_values\": []\n    },\n    {\n      \"id\": 231,\n      \"type\": \"ImageBatchDivider\",\n      \"pos\": [\n        2850,\n        200\n      ],\n      \"size\": [\n        210,\n        118\n      ],\n      \"flags\": {},\n      \"order\": 23,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"images\",\n          \"type\": \"IMAGE\",\n          \"link\": 78\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"batch_1\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            74\n          ]\n        },\n        {\n 
         \"name\": \"batch_2\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            75\n          ]\n        },\n        {\n          \"name\": \"batch_3\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            76\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"ComfyUI-Distributed\",\n        \"ver\": \"fa09a2da10dfc63cecfbec1f8d4a0516e923b911\",\n        \"Node name for S&R\": \"ImageBatchDivider\"\n      },\n      \"widgets_values\": [\n        3\n      ]\n    },\n    {\n      \"id\": 139,\n      \"type\": \"CLIPLoaderGGUF\",\n      \"pos\": [\n        490,\n        60\n      ],\n      \"size\": [\n        270,\n        82\n      ],\n      \"flags\": {},\n      \"order\": 2,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"CLIP\",\n          \"type\": \"CLIP\",\n          \"links\": [\n            1,\n            2\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui-gguf\",\n        \"ver\": \"1.1.1\",\n        \"Node name for S&R\": \"CLIPLoaderGGUF\"\n      },\n      \"widgets_values\": [\n        \"umt5-xxl-encoder-Q8_0.gguf\",\n        \"wan\"\n      ]\n    },\n    {\n      \"id\": 142,\n      \"type\": \"UnetLoaderGGUF\",\n      \"pos\": [\n        930,\n        850\n      ],\n      \"size\": [\n        340,\n        60\n      ],\n      \"flags\": {},\n      \"order\": 3,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n     
     \"name\": \"MODEL\",\n          \"type\": \"MODEL\",\n          \"links\": [\n            24\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui-gguf\",\n        \"ver\": \"1.1.1\",\n        \"Node name for S&R\": \"UnetLoaderGGUF\"\n      },\n      \"widgets_values\": [\n        \"wan2.1-i2v-14b-720p-Q8_0.gguf\"\n      ]\n    },\n    {\n      \"id\": 118,\n      \"type\": \"LoraLoaderModelOnly\",\n      \"pos\": [\n        1290,\n        840\n      ],\n      \"size\": [\n        310,\n        82\n      ],\n      \"flags\": {},\n      \"order\": 12,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"model\",\n          \"type\": \"MODEL\",\n          \"link\": 24\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"MODEL\",\n          \"type\": \"MODEL\",\n          \"links\": [\n            23\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"LoraLoaderModelOnly\"\n      },\n      \"widgets_values\": [\n        \"Wan21_T2V_14B_lightx2v_cfg_step_distill_lora_rank32.safetensors\",\n        0.8100000000000002\n      ]\n    },\n    {\n      \"id\": 135,\n      \"type\": \"ImageNoiseAugmentation\",\n      \"pos\": [\n        1250,\n        310\n      ],\n      \"size\": [\n        270,\n        106\n      ],\n      \"flags\": {},\n      \"order\": 16,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          
\"name\": \"image\",\n          \"type\": \"IMAGE\",\n          \"link\": 25\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            11\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui-kjnodes\",\n        \"ver\": \"1.1.2\",\n        \"Node name for S&R\": \"ImageNoiseAugmentation\"\n      },\n      \"widgets_values\": [\n        0.10000000000000002,\n        1100164865582526,\n        \"randomize\"\n      ]\n    },\n    {\n      \"id\": 49,\n      \"type\": \"CLIPVisionLoader\",\n      \"pos\": [\n        1260,\n        610\n      ],\n      \"size\": [\n        290,\n        60\n      ],\n      \"flags\": {},\n      \"order\": 4,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"CLIP_VISION\",\n          \"type\": \"CLIP_VISION\",\n          \"links\": [\n            12\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"CLIPVisionLoader\"\n      },\n      \"widgets_values\": [\n        \"clip_vision_h.safetensors\"\n      ]\n    },\n    {\n      \"id\": 7,\n      \"type\": \"CLIPTextEncode\",\n      \"pos\": [\n        790,\n        120\n      ],\n      \"size\": [\n        380,\n        110\n      ],\n      \"flags\": {},\n      \"order\": 11,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": 
\"clip\",\n          \"type\": \"CLIP\",\n          \"link\": 2\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"CONDITIONING\",\n          \"type\": \"CONDITIONING\",\n          \"links\": [\n            8\n          ]\n        }\n      ],\n      \"title\": \"CLIP Text Encode (Negative Prompt)\",\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"CLIPTextEncode\"\n      },\n      \"widgets_values\": [\n        \"色调艳丽，过曝，静态，细节模糊不清，字幕，风格，作品，画作，画面，静止，整体发灰，最差质量，低质量，JPEG压缩残留，丑陋的，残缺的，多余的手指，画得不好的手部，画得不好的脸部，畸形的，毁容的，形态畸形的肢体，手指融合，静止不动的画面，杂乱的背景，三条腿，背景人很多，倒着走\"\n      ]\n    },\n    {\n      \"id\": 64,\n      \"type\": \"SamplerCustomAdvanced\",\n      \"pos\": [\n        2140,\n        190\n      ],\n      \"size\": [\n        260,\n        110\n      ],\n      \"flags\": {},\n      \"order\": 20,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"noise\",\n          \"type\": \"NOISE\",\n          \"link\": 18\n        },\n        {\n          \"name\": \"guider\",\n          \"type\": \"GUIDER\",\n          \"link\": 19\n        },\n        {\n          \"name\": \"sampler\",\n          \"type\": \"SAMPLER\",\n          \"link\": 20\n        },\n        {\n          \"name\": \"sigmas\",\n          \"type\": \"SIGMAS\",\n          \"link\": 21\n        },\n        {\n          \"name\": \"latent_image\",\n          \"type\": \"LATENT\",\n          \"link\": 22\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"output\",\n          \"type\": \"LATENT\",\n          \"links\": [\n            3\n          ]\n        },\n        {\n          \"name\": \"denoised_output\",\n          \"type\": 
\"LATENT\",\n          \"links\": null\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"SamplerCustomAdvanced\"\n      },\n      \"widgets_values\": []\n    },\n    {\n      \"id\": 233,\n      \"type\": \"Note\",\n      \"pos\": [\n        2840,\n        40\n      ],\n      \"size\": [\n        220,\n        90\n      ],\n      \"flags\": {},\n      \"order\": 5,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [],\n      \"properties\": {},\n      \"widgets_values\": [\n        \"Use this node to set how many videos to output.\\n\\nExample: if you have 1x master and 2x workers, set it to 3.\"\n      ],\n      \"color\": \"#432\",\n      \"bgcolor\": \"#653\"\n    },\n    {\n      \"id\": 234,\n      \"type\": \"Note\",\n      \"pos\": [\n        1540,\n        -140\n      ],\n      \"size\": [\n        270,\n        90\n      ],\n      \"flags\": {},\n      \"order\": 6,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [],\n      \"properties\": {},\n      \"widgets_values\": [\n        \"This node will give you a different variation from each worker. 
Delete this node if you want them to be all the same.\"\n      ],\n      \"color\": \"#432\",\n      \"bgcolor\": \"#653\"\n    },\n    {\n      \"id\": 227,\n      \"type\": \"VHS_VideoCombine\",\n      \"pos\": [\n        3090,\n        200\n      ],\n      \"size\": [\n        380,\n        555.6923217773438\n      ],\n      \"flags\": {},\n      \"order\": 24,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"images\",\n          \"type\": \"IMAGE\",\n          \"link\": 74\n        },\n        {\n          \"name\": \"audio\",\n          \"shape\": 7,\n          \"type\": \"AUDIO\",\n          \"link\": null\n        },\n        {\n          \"name\": \"meta_batch\",\n          \"shape\": 7,\n          \"type\": \"VHS_BatchManager\",\n          \"link\": null\n        },\n        {\n          \"name\": \"vae\",\n          \"shape\": 7,\n          \"type\": \"VAE\",\n          \"link\": null\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"Filenames\",\n          \"type\": \"VHS_FILENAMES\",\n          \"links\": null\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui-videohelpersuite\",\n        \"ver\": \"a7ce59e381934733bfae03b1be029756d6ce936d\",\n        \"Node name for S&R\": \"VHS_VideoCombine\"\n      },\n      \"widgets_values\": {\n        \"frame_rate\": 16,\n        \"loop_count\": 0,\n        \"filename_prefix\": \"WAN\",\n        \"format\": \"video/h264-mp4\",\n        \"pix_fmt\": \"yuv420p\",\n        \"crf\": 19,\n        \"save_metadata\": true,\n        \"trim_to_audio\": false,\n        \"pingpong\": false,\n        \"save_output\": true,\n        \"videopreview\": {\n          \"hidden\": false,\n          \"paused\": false,\n         
 \"params\": {\n            \"filename\": \"AnimateDiff_00163.mp4\",\n            \"subfolder\": \"\",\n            \"type\": \"output\",\n            \"format\": \"video/h264-mp4\",\n            \"frame_rate\": 8,\n            \"workflow\": \"AnimateDiff_00163.png\",\n            \"fullpath\": \"C:\\\\venvs\\\\ComfyUI\\\\ComfyUI\\\\output\\\\AnimateDiff_00163.mp4\"\n          }\n        }\n      }\n    },\n    {\n      \"id\": 228,\n      \"type\": \"VHS_VideoCombine\",\n      \"pos\": [\n        3490,\n        190\n      ],\n      \"size\": [\n        380,\n        334\n      ],\n      \"flags\": {},\n      \"order\": 25,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"images\",\n          \"type\": \"IMAGE\",\n          \"link\": 75\n        },\n        {\n          \"name\": \"audio\",\n          \"shape\": 7,\n          \"type\": \"AUDIO\",\n          \"link\": null\n        },\n        {\n          \"name\": \"meta_batch\",\n          \"shape\": 7,\n          \"type\": \"VHS_BatchManager\",\n          \"link\": null\n        },\n        {\n          \"name\": \"vae\",\n          \"shape\": 7,\n          \"type\": \"VAE\",\n          \"link\": null\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"Filenames\",\n          \"type\": \"VHS_FILENAMES\",\n          \"links\": null\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui-videohelpersuite\",\n        \"ver\": \"a7ce59e381934733bfae03b1be029756d6ce936d\",\n        \"Node name for S&R\": \"VHS_VideoCombine\"\n      },\n      \"widgets_values\": {\n        \"frame_rate\": 16,\n        \"loop_count\": 0,\n        \"filename_prefix\": \"WAN\",\n        \"format\": \"video/h264-mp4\",\n        \"pix_fmt\": 
\"yuv420p\",\n        \"crf\": 19,\n        \"save_metadata\": true,\n        \"trim_to_audio\": false,\n        \"pingpong\": false,\n        \"save_output\": true,\n        \"videopreview\": {\n          \"hidden\": false,\n          \"paused\": false,\n          \"params\": {\n            \"filename\": \"AnimateDiff_00165.mp4\",\n            \"subfolder\": \"\",\n            \"type\": \"output\",\n            \"format\": \"video/h264-mp4\",\n            \"frame_rate\": 8,\n            \"workflow\": \"AnimateDiff_00165.png\",\n            \"fullpath\": \"C:\\\\venvs\\\\ComfyUI\\\\ComfyUI\\\\output\\\\AnimateDiff_00165.mp4\"\n          }\n        }\n      }\n    },\n    {\n      \"id\": 229,\n      \"type\": \"VHS_VideoCombine\",\n      \"pos\": [\n        3880,\n        190\n      ],\n      \"size\": [\n        390,\n        334\n      ],\n      \"flags\": {},\n      \"order\": 26,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"images\",\n          \"type\": \"IMAGE\",\n          \"link\": 76\n        },\n        {\n          \"name\": \"audio\",\n          \"shape\": 7,\n          \"type\": \"AUDIO\",\n          \"link\": null\n        },\n        {\n          \"name\": \"meta_batch\",\n          \"shape\": 7,\n          \"type\": \"VHS_BatchManager\",\n          \"link\": null\n        },\n        {\n          \"name\": \"vae\",\n          \"shape\": 7,\n          \"type\": \"VAE\",\n          \"link\": null\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"Filenames\",\n          \"type\": \"VHS_FILENAMES\",\n          \"links\": null\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui-videohelpersuite\",\n        \"ver\": 
\"a7ce59e381934733bfae03b1be029756d6ce936d\",\n        \"Node name for S&R\": \"VHS_VideoCombine\"\n      },\n      \"widgets_values\": {\n        \"frame_rate\": 16,\n        \"loop_count\": 0,\n        \"filename_prefix\": \"WAN\",\n        \"format\": \"video/h264-mp4\",\n        \"pix_fmt\": \"yuv420p\",\n        \"crf\": 19,\n        \"save_metadata\": true,\n        \"trim_to_audio\": false,\n        \"pingpong\": false,\n        \"save_output\": true,\n        \"videopreview\": {\n          \"hidden\": false,\n          \"paused\": false,\n          \"params\": {\n            \"filename\": \"AnimateDiff_00164.mp4\",\n            \"subfolder\": \"\",\n            \"type\": \"output\",\n            \"format\": \"video/h264-mp4\",\n            \"frame_rate\": 8,\n            \"workflow\": \"AnimateDiff_00164.png\",\n            \"fullpath\": \"C:\\\\venvs\\\\ComfyUI\\\\ComfyUI\\\\output\\\\AnimateDiff_00164.mp4\"\n          }\n        }\n      }\n    },\n    {\n      \"id\": 8,\n      \"type\": \"VAEDecode\",\n      \"pos\": [\n        2450,\n        190\n      ],\n      \"size\": [\n        140,\n        46\n      ],\n      \"flags\": {},\n      \"order\": 21,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"samples\",\n          \"type\": \"LATENT\",\n          \"link\": 3\n        },\n        {\n          \"name\": \"vae\",\n          \"type\": \"VAE\",\n          \"link\": 4\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            67\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"VAEDecode\"\n     
 },\n      \"widgets_values\": []\n    },\n    {\n      \"id\": 51,\n      \"type\": \"CLIPVisionEncode\",\n      \"pos\": [\n        1260,\n        490\n      ],\n      \"size\": [\n        290.97918701171875,\n        78\n      ],\n      \"flags\": {},\n      \"order\": 15,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"clip_vision\",\n          \"type\": \"CLIP_VISION\",\n          \"link\": 12\n        },\n        {\n          \"name\": \"image\",\n          \"type\": \"IMAGE\",\n          \"link\": 13\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"CLIP_VISION_OUTPUT\",\n          \"type\": \"CLIP_VISION_OUTPUT\",\n          \"links\": [\n            10\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"CLIPVisionEncode\"\n      },\n      \"widgets_values\": [\n        \"none\"\n      ]\n    },\n    {\n      \"id\": 192,\n      \"type\": \"LoadImage\",\n      \"pos\": [\n        530,\n        310\n      ],\n      \"size\": [\n        310,\n        370\n      ],\n      \"flags\": {},\n      \"order\": 7,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            79\n          ]\n        },\n        {\n          \"name\": \"MASK\",\n          \"type\": \"MASK\",\n          \"links\": null\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        
\"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"LoadImage\"\n      },\n      \"widgets_values\": [\n        \"example.png\",\n        \"image\"\n      ]\n    },\n    {\n      \"id\": 55,\n      \"type\": \"ImageResize+\",\n      \"pos\": [\n        890,\n        320\n      ],\n      \"size\": [\n        270,\n        218\n      ],\n      \"flags\": {},\n      \"order\": 13,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"image\",\n          \"type\": \"IMAGE\",\n          \"link\": 79\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"IMAGE\",\n          \"type\": \"IMAGE\",\n          \"links\": [\n            13,\n            25\n          ]\n        },\n        {\n          \"name\": \"width\",\n          \"type\": \"INT\",\n          \"links\": [\n            5\n          ]\n        },\n        {\n          \"name\": \"height\",\n          \"type\": \"INT\",\n          \"links\": [\n            6\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfyui_essentials\",\n        \"ver\": \"9d9f4bedfc9f0321c19faf71855e228c93bd0dc9\",\n        \"Node name for S&R\": \"ImageResize+\"\n      },\n      \"widgets_values\": [\n        832,\n        480,\n        \"lanczos\",\n        \"fill / crop\",\n        \"always\",\n        0\n      ]\n    },\n    {\n      \"id\": 6,\n      \"type\": \"CLIPTextEncode\",\n      \"pos\": [\n        790,\n        -10\n      ],\n      \"size\": [\n        380,\n        88\n      ],\n      \"flags\": {},\n      \"order\": 10,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"clip\",\n          \"type\": \"CLIP\",\n          
\"link\": 1\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"CONDITIONING\",\n          \"type\": \"CONDITIONING\",\n          \"links\": [\n            7\n          ]\n        }\n      ],\n      \"title\": \"CLIP Text Encode (Positive Prompt)\",\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"CLIPTextEncode\"\n      },\n      \"widgets_values\": [\n        \"beautiful scenery nature glass bottle landscape, , purple galaxy bottle,\"\n      ]\n    },\n    {\n      \"id\": 39,\n      \"type\": \"VAELoader\",\n      \"pos\": [\n        1250,\n        200\n      ],\n      \"size\": [\n        270,\n        58\n      ],\n      \"flags\": {},\n      \"order\": 8,\n      \"mode\": 0,\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"name\": \"VAE\",\n          \"type\": \"VAE\",\n          \"links\": [\n            4,\n            9\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"VAELoader\"\n      },\n      \"widgets_values\": [\n        \"wan_2.1_vae.safetensors\"\n      ]\n    },\n    {\n      \"id\": 148,\n      \"type\": \"BasicGuider\",\n      \"pos\": [\n        1980,\n        260\n      ],\n      \"size\": [\n        156.0208282470703,\n        46\n      ],\n      \"flags\": {\n        \"collapsed\": true\n      },\n      \"order\": 19,\n      \"mode\": 0,\n      \"inputs\": 
[\n        {\n          \"name\": \"model\",\n          \"type\": \"MODEL\",\n          \"link\": 26\n        },\n        {\n          \"name\": \"conditioning\",\n          \"type\": \"CONDITIONING\",\n          \"link\": 27\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"GUIDER\",\n          \"type\": \"GUIDER\",\n          \"links\": [\n            19\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"BasicGuider\"\n      },\n      \"widgets_values\": []\n    },\n    {\n      \"id\": 62,\n      \"type\": \"BasicScheduler\",\n      \"pos\": [\n        2130,\n        460\n      ],\n      \"size\": [\n        270,\n        106\n      ],\n      \"flags\": {},\n      \"order\": 17,\n      \"mode\": 0,\n      \"inputs\": [\n        {\n          \"name\": \"model\",\n          \"type\": \"MODEL\",\n          \"link\": 17\n        }\n      ],\n      \"outputs\": [\n        {\n          \"name\": \"SIGMAS\",\n          \"type\": \"SIGMAS\",\n          \"links\": [\n            21\n          ]\n        }\n      ],\n      \"properties\": {\n        \"enableTabs\": false,\n        \"tabWidth\": 65,\n        \"tabXOffset\": 10,\n        \"hasSecondTab\": false,\n        \"secondTabText\": \"Send Back\",\n        \"secondTabOffset\": 80,\n        \"secondTabWidth\": 65,\n        \"cnr_id\": \"comfy-core\",\n        \"ver\": \"0.3.43\",\n        \"Node name for S&R\": \"BasicScheduler\"\n      },\n      \"widgets_values\": [\n        \"simple\",\n        4,\n        1\n      ]\n    }\n  ],\n  \"links\": [\n    [\n      1,\n      139,\n      0,\n      6,\n      0,\n      \"CLIP\"\n    ],\n    [\n      2,\n      
139,\n      0,\n      7,\n      0,\n      \"CLIP\"\n    ],\n    [\n      3,\n      64,\n      0,\n      8,\n      0,\n      \"LATENT\"\n    ],\n    [\n      4,\n      39,\n      0,\n      8,\n      1,\n      \"VAE\"\n    ],\n    [\n      5,\n      55,\n      1,\n      50,\n      5,\n      \"INT\"\n    ],\n    [\n      6,\n      55,\n      2,\n      50,\n      6,\n      \"INT\"\n    ],\n    [\n      7,\n      6,\n      0,\n      50,\n      0,\n      \"CONDITIONING\"\n    ],\n    [\n      8,\n      7,\n      0,\n      50,\n      1,\n      \"CONDITIONING\"\n    ],\n    [\n      9,\n      39,\n      0,\n      50,\n      2,\n      \"VAE\"\n    ],\n    [\n      10,\n      51,\n      0,\n      50,\n      3,\n      \"CLIP_VISION_OUTPUT\"\n    ],\n    [\n      11,\n      135,\n      0,\n      50,\n      4,\n      \"IMAGE\"\n    ],\n    [\n      12,\n      49,\n      0,\n      51,\n      0,\n      \"CLIP_VISION\"\n    ],\n    [\n      13,\n      55,\n      0,\n      51,\n      1,\n      \"IMAGE\"\n    ],\n    [\n      16,\n      150,\n      0,\n      60,\n      0,\n      \"INT\"\n    ],\n    [\n      17,\n      67,\n      0,\n      62,\n      0,\n      \"MODEL\"\n    ],\n    [\n      18,\n      60,\n      0,\n      64,\n      0,\n      \"NOISE\"\n    ],\n    [\n      19,\n      148,\n      0,\n      64,\n      1,\n      \"GUIDER\"\n    ],\n    [\n      20,\n      61,\n      0,\n      64,\n      2,\n      \"SAMPLER\"\n    ],\n    [\n      21,\n      62,\n      0,\n      64,\n      3,\n      \"SIGMAS\"\n    ],\n    [\n      22,\n      50,\n      2,\n      64,\n      4,\n      \"LATENT\"\n    ],\n    [\n      23,\n      118,\n      0,\n      67,\n      0,\n      \"MODEL\"\n    ],\n    [\n      24,\n      142,\n      0,\n      118,\n      0,\n      \"MODEL\"\n    ],\n    [\n      25,\n      55,\n      0,\n      135,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      26,\n      67,\n      0,\n      148,\n      0,\n      \"MODEL\"\n    ],\n    [\n      27,\n      50,\n      0,\n     
 148,\n      1,\n      \"CONDITIONING\"\n    ],\n    [\n      67,\n      8,\n      0,\n      221,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      74,\n      231,\n      0,\n      227,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      75,\n      231,\n      1,\n      228,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      76,\n      231,\n      2,\n      229,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      78,\n      221,\n      0,\n      231,\n      0,\n      \"IMAGE\"\n    ],\n    [\n      79,\n      192,\n      0,\n      55,\n      0,\n      \"IMAGE\"\n    ]\n  ],\n  \"groups\": [],\n  \"config\": {},\n  \"extra\": {\n    \"ds\": {\n      \"scale\": 0.9090909090909091,\n      \"offset\": [\n        -1587.6131275248988,\n        280.3225523752761\n      ]\n    },\n    \"frontendVersion\": \"1.23.4\",\n    \"VHS_latentpreview\": false,\n    \"VHS_latentpreviewrate\": 0,\n    \"VHS_MetadataImage\": true,\n    \"VHS_KeepIntermediate\": true\n  },\n  \"version\": 0.4\n}"
  }
]