master 82a973c04367 cached
298 files
2.2 MB
585.1k tokens
2616 symbols
4 requests
Download .txt
Showing preview only (2,335K chars total). Download the full file or copy to clipboard to get everything.
Repository: AUTOMATIC1111/stable-diffusion-webui
Branch: master
Commit: 82a973c04367
Files: 298
Total size: 2.2 MB

Directory structure:
gitextract_l_rki2tj/

├── .eslintignore
├── .eslintrc.js
├── .git-blame-ignore-revs
├── .github/
│   ├── ISSUE_TEMPLATE/
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   └── feature_request.yml
│   ├── pull_request_template.md
│   └── workflows/
│       ├── on_pull_request.yaml
│       ├── run_tests.yaml
│       └── warns_merge_master.yml
├── .gitignore
├── .pylintrc
├── CHANGELOG.md
├── CITATION.cff
├── CODEOWNERS
├── LICENSE.txt
├── README.md
├── _typos.toml
├── configs/
│   ├── alt-diffusion-inference.yaml
│   ├── alt-diffusion-m18-inference.yaml
│   ├── instruct-pix2pix.yaml
│   ├── sd3-inference.yaml
│   ├── sd_xl_inpaint.yaml
│   ├── v1-inference.yaml
│   └── v1-inpainting-inference.yaml
├── environment-wsl2.yaml
├── extensions-builtin/
│   ├── LDSR/
│   │   ├── ldsr_model_arch.py
│   │   ├── preload.py
│   │   ├── scripts/
│   │   │   └── ldsr_model.py
│   │   ├── sd_hijack_autoencoder.py
│   │   ├── sd_hijack_ddpm_v1.py
│   │   └── vqvae_quantize.py
│   ├── Lora/
│   │   ├── extra_networks_lora.py
│   │   ├── lora.py
│   │   ├── lora_logger.py
│   │   ├── lora_patches.py
│   │   ├── lyco_helpers.py
│   │   ├── network.py
│   │   ├── network_full.py
│   │   ├── network_glora.py
│   │   ├── network_hada.py
│   │   ├── network_ia3.py
│   │   ├── network_lokr.py
│   │   ├── network_lora.py
│   │   ├── network_norm.py
│   │   ├── network_oft.py
│   │   ├── networks.py
│   │   ├── preload.py
│   │   ├── scripts/
│   │   │   └── lora_script.py
│   │   ├── ui_edit_user_metadata.py
│   │   └── ui_extra_networks_lora.py
│   ├── ScuNET/
│   │   ├── preload.py
│   │   └── scripts/
│   │       └── scunet_model.py
│   ├── SwinIR/
│   │   ├── preload.py
│   │   └── scripts/
│   │       └── swinir_model.py
│   ├── canvas-zoom-and-pan/
│   │   ├── javascript/
│   │   │   └── zoom.js
│   │   ├── scripts/
│   │   │   └── hotkey_config.py
│   │   └── style.css
│   ├── extra-options-section/
│   │   └── scripts/
│   │       └── extra_options_section.py
│   ├── hypertile/
│   │   ├── hypertile.py
│   │   └── scripts/
│   │       └── hypertile_script.py
│   ├── mobile/
│   │   └── javascript/
│   │       └── mobile.js
│   ├── postprocessing-for-training/
│   │   └── scripts/
│   │       ├── postprocessing_autosized_crop.py
│   │       ├── postprocessing_caption.py
│   │       ├── postprocessing_create_flipped_copies.py
│   │       ├── postprocessing_focal_crop.py
│   │       └── postprocessing_split_oversized.py
│   ├── prompt-bracket-checker/
│   │   └── javascript/
│   │       └── prompt-bracket-checker.js
│   └── soft-inpainting/
│       └── scripts/
│           └── soft_inpainting.py
├── html/
│   ├── extra-networks-card.html
│   ├── extra-networks-copy-path-button.html
│   ├── extra-networks-edit-item-button.html
│   ├── extra-networks-metadata-button.html
│   ├── extra-networks-no-cards.html
│   ├── extra-networks-pane-dirs.html
│   ├── extra-networks-pane-tree.html
│   ├── extra-networks-pane.html
│   ├── extra-networks-tree-button.html
│   ├── footer.html
│   └── licenses.html
├── javascript/
│   ├── aspectRatioOverlay.js
│   ├── contextMenus.js
│   ├── dragdrop.js
│   ├── edit-attention.js
│   ├── edit-order.js
│   ├── extensions.js
│   ├── extraNetworks.js
│   ├── generationParams.js
│   ├── hints.js
│   ├── hires_fix.js
│   ├── imageMaskFix.js
│   ├── imageviewer.js
│   ├── imageviewerGamepad.js
│   ├── inputAccordion.js
│   ├── localStorage.js
│   ├── localization.js
│   ├── notification.js
│   ├── profilerVisualization.js
│   ├── progressbar.js
│   ├── resizeHandle.js
│   ├── settings.js
│   ├── textualInversion.js
│   ├── token-counters.js
│   ├── ui.js
│   └── ui_settings_hints.js
├── launch.py
├── localizations/
│   └── Put localization files here.txt
├── modules/
│   ├── api/
│   │   ├── api.py
│   │   └── models.py
│   ├── cache.py
│   ├── call_queue.py
│   ├── cmd_args.py
│   ├── codeformer_model.py
│   ├── config_states.py
│   ├── dat_model.py
│   ├── deepbooru.py
│   ├── deepbooru_model.py
│   ├── devices.py
│   ├── errors.py
│   ├── esrgan_model.py
│   ├── extensions.py
│   ├── extra_networks.py
│   ├── extra_networks_hypernet.py
│   ├── extras.py
│   ├── face_restoration.py
│   ├── face_restoration_utils.py
│   ├── fifo_lock.py
│   ├── gfpgan_model.py
│   ├── gitpython_hack.py
│   ├── gradio_extensons.py
│   ├── hashes.py
│   ├── hat_model.py
│   ├── hypernetworks/
│   │   ├── hypernetwork.py
│   │   └── ui.py
│   ├── images.py
│   ├── img2img.py
│   ├── import_hook.py
│   ├── infotext_utils.py
│   ├── infotext_versions.py
│   ├── initialize.py
│   ├── initialize_util.py
│   ├── interrogate.py
│   ├── launch_utils.py
│   ├── localization.py
│   ├── logging_config.py
│   ├── lowvram.py
│   ├── mac_specific.py
│   ├── masking.py
│   ├── memmon.py
│   ├── modelloader.py
│   ├── models/
│   │   ├── diffusion/
│   │   │   ├── ddpm_edit.py
│   │   │   └── uni_pc/
│   │   │       ├── __init__.py
│   │   │       ├── sampler.py
│   │   │       └── uni_pc.py
│   │   └── sd3/
│   │       ├── mmdit.py
│   │       ├── other_impls.py
│   │       ├── sd3_cond.py
│   │       ├── sd3_impls.py
│   │       └── sd3_model.py
│   ├── ngrok.py
│   ├── npu_specific.py
│   ├── options.py
│   ├── patches.py
│   ├── paths.py
│   ├── paths_internal.py
│   ├── postprocessing.py
│   ├── processing.py
│   ├── processing_scripts/
│   │   ├── comments.py
│   │   ├── refiner.py
│   │   ├── sampler.py
│   │   └── seed.py
│   ├── profiling.py
│   ├── progress.py
│   ├── prompt_parser.py
│   ├── realesrgan_model.py
│   ├── restart.py
│   ├── rng.py
│   ├── rng_philox.py
│   ├── safe.py
│   ├── script_callbacks.py
│   ├── script_loading.py
│   ├── scripts.py
│   ├── scripts_auto_postprocessing.py
│   ├── scripts_postprocessing.py
│   ├── sd_disable_initialization.py
│   ├── sd_emphasis.py
│   ├── sd_hijack.py
│   ├── sd_hijack_checkpoint.py
│   ├── sd_hijack_clip.py
│   ├── sd_hijack_clip_old.py
│   ├── sd_hijack_ip2p.py
│   ├── sd_hijack_open_clip.py
│   ├── sd_hijack_optimizations.py
│   ├── sd_hijack_unet.py
│   ├── sd_hijack_utils.py
│   ├── sd_hijack_xlmr.py
│   ├── sd_models.py
│   ├── sd_models_config.py
│   ├── sd_models_types.py
│   ├── sd_models_xl.py
│   ├── sd_samplers.py
│   ├── sd_samplers_cfg_denoiser.py
│   ├── sd_samplers_common.py
│   ├── sd_samplers_compvis.py
│   ├── sd_samplers_extra.py
│   ├── sd_samplers_kdiffusion.py
│   ├── sd_samplers_lcm.py
│   ├── sd_samplers_timesteps.py
│   ├── sd_samplers_timesteps_impl.py
│   ├── sd_schedulers.py
│   ├── sd_unet.py
│   ├── sd_vae.py
│   ├── sd_vae_approx.py
│   ├── sd_vae_taesd.py
│   ├── shared.py
│   ├── shared_cmd_options.py
│   ├── shared_gradio_themes.py
│   ├── shared_init.py
│   ├── shared_items.py
│   ├── shared_options.py
│   ├── shared_state.py
│   ├── shared_total_tqdm.py
│   ├── styles.py
│   ├── sub_quadratic_attention.py
│   ├── sysinfo.py
│   ├── textual_inversion/
│   │   ├── autocrop.py
│   │   ├── dataset.py
│   │   ├── image_embedding.py
│   │   ├── learn_schedule.py
│   │   ├── saving_settings.py
│   │   ├── textual_inversion.py
│   │   └── ui.py
│   ├── timer.py
│   ├── torch_utils.py
│   ├── txt2img.py
│   ├── ui.py
│   ├── ui_checkpoint_merger.py
│   ├── ui_common.py
│   ├── ui_components.py
│   ├── ui_extensions.py
│   ├── ui_extra_networks.py
│   ├── ui_extra_networks_checkpoints.py
│   ├── ui_extra_networks_checkpoints_user_metadata.py
│   ├── ui_extra_networks_hypernets.py
│   ├── ui_extra_networks_textual_inversion.py
│   ├── ui_extra_networks_user_metadata.py
│   ├── ui_gradio_extensions.py
│   ├── ui_loadsave.py
│   ├── ui_postprocessing.py
│   ├── ui_prompt_styles.py
│   ├── ui_settings.py
│   ├── ui_tempdir.py
│   ├── ui_toprow.py
│   ├── upscaler.py
│   ├── upscaler_utils.py
│   ├── util.py
│   ├── xlmr.py
│   ├── xlmr_m18.py
│   └── xpu_specific.py
├── package.json
├── pyproject.toml
├── requirements-test.txt
├── requirements.txt
├── requirements_npu.txt
├── requirements_versions.txt
├── script.js
├── scripts/
│   ├── custom_code.py
│   ├── img2imgalt.py
│   ├── loopback.py
│   ├── outpainting_mk_2.py
│   ├── poor_mans_outpainting.py
│   ├── postprocessing_codeformer.py
│   ├── postprocessing_gfpgan.py
│   ├── postprocessing_upscale.py
│   ├── prompt_matrix.py
│   ├── prompts_from_file.py
│   ├── sd_upscale.py
│   └── xyz_grid.py
├── style.css
├── test/
│   ├── __init__.py
│   ├── conftest.py
│   ├── test_extras.py
│   ├── test_face_restorers.py
│   ├── test_files/
│   │   └── empty.pt
│   ├── test_img2img.py
│   ├── test_torch_utils.py
│   ├── test_txt2img.py
│   └── test_utils.py
├── textual_inversion_templates/
│   ├── hypernetwork.txt
│   ├── none.txt
│   ├── style.txt
│   ├── style_filewords.txt
│   ├── subject.txt
│   └── subject_filewords.txt
├── webui-macos-env.sh
├── webui.bat
├── webui.py
└── webui.sh

================================================
FILE CONTENTS
================================================

================================================
FILE: .eslintignore
================================================
extensions
extensions-disabled
repositories
venv

================================================
FILE: .eslintrc.js
================================================
/* global module */
module.exports = {
    env: {
        browser: true,
        es2021: true,
    },
    extends: "eslint:recommended",
    parserOptions: {
        ecmaVersion: "latest",
    },
    rules: {
        "arrow-spacing": "error",
        "block-spacing": "error",
        "brace-style": "error",
        "comma-dangle": ["error", "only-multiline"],
        "comma-spacing": "error",
        "comma-style": ["error", "last"],
        "curly": ["error", "multi-line", "consistent"],
        "eol-last": "error",
        "func-call-spacing": "error",
        "function-call-argument-newline": ["error", "consistent"],
        "function-paren-newline": ["error", "consistent"],
        "indent": ["error", 4],
        "key-spacing": "error",
        "keyword-spacing": "error",
        "linebreak-style": ["error", "unix"],
        "no-extra-semi": "error",
        "no-mixed-spaces-and-tabs": "error",
        "no-multi-spaces": "error",
        "no-redeclare": ["error", {builtinGlobals: false}],
        "no-trailing-spaces": "error",
        "no-unused-vars": "off",
        "no-whitespace-before-property": "error",
        "object-curly-newline": ["error", {consistent: true, multiline: true}],
        "object-curly-spacing": ["error", "never"],
        "operator-linebreak": ["error", "after"],
        "quote-props": ["error", "consistent-as-needed"],
        "semi": ["error", "always"],
        "semi-spacing": "error",
        "semi-style": ["error", "last"],
        "space-before-blocks": "error",
        "space-before-function-paren": ["error", "never"],
        "space-in-parens": ["error", "never"],
        "space-infix-ops": "error",
        "space-unary-ops": "error",
        "switch-colon-spacing": "error",
        "template-curly-spacing": ["error", "never"],
        "unicode-bom": "error",
    },
    globals: {
        //script.js
        gradioApp: "readonly",
        executeCallbacks: "readonly",
        onAfterUiUpdate: "readonly",
        onOptionsChanged: "readonly",
        onUiLoaded: "readonly",
        onUiUpdate: "readonly",
        uiCurrentTab: "writable",
        uiElementInSight: "readonly",
        uiElementIsVisible: "readonly",
        //ui.js
        opts: "writable",
        all_gallery_buttons: "readonly",
        selected_gallery_button: "readonly",
        selected_gallery_index: "readonly",
        switch_to_txt2img: "readonly",
        switch_to_img2img_tab: "readonly",
        switch_to_img2img: "readonly",
        switch_to_sketch: "readonly",
        switch_to_inpaint: "readonly",
        switch_to_inpaint_sketch: "readonly",
        switch_to_extras: "readonly",
        get_tab_index: "readonly",
        create_submit_args: "readonly",
        restart_reload: "readonly",
        updateInput: "readonly",
        onEdit: "readonly",
        //extraNetworks.js
        requestGet: "readonly",
        popup: "readonly",
        // profilerVisualization.js
        createVisualizationTable: "readonly",
        // from python
        localization: "readonly",
        // progrssbar.js
        randomId: "readonly",
        requestProgress: "readonly",
        // imageviewer.js
        modalPrevImage: "readonly",
        modalNextImage: "readonly",
        // localStorage.js
        localSet: "readonly",
        localGet: "readonly",
        localRemove: "readonly",
        // resizeHandle.js
        setupResizeHandle: "writable"
    }
};


================================================
FILE: .git-blame-ignore-revs
================================================
# Apply ESlint
9c54b78d9dde5601e916f308d9a9d6953ec39430

================================================
FILE: .github/ISSUE_TEMPLATE/bug_report.yml
================================================
name: Bug Report
description: You think something is broken in the UI
title: "[Bug]: "
labels: ["bug-report"]

body:
  - type: markdown
    attributes:
      value: |
        > The title of the bug report should be short and descriptive.
        > Use relevant keywords for searchability.
        > Do not leave it blank, but also do not put an entire error log in it.
  - type: checkboxes
    attributes:
      label: Checklist
      description: |
        Please perform basic debugging to see if extensions or configuration is the cause of the issue.
        Basic debug procedure
         1. Disable all third-party extensions - check if extension is the cause
         2. Update extensions and webui - sometimes things just need to be updated
         3. Backup and remove your config.json and ui-config.json - check if the issue is caused by bad configuration
         4. Delete venv with third-party extensions disabled - sometimes extensions might cause wrong libraries to be installed
         5. Try a fresh installation webui in a different directory - see if a clean installation solves the issue
        Before making an issue report, please check that the issue hasn't been reported recently.
      options:
        - label: The issue exists after disabling all extensions
        - label: The issue exists on a clean installation of webui
        - label: The issue is caused by an extension, but I believe it is caused by a bug in the webui
        - label: The issue exists in the current version of the webui
        - label: The issue has not been reported before recently
        - label: The issue has been reported before but has not been fixed yet
  - type: markdown
    attributes:
      value: |
        > Please fill this form with as much information as possible. Don't forget to "Upload Sysinfo" and "What browsers" and provide screenshots if possible
  - type: textarea
    id: what-did
    attributes:
      label: What happened?
      description: Tell us what happened in a very clear and simple way
      placeholder: |
        txt2img is not working as intended.
    validations:
      required: true
  - type: textarea
    id: steps
    attributes:
      label: Steps to reproduce the problem
      description: Please provide us with precise step by step instructions on how to reproduce the bug
      placeholder: |
        1. Go to ...
        2. Press ...
        3. ...
    validations:
      required: true
  - type: textarea
    id: what-should
    attributes:
      label: What should have happened?
      description: Tell us what you think the normal behavior should be
      placeholder: |
        WebUI should ...
    validations:
      required: true
  - type: dropdown
    id: browsers
    attributes:
      label: What browsers do you use to access the UI ?
      multiple: true
      options:
        - Mozilla Firefox
        - Google Chrome
        - Brave
        - Apple Safari
        - Microsoft Edge
        - Android
        - iOS
        - Other
  - type: textarea
    id: sysinfo
    attributes:
      label: Sysinfo
      description: System info file, generated by WebUI. You can generate it in settings, on the Sysinfo page. Drag the file into the field to upload it. If you submit your report without including the sysinfo file, the report will be closed. If needed, review the report to make sure it includes no personal information you don't want to share. If you can't start WebUI, you can use --dump-sysinfo commandline argument to generate the file.
      placeholder: |
        1. Go to WebUI Settings -> Sysinfo -> Download system info.
            If WebUI fails to launch, use --dump-sysinfo commandline argument to generate the file
        2. Upload the Sysinfo as an attached file. Do NOT paste it in as plain text.
    validations:
      required: true
  - type: textarea
    id: logs
    attributes:
      label: Console logs
      description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after the bug occurred. If it's very long, provide a link to pastebin or similar service.
      render: Shell
    validations:
      required: true
  - type: textarea
    id: misc
    attributes:
      label: Additional information
      description: | 
        Please provide us with any relevant additional info or context.
        Examples:
         I have updated my GPU driver recently.


================================================
FILE: .github/ISSUE_TEMPLATE/config.yml
================================================
blank_issues_enabled: false
contact_links:
  - name: WebUI Community Support
    url: https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions
    about: Please ask and answer questions here.


================================================
FILE: .github/ISSUE_TEMPLATE/feature_request.yml
================================================
name: Feature request
description: Suggest an idea for this project
title: "[Feature Request]: "
labels: ["enhancement"]

body:
  - type: checkboxes
    attributes:
      label: Is there an existing issue for this?
      description: Please search to see if an issue already exists for the feature you want, and that it's not implemented in a recent build/commit.
      options:
        - label: I have searched the existing issues and checked the recent builds/commits
          required: true
  - type: markdown
    attributes:
      value: |
        *Please fill this form with as much information as possible, provide screenshots and/or illustrations of the feature if possible*
  - type: textarea
    id: feature
    attributes:
      label: What would your feature do ?
      description: Tell us about your feature in a very clear and simple way, and what problem it would solve
    validations:
      required: true
  - type: textarea
    id: workflow
    attributes:
      label: Proposed workflow
      description: Please provide us with step by step information on how you'd like the feature to be accessed and used
      value: |
        1. Go to .... 
        2. Press ....
        3. ...
    validations:
      required: true
  - type: textarea
    id: misc
    attributes:
      label: Additional information
      description: Add any other context or screenshots about the feature request here.


================================================
FILE: .github/pull_request_template.md
================================================
## Description

* a simple description of what you're trying to accomplish
* a summary of changes in code
* which issues it fixes, if any

## Screenshots/videos:


## Checklist:

- [ ] I have read [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing)
- [ ] I have performed a self-review of my own code
- [ ] My code follows the [style guidelines](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style)
- [ ] My code passes [tests](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests)


================================================
FILE: .github/workflows/on_pull_request.yaml
================================================
name: Linter

on:
  - push
  - pull_request

jobs:
  lint-python:
    name: ruff
    runs-on: ubuntu-latest
    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
    steps:
      - name: Checkout Code
        uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: 3.11
          # NB: there's no cache: pip here since we're not installing anything
          #     from the requirements.txt file(s) in the repository; it's faster
          #     not to have GHA download an (at the time of writing) 4 GB cache
          #     of PyTorch and other dependencies.
      - name: Install Ruff
        run: pip install ruff==0.3.3
      - name: Run Ruff
        run: ruff .
  lint-js:
    name: eslint
    runs-on: ubuntu-latest
    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
    steps:
      - name: Checkout Code
        uses: actions/checkout@v4
      - name: Install Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 18
      - run: npm i --ci
      - run: npm run lint


================================================
FILE: .github/workflows/run_tests.yaml
================================================
name: Tests

on:
  - push
  - pull_request

jobs:
  test:
    name: tests on CPU with empty model
    runs-on: ubuntu-latest
    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
    steps:
      - name: Checkout Code
        uses: actions/checkout@v4
      - name: Set up Python 3.10
        uses: actions/setup-python@v5
        with:
          python-version: 3.10.6
          cache: pip
          cache-dependency-path: |
            **/requirements*txt
            launch.py
      - name: Cache models
        id: cache-models
        uses: actions/cache@v4
        with:
          path: models
          key: "2023-12-30"
      - name: Install test dependencies
        run: pip install wait-for-it -r requirements-test.txt
        env:
          PIP_DISABLE_PIP_VERSION_CHECK: "1"
          PIP_PROGRESS_BAR: "off"
      - name: Setup environment
        run: python launch.py --skip-torch-cuda-test --exit
        env:
          PIP_DISABLE_PIP_VERSION_CHECK: "1"
          PIP_PROGRESS_BAR: "off"
          TORCH_INDEX_URL: https://download.pytorch.org/whl/cpu
          WEBUI_LAUNCH_LIVE_OUTPUT: "1"
          PYTHONUNBUFFERED: "1"
      - name: Print installed packages
        run: pip freeze
      - name: Start test server
        run: >
          python -m coverage run
          --data-file=.coverage.server
          launch.py
          --skip-prepare-environment
          --skip-torch-cuda-test
          --test-server
          --do-not-download-clip
          --no-half
          --disable-opt-split-attention
          --use-cpu all
          --api-server-stop
          2>&1 | tee output.txt &
      - name: Run tests
        run: |
          wait-for-it --service 127.0.0.1:7860 -t 20
          python -m pytest -vv --junitxml=test/results.xml --cov . --cov-report=xml --verify-base-url test
      - name: Kill test server
        if: always()
        run: curl -vv -XPOST http://127.0.0.1:7860/sdapi/v1/server-stop && sleep 10
      - name: Show coverage
        run: |
          python -m coverage combine .coverage*
          python -m coverage report -i
          python -m coverage html -i
      - name: Upload main app output
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: output
          path: output.txt
      - name: Upload coverage HTML
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: htmlcov
          path: htmlcov


================================================
FILE: .github/workflows/warns_merge_master.yml
================================================
name: Pull requests can't target master branch

"on":
  pull_request:
    types:
      - opened
      - synchronize
      - reopened
    branches:
      - master

jobs:
  check:
    runs-on: ubuntu-latest
    steps:
      - name: Warn about merge into master
        run: |
          echo -e "::warning::This pull request directly merges into \"master\" branch, normally development happens on \"dev\" branch."
          exit 1


================================================
FILE: .gitignore
================================================
__pycache__
*.ckpt
*.safetensors
*.pth
.DS_Store
/ESRGAN/*
/SwinIR/*
/repositories
/venv
/tmp
/model.ckpt
/models/**/*
/GFPGANv1.3.pth
/gfpgan/weights/*.pth
/ui-config.json
/outputs
/config.json
/log
/webui.settings.bat
/embeddings
/styles.csv
/params.txt
/styles.csv.bak
/webui-user.bat
/webui-user.sh
/interrogate
/user.css
/.idea
notification.mp3
/SwinIR
/textual_inversion
.vscode
/extensions
/test/stdout.txt
/test/stderr.txt
/cache.json*
/config_states/
/node_modules
/package-lock.json
/.coverage*
/test/test_outputs
/cache
trace.json
/sysinfo-????-??-??-??-??.json


================================================
FILE: .pylintrc
================================================
# See https://pylint.pycqa.org/en/latest/user_guide/messages/message_control.html
[MESSAGES CONTROL]
disable=C,R,W,E,I


================================================
FILE: CHANGELOG.md
================================================
## 1.10.1

### Bug Fixes:
* fix image upscale on cpu ([#16275](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16275))


## 1.10.0

### Features:
* A lot of performance improvements (see below in Performance section)
* Stable Diffusion 3 support ([#16030](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16030), [#16164](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16164), [#16212](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16212))
  * Recommended Euler sampler; DDIM and other timestep samplers currently not supported
  * T5 text model is disabled by default, enable it in settings
* New schedulers:
  * Align Your Steps ([#15751](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15751))
  * KL Optimal ([#15608](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15608))
  * Normal ([#16149](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16149))
  * DDIM ([#16149](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16149))
  * Simple ([#16142](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16142))
  * Beta ([#16235](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16235))
* New sampler: DDIM CFG++ ([#16035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16035))

### Minor:
* Option to skip CFG on early steps ([#15607](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15607))
* Add --models-dir option ([#15742](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15742))
* Allow mobile users to open context menu by using two fingers press ([#15682](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15682))
* Infotext: add Lora name as TI hashes for bundled Textual Inversion ([#15679](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15679))
* Check model's hash after downloading it to prevent corrupted downloads ([#15602](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15602))
* More extension tag filtering options ([#15627](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15627))
* When saving AVIF, use JPEG's quality setting ([#15610](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15610))
* Add filename pattern: `[basename]` ([#15978](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15978))
* Add option to enable clip skip for clip L on SDXL ([#15992](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15992))
* Option to prevent screen sleep during generation ([#16001](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16001))
* ToggleLivePreview button in image viewer ([#16065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16065))
* Remove ui flashing on reloading and fast scrolling ([#16153](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16153))
* Option to disable the save button writing to log.csv ([#16242](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16242))

### Extensions and API:
* Add process_before_every_sampling hook ([#15984](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15984))
* Return HTTP 400 instead of 404 on invalid sampler error ([#16140](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16140))

### Performance:
* [Performance 1/6] use_checkpoint = False ([#15803](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15803))
* [Performance 2/6] Replace einops.rearrange with torch native ops ([#15804](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15804))
* [Performance 4/6] Precompute is_sdxl_inpaint flag ([#15806](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15806))
* [Performance 5/6] Prevent unnecessary extra networks bias backup ([#15816](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15816))
* [Performance 6/6] Add --precision half option to avoid casting during inference ([#15820](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15820))
* [Performance] LDM optimization patches ([#15824](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15824))
* [Performance] Keep sigmas on CPU ([#15823](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15823))
* Check for nans in unet only once, after all steps have been completed
* Added option to run torch profiler for image generation

### Bug Fixes:
* Fix for grids without comprehensive infotexts ([#15958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15958))
* feat: lora partial update precede full update ([#15943](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15943))
* Fix bug where file extension had an extra '.' under some circumstances ([#15893](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15893))
* Fix corrupt model initial load loop ([#15600](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15600))
* Allow old sampler names in API ([#15656](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15656))
* more old sampler scheduler compatibility ([#15681](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15681))
* Fix Hypertile xyz ([#15831](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15831))
* XYZ CSV skipinitialspace ([#15832](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15832))
* fix soft inpainting on mps and xpu, torch_utils.float64 ([#15815](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15815))
* fix extension update when not on main branch ([#15797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15797))
* update pickle safe filenames
* use relative path for webui-assets css ([#15757](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15757))
* When creating a virtual environment, upgrade pip in webui.bat/webui.sh ([#15750](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15750))
* Fix AttributeError ([#15738](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15738))
* use script_path for webui root in launch_utils ([#15705](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15705))
* fix extra batch mode P Transparency ([#15664](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15664))
* use gradio theme colors in css ([#15680](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15680))
* Fix dragging text within prompt input ([#15657](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15657))
* Add correct mimetype for .mjs files ([#15654](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15654))
* QOL Items - handle metadata issues more cleanly for SD models, Loras and embeddings ([#15632](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15632))
* replace wsl-open with wslpath and explorer.exe ([#15968](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15968))
* Fix SDXL Inpaint ([#15976](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15976))
* multi size grid ([#15988](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15988))
* fix Replace preview ([#16118](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16118))
* Possible fix of wrong scale in weight decomposition ([#16151](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16151))
* Ensure use of python from venv on Mac and Linux ([#16116](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16116))
* Prioritize python3.10 over python3 if both are available on Linux and Mac (with fallback) ([#16092](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16092))
* stopping generation extras ([#16085](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16085))
* Fix SD2 loading ([#16078](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16078), [#16079](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16079))
* fix infotext Lora hashes for hires fix different lora ([#16062](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16062))
* Fix sampler scheduler autocorrection warning ([#16054](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16054))
* fix ui flashing on reloading and fast scrolling ([#16153](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16153))
* fix upscale logic ([#16239](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16239))
* [bug] do not break progressbar on non-job actions (add wrap_gradio_call_no_job) ([#16202](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16202))
* fix OSError: cannot write mode P as JPEG ([#16194](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16194))

### Other:
* fix changelog #15883 -> #15882 ([#15907](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15907))
* ReloadUI backgroundColor --background-fill-primary ([#15864](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15864))
* Use different torch versions for Intel and ARM Macs ([#15851](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15851))
* XYZ override rework ([#15836](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15836))
* scroll extensions table on overflow ([#15830](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15830))
* img2img batch upload method ([#15817](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15817))
* chore: sync v1.8.0 packages according to changelog ([#15783](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15783))
* Add AVIF MIME type support to mimetype definitions ([#15739](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15739))
* Update imageviewer.js ([#15730](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15730))
* no-referrer ([#15641](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15641))
* .gitignore trace.json ([#15980](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15980))
* Bump spandrel to 0.3.4 ([#16144](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16144))
* Defunct --max-batch-count ([#16119](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16119))
* docs: update bug_report.yml ([#16102](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16102))
* Maintaining Project Compatibility for Python 3.9 Users Without Upgrade Requirements. ([#16088](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16088), [#16169](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16169), [#16192](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16192))
* Update torch for ARM Macs to 2.3.1 ([#16059](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16059))
* remove deprecated setting dont_fix_second_order_samplers_schedule ([#16061](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16061))
* chore: fix typos ([#16060](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16060))
* shlex.join launch args in console log ([#16170](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16170))
* activate venv .bat ([#16231](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16231))
* add ids to the resize tabs in img2img ([#16218](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16218))
* update installation guide linux ([#16178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16178))
* Robust sysinfo ([#16173](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16173))
* do not send image size on paste inpaint ([#16180](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16180))
* Fix noisy DS_Store files for MacOS ([#16166](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16166))


## 1.9.4

### Bug Fixes:
*  pin setuptools version to fix the startup error ([#15882](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15882)) 

## 1.9.3

### Bug Fixes:
*  fix get_crop_region_v2 ([#15594](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15594)) 

## 1.9.2

### Extensions and API:
* restore 1.8.0-style naming of scripts

## 1.9.1

### Minor:
* Add avif support ([#15582](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15582))
* Add filename patterns: `[sampler_scheduler]` and `[scheduler]` ([#15581](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15581))

### Extensions and API:
* undo adding scripts to sys.modules
* Add schedulers API endpoint ([#15577](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15577))
* Remove API upscaling factor limits ([#15560](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15560))

### Bug Fixes:
* Fix images do not match / Coordinate 'right' is less than 'left' ([#15534](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15534))
* fix: remove_callbacks_for_function should also remove from the ordered map ([#15533](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15533))
* fix x1 upscalers ([#15555](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15555))
* Fix cls.__module__ value in extension script ([#15532](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15532))
* fix typo in function call (eror -> error) ([#15531](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15531))

### Other:
* Hide 'No Image data blocks found.' message ([#15567](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15567))
* Allow webui.sh to be runnable from arbitrary directories containing a .git file ([#15561](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15561))
* Compatibility with Debian 11, Fedora 34+ and openSUSE 15.4+ ([#15544](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15544))
* numpy DeprecationWarning product -> prod ([#15547](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15547))
* get_crop_region_v2 ([#15583](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15583), [#15587](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15587))


## 1.9.0

### Features:
* Make refiner switchover based on model timesteps instead of sampling steps ([#14978](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14978))
* add an option to have old-style directory view instead of tree view; stylistic changes for extra network sorting/search controls
* add UI for reordering callbacks, support for specifying callback order in extension metadata ([#15205](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15205))
* Sgm uniform scheduler for SDXL-Lightning models ([#15325](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15325))
* Scheduler selection in main UI ([#15333](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15333), [#15361](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15361), [#15394](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15394))

### Minor:
* "open images directory" button now opens the actual dir ([#14947](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14947))
* Support inference with LyCORIS BOFT networks ([#14871](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14871), [#14973](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14973))
* make extra network card description plaintext by default, with an option to re-enable HTML as it was
* resize handle for extra networks ([#15041](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15041))
* cmd args: `--unix-filenames-sanitization` and `--filenames-max-length` ([#15031](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15031))
* show extra networks parameters in HTML table rather than raw JSON ([#15131](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15131))
* Add DoRA (weight-decompose) support for LoRA/LoHa/LoKr ([#15160](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15160), [#15283](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15283))
* Add '--no-prompt-history' cmd args for disable last generation prompt history ([#15189](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15189))
* update preview on Replace Preview ([#15201](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15201))
* only fetch updates for extensions' active git branches ([#15233](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15233))
* put upscale postprocessing UI into an accordion ([#15223](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15223))
* Support dragdrop for URLs to read infotext ([#15262](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15262))
* use diskcache library for caching ([#15287](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15287), [#15299](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15299))
* Allow PNG-RGBA for Extras Tab ([#15334](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15334))
* Support cover images embedded in safetensors metadata ([#15319](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15319))
* faster interrupt when using NN upscale ([#15380](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15380))
* Extras upscaler: an input field to limit maximum side length for the output image ([#15293](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15293), [#15415](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15415), [#15417](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15417), [#15425](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15425))
* add an option to hide postprocessing options in Extras tab

### Extensions and API:
* ResizeHandleRow - allow overridden column scale parameter ([#15004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15004))
* call script_callbacks.ui_settings_callback earlier; fix extra-options-section built-in extension killing the ui if using a setting that doesn't exist
* make it possible to use zoom.js outside webui context ([#15286](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15286), [#15288](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15288))
* allow variants for extension name in metadata.ini ([#15290](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15290))
* make reloading UI scripts optional when doing Reload UI, and off by default
* put request: gr.Request at start of img2img function similar to txt2img
* open_folder as util ([#15442](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15442))
* make it possible to import extensions' script files as `import scripts.<filename>` ([#15423](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15423))

### Performance:
* performance optimization for extra networks HTML pages
* optimization for extra networks filtering
* optimization for extra networks sorting

### Bug Fixes:
* prevent escape button causing an interrupt when no generation has been made yet
* [bug] avoid double upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966))
* possible fix for reload button not appearing in some cases for extra networks.
* fix: the `split_threshold` parameter does not work when running Split oversized images ([#15006](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15006))
* Fix resize-handle visibility for vertical layout (mobile) ([#15010](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15010))
* register_tmp_file also for mtime ([#15012](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15012))
* Protect alphas_cumprod during refiner switchover ([#14979](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14979))
* Fix EXIF orientation in API image loading ([#15062](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15062))
* Only override emphasis if actually used in prompt ([#15141](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15141))
* Fix emphasis infotext missing from `params.txt` ([#15142](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15142))
* fix extract_style_text_from_prompt #15132 ([#15135](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15135))
* Fix Soft Inpaint for AnimateDiff ([#15148](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15148))
* edit-attention: deselect surrounding whitespace ([#15178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15178))
* chore: fix font not loaded ([#15183](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15183))
* use natural sort in extra networks when ordering by path
* Fix built-in lora system bugs caused by torch.nn.MultiheadAttention ([#15190](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15190))
* Avoid error from None in get_learned_conditioning ([#15191](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15191))
* Add entry to MassFileLister after writing metadata ([#15199](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15199))
* fix issue with Styles when Hires prompt is used ([#15269](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15269), [#15276](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15276))
* Strip comments from hires fix prompt ([#15263](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15263))
* Make imageviewer event listeners browser consistent ([#15261](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15261))
* Fix AttributeError in OFT when trying to get MultiheadAttention weight ([#15260](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15260))
* Add missing .mean() back ([#15239](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15239))
* fix "Restore progress" button ([#15221](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15221))
* fix ui-config for InputAccordion [custom_script_source] ([#15231](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15231))
* handle 0 wheel deltaY ([#15268](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15268))
* prevent alt menu for firefox ([#15267](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15267))
* fix: fix syntax errors ([#15179](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15179))
* restore outputs path ([#15307](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15307))
* Escape btn_copy_path filename ([#15316](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15316))
* Fix extra networks buttons when filename contains an apostrophe ([#15331](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15331))
* escape brackets in lora random prompt generator ([#15343](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15343))
* fix: Python version check for PyTorch installation compatibility ([#15390](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15390))
* fix typo in call_queue.py ([#15386](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15386))
* fix: when find already_loaded model, remove loaded by array index ([#15382](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15382))
* minor bug fix of sd model memory management ([#15350](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15350))
* Fix CodeFormer weight ([#15414](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15414))
* Fix: Remove script callbacks in ordered_callbacks_map ([#15428](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15428))
* fix limited file write (thanks, Sylwia)
* Fix extra-single-image API not doing upscale failed ([#15465](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15465))
* error handling paste_field callables ([#15470](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15470))

### Hardware:
* Add training support and change lspci for Ascend NPU ([#14981](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14981))
* Update to ROCm5.7 and PyTorch ([#14820](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14820))
* Better workaround for Navi1, removing --pre for Navi3 ([#15224](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15224))
* Ascend NPU wiki page ([#15228](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15228))

### Other:
* Update comment for Pad prompt/negative prompt v0 to add a warning about truncation, make it override the v1 implementation
* support resizable columns for touch (tablets) ([#15002](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15002))
* Fix #14591 using translated content to do categories mapping ([#14995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14995))
* Use `absolute` path for normalized filepath ([#15035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15035))
* resizeHandle handle double tap ([#15065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15065))
* --dat-models-path cmd flag ([#15039](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15039))
* Add a direct link to the binary release ([#15059](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15059))
* upscaler_utils: Reduce logging ([#15084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15084))
* Fix various typos with crate-ci/typos ([#15116](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15116))
* fix_jpeg_live_preview ([#15102](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15102))
* [alternative fix] can't load webui if selected wrong extra option in ui ([#15121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15121))
* Error handling for unsupported transparency ([#14958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14958))
* Add model description to searched terms ([#15198](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15198))
* bump action version ([#15272](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15272))
* PEP 604 annotations ([#15259](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15259))
* Automatically Set the Scale by value when user selects an Upscale Model ([#15244](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15244))
* move postprocessing-for-training into builtin extensions ([#15222](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15222))
* type hinting in shared.py ([#15211](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15211))
* update ruff to 0.3.3
* Update pytorch lightning utilities ([#15310](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15310))
* Add Size as an XYZ Grid option ([#15354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15354))
* Use HF_ENDPOINT variable for HuggingFace domain with default ([#15443](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15443))
* re-add update_file_entry ([#15446](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15446))
* create_infotext allow index and callable, re-work Hires prompt infotext ([#15460](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15460))
* update restricted_opts to include more options for --hide-ui-dir-config ([#15492](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15492))


## 1.8.0

### Features:
* Update torch to version 2.1.2
* Soft Inpainting ([#14208](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14208))
* FP8 support ([#14031](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14031), [#14327](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14327))
* Support for SDXL-Inpaint Model ([#14390](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14390))
* Use Spandrel for upscaling and face restoration architectures ([#14425](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14425), [#14467](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14467), [#14473](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14473), [#14474](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14474), [#14477](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14477), [#14476](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14476), [#14484](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14484), [#14500](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14500), [#14501](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14501), [#14504](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14504), [#14524](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14524), [#14809](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14809))
* Automatic backwards version compatibility (when loading infotexts from old images with program version specified, will add compatibility settings)
* Implement zero terminal SNR noise schedule option (**[SEED BREAKING CHANGE](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Seed-breaking-changes#180-dev-170-225-2024-01-01---zero-terminal-snr-noise-schedule-option)**, [#14145](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14145), [#14979](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14979))
* Add a [✨] button to run hires fix on selected image in the gallery (with help from [#14598](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14598), [#14626](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14626), [#14728](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14728))
* [Separate assets repository](https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets); serve fonts locally rather than from google's servers
* Official LCM Sampler Support ([#14583](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14583))
* Add support for DAT upscaler models ([#14690](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14690), [#15039](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15039))
* Extra Networks Tree View ([#14588](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14588), [#14900](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14900))
* NPU Support ([#14801](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14801))
* Prompt comments support

### Minor:
* Allow pasting in WIDTHxHEIGHT strings into the width/height fields ([#14296](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14296))
* add option: Live preview in full page image viewer ([#14230](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14230), [#14307](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14307))
* Add keyboard shortcuts for generate/skip/interrupt ([#14269](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14269))
* Better TCMALLOC support on different platforms ([#14227](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14227), [#14883](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14883), [#14910](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14910))
* Lora not found warning ([#14464](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14464))
* Adding negative prompts to Loras in extra networks ([#14475](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14475))
* xyz_grid: allow varying the seed along an axis separate from axis options ([#12180](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12180))
* option to convert VAE to bfloat16 (implementation of [#9295](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9295))
* Better IPEX support ([#14229](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14229), [#14353](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14353), [#14559](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14559), [#14562](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14562), [#14597](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14597))
* Option to interrupt after current generation rather than immediately ([#13653](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13653), [#14659](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14659))
* Fullscreen Preview control fading/disable ([#14291](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14291))
* Finer settings freezing control ([#13789](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13789))
* Increase Upscaler Limits ([#14589](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14589))
* Adjust brush size with hotkeys ([#14638](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14638))
* Add checkpoint info to csv log file when saving images ([#14663](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14663))
* Make more columns resizable ([#14740](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14740), [#14884](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14884))
* Add an option to not overlay original image for inpainting for #14727
* Add Pad conds v0 option to support same generation with DDIM as before 1.6.0
* Add "Interrupting..." placeholder.
* Button for refresh extensions list ([#14857](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14857))
* Add an option to disable normalization after calculating emphasis. ([#14874](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14874))
* When counting tokens, also include enabled styles (can be disabled in settings to revert to previous behavior)
* Configuration for the [📂] button for image gallery ([#14947](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14947))
* Support inference with LyCORIS BOFT networks ([#14871](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14871), [#14973](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14973))
* support resizable columns for touch (tablets) ([#15002](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15002))

### Extensions and API:
* Removed packages from requirements: basicsr, gfpgan, realesrgan; as well as their dependencies: absl-py, addict, beautifulsoup4, future, gdown, grpcio, importlib-metadata, lmdb, lpips, Markdown, platformdirs, PySocks, soupsieve, tb-nightly, tensorboard-data-server, tomli, Werkzeug, yapf, zipp, soupsieve
* Enable task ids for API ([#14314](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14314))
* add override_settings support for infotext API
* rename generation_parameters_copypaste module to infotext_utils
* prevent crash due to Script __init__ exception ([#14407](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14407))
* Bump numpy to 1.26.2 ([#14471](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14471))
* Add utility to inspect a model's dtype/device ([#14478](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14478))
* Implement general forward method for all method in built-in lora ext ([#14547](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14547))
* Execute model_loaded_callback after moving to target device ([#14563](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14563))
* Add self to CFGDenoiserParams ([#14573](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14573))
* Allow TLS with API only mode (--nowebui) ([#14593](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14593))
* New callback: postprocess_image_after_composite ([#14657](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14657))
* modules/api/api.py: add api endpoint to refresh embeddings list ([#14715](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14715))
* set_named_arg ([#14773](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14773))
* add before_token_counter callback and use it for prompt comments
* ResizeHandleRow - allow overridden column scale parameter ([#15004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15004))

### Performance:
* Massive performance improvement for extra networks directories with a huge number of files in them in an attempt to tackle #14507 ([#14528](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14528))
* Reduce unnecessary re-indexing extra networks directory ([#14512](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14512))
* Avoid unnecessary `isfile`/`exists` calls ([#14527](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14527))

### Bug Fixes:
* fix multiple bugs related to styles multi-file support ([#14203](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14203), [#14276](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14276), [#14707](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14707))
* Lora fixes ([#14300](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14300), [#14237](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14237), [#14546](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14546), [#14726](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14726))
* Re-add setting lost as part of e294e46 ([#14266](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14266))
* fix extras caption BLIP ([#14330](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14330))
* include infotext into saved init image for img2img ([#14452](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14452))
* xyz grid handle axis_type is None ([#14394](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14394))
* Update Added (Fixed) IPV6 Functionality When there is No Webui Argument Passed webui.py ([#14354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14354))
* fix API thread safe issues of txt2img and img2img ([#14421](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14421))
* handle selectable script_index is None ([#14487](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14487))
* handle config.json failed to load ([#14525](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14525), [#14767](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14767))
* paste infotext cast int as float ([#14523](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14523))
* Ensure GRADIO_ANALYTICS_ENABLED is set early enough ([#14537](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14537))
* Fix logging configuration again ([#14538](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14538))
* Handle CondFunc exception when resolving attributes ([#14560](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14560))
* Fix extras big batch crashes ([#14699](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14699))
* Fix using wrong model caused by alias ([#14655](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14655))
* Add # to the invalid_filename_chars list ([#14640](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14640))
* Fix extension check for requirements ([#14639](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14639))
* Fix tab indexes are reset after restart UI ([#14637](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14637))
* Fix nested manual cast ([#14689](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14689))
* Keep postprocessing upscale selected tab after restart ([#14702](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14702))
* XYZ grid: filter out blank vals when axis is int or float type (like int axis seed) ([#14754](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14754))
* fix CLIP Interrogator topN regex ([#14775](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14775))
* Fix dtype error in MHA layer/change dtype checking mechanism for manual cast ([#14791](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14791))
* catch load style.csv error ([#14814](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14814))
* fix error when editing extra networks card
* fix extra networks metadata failing to work properly when you create the .json file with metadata for the first time.
* util.walk_files extensions case insensitive ([#14879](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14879))
* if extensions page not loaded, prevent apply ([#14873](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14873))
* call the right function for token counter in img2img
* Fix the bugs that search/reload will disappear when using other ExtraNetworks extensions ([#14939](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14939))
* Gracefully handle mtime read exception from cache ([#14933](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14933))
* Only trigger interrupt on `Esc` when interrupt button visible ([#14932](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14932))
* Disable prompt token counters option actually disables token counting rather than just hiding results.
* avoid double upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966))
* Fix #14591 using translated content to do categories mapping ([#14995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14995))
* fix: the `split_threshold` parameter does not work when running Split oversized images ([#15006](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15006))
* Fix resize-handle for mobile ([#15010](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15010), [#15065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15065))

### Other:
* Assign id for "extra_options". Replace numeric field with slider. ([#14270](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14270))
* change state dict comparison to ref compare ([#14216](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14216))
* Bump torch-rocm to 5.6/5.7 ([#14293](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14293))
* Base output path off data path ([#14446](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14446))
* reorder training preprocessing modules in extras tab ([#14367](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14367))
* Remove `cleanup_models` code ([#14472](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14472))
* only rewrite ui-config when there is change ([#14352](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14352))
* Fix lint issue from 501993eb ([#14495](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14495))
* Update README.md ([#14548](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14548))
* hires button, fix seeds
* Logging: set formatter correctly for fallback logger too ([#14618](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14618))
* Read generation info from infotexts rather than json for internal needs (save, extract seed from generated pic) ([#14645](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14645))
* improve get_crop_region ([#14709](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14709))
* Bump safetensors' version to 0.4.2 ([#14782](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14782))
* add tooltip create_submit_box ([#14803](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14803))
* extensions tab table row hover highlight ([#14885](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14885))
* Always add timestamp to displayed image ([#14890](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14890))
* Added core.filemode=false so doesn't track changes in file permission… ([#14930](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14930))
* Normalize command-line argument paths ([#14934](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14934), [#15035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15035))
* Use original App Title in progress bar ([#14916](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14916))
* register_tmp_file also for mtime ([#15012](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15012))

## 1.7.0

### Features:
* settings tab rework: add search field, add categories, split UI settings page into many
* add altdiffusion-m18 support ([#13364](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13364))
* support inference with LyCORIS GLora networks ([#13610](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13610))
* add lora-embedding bundle system ([#13568](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13568))
* option to move prompt from top row into generation parameters
* add support for SSD-1B ([#13865](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13865))
* support inference with OFT networks ([#13692](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13692))
* script metadata and DAG sorting mechanism ([#13944](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13944))
* support HyperTile optimization ([#13948](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13948))
* add support for SD 2.1 Turbo ([#14170](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14170))
* remove Train->Preprocessing tab and put all its functionality into Extras tab
* initial IPEX support for Intel Arc GPU ([#14171](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14171))

### Minor:
* allow reading model hash from images in img2img batch mode ([#12767](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12767))
* add option to align with sgm repo's sampling implementation ([#12818](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818))
* extra field for lora metadata viewer: `ss_output_name` ([#12838](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12838))
* add action in settings page to calculate all SD checkpoint hashes ([#12909](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12909))
* add button to copy prompt to style editor ([#12975](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12975))
* add --skip-load-model-at-start option ([#13253](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13253))
* write infotext to gif images
* read infotext from gif images ([#13068](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13068))
* allow configuring the initial state of InputAccordion in ui-config.json ([#13189](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13189))
* allow editing whitespace delimiters for ctrl+up/ctrl+down prompt editing ([#13444](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13444))
* prevent accidentally closing popup dialogs ([#13480](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13480))
* added option to play notification sound or not ([#13631](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13631))
* show the preview image in the full screen image viewer if available ([#13459](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13459))
* support for webui.settings.bat ([#13638](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13638))
* add an option to not print stack traces on ctrl+c
* start/restart generation by Ctrl (Alt) + Enter ([#13644](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13644))
* update prompts_from_file script to allow concatenating entries with the general prompt ([#13733](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13733))
* added a visible checkbox to input accordion
* added an option to hide all txt2img/img2img parameters in an accordion ([#13826](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13826))
* added 'Path' sorting option for Extra network cards ([#13968](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13968))
* enable prompt hotkeys in style editor ([#13931](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13931))
* option to show batch img2img results in UI ([#14009](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14009))
* infotext updates: add option to disregard certain infotext fields, add option to not include VAE in infotext, add explanation to infotext settings page, move some options to infotext settings page
* add FP32 fallback support on sd_vae_approx ([#14046](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046))
* support XYZ scripts / split hires path from unet ([#14126](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14126))
* allow use of multiple styles csv files ([#14125](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14125))
* make extra network card description plaintext by default, with an option (Treat card description as HTML) to re-enable HTML as it was (originally by [#13241](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13241))

### Extensions and API:
* update gradio to 3.41.2
* support installed extensions list api ([#12774](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12774))
* update pnginfo API to return dict with parsed values
* add noisy latent to `ExtraNoiseParams` for callback ([#12856](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12856))
* show extension datetime in UTC ([#12864](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12864), [#12865](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12865), [#13281](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13281))
* add an option to choose how to combine hires fix and refiner
* include program version in info response. ([#13135](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13135))
* sd_unet support for SDXL
* patch DDPM.register_betas so that users can put given_betas in model yaml ([#13276](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13276))
* xyz_grid: add prepare ([#13266](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13266))
* allow multiple localization files with same language in extensions ([#13077](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13077))
* add onEdit function for js and rework token-counter.js to use it
* fix the key error exception when processing override_settings keys ([#13567](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13567))
* ability for extensions to return custom data via api in response.images ([#13463](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13463))
* call state.jobnext() before postprocess*() ([#13762](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13762))
* add option to set notification sound volume ([#13884](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13884))
* update Ruff to 0.1.6 ([#14059](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14059))
* add Block component creation callback ([#14119](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14119))
* catch uncaught exception with ui creation scripts ([#14120](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14120))
* use extension name for determining an extension is installed in the index ([#14063](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14063))
* update is_installed() from launch_utils.py to fix reinstalling already installed packages ([#14192](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14192))

### Bug Fixes:
* fix pix2pix producing bad results
* fix defaults settings page breaking when any of main UI tabs are hidden
* fix error that causes some extra networks to be disabled if both <lora:> and <lyco:> are present in the prompt
* fix for Reload UI function: if you reload UI on one tab, other opened tabs will no longer stop working
* prevent duplicate resize handler ([#12795](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12795))
* small typo: vae resolve bug ([#12797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12797))
* hide broken image crop tool ([#12792](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12792))
* don't show hidden samplers in dropdown for XYZ script ([#12780](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12780))
* fix style editing dialog breaking if it's opened in both img2img and txt2img tabs
* hide --gradio-auth and --api-auth values from /internal/sysinfo report
* add missing infotext for RNG in options ([#12819](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12819))
* fix notification not playing when built-in webui tab is inactive ([#12834](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12834))
* honor `--skip-install` for extension installers ([#12832](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12832))
* don't print blank stdout in extension installers ([#12833](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12833), [#12855](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12855))
* get progressbar to display correctly in extensions tab
* keep order in list of checkpoints when loading model that doesn't have a checksum
* fix inpainting models in txt2img creating black pictures
* fix generation params regex ([#12876](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12876))
* fix batch img2img output dir with script ([#12926](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12926))
* fix #13080 - Hypernetwork/TI preview generation ([#13084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13084))
* fix bug with sigma min/max overrides. ([#12995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12995))
* more accurate check for enabling cuDNN benchmark on 16XX cards ([#12924](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12924))
* don't use multicond parser for negative prompt counter ([#13118](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13118))
* fix data-sort-name containing spaces ([#13412](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13412))
* update card on correct tab when editing metadata ([#13411](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13411))
* fix viewing/editing metadata when filename contains an apostrophe ([#13395](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13395))
* fix: --sd_model in "Prompts from file or textbox" script is not working ([#13302](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13302))
* better Support for Portable Git ([#13231](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13231))
* fix issues when webui_dir is not work_dir ([#13210](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13210))
* fix: lora-bias-backup don't reset cache ([#13178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13178))
* account for customizable extra network separators when removing extra network text from the prompt ([#12877](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12877))
* re fix batch img2img output dir with script ([#13170](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13170))
* fix `--ckpt-dir` path separator and option use `short name` for checkpoint dropdown ([#13139](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13139))
* consolidated allowed preview formats, Fix extra network `.gif` not working as preview ([#13121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13121))
* fix venv_dir=- environment variable not working as expected on linux ([#13469](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13469))
* repair unload sd checkpoint button
* edit-attention fixes ([#13533](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13533))
* fix bug when using --gfpgan-models-path ([#13718](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13718))
* properly apply sort order for extra network cards when selected from dropdown
* fixes generation restart not working for some users when 'Ctrl+Enter' is pressed ([#13962](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13962))
* thread safe extra network list_items ([#13014](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13014))
* fix not able to exit metadata popup when pop up is too big ([#14156](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14156))
* fix auto focal point crop for opencv >= 4.8 ([#14121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14121))
* make 'use-cpu all' actually apply to 'all' ([#14131](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14131))
* extras tab batch: actually use original filename
* make webui not crash when running with --disable-all-extensions option

### Other:
* non-local condition ([#12814](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12814))
* fix minor typos ([#12827](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12827))
* remove xformers Python version check ([#12842](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12842))
* style: file-metadata word-break ([#12837](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12837))
* revert SGM noise multiplier change for img2img because it breaks hires fix
* do not change quicksettings dropdown option when value returned is `None` ([#12854](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12854))
* [RC 1.6.0 - zoom is partly hidden] Update style.css ([#12839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12839))
* chore: change extension time format ([#12851](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12851))
* WEBUI.SH - Use torch 2.1.0 release candidate for Navi 3 ([#12929](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12929))
* add Fallback at images.read_info_from_image if exif data was invalid ([#13028](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13028))
* update cmd arg description ([#12986](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12986))
* fix: update shared.opts.data when add_option ([#12957](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12957), [#13213](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13213))
* restore missing tooltips ([#12976](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12976))
* use default dropdown padding on mobile ([#12880](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12880))
* put enable console prompts option into settings from commandline args ([#13119](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13119))
* fix some deprecated types ([#12846](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12846))
* bump to torchsde==0.2.6 ([#13418](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13418))
* update dragdrop.js ([#13372](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13372))
* use orderdict as lru cache:opt/bug ([#13313](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13313))
* XYZ if not include sub grids do not save sub grid ([#13282](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13282))
* initialize state.time_start before state.job_count ([#13229](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13229))
* fix fieldname regex ([#13458](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13458))
* change denoising_strength default to None. ([#13466](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13466))
* fix regression ([#13475](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13475))
* fix IndexError ([#13630](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13630))
* fix: checkpoints_loaded:{checkpoint:state_dict}, model.load_state_dict issue in dict value empty ([#13535](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13535))
* update bug_report.yml ([#12991](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12991))
* requirements_versions httpx==0.24.1 ([#13839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13839))
* fix parenthesis auto selection ([#13829](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13829))
* fix #13796 ([#13797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13797))
* corrected a typo in `modules/cmd_args.py` ([#13855](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13855))
* feat: fix randn found element of type float at pos 2 ([#14004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14004))
* adds tqdm handler to logging_config.py for progress bar integration ([#13996](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13996))
* hotfix: call shared.state.end() after postprocessing done ([#13977](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13977))
* fix dependency address patch 1 ([#13929](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13929))
* save sysinfo as .json ([#14035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14035))
* move exception_records related methods to errors.py ([#14084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14084))
* compatibility ([#13936](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13936))
* json.dump(ensure_ascii=False) ([#14108](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14108))
* dir buttons start with / so only the correct dir will be shown and no… ([#13957](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13957))
* alternate implementation for unet forward replacement that does not depend on hijack being applied
* re-add `keyedit_delimiters_whitespace` setting lost as part of commit e294e46 ([#14178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14178))
* fix `save_samples` being checked early when saving masked composite ([#14177](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14177))
* slight optimization for mask and mask_composite ([#14181](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14181))
* add import_hook hack to work around basicsr/torchvision incompatibility ([#14186](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14186))

## 1.6.1

### Bug Fixes:
 * fix an error causing the webui to fail to start ([#13839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13839))

## 1.6.0

### Features:
 * refiner support [#12371](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371)
 * add NV option for Random number generator source setting, which allows to generate same pictures on CPU/AMD/Mac as on NVidia videocards
 * add style editor dialog
 * hires fix: add an option to use a different checkpoint for second pass ([#12181](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12181))
 * option to keep multiple loaded models in memory ([#12227](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12227))
 * new samplers: Restart, DPM++ 2M SDE Exponential, DPM++ 2M SDE Heun, DPM++ 2M SDE Heun Karras, DPM++ 2M SDE Heun Exponential, DPM++ 3M SDE, DPM++ 3M SDE Karras, DPM++ 3M SDE Exponential ([#12300](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12300), [#12519](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12519), [#12542](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12542))
 * rework DDIM, PLMS, UniPC to use CFG denoiser same as in k-diffusion samplers:
   * makes all of them work with img2img
   * makes prompt composition possible (AND)
   * makes them available for SDXL
 * always show extra networks tabs in the UI ([#11808](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11808))
 * use less RAM when creating models ([#11958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11958), [#12599](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12599))
 * textual inversion inference support for SDXL
 * extra networks UI: show metadata for SD checkpoints
 * checkpoint merger: add metadata support 
 * prompt editing and attention: add support for whitespace after the number ([ red : green : 0.5 ]) (seed breaking change) ([#12177](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12177))
 * VAE: allow selecting own VAE for each checkpoint (in user metadata editor)
 * VAE: add selected VAE to infotext
 * options in main UI: add own separate setting for txt2img and img2img, correctly read values from pasted infotext, add setting for column count ([#12551](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12551))
 * add resize handle to txt2img and img2img tabs, allowing to change the amount of horizontal space given to generation parameters and resulting image gallery ([#12687](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12687), [#12723](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12723))
 * change default behavior for batching cond/uncond -- now it's on by default, and is disabled by an UI setting (Optimizations -> Batch cond/uncond) - if you are on lowvram/medvram and are getting OOM exceptions, you will need to enable it
 * show current position in queue and make it so that requests are processed in the order of arrival ([#12707](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12707))
 * add `--medvram-sdxl` flag that only enables `--medvram` for SDXL models
 * prompt editing timeline has separate range for first pass and hires-fix pass (seed breaking change) ([#12457](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12457))

### Minor:
 * img2img batch: RAM savings, VRAM savings, .tif, .tiff in img2img batch ([#12120](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12120), [#12514](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12514), [#12515](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12515))
 * postprocessing/extras: RAM savings ([#12479](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12479))
 * XYZ: in the axis labels, remove pathnames from model filenames
 * XYZ: support hires sampler ([#12298](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12298))
 * XYZ: new option: use text inputs instead of dropdowns ([#12491](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12491))
 * add gradio version warning
 * sort list of VAE checkpoints ([#12297](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12297))
 * use transparent white for mask in inpainting, along with an option to select the color ([#12326](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12326))
 * move some settings to their own section: img2img, VAE
 * add checkbox to show/hide dirs for extra networks
 * Add TAESD(or more) options for all the VAE encode/decode operation ([#12311](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12311))
 * gradio theme cache, new gradio themes, along with explanation that the user can input his own values ([#12346](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12346), [#12355](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12355))
 * sampler fixes/tweaks: s_tmax, s_churn, s_noise, s_tmax ([#12354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12354), [#12356](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12356), [#12357](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12357), [#12358](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12358), [#12375](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12375), [#12521](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12521))
 * update README.md with correct instructions for Linux installation ([#12352](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12352))
 * option to not save incomplete images, on by default ([#12338](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12338))
 * enable cond cache by default
 * git autofix for repos that are corrupted ([#12230](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12230))
 * allow to open images in new browser tab by middle mouse button ([#12379](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12379))
 * automatically open webui in browser when running "locally" ([#12254](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12254))
 * put commonly used samplers on top, make DPM++ 2M Karras the default choice
 * zoom and pan: option to auto-expand a wide image, improved integration ([#12413](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12413), [#12727](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12727))
 * option to cache Lora networks in memory
 * rework hires fix UI to use accordion
 * face restoration and tiling moved to settings - use "Options in main UI" setting if you want them back
 * change quicksettings items to have variable width
 * Lora: add Norm module, add support for bias ([#12503](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12503))
 * Lora: output warnings in UI rather than fail for unfitting loras; switch to logging for error output in console
 * support search and display of hashes for all extra network items ([#12510](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12510))
 * add extra noise param for img2img operations ([#12564](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12564))
 * support for Lora with bias ([#12584](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12584))
 * make interrupt quicker ([#12634](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12634))
 * configurable gallery height ([#12648](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12648))
 * make results column sticky ([#12645](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12645))
 * more hash filename patterns ([#12639](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12639))
 * make image viewer actually fit the whole page ([#12635](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12635))
 * make progress bar work independently from live preview display which results in it being updated a lot more often
 * forbid Full live preview method for medvram and add a setting to undo the forbidding
 * make it possible to localize tooltips and placeholders
 * add option to align with sgm repo's sampling implementation ([#12818](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818))
 * Restore faces and Tiling generation parameters have been moved to settings out of main UI
   * if you want to put them back into main UI, use `Options in main UI` setting on the UI page.

### Extensions and API:
 * gradio 3.41.2
 * also bump versions for packages: transformers, GitPython, accelerate, scikit-image, timm, tomesd
 * support tooltip kwarg for gradio elements: gr.Textbox(label='hello', tooltip='world')
 * properly clear the total console progressbar when using txt2img and img2img from API
 * add cmd_arg --disable-extra-extensions and --disable-all-extensions ([#12294](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12294))
 * shared.py and webui.py split into many files
 * add --loglevel commandline argument for logging
 * add a custom UI element that combines accordion and checkbox
 * avoid importing gradio in tests because it spams warnings
 * put infotext label for setting into OptionInfo definition rather than in a separate list
 * make `StableDiffusionProcessingImg2Img.mask_blur` a property, make more inline with PIL `GaussianBlur` ([#12470](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12470))
 * option to make scripts UI without gr.Group
 * add a way for scripts to register a callback for before/after just a single component's creation
 * use dataclass for StableDiffusionProcessing
 * store patches for Lora in a specialized module instead of inside torch
 * support http/https URLs in API ([#12663](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12663), [#12698](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12698))
 * add extra noise callback ([#12616](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12616))
 * dump current stack traces when exiting with SIGINT
 * add type annotations for extra fields of shared.sd_model

### Bug Fixes:
 * Don't crash if out of local storage quota for javascriot localStorage
 * XYZ plot do not fail if an exception occurs
 * fix missing TI hash in infotext if generation uses both negative and positive TI ([#12269](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12269))
 * localization fixes ([#12307](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12307))
 * fix sdxl model invalid configuration after the hijack
 * correctly toggle extras checkbox for infotext paste ([#12304](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12304))
 * open raw sysinfo link in new page ([#12318](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12318))
 * prompt parser: Account for empty field in alternating words syntax ([#12319](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12319))
 * add tab and carriage return to invalid filename chars ([#12327](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12327))
 * fix api only Lora not working ([#12387](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12387))
 * fix options in main UI misbehaving when there's just one element
 * make it possible to use a sampler from infotext even if it's hidden in the dropdown
 * fix styles missing from the prompt in infotext when making a grid of batch of multiplie images
 * prevent bogus progress output in console when calculating hires fix dimensions
 * fix --use-textbox-seed
 * fix broken `Lora/Networks: use old method` option ([#12466](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12466))
 * properly return `None` for VAE hash when using `--no-hashing` ([#12463](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12463))
 * MPS/macOS fixes and optimizations ([#12526](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12526))
 * add second_order to samplers that mistakenly didn't have it
 * when refreshing cards in extra networks UI, do not discard user's custom resolution
 * fix processing error that happens if batch_size is not a multiple of how many prompts/negative prompts there are ([#12509](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12509))
 * fix inpaint upload for alpha masks ([#12588](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12588))
 * fix exception when image sizes are not integers ([#12586](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12586))
 * fix incorrect TAESD Latent scale ([#12596](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12596))
 * auto add data-dir to gradio-allowed-path ([#12603](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12603))
 * fix exception if extensuions dir is missing ([#12607](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12607))
 * fix issues with api model-refresh and vae-refresh ([#12638](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12638))
 * fix img2img background color for transparent images option not being used ([#12633](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12633))
 * attempt to resolve NaN issue with unstable VAEs in fp32 mk2 ([#12630](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12630))
 * implement missing undo hijack for SDXL
 * fix xyz swap axes ([#12684](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12684))
 * fix errors in backup/restore tab if any of config files are broken ([#12689](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12689))
 * fix SD VAE switch error after model reuse ([#12685](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12685))
 * fix trying to create images too large for the chosen format ([#12667](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12667))
 * create Gradio temp directory if necessary ([#12717](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12717))
 * prevent possible cache loss if exiting as it's being written by using an atomic operation to replace the cache with the new version
 * set devices.dtype_unet correctly
 * run RealESRGAN on GPU for non-CUDA devices ([#12737](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12737))
 * prevent extra network buttons being obscured by description for very small card sizes ([#12745](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12745))
 * fix error that causes some extra networks to be disabled if both <lora:> and <lyco:> are present in the prompt
 * fix defaults settings page breaking when any of main UI tabs are hidden
 * fix incorrect save/display of new values in Defaults page in settings
 * fix for Reload UI function: if you reload UI on one tab, other opened tabs will no longer stop working
 * fix an error that prevents VAE being reloaded after an option change if a VAE near the checkpoint exists ([#12797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12737))
 * hide broken image crop tool ([#12792](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12737))
 * don't show hidden samplers in dropdown for XYZ script ([#12780](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12737))
 * fix style editing dialog breaking if it's opened in both img2img and txt2img tabs
 * fix a bug allowing users to bypass gradio and API authentication (reported by vysecurity) 
 * fix notification not playing when built-in webui tab is inactive ([#12834](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12834))
 * honor `--skip-install` for extension installers ([#12832](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12832))
 * don't print blank stdout in extension installers ([#12833](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12832), [#12855](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12855))
 * do not change quicksettings dropdown option when value returned is `None` ([#12854](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12854))
 * get progressbar to display correctly in extensions tab


## 1.5.2

### Bug Fixes:
 * fix memory leak when generation fails
 * update doggettx cross attention optimization to not use an unreasonable amount of memory in some edge cases -- suggestion by MorkTheOrk


## 1.5.1

### Minor:
 * support parsing text encoder blocks in some new LoRAs
 * delete scale checker script due to user demand

### Extensions and API:
 * add postprocess_batch_list script callback

### Bug Fixes:
 * fix TI training for SD1
 * fix reload altclip model error
 * prepend the pythonpath instead of overriding it
 * fix typo in SD_WEBUI_RESTARTING
 * if txt2img/img2img raises an exception, finally call state.end()
 * fix composable diffusion weight parsing
 * restyle Startup profile for black users
 * fix webui not launching with --nowebui
 * catch exception for non git extensions
 * fix some options missing from /sdapi/v1/options
 * fix for extension update status always saying "unknown"
 * fix display of extra network cards that have `<>` in the name
 * update lora extension to work with python 3.8


## 1.5.0

### Features:
 * SD XL support
 * user metadata system for custom networks
 * extended Lora metadata editor: set activation text, default weight, view tags, training info
 * Lora extension rework to include other types of networks (all that were previously handled by LyCORIS extension)
 * show github stars for extensions
 * img2img batch mode can read extra stuff from png info
 * img2img batch works with subdirectories
 * hotkeys to move prompt elements: alt+left/right
 * restyle time taken/VRAM display
 * add textual inversion hashes to infotext
 * optimization: cache git extension repo information
 * move generate button next to the generated picture for mobile clients
 * hide cards for networks of incompatible Stable Diffusion version in Lora extra networks interface
 * skip installing packages with pip if they all are already installed - startup speedup of about 2 seconds

### Minor:
 * checkbox to check/uncheck all extensions in the Installed tab
 * add gradio user to infotext and to filename patterns
 * allow gif for extra network previews
 * add options to change colors in grid
 * use natural sort for items in extra networks
 * Mac: use empty_cache() from torch 2 to clear VRAM
 * added automatic support for installing the right libraries for Navi3 (AMD)
 * add option SWIN_torch_compile to accelerate SwinIR upscale
 * suppress printing TI embedding info at start to console by default
 * speedup extra networks listing
 * added `[none]` filename token.
 * removed thumbs extra networks view mode (use settings tab to change width/height/scale to get thumbs)
 * add always_discard_next_to_last_sigma option to XYZ plot
 * automatically switch to 32-bit float VAE if the generated picture has NaNs without the need for `--no-half-vae` commandline flag.
 
### Extensions and API:
 * api endpoints: /sdapi/v1/server-kill, /sdapi/v1/server-restart, /sdapi/v1/server-stop
 * allow Script to have custom metaclass
 * add model exists status check /sdapi/v1/options
 * rename --add-stop-route to --api-server-stop
 * add `before_hr` script callback
 * add callback `after_extra_networks_activate`
 * disable rich exception output in console for API by default, use WEBUI_RICH_EXCEPTIONS env var to enable
 * return http 404 when thumb file not found
 * allow replacing extensions index with environment variable
 
### Bug Fixes:
 * fix for catch errors when retrieving extension index #11290
 * fix very slow loading speed of .safetensors files when reading from network drives
 * API cache cleanup
 * fix UnicodeEncodeError when writing to file CLIP Interrogator batch mode
 * fix warning of 'has_mps' deprecated from PyTorch
 * fix problem with extra network saving images as previews losing generation info
 * fix throwing exception when trying to resize image with I;16 mode
 * fix for #11534: canvas zoom and pan extension hijacking shortcut keys
 * fixed launch script to be runnable from any directory
 * don't add "Seed Resize: -1x-1" to API image metadata
 * correctly remove end parenthesis with ctrl+up/down
 * fixing --subpath on newer gradio version
 * fix: check fill size none zero when resize  (fixes #11425)
 * use submit and blur for quick settings textbox
 * save img2img batch with images.save_image()
 * prevent running preload.py for disabled extensions
 * fix: previously, model name was added together with directory name to infotext and to [model_name] filename pattern; directory name is now not included


## 1.4.1

### Bug Fixes:
 * add queue lock for refresh-checkpoints

## 1.4.0

### Features:
 * zoom controls for inpainting
 * run basic torch calculation at startup in parallel to reduce the performance impact of first generation
 * option to pad prompt/neg prompt to be same length
 * remove taming_transformers dependency
 * custom k-diffusion scheduler settings
 * add an option to show selected settings in main txt2img/img2img UI
 * sysinfo tab in settings
 * infer styles from prompts when pasting params into the UI
 * an option to control the behavior of the above

### Minor:
 * bump Gradio to 3.32.0
 * bump xformers to 0.0.20
 * Add option to disable token counters
 * tooltip fixes & optimizations
 * make it possible to configure filename for the zip download
 * `[vae_filename]` pattern for filenames
 * Revert discarding penultimate sigma for DPM-Solver++(2M) SDE
 * change UI reorder setting to multiselect
 * read version info form CHANGELOG.md if git version info is not available
 * link footer API to Wiki when API is not active
 * persistent conds cache (opt-in optimization)
 
### Extensions:
 * After installing extensions, webui properly restarts the process rather than reloads the UI 
 * Added VAE listing to web API. Via: /sdapi/v1/sd-vae
 * custom unet support
 * Add onAfterUiUpdate callback
 * refactor EmbeddingDatabase.register_embedding() to allow unregistering
 * add before_process callback for scripts
 * add ability for alwayson scripts to specify section and let user reorder those sections
 
### Bug Fixes:
 * Fix dragging text to prompt
 * fix incorrect quoting for infotext values with colon in them
 * fix "hires. fix" prompt sharing same labels with txt2img_prompt
 * Fix s_min_uncond default type int
 * Fix for #10643 (Inpainting mask sometimes not working)
 * fix bad styling for thumbs view in extra networks #10639
 * fix for empty list of optimizations #10605
 * small fixes to prepare_tcmalloc for Debian/Ubuntu compatibility
 * fix --ui-debug-mode exit
 * patch GitPython to not use leaky persistent processes
 * fix duplicate Cross attention optimization after UI reload
 * torch.cuda.is_available() check for SdOptimizationXformers
 * fix hires fix using wrong conds in second pass if using Loras.
 * handle exception when parsing generation parameters from png info
 * fix upcast attention dtype error
 * forcing Torch Version to 1.13.1 for RX 5000 series GPUs
 * split mask blur into X and Y components, patch Outpainting MK2 accordingly
 * don't die when a LoRA is a broken symlink
 * allow activation of Generate Forever during generation


## 1.3.2

### Bug Fixes:
 * fix files served out of tmp directory even if they are saved to disk
 * fix postprocessing overwriting parameters

## 1.3.1

### Features:
 * revert default cross attention optimization to Doggettx

### Bug Fixes:
 * fix bug: LoRA don't apply on dropdown list sd_lora
 * fix png info always added even if setting is not enabled
 * fix some fields not applying in xyz plot
 * fix "hires. fix" prompt sharing same labels with txt2img_prompt
 * fix lora hashes not being added properly to infotex if there is only one lora
 * fix --use-cpu failing to work properly at startup
 * make --disable-opt-split-attention command line option work again

## 1.3.0

### Features:
 * add UI to edit defaults
 * token merging (via dbolya/tomesd)
 * settings tab rework: add a lot of additional explanations and links
 * load extensions' Git metadata in parallel to loading the main program to save a ton of time during startup
 * update extensions table: show branch, show date in separate column, and show version from tags if available
 * TAESD - another option for cheap live previews
 * allow choosing sampler and prompts for second pass of hires fix - hidden by default, enabled in settings
 * calculate hashes for Lora
 * add lora hashes to infotext
 * when pasting infotext, use infotext's lora hashes to find local loras for `<lora:xxx:1>` entries whose hashes match loras the user has
 * select cross attention optimization from UI

### Minor:
 * bump Gradio to 3.31.0
 * bump PyTorch to 2.0.1 for macOS and Linux AMD
 * allow setting defaults for elements in extensions' tabs
 * allow selecting file type for live previews
 * show "Loading..." for extra networks when displaying for the first time
 * suppress ENSD infotext for samplers that don't use it
 * clientside optimizations
 * add options to show/hide hidden files and dirs in extra networks, and to not list models/files in hidden directories
 * allow whitespace in styles.csv
 * add option to reorder tabs
 * move some functionality (swap resolution and set seed to -1) to client
 * option to specify editor height for img2img
 * button to copy image resolution into img2img width/height sliders
 * switch from pyngrok to ngrok-py
 * lazy-load images in extra networks UI
 * set "Navigate image viewer with gamepad" option to false by default, by request
 * change upscalers to download models into user-specified directory (from commandline args) rather than the default models/<...>
 * allow hiding buttons in ui-config.json

### Extensions:
 * add /sdapi/v1/script-info api
 * use Ruff to lint Python code
 * use ESlint to lint Javascript code
 * add/modify CFG callbacks for Self-Attention Guidance extension
 * add command and endpoint for graceful server stopping
 * add some locals (prompts/seeds/etc) from processing function into the Processing class as fields
 * rework quoting for infotext items that have commas in them to use JSON (should be backwards compatible except for cases where it didn't work previously)
 * add /sdapi/v1/refresh-loras api checkpoint post request
 * tests overhaul

### Bug Fixes:
 * fix an issue preventing the program from starting if the user specifies a bad Gradio theme
 * fix broken prompts from file script
 * fix symlink scanning for extra networks
 * fix --data-dir ignored when launching via webui-user.bat COMMANDLINE_ARGS
 * allow web UI to be ran fully offline
 * fix inability to run with --freeze-settings
 * fix inability to merge checkpoint without adding metadata
 * fix extra networks' save preview image not adding infotext for jpeg/webm
 * remove blinking effect from text in hires fix and scale resolution preview
 * make links to `http://<...>.git` extensions work in the extension tab
 * fix bug with webui hanging at startup due to hanging git process


## 1.2.1

### Features:
 * add an option to always refer to LoRA by filenames

### Bug Fixes:
 * never refer to LoRA by an alias if multiple LoRAs have same alias or the alias is called none
 * fix upscalers disappearing after the user reloads UI
 * allow bf16 in safe unpickler (resolves problems with loading some LoRAs)
 * allow web UI to be ran fully offline
 * fix localizations not working
 * fix error for LoRAs: `'LatentDiffusion' object has no attribute 'lora_layer_mapping'`

## 1.2.0

### Features:
 * do not wait for Stable Diffusion model to load at startup
 * add filename patterns: `[denoising]`
 * directory hiding for extra networks: dirs starting with `.` will hide their cards on extra network tabs unless specifically searched for
 * LoRA: for the `<...>` text in prompt, use name of LoRA that is in the metadata of the file, if present, instead of filename (both can be used to activate LoRA)
 * LoRA: read infotext params from kohya-ss's extension parameters if they are present and if his extension is not active
 * LoRA: fix some LoRAs not working (ones that have 3x3 convolution layer)
 * LoRA: add an option to use old method of applying LoRAs (producing same results as with kohya-ss)
 * add version to infotext, footer and console output when starting
 * add links to wiki for filename pattern settings
 * add extended info for quicksettings setting and use multiselect input instead of a text field

### Minor:
 * bump Gradio to 3.29.0
 * bump PyTorch to 2.0.1
 * `--subpath` option for gradio for use with reverse proxy
 * Linux/macOS: use existing virtualenv if already active (the VIRTUAL_ENV environment variable)
 * do not apply localizations if there are none (possible frontend optimization)
 * add extra `None` option for VAE in XYZ plot
 * print error to console when batch processing in img2img fails
 * create HTML for extra network pages only on demand
 * allow directories starting with `.` to still list their models for LoRA, checkpoints, etc
 * put infotext options into their own category in settings tab
 * do not show licenses page when user selects Show all pages in settings

### Extensions:
 * tooltip localization support
 * add API method to get LoRA models with prompt

### Bug Fixes:
 * re-add `/docs` endpoint
 * fix gamepad navigation
 * make the lightbox fullscreen image function properly
 * fix squished thumbnails in extras tab
 * keep "search" filter for extra networks when user refreshes the tab (previously it showed everything after you refreshed)
 * fix webui showing the same image if you configure the generation to always save results into same file
 * fix bug with upscalers not working properly
 * fix MPS on PyTorch 2.0.1, Intel Macs
 * make it so that custom context menu from contextMenu.js only disappears after user's click, ignoring non-user click events
 * prevent Reload UI button/link from reloading the page when it's not yet ready
 * fix prompts from file script failing to read contents from a drag/drop file


## 1.1.1
### Bug Fixes:
 * fix an error that prevents running webui on PyTorch<2.0 without --disable-safe-unpickle

## 1.1.0
### Features:
 * switch to PyTorch 2.0.0 (except for AMD GPUs)
 * visual improvements to custom code scripts
 * add filename patterns: `[clip_skip]`, `[hasprompt<>]`, `[batch_number]`, `[generation_number]`
 * add support for saving init images in img2img, and record their hashes in infotext for reproducibility
 * automatically select current word when adjusting weight with ctrl+up/down
 * add dropdowns for X/Y/Z plot
 * add setting: Stable Diffusion/Random number generator source: makes it possible to make images generated from a given manual seed consistent across different GPUs
 * support Gradio's theme API
 * use TCMalloc on Linux by default; possible fix for memory leaks
 * add optimization option to remove negative conditioning at low sigma values #9177
 * embed model merge metadata in .safetensors file
 * extension settings backup/restore feature #9169
 * add "resize by" and "resize to" tabs to img2img
 * add option "keep original size" to textual inversion images preprocess
 * image viewer scrolling via analog stick
 * button to restore the progress from session lost / tab reload

### Minor:
 * bump Gradio to 3.28.1
 * change "scale to" to sliders in Extras tab
 * add labels to tool buttons to make it possible to hide them
 * add tiled inference support for ScuNET
 * add branch support for extension installation
 * change Linux installation script to install into current directory rather than `/home/username`
 * sort textual inversion embeddings by name (case-insensitive)
 * allow styles.csv to be symlinked or mounted in docker
 * remove the "do not add watermark to images" option
 * make selected tab configurable with UI config
 * make the extra networks UI fixed height and scrollable
 * add `disable_tls_verify` arg for use with self-signed certs

### Extensions:
 * add reload callback
 * add `is_hr_pass` field for processing

### Bug Fixes:
 * fix broken batch image processing on 'Extras/Batch Process' tab
 * add "None" option to extra networks dropdowns
 * fix FileExistsError for CLIP Interrogator
 * fix /sdapi/v1/txt2img endpoint not working on Linux #9319
 * fix disappearing live previews and progressbar during slow tasks
 * fix fullscreen image view not working properly in some cases
 * prevent alwayson_scripts args param resizing script_arg list when they are inserted in it
 * fix prompt schedule for second order samplers
 * fix image mask/composite for weird resolutions #9628
 * use correct images for previews when using AND (see #9491)
 * one broken image in img2img batch won't stop all processing
 * fix image orientation bug in train/preprocess
 * fix Ngrok recreating tunnels every reload
 * fix `--realesrgan-models-path` and `--ldsr-models-path` not working
 * fix `--skip-install` not working
 * use SAMPLE file format in Outpainting Mk2 & Poorman
 * do not fail all LoRAs if some have failed to load when making a picture

## 1.0.0
  * everything


================================================
FILE: CITATION.cff
================================================
cff-version: 1.2.0
message: "If you use this software, please cite it as below."
authors:
  - given-names: AUTOMATIC1111
title: "Stable Diffusion Web UI"
date-released: 2022-08-22
url: "https://github.com/AUTOMATIC1111/stable-diffusion-webui"


================================================
FILE: CODEOWNERS
================================================
*       @AUTOMATIC1111

# if you were managing a localization and were removed from this file, this is because
# the intended way to do localizations now is via extensions. See:
# https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Developing-extensions
# Make a repo with your localization and since you are still listed as a collaborator
# you can add it to the wiki page yourself. This change is because some people complained
# the git commit log is cluttered with things unrelated to almost everyone and
# because I believe this is the best overall for the project to handle localizations almost
# entirely without my oversight.




================================================
FILE: LICENSE.txt
================================================
                    GNU AFFERO GENERAL PUBLIC LICENSE
                       Version 3, 19 November 2007

                    Copyright (c) 2023 AUTOMATIC1111

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works.  By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.

  A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate.  Many developers of free software are heartened and
encouraged by the resulting cooperation.  However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.

  The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community.  It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server.  Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.

  An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals.  This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.

  The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

  0. Definitions.

  "This License" refers to version 3 of the GNU Affero General Public License.

  "Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License.  Each licensee is addressed as "you".  "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy.  The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy.  Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies.  Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License.  If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it.  "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form.  A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities.  However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work.  For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met.  This License explicitly affirms your unlimited
permission to run the unmodified Program.  The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work.  This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force.  You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright.  Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below.  Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7.  This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy.  This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged.  This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

  A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit.  Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

  You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source.  This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge.  You need not require recipients to copy the
    Corresponding Source along with the object code.  If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source.  Regardless of what server hosts the
    Corresponding Source, you remain obligated to ensure that it is
    available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided
    you inform other peers where the object code and Corresponding
    Source of the work are being offered to the general public at no
    charge under subsection 6d.

  A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

  A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling.  In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage.  For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product.  A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

  "Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source.  The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

  If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information.  But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

  The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed.  Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

  Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

  7. Additional Terms.

  "Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law.  If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

  When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it.  (Additional permissions may be written to require their own
removal in certain cases when you modify the work.)  You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

  Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

  All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10.  If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term.  If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

  If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

  Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

  8. Termination.

  You may not propagate or modify a covered work except as expressly
provided under this License.  Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

  However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

  Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

  Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License.  If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

  9. Acceptance Not Required for Having Copies.

  You are not required to accept this License in order to receive or
run a copy of the Program.  Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance.  However,
nothing other than this License grants you permission to propagate or
modify any covered work.  These actions infringe copyright if you do
not accept this License.  Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

  10. Automatic Licensing of Downstream Recipients.

  Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License.  You are not responsible
for enforcing compliance by third parties with this License.

  An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations.  If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

  You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License.  For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

  11. Patents.

  A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based.  The
work thus licensed is called the contributor's "contributor version".

  A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version.  For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

  Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

  In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement).  To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

  If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients.  "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

  If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

  A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License.  You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

  Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

  12. No Surrender of Others' Freedom.

  If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all.  For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

  13. Remote Network Interaction; Use with the GNU General Public License.

  Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software.  This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.

  Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work.  The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.

  14. Revised Versions of this License.

  The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time.  Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

  Each version is given a distinguishing version number.  If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation.  If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.

  If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

  Later license versions may give you additional or different
permissions.  However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

  15. Disclaimer of Warranty.

  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. Limitation of Liability.

  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

  17. Interpretation of Sections 15 and 16.

  If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU Affero General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

  If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source.  For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code.  There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.

  You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.


================================================
FILE: README.md
================================================
# Stable Diffusion web UI
A web interface for Stable Diffusion, implemented using Gradio library.

![](screenshot.png)

## Features
[Detailed feature showcase with images](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features):
- Original txt2img and img2img modes
- One click install and run script (but you still must install python and git)
- Outpainting
- Inpainting
- Color Sketch
- Prompt Matrix
- Stable Diffusion Upscale
- Attention, specify parts of text that the model should pay more attention to
    - a man in a `((tuxedo))` - will pay more attention to tuxedo
    - a man in a `(tuxedo:1.21)` - alternative syntax
    - select text and press `Ctrl+Up` or `Ctrl+Down` (or `Command+Up` or `Command+Down` if you're on macOS) to automatically adjust attention to selected text (code contributed by anonymous user)
- Loopback, run img2img processing multiple times
- X/Y/Z plot, a way to draw a 3 dimensional plot of images with different parameters
- Textual Inversion
    - have as many embeddings as you want and use any names you like for them
    - use multiple embeddings with different numbers of vectors per token
    - works with half precision floating point numbers
    - train embeddings on 8GB (also reports of 6GB working)
- Extras tab with:
    - GFPGAN, neural network that fixes faces
    - CodeFormer, face restoration tool as an alternative to GFPGAN
    - RealESRGAN, neural network upscaler
    - ESRGAN, neural network upscaler with a lot of third party models
    - SwinIR and Swin2SR ([see here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/2092)), neural network upscalers
    - LDSR, Latent diffusion super resolution upscaling
- Resizing aspect ratio options
- Sampling method selection
    - Adjust sampler eta values (noise multiplier)
    - More advanced noise setting options
- Interrupt processing at any time
- 4GB video card support (also reports of 2GB working)
- Correct seeds for batches
- Live prompt token length validation
- Generation parameters
     - parameters you used to generate images are saved with that image
     - in PNG chunks for PNG, in EXIF for JPEG
     - can drag the image to PNG info tab to restore generation parameters and automatically copy them into UI
     - can be disabled in settings
     - drag and drop an image/text-parameters to promptbox
- Read Generation Parameters Button, loads parameters in promptbox to UI
- Settings page
- Running arbitrary python code from UI (must run with `--allow-code` to enable)
- Mouseover hints for most UI elements
- Possible to change defaults/min/max/step values for UI elements via text config
- Tiling support, a checkbox to create images that can be tiled like textures
- Progress bar and live image generation preview
    - Can use a separate neural network to produce previews with almost no VRAM or compute requirements
- Negative prompt, an extra text field that allows you to list what you don't want to see in generated image
- Styles, a way to save part of prompt and easily apply them via dropdown later
- Variations, a way to generate same image but with tiny differences
- Seed resizing, a way to generate same image but at slightly different resolution
- CLIP interrogator, a button that tries to guess prompt from an image
- Prompt Editing, a way to change prompt mid-generation, say to start making a watermelon and switch to anime girl midway
- Batch Processing, process a group of files using img2img
- Img2img Alternative, reverse Euler method of cross attention control
- Highres Fix, a convenience option to produce high resolution pictures in one click without usual distortions
- Reloading checkpoints on the fly
- Checkpoint Merger, a tab that allows you to merge up to 3 checkpoints into one
- [Custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Scripts) with many extensions from community
- [Composable-Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/), a way to use multiple prompts at once
     - separate prompts using uppercase `AND`
     - also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2`
- No token limit for prompts (original stable diffusion lets you use up to 75 tokens)
- DeepDanbooru integration, creates danbooru style tags for anime prompts
- [xformers](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers), major speed increase for select cards: (add `--xformers` to commandline args)
- via extension: [History tab](https://github.com/yfszzx/stable-diffusion-webui-images-browser): view, direct and delete images conveniently within the UI
- Generate forever option
- Training tab
     - hypernetworks and embeddings options
     - Preprocessing images: cropping, mirroring, autotagging using BLIP or deepdanbooru (for anime)
- Clip skip
- Hypernetworks
- Loras (same as Hypernetworks but more pretty)
- A separate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt
- Can select to load a different VAE from settings screen
- Estimated completion time in progress bar
- API
- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML
- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embeds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients))
- [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions
- [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions
- Now without any bad letters!
- Load checkpoints in safetensors format
- Eased resolution restriction: generated image's dimensions must be a multiple of 8 rather than 64
- Now with a license!
- Reorder elements in the UI from settings screen
- [Segmind Stable Diffusion](https://huggingface.co/segmind/SSD-1B) support

## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for:
- [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended)
- [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
- [Intel CPUs, Intel GPUs (both integrated and discrete)](https://github.com/openvinotoolkit/stable-diffusion-webui/wiki/Installation-on-Intel-Silicon) (external wiki page)
- [Ascend NPUs](https://github.com/wangshuai09/stable-diffusion-webui/wiki/Install-and-run-on-Ascend-NPUs) (external wiki page)

Alternatively, use online services (like Google Colab):

- [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services)

### Installation on Windows 10/11 with NVidia-GPUs using release package
1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract its contents.
2. Run `update.bat`.
3. Run `run.bat`.
> For more details see [Install-and-Run-on-NVidia-GPUs](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs)

### Automatic Installation on Windows
1. Install [Python 3.10.6](https://www.python.org/downloads/release/python-3106/) (newer versions of Python do not support torch), checking "Add Python to PATH".
2. Install [git](https://git-scm.com/download/win).
3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`.
4. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user.

### Automatic Installation on Linux
1. Install the dependencies:
```bash
# Debian-based:
sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0
# Red Hat-based:
sudo dnf install wget git python3 gperftools-libs libglvnd-glx
# openSUSE-based:
sudo zypper install wget git python3 libtcmalloc4 libglvnd
# Arch-based:
sudo pacman -S wget git python3
```
If your system is very new, you need to install python3.11 or python3.10:
```bash
# Ubuntu 24.04
sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt update
sudo apt install python3.11

# Manjaro/Arch
sudo pacman -S yay
yay -S python311 # do not confuse with python3.11 package

# Only for 3.11
# Then set up env variable in launch script
export python_cmd="python3.11"
# or in webui-user.sh
python_cmd="python3.11"
```
2. Navigate to the directory you would like the webui to be installed in and execute the following command:
```bash
wget -q https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh
```
Or just clone the repo wherever you want:
```bash
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui
```

3. Run `webui.sh`.
4. Check `webui-user.sh` for options.
### Installation on Apple Silicon

Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon).

## Contributing
Here's how to add code to this repo: [Contributing](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing)

## Documentation

The documentation was moved from this README over to the project's [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki).

For the purposes of getting Google and other search engines to crawl the wiki, here's a link to the (not for humans) [crawlable wiki](https://github-wiki-see.page/m/AUTOMATIC1111/stable-diffusion-webui/wiki).

## Credits
Licenses for borrowed code can be found in `Settings -> Licenses` screen, and also in `html/licenses.html` file.

- Stable Diffusion - https://github.com/Stability-AI/stablediffusion, https://github.com/CompVis/taming-transformers, https://github.com/mcmonkey4eva/sd3-ref
- k-diffusion - https://github.com/crowsonkb/k-diffusion.git
- Spandrel - https://github.com/chaiNNer-org/spandrel implementing
  - GFPGAN - https://github.com/TencentARC/GFPGAN.git
  - CodeFormer - https://github.com/sczhou/CodeFormer
  - ESRGAN - https://github.com/xinntao/ESRGAN
  - SwinIR - https://github.com/JingyunLiang/SwinIR
  - Swin2SR - https://github.com/mv-lab/swin2sr
- LDSR - https://github.com/Hafiidz/latent-diffusion
- MiDaS - https://github.com/isl-org/MiDaS
- Ideas for optimizations - https://github.com/basujindal/stable-diffusion
- Cross Attention layer optimization - Doggettx - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing.
- Cross Attention layer optimization - InvokeAI, lstein - https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion)
- Sub-quadratic Cross Attention layer optimization - Alex Birch (https://github.com/Birch-san/diffusers/pull/1), Amin Rezaei (https://github.com/AminRezaei0x443/memory-efficient-attention)
- Textual Inversion - Rinon Gal - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas).
- Idea for SD upscale - https://github.com/jquesnelle/txt2imghd
- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot
- CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator
- Idea for Composable Diffusion - https://github.com/energy-based-model/Compositional-Visual-Generation-with-Composable-Diffusion-Models-PyTorch
- xformers - https://github.com/facebookresearch/xformers
- DeepDanbooru - interrogator for anime diffusers https://github.com/KichangKim/DeepDanbooru
- Sampling in float32 precision from a float16 UNet - marunine for the idea, Birch-san for the example Diffusers implementation (https://github.com/Birch-san/diffusers-play/tree/92feee6)
- Instruct pix2pix - Tim Brooks (star), Aleksander Holynski (star), Alexei A. Efros (no star) - https://github.com/timothybrooks/instruct-pix2pix
- Security advice - RyotaK
- UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC
- TAESD - Ollin Boer Bohan - https://github.com/madebyollin/taesd
- LyCORIS - KohakuBlueleaf
- Restart sampling - lambertae - https://github.com/Newbeeer/diffusion_restart_sampling
- Hypertile - tfernd - https://github.com/tfernd/HyperTile
- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
- (You)


================================================
FILE: _typos.toml
================================================
[default.extend-words]
# Part of "RGBa" (Pillow's pre-multiplied alpha RGB mode)
Ba = "Ba"
# HSA is something AMD uses for their GPUs
HSA = "HSA"


================================================
FILE: configs/alt-diffusion-inference.yaml
================================================
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: False
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: modules.xlmr.BertSeriesModelWithTransformation
      params:
        name: "XLMR-Large"

================================================
FILE: configs/alt-diffusion-m18-inference.yaml
================================================
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_head_channels: 64
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: 1
        context_dim: 1024
        use_checkpoint: False
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: modules.xlmr_m18.BertSeriesModelWithTransformation
      params:
        name: "XLMR-Large"


================================================
FILE: configs/instruct-pix2pix.yaml
================================================
# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
# See more details in LICENSE.

model:
  base_learning_rate: 1.0e-04
  target: modules.models.diffusion.ddpm_edit.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: edited
    cond_stage_key: edit
    # image_size: 64
    # image_size: 32
    image_size: 16
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: hybrid
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: false

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 0 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 8
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: False
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 128
    num_workers: 1
    wrap: false
    validation:
      target: edit_dataset.EditDataset
      params:
        path: data/clip-filtered-dataset
        cache_dir:  data/
        cache_name: data_10k
        split: val
        min_text_sim: 0.2
        min_image_sim: 0.75
        min_direction_sim: 0.2
        max_samples_per_prompt: 1
        min_resize_res: 512
        max_resize_res: 512
        crop_res: 512
        output_as_edit: False
        real_input: True


================================================
FILE: configs/sd3-inference.yaml
================================================
model:
  target: modules.models.sd3.sd3_model.SD3Inferencer
  params:
    shift: 3
    state_dict: null


================================================
FILE: configs/sd_xl_inpaint.yaml
================================================
model:
  target: sgm.models.diffusion.DiffusionEngine
  params:
    scale_factor: 0.13025
    disable_first_stage_autocast: True

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        adm_in_channels: 2816
        num_classes: sequential
        use_checkpoint: False
        in_channels: 9
        out_channels: 4
        model_channels: 320
        attention_resolutions: [4, 2]
        num_res_blocks: 2
        channel_mult: [1, 2, 4]
        num_head_channels: 64
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: [1, 2, 10]  # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
        context_dim: 2048
        spatial_transformer_attn_type: softmax-xformers
        legacy: False

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          # crossattn cond
          - is_trainable: False
            input_key: txt
            target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
            params:
              layer: hidden
              layer_idx: 11
          # crossattn and vector cond
          - is_trainable: False
            input_key: txt
            target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
            params:
              arch: ViT-bigG-14
              version: laion2b_s39b_b160k
              freeze: True
              layer: penultimate
              always_return_pooled: True
              legacy: False
          # vector cond
          - is_trainable: False
            input_key: original_size_as_tuple
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256  # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: crop_coords_top_left
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256  # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: target_size_as_tuple
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256  # multiplied by two

    first_stage_config:
      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          attn_type: vanilla-xformers
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity


================================================
FILE: configs/v1-inference.yaml
================================================
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: False
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder


================================================
FILE: configs/v1-inpainting-inference.yaml
================================================
model:
  base_learning_rate: 7.5e-05
  target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: hybrid   # important
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    finetune_keys: null

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 9  # 4 data + 4 downscaled image + 1 mask
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: False
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder


================================================
FILE: environment-wsl2.yaml
================================================
name: automatic
channels:
  - pytorch
  - defaults
dependencies:
  - python=3.10
  - pip=23.0
  - cudatoolkit=11.8
  - pytorch=2.0
  - torchvision=0.15
  - numpy=1.23


================================================
FILE: extensions-builtin/LDSR/ldsr_model_arch.py
================================================
import os
import gc
import time

import numpy as np
import torch
import torchvision
from PIL import Image
from einops import rearrange, repeat
from omegaconf import OmegaConf
import safetensors.torch

from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config, ismap
from modules import shared, sd_hijack, devices

cached_ldsr_model: torch.nn.Module = None


# Create LDSR Class
class LDSR:
    """Latent Diffusion Super Resolution (LDSR) upscaler.

    Loads an LDSR checkpoint described by a YAML config and runs DDIM-based
    4x super-resolution on a PIL image, optionally caching the loaded model
    in the module-level `cached_ldsr_model`.
    """

    def load_model_from_config(self, half_attention):
        """Load the LDSR model (or reuse the cached one).

        half_attention: if truthy, convert the model to half precision.
        Returns a dict of the form {"model": model}.
        """
        global cached_ldsr_model

        # Reuse the previously loaded model when caching is enabled.
        if shared.opts.ldsr_cached and cached_ldsr_model is not None:
            print("Loading model from cache")
            model: torch.nn.Module = cached_ldsr_model
        else:
            print(f"Loading model from {self.modelPath}")
            _, extension = os.path.splitext(self.modelPath)
            if extension.lower() == ".safetensors":
                pl_sd = safetensors.torch.load_file(self.modelPath, device="cpu")
            else:
                pl_sd = torch.load(self.modelPath, map_location="cpu")
            # Checkpoints may store weights under "state_dict" or at the top level.
            sd = pl_sd["state_dict"] if "state_dict" in pl_sd else pl_sd
            config = OmegaConf.load(self.yamlPath)
            # Force the V1 diffusion class regardless of what the YAML specifies.
            config.model.target = "ldm.models.diffusion.ddpm.LatentDiffusionV1"
            model: torch.nn.Module = instantiate_from_config(config.model)
            model.load_state_dict(sd, strict=False)
            model = model.to(shared.device)
            if half_attention:
                model = model.half()
            if shared.cmd_opts.opt_channelslast:
                model = model.to(memory_format=torch.channels_last)

            sd_hijack.model_hijack.hijack(model) # apply optimization
            model.eval()

            if shared.opts.ldsr_cached:
                cached_ldsr_model = model

        return {"model": model}

    def __init__(self, model_path, yaml_path):
        # Paths to the checkpoint file and its model-config YAML.
        self.modelPath = model_path
        self.yamlPath = yaml_path

    @staticmethod
    def run(model, selected_path, custom_steps, eta):
        """Run diffusion sampling for one image.

        model: the loaded LDSR model (as returned in load_model_from_config()["model"]).
        selected_path: a PIL image (despite the name; it is passed to get_cond).
        custom_steps: number of DDIM sampling steps.
        eta: DDIM eta (noise multiplier).
        Returns the log dict produced by make_convolutional_sample (contains "sample").
        """
        example = get_cond(selected_path)

        n_runs = 1
        guider = None
        ckwargs = None
        ddim_use_x0_pred = False
        temperature = 1.
        eta = eta  # no-op self-assignment, kept as-is
        custom_shape = None

        height, width = example["image"].shape[1:3]
        # Use patch-wise (split-input) processing for images at least 128x128.
        split_input = height >= 128 and width >= 128

        if split_input:
            ks = 128  # patch size
            stride = 64  # patch stride; 64 with ks=128 gives 50% overlap
            vqf = 4  # NOTE(review): presumably the first-stage downscale factor — confirm
            model.split_input_params = {"ks": (ks, ks), "stride": (stride, stride),
                                        "vqf": vqf,
                                        "patch_distributed_vq": True,
                                        "tie_braker": False,
                                        "clip_max_weight": 0.5,
                                        "clip_min_weight": 0.01,
                                        "clip_max_tie_weight": 0.5,
                                        "clip_min_tie_weight": 0.01}
        else:
            # Remove stale split settings possibly left over from a previous run.
            if hasattr(model, "split_input_params"):
                delattr(model, "split_input_params")

        x_t = None
        logs = None
        for _ in range(n_runs):
            # custom_shape is always None above, so this branch never runs here;
            # kept for parity with the original sampling script.
            if custom_shape is not None:
                x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
                x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])

            logs = make_convolutional_sample(example, model,
                                             custom_steps=custom_steps,
                                             eta=eta, quantize_x0=False,
                                             custom_shape=custom_shape,
                                             temperature=temperature, noise_dropout=0.,
                                             corrector=guider, corrector_kwargs=ckwargs, x_T=x_t,
                                             ddim_use_x0_pred=ddim_use_x0_pred
                                             )
        return logs

    def super_resolution(self, image, steps=100, target_scale=2, half_attention=False):
        """Upscale `image` with LDSR and return the result as a PIL image.

        steps: DDIM sampling steps.
        target_scale: desired overall scale factor; the image is pre-downsampled
            by target_scale / 4 so the fixed 4x diffusion pass yields it.
        half_attention: load the model in half precision.
        """
        model = self.load_model_from_config(half_attention)

        # Run settings
        diffusion_steps = int(steps)
        eta = 1.0


        gc.collect()
        devices.torch_gc()

        im_og = image
        width_og, height_og = im_og.size
        # If we can adjust the max upscale size, then the 4 below should be our variable
        down_sample_rate = target_scale / 4
        wd = width_og * down_sample_rate
        hd = height_og * down_sample_rate
        width_downsampled_pre = int(np.ceil(wd))
        height_downsampled_pre = int(np.ceil(hd))

        if down_sample_rate != 1:
            print(
                f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')
            im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
        else:
            print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")

        # pad width and height to multiples of 64 (minimum 128), pads with the edge values of image to avoid artifacts
        pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size
        im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))

        logs = self.run(model["model"], im_padded, diffusion_steps, eta)

        # Convert the sampled tensor from [-1, 1] NCHW to a uint8 HWC image.
        sample = logs["sample"]
        sample = sample.detach().cpu()
        sample = torch.clamp(sample, -1., 1.)
        sample = (sample + 1.) / 2. * 255
        sample = sample.numpy().astype(np.uint8)
        sample = np.transpose(sample, (0, 2, 3, 1))
        a = Image.fromarray(sample[0])

        # remove padding: the output is 4x the padded input, so crop to 4x the unpadded size
        a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4))

        del model
        gc.collect()
        devices.torch_gc()

        return a


def get_cond(selected_path):
    """Build the LDSR conditioning dict from a PIL image.

    Returns a dict with "LR_image" (the low-res input scaled to [-1, 1] and
    moved to the target device) and "image" (a 4x-resized copy, not moved).
    Note: `selected_path` is a PIL image despite the name.
    """
    upscale_factor = 4
    lr = torch.unsqueeze(torchvision.transforms.ToTensor()(selected_path.convert('RGB')), 0)
    upscaled = torchvision.transforms.functional.resize(
        lr, size=[upscale_factor * lr.shape[2], upscale_factor * lr.shape[3]], antialias=True)
    # both tensors go from 1CHW to 1HWC layout
    upscaled = rearrange(upscaled, '1 c h w -> 1 h w c')
    lr = rearrange(lr, '1 c h w -> 1 h w c')
    lr = 2. * lr - 1.
    lr = lr.to(shared.device)

    return {"LR_image": lr, "image": upscaled}


@torch.no_grad()
def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None,
                    mask=None, x0=None, quantize_x0=False, temperature=1., score_corrector=None,
                    corrector_kwargs=None, x_t=None
                    ):
    """Draw DDIM samples from `model`; `shape` is (batch, *latent_shape).

    Returns the (samples, intermediates) pair produced by DDIMSampler.sample.
    """
    sampler = DDIMSampler(model)
    batch_size = shape[0]
    latent_shape = shape[1:]
    print(f"Sampling with eta = {eta}; steps: {steps}")
    return sampler.sample(
        steps,
        batch_size=batch_size,
        shape=latent_shape,
        conditioning=cond,
        callback=callback,
        normals_sequence=normals_sequence,
        quantize_x0=quantize_x0,
        eta=eta,
        mask=mask,
        x0=x0,
        temperature=temperature,
        verbose=False,
        score_corrector=score_corrector,
        corrector_kwargs=corrector_kwargs,
        x_t=x_t,
    )


@torch.no_grad()
def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
                              corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):
    """Encode `batch`, run DDIM sampling in latent space, decode the result.

    Returns a log dict with at least "input", "reconstruction", "sample" and
    "time" (sampling wall-clock seconds); "sample_noquant"/"sample_diff" are
    added only when the first stage supports force_not_quantize decoding.
    NOTE(review): noise_dropout is accepted but never used here.
    """
    log = {}

    # z: first-stage latent, c: conditioning, x: input image,
    # xrec: first-stage reconstruction, xc: original (unencoded) conditioning
    z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
                                        return_first_stage_outputs=True,
                                        force_c_encode=not (hasattr(model, 'split_input_params')
                                                            and model.cond_stage_key == 'coordinates_bbox'),
                                        return_original_cond=True)

    if custom_shape is not None:
        # discard the encoded latent; start from fresh noise of the requested shape
        z = torch.randn(custom_shape)
        print(f"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}")

    z0 = None

    log["input"] = x
    log["reconstruction"] = xrec

    if ismap(xc):
        # map-style conditioning: convert to RGB for logging
        log["original_conditioning"] = model.to_rgb(xc)
        if hasattr(model, 'cond_stage_key'):
            log[model.cond_stage_key] = model.to_rgb(xc)

    else:
        # fall back to zeros when there is no conditioning to log
        log["original_conditioning"] = xc if xc is not None else torch.zeros_like(x)
        if model.cond_stage_model:
            log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x)
            if model.cond_stage_key == 'class_label':
                log[model.cond_stage_key] = xc[model.cond_stage_key]

    # sample under EMA weights (ema_scope swaps them in and restores on exit)
    with model.ema_scope("Plotting"):
        t0 = time.time()

        sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape,
                                                eta=eta,
                                                quantize_x0=quantize_x0, mask=None, x0=z0,
                                                temperature=temperature, score_corrector=corrector, corrector_kwargs=corrector_kwargs,
                                                x_t=x_T)
        t1 = time.time()

        if ddim_use_x0_pred:
            # use the sampler's final x0 prediction instead of the last denoised sample
            sample = intermediates['pred_x0'][-1]

    x_sample = model.decode_first_stage(sample)

    try:
        # best-effort extra log: an unquantized decode plus its difference image;
        # first stages without force_not_quantize support simply skip this
        x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
        log["sample_noquant"] = x_sample_noquant
        log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
    except Exception:
        pass

    log["sample"] = x_sample
    log["time"] = t1 - t0

    return log


================================================
FILE: extensions-builtin/LDSR/preload.py
================================================
import os
from modules import paths


def preload(parser):
    """Register the LDSR command-line option on the given argparse parser."""
    default_dir = os.path.join(paths.models_path, 'LDSR')
    parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=default_dir)


================================================
FILE: extensions-builtin/LDSR/scripts/ldsr_model.py
================================================
import os

from modules.modelloader import load_file_from_url
from modules.upscaler import Upscaler, UpscalerData
from ldsr_model_arch import LDSR
from modules import shared, script_callbacks, errors
import sd_hijack_autoencoder  # noqa: F401
import sd_hijack_ddpm_v1  # noqa: F401


class UpscalerLDSR(Upscaler):
    """Upscaler backed by the LDSR latent-diffusion super-resolution model."""

    def __init__(self, user_path):
        self.name = "LDSR"
        self.user_path = user_path
        self.model_url = "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1"
        self.yaml_url = "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1"
        super().__init__()
        scaler_data = UpscalerData("LDSR", None, self)
        self.scalers = [scaler_data]

    def load_model(self, path: str):
        """Locate (or download) the LDSR checkpoint and config, returning an LDSR instance.

        Weight preference order: local model.safetensors, then local model.ckpt,
        then download from model_url. A local project.yaml is preferred over a
        downloaded one.
        """
        yaml_path = os.path.join(self.model_path, "project.yaml")
        old_model_path = os.path.join(self.model_path, "model.pth")
        new_model_path = os.path.join(self.model_path, "model.ckpt")

        local_model_paths = self.find_models(ext_filter=[".ckpt", ".safetensors"])
        local_ckpt_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.ckpt")]), None)
        local_safetensors_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.safetensors")]), None)
        local_yaml_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("project.yaml")]), None)

        # Remove incorrect project.yaml file if too big (>= 10 MiB): a broken
        # download can leave model weights stored under the yaml name.
        if os.path.exists(yaml_path):
            statinfo = os.stat(yaml_path)
            if statinfo.st_size >= 10485760:
                print("Removing invalid LDSR YAML file.")
                os.remove(yaml_path)

        # Migrate a legacy model.pth to model.ckpt. Only rename when the target
        # does not exist yet: os.rename raises FileExistsError on Windows if
        # the destination is already present.
        if os.path.exists(old_model_path) and not os.path.exists(new_model_path):
            print("Renaming model from model.pth to model.ckpt")
            os.rename(old_model_path, new_model_path)

        if local_safetensors_path is not None and os.path.exists(local_safetensors_path):
            model = local_safetensors_path
        else:
            model = local_ckpt_path or load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name="model.ckpt")

        yaml = local_yaml_path or load_file_from_url(self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml")

        return LDSR(model, yaml)

    def do_upscale(self, img, path):
        """Upscale `img` with LDSR; on model-load failure, report the error and return the image unchanged."""
        try:
            ldsr = self.load_model(path)
        except Exception:
            errors.report(f"Failed loading LDSR model {path}", exc_info=True)
            return img
        ddim_steps = shared.opts.ldsr_steps
        return ldsr.super_resolution(img, ddim_steps, self.scale)


def on_ui_settings():
    """Add the LDSR options to the WebUI settings page (upscaling section)."""
    import gradio as gr

    section = ('upscaling', "Upscaling")
    shared.opts.add_option("ldsr_steps", shared.OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}, section=section))
    shared.opts.add_option("ldsr_cached", shared.OptionInfo(False, "Cache LDSR model in memory", gr.Checkbox, {"interactive": True}, section=section))


script_callbacks.on_ui_settings(on_ui_settings)


================================================
FILE: extensions-builtin/LDSR/sd_hijack_autoencoder.py
================================================
# The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo
# The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo
# As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder
import numpy as np
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager

from torch.optim.lr_scheduler import LambdaLR

from ldm.modules.ema import LitEma
from vqvae_quantize import VectorQuantizer2 as VectorQuantizer
from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.util import instantiate_from_config

import ldm.models.autoencoder
from packaging import version

class VQModel(pl.LightningModule):
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=None,
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 batch_resize_range=None,
                 scheduler_config=None,
                 lr_g_factor=1.0,
                 remap=None,
                 sane_index_shape=False, # tell vector quantizer to return indices as bhw
                 use_ema=False
                 ):
        """VQ autoencoder: Encoder -> VectorQuantizer -> Decoder.

        n_embed is the codebook size and embed_dim the dimension of each code.
        ckpt_path, if given, restores weights after all modules are built;
        ignore_keys filters state-dict entries by prefix during that restore.
        """
        super().__init__()
        self.embed_dim = embed_dim
        self.n_embed = n_embed
        # key used to pull the image tensor out of a batch dict (see get_input)
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        # beta=0.25 is the commitment-cost weight passed to the quantizer
        self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
                                        remap=remap,
                                        sane_index_shape=sane_index_shape)
        # 1x1 convs mapping between the encoder's z_channels and the codebook dim
        self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        if colorize_nlabels is not None:
            assert type(colorize_nlabels)==int
            # random projection used to render segmentation maps as RGB
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            # NOTE(review): presumably a metric name watched by Lightning
            # checkpoint callbacks — confirm against trainer configuration
            self.monitor = monitor
        self.batch_resize_range = batch_resize_range
        if self.batch_resize_range is not None:
            print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")

        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        # checkpoint restore happens last, after every submodule (including the
        # EMA wrapper) has been registered
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [])
        self.scheduler_config = scheduler_config
        self.lr_g_factor = lr_g_factor

    @contextmanager
    def ema_scope(self, context=None):
        """Context manager that temporarily swaps in EMA weights (when use_ema
        is set) and restores the training weights on exit; `context` is only a
        label for the log messages."""
        if self.use_ema:
            self.model_ema.store(self.parameters())
            self.model_ema.copy_to(self)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            # always restore the stored weights, even if the body raised
            if self.use_ema:
                self.model_ema.restore(self.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def init_from_ckpt(self, path, ignore_keys=None):
        """Load weights from a checkpoint's "state_dict", dropping entries whose
        key starts with any prefix in `ignore_keys`; loads non-strictly and
        prints a summary of missing/unexpected keys.
        """
        # NOTE(review): torch.load without weights_only=True unpickles arbitrary
        # objects — only load checkpoints from trusted sources.
        sd = torch.load(path, map_location="cpu")["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys or []:
                if k.startswith(ik):
                    print(f"Deleting key {k} from state_dict.")
                    del sd[k]
                    # stop after the first match: a second matching prefix
                    # would attempt to delete the same key again -> KeyError
                    break
        missing, unexpected = self.load_state_dict(sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if missing:
            print(f"Missing Keys: {missing}")
        if unexpected:
            print(f"Unexpected Keys: {unexpected}")

    def on_train_batch_end(self, *args, **kwargs):
        # Fold the current weights into the EMA shadow copy after each batch.
        if not self.use_ema:
            return
        self.model_ema(self)

    def encode(self, x):
        """Encode images to quantized latents; returns (quant, emb_loss, info)
        exactly as produced by the vector quantizer."""
        pre_quant = self.quant_conv(self.encoder(x))
        return self.quantize(pre_quant)

    def encode_to_prequant(self, x):
        """Encode images to continuous latents, skipping the quantization step."""
        return self.quant_conv(self.encoder(x))

    def decode(self, quant):
        """Decode quantized latents back to image space."""
        return self.decoder(self.post_quant_conv(quant))

    def decode_code(self, code_b):
        """Decode a batch of codebook indices to image space by first looking
        up their embeddings in the codebook."""
        return self.decode(self.quantize.embed_code(code_b))

    def forward(self, input, return_pred_indices=False):
        """Full autoencode pass. Returns (reconstruction, quantization_loss),
        plus the predicted codebook indices when return_pred_indices is set."""
        quant, diff, (_, _, ind) = self.encode(input)
        dec = self.decode(quant)
        return (dec, diff, ind) if return_pred_indices else (dec, diff)

    def get_input(self, batch, k):
        """Fetch images under key `k` as float NCHW tensors; when
        batch_resize_range is set, bicubically resize each batch to a random
        size from that range (multiples of 16)."""
        x = batch[k]
        if len(x.shape) == 3:
            # add a trailing channel axis so permute below always sees NHWC
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        if self.batch_resize_range is None:
            return x
        lower_size, upper_size = self.batch_resize_range
        if self.global_step <= 4:
            # do the first few batches with max size to avoid later oom
            new_resize = upper_size
        else:
            new_resize = np.random.choice(np.arange(lower_size, upper_size + 16, 16))
        if new_resize != x.shape[2]:
            x = F.interpolate(x, size=new_resize, mode="bicubic")
        return x.detach()

    def training_step(self, batch, batch_idx, optimizer_idx):
        """One GAN training step: optimizer 0 trains the autoencoder, optimizer 1
        the discriminator. Returns the corresponding loss (None otherwise)."""
        # https://github.com/pytorch/pytorch/issues/37142
        # try not to fool the heuristics
        inputs = self.get_input(batch, self.image_key)
        recon, qloss, indices = self(inputs, return_pred_indices=True)

        if optimizer_idx == 0:
            # autoencoder branch
            aeloss, ae_log = self.loss(qloss, inputs, recon, optimizer_idx, self.global_step,
                                       last_layer=self.get_last_layer(), split="train",
                                       predicted_indices=indices)
            self.log_dict(ae_log, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss

        if optimizer_idx == 1:
            # discriminator branch
            discloss, disc_log = self.loss(qloss, inputs, recon, optimizer_idx, self.global_step,
                                           last_layer=self.get_last_layer(), split="train")
            self.log_dict(disc_log, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss

    def validation_step(self, batch, batch_idx):
        # Validate twice: once with live weights and once under EMA weights
        # (logged with an "_ema" suffix); only the non-EMA log dict is returned.
        log_dict = self._validation_step(batch, batch_idx)
        with self.ema_scope():
            self._validation_step(batch, batch_idx, suffix="_ema")
        return log_dict

    def _validation_step(self, batch, batch_idx, suffix=""):
        x = self.get_input(batch, self.image_key)
        xrec, qloss, ind = self(x, return_pred_indices=True)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
                                        self.global_step,
                                        last_layer=self.get_last_layer(),
                                        split="val"+suffix,
                                        predicted_indices=ind
                                        )

        discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
                                            self.global_step,
  
Download .txt
gitextract_l_rki2tj/

├── .eslintignore
├── .eslintrc.js
├── .git-blame-ignore-revs
├── .github/
│   ├── ISSUE_TEMPLATE/
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   └── feature_request.yml
│   ├── pull_request_template.md
│   └── workflows/
│       ├── on_pull_request.yaml
│       ├── run_tests.yaml
│       └── warns_merge_master.yml
├── .gitignore
├── .pylintrc
├── CHANGELOG.md
├── CITATION.cff
├── CODEOWNERS
├── LICENSE.txt
├── README.md
├── _typos.toml
├── configs/
│   ├── alt-diffusion-inference.yaml
│   ├── alt-diffusion-m18-inference.yaml
│   ├── instruct-pix2pix.yaml
│   ├── sd3-inference.yaml
│   ├── sd_xl_inpaint.yaml
│   ├── v1-inference.yaml
│   └── v1-inpainting-inference.yaml
├── environment-wsl2.yaml
├── extensions-builtin/
│   ├── LDSR/
│   │   ├── ldsr_model_arch.py
│   │   ├── preload.py
│   │   ├── scripts/
│   │   │   └── ldsr_model.py
│   │   ├── sd_hijack_autoencoder.py
│   │   ├── sd_hijack_ddpm_v1.py
│   │   └── vqvae_quantize.py
│   ├── Lora/
│   │   ├── extra_networks_lora.py
│   │   ├── lora.py
│   │   ├── lora_logger.py
│   │   ├── lora_patches.py
│   │   ├── lyco_helpers.py
│   │   ├── network.py
│   │   ├── network_full.py
│   │   ├── network_glora.py
│   │   ├── network_hada.py
│   │   ├── network_ia3.py
│   │   ├── network_lokr.py
│   │   ├── network_lora.py
│   │   ├── network_norm.py
│   │   ├── network_oft.py
│   │   ├── networks.py
│   │   ├── preload.py
│   │   ├── scripts/
│   │   │   └── lora_script.py
│   │   ├── ui_edit_user_metadata.py
│   │   └── ui_extra_networks_lora.py
│   ├── ScuNET/
│   │   ├── preload.py
│   │   └── scripts/
│   │       └── scunet_model.py
│   ├── SwinIR/
│   │   ├── preload.py
│   │   └── scripts/
│   │       └── swinir_model.py
│   ├── canvas-zoom-and-pan/
│   │   ├── javascript/
│   │   │   └── zoom.js
│   │   ├── scripts/
│   │   │   └── hotkey_config.py
│   │   └── style.css
│   ├── extra-options-section/
│   │   └── scripts/
│   │       └── extra_options_section.py
│   ├── hypertile/
│   │   ├── hypertile.py
│   │   └── scripts/
│   │       └── hypertile_script.py
│   ├── mobile/
│   │   └── javascript/
│   │       └── mobile.js
│   ├── postprocessing-for-training/
│   │   └── scripts/
│   │       ├── postprocessing_autosized_crop.py
│   │       ├── postprocessing_caption.py
│   │       ├── postprocessing_create_flipped_copies.py
│   │       ├── postprocessing_focal_crop.py
│   │       └── postprocessing_split_oversized.py
│   ├── prompt-bracket-checker/
│   │   └── javascript/
│   │       └── prompt-bracket-checker.js
│   └── soft-inpainting/
│       └── scripts/
│           └── soft_inpainting.py
├── html/
│   ├── extra-networks-card.html
│   ├── extra-networks-copy-path-button.html
│   ├── extra-networks-edit-item-button.html
│   ├── extra-networks-metadata-button.html
│   ├── extra-networks-no-cards.html
│   ├── extra-networks-pane-dirs.html
│   ├── extra-networks-pane-tree.html
│   ├── extra-networks-pane.html
│   ├── extra-networks-tree-button.html
│   ├── footer.html
│   └── licenses.html
├── javascript/
│   ├── aspectRatioOverlay.js
│   ├── contextMenus.js
│   ├── dragdrop.js
│   ├── edit-attention.js
│   ├── edit-order.js
│   ├── extensions.js
│   ├── extraNetworks.js
│   ├── generationParams.js
│   ├── hints.js
│   ├── hires_fix.js
│   ├── imageMaskFix.js
│   ├── imageviewer.js
│   ├── imageviewerGamepad.js
│   ├── inputAccordion.js
│   ├── localStorage.js
│   ├── localization.js
│   ├── notification.js
│   ├── profilerVisualization.js
│   ├── progressbar.js
│   ├── resizeHandle.js
│   ├── settings.js
│   ├── textualInversion.js
│   ├── token-counters.js
│   ├── ui.js
│   └── ui_settings_hints.js
├── launch.py
├── localizations/
│   └── Put localization files here.txt
├── modules/
│   ├── api/
│   │   ├── api.py
│   │   └── models.py
│   ├── cache.py
│   ├── call_queue.py
│   ├── cmd_args.py
│   ├── codeformer_model.py
│   ├── config_states.py
│   ├── dat_model.py
│   ├── deepbooru.py
│   ├── deepbooru_model.py
│   ├── devices.py
│   ├── errors.py
│   ├── esrgan_model.py
│   ├── extensions.py
│   ├── extra_networks.py
│   ├── extra_networks_hypernet.py
│   ├── extras.py
│   ├── face_restoration.py
│   ├── face_restoration_utils.py
│   ├── fifo_lock.py
│   ├── gfpgan_model.py
│   ├── gitpython_hack.py
│   ├── gradio_extensons.py
│   ├── hashes.py
│   ├── hat_model.py
│   ├── hypernetworks/
│   │   ├── hypernetwork.py
│   │   └── ui.py
│   ├── images.py
│   ├── img2img.py
│   ├── import_hook.py
│   ├── infotext_utils.py
│   ├── infotext_versions.py
│   ├── initialize.py
│   ├── initialize_util.py
│   ├── interrogate.py
│   ├── launch_utils.py
│   ├── localization.py
│   ├── logging_config.py
│   ├── lowvram.py
│   ├── mac_specific.py
│   ├── masking.py
│   ├── memmon.py
│   ├── modelloader.py
│   ├── models/
│   │   ├── diffusion/
│   │   │   ├── ddpm_edit.py
│   │   │   └── uni_pc/
│   │   │       ├── __init__.py
│   │   │       ├── sampler.py
│   │   │       └── uni_pc.py
│   │   └── sd3/
│   │       ├── mmdit.py
│   │       ├── other_impls.py
│   │       ├── sd3_cond.py
│   │       ├── sd3_impls.py
│   │       └── sd3_model.py
│   ├── ngrok.py
│   ├── npu_specific.py
│   ├── options.py
│   ├── patches.py
│   ├── paths.py
│   ├── paths_internal.py
│   ├── postprocessing.py
│   ├── processing.py
│   ├── processing_scripts/
│   │   ├── comments.py
│   │   ├── refiner.py
│   │   ├── sampler.py
│   │   └── seed.py
│   ├── profiling.py
│   ├── progress.py
│   ├── prompt_parser.py
│   ├── realesrgan_model.py
│   ├── restart.py
│   ├── rng.py
│   ├── rng_philox.py
│   ├── safe.py
│   ├── script_callbacks.py
│   ├── script_loading.py
│   ├── scripts.py
│   ├── scripts_auto_postprocessing.py
│   ├── scripts_postprocessing.py
│   ├── sd_disable_initialization.py
│   ├── sd_emphasis.py
│   ├── sd_hijack.py
│   ├── sd_hijack_checkpoint.py
│   ├── sd_hijack_clip.py
│   ├── sd_hijack_clip_old.py
│   ├── sd_hijack_ip2p.py
│   ├── sd_hijack_open_clip.py
│   ├── sd_hijack_optimizations.py
│   ├── sd_hijack_unet.py
│   ├── sd_hijack_utils.py
│   ├── sd_hijack_xlmr.py
│   ├── sd_models.py
│   ├── sd_models_config.py
│   ├── sd_models_types.py
│   ├── sd_models_xl.py
│   ├── sd_samplers.py
│   ├── sd_samplers_cfg_denoiser.py
│   ├── sd_samplers_common.py
│   ├── sd_samplers_compvis.py
│   ├── sd_samplers_extra.py
│   ├── sd_samplers_kdiffusion.py
│   ├── sd_samplers_lcm.py
│   ├── sd_samplers_timesteps.py
│   ├── sd_samplers_timesteps_impl.py
│   ├── sd_schedulers.py
│   ├── sd_unet.py
│   ├── sd_vae.py
│   ├── sd_vae_approx.py
│   ├── sd_vae_taesd.py
│   ├── shared.py
│   ├── shared_cmd_options.py
│   ├── shared_gradio_themes.py
│   ├── shared_init.py
│   ├── shared_items.py
│   ├── shared_options.py
│   ├── shared_state.py
│   ├── shared_total_tqdm.py
│   ├── styles.py
│   ├── sub_quadratic_attention.py
│   ├── sysinfo.py
│   ├── textual_inversion/
│   │   ├── autocrop.py
│   │   ├── dataset.py
│   │   ├── image_embedding.py
│   │   ├── learn_schedule.py
│   │   ├── saving_settings.py
│   │   ├── textual_inversion.py
│   │   └── ui.py
│   ├── timer.py
│   ├── torch_utils.py
│   ├── txt2img.py
│   ├── ui.py
│   ├── ui_checkpoint_merger.py
│   ├── ui_common.py
│   ├── ui_components.py
│   ├── ui_extensions.py
│   ├── ui_extra_networks.py
│   ├── ui_extra_networks_checkpoints.py
│   ├── ui_extra_networks_checkpoints_user_metadata.py
│   ├── ui_extra_networks_hypernets.py
│   ├── ui_extra_networks_textual_inversion.py
│   ├── ui_extra_networks_user_metadata.py
│   ├── ui_gradio_extensions.py
│   ├── ui_loadsave.py
│   ├── ui_postprocessing.py
│   ├── ui_prompt_styles.py
│   ├── ui_settings.py
│   ├── ui_tempdir.py
│   ├── ui_toprow.py
│   ├── upscaler.py
│   ├── upscaler_utils.py
│   ├── util.py
│   ├── xlmr.py
│   ├── xlmr_m18.py
│   └── xpu_specific.py
├── package.json
├── pyproject.toml
├── requirements-test.txt
├── requirements.txt
├── requirements_npu.txt
├── requirements_versions.txt
├── script.js
├── scripts/
│   ├── custom_code.py
│   ├── img2imgalt.py
│   ├── loopback.py
│   ├── outpainting_mk_2.py
│   ├── poor_mans_outpainting.py
│   ├── postprocessing_codeformer.py
│   ├── postprocessing_gfpgan.py
│   ├── postprocessing_upscale.py
│   ├── prompt_matrix.py
│   ├── prompts_from_file.py
│   ├── sd_upscale.py
│   └── xyz_grid.py
├── style.css
├── test/
│   ├── __init__.py
│   ├── conftest.py
│   ├── test_extras.py
│   ├── test_face_restorers.py
│   ├── test_files/
│   │   └── empty.pt
│   ├── test_img2img.py
│   ├── test_torch_utils.py
│   ├── test_txt2img.py
│   └── test_utils.py
├── textual_inversion_templates/
│   ├── hypernetwork.txt
│   ├── none.txt
│   ├── style.txt
│   ├── style_filewords.txt
│   ├── subject.txt
│   └── subject_filewords.txt
├── webui-macos-env.sh
├── webui.bat
├── webui.py
└── webui.sh
Download .txt
Showing preview only (215K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (2616 symbols across 230 files)

FILE: extensions-builtin/LDSR/ldsr_model_arch.py
  class LDSR (line 21) | class LDSR:
    method load_model_from_config (line 22) | def load_model_from_config(self, half_attention):
    method __init__ (line 54) | def __init__(self, model_path, yaml_path):
    method run (line 59) | def run(model, selected_path, custom_steps, eta):
    method super_resolution (line 106) | def super_resolution(self, image, steps=100, target_scale=2, half_atte...
  function get_cond (line 157) | def get_cond(selected_path):
  function convsample_ddim (line 176) | def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, n...
  function make_convolutional_sample (line 194) | def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, ...

FILE: extensions-builtin/LDSR/preload.py
  function preload (line 5) | def preload(parser):

FILE: extensions-builtin/LDSR/scripts/ldsr_model.py
  class UpscalerLDSR (line 11) | class UpscalerLDSR(Upscaler):
    method __init__ (line 12) | def __init__(self, user_path):
    method load_model (line 21) | def load_model(self, path: str):
    method do_upscale (line 51) | def do_upscale(self, img, path):
  function on_ui_settings (line 61) | def on_ui_settings():

FILE: extensions-builtin/LDSR/sd_hijack_autoencoder.py
  class VQModel (line 20) | class VQModel(pl.LightningModule):
    method __init__ (line 21) | def __init__(self,
    method ema_scope (line 70) | def ema_scope(self, context=None):
    method init_from_ckpt (line 84) | def init_from_ckpt(self, path, ignore_keys=None):
    method on_train_batch_end (line 99) | def on_train_batch_end(self, *args, **kwargs):
    method encode (line 103) | def encode(self, x):
    method encode_to_prequant (line 109) | def encode_to_prequant(self, x):
    method decode (line 114) | def decode(self, quant):
    method decode_code (line 119) | def decode_code(self, code_b):
    method forward (line 124) | def forward(self, input, return_pred_indices=False):
    method get_input (line 131) | def get_input(self, batch, k):
    method training_step (line 149) | def training_step(self, batch, batch_idx, optimizer_idx):
    method validation_step (line 171) | def validation_step(self, batch, batch_idx):
    method _validation_step (line 177) | def _validation_step(self, batch, batch_idx, suffix=""):
    method configure_optimizers (line 204) | def configure_optimizers(self):
    method get_last_layer (line 237) | def get_last_layer(self):
    method log_images (line 240) | def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
    method to_rgb (line 263) | def to_rgb(self, x):
  class VQModelInterface (line 272) | class VQModelInterface(VQModel):
    method __init__ (line 273) | def __init__(self, embed_dim, *args, **kwargs):
    method encode (line 277) | def encode(self, x):
    method decode (line 282) | def decode(self, h, force_not_quantize=False):

FILE: extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
  function disabled_train (line 33) | def disabled_train(self, mode=True):
  function uniform_on_device (line 39) | def uniform_on_device(r1, r2, shape, device):
  class DDPMV1 (line 43) | class DDPMV1(pl.LightningModule):
    method __init__ (line 45) | def __init__(self,
    method register_schedule (line 116) | def register_schedule(self, given_betas=None, beta_schedule="linear", ...
    method ema_scope (line 171) | def ema_scope(self, context=None):
    method init_from_ckpt (line 185) | def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
    method q_mean_variance (line 203) | def q_mean_variance(self, x_start, t):
    method predict_start_from_noise (line 215) | def predict_start_from_noise(self, x_t, t, noise):
    method q_posterior (line 221) | def q_posterior(self, x_start, x_t, t):
    method p_mean_variance (line 230) | def p_mean_variance(self, x, t, clip_denoised: bool):
    method p_sample (line 243) | def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
    method p_sample_loop (line 252) | def p_sample_loop(self, shape, return_intermediates=False):
    method sample (line 267) | def sample(self, batch_size=16, return_intermediates=False):
    method q_sample (line 273) | def q_sample(self, x_start, t, noise=None):
    method get_loss (line 278) | def get_loss(self, pred, target, mean=True):
    method p_losses (line 293) | def p_losses(self, x_start, t, noise=None):
    method forward (line 322) | def forward(self, x, *args, **kwargs):
    method get_input (line 328) | def get_input(self, batch, k):
    method shared_step (line 336) | def shared_step(self, batch):
    method training_step (line 341) | def training_step(self, batch, batch_idx):
    method validation_step (line 357) | def validation_step(self, batch, batch_idx):
    method on_train_batch_end (line 365) | def on_train_batch_end(self, *args, **kwargs):
    method _get_rows_from_list (line 369) | def _get_rows_from_list(self, samples):
    method log_images (line 377) | def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=Non...
    method configure_optimizers (line 414) | def configure_optimizers(self):
  class LatentDiffusionV1 (line 423) | class LatentDiffusionV1(DDPMV1):
    method __init__ (line 425) | def __init__(self,
    method make_cond_schedule (line 470) | def make_cond_schedule(self, ):
    method on_train_batch_start (line 477) | def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
    method register_schedule (line 492) | def register_schedule(self,
    method instantiate_first_stage (line 501) | def instantiate_first_stage(self, config):
    method instantiate_cond_stage (line 508) | def instantiate_cond_stage(self, config):
    method _get_denoise_row_from_list (line 529) | def _get_denoise_row_from_list(self, samples, desc='', force_no_decode...
    method get_first_stage_encoding (line 541) | def get_first_stage_encoding(self, encoder_posterior):
    method get_learned_conditioning (line 550) | def get_learned_conditioning(self, c):
    method meshgrid (line 563) | def meshgrid(self, h, w):
    method delta_border (line 570) | def delta_border(self, h, w):
    method get_weighting (line 584) | def get_weighting(self, h, w, Ly, Lx, device):
    method get_fold_unfold (line 600) | def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo...
    method get_input (line 653) | def get_input(self, batch, k, return_first_stage_outputs=False, force_...
    method decode_first_stage (line 705) | def decode_first_stage(self, z, predict_cids=False, force_not_quantize...
    method differentiable_decode_first_stage (line 765) | def differentiable_decode_first_stage(self, z, predict_cids=False, for...
    method encode_first_stage (line 825) | def encode_first_stage(self, x):
    method shared_step (line 864) | def shared_step(self, batch, **kwargs):
    method forward (line 869) | def forward(self, x, c, *args, **kwargs):
    method apply_model (line 880) | def apply_model(self, x_noisy, t, cond, return_ids=False):
    method _predict_eps_from_xstart (line 983) | def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
    method _prior_bpd (line 987) | def _prior_bpd(self, x_start):
    method p_losses (line 1001) | def p_losses(self, x_start, cond, t, noise=None):
    method p_mean_variance (line 1036) | def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codeboo...
    method p_sample (line 1068) | def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
    method progressive_denoising (line 1099) | def progressive_denoising(self, cond, shape, verbose=True, callback=No...
    method p_sample_loop (line 1157) | def p_sample_loop(self, cond, shape, return_intermediates=False,
    method sample (line 1210) | def sample(self, cond, batch_size=16, return_intermediates=False, x_T=...
    method sample_log (line 1228) | def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
    method log_images (line 1244) | def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200,...
    method configure_optimizers (line 1354) | def configure_optimizers(self):
    method to_rgb (line 1379) | def to_rgb(self, x):
  class DiffusionWrapperV1 (line 1388) | class DiffusionWrapperV1(pl.LightningModule):
    method __init__ (line 1389) | def __init__(self, diff_model_config, conditioning_key):
    method forward (line 1395) | def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
  class Layout2ImgDiffusionV1 (line 1417) | class Layout2ImgDiffusionV1(LatentDiffusionV1):
    method __init__ (line 1419) | def __init__(self, cond_stage_key, *args, **kwargs):
    method log_images (line 1423) | def log_images(self, batch, N=8, *args, **kwargs):

FILE: extensions-builtin/LDSR/vqvae_quantize.py
  class VectorQuantizer2 (line 30) | class VectorQuantizer2(nn.Module):
    method __init__ (line 39) | def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
    method remap_to_used (line 65) | def remap_to_used(self, inds):
    method unmap_to_all (line 79) | def unmap_to_all(self, inds):
    method forward (line 89) | def forward(self, z, temp=None, rescale_logits=False, return_logits=Fa...
    method get_codebook_entry (line 132) | def get_codebook_entry(self, indices, shape):

FILE: extensions-builtin/Lora/extra_networks_lora.py
  class ExtraNetworkLora (line 5) | class ExtraNetworkLora(extra_networks.ExtraNetwork):
    method __init__ (line 6) | def __init__(self):
    method activate (line 14) | def activate(self, p, params_list):
    method deactivate (line 58) | def deactivate(self, p):

FILE: extensions-builtin/Lora/lora_logger.py
  class ColoredFormatter (line 6) | class ColoredFormatter(logging.Formatter):
    method format (line 16) | def format(self, record):

FILE: extensions-builtin/Lora/lora_patches.py
  class LoraPatches (line 7) | class LoraPatches:
    method __init__ (line 8) | def __init__(self):
    method undo (line 20) | def undo(self):

FILE: extensions-builtin/Lora/lyco_helpers.py
  function make_weight_cp (line 4) | def make_weight_cp(t, wa, wb):
  function rebuild_conventional (line 9) | def rebuild_conventional(up, down, shape, dyn_dim=None):
  function rebuild_cp_decomposition (line 18) | def rebuild_cp_decomposition(up, down, mid):
  function factorization (line 25) | def factorization(dimension: int, factor:int=-1) -> tuple[int, int]:

FILE: extensions-builtin/Lora/network.py
  class SdVersion (line 17) | class SdVersion(enum.Enum):
  class NetworkOnDisk (line 24) | class NetworkOnDisk:
    method __init__ (line 25) | def __init__(self, name, filename):
    method detect_version (line 61) | def detect_version(self):
    method set_hash (line 71) | def set_hash(self, v):
    method read_hash (line 79) | def read_hash(self):
    method get_alias (line 83) | def get_alias(self):
  class Network (line 91) | class Network:  # LoraModule
    method __init__ (line 92) | def __init__(self, name, network_on_disk: NetworkOnDisk):
  class ModuleType (line 106) | class ModuleType:
    method create_module (line 107) | def create_module(self, net: Network, weights: NetworkWeights) -> Netw...
  class NetworkModule (line 111) | class NetworkModule:
    method __init__ (line 112) | def __init__(self, net: Network, weights: NetworkWeights):
    method multiplier (line 161) | def multiplier(self):
    method calc_scale (line 167) | def calc_scale(self):
    method apply_weight_decompose (line 175) | def apply_weight_decompose(self, updown, orig_weight):
    method finalize_updown (line 196) | def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=N...
    method calc_updown (line 218) | def calc_updown(self, target):
    method forward (line 221) | def forward(self, x, y):

FILE: extensions-builtin/Lora/network_full.py
  class ModuleTypeFull (line 4) | class ModuleTypeFull(network.ModuleType):
    method create_module (line 5) | def create_module(self, net: network.Network, weights: network.Network...
  class NetworkModuleFull (line 12) | class NetworkModuleFull(network.NetworkModule):
    method __init__ (line 13) | def __init__(self,  net: network.Network, weights: network.NetworkWeig...
    method calc_updown (line 19) | def calc_updown(self, orig_weight):

FILE: extensions-builtin/Lora/network_glora.py
  class ModuleTypeGLora (line 4) | class ModuleTypeGLora(network.ModuleType):
    method create_module (line 5) | def create_module(self, net: network.Network, weights: network.Network...
  class NetworkModuleGLora (line 12) | class NetworkModuleGLora(network.NetworkModule):
    method __init__ (line 13) | def __init__(self,  net: network.Network, weights: network.NetworkWeig...
    method calc_updown (line 24) | def calc_updown(self, orig_weight):

FILE: extensions-builtin/Lora/network_hada.py
  class ModuleTypeHada (line 5) | class ModuleTypeHada(network.ModuleType):
    method create_module (line 6) | def create_module(self, net: network.Network, weights: network.Network...
  class NetworkModuleHada (line 13) | class NetworkModuleHada(network.NetworkModule):
    method __init__ (line 14) | def __init__(self,  net: network.Network, weights: network.NetworkWeig...
    method calc_updown (line 29) | def calc_updown(self, orig_weight):

FILE: extensions-builtin/Lora/network_ia3.py
  class ModuleTypeIa3 (line 4) | class ModuleTypeIa3(network.ModuleType):
    method create_module (line 5) | def create_module(self, net: network.Network, weights: network.Network...
  class NetworkModuleIa3 (line 12) | class NetworkModuleIa3(network.NetworkModule):
    method __init__ (line 13) | def __init__(self,  net: network.Network, weights: network.NetworkWeig...
    method calc_updown (line 19) | def calc_updown(self, orig_weight):

FILE: extensions-builtin/Lora/network_lokr.py
  class ModuleTypeLokr (line 7) | class ModuleTypeLokr(network.ModuleType):
    method create_module (line 8) | def create_module(self, net: network.Network, weights: network.Network...
  function make_kron (line 17) | def make_kron(orig_shape, w1, w2):
  class NetworkModuleLokr (line 24) | class NetworkModuleLokr(network.NetworkModule):
    method __init__ (line 25) | def __init__(self,  net: network.Network, weights: network.NetworkWeig...
    method calc_updown (line 38) | def calc_updown(self, orig_weight):

FILE: extensions-builtin/Lora/network_lora.py
  class ModuleTypeLora (line 9) | class ModuleTypeLora(network.ModuleType):
    method create_module (line 10) | def create_module(self, net: network.Network, weights: network.Network...
  class NetworkModuleLora (line 24) | class NetworkModuleLora(network.NetworkModule):
    method __init__ (line 25) | def __init__(self,  net: network.Network, weights: network.NetworkWeig...
    method create_module (line 34) | def create_module(self, weights, key, none_ok=False):
    method calc_updown (line 71) | def calc_updown(self, orig_weight):
    method forward (line 88) | def forward(self, x, y):

FILE: extensions-builtin/Lora/network_norm.py
  class ModuleTypeNorm (line 4) | class ModuleTypeNorm(network.ModuleType):
    method create_module (line 5) | def create_module(self, net: network.Network, weights: network.Network...
  class NetworkModuleNorm (line 12) | class NetworkModuleNorm(network.NetworkModule):
    method __init__ (line 13) | def __init__(self,  net: network.Network, weights: network.NetworkWeig...
    method calc_updown (line 19) | def calc_updown(self, orig_weight):

FILE: extensions-builtin/Lora/network_oft.py
  class ModuleTypeOFT (line 6) | class ModuleTypeOFT(network.ModuleType):
    method create_module (line 7) | def create_module(self, net: network.Network, weights: network.Network...
  class NetworkModuleOFT (line 15) | class NetworkModuleOFT(network.NetworkModule):
    method __init__ (line 16) | def __init__(self,  net: network.Network, weights: network.NetworkWeig...
    method calc_updown (line 70) | def calc_updown(self, orig_weight):

FILE: extensions-builtin/Lora/networks.py
  function convert_diffusers_name_to_compvis (line 56) | def convert_diffusers_name_to_compvis(key, is_sd2):
  function assign_network_names_to_compvis_modules (line 122) | def assign_network_names_to_compvis_modules(sd_model):
  class BundledTIHash (line 150) | class BundledTIHash(str):
    method __init__ (line 151) | def __init__(self, hash_str):
    method __str__ (line 154) | def __str__(self):
  function load_network (line 158) | def load_network(name, network_on_disk):
  function purge_networks_from_memory (line 273) | def purge_networks_from_memory():
  function load_networks (line 281) | def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn...
  function allowed_layer_without_weight (line 369) | def allowed_layer_without_weight(layer):
  function store_weights_backup (line 376) | def store_weights_backup(weight):
  function restore_weights_backup (line 383) | def restore_weights_backup(obj, field, weight):
  function network_restore_weights_from_backup (line 391) | def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, tor...
  function network_apply_weights (line 411) | def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, ...
  function network_forward (line 545) | def network_forward(org_module, input, original_forward):
  function network_reset_cached_weight (line 572) | def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Li...
  function network_Linear_forward (line 578) | def network_Linear_forward(self, input):
  function network_Linear_load_state_dict (line 587) | def network_Linear_load_state_dict(self, *args, **kwargs):
  function network_Conv2d_forward (line 593) | def network_Conv2d_forward(self, input):
  function network_Conv2d_load_state_dict (line 602) | def network_Conv2d_load_state_dict(self, *args, **kwargs):
  function network_GroupNorm_forward (line 608) | def network_GroupNorm_forward(self, input):
  function network_GroupNorm_load_state_dict (line 617) | def network_GroupNorm_load_state_dict(self, *args, **kwargs):
  function network_LayerNorm_forward (line 623) | def network_LayerNorm_forward(self, input):
  function network_LayerNorm_load_state_dict (line 632) | def network_LayerNorm_load_state_dict(self, *args, **kwargs):
  function network_MultiheadAttention_forward (line 638) | def network_MultiheadAttention_forward(self, *args, **kwargs):
  function network_MultiheadAttention_load_state_dict (line 644) | def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
  function process_network_files (line 650) | def process_network_files(names: list[str] | None = None):
  function update_available_networks_by_names (line 675) | def update_available_networks_by_names(names: list[str]):
  function list_available_networks (line 679) | def list_available_networks():
  function infotext_pasted (line 694) | def infotext_pasted(infotext, params):

FILE: extensions-builtin/Lora/preload.py
  function preload (line 6) | def preload(parser):

FILE: extensions-builtin/Lora/scripts/lora_script.py
  function unload (line 15) | def unload():
  function before_ui (line 19) | def before_ui():
  function create_lora_json (line 53) | def create_lora_json(obj: network.NetworkOnDisk):
  function api_networks (line 62) | def api_networks(_: gr.Blocks, app: FastAPI):
  function infotext_pasted (line 77) | def infotext_pasted(infotext, d):

FILE: extensions-builtin/Lora/ui_edit_user_metadata.py
  function is_non_comma_tagset (line 11) | def is_non_comma_tagset(tags):
  function build_tags (line 21) | def build_tags(metadata):
  class LoraUserMetadataEditor (line 48) | class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadat...
    method __init__ (line 49) | def __init__(self, ui, tabname, page):
    method save_lora_user_metadata (line 59) | def save_lora_user_metadata(self, name, desc, sd_version, activation_t...
    method get_metadata_table (line 70) | def get_metadata_table(self, name):
    method put_values_into_components (line 117) | def put_values_into_components(self, name):
    method generate_random_prompt (line 138) | def generate_random_prompt(self, name):
    method generate_random_prompt_from_tags (line 145) | def generate_random_prompt_from_tags(self, tags):
    method create_extra_default_items_in_left_column (line 160) | def create_extra_default_items_in_left_column(self):
    method create_editor (line 165) | def create_editor(self):

FILE: extensions-builtin/Lora/ui_extra_networks_lora.py
  class ExtraNetworksPageLora (line 11) | class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
    method __init__ (line 12) | def __init__(self):
    method refresh (line 15) | def refresh(self):
    method create_item (line 18) | def create_item(self, name, index=None, enable_filter=True):
    method list_items (line 78) | def list_items(self):
    method allowed_directories_for_previews (line 86) | def allowed_directories_for_previews(self):
    method create_user_metadata_editor (line 89) | def create_user_metadata_editor(self, ui, tabname):

FILE: extensions-builtin/ScuNET/preload.py
  function preload (line 5) | def preload(parser):

FILE: extensions-builtin/ScuNET/scripts/scunet_model.py
  class UpscalerScuNET (line 9) | class UpscalerScuNET(modules.upscaler.Upscaler):
    method __init__ (line 10) | def __init__(self, dirname):
    method do_upscale (line 38) | def do_upscale(self, img: PIL.Image.Image, selected_file):
    method load_model (line 57) | def load_model(self, path: str):
  function on_ui_settings (line 67) | def on_ui_settings():

FILE: extensions-builtin/SwinIR/preload.py
  function preload (line 5) | def preload(parser):

FILE: extensions-builtin/SwinIR/scripts/swinir_model.py
  class UpscalerSwinIR (line 15) | class UpscalerSwinIR(Upscaler):
    method __init__ (line 16) | def __init__(self, dirname):
    method do_upscale (line 35) | def do_upscale(self, img: Image.Image, model_file: str) -> Image.Image:
    method load_model (line 60) | def load_model(self, path, scale=4):
    method _get_device (line 83) | def _get_device(self):
  function on_ui_settings (line 87) | def on_ui_settings():

FILE: extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
  function getActiveTab (line 31) | function getActiveTab(elements, all = false) {
  function getTabId (line 45) | function getTabId(elements) {
  function waitForOpts (line 52) | async function waitForOpts() {
  function hasHorizontalScrollbar (line 62) | function hasHorizontalScrollbar(element) {
  function isModifierKey (line 67) | function isModifierKey(event, key) {
  function isValidHotkey (line 81) | function isValidHotkey(value) {
  function normalizeHotkey (line 92) | function normalizeHotkey(hotkey) {
  function formatHotkeyForDisplay (line 97) | function formatHotkeyForDisplay(hotkey) {
  function createHotkeyConfig (line 102) | function createHotkeyConfig(defaultHotkeysConfig, hotkeysConfigOpts) {
  function disableFunctions (line 153) | function disableFunctions(config, disabledFunctions) {
  function restoreImgRedMask (line 177) | function restoreImgRedMask(elements) {
  function applyZoomAndPan (line 279) | function applyZoomAndPan(elemId, isExtension = true) {

FILE: extensions-builtin/extra-options-section/scripts/extra_options_section.py
  class ExtraOptionsSection (line 8) | class ExtraOptionsSection(scripts.Script):
    method __init__ (line 11) | def __init__(self):
    method title (line 15) | def title(self):
    method show (line 18) | def show(self, is_img2img):
    method ui (line 21) | def ui(self, is_img2img):
    method before_process (line 66) | def before_process(self, p, *args):

FILE: extensions-builtin/hypertile/hypertile.py
  class HypertileParams (line 22) | class HypertileParams:
  function get_divisors (line 192) | def get_divisors(value: int, min_value: int, /, max_options: int = 1) ->...
  function random_divisor (line 205) | def random_divisor(value: int, min_value: int, /, max_options: int = 1) ...
  function set_hypertile_seed (line 217) | def set_hypertile_seed(seed: int) -> None:
  function largest_tile_size_available (line 222) | def largest_tile_size_available(width: int, height: int) -> int:
  function iterative_closest_divisors (line 234) | def iterative_closest_divisors(hw:int, aspect_ratio:float) -> tuple[int,...
  function find_hw_candidates (line 248) | def find_hw_candidates(hw:int, aspect_ratio:float) -> tuple[int, int]:
  function self_attn_forward (line 269) | def self_attn_forward(params: HypertileParams, scale_depth=True) -> Call...
  function hypertile_hook_model (line 318) | def hypertile_hook_model(model: nn.Module, width, height, *, enable=Fals...

FILE: extensions-builtin/hypertile/scripts/hypertile_script.py
  class ScriptHypertile (line 5) | class ScriptHypertile(scripts.Script):
    method title (line 8) | def title(self):
    method show (line 11) | def show(self, is_img2img):
    method process (line 14) | def process(self, p, *args):
    method before_hr (line 21) | def before_hr(self, p, *args):
    method add_infotext (line 36) | def add_infotext(self, p, add_unet_params=False):
  function configure_hypertile (line 57) | def configure_hypertile(width, height, enable_unet=True):
  function on_ui_settings (line 80) | def on_ui_settings():
  function add_axis_options (line 106) | def add_axis_options():

FILE: extensions-builtin/mobile/javascript/mobile.js
  function isMobile (line 3) | function isMobile() {
  function reportWindowSize (line 14) | function reportWindowSize() {

FILE: extensions-builtin/postprocessing-for-training/scripts/postprocessing_autosized_crop.py
  function center_crop (line 7) | def center_crop(image: Image, w: int, h: int):
  function multicrop_pic (line 18) | def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, object...
  class ScriptPostprocessingAutosizedCrop (line 29) | class ScriptPostprocessingAutosizedCrop(scripts_postprocessing.ScriptPos...
    method ui (line 33) | def ui(self):
    method process (line 56) | def process(self, pp: scripts_postprocessing.PostprocessedImage, enabl...

FILE: extensions-builtin/postprocessing-for-training/scripts/postprocessing_caption.py
  class ScriptPostprocessingCeption (line 5) | class ScriptPostprocessingCeption(scripts_postprocessing.ScriptPostproce...
    method ui (line 9) | def ui(self):
    method process (line 18) | def process(self, pp: scripts_postprocessing.PostprocessedImage, enabl...

FILE: extensions-builtin/postprocessing-for-training/scripts/postprocessing_create_flipped_copies.py
  class ScriptPostprocessingCreateFlippedCopies (line 7) | class ScriptPostprocessingCreateFlippedCopies(scripts_postprocessing.Scr...
    method ui (line 11) | def ui(self):
    method process (line 21) | def process(self, pp: scripts_postprocessing.PostprocessedImage, enabl...

FILE: extensions-builtin/postprocessing-for-training/scripts/postprocessing_focal_crop.py
  class ScriptPostprocessingFocalCrop (line 8) | class ScriptPostprocessingFocalCrop(scripts_postprocessing.ScriptPostpro...
    method ui (line 12) | def ui(self):
    method process (line 27) | def process(self, pp: scripts_postprocessing.PostprocessedImage, enabl...

FILE: extensions-builtin/postprocessing-for-training/scripts/postprocessing_split_oversized.py
  function split_pic (line 7) | def split_pic(image, inverse_xy, width, height, overlap_ratio):
  class ScriptPostprocessingSplitOversized (line 31) | class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPo...
    method ui (line 35) | def ui(self):
    method process (line 47) | def process(self, pp: scripts_postprocessing.PostprocessedImage, enabl...

FILE: extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js
  function checkBrackets (line 6) | function checkBrackets(textArea, counterElt) {
  function setupBracketChecking (line 28) | function setupBracketChecking(id_prompt, id_counter) {

FILE: extensions-builtin/soft-inpainting/scripts/soft_inpainting.py
  class SoftInpaintingSettings (line 9) | class SoftInpaintingSettings:
    method __init__ (line 10) | def __init__(self,
    method add_generation_params (line 24) | def add_generation_params(self, dest):
  function processing_uses_inpainting (line 36) | def processing_uses_inpainting(p):
  function latent_blend (line 50) | def latent_blend(settings, a, b, t):
  function get_modified_nmask (line 108) | def get_modified_nmask(settings, nmask, sigma):
  function apply_adaptive_masks (line 127) | def apply_adaptive_masks(
  function apply_masks (line 205) | def apply_masks(
  function weighted_histogram_filter (line 247) | def weighted_histogram_filter(img, kernel, kernel_center, percentile_min...
  function smoothstep (line 373) | def smoothstep(x):
  function smootherstep (line 381) | def smootherstep(x):
  function get_gaussian_kernel (line 389) | def get_gaussian_kernel(stddev_radius=1.0, max_radius=2):
  class Script (line 486) | class Script(scripts.Script):
    method __init__ (line 487) | def __init__(self):
    method title (line 492) | def title(self):
    method show (line 495) | def show(self, is_img2img):
    method ui (line 498) | def ui(self, is_img2img):
    method process (line 661) | def process(self, p, enabled, power, scale, detail_preservation, mask_...
    method on_mask_blend (line 676) | def on_mask_blend(self, p, mba: scripts.MaskBlendArgs, enabled, power,...
    method post_sample (line 696) | def post_sample(self, p, ps: scripts.PostSampleArgs, enabled, power, s...
    method postprocess_maskoverlay (line 745) | def postprocess_maskoverlay(self, p, ppmo: scripts.PostProcessMaskOver...

FILE: javascript/aspectRatioOverlay.js
  function dimensionChange (line 6) | function dimensionChange(e, is_width, is_height) {

FILE: javascript/contextMenus.js
  function showContextMenu (line 10) | function showContextMenu(event, element, menuEntries) {
  function appendContextMenuOption (line 43) | function appendContextMenuOption(targetElementSelector, entryName, entry...
  function removeContextMenuOption (line 62) | function removeContextMenuOption(uid) {
  function addContextMenuEventListener (line 76) | function addContextMenuEventListener() {

FILE: javascript/dragdrop.js
  function isValidImageList (line 3) | function isValidImageList(files) {
  function dropReplaceImage (line 7) | function dropReplaceImage(imgWrap, files) {
  function eventHasFiles (line 51) | function eventHasFiles(e) {
  function isURL (line 59) | function isURL(url) {
  function dragDropTargetIsPrompt (line 68) | function dragDropTargetIsPrompt(target) {

FILE: javascript/edit-attention.js
  function keyupEditAttention (line 1) | function keyupEditAttention(event) {

FILE: javascript/edit-order.js
  function keyupEditOrder (line 3) | function keyupEditOrder(event) {

FILE: javascript/extensions.js
  function extensions_apply (line 2) | function extensions_apply(_disabled_list, _update_list, disable_all) {
  function extensions_check (line 24) | function extensions_check() {
  function install_extension_from_index (line 46) | function install_extension_from_index(button, url) {
  function config_state_confirm_restore (line 57) | function config_state_confirm_restore(_, config_state_name, config_resto...
  function toggle_all_extensions (line 79) | function toggle_all_extensions(event) {
  function toggle_extension (line 85) | function toggle_extension() {

FILE: javascript/extraNetworks.js
  function toggleCss (line 1) | function toggleCss(key, css, enable) {
  function setupExtraNetworksForTab (line 18) | function setupExtraNetworksForTab(tabname) {
  function extraNetworksMovePromptToTab (line 131) | function extraNetworksMovePromptToTab(tabname, id, showPrompt, showNegat...
  function extraNetworksShowControlsForPage (line 157) | function extraNetworksShowControlsForPage(tabname, tabname_full) {
  function extraNetworksUnrelatedTabSelected (line 165) | function extraNetworksUnrelatedTabSelected(tabname) { // called from pyt...
  function extraNetworksTabSelected (line 171) | function extraNetworksTabSelected(tabname, id, showPrompt, showNegativeP...
  function applyExtraNetworkFilter (line 177) | function applyExtraNetworkFilter(tabname_full) {
  function applyExtraNetworkSort (line 188) | function applyExtraNetworkSort(tabname_full) {
  function setupExtraNetworks (line 199) | function setupExtraNetworks() {
  function tryToRemoveExtraNetworkFromPrompt (line 209) | function tryToRemoveExtraNetworkFromPrompt(textarea, text, isNeg) {
  function updatePromptArea (line 248) | function updatePromptArea(text, textArea, isNeg) {
  function cardClicked (line 256) | function cardClicked(tabname, textToAdd, textToAddNegative, allowNegativ...
  function saveCardPreview (line 266) | function saveCardPreview(event, tabname, filename) {
  function extraNetworksSearchButton (line 279) | function extraNetworksSearchButton(tabname, extra_networks_tabname, even...
  function extraNetworksTreeProcessFileClick (line 288) | function extraNetworksTreeProcessFileClick(event, btn, tabname, extra_ne...
  function extraNetworksTreeProcessDirectoryClick (line 301) | function extraNetworksTreeProcessDirectoryClick(event, btn, tabname, ext...
  function extraNetworksTreeOnClick (line 378) | function extraNetworksTreeOnClick(event, tabname, extra_networks_tabname) {
  function extraNetworksControlSortOnClick (line 398) | function extraNetworksControlSortOnClick(event, tabname, extra_networks_...
  function extraNetworksControlSortDirOnClick (line 413) | function extraNetworksControlSortDirOnClick(event, tabname, extra_networ...
  function extraNetworksControlTreeViewOnClick (line 434) | function extraNetworksControlTreeViewOnClick(event, tabname, extra_netwo...
  function extraNetworksControlRefreshOnClick (line 452) | function extraNetworksControlRefreshOnClick(event, tabname, extra_networ...
  function closePopup (line 472) | function closePopup() {
  function popup (line 477) | function popup(contents) {
  function popupId (line 502) | function popupId(id) {
  function extraNetworksFlattenMetadata (line 510) | function extraNetworksFlattenMetadata(obj) {
  function extraNetworksShowMetadata (line 561) | function extraNetworksShowMetadata(text) {
  function requestGet (line 582) | function requestGet(url, data, handler, errorHandler) {
  function extraNetworksCopyCardPath (line 608) | function extraNetworksCopyCardPath(event) {
  function extraNetworksRequestMetadata (line 613) | function extraNetworksRequestMetadata(event, extraPage) {
  function extraNetworksEditUserMetadata (line 633) | function extraNetworksEditUserMetadata(event, tabname, extraPage) {
  function extraNetworksRefreshSingleCard (line 656) | function extraNetworksRefreshSingleCard(page, tabname, name) {
  function scheduleAfterScriptsCallbacks (line 691) | function scheduleAfterScriptsCallbacks() {

FILE: javascript/generationParams.js
  function attachGalleryListeners (line 26) | function attachGalleryListeners(tab_name) {

FILE: javascript/hints.js
  function updateTooltip (line 116) | function updateTooltip(element) {
  function processTooltipCheckNodes (line 152) | function processTooltipCheckNodes() {

FILE: javascript/hires_fix.js
  function onCalcResolutionHires (line 2) | function onCalcResolutionHires(enable, width, height, hr_scale, hr_resiz...

FILE: javascript/imageMaskFix.js
  function imageMaskResize (line 5) | function imageMaskResize() {

FILE: javascript/imageviewer.js
  function closeModal (line 2) | function closeModal() {
  function showModal (line 6) | function showModal(event) {
  function negmod (line 30) | function negmod(n, m) {
  function updateOnBackgroundChange (line 34) | function updateOnBackgroundChange() {
  function modalImageSwitch (line 52) | function modalImageSwitch(offset) {
  function saveImage (line 74) | function saveImage() {
  function modalSaveImage (line 88) | function modalSaveImage(event) {
  function modalNextImage (line 93) | function modalNextImage(event) {
  function modalPrevImage (line 98) | function modalPrevImage(event) {
  function modalKeyHandler (line 103) | function modalKeyHandler(event) {
  function setupImageForLightbox (line 120) | function setupImageForLightbox(e) {
  function modalZoomSet (line 147) | function modalZoomSet(modalImage, enable) {
  function modalZoomToggle (line 151) | function modalZoomToggle(event) {
  function modalLivePreviewToggle (line 157) | function modalLivePreviewToggle(event) {
  function modalTileImageToggle (line 164) | function modalTileImageToggle(event) {

FILE: javascript/imageviewerGamepad.js
  function sleepUntil (line 53) | function sleepUntil(f, timeout) {

FILE: javascript/inputAccordion.js
  function inputAccordionChecked (line 1) | function inputAccordionChecked(id, checked) {
  function setupAccordion (line 7) | function setupAccordion(accordion) {

FILE: javascript/localStorage.js
  function localSet (line 2) | function localSet(k, v) {
  function localGet (line 10) | function localGet(k, def) {
  function localRemove (line 20) | function localRemove(k) {

FILE: javascript/localization.js
  function hasLocalization (line 27) | function hasLocalization() {
  function textNodesUnder (line 31) | function textNodesUnder(el) {
  function canBeTranslated (line 37) | function canBeTranslated(node, text) {
  function getTranslation (line 59) | function getTranslation(text) {
  function processTextNode (line 74) | function processTextNode(node) {
  function processNode (line 85) | function processNode(node) {
  function localizeWholePage (line 110) | function localizeWholePage() {
  function dumpTranslations (line 139) | function dumpTranslations() {
  function download_localization (line 159) | function download_localization() {

FILE: javascript/profilerVisualization.js
  function createRow (line 2) | function createRow(table, cellName, items) {
  function createVisualizationTable (line 36) | function createVisualizationTable(data, cutoff = 0, sort = "") {
  function showProfile (line 167) | function showProfile(path, cutoff = 0.05) {

FILE: javascript/progressbar.js
  function rememberGallerySelection (line 3) | function rememberGallerySelection() {
  function getGallerySelectedIndex (line 7) | function getGallerySelectedIndex() {
  function request (line 11) | function request(url, data, handler, errorHandler) {
  function pad2 (line 34) | function pad2(x) {
  function formatTime (line 38) | function formatTime(secs) {
  function setTitle (line 55) | function setTitle(progress) {
  function randomId (line 68) | function randomId() {
  function requestProgress (line 75) | function requestProgress(id_task, progressbarContainer, gallery, atEnd, ...

FILE: javascript/resizeHandle.js
  function setLeftColGridTemplate (line 20) | function setLeftColGridTemplate(el, width) {
  function displayResizeHandle (line 24) | function displayResizeHandle(parent) {
  function afterResize (line 39) | function afterResize(parent) {
  function setup (line 54) | function setup(parent) {
  function setupAllResizeHandles (line 195) | function setupAllResizeHandles() {

FILE: javascript/settings.js
  function settingsShowAllTabs (line 8) | function settingsShowAllTabs() {
  function settingsShowOneTab (line 16) | function settingsShowOneTab() {

FILE: javascript/textualInversion.js
  function start_training_textual_inversion (line 4) | function start_training_textual_inversion() {

FILE: javascript/token-counters.js
  function update_txt2img_tokens (line 3) | function update_txt2img_tokens(...args) {
  function update_img2img_tokens (line 13) | function update_img2img_tokens(...args) {
  function update_token_counter (line 23) | function update_token_counter(button_id) {
  function recalculatePromptTokens (line 28) | function recalculatePromptTokens(name) {
  function recalculate_prompts_txt2img (line 32) | function recalculate_prompts_txt2img() {
  function recalculate_prompts_img2img (line 39) | function recalculate_prompts_img2img() {
  function setupTokenCounting (line 46) | function setupTokenCounting(id, id_counter, id_button) {
  function toggleTokenCountingVisibility (line 67) | function toggleTokenCountingVisibility(id, id_counter, id_button) {
  function runCodeForTokenCounters (line 74) | function runCodeForTokenCounters(fun) {

FILE: javascript/ui.js
  function set_theme (line 3) | function set_theme(theme) {
  function all_gallery_buttons (line 10) | function all_gallery_buttons() {
  function selected_gallery_button (line 21) | function selected_gallery_button() {
  function selected_gallery_index (line 25) | function selected_gallery_index() {
  function gallery_container_buttons (line 29) | function gallery_container_buttons(gallery_container) {
  function selected_gallery_index_id (line 33) | function selected_gallery_index_id(gallery_container) {
  function extract_image_from_gallery (line 37) | function extract_image_from_gallery(gallery) {
  function switch_to_txt2img (line 57) | function switch_to_txt2img() {
  function switch_to_img2img_tab (line 63) | function switch_to_img2img_tab(no) {
  function switch_to_img2img (line 67) | function switch_to_img2img() {
  function switch_to_sketch (line 72) | function switch_to_sketch() {
  function switch_to_inpaint (line 77) | function switch_to_inpaint() {
  function switch_to_inpaint_sketch (line 82) | function switch_to_inpaint_sketch() {
  function switch_to_extras (line 87) | function switch_to_extras() {
  function get_tab_index (line 93) | function get_tab_index(tabId) {
  function create_tab_index_args (line 103) | function create_tab_index_args(tabId, args) {
  function get_img2img_tab_index (line 109) | function get_img2img_tab_index() {
  function create_submit_args (line 116) | function create_submit_args(args) {
  function setSubmitButtonsVisibility (line 130) | function setSubmitButtonsVisibility(tabname, showInterrupt, showSkip, sh...
  function showSubmitButtons (line 136) | function showSubmitButtons(tabname, show) {
  function showSubmitInterruptingPlaceholder (line 140) | function showSubmitInterruptingPlaceholder(tabname) {
  function showRestoreProgressButton (line 144) | function showRestoreProgressButton(tabname, show) {
  function submit (line 150) | function submit() {
  function submit_txt2img_upscale (line 169) | function submit_txt2img_upscale() {
  function submit_img2img (line 177) | function submit_img2img() {
  function submit_extras (line 197) | function submit_extras() {
  function restoreProgressTxt2img (line 214) | function restoreProgressTxt2img() {
  function restoreProgressImg2img (line 228) | function restoreProgressImg2img() {
  function setupResolutionPasting (line 248) | function setupResolutionPasting(tabname) {
  function modelmerger (line 274) | function modelmerger() {
  function ask_for_style_name (line 284) | function ask_for_style_name(_, prompt_text, negative_prompt_text) {
  function confirm_clear_prompt (line 289) | function confirm_clear_prompt(prompt, negative_prompt) {
  function restart_reload (line 348) | function restart_reload() {
  function updateInput (line 366) | function updateInput(target) {
  function selectCheckpoint (line 374) | function selectCheckpoint(name) {
  function currentImg2imgSourceResolution (line 379) | function currentImg2imgSourceResolution(w, h, scaleBy) {
  function updateImg2imgResizeToTextAfterChangingImage (line 384) | function updateImg2imgResizeToTextAfterChangingImage() {
  function setRandomSeed (line 398) | function setRandomSeed(elem_id) {
  function switchWidthHeight (line 407) | function switchWidthHeight(tabname) {
  function onEdit (line 425) | function onEdit(editId, elem, afterMs, func) {

FILE: javascript/ui_settings_hints.js
  function settingsHintsShowQuicksettings (line 42) | function settingsHintsShowQuicksettings() {

FILE: launch.py
  function main (line 27) | def main():

FILE: modules/api/api.py
  function script_name_to_index (line 36) | def script_name_to_index(name, scripts):
  function validate_sampler_name (line 43) | def validate_sampler_name(name):
  function setUpscalers (line 51) | def setUpscalers(req: dict):
  function verify_url (line 58) | def verify_url(url):
  function decode_base64_to_image (line 77) | def decode_base64_to_image(encoding):
  function encode_pil_to_base64 (line 102) | def encode_pil_to_base64(image):
  function api_middleware (line 135) | def api_middleware(app: FastAPI):
  class Api (line 199) | class Api:
    method __init__ (line 200) | def __init__(self, app: FastAPI, queue_lock: Lock):
    method add_api_route (line 274) | def add_api_route(self, path: str, endpoint, **kwargs):
    method auth (line 279) | def auth(self, credentials: HTTPBasicCredentials = Depends(HTTPBasic())):
    method get_selectable_script (line 286) | def get_selectable_script(self, script_name, script_runner):
    method get_scripts_list (line 294) | def get_scripts_list(self):
    method get_script_info (line 300) | def get_script_info(self):
    method get_script (line 308) | def get_script(self, script_name, script_runner):
    method init_default_script_args (line 315) | def init_default_script_args(self, script_runner):
    method init_script_args (line 335) | def init_script_args(self, request, default_script_args, selectable_sc...
    method apply_infotext (line 363) | def apply_infotext(self, request, tabname, *, script_runner=None, ment...
    method text2imgapi (line 432) | def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcess...
    method img2imgapi (line 492) | def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessi...
    method extras_single_image_api (line 567) | def extras_single_image_api(self, req: models.ExtrasSingleImageRequest):
    method extras_batch_images_api (line 577) | def extras_batch_images_api(self, req: models.ExtrasBatchImagesRequest):
    method pnginfoapi (line 588) | def pnginfoapi(self, req: models.PNGInfoRequest):
    method progressapi (line 602) | def progressapi(self, req: models.ProgressRequest = Depends()):
    method interrogateapi (line 630) | def interrogateapi(self, interrogatereq: models.InterrogateRequest):
    method interruptapi (line 649) | def interruptapi(self):
    method unloadapi (line 654) | def unloadapi(self):
    method reloadapi (line 659) | def reloadapi(self):
    method skip (line 664) | def skip(self):
    method get_config (line 667) | def get_config(self):
    method set_config (line 678) | def set_config(self, req: dict[str, Any]):
    method get_cmd_flags (line 689) | def get_cmd_flags(self):
    method get_samplers (line 692) | def get_samplers(self):
    method get_schedulers (line 695) | def get_schedulers(self):
    method get_upscalers (line 706) | def get_upscalers(self):
    method get_latent_upscale_modes (line 718) | def get_latent_upscale_modes(self):
    method get_sd_models (line 726) | def get_sd_models(self):
    method get_sd_vaes (line 730) | def get_sd_vaes(self):
    method get_hypernetworks (line 734) | def get_hypernetworks(self):
    method get_face_restorers (line 737) | def get_face_restorers(self):
    method get_realesrgan_models (line 740) | def get_realesrgan_models(self):
    method get_prompt_styles (line 743) | def get_prompt_styles(self):
    method get_embeddings (line 751) | def get_embeddings(self):
    method refresh_embeddings (line 771) | def refresh_embeddings(self):
    method refresh_checkpoints (line 775) | def refresh_checkpoints(self):
    method refresh_vae (line 779) | def refresh_vae(self):
    method create_embedding (line 783) | def create_embedding(self, args: dict):
    method create_hypernetwork (line 795) | def create_hypernetwork(self, args: dict):
    method train_embedding (line 805) | def train_embedding(self, args: dict):
    method train_hypernetwork (line 826) | def train_hypernetwork(self, args: dict):
    method get_memory (line 851) | def get_memory(self):
    method get_extensions_list (line 886) | def get_extensions_list(self):
    method launch (line 905) | def launch(self, server_name, port, root_path):
    method kill_webui (line 917) | def kill_webui(self):
    method restart_webui (line 920) | def restart_webui(self):
    method stop_webui (line 925) | def stop_webui(request):

FILE: modules/api/models.py
  class ModelDef (line 27) | class ModelDef(BaseModel):
  class PydanticModelGenerator (line 37) | class PydanticModelGenerator:
    method __init__ (line 44) | def __init__(
    method generate_model (line 87) | def generate_model(self):
  class TextToImageResponse (line 134) | class TextToImageResponse(BaseModel):
  class ImageToImageResponse (line 139) | class ImageToImageResponse(BaseModel):
  class ExtrasBaseRequest (line 144) | class ExtrasBaseRequest(BaseModel):
  class ExtraBaseResponse (line 159) | class ExtraBaseResponse(BaseModel):
  class ExtrasSingleImageRequest (line 162) | class ExtrasSingleImageRequest(ExtrasBaseRequest):
  class ExtrasSingleImageResponse (line 165) | class ExtrasSingleImageResponse(ExtraBaseResponse):
  class FileData (line 168) | class FileData(BaseModel):
  class ExtrasBatchImagesRequest (line 172) | class ExtrasBatchImagesRequest(ExtrasBaseRequest):
  class ExtrasBatchImagesResponse (line 175) | class ExtrasBatchImagesResponse(ExtraBaseResponse):
  class PNGInfoRequest (line 178) | class PNGInfoRequest(BaseModel):
  class PNGInfoResponse (line 181) | class PNGInfoResponse(BaseModel):
  class ProgressRequest (line 186) | class ProgressRequest(BaseModel):
  class ProgressResponse (line 189) | class ProgressResponse(BaseModel):
  class InterrogateRequest (line 196) | class InterrogateRequest(BaseModel):
  class InterrogateResponse (line 200) | class InterrogateResponse(BaseModel):
  class TrainResponse (line 203) | class TrainResponse(BaseModel):
  class CreateResponse (line 206) | class CreateResponse(BaseModel):
  class SamplerItem (line 233) | class SamplerItem(BaseModel):
  class SchedulerItem (line 238) | class SchedulerItem(BaseModel):
  class UpscalerItem (line 245) | class UpscalerItem(BaseModel):
  class LatentUpscalerModeItem (line 252) | class LatentUpscalerModeItem(BaseModel):
  class SDModelItem (line 255) | class SDModelItem(BaseModel):
  class SDVaeItem (line 263) | class SDVaeItem(BaseModel):
  class HypernetworkItem (line 267) | class HypernetworkItem(BaseModel):
  class FaceRestorerItem (line 271) | class FaceRestorerItem(BaseModel):
  class RealesrganItem (line 275) | class RealesrganItem(BaseModel):
  class PromptStyleItem (line 280) | class PromptStyleItem(BaseModel):
  class EmbeddingItem (line 286) | class EmbeddingItem(BaseModel):
  class EmbeddingsResponse (line 293) | class EmbeddingsResponse(BaseModel):
  class MemoryResponse (line 297) | class MemoryResponse(BaseModel):
  class ScriptsList (line 302) | class ScriptsList(BaseModel):
  class ScriptArg (line 307) | class ScriptArg(BaseModel):
  class ScriptInfo (line 316) | class ScriptInfo(BaseModel):
  class ExtensionItem (line 322) | class ExtensionItem(BaseModel):

FILE: modules/cache.py
  function dump_cache (line 17) | def dump_cache():
  function make_cache (line 23) | def make_cache(subsection: str) -> diskcache.Cache:
  function convert_old_cached_data (line 31) | def convert_old_cached_data():
  function cache (line 56) | def cache(subsection):
  function cached_data_for_file (line 81) | def cached_data_for_file(subsection, title, filename, func):

FILE: modules/call_queue.py
  function wrap_queued_call (line 11) | def wrap_queued_call(func):
  function wrap_gradio_gpu_call (line 21) | def wrap_gradio_gpu_call(func, extra_outputs=None):
  function wrap_gradio_call (line 49) | def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
  function wrap_gradio_call_no_job (line 65) | def wrap_gradio_call_no_job(func, extra_outputs=None, add_stats=False):

FILE: modules/codeformer_model.py
  class FaceRestorerCodeFormer (line 25) | class FaceRestorerCodeFormer(face_restoration_utils.CommonFaceRestoration):
    method name (line 26) | def name(self):
    method load_net (line 29) | def load_net(self) -> torch.Module:
    method get_device (line 44) | def get_device(self):
    method restore (line 47) | def restore(self, np_image, w: float | None = None):
  function setup_model (line 58) | def setup_model(dirname: str) -> None:

FILE: modules/config_states.py
  function list_config_states (line 18) | def list_config_states():
  function get_webui_config (line 48) | def get_webui_config():
  function get_extension_config (line 80) | def get_extension_config():
  function get_config (line 103) | def get_config():
  function restore_webui_config (line 115) | def restore_webui_config(config):
  function restore_extension_config (line 146) | def restore_extension_config(config):

FILE: modules/dat_model.py
  class UpscalerDAT (line 9) | class UpscalerDAT(Upscaler):
    method __init__ (line 10) | def __init__(self, user_path):
    method do_upscale (line 25) | def do_upscale(self, img, path):
    method load_model (line 45) | def load_model(self, path):
  function get_dat_models (line 59) | def get_dat_models(scaler):

FILE: modules/deepbooru.py
  class DeepDanbooru (line 12) | class DeepDanbooru:
    method __init__ (line 13) | def __init__(self):
    method load (line 16) | def load(self):
    method start (line 33) | def start(self):
    method stop (line 37) | def stop(self):
    method tag (line 42) | def tag(self, pil_image):
    method tag_multi (line 49) | def tag_multi(self, pil_image, force_disable_ranks=False):

FILE: modules/deepbooru_model.py
  class DeepDanbooruModel (line 10) | class DeepDanbooruModel(nn.Module):
    method __init__ (line 11) | def __init__(self):
    method forward (line 197) | def forward(self, *inputs):
    method load_state_dict (line 674) | def load_state_dict(self, state_dict, **kwargs):

FILE: modules/devices.py
  function has_xpu (line 15) | def has_xpu() -> bool:
  function has_mps (line 19) | def has_mps() -> bool:
  function cuda_no_autocast (line 26) | def cuda_no_autocast(device_id=None) -> bool:
  function get_cuda_device_id (line 35) | def get_cuda_device_id():
  function get_cuda_device_string (line 43) | def get_cuda_device_string():
  function get_optimal_device_name (line 50) | def get_optimal_device_name():
  function get_optimal_device (line 66) | def get_optimal_device():
  function get_device_for (line 70) | def get_device_for(task):
  function torch_gc (line 77) | def torch_gc():
  function torch_npu_set_device (line 95) | def torch_npu_set_device():
  function enable_tf32 (line 101) | def enable_tf32():
  function cond_cast_unet (line 132) | def cond_cast_unet(input):
  function cond_cast_float (line 138) | def cond_cast_float(input):
  function manual_cast_forward (line 152) | def manual_cast_forward(target_dtype):
  function manual_cast (line 188) | def manual_cast(target_dtype):
  function autocast (line 210) | def autocast(disable=False):
  function without_autocast (line 234) | def without_autocast(disable=False):
  class NansException (line 238) | class NansException(Exception):
  function test_for_nans (line 242) | def test_for_nans(x, where):
  function first_time_calculation (line 269) | def first_time_calculation():
  function force_model_fp16 (line 284) | def force_model_fp16():

FILE: modules/errors.py
  function format_traceback (line 9) | def format_traceback(tb):
  function format_exception (line 13) | def format_exception(e, tb):
  function get_exceptions (line 17) | def get_exceptions():
  function record_exception (line 24) | def record_exception():
  function report (line 38) | def report(message: str, *, exc_info: bool = False) -> None:
  function print_error_explanation (line 52) | def print_error_explanation(message):
  function display (line 64) | def display(e: Exception, task, *, full_traceback=False):
  function display_once (line 85) | def display_once(e: Exception, task):
  function run (line 96) | def run(code, task):
  function check_versions (line 103) | def check_versions():

FILE: modules/esrgan_model.py
  class UpscalerESRGAN (line 7) | class UpscalerESRGAN(Upscaler):
    method __init__ (line 8) | def __init__(self, dirname):
    method do_upscale (line 29) | def do_upscale(self, img, selected_model):
    method load_model (line 38) | def load_model(self, path: str):
  function esrgan_upscale (line 56) | def esrgan_upscale(model, img):

FILE: modules/extensions.py
  function active (line 21) | def active():
  class CallbackOrderInfo (line 31) | class CallbackOrderInfo:
  class ExtensionMetadata (line 37) | class ExtensionMetadata:
    method __init__ (line 43) | def __init__(self, path, canonical_name):
    method get_script_requirements (line 59) | def get_script_requirements(self, field, section, extra_section=None):
    method parse_list (line 79) | def parse_list(self, text):
    method list_callback_order_instructions (line 88) | def list_callback_order_instructions(self):
  class Extension (line 105) | class Extension:
    method __init__ (line 110) | def __init__(self, name, path, enabled=True, is_builtin=False, metadat...
    method to_dict (line 126) | def to_dict(self):
    method from_dict (line 129) | def from_dict(self, d):
    method read_info_from_repo (line 133) | def read_info_from_repo(self):
    method do_read_info_from_repo (line 153) | def do_read_info_from_repo(self):
    method list_files (line 179) | def list_files(self, subdir, extension):
    method check_updates (line 192) | def check_updates(self):
    method fetch_and_reset_hard (line 217) | def fetch_and_reset_hard(self, commit=None):
  function list_extensions (line 228) | def list_extensions():
  function find_extension (line 287) | def find_extension(filename):

FILE: modules/extra_networks.py
  function initialize (line 13) | def initialize():
  function register_extra_network (line 18) | def register_extra_network(extra_network):
  function register_extra_network_alias (line 22) | def register_extra_network_alias(extra_network, alias):
  function register_default_extra_networks (line 26) | def register_default_extra_networks():
  class ExtraNetworkParams (line 31) | class ExtraNetworkParams:
    method __init__ (line 32) | def __init__(self, items=None):
    method __eq__ (line 44) | def __eq__(self, other):
  class ExtraNetwork (line 48) | class ExtraNetwork:
    method __init__ (line 49) | def __init__(self, name):
    method activate (line 52) | def activate(self, p, params_list):
    method deactivate (line 82) | def deactivate(self, p):
  function lookup_extra_networks (line 90) | def lookup_extra_networks(extra_network_data):
  function activate (line 126) | def activate(p, extra_network_data):
  function deactivate (line 153) | def deactivate(p, extra_network_data):
  function parse_prompt (line 178) | def parse_prompt(prompt):
  function parse_prompts (line 194) | def parse_prompts(prompts):
  function get_user_metadata (line 209) | def get_user_metadata(filename, lister=None):

FILE: modules/extra_networks_hypernet.py
  class ExtraNetworkHypernet (line 5) | class ExtraNetworkHypernet(extra_networks.ExtraNetwork):
    method __init__ (line 6) | def __init__(self):
    method activate (line 9) | def activate(self, p, params_list):
    method deactivate (line 27) | def deactivate(self, p):

FILE: modules/extras.py
  function run_pnginfo (line 16) | def run_pnginfo(image):
  function create_config (line 39) | def create_config(ckpt_result, config_source, a, b, c):
  function to_half (line 68) | def to_half(tensor, enable):
  function read_metadata (line 75) | def read_metadata(primary_model_name, secondary_model_name, tertiary_mod...
  function run_modelmerger (line 88) | def run_modelmerger(id_task, primary_model_name, secondary_model_name, t...

FILE: modules/face_restoration.py
  class FaceRestoration (line 4) | class FaceRestoration:
    method name (line 5) | def name(self):
    method restore (line 8) | def restore(self, np_image):
  function restore_faces (line 12) | def restore_faces(np_image):

FILE: modules/face_restoration_utils.py
  function bgr_image_to_rgb_tensor (line 20) | def bgr_image_to_rgb_tensor(img: np.ndarray) -> torch.Tensor:
  function rgb_tensor_to_bgr_image (line 29) | def rgb_tensor_to_bgr_image(tensor: torch.Tensor, *, min_max=(0.0, 1.0))...
  function create_face_helper (line 42) | def create_face_helper(device) -> FaceRestoreHelper:
  function restore_with_face_helper (line 58) | def restore_with_face_helper(
  class CommonFaceRestoration (line 113) | class CommonFaceRestoration(face_restoration.FaceRestoration):
    method __init__ (line 118) | def __init__(self, model_path: str):
    method face_helper (line 125) | def face_helper(self) -> FaceRestoreHelper:
    method send_model_to (line 128) | def send_model_to(self, device):
    method get_device (line 137) | def get_device(self):
    method load_net (line 140) | def load_net(self) -> torch.Module:
    method restore_with_helper (line 143) | def restore_with_helper(
  function patch_facexlib (line 163) | def patch_facexlib(dirname: str) -> None:

FILE: modules/fifo_lock.py
  class FIFOLock (line 6) | class FIFOLock(object):
    method __init__ (line 7) | def __init__(self):
    method acquire (line 12) | def acquire(self, blocking=True):
    method release (line 26) | def release(self):
    method __exit__ (line 36) | def __exit__(self, t, v, tb):

FILE: modules/gfpgan_model.py
  class FaceRestorerGFPGAN (line 23) | class FaceRestorerGFPGAN(face_restoration_utils.CommonFaceRestoration):
    method name (line 24) | def name(self):
    method get_device (line 27) | def get_device(self):
    method load_net (line 30) | def load_net(self) -> torch.Module:
    method restore (line 46) | def restore(self, np_image):
  function gfpgan_fix_faces (line 54) | def gfpgan_fix_faces(np_image):
  function setup_model (line 61) | def setup_model(dirname: str) -> None:

FILE: modules/gitpython_hack.py
  class Git (line 9) | class Git(git.Git):
    method _get_persistent_cmd (line 14) | def _get_persistent_cmd(self, attr_name, cmd_name, *args, **kwargs):
    method get_object_header (line 17) | def get_object_header(self, ref: str | bytes) -> tuple[str, str, int]:
    method stream_object_data (line 26) | def stream_object_data(self, ref: str) -> tuple[str, str, int, Git.Cat...
  class Repo (line 41) | class Repo(git.Repo):

FILE: modules/gradio_extensons.py
  function add_classes_to_gradio_component (line 6) | def add_classes_to_gradio_component(comp):
  function IOComponent_init (line 17) | def IOComponent_init(self, *args, **kwargs):
  function Block_get_config (line 37) | def Block_get_config(self):
  function BlockContext_init (line 49) | def BlockContext_init(self, *args, **kwargs):
  function Blocks_get_config_file (line 67) | def Blocks_get_config_file(self, *args, **kwargs):

FILE: modules/hashes.py
  function calculate_sha256 (line 11) | def calculate_sha256(filename):
  function sha256_from_cache (line 22) | def sha256_from_cache(filename, title, use_addnet_hash=False):
  function sha256 (line 41) | def sha256(filename, title, use_addnet_hash=False):
  function addnet_hash_safetensors (line 69) | def addnet_hash_safetensors(b):

FILE: modules/hat_model.py
  class UpscalerHAT (line 10) | class UpscalerHAT(Upscaler):
    method __init__ (line 11) | def __init__(self, dirname):
    method do_upscale (line 22) | def do_upscale(self, img, selected_model):
    method load_model (line 36) | def load_model(self, path: str):

FILE: modules/hypernetworks/hypernetwork.py
  class HypernetworkModule (line 25) | class HypernetworkModule(torch.nn.Module):
    method __init__ (line 37) | def __init__(self, dim, state_dict=None, layer_structure=None, activat...
    method fix_old_state_dict (line 101) | def fix_old_state_dict(self, state_dict):
    method forward (line 117) | def forward(self, x):
    method trainables (line 120) | def trainables(self):
  function parse_dropout_structure (line 129) | def parse_dropout_structure(layer_structure, use_dropout, last_layer_dro...
  class Hypernetwork (line 144) | class Hypernetwork:
    method __init__ (line 148) | def __init__(self, name=None, enable_sizes=None, layer_structure=None,...
    method weights (line 178) | def weights(self):
    method train (line 185) | def train(self, mode=True):
    method to (line 192) | def to(self, device):
    method set_multiplier (line 199) | def set_multiplier(self, multiplier):
    method eval (line 206) | def eval(self):
    method save (line 213) | def save(self, filename):
    method load (line 243) | def load(self, filename):
    method shorthash (line 306) | def shorthash(self):
  function list_hypernetworks (line 312) | def list_hypernetworks(path):
  function load_hypernetwork (line 322) | def load_hypernetwork(name):
  function load_hypernetworks (line 337) | def load_hypernetworks(names, multipliers=None):
  function apply_single_hypernetwork (line 358) | def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=...
  function apply_hypernetworks (line 373) | def apply_hypernetworks(hypernetworks, context, layer=None):
  function attention_CrossAttention_forward (line 382) | def attention_CrossAttention_forward(self, x, context=None, mask=None, *...
  function stack_conds (line 410) | def stack_conds(conds):
  function statistics (line 425) | def statistics(data):
  function create_hypernetwork (line 440) | def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structu...
  function train_hypernetwork (line 472) | def train_hypernetwork(id_task, hypernetwork_name: str, learn_rate: floa...
  function save_hypernetwork (line 770) | def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filen...

FILE: modules/hypernetworks/ui.py
  function create_hypernetwork (line 11) | def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structu...
  function train_hypernetwork (line 17) | def train_hypernetwork(*args):

FILE: modules/images.py
  function get_font (line 29) | def get_font(fontsize: int):
  function image_grid (line 36) | def image_grid(imgs, batch_size=1, rows=None):
  class Grid (line 69) | class Grid(namedtuple("_Grid", ["tiles", "tile_w", "tile_h", "image_w", ...
    method tile_count (line 71) | def tile_count(self) -> int:
  function split_grid (line 78) | def split_grid(image: Image.Image, tile_w: int = 512, tile_h: int = 512,...
  function combine_grid (line 114) | def combine_grid(grid):
  class GridAnnotation (line 144) | class GridAnnotation:
    method __init__ (line 145) | def __init__(self, text='', is_active=True):
  function draw_grid_annotations (line 151) | def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margi...
  function draw_prompt_matrix (line 239) | def draw_prompt_matrix(im, width, height, all_prompts, margin=0):
  function resize_image (line 252) | def resize_image(resize_mode, im, width, height, upscaler_name=None):
  function sanitize_filename_part (line 342) | def sanitize_filename_part(text, replace_spaces=True):
  function get_scheduler_str (line 356) | def get_scheduler_str(sampler_name, scheduler_name):
  function get_sampler_scheduler_str (line 365) | def get_sampler_scheduler_str(sampler_name, scheduler_name):
  function get_sampler_scheduler (line 370) | def get_sampler_scheduler(p, sampler):
  class FilenameGenerator (line 381) | class FilenameGenerator:
    method __init__ (line 420) | def __init__(self, p, seed, prompt, image, zip=False, basename=""):
    method get_vae_filename (line 428) | def get_vae_filename(self):
    method hasprompt (line 444) | def hasprompt(self, *args):
    method prompt_no_style (line 460) | def prompt_no_style(self):
    method prompt_words (line 474) | def prompt_words(self):
    method datetime (line 480) | def datetime(self, *args):
    method image_hash (line 497) | def image_hash(self, *args):
    method string_hash (line 501) | def string_hash(self, text, *args):
    method apply (line 505) | def apply(self, x):
  function get_next_sequence_number (line 543) | def get_next_sequence_number(path, basename):
  function save_image_with_geninfo (line 565) | def save_image_with_geninfo(image, geninfo, filename, extension=None, ex...
  function save_image (line 624) | def save_image(image, path, basename, seed=None, prompt=None, extension=...
  function read_info_from_image (line 777) | def read_info_from_image(image: Image.Image) -> tuple[str | None, dict]:
  function image_data (line 820) | def image_data(data):
  function flatten (line 841) | def flatten(img, bgcolor):
  function read (line 852) | def read(fp, **kwargs):
  function fix_image (line 859) | def fix_image(image: Image.Image):
  function fix_png_transparency (line 872) | def fix_png_transparency(image: Image.Image):

FILE: modules/img2img.py
  function process_batch (line 20) | def process_batch(p, input, output_dir, inpaint_mask_dir, args, to_scale...
  function img2img (line 152) | def img2img(id_task: str, request: gr.Request, mode: int, prompt: str, n...

FILE: modules/infotext_utils.py
  class ParamBinding (line 23) | class ParamBinding:
    method __init__ (line 24) | def __init__(self, paste_button, tabname, source_text_component=None, ...
  class PasteField (line 34) | class PasteField(tuple):
    method __new__ (line 35) | def __new__(cls, component, target, *, api=None):
    method __init__ (line 38) | def __init__(self, component, target, *, api=None):
  function reset (line 51) | def reset():
  function quote (line 56) | def quote(text):
  function unquote (line 63) | def unquote(text):
  function image_from_url_text (line 73) | def image_from_url_text(filedata):
  function add_paste_fields (line 102) | def add_paste_fields(tabname, init_img, fields, override_settings_compon...
  function create_buttons (line 119) | def create_buttons(tabs_list):
  function bind_buttons (line 126) | def bind_buttons(buttons, send_image, send_generate_info):
  function register_paste_params_button (line 135) | def register_paste_params_button(binding: ParamBinding):
  function connect_paste_params_buttons (line 139) | def connect_paste_params_buttons():
  function send_image_and_dimensions (line 186) | def send_image_and_dimensions(x):
  function restore_old_hires_fix_params (line 202) | def restore_old_hires_fix_params(res):
  function parse_generation_parameters (line 234) | def parse_generation_parameters(x: str, skip_fields: list[str] | None = ...
  function create_override_settings_dict (line 404) | def create_override_settings_dict(text_pairs):
  function get_override_settings (line 434) | def get_override_settings(params, *, skip_fields=None):
  function connect_paste (line 476) | def connect_paste(button, paste_fields, input_comp, override_settings_co...

FILE: modules/infotext_versions.py
  function parse_version (line 12) | def parse_version(text):
  function backcompat (line 26) | def backcompat(d):

FILE: modules/initialize.py
  function imports (line 11) | def imports():
  function check_versions (line 43) | def check_versions():
  function initialize (line 51) | def initialize():
  function initialize_rest (line 78) | def initialize_rest(*, reload_script_modules=False):

FILE: modules/initialize_util.py
  function gradio_server_name (line 10) | def gradio_server_name():
  function fix_torch_version (line 19) | def fix_torch_version():
  function fix_pytorch_lightning (line 27) | def fix_pytorch_lightning():
  function fix_asyncio_event_loop_policy (line 35) | def fix_asyncio_event_loop_policy():
  function restore_config_state_file (line 75) | def restore_config_state_file():
  function validate_tls_options (line 95) | def validate_tls_options():
  function get_gradio_auth_creds (line 114) | def get_gradio_auth_creds():
  function dumpstacks (line 142) | def dumpstacks():
  function configure_sigint_handler (line 158) | def configure_sigint_handler():
  function configure_opts_onchange (line 177) | def configure_opts_onchange():
  function setup_middleware (line 192) | def setup_middleware(app):
  function configure_cors_middleware (line 201) | def configure_cors_middleware(app):

FILE: modules/interrogate.py
  function category_types (line 22) | def category_types():
  function download_default_clip_interrogate_categories (line 26) | def download_default_clip_interrogate_categories(content_dir):
  class InterrogateModels (line 45) | class InterrogateModels:
    method __init__ (line 52) | def __init__(self, content_dir):
    method categories (line 58) | def categories(self):
    method create_fake_fairscale (line 83) | def create_fake_fairscale(self):
    method load_blip_model (line 90) | def load_blip_model(self):
    method load_clip_model (line 106) | def load_clip_model(self):
    method load (line 119) | def load(self):
    method send_clip_to_ram (line 136) | def send_clip_to_ram(self):
    method send_blip_to_ram (line 141) | def send_blip_to_ram(self):
    method unload (line 146) | def unload(self):
    method rank (line 152) | def rank(self, image_features, text_array, top_count=1):
    method generate_caption (line 173) | def generate_caption(self, pil_image):
    method interrogate (line 185) | def interrogate(self, pil_image):

FILE: modules/launch_utils.py
  function check_python_version (line 34) | def check_python_version():
  function commit_hash (line 66) | def commit_hash():
  function git_tag (line 74) | def git_tag():
  function run (line 89) | def run(command, desc=None, errdesc=None, custom_env=None, live: bool = ...
  function is_installed (line 121) | def is_installed(package):
  function repo_dir (line 135) | def repo_dir(name):
  function run_pip (line 139) | def run_pip(command, desc=None, live=default_command_live):
  function check_run_python (line 147) | def check_run_python(code: str) -> bool:
  function git_fix_workspace (line 152) | def git_fix_workspace(dir, name):
  function run_git (line 158) | def run_git(dir, name, command, desc=None, errdesc=None, custom_env=None...
  function git_clone (line 171) | def git_clone(url, dir, name, commithash=None):
  function git_pull_recursive (line 201) | def git_pull_recursive(dir):
  function version_check (line 211) | def version_check(commit):
  function run_extension_installer (line 228) | def run_extension_installer(extension_dir):
  function list_extensions (line 244) | def list_extensions(settings_file):
  function run_extensions_installers (line 265) | def run_extensions_installers(settings_file):
  function requirements_met (line 283) | def requirements_met(requirements_file):
  function prepare_environment (line 318) | def prepare_environment():
  function configure_for_tests (line 449) | def configure_for_tests():
  function start (line 463) | def start():
  function dump_sysinfo (line 472) | def dump_sysinfo():

FILE: modules/localization.py
  function list_localizations (line 9) | def list_localizations(dirname):
  function localization_js (line 26) | def localization_js(current_localization_name: str) -> str:

FILE: modules/logging_config.py
  class TqdmLoggingHandler (line 8) | class TqdmLoggingHandler(logging.Handler):
    method __init__ (line 9) | def __init__(self, fallback_handler: logging.Handler):
    method emit (line 13) | def emit(self, record):
  function setup_logging (line 28) | def setup_logging(loglevel):

FILE: modules/lowvram.py
  function send_everything_to_cpu (line 11) | def send_everything_to_cpu():
  function is_needed (line 20) | def is_needed(sd_model):
  function apply (line 24) | def apply(sd_model):
  function setup_for_low_vram (line 34) | def setup_for_low_vram(sd_model, use_medvram):
  function is_enabled (line 164) | def is_enabled(sd_model):

FILE: modules/mac_specific.py
  function check_for_mps (line 17) | def check_for_mps() -> bool:
  function torch_mps_gc (line 33) | def torch_mps_gc() -> None:
  function cumsum_fix (line 45) | def cumsum_fix(input, cumsum_func, *args, **kwargs):
  function interpolate_with_fp32_fallback (line 56) | def interpolate_with_fp32_fallback(orig_func, *args, **kwargs) -> Tensor:

FILE: modules/masking.py
  function get_crop_region_v2 (line 4) | def get_crop_region_v2(mask, pad=0):
  function get_crop_region (line 22) | def get_crop_region(mask, pad=0):
  function expand_crop_region (line 39) | def expand_crop_region(crop_region, processing_width, processing_height,...
  function fill (line 80) | def fill(image, mask):

FILE: modules/memmon.py
  class MemUsageMonitor (line 8) | class MemUsageMonitor(threading.Thread):
    method __init__ (line 15) | def __init__(self, name, device, opts):
    method cuda_mem_get_info (line 32) | def cuda_mem_get_info(self):
    method run (line 36) | def run(self):
    method dump_debug (line 58) | def dump_debug(self):
    method monitor (line 72) | def monitor(self):
    method read (line 75) | def read(self):
    method stop (line 90) | def stop(self):

FILE: modules/modelloader.py
  function load_file_from_url (line 20) | def load_file_from_url(
  function load_models (line 44) | def load_models(model_path: str, model_url: str = None, command_path: st...
  function friendly_name (line 93) | def friendly_name(file: str):
  function load_upscalers (line 102) | def load_upscalers():
  function _init_spandrel_extra_archs (line 146) | def _init_spandrel_extra_archs() -> None:
  function load_spandrel_model (line 164) | def load_spandrel_model(

FILE: modules/models/diffusion/ddpm_edit.py
  class VQModelInterface (line 34) | class VQModelInterface:
  function disabled_train (line 42) | def disabled_train(self, mode=True):
  function uniform_on_device (line 48) | def uniform_on_device(r1, r2, shape, device):
  class DDPM (line 52) | class DDPM(pl.LightningModule):
    method __init__ (line 54) | def __init__(self,
    method register_schedule (line 133) | def register_schedule(self, given_betas=None, beta_schedule="linear", ...
    method ema_scope (line 188) | def ema_scope(self, context=None):
    method init_from_ckpt (line 202) | def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
    method q_mean_variance (line 243) | def q_mean_variance(self, x_start, t):
    method predict_start_from_noise (line 255) | def predict_start_from_noise(self, x_t, t, noise):
    method q_posterior (line 261) | def q_posterior(self, x_start, x_t, t):
    method p_mean_variance (line 270) | def p_mean_variance(self, x, t, clip_denoised: bool):
    method p_sample (line 283) | def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
    method p_sample_loop (line 292) | def p_sample_loop(self, shape, return_intermediates=False):
    method sample (line 307) | def sample(self, batch_size=16, return_intermediates=False):
    method q_sample (line 313) | def q_sample(self, x_start, t, noise=None):
    method get_loss (line 318) | def get_loss(self, pred, target, mean=True):
    method p_losses (line 333) | def p_losses(self, x_start, t, noise=None):
    method forward (line 362) | def forward(self, x, *args, **kwargs):
    method get_input (line 368) | def get_input(self, batch, k):
    method shared_step (line 371) | def shared_step(self, batch):
    method training_step (line 376) | def training_step(self, batch, batch_idx):
    method validation_step (line 392) | def validation_step(self, batch, batch_idx):
    method on_train_batch_end (line 400) | def on_train_batch_end(self, *args, **kwargs):
    method _get_rows_from_list (line 404) | def _get_rows_from_list(self, samples):
    method log_images (line 412) | def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=Non...
    method configure_optimizers (line 449) | def configure_optimizers(self):
  class LatentDiffusion (line 458) | class LatentDiffusion(DDPM):
    method __init__ (line 460) | def __init__(self,
    method make_cond_schedule (line 510) | def make_cond_schedule(self, ):
    method on_train_batch_start (line 517) | def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
    method register_schedule (line 532) | def register_schedule(self,
    method instantiate_first_stage (line 541) | def instantiate_first_stage(self, config):
    method instantiate_cond_stage (line 548) | def instantiate_cond_stage(self, config):
    method _get_denoise_row_from_list (line 569) | def _get_denoise_row_from_list(self, samples, desc='', force_no_decode...
    method get_first_stage_encoding (line 581) | def get_first_stage_encoding(self, encoder_posterior):
    method get_learned_conditioning (line 590) | def get_learned_conditioning(self, c):
    method meshgrid (line 603) | def meshgrid(self, h, w):
    method delta_border (line 610) | def delta_border(self, h, w):
    method get_weighting (line 624) | def get_weighting(self, h, w, Ly, Lx, device):
    method get_fold_unfold (line 640) | def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo...
    method get_input (line 693) | def get_input(self, batch, k, return_first_stage_outputs=False, force_...
    method decode_first_stage (line 726) | def decode_first_stage(self, z, predict_cids=False, force_not_quantize...
    method differentiable_decode_first_stage (line 786) | def differentiable_decode_first_stage(self, z, predict_cids=False, for...
    method encode_first_stage (line 846) | def encode_first_stage(self, x):
    method shared_step (line 885) | def shared_step(self, batch, **kwargs):
    method forward (line 890) | def forward(self, x, c, *args, **kwargs):
    method apply_model (line 901) | def apply_model(self, x_noisy, t, cond, return_ids=False):
    method _predict_eps_from_xstart (line 1004) | def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
    method _prior_bpd (line 1008) | def _prior_bpd(self, x_start):
    method p_losses (line 1022) | def p_losses(self, x_start, cond, t, noise=None):
    method p_mean_variance (line 1057) | def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codeboo...
    method p_sample (line 1089) | def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
    method progressive_denoising (line 1120) | def progressive_denoising(self, cond, shape, verbose=True, callback=No...
    method p_sample_loop (line 1178) | def p_sample_loop(self, cond, shape, return_intermediates=False,
    method sample (line 1231) | def sample(self, cond, batch_size=16, return_intermediates=False, x_T=...
    method sample_log (line 1249) | def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
    method log_images (line 1265) | def log_images(self, batch, N=4, n_row=4, sample=True, ddim_steps=200,...
    method configure_optimizers (line 1376) | def configure_optimizers(self):
    method to_rgb (line 1401) | def to_rgb(self, x):
  class DiffusionWrapper (line 1410) | class DiffusionWrapper(pl.LightningModule):
    method __init__ (line 1411) | def __init__(self, diff_model_config, conditioning_key):
    method forward (line 1417) | def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
  class Layout2ImgDiffusion (line 1439) | class Layout2ImgDiffusion(LatentDiffusion):
    method __init__ (line 1441) | def __init__(self, cond_stage_key, *args, **kwargs):
    method log_images (line 1445) | def log_images(self, batch, N=8, *args, **kwargs):

FILE: modules/models/diffusion/uni_pc/sampler.py
  class UniPCSampler (line 9) | class UniPCSampler(object):
    method __init__ (line 10) | def __init__(self, model, **kwargs):
    method register_buffer (line 18) | def register_buffer(self, name, attr):
    method set_hooks (line 24) | def set_hooks(self, before_sample, after_sample, after_update):
    method sample (line 30) | def sample(self,

FILE: modules/models/diffusion/uni_pc/uni_pc.py
  class NoiseScheduleVP (line 6) | class NoiseScheduleVP:
    method __init__ (line 7) | def __init__(
    method marginal_log_mean_coeff (line 125) | def marginal_log_mean_coeff(self, t):
    method marginal_alpha (line 138) | def marginal_alpha(self, t):
    method marginal_std (line 144) | def marginal_std(self, t):
    method marginal_lambda (line 150) | def marginal_lambda(self, t):
    method inverse_lambda (line 158) | def inverse_lambda(self, lamb):
  function model_wrapper (line 177) | def model_wrapper(
  class UniPC (line 372) | class UniPC:
    method __init__ (line 373) | def __init__(
    method dynamic_thresholding_fn (line 403) | def dynamic_thresholding_fn(self, x0, t=None):
    method model (line 414) | def model(self, x, t):
    method noise_prediction_fn (line 429) | def noise_prediction_fn(self, x, t):
    method data_prediction_fn (line 435) | def data_prediction_fn(self, x, t):
    method model_fn (line 450) | def model_fn(self, x, t):
    method get_time_steps (line 459) | def get_time_steps(self, skip_type, t_T, t_0, N, device):
    method get_orders_and_timesteps_for_singlestep_solver (line 476) | def get_orders_and_timesteps_for_singlestep_solver(self, steps, order,...
    method denoise_to_zero_fn (line 507) | def denoise_to_zero_fn(self, x, s):
    method multistep_uni_pc_update (line 513) | def multistep_uni_pc_update(self, x, model_prev_list, t_prev_list, t, ...
    method multistep_uni_pc_vary_update (line 522) | def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list...
    method multistep_uni_pc_bh_update (line 625) | def multistep_uni_pc_bh_update(self, x, model_prev_list, t_prev_list, ...
    method sample (line 746) | def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_...
  function interpolate_fn (line 811) | def interpolate_fn(x, xp, yp):
  function expand_dims (line 853) | def expand_dims(v, dims):

FILE: modules/models/sd3/mmdit.py
  class PatchEmbed (line 12) | class PatchEmbed(nn.Module):
    method __init__ (line 14) | def __init__(
    method forward (line 45) | def forward(self, x):
  function modulate (line 53) | def modulate(x, shift, scale):
  function get_2d_sincos_pos_embed (line 64) | def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra...
  function get_2d_sincos_pos_embed_from_grid (line 85) | def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
  function get_1d_sincos_pos_embed_from_grid (line 94) | def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
  class TimestepEmbedder (line 116) | class TimestepEmbedder(nn.Module):
    method __init__ (line 119) | def __init__(self, hidden_size, frequency_embedding_size=256, dtype=No...
    method timestep_embedding (line 129) | def timestep_embedding(t, dim, max_period=10000):
    method forward (line 152) | def forward(self, t, dtype, **kwargs):
  class VectorEmbedder (line 158) | class VectorEmbedder(nn.Module):
    method __init__ (line 161) | def __init__(self, input_dim: int, hidden_size: int, dtype=None, devic...
    method forward (line 169) | def forward(self, x: torch.Tensor) -> torch.Tensor:
  class QkvLinear (line 178) | class QkvLinear(torch.nn.Linear):
  function split_qkv (line 181) | def split_qkv(qkv, head_dim):
  function optimized_attention (line 185) | def optimized_attention(qkv, num_heads):
  class SelfAttention (line 188) | class SelfAttention(nn.Module):
    method __init__ (line 191) | def __init__(
    method pre_attention (line 227) | def pre_attention(self, x: torch.Tensor):
    method post_attention (line 235) | def post_attention(self, x: torch.Tensor) -> torch.Tensor:
    method forward (line 240) | def forward(self, x: torch.Tensor) -> torch.Tensor:
  class RMSNorm (line 247) | class RMSNorm(torch.nn.Module):
    method __init__ (line 248) | def __init__(
    method _norm (line 268) | def _norm(self, x):
    method forward (line 278) | def forward(self, x):
  class SwiGLUFeedForward (line 293) | class SwiGLUFeedForward(nn.Module):
    method __init__ (line 294) | def __init__(
    method forward (line 327) | def forward(self, x):
  class DismantledBlock (line 331) | class DismantledBlock(nn.Module):
    method __init__ (line 336) | def __init__(
    method pre_attention (line 378) | def pre_attention(self, x: torch.Tensor, c: torch.Tensor):
    method post_attention (line 398) | def post_attention(self, attn, x, gate_msa, shift_mlp, scale_mlp, gate...
    method forward (line 404) | def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
  function block_mixing (line 411) | def block_mixing(context, x, context_block, x_block, c):
  class JointBlock (line 433) | class JointBlock(nn.Module):
    method __init__ (line 436) | def __init__(self, *args, **kwargs):
    method forward (line 443) | def forward(self, *args, **kwargs):
  class FinalLayer (line 447) | class FinalLayer(nn.Module):
    method __init__ (line 452) | def __init__(self, hidden_size: int, patch_size: int, out_channels: in...
    method forward (line 462) | def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
  class MMDiT (line 469) | class MMDiT(nn.Module):
    method __init__ (line 472) | def __init__(
    method cropped_pos_embed (line 550) | def cropped_pos_embed(self, hw):
    method unpatchify (line 571) | def unpatchify(self, x, hw=None):
    method forward_core_with_concat (line 591) | def forward_core_with_concat(self, x: torch.Tensor, c_mod: torch.Tenso...
    method forward (line 603) | def forward(self, x: torch.Tensor, t: torch.Tensor, y: Optional[torch....

FILE: modules/models/sd3/other_impls.py
  class AutocastLinear (line 16) | class AutocastLinear(nn.Linear):
    method forward (line 24) | def forward(self, x):
  function attention (line 28) | def attention(q, k, v, heads, mask=None):
  class Mlp (line 37) | class Mlp(nn.Module):
    method __init__ (line 39) | def __init__(self, in_features, hidden_features=None, out_features=Non...
    method forward (line 48) | def forward(self, x):
  class CLIPAttention (line 60) | class CLIPAttention(torch.nn.Module):
    method __init__ (line 61) | def __init__(self, embed_dim, heads, dtype, device):
    method forward (line 69) | def forward(self, x, mask=None):
  class CLIPLayer (line 82) | class CLIPLayer(torch.nn.Module):
    method __init__ (line 83) | def __init__(self, embed_dim, heads, intermediate_size, intermediate_a...
    method forward (line 91) | def forward(self, x, mask=None):
  class CLIPEncoder (line 97) | class CLIPEncoder(torch.nn.Module):
    method __init__ (line 98) | def __init__(self, num_layers, embed_dim, heads, intermediate_size, in...
    method forward (line 102) | def forward(self, x, mask=None, intermediate_output=None):
  class CLIPEmbeddings (line 114) | class CLIPEmbeddings(torch.nn.Module):
    method __init__ (line 115) | def __init__(self, embed_dim, vocab_size=49408, num_positions=77, dtyp...
    method forward (line 120) | def forward(self, input_tokens):
  class CLIPTextModel_ (line 124) | class CLIPTextModel_(torch.nn.Module):
    method __init__ (line 125) | def __init__(self, config_dict, dtype, device):
    method forward (line 136) | def forward(self, input_tokens, intermediate_output=None, final_layer_...
  class CLIPTextModel (line 147) | class CLIPTextModel(torch.nn.Module):
    method __init__ (line 148) | def __init__(self, config_dict, dtype, device):
    method get_input_embeddings (line 157) | def get_input_embeddings(self):
    method set_input_embeddings (line 160) | def set_input_embeddings(self, embeddings):
    method forward (line 163) | def forward(self, *args, **kwargs):
  class SDTokenizer (line 169) | class SDTokenizer:
    method __init__ (line 170) | def __init__(self, max_length=77, pad_with_end=True, tokenizer=None, h...
    method tokenize_with_weights (line 190) | def tokenize_with_weights(self, text:str):
  class SDXLClipGTokenizer (line 211) | class SDXLClipGTokenizer(SDTokenizer):
    method __init__ (line 212) | def __init__(self, tokenizer):
  class SD3Tokenizer (line 216) | class SD3Tokenizer:
    method __init__ (line 217) | def __init__(self):
    method tokenize_with_weights (line 223) | def tokenize_with_weights(self, text:str):
  class ClipTokenWeightEncoder (line 231) | class ClipTokenWeightEncoder:
    method encode_token_weights (line 232) | def encode_token_weights(self, token_weight_pairs):
  class SDClipModel (line 243) | class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):
    method __init__ (line 246) | def __init__(self, device="cpu", max_length=77, layer="last", layer_id...
    method set_clip_options (line 268) | def set_clip_options(self, options):
    method forward (line 277) | def forward(self, tokens):
  class SDXLClipG (line 295) | class SDXLClipG(SDClipModel):
    method __init__ (line 297) | def __init__(self, config, device="cpu", layer="penultimate", layer_id...
  class T5XXLModel (line 304) | class T5XXLModel(SDClipModel):
    method __init__ (line 306) | def __init__(self, config, device="cpu", layer="last", layer_idx=None,...
  class T5XXLTokenizer (line 314) | class T5XXLTokenizer(SDTokenizer):
    method __init__ (line 316) | def __init__(self):
  class T5LayerNorm (line 320) | class T5LayerNorm(torch.nn.Module):
    method __init__ (line 321) | def __init__(self, hidden_size, eps=1e-6, dtype=None, device=None):
    method forward (line 326) | def forward(self, x):
  class T5DenseGatedActDense (line 332) | class T5DenseGatedActDense(torch.nn.Module):
    method __init__ (line 333) | def __init__(self, model_dim, ff_dim, dtype, device):
    method forward (line 339) | def forward(self, x):
  class T5LayerFF (line 347) | class T5LayerFF(torch.nn.Module):
    method __init__ (line 348) | def __init__(self, model_dim, ff_dim, dtype, device):
    method forward (line 353) | def forward(self, x):
  class T5Attention (line 360) | class T5Attention(torch.nn.Module):
    method __init__ (line 361) | def __init__(self, model_dim, inner_dim, num_heads, relative_attention...
    method _relative_position_bucket (line 376) | def _relative_position_bucket(relative_position, bidirectional=True, n...
    method compute_bias (line 418) | def compute_bias(self, query_length, key_length, device):
    method forward (line 433) | def forward(self, x, past_bias=None):
  class T5LayerSelfAttention (line 450) | class T5LayerSelfAttention(torch.nn.Module):
    method __init__ (line 451) | def __init__(self, model_dim, inner_dim, ff_dim, num_heads, relative_a...
    method forward (line 456) | def forward(self, x, past_bias=None):
  class T5Block (line 462) | class T5Block(torch.nn.Module):
    method __init__ (line 463) | def __init__(self, model_dim, inner_dim, ff_dim, num_heads, relative_a...
    method forward (line 469) | def forward(self, x, past_bias=None):
  class T5Stack (line 475) | class T5Stack(torch.nn.Module):
    method __init__ (line 476) | def __init__(self, num_layers, model_dim, inner_dim, ff_dim, num_heads...
    method forward (line 482) | def forward(self, input_ids, intermediate_output=None, final_layer_nor...
  class T5 (line 496) | class T5(torch.nn.Module):
    method __init__ (line 497) | def __init__(self, config_dict, dtype, device):
    method get_input_embeddings (line 503) | def get_input_embeddings(self):
    method set_input_embeddings (line 506) | def set_input_embeddings(self, embeddings):
    method forward (line 509) | def forward(self, *args, **kwargs):

FILE: modules/models/sd3/sd3_cond.py
  class SafetensorsMapping (line 12) | class SafetensorsMapping(typing.Mapping):
    method __init__ (line 13) | def __init__(self, file):
    method __len__ (line 16) | def __len__(self):
    method __iter__ (line 19) | def __iter__(self):
    method __getitem__ (line 23) | def __getitem__(self, key):
  class Sd3ClipLG (line 56) | class Sd3ClipLG(sd_hijack_clip.TextConditionalModel):
    method __init__ (line 57) | def __init__(self, clip_l, clip_g):
    method tokenize (line 72) | def tokenize(self, texts):
    method encode_with_transformers (line 75) | def encode_with_transformers(self, tokens):
    method encode_embedding_init_text (line 93) | def encode_embedding_init_text(self, init_text, nvpt):
  class Sd3T5 (line 97) | class Sd3T5(torch.nn.Module):
    method __init__ (line 98) | def __init__(self, t5xxl):
    method tokenize (line 108) | def tokenize(self, texts):
    method tokenize_line (line 111) | def tokenize_line(self, line, *, target_token_count=None):
    method forward (line 142) | def forward(self, texts, *, token_count):
    method encode_embedding_init_text (line 156) | def encode_embedding_init_text(self, init_text, nvpt):
  class SD3Cond (line 160) | class SD3Cond(torch.nn.Module):
    method __init__ (line 161) | def __init__(self, *args, **kwargs):
    method forward (line 178) | def forward(self, prompts: list[str]):
    method before_load_weights (line 189) | def before_load_weights(self, state_dict):
    method encode_embedding_init_text (line 207) | def encode_embedding_init_text(self, init_text, nvpt):
    method tokenize (line 210) | def tokenize(self, texts):
    method medvram_modules (line 213) | def medvram_modules(self):
    method get_token_count (line 216) | def get_token_count(self, text):
    method get_target_prompt_token_count (line 221) | def get_target_prompt_token_count(self, token_count):

FILE: modules/models/sd3/sd3_impls.py
  class ModelSamplingDiscreteFlow (line 15) | class ModelSamplingDiscreteFlow(torch.nn.Module):
    method __init__ (line 17) | def __init__(self, shift=1.0):
    method sigma_min (line 25) | def sigma_min(self):
    method sigma_max (line 29) | def sigma_max(self):
    method timestep (line 32) | def timestep(self, sigma):
    method sigma (line 35) | def sigma(self, timestep: torch.Tensor):
    method calculate_denoised (line 41) | def calculate_denoised(self, sigma, model_output, model_input):
    method noise_scaling (line 45) | def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
  class BaseModel (line 49) | class BaseModel(torch.nn.Module):
    method __init__ (line 51) | def __init__(self, shift=1.0, device=None, dtype=torch.float32, state_...
    method apply_model (line 72) | def apply_model(self, x, sigma, c_crossattn=None, y=None):
    method forward (line 78) | def forward(self, *args, **kwargs):
    method get_dtype (line 81) | def get_dtype(self):
  class CFGDenoiser (line 85) | class CFGDenoiser(torch.nn.Module):
    method __init__ (line 87) | def __init__(self, model):
    method forward (line 91) | def forward(self, x, timestep, cond, uncond, cond_scale):
  class SD3LatentFormat (line 100) | class SD3LatentFormat:
    method __init__ (line 102) | def __init__(self):
    method process_in (line 106) | def process_in(self, latent):
    method process_out (line 109) | def process_out(self, latent):
    method decode_latent_to_preview (line 112) | def decode_latent_to_preview(self, x0):
  function append_dims (line 139) | def append_dims(x, target_dims):
  function to_d (line 145) | def to_d(x, sigma, denoised):
  function sample_euler (line 152) | def sample_euler(model, x, sigmas, extra_args=None):
  function Normalize (line 171) | def Normalize(in_channels, num_groups=32, dtype=torch.float32, device=No...
  class ResnetBlock (line 175) | class ResnetBlock(torch.nn.Module):
    method __init__ (line 176) | def __init__(self, *, in_channels, out_channels=None, dtype=torch.floa...
    method forward (line 192) | def forward(self, x):
  class AttnBlock (line 205) | class AttnBlock(torch.nn.Module):
    method __init__ (line 206) | def __init__(self, in_channels, dtype=torch.float32, device=None):
    method forward (line 214) | def forward(self, x):
  class Downsample (line 227) | class Downsample(torch.nn.Module):
    method __init__ (line 228) | def __init__(self, in_channels, dtype=torch.float32, device=None):
    method forward (line 232) | def forward(self, x):
  class Upsample (line 239) | class Upsample(torch.nn.Module):
    method __init__ (line 240) | def __init__(self, in_channels, dtype=torch.float32, device=None):
    method forward (line 244) | def forward(self, x):
  class VAEEncoder (line 250) | class VAEEncoder(torch.nn.Module):
    method __init__ (line 251) | def __init__(self, ch=128, ch_mult=(1,2,4,4), num_res_blocks=2, in_cha...
    method forward (line 284) | def forward(self, x):
  class VAEDecoder (line 305) | class VAEDecoder(torch.nn.Module):
    method __init__ (line 306) | def __init__(self, ch=128, out_ch=3, ch_mult=(1, 2, 4, 4), num_res_blo...
    method forward (line 338) | def forward(self, z):
  class SDVAE (line 358) | class SDVAE(torch.nn.Module):
    method __init__ (line 359) | def __init__(self, dtype=torch.float32, device=None):
    method decode (line 365) | def decode(self, latent):
    method encode (line 369) | def encode(self, image):

FILE: modules/models/sd3/sd3_model.py
  class SD3Denoiser (line 12) | class SD3Denoiser(k_diffusion.external.DiscreteSchedule):
    method __init__ (line 13) | def __init__(self, inner_model, sigmas):
    method forward (line 17) | def forward(self, input, sigma, **kwargs):
  class SD3Inferencer (line 21) | class SD3Inferencer(torch.nn.Module):
    method __init__ (line 22) | def __init__(self, state_dict, shift=3, use_ema=False):
    method cond_stage_model (line 44) | def cond_stage_model(self):
    method before_load_weights (line 47) | def before_load_weights(self, state_dict):
    method ema_scope (line 50) | def ema_scope(self):
    method get_learned_conditioning (line 53) | def get_learned_conditioning(self, batch: list[str]):
    method apply_model (line 56) | def apply_model(self, x, t, cond):
    method decode_first_stage (line 59) | def decode_first_stage(self, latent):
    method encode_first_stage (line 63) | def encode_first_stage(self, image):
    method get_first_stage_encoding (line 67) | def get_first_stage_encoding(self, x):
    method create_denoiser (line 70) | def create_denoiser(self):
    method medvram_fields (line 73) | def medvram_fields(self):
    method add_noise_to_latent (line 80) | def add_noise_to_latent(self, x, noise, amount):
    method fix_dimensions (line 83) | def fix_dimensions(self, width, height):
    method diffusers_weight_mapping (line 86) | def diffusers_weight_mapping(self):

FILE: modules/ngrok.py
  function connect (line 4) | def connect(token, port, options):

FILE: modules/npu_specific.py
  function check_for_npu (line 7) | def check_for_npu():
  function get_npu_device_string (line 20) | def get_npu_device_string():
  function torch_npu_gc (line 26) | def torch_npu_gc():

FILE: modules/options.py
  class OptionInfo (line 13) | class OptionInfo:
    method __init__ (line 14) | def __init__(self, default=None, label="", component=None, component_a...
    method link (line 36) | def link(self, label, url):
    method js (line 40) | def js(self, label, js_func):
    method info (line 44) | def info(self, info):
    method html (line 48) | def html(self, html):
    method needs_restart (line 52) | def needs_restart(self):
    method needs_reload_ui (line 56) | def needs_reload_ui(self):
  class OptionHTML (line 61) | class OptionHTML(OptionInfo):
    method __init__ (line 62) | def __init__(self, text):
  function options_section (line 68) | def options_section(section_identifier, options_dict):
  class Options (line 82) | class Options:
    method __init__ (line 85) | def __init__(self, data_labels: dict[str, OptionInfo], restricted_opts):
    method __setattr__ (line 90) | def __setattr__(self, key, value):
    method __getattr__ (line 131) | def __getattr__(self, item):
    method set (line 144) | def set(self, key, value, is_api=False, run_callbacks=True):
    method get_default (line 173) | def get_default(self, key):
    method save (line 182) | def save(self, filename):
    method same_type (line 188) | def same_type(self, x, y):
    method load (line 197) | def load(self, filename):
    method onchange (line 229) | def onchange(self, key, func, call=True):
    method dumpjson (line 236) | def dumpjson(self):
    method add_option (line 256) | def add_option(self, key, info):
    method reorder (line 261) | def reorder(self):
    method cast_value (line 297) | def cast_value(self, key, value):
  class OptionsCategory (line 321) | class OptionsCategory:
  class OptionsCategories (line 325) | class OptionsCategories:
    method __init__ (line 326) | def __init__(self):
    method register_category (line 329) | def register_category(self, category_id, label):

FILE: modules/patches.py
  function patch (line 4) | def patch(key, obj, field, replacement):
  function undo (line 32) | def undo(key, obj, field):
  function original (line 57) | def original(key, obj, field):

FILE: modules/paths.py
  function mute_sdxl_imports (line 8) | def mute_sdxl_imports():

FILE: modules/postprocessing.py
  function run_postprocessing (line 9) | def run_postprocessing(extras_mode, image, image_folder, input_dir, outp...
  function run_postprocessing_webui (line 132) | def run_postprocessing_webui(id_task, *args, **kwargs):
  function run_extras (line 136) | def run_extras(extras_mode, resize_mode, image, image_folder, input_dir,...

FILE: modules/processing.py
  function setup_color_correction (line 43) | def setup_color_correction(image):
  function apply_color_correction (line 49) | def apply_color_correction(correction, original_image):
  function uncrop (line 65) | def uncrop(image, dest_size, paste_loc):
  function apply_overlay (line 75) | def apply_overlay(image, paste_loc, overlay):
  function create_binary_mask (line 90) | def create_binary_mask(image, round=True):
  function txt2img_image_conditioning (line 100) | def txt2img_image_conditioning(sd_model, x, width, height):
  class StableDiffusionProcessing (line 137) | class StableDiffusionProcessing:
    method __post_init__ (line 228) | def __post_init__(self):
    method fill_fields_from_opts (line 254) | def fill_fields_from_opts(self):
    method sd_model (line 262) | def sd_model(self):
    method sd_model (line 266) | def sd_model(self, value):
    method scripts (line 270) | def scripts(self):
    method scripts (line 274) | def scripts(self, value):
    method script_args (line 281) | def script_args(self):
    method script_args (line 285) | def script_args(self, value):
    method setup_scripts (line 291) | def setup_scripts(self):
    method comment (line 296) | def comment(self, text):
    method txt2img_image_conditioning (line 299) | def txt2img_image_conditioning(self, x, width=None, height=None):
    method depth2img_image_conditioning (line 304) | def depth2img_image_conditioning(self, source_image):
    method edit_image_conditioning (line 323) | def edit_image_conditioning(self, source_image):
    method unclip_image_conditioning (line 328) | def unclip_image_conditioning(self, source_image):
    method inpainting_image_conditioning (line 336) | def inpainting_image_conditioning(self, source_image, latent_image, im...
    method img2img_image_conditioning (line 375) | def img2img_image_conditioning(self, source_image, latent_image, image...
    method init (line 398) | def init(self, all_prompts, all_seeds, all_subseeds):
    method sample (line 401) | def sample(self, conditioning, unconditional_conditioning, seeds, subs...
    method close (line 404) | def close(self):
    method get_token_merging_ratio (line 412) | def get_token_merging_ratio(self, for_hr=False):
    method setup_prompts (line 418) | def setup_prompts(self):
    method cached_params (line 440) | def cached_params(self, required_prompts, steps, extra_network_data, h...
    method get_conds_with_caching (line 460) | def get_conds_with_caching(self, function, required_prompts, steps, ca...
    method setup_conds (line 493) | def setup_conds(self):
    method get_conds (line 505) | def get_conds(self):
    method parse_extra_network_prompts (line 508) | def parse_extra_network_prompts(self):
    method save_samples (line 511) | def save_samples(self) -> bool:
  class Processed (line 516) | class Processed:
    method __init__ (line 517) | def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1,...
    method js (line 571) | def js(self):
    method infotext (line 609) | def infotext(self, p: StableDiffusionProcessing, index):
    method get_token_merging_ratio (line 612) | def get_token_merging_ratio(self, for_hr=False):
  function create_random_tensors (line 616) | def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=...
  class DecodedSamples (line 621) | class DecodedSamples(list):
  function decode_latent_batch (line 625) | def decode_latent_batch(model, batch, target_device=None, check_for_nans...
  function get_fixed_seed (line 675) | def get_fixed_seed(seed):
  function fix_seed (line 690) | def fix_seed(p):
  function program_version (line 695) | def program_version():
  function create_infotext (line 705) | def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=No...
  function process_images (line 819) | def process_images(p: StableDiffusionProcessing) -> Processed:
  function process_images_inner (line 863) | def process_images_inner(p: StableDiffusionProcessing) -> Processed:
  function old_hires_fix_first_pass_dimensions (line 1153) | def old_hires_fix_first_pass_dimensions(width, height):
  class StableDiffusionProcessingTxt2Img (line 1166) | class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
    method __post_init__ (line 1201) | def __post_init__(self):
    method calculate_target_resolution (line 1213) | def calculate_target_resolution(self):
    method init (line 1252) | def init(self, all_prompts, all_seeds, all_subseeds):
    method sample (line 1307) | def sample(self, conditioning, unconditional_conditioning, seeds, subs...
    method sample_hr_pass (line 1364) | def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, su...
    method close (line 1466) | def close(self):
    method setup_prompts (line 1474) | def setup_prompts(self):
    method calculate_hr_conds (line 1499) | def calculate_hr_conds(self):
    method setup_conds (line 1513) | def setup_conds(self):
    method get_conds (line 1538) | def get_conds(self):
    method parse_extra_network_prompts (line 1544) | def parse_extra_network_prompts(self):
  class StableDiffusionProcessingImg2Img (line 1557) | class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
    method __post_init__ (line 1583) | def __post_init__(self):
    method mask_blur (line 1591) | def mask_blur(self):
    method mask_blur (line 1597) | def mask_blur(self, value):
    method init (line 1602) | def init(self, all_prompts, all_seeds, all_subseeds):
    method sample (line 1759) | def sample(self, conditioning, unconditional_conditioning, seeds, subs...
    method get_token_merging_ratio (line 1791) | def get_token_merging_ratio(self, for_hr=False):

FILE: modules/processing_scripts/comments.py
  function strip_comments (line 5) | def strip_comments(text):
  class ScriptStripComments (line 12) | class ScriptStripComments(scripts.Script):
    method title (line 13) | def title(self):
    method show (line 16) | def show(self, is_img2img):
    method process (line 19) | def process(self, p, *args):
  function before_token_counter (line 37) | def before_token_counter(params: script_callbacks.BeforeTokenCounterPara...

FILE: modules/processing_scripts/refiner.py
  class ScriptRefiner (line 9) | class ScriptRefiner(scripts.ScriptBuiltinUI):
    method __init__ (line 13) | def __init__(self):
    method title (line 16) | def title(self):
    method show (line 19) | def show(self, is_img2img):
    method ui (line 22) | def ui(self, is_img2img):
    method setup (line 42) | def setup(self, p, enable_refiner, refiner_checkpoint, refiner_switch_...

FILE: modules/processing_scripts/sampler.py
  class ScriptSampler (line 8) | class ScriptSampler(scripts.ScriptBuiltinUI):
    method __init__ (line 11) | def __init__(self):
    method title (line 16) | def title(self):
    method ui (line 19) | def ui(self, is_img2img):
    method setup (line 42) | def setup(self, p, steps, sampler_name, scheduler):

FILE: modules/processing_scripts/seed.py
  class ScriptSeed (line 12) | class ScriptSeed(scripts.ScriptBuiltinUI):
    method __init__ (line 16) | def __init__(self):
    method title (line 21) | def title(self):
    method show (line 24) | def show(self, is_img2img):
    method ui (line 27) | def ui(self, is_img2img):
    method setup (line 69) | def setup(self, p, seed, seed_checkbox, subseed, subseed_strength, see...
  function connect_reuse_seed (line 81) | def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generatio...

FILE: modules/profiling.py
  class Profiler (line 6) | class Profiler:
    method __init__ (line 7) | def __init__(self):
    method __enter__ (line 29) | def __enter__(self):
    method __exit__ (line 35) | def __exit__(self, exc_type, exc, exc_tb):
  function webpath (line 44) | def webpath():

FILE: modules/progress.py
  function start_task (line 23) | def start_task(id_task):
  function finish_task (line 30) | def finish_task(id_task):
  function create_task_id (line 40) | def create_task_id(task_type):
  function record_results (line 46) | def record_results(id_task, res):
  function add_task_to_queue (line 52) | def add_task_to_queue(id_job):
  class PendingTasksResponse (line 55) | class PendingTasksResponse(BaseModel):
  class ProgressRequest (line 59) | class ProgressRequest(BaseModel):
  class ProgressResponse (line 65) | class ProgressResponse(BaseModel):
  function setup_progress_api (line 76) | def setup_progress_api(app):
  function get_pending_tasks (line 81) | def get_pending_tasks():
  function progressapi (line 87) | def progressapi(req: ProgressRequest):
  function restore_progress (line 144) | def restore_progress(id_task):

FILE: modules/prompt_parser.py
  function get_learned_conditioning_prompt_schedules (line 28) | def get_learned_conditioning_prompt_schedules(prompts, base_steps, hires...
  class SdConditioning (line 139) | class SdConditioning(list):
    method __init__ (line 144) | def __init__(self, prompts, is_negative_prompt=False, width=None, heig...
  function get_learned_conditioning (line 157) | def get_learned_conditioning(model, prompts: SdConditioning | list[str],...
  function get_multicond_prompt_list (line 209) | def get_multicond_prompt_list(prompts: SdConditioning | list[str]):
  class ComposableScheduledPromptConditioning (line 240) | class ComposableScheduledPromptConditioning:
    method __init__ (line 241) | def __init__(self, schedules, weight=1.0):
  class MulticondLearnedConditioning (line 246) | class MulticondLearnedConditioning:
    method __init__ (line 247) | def __init__(self, shape, batch):
  function get_multicond_learned_conditioning (line 252) | def get_multicond_learned_conditioning(model, prompts, steps, hires_step...
  class DictWithShape (line 270) | class DictWithShape(dict):
    method __init__ (line 271) | def __init__(self, x, shape=None):
    method shape (line 276) | def shape(self):
  function reconstruct_cond_batch (line 280) | def reconstruct_cond_batch(c: list[list[ScheduledPromptConditioning]], c...
  function stack_conds (line 307) | def stack_conds(tensors):
  function reconstruct_multicond_batch (line 321) | def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current...
  function parse_prompt_attention (line 370) | def parse_prompt_attention(text):

FILE: modules/realesrgan_model.py
  class UpscalerRealESRGAN (line 9) | class UpscalerRealESRGAN(Upscaler):
    method __init__ (line 10) | def __init__(self, path):
    method do_upscale (line 29) | def do_upscale(self, img, path):
    method load_model (line 53) | def load_model(self, path):
  function get_realesrgan_models (line 67) | def get_realesrgan_models(scaler: UpscalerRealESRGAN):

FILE: modules/restart.py
  function is_restartable (line 7) | def is_restartable() -> bool:
  function restart_program (line 14) | def restart_program() -> None:
  function stop_program (line 24) | def stop_program() -> None:

FILE: modules/rng.py
  function randn (line 6) | def randn(seed, shape, generator=None):
  function randn_local (line 22) | def randn_local(seed, shape):
  function randn_like (line 36) | def randn_like(x):
  function randn_without_seed (line 50) | def randn_without_seed(shape, generator=None):
  function manual_seed (line 64) | def manual_seed(seed):
  function create_generator (line 75) | def create_generator(seed):
  function slerp (line 85) | def slerp(val, low, high):
  class ImageRNG (line 99) | class ImageRNG:
    method __init__ (line 100) | def __init__(self, shape, seeds, subseeds=None, subseed_strength=0.0, ...
    method first (line 112) | def first(self):
    method next (line 153) | def next(self):

FILE: modules/rng_philox.py
  function uint32 (line 27) | def uint32(x):
  function philox4_round (line 32) | def philox4_round(counter, key):
  function philox4_32 (line 44) | def philox4_32(counter, key, rounds=10):
  function box_muller (line 66) | def box_muller(x, y):
  class Generator (line 77) | class Generator:
    method __init__ (line 80) | def __init__(self, seed):
    method randn (line 84) | def randn(self, shape):

FILE: modules/safe.py
  function encode (line 18) | def encode(*args):
  class RestrictedUnpickler (line 23) | class RestrictedUnpickler(pickle.Unpickler):
    method persistent_load (line 26) | def persistent_load(self, saved_id):
    method find_class (line 34) | def find_class(self, module, name):
  function check_zip_filenames (line 71) | def check_zip_filenames(filename, names):
  function check_pt (line 79) | def check_pt(filename, extra_handler):
  function load (line 107) | def load(filename, *args, **kwargs):
  function load_with_extra (line 111) | def load_with_extra(filename, extra_handler=None, *args, **kwargs):
  class Extra (line 159) | class Extra:
    method __init__ (line 179) | def __init__(self, handler):
    method __enter__ (line 182) | def __enter__(self):
    method __exit__ (line 188) | def __exit__(self, exc_type, exc_val, exc_tb):

FILE: modules/script_callbacks.py
  function report_exception (line 14) | def report_exception(c, job):
  class ImageSaveParams (line 18) | class ImageSaveParams:
    method __init__ (line 19) | def __init__(self, image, p, filename, pnginfo):
  class ExtraNoiseParams (line 33) | class ExtraNoiseParams:
    method __init__ (line 34) | def __init__(self, noise, x, xi):
  class CFGDenoiserParams (line 45) | class CFGDenoiserParams:
    method __init__ (line 46) | def __init__(self, x, image_cond, sigma, sampling_step, total_sampling...
  class CFGDenoisedParams (line 72) | class CFGDenoisedParams:
    method __init__ (line 73) | def __init__(self, x, sampling_step, total_sampling_steps, inner_model):
  class AfterCFGCallbackParams (line 87) | class AfterCFGCallbackParams:
    method __init__ (line 88) | def __init__(self, x, sampling_step, total_sampling_steps):
  class UiTrainTabParams (line 99) | class UiTrainTabParams:
    method __init__ (line 100) | def __init__(self, txt2img_preview_params):
  class ImageGridLoopParams (line 104) | class ImageGridLoopParams:
    method __init__ (line 105) | def __init__(self, imgs, cols, rows):
  class BeforeTokenCounterParams (line 112) | class BeforeTokenCounterParams:
  class ScriptCallback (line 121) | class ScriptCallback:
  function add_callback (line 127) | def add_callback(callbacks, fun, *, name=None, category='unknown', filen...
  function sort_callbacks (line 150) | def sort_callbacks(category, unordered_callbacks, *, enable_user_sort=Tr...
  function ordered_callbacks (line 194) | def ordered_callbacks(category, unordered_callbacks=None, *, enable_user...
  function enumerate_callbacks (line 211) | def enumerate_callbacks():
  function clear_callbacks (line 246) | def clear_callbacks():
  function app_started_callback (line 253) | def app_started_callback(demo: Optional[Blocks], app: FastAPI):
  function app_reload_callback (line 262) | def app_reload_callback():
  function model_loaded_callback (line 270) | def model_loaded_callback(sd_model):
  function ui_tabs_callback (line 278) | def ui_tabs_callback():
  function ui_train_tabs_callback (line 290) | def ui_train_tabs_callback(params: UiTrainTabParams):
  function ui_settings_callback (line 298) | def ui_settings_callback():
  function before_image_saved_callback (line 306) | def before_image_saved_callback(params: ImageSaveParams):
  function image_saved_callback (line 314) | def image_saved_callback(params: ImageSaveParams):
  function extra_noise_callback (line 322) | def extra_noise_callback(params: ExtraNoiseParams):
  function cfg_denoiser_callback (line 330) | def cfg_denoiser_callback(params: CFGDenoiserParams):
  function cfg_denoised_callback (line 338) | def cfg_denoised_callback(params: CFGDenoisedParams):
  function cfg_after_cfg_callback (line 346) | def cfg_after_cfg_callback(params: AfterCFGCallbackParams):
  function before_component_callback (line 354) | def before_component_callback(component, **kwargs):
  function after_component_callback (line 362) | def after_component_callback(component, **kwargs):
  function image_grid_callback (line 370) | def image_grid_callback(params: ImageGridLoopParams):
  function infotext_pasted_callback (line 378) | def infotext_pasted_callback(infotext: str, params: dict[str, Any]):
  function script_unloaded_callback (line 386) | def script_unloaded_callback():
  function before_ui_callback (line 394) | def before_ui_callback():
  function list_optimizers_callback (line 402) | def list_optimizers_callback():
  function list_unets_callback (line 414) | def list_unets_callback():
  function before_token_counter_callback (line 426) | def before_token_counter_callback(params: BeforeTokenCounterParams):
  function remove_current_script_callbacks (line 434) | def remove_current_script_callbacks():
  function remove_callbacks_for_function (line 447) | def remove_callbacks_for_function(callback_func):
  function on_app_started (line 456) | def on_app_started(callback, *, name=None):
  function on_before_reload (line 462) | def on_before_reload(callback, *, name=None):
  function on_model_loaded (line 467) | def on_model_loaded(callback, *, name=None):
  function on_ui_tabs (line 473) | def on_ui_tabs(callback, *, name=None):
  function on_ui_train_tabs (line 486) | def on_ui_train_tabs(callback, *, name=None):
  function on_ui_settings (line 493) | def on_ui_settings(callback, *, name=None):
  function on_before_image_saved (line 499) | def on_before_image_saved(callback, *, name=None):
  function on_image_saved (line 507) | def on_image_saved(callback, *, name=None):
  function on_extra_noise (line 515) | def on_extra_noise(callback, *, name=None):
  function on_cfg_denoiser (line 523) | def on_cfg_denoiser(callback, *, name=None):
  function on_cfg_denoised (line 531) | def on_cfg_denoised(callback, *, name=None):
  function on_cfg_after_cfg (line 539) | def on_cfg_after_cfg(callback, *, name=None):
  function on_before_component (line 547) | def on_before_component(callback, *, name=None):
  function on_after_component (line 559) | def on_after_component(callback, *, name=None):
  function on_image_grid (line 564) | def on_image_grid(callback, *, name=None):
  function on_infotext_pasted (line 572) | def on_infotext_pasted(callback, *, name=None):
  function on_script_unloaded (line 581) | def on_script_unloaded(callback, *, name=None):
  function on_before_ui (line 588) | def on_before_ui(callback, *, name=None):
  function on_list_optimizers (line 594) | def on_list_optimizers(callback, *, name=None):
  function on_list_unets (line 602) | def on_list_unets(callback, *, name=None):
  function on_before_token_counter (line 609) | def on_before_token_counter(callback, *, name=None):

FILE: modules/script_loading.py
  function load_module (line 10) | def load_module(path):
  function preload_extensions (line 19) | def preload_extensions(extensions_dir, parser, extension_list=None):

FILE: modules/scripts.py
  class MaskBlendArgs (line 16) | class MaskBlendArgs:
    method __init__ (line 17) | def __init__(self, current_latent, nmask, init_latent, mask, blended_l...
  class PostSampleArgs (line 28) | class PostSampleArgs:
    method __init__ (line 29) | def __init__(self, samples):
  class PostprocessImageArgs (line 32) | class PostprocessImageArgs:
    method __init__ (line 33) | def __init__(self, image):
  class PostProcessMaskOverlayArgs (line 36) | class PostProcessMaskOverlayArgs:
    method __init__ (line 37) | def __init__(self, index, mask_for_overlay, overlay_image):
  class PostprocessBatchListArgs (line 42) | class PostprocessBatchListArgs:
    method __init__ (line 43) | def __init__(self, images):
  class OnComponent (line 48) | class OnComponent:
  class Script (line 52) | class Script:
    method title (line 99) | def title(self):
    method ui (line 104) | def ui(self, is_img2img):
    method show (line 112) | def show(self, is_img2img):
    method run (line 124) | def run(self, p, *args):
    method setup (line 137) | def setup(self, p, *args):
    method before_process (line 143) | def before_process(self, p, *args):
    method process (line 152) | def process(self, p, *args):
    method before_process_batch (line 161) | def before_process_batch(self, p, *args, **kwargs):
    method after_extra_networks_activate (line 175) | def after_extra_networks_activate(self, p, *args, **kwargs):
    method process_before_every_sampling (line 190) | def process_before_every_sampling(self, p, *args, **kwargs):
    method process_batch (line 197) | def process_batch(self, p, *args, **kwargs):
    method postprocess_batch (line 210) | def postprocess_batch(self, p, *args, **kwargs):
    method postprocess_batch_list (line 221) | def postprocess_batch_list(self, p, pp: PostprocessBatchListArgs, *arg...
    method on_mask_blend (line 240) | def on_mask_blend(self, p, mba: MaskBlendArgs, *args):
    method post_sample (line 250) | def post_sample(self, p, ps: PostSampleArgs, *args):
    method postprocess_image (line 259) | def postprocess_image(self, p, pp: PostprocessImageArgs, *args):
    method postprocess_maskoverlay (line 266) | def postprocess_maskoverlay(self, p, ppmo: PostProcessMaskOverlayArgs,...
    method postprocess_image_after_composite (line 273) | def postprocess_image_after_composite(self, p, pp: PostprocessImageArg...
    method postprocess (line 282) | def postprocess(self, p, processed, *args):
    method before_component (line 290) | def before_component(self, component, **kwargs):
    method after_component (line 300) | def after_component(self, component, **kwargs):
    method on_before_component (line 307) | def on_before_component(self, callback, *, elem_id):
    method on_after_component (line 321) | def on_after_component(self, callback, *, elem_id):
    method describe (line 330) | def describe(self):
    method elem_id (line 334) | def elem_id(self, item_id):
    method before_hr (line 344) | def before_hr(self, p, *args):
  class ScriptBuiltinUI (line 351) | class ScriptBuiltinUI(Script):
    method elem_id (line 354) | def elem_id(self, item_id):
    method show (line 362) | def show(self, is_img2img):
  function basedir (line 369) | def basedir():
  class ScriptWithDependencies (line 385) | class ScriptWithDependencies:
  function list_scripts (line 393) | def list_scripts(scriptdirname, extension, *, include_extensions=True):
  function list_files_with_name (line 471) | def list_files_with_name(filename):
  function load_scripts (line 487) | def load_scripts():
  function wrap_call (line 533) | def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
  class ScriptRunner (line 542) | class ScriptRunner:
    method __init__ (line 543) | def __init__(self):
    method initialize_scripts (line 578) | def initialize_scripts(self, is_img2img):
    method apply_on_before_component_callbacks (line 614) | def apply_on_before_component_callbacks(self):
    method create_script_ui (line 634) | def create_script_ui(self, script):
    method create_script_ui_inner (line 644) | def create_script_ui_inner(self, script):
    method setup_ui_for_section (line 689) | def setup_ui_for_section(self, section, scriptlist=None):
    method prepare_ui (line 705) | def prepare_ui(self):
    method setup_ui (line 708) | def setup_ui(self):
    method run (line 768) | def run(self, p, *args):
    method list_scripts_for_method (line 786) | def list_scripts_for_method(self, method_name):
    method create_ordered_callbacks_list (line 792) | def create_ordered_callbacks_list(self,  method_name, *, enable_user_s...
    method ordered_callbacks (line 805) | def ordered_callbacks(self, method_name, *, enable_user_sort=True):
    method ordered_scripts (line 817) | def ordered_scripts(self, method_name):
    method before_process (line 820) | def before_process(self, p):
    method process (line 828) | def process(self, p):
    method process_before_every_sampling (line 836) | def process_before_every_sampling(self, p, **kwargs):
    method before_process_batch (line 844) | def before_process_batch(self, p, **kwargs):
    method after_extra_networks_activate (line 852) | def after_extra_networks_activate(self, p, **kwargs):
    method process_batch (line 860) | def process_batch(self, p, **kwargs):
    method postprocess (line 868) | def postprocess(self, p, processed):
    method postprocess_batch (line 876) | def postprocess_batch(self, p, images, **kwargs):
    method postprocess_batch_list (line 884) | def postprocess_batch_list(self, p, pp: PostprocessBatchListArgs, **kw...
    method post_sample (line 892) | def post_sample(self, p, ps: PostSampleArgs):
    method on_mask_blend (line 900) | def on_mask_blend(self, p, mba: MaskBlendArgs):
    method postprocess_image (line 908) | def postprocess_image(self, p, pp: PostprocessImageArgs):
    method postprocess_maskoverlay (line 916) | def postprocess_maskoverlay(self, p, ppmo: PostProcessMaskOverlayArgs):
    method postprocess_image_after_composite (line 924) | def postprocess_image_after_composite(self, p, pp: PostprocessImageArgs):
    method before_component (line 932) | def before_component(self, component, **kwargs):
    method after_component (line 945) | def after_component(self, component, **kwargs):
    method script (line 958) | def script(self, title):
    method reload_sources (line 961) | def reload_sources(self, cache):
    method before_hr (line 979) | def before_hr(self, p):
    method setup_scrips (line 987) | def setup_scrips(self, p, *, is_ui=True):
    method set_named_arg (line 998) | def set_named_arg(self, args, script_name, arg_elem_id, value, fuzzy=F...
  function reload_script_body_only (line 1034) | def reload_script_body_only():

FILE: modules/scripts_auto_postprocessing.py
  class ScriptPostprocessingForMainUI (line 4) | class ScriptPostprocessingForMainUI(scripts.Script):
    method __init__ (line 5) | def __init__(self, script_postproc):
    method title (line 9) | def title(self):
    method show (line 12) | def show(self, is_img2img):
    method ui (line 15) | def ui(self, is_img2img):
    method postprocess_image (line 19) | def postprocess_image(self, p, script_pp, *args):
  function create_auto_preprocessing_script_data (line 29) | def create_auto_preprocessing_script_data():

FILE: modules/scripts_postprocessing.py
  class PostprocessedImageSharedInfo (line 9) | class PostprocessedImageSharedInfo:
  class PostprocessedImage (line 14) | class PostprocessedImage:
    method __init__ (line 15) | def __init__(self, image):
    method get_suffix (line 24) | def get_suffix(self, used_suffixes=None):
    method create_copy (line 43) | def create_copy(self, new_image, *, nametags=None, disable_processing=...
  class ScriptPostprocessing (line 56) | class ScriptPostprocessing:
    method ui (line 71) | def ui(self):
    method process (line 80) | def process(self, pp: PostprocessedImage, **args):
    method process_firstpass (line 88) | def process_firstpass(self, pp: PostprocessedImage, **args):
    method image_changed (line 97) | def image_changed(self):
  function wrap_call (line 101) | def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
  class ScriptPostprocessingRunner (line 111) | class ScriptPostprocessingRunner:
    method __init__ (line 112) | def __init__(self):
    method initialize_scripts (line 116) | def initialize_scripts(self, scripts_data):
    method create_script_ui (line 128) | def create_script_ui(self, script, inputs):
    method scripts_in_preferred_order (line 140) | def scripts_in_preferred_order(self):
    method setup_ui (line 160) | def setup_ui(self):
    method run (line 172) | def run(self, pp: PostprocessedImage, args):
    method create_args_for_run (line 210) | def create_args_for_run(self, scripts_args):
    method image_changed (line 227) | def image_changed(self):

FILE: modules/sd_disable_initialization.py
  class ReplaceHelper (line 9) | class ReplaceHelper:
    method __init__ (line 10) | def __init__(self):
    method replace (line 13) | def replace(self, obj, field, func):
    method restore (line 23) | def restore(self):
  class DisableInitialization (line 30) | class DisableInitialization(ReplaceHelper):
    method __init__ (line 46) | def __init__(self, disable_clip=True):
    method replace (line 50) | def replace(self, obj, field, func):
    method __enter__ (line 60) | def __enter__(self):
    method __exit__ (line 111) | def __exit__(self, exc_type, exc_val, exc_tb):
  class InitializeOnMeta (line 115) | class InitializeOnMeta(ReplaceHelper):
    method __enter__ (line 128) | def __enter__(self):
    method __exit__ (line 141) | def __exit__(self, exc_type, exc_val, exc_tb):
  class LoadStateDictOnMeta (line 145) | class LoadStateDictOnMeta(ReplaceHelper):
    method __init__ (line 158) | def __init__(self, state_dict, device, weight_dtype_conversion=None):
    method get_weight_dtype (line 165) | def get_weight_dtype(self, key):
    method __enter__ (line 169) | def __enter__(self):
    method __exit__ (line 231) | def __exit__(self, exc_type, exc_val, exc_tb):

FILE: modules/sd_emphasis.py
  class Emphasis (line 5) | class Emphasis:
    method after_transformers (line 20) | def after_transformers(self):
  class EmphasisNone (line 26) | class EmphasisNone(Emphasis):
  class EmphasisIgnore (line 31) | class EmphasisIgnore(Emphasis):
  class EmphasisOriginal (line 36) | class EmphasisOriginal(Emphasis):
    method after_transformers (line 40) | def after_transformers(self):
  class EmphasisOriginalNoNorm (line 49) | class EmphasisOriginalNoNorm(EmphasisOriginal):
    method after_transformers (line 53) | def after_transformers(self):
  function get_current_option (line 57) | def get_current_option(emphasis_option_name):
  function get_options_descriptions (line 61) | def get_options_descriptions():

FILE: modules/sd_hijack.py
  function list_optimizers (line 48) | def list_optimizers():
  function apply_optimizations (line 59) | def apply_optimizations(option=None):
  function undo_optimizations (line 103) | def undo_optimizations():
  function fix_checkpoint (line 113) | def fix_checkpoint():
  function weighted_loss (line 120) | def weighted_loss(sd_model, pred, target, mean=True):
  function weighted_forward (line 132) | def weighted_forward(sd_model, x, c, w, *args, **kwargs):
  function apply_weighted_forward (line 157) | def apply_weighted_forward(sd_model):
  function undo_weighted_forward (line 161) | def undo_weighted_forward(sd_model):
  class StableDiffusionModelHijack (line 168) | class StableDiffusionModelHijack:
    method __init__ (line 175) | def __init__(self):
    method apply_optimizations (line 184) | def apply_optimizations(self, option=None):
    method convert_sdxl_to_ssd (line 191) | def convert_sdxl_to_ssd(self, m):
    method hijack (line 205) | def hijack(self, m):
    method undo_hijack (line 275) | def undo_hijack(self, m):
    method apply_circular (line 311) | def apply_circular(self, enable):
    method clear_comments (line 320) | def clear_comments(self):
    method get_prompt_lengths (line 324) | def get_prompt_lengths(self, text):
    method redo_hijack (line 335) | def redo_hijack(self, m):
  class EmbeddingsWithFixes (line 340) | class EmbeddingsWithFixes(torch.nn.Module):
    method __init__ (line 341) | def __init__(self, wrapped, embeddings, textual_inversion_key='clip_l'):
    method forward (line 347) | def forward(self, input_ids):
  class TextualInversionEmbeddings (line 369) | class TextualInversionEmbeddings(torch.nn.Embedding):
    method __init__ (line 370) | def __init__(self, num_embeddings: int, embedding_dim: int, textual_in...
    method wrapped (line 377) | def wrapped(self):
    method forward (line 380) | def forward(self, input_ids):
  function add_circular_option_to_conv_2d (line 384) | def add_circular_option_to_conv_2d():
  function register_buffer (line 396) | def register_buffer(self, name, attr):

FILE: modules/sd_hijack_checkpoint.py
  function BasicTransformerBlock_forward (line 7) | def BasicTransformerBlock_forward(self, x, context=None):
  function AttentionBlock_forward (line 11) | def AttentionBlock_forward(self, x):
  function ResBlock_forward (line 15) | def ResBlock_forward(self, x, emb):
  function add (line 22) | def add():
  function remove (line 37) | def remove():

FILE: modules/sd_hijack_clip.py
  class PromptChunk (line 10) | class PromptChunk:
    method __init__ (line 18) | def __init__(self):
  class TextConditionalModel (line 30) | class TextConditionalModel(torch.nn.Module):
    method __init__ (line 31) | def __init__(self):
    method empty_chunk (line 46) | def empty_chunk(self):
    method get_target_prompt_token_count (line 54) | def get_target_prompt_token_count(self, token_count):
    method tokenize (line 59) | def tokenize(self, texts):
    method encode_with_transformers (line 64) | def encode_with_transformers(self, tokens):
    method encode_embedding_init_text (line 75) | def encode_embedding_init_text(self, init_text, nvpt):
    method tokenize_line (line 81) | def tokenize_line(self, line):
    method process_texts (line 176) | def process_texts(self, texts):
    method forward (line 199) | def forward(self, texts):
    method process_tokens (line 253) | def process_tokens(self, remade_batch_tokens, batch_multipliers):
  class FrozenCLIPEmbedderWithCustomWordsBase (line 288) | class FrozenCLIPEmbedderWithCustomWordsBase(TextConditionalModel):
    method __init__ (line 293) | def __init__(self, wrapped, hijack):
    method forward (line 308) | def forward(self, texts):
  class FrozenCLIPEmbedderWithCustomWords (line 316) | class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWord...
    method __init__ (line 317) | def __init__(self, wrapped, hijack):
    method tokenize (line 346) | def tokenize(self, texts):
    method encode_with_transformers (line 351) | def encode_with_transformers(self, tokens):
    method encode_embedding_init_text (line 362) | def encode_embedding_init_text(self, init_text, nvpt):
  class FrozenCLIPEmbedderForSDXLWithCustomWords (line 370) | class FrozenCLIPEmbedderForSDXLWithCustomWords(FrozenCLIPEmbedderWithCus...
    method __init__ (line 371) | def __init__(self, wrapped, hijack):
    method encode_with_transformers (line 374) | def encode_with_transformers(self, tokens):

FILE: modules/sd_hijack_clip_old.py
  function process_text_old (line 5) | def process_text_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWo...
  function forward_old (line 72) | def forward_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBa...

FILE: modules/sd_hijack_ip2p.py
  function should_hijack_ip2p (line 4) | def should_hijack_ip2p(checkpoint_info):

FILE: modules/sd_hijack_open_clip.py
  class FrozenOpenCLIPEmbedderWithCustomWords (line 10) | class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmb...
    method __init__ (line 11) | def __init__(self, wrapped, hijack):
    method tokenize (line 19) | def tokenize(self, texts):
    method encode_with_transformers (line 26) | def encode_with_transformers(self, tokens):
    method encode_embedding_init_text (line 32) | def encode_embedding_init_text(self, init_text, nvpt):
  class FrozenOpenCLIPEmbedder2WithCustomWords (line 40) | class FrozenOpenCLIPEmbedder2WithCustomWords(sd_hijack_clip.FrozenCLIPEm...
    method __init__ (line 41) | def __init__(self, wrapped, hijack):
    method tokenize (line 49) | def tokenize(self, texts):
    method encode_with_transformers (line 56) | def encode_with_transformers(self, tokens):
    method encode_embedding_init_text (line 66) | def encode_embedding_init_text(self, init_text, nvpt):

FILE: modules/sd_hijack_optimizations.py
  class SdOptimization (line 25) | class SdOptimization:
    method title (line 31) | def title(self):
    method is_available (line 37) | def is_available(self):
    method apply (line 40) | def apply(self):
    method undo (line 43) | def undo(self):
  class SdOptimizationXformers (line 51) | class SdOptimizationXformers(SdOptimization):
    method is_available (line 56) | def is_available(self):
    method apply (line 59) | def apply(self):
  class SdOptimizationSdpNoMem (line 66) | class SdOptimizationSdpNoMem(SdOptimization):
    method is_available (line 72) | def is_available(self):
    method apply (line 75) | def apply(self):
  class SdOptimizationSdp (line 82) | class SdOptimizationSdp(SdOptimizationSdpNoMem):
    method apply (line 88) | def apply(self):
  class SdOptimizationSubQuad (line 95) | class SdOptimizationSubQuad(SdOptimization):
    method priority (line 100) | def priority(self):
    method apply (line 103) | def apply(self):
  class SdOptimizationV1 (line 110) | class SdOptimizationV1(SdOptimization):
    method apply (line 116) | def apply(self):
  class SdOptimizationInvokeAI (line 121) | class SdOptimizationInvokeAI(SdOptimization):
    method priority (line 126) | def priority(self):
    method apply (line 129) | def apply(self):
  class SdOptimizationDoggettx (line 134) | class SdOptimizationDoggettx(SdOptimization):
    method apply (line 139) | def apply(self):
  function list_optimizers (line 146) | def list_optimizers(res):
  function get_available_vram (line 166) | def get_available_vram():
  function split_cross_attention_forward_v1 (line 180) | def split_cross_attention_forward_v1(self, x, context=None, mask=None, *...
  function split_cross_attention_forward (line 221) | def split_cross_attention_forward(self, x, context=None, mask=None, **kw...
  function einsum_op_compvis (line 288) | def einsum_op_compvis(q, k, v):
  function einsum_op_slice_0 (line 294) | def einsum_op_slice_0(q, k, v, slice_size):
  function einsum_op_slice_1 (line 302) | def einsum_op_slice_1(q, k, v, slice_size):
  function einsum_op_mps_v1 (line 310) | def einsum_op_mps_v1(q, k, v):
  function einsum_op_mps_v2 (line 320) | def einsum_op_mps_v2(q, k, v):
  function einsum_op_tensor_mem (line 327) | def einsum_op_tensor_mem(q, k, v, max_tensor_mb):
  function einsum_op_cuda (line 337) | def einsum_op_cuda(q, k, v):
  function einsum_op (line 348) | def einsum_op(q, k, v):
  function split_cross_attention_forward_invokeAI (line 362) | def split_cross_attention_forward_invokeAI(self, x, context=None, mask=N...
  function sub_quad_attention_forward (line 390) | def sub_quad_attention_forward(self, x, context=None, mask=None, **kwargs):
  function sub_quad_attention (line 427) | def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, k...
  function get_xformers_flash_attention_op (line 465) | def get_xformers_flash_attention_op(q, k, v):
  function xformers_attention_forward (line 480) | def xformers_attention_forward(self, x, context=None, mask=None, **kwargs):
  function scaled_dot_product_attention_forward (line 508) | def scaled_dot_product_attention_forward(self, x, context=None, mask=Non...
  function scaled_dot_product_no_mem_attention_forward (line 549) | def scaled_dot_product_no_mem_attention_forward(self, x, context=None, m...
  function cross_attention_attnblock_forward (line 554) | def cross_attention_attnblock_forward(self, x):
  function xformers_attnblock_forward (line 613) | def xformers_attnblock_forward(self, x):
  function sdp_attnblock_forward (line 637) | def sdp_attnblock_forward(self, x):
  function sdp_no_mem_attnblock_forward (line 658) | def sdp_no_mem_attnblock_forward(self, x):
  function sub_quad_attnblock_forward (line 663) | def sub_quad_attnblock_forward(self, x):

FILE: modules/sd_hijack_unet.py
  class TorchHijackForUnet (line 10) | class TorchHijackForUnet:
    method __getattr__ (line 16) | def __getattr__(self, item):
    method cat (line 25) | def cat(self, tensors, *args, **kwargs):
  function apply_model (line 40) | def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
  function timestep_embedding (line 58) | def timestep_embedding(_, timesteps, dim, max_period=10000, repeat_only=...
  function spatial_transformer_forward (line 83) | def spatial_transformer_forward(_, self, x: torch.Tensor, context=None):
  class GELUHijack (line 105) | class GELUHijack(torch.nn.GELU, torch.nn.Module):
    method __init__ (line 106) | def __init__(self, *args, **kwargs):
    method forward (line 108) | def forward(self, x):
  function hijack_ddpm_edit (line 116) | def hijack_ddpm_edit():
  function timestep_embedding_cast_result (line 145) | def timestep_embedding_cast_result(orig_func, timesteps, *args, **kwargs):

FILE: modules/sd_hijack_utils.py
  class CondFunc (line 7) | class CondFunc:
    method __new__ (line 8) | def __new__(cls, orig_func, sub_func, cond_func=always_true_func):
    method __init__ (line 28) | def __init__(self, orig_func, sub_func, cond_func):
    method __call__ (line 32) | def __call__(self, *args, **kwargs):

FILE: modules/sd_hijack_xlmr.py
  class FrozenXLMREmbedderWithCustomWords (line 6) | class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedde...
    method __init__ (line 7) | def __init__(self, wrapped, hijack):
    method encode_with_transformers (line 16) | def encode_with_transformers(self, tokens):
    method encode_embedding_init_text (line 27) | def encode_embedding_init_text(self, init_text, nvpt):

FILE: modules/sd_models.py
  class ModelType (line 30) | class ModelType(enum.Enum):
  function replace_key (line 38) | def replace_key(d, key, new_key, value):
  class CheckpointInfo (line 56) | class CheckpointInfo:
    method __init__ (line 57) | def __init__(self, filename):
    method register (line 102) | def register(self):
    method calculate_shorthash (line 107) | def calculate_shorthash(self):
  function setup_model (line 140) | def setup_model():
  function checkpoint_tiles (line 149) | def checkpoint_tiles(use_short=False):
  function list_models (line 153) | def list_models():
  function get_closet_checkpoint_match (line 183) | def get_closet_checkpoint_match(search_string):
  function model_hash (line 203) | def model_hash(filename):
  function select_checkpoint (line 218) | def select_checkpoint():
  function transform_checkpoint_dict_key (line 254) | def transform_checkpoint_dict_key(k, replacements):
  function get_state_dict_from_checkpoint (line 262) | def get_state_dict_from_checkpoint(pl_sd):
  function read_metadata_from_safetensors (line 284) | def read_metadata_from_safetensors(filename):
  function read_state_dict (line 312) | def read_state_dict(checkpoint_file, print_global_state=False, map_locat...
  function get_checkpoint_state_dict (line 332) | def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer):
  class SkipWritingToConfig (line 350) | class SkipWritingToConfig:
    method __enter__ (line 356) | def __enter__(self):
    method __exit__ (line 361) | def __exit__(self, exc_type, exc_value, exc_traceback):
  function check_fp8 (line 365) | def check_fp8(model):
  function set_model_type (line 379) | def set_model_type(model, state_dict):
  function set_model_fields (line 405) | def set_model_fields(model):
  function load_model_weights (line 410) | def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dic...
  function enable_midas_autodownload (line 542) | def enable_midas_autodownload():
  function patch_given_betas (line 585) | def patch_given_betas():
  function repair_config (line 599) | def repair_config(sd_config, state_dict=None):
  function rescale_zero_terminal_snr_abar (line 628) | def rescale_zero_terminal_snr_abar(alphas_cumprod):
  function apply_alpha_schedule_override (line 647) | def apply_alpha_schedule_override(sd_model, p=None):
  class SdModelData (line 676) | class SdModelData:
    method __init__ (line 677) | def __init__(self):
    method get_sd_model (line 683) | def get_sd_model(self):
    method set_sd_model (line 703) | def set_sd_model(self, v, already_loaded=False):
  function get_empty_cond (line 722) | def get_empty_cond(sd_model):
  function send_model_to_cpu (line 738) | def send_model_to_cpu(m):
  function model_target_device (line 748) | def model_target_device(m):
  function send_model_to_device (line 755) | def send_model_to_device(m):
  function send_model_to_trash (line 762) | def send_model_to_trash(m):
  function instantiate_from_config (line 767) | def instantiate_from_config(config, state_dict=None):
  function get_obj_from_str (line 778) | def get_obj_from_str(string, reload=False):
  function load_model (line 786) | def load_model(checkpoint_info=None, already_loaded_state_dict=None):
  function reuse_model_from_already_loaded (line 878) | def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer):
  function reload_model_weights (line 940) | def reload_model_weights(sd_model=None, info=None, forced_reload=False):
  function unload_model_weights (line 1005) | def unload_model_weights(sd_model=None, info=None):
  function apply_token_merging (line 1011) | def apply_token_merging(sd_model, token_merging_ratio):

FILE: modules/sd_models_config.py
  function is_using_v_parameterization_for_sd2 (line 29) | def is_using_v_parameterization_for_sd2(state_dict):
  function guess_model_config_from_state_dict (line 72) | def guess_model_config_from_state_dict(sd, filename):
  function find_checkpoint_config (line 117) | def find_checkpoint_config(state_dict, info):
  function find_checkpoint_config_near_filename (line 128) | def find_checkpoint_config_near_filename(info):

FILE: modules/sd_models_types.py
  class WebuiSdModel (line 9) | class WebuiSdModel(LatentDiffusion):

FILE: modules/sd_models_xl.py
  function get_learned_conditioning (line 12) | def get_learned_conditioning(self: sgm.models.diffusion.DiffusionEngine,...
  function apply_model (line 37) | def apply_model(self: sgm.models.diffusion.DiffusionEngine, x, t, cond):
  function get_first_stage_encoding (line 46) | def get_first_stage_encoding(self, x):  # SDXL's encode_first_stage does...
  function encode_embedding_init_text (line 55) | def encode_embedding_init_text(self: sgm.modules.GeneralConditioner, ini...
  function tokenize (line 65) | def tokenize(self: sgm.modules.GeneralConditioner, texts):
  function process_texts (line 73) | def process_texts(self, texts):
  function get_target_prompt_token_count (line 78) | def get_target_prompt_token_count(self, token_count):
  function extend_sdxl (line 90) | def extend_sdxl(model):

FILE: modules/sd_samplers.py
  function find_sampler_config (line 24) | def find_sampler_config(name):
  function create_sampler (line 33) | def create_sampler(name, model):
  function set_samplers (line 47) | def set_samplers():
  function visible_sampler_names (line 61) | def visible_sampler_names():
  function visible_samplers (line 65) | def visible_samplers():
  function get_sampler_from_infotext (line 69) | def get_sampler_from_infotext(d: dict):
  function get_scheduler_from_infotext (line 73) | def get_scheduler_from_infotext(d: dict):
  function get_hr_sampler_and_scheduler (line 77) | def get_hr_sampler_and_scheduler(d: dict):
  function get_hr_sampler_from_infotext (line 92) | def get_hr_sampler_from_infotext(d: dict):
  function get_hr_scheduler_from_infotext (line 96) | def get_hr_scheduler_from_infotext(d: dict):
  function get_sampler_and_scheduler (line 101) | def get_sampler_and_scheduler(sampler_name, scheduler_name, *, convert_a...
  function fix_p_invalid_sampler_and_scheduler (line 125) | def fix_p_invalid_sampler_and_scheduler(p):

FILE: modules/sd_samplers_cfg_denoiser.py
  function catenate_conds (line 11) | def catenate_conds(conds):
  function subscript_cond (line 18) | def subscript_cond(cond, a, b):
  function pad_cond (line 25) | def pad_cond(tensor, repeats, empty):
  class CFGDenoiser (line 33) | class CFGDenoiser(torch.nn.Module):
    method __init__ (line 41) | def __init__(self, sampler):
    method inner_model (line 71) | def inner_model(self):
    method combine_denoised (line 74) | def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
    method combine_denoised_for_edit_model (line 84) | def combine_denoised_for_edit_model(self, x_out, cond_scale):
    method get_pred_x0 (line 90) | def get_pred_x0(self, x_in, x_out, sigma):
    method update_inner_model (line 93) | def update_inner_model(self):
    method pad_cond_uncond (line 100) | def pad_cond_uncond(self, cond, uncond):
    method pad_cond_uncond_v0 (line 113) | def pad_cond_uncond_v0(self, cond, uncond):
    method forward (line 156) | def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, im...

FILE: modules/sd_samplers_common.py
  class SamplerData (line 14) | class SamplerData(SamplerDataTuple):
    method total_steps (line 15) | def total_steps(self, steps):
  function setup_img2img_steps (line 22) | def setup_img2img_steps(p, steps=None):
  function samples_to_images_tensor (line 37) | def samples_to_images_tensor(sample, approximation=None, model=None):
  function single_sample_to_image (line 63) | def single_sample_to_image(sample, approximation=None):
  function decode_first_stage (line 73) | def decode_first_stage(model, x):
  function sample_to_image (line 79) | def sample_to_image(samples, index=0, approximation=None):
  function samples_to_image_grid (line 83) | def samples_to_image_grid(samples, approximation=None):
  function images_tensor_to_samples (line 87) | def images_tensor_to_samples(image, approximation=None, model=None):
  function store_latent (line 115) | def store_latent(decoded):
  function is_sampler_using_eta_noise_seed_delta (line 123) | def is_sampler_using_eta_noise_seed_delta(p):
  class InterruptedException (line 142) | class InterruptedException(BaseException):
  function replace_torchsde_browinan (line 146) | def replace_torchsde_browinan():
  function apply_refiner (line 158) | def apply_refiner(cfg_denoiser, sigma=None):
  class TorchHijack (line 205) | class TorchHijack:
    method __init__ (line 213) | def __init__(self, p):
    method __getattr__ (line 216) | def __getattr__(self, item):
    method randn_like (line 225) | def randn_like(self, x):
  class Sampler (line 229) | class Sampler:
    method __init__ (line 230) | def __init__(self, funcname):
    method callback_state (line 256) | def callback_state(self, d):
    method launch_sampling (line 265) | def launch_sampling(self, steps, func):
    method number_of_needed_noises (line 283) | def number_of_needed_noises(self, p):
    method initialize (line 286) | def initialize(self, p) -> dict:
    method create_noise_sampler (line 334) | def create_noise_sampler(self, x, sigmas, p):
    method sample (line 344) | def sample(self, p, x, conditioning, unconditional_conditioning, steps...
    method sample_img2img (line 347) | def sample_img2img(self, p, x, noise, conditioning, unconditional_cond...
    method add_infotext (line 350) | def add_infotext(self, p):

FILE: modules/sd_samplers_extra.py
  function restart_sampler (line 7) | def restart_sampler(model, x, sigmas, extra_args=None, callback=None, di...

FILE: modules/sd_samplers_kdiffusion.py
  class CFGDenoiserKDiffusion (line 52) | class CFGDenoiserKDiffusion(sd_samplers_cfg_denoiser.CFGDenoiser):
    method inner_model (line 54) | def inner_model(self):
  class KDiffusionSampler (line 67) | class KDiffusionSampler(sd_samplers_common.Sampler):
    method __init__ (line 68) | def __init__(self, funcname, sd_model, options=None):
    method get_sigmas (line 79) | def get_sigmas(self, p, steps):
    method sample_img2img (line 134) | def sample_img2img(self, p, x, noise, conditioning, unconditional_cond...
    method sample (line 190) | def sample(self, p, x, conditioning, unconditional_conditioning, steps...

FILE: modules/sd_samplers_lcm.py
  class LCMCompVisDenoiser (line 10) | class LCMCompVisDenoiser(DiscreteEpsDDPMDenoiser):
    method __init__ (line 11) | def __init__(self, model):
    method get_sigmas (line 23) | def get_sigmas(self, n=None,):
    method sigma_to_t (line 35) | def sigma_to_t(self, sigma, quantize=None):
    method t_to_sigma (line 41) | def t_to_sigma(self, timestep):
    method get_eps (line 46) | def get_eps(self, *args, **kwargs):
    method get_scaled_out (line 50) | def get_scaled_out(self, sigma, output, input):
    method forward (line 60) | def forward(self, input, sigma, **kwargs):
  function sample_lcm (line 66) | def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable...
  class CFGDenoiserLCM (line 83) | class CFGDenoiserLCM(sd_samplers_cfg_denoiser.CFGDenoiser):
    method inner_model (line 85) | def inner_model(self):
  class LCMSampler (line 93) | class LCMSampler(sd_samplers_kdiffusion.KDiffusionSampler):
    method __init__ (line 94) | def __init__(self, funcname, sd_model, options=None):

FILE: modules/sd_samplers_timesteps.py
  class CompVisTimestepsDenoiser (line 25) | class CompVisTimestepsDenoiser(torch.nn.Module):
    method __init__ (line 26) | def __init__(self, model, *args, **kwargs):
    method forward (line 30) | def forward(self, input, timesteps, **kwargs):
  class CompVisTimestepsVDenoiser (line 34) | class CompVisTimestepsVDenoiser(torch.nn.Module):
    method __init__ (line 35) | def __init__(self, model, *args, **kwargs):
    method predict_eps_from_z_and_v (line 39) | def predict_eps_from_z_and_v(self, x_t, t, v):
    method forward (line 42) | def forward(self, input, timesteps, **kwargs):
  class CFGDenoiserTimesteps (line 48) | class CFGDenoiserTimesteps(CFGDenoiser):
    method __init__ (line 50) | def __init__(self, sampler):
    method get_pred_x0 (line 56) | def get_pred_x0(self, x_in, x_out, sigma):
    method inner_model (line 67) | def inner_model(self):
  class CompVisSampler (line 75) | class CompVisSampler(sd_samplers_common.Sampler):
    method __init__ (line 76) | def __init__(self, funcname, sd_model):
    method get_timesteps (line 86) | def get_timesteps(self, p, steps):
    method sample_img2img (line 98) | def sample_img2img(self, p, x, noise, conditioning, unconditional_cond...
    method sample (line 141) | def sample(self, p, x, conditioning, unconditional_conditioning, steps...

FILE: modules/sd_samplers_timesteps_impl.py
  function ddim (line 12) | def ddim(model, x, timesteps, extra_args=None, callback=None, disable=No...
  function ddim_cfgpp (line 44) | def ddim_cfgpp(model, x, timesteps, extra_args=None, callback=None, disa...
  function plms (line 84) | def plms(model, x, timesteps, extra_args=None, callback=None, disable=No...
  class UniPCCFG (line 145) | class UniPCCFG(uni_pc.UniPC):
    method __init__ (line 146) | def __init__(self, cfg_model, extra_args, callback, *args, **kwargs):
    method get_model_input_time (line 159) | def get_model_input_time(self, t_continuous):
    method model (line 162) | def model(self, x, t):
  function unipc (line 170) | def unipc(model, x, timesteps, extra_args=None, callback=None, disable=N...

FILE: modules/sd_schedulers.py
  function to_d (line 10) | def to_d(x, sigma, denoised):
  class Scheduler (line 19) | class Scheduler:
  function uniform (line 29) | def uniform(n, sigma_min, sigma_max, inner_model, device):
  function sgm_uniform (line 33) | def sgm_uniform(n, sigma_min, sigma_max, inner_model, device):
  function get_align_your_steps_sigmas (line 44) | def get_align_your_steps_sigmas(n, sigma_min, sigma_max, device):
  function kl_optimal (line 73) | def kl_optimal(n, sigma_min, sigma_max, device):
  function simple_scheduler (line 81) | def simple_scheduler(n, sigma_min, sigma_max, inner_model, device):
  function normal_scheduler (line 90) | def normal_scheduler(n, sigma_min, sigma_max, inner_model, device, sgm=F...
  function ddim_scheduler (line 107) | def ddim_scheduler(n, sigma_min, sigma_max, inner_model, device):
  function beta_scheduler (line 119) | def beta_scheduler(n, sigma_min, sigma_max, inner_model, device):

FILE: modules/sd_unet.py
  function list_unets (line 10) | def list_unets():
  function get_unet_option (line 17) | def get_unet_option(option=None):
  function apply_unet (line 33) | def apply_unet(option=None):
  class SdUnetOption (line 63) | class SdUnetOption:
    method create_unet (line 70) | def create_unet(self):
  class SdUnet (line 75) | class SdUnet(torch.nn.Module):
    method forward (line 76) | def forward(self, x, timesteps, context, *args, **kwargs):
    method activate (line 79) | def activate(self):
    method deactivate (line 82) | def deactivate(self):
  function create_unet_forward (line 86) | def create_unet_forward(original_forward):

FILE: modules/sd_vae.py
  function get_loaded_vae_name (line 23) | def get_loaded_vae_name():
  function get_loaded_vae_hash (line 30) | def get_loaded_vae_hash():
  function get_base_vae (line 39) | def get_base_vae(model):
  function store_base_vae (line 45) | def store_base_vae(model):
  function delete_base_vae (line 53) | def delete_base_vae():
  function restore_base_vae (line 59) | def restore_base_vae(model):
  function get_filename (line 68) | def get_filename(filepath):
  function refresh_vae_list (line 72) | def refresh_vae_list():
  function find_vae_near_checkpoint (line 109) | def find_vae_near_checkpoint(checkpoint_file):
  class VaeResolution (line 119) | class VaeResolution:
    method tuple (line 124) | def tuple(self):
  function is_automatic (line 128) | def is_automatic():
  function resolve_vae_from_setting (line 132) | def resolve_vae_from_setting() -> VaeResolution:
  function resolve_vae_from_user_metadata (line 146) | def resolve_vae_from_user_metadata(checkpoint_file) -> VaeResolution:
  function resolve_vae_near_checkpoint (line 160) | def resolve_vae_near_checkpoint(checkpoint_file) -> VaeResolution:
  function resolve_vae (line 168) | def resolve_vae(checkpoint_file) -> VaeResolution:
  function load_vae_dict (line 188) | def load_vae_dict(filename, map_location):
  function load_vae (line 194) | def load_vae(model, vae_file=None, vae_source="from unknown source"):
  function _load_vae_dict (line 238) | def _load_vae_dict(model, vae_dict_1):
  function clear_loaded_vae (line 243) | def clear_loaded_vae():
  function reload_vae_weights (line 251) | def reload_vae_weights(sd_model=None, vae_file=unspecified):

FILE: modules/sd_vae_approx.py
  class VAEApprox (line 10) | class VAEApprox(nn.Module):
    method __init__ (line 11) | def __init__(self, latent_channels=4):
    method forward (line 22) | def forward(self, x):
  function download_model (line 34) | def download_model(model_path, model_url):
  function model (line 42) | def model():
  function cheap_approximation (line 70) | def cheap_approximation(sample):

FILE: modules/sd_vae_taesd.py
  function conv (line 16) | def conv(n_in, n_out, **kwargs):
  class Clamp (line 20) | class Clamp(nn.Module):
    method forward (line 22) | def forward(x):
  class Block (line 26) | class Block(nn.Module):
    method __init__ (line 27) | def __init__(self, n_in, n_out):
    method forward (line 33) | def forward(self, x):
  function decoder (line 37) | def decoder(latent_channels=4):
  function encoder (line 47) | def encoder(latent_channels=4):
  class TAESDDecoder (line 57) | class TAESDDecoder(nn.Module):
    method __init__ (line 61) | def __init__(self, decoder_path="taesd_decoder.pth", latent_channels=N...
  class TAESDEncoder (line 73) | class TAESDEncoder(nn.Module):
    method __init__ (line 77) | def __init__(self, encoder_path="taesd_encoder.pth", latent_channels=N...
  function download_model (line 89) | def download_model(model_path, model_url):
  function decoder_model (line 97) | def decoder_model():
  function encoder_model (line 122) | def encoder_model():

FILE: modules/shared_gradio_themes.py
  function reload_gradio_theme (line 44) | def reload_gradio_theme(theme_name=None):
  function resolve_var (line 74) | def resolve_var(name: str, gradio_theme=None, history=None):

FILE: modules/shared_init.py
  function initialize (line 9) | def initialize():

FILE: modules/shared_items.py
  function realesrgan_models_names (line 9) | def realesrgan_models_names():
  function dat_models_names (line 14) | def dat_models_names():
  function postprocessing_scripts (line 19) | def postprocessing_scripts():
  function sd_vae_items (line 25) | def sd_vae_items():
  function refresh_vae_list (line 31) | def refresh_vae_list():
  function cross_attention_optimizations (line 37) | def cross_attention_optimizations():
  function sd_unet_items (line 43) | def sd_unet_items():
  function refresh_unet_list (line 49) | def refresh_unet_list():
  function list_checkpoint_tiles (line 55) | def list_checkpoint_tiles(use_short=False):
  function refresh_checkpoints (line 60) | def refresh_checkpoints():
  function list_samplers (line 65) | def list_samplers():
  function reload_hypernetworks (line 70) | def reload_hypernetworks():
  function get_infotext_names (line 77) | def get_infotext_names():
  function ui_reorder_categories (line 109) | def ui_reorder_categories():
  function callbacks_order_settings (line 124) | def callbacks_order_settings():
  class Shared (line 163) | class Shared(sys.modules[__name__].__class__):
    method sd_model (line 172) | def sd_model(self):
    method sd_model (line 178) | def sd_model(self, value):

FILE: modules/shared_state.py
  class State (line 12) | class State:
    method __init__ (line 33) | def __init__(self):
    method need_restart (line 37) | def need_restart(self) -> bool:
    method need_restart (line 42) | def need_restart(self, value: bool) -> None:
    method server_command (line 48) | def server_command(self):
    method server_command (line 52) | def server_command(self, value: Optional[str]) -> None:
    method wait_for_server_command (line 59) | def wait_for_server_command(self, timeout: Optional[float] = None) -> ...
    method request_restart (line 70) | def request_restart(self) -> None:
    method skip (line 75) | def skip(self):
    method interrupt (line 79) | def interrupt(self):
    method stop_generating (line 83) | def stop_generating(self):
    method nextjob (line 87) | def nextjob(self):
    method dict (line 95) | def dict(self):
    method begin (line 110) | def begin(self, job: str = "(unknown)"):
    method end (line 129) | def end(self):
    method set_current_image (line 137) | def set_current_image(self):
    method do_set_current_image (line 145) | def do_set_current_image(self):
    method assign_current_image (line 164) | def assign_current_image(self, image):

FILE: modules/shared_total_tqdm.py
  class TotalTQDM (line 6) | class TotalTQDM:
    method __init__ (line 7) | def __init__(self):
    method reset (line 10) | def reset(self):
    method update (line 18) | def update(self):
    method updateTotal (line 25) | def updateTotal(self, new_total):
    method clear (line 32) | def clear(self):

FILE: modules/styles.py
  class PromptStyle (line 10) | class PromptStyle(typing.NamedTuple):
  function merge_prompts (line 17) | def merge_prompts(style_prompt: str, prompt: str) -> str:
  function apply_styles_to_prompt (line 27) | def apply_styles_to_prompt(prompt, styles):
  function extract_style_text_from_prompt (line 34) | def extract_style_text_from_prompt(style_text, prompt):
  function extract_original_prompts (line 62) | def extract_original_prompts(style: PromptStyle, prompt, negative_prompt):
  class StyleDatabase (line 82) | class StyleDatabase:
    method __init__ (line 83) | def __init__(self, paths: list[str | Path]):
    method reload (line 101) | def reload(self):
    method load_from_csv (line 132) | def load_from_csv(self, path: str | Path):
    method get_style_paths (line 150) | def get_style_paths(self) -> set:
    method get_style_prompts (line 169) | def get_style_prompts(self, styles):
    method get_negative_style_prompts (line 172) | def get_negative_style_prompts(self, styles):
    method apply_styles_to_prompt (line 175) | def apply_styles_to_prompt(self, prompt, styles):
    method apply_negative_styles_to_prompt (line 180) | def apply_negative_styles_to_prompt(self, prompt, styles):
    method save_styles (line 185) | def save_styles(self, path: str = None) -> None:
    method extract_styles_from_prompt (line 210) | def extract_styles_from_prompt(self, prompt, negative_prompt):

FILE: modules/sub_quadratic_attention.py
  function narrow_trunc (line 21) | def narrow_trunc(
  class AttnChunk (line 30) | class AttnChunk(NamedTuple):
  class SummarizeChunk (line 36) | class SummarizeChunk:
    method __call__ (line 38) | def __call__(
  class ComputeQueryChunkAttn (line 45) | class ComputeQueryChunkAttn:
    method __call__ (line 47) | def __call__(
  function _summarize_chunk (line 54) | def _summarize_chunk(
  function _query_chunk_attention (line 75) | def _query_chunk_attention(
  function _get_attention_scores_no_kv_chunking (line 117) | def _get_attention_scores_no_kv_chunking(
  class ScannedChunk (line 136) | class ScannedChunk(NamedTuple):
  function efficient_dot_product_attention (line 141) | def efficient_dot_product_attention(

FILE: modules/sysinfo.py
  function pretty_bytes (line 38) | def pretty_bytes(num, suffix="B"):
  function get (line 45) | def get():
  function check (line 59) | def check(x):
  function get_cpu_info (line 70) | def get_cpu_info():
  function get_ram_info (line 81) | def get_ram_info():
  function get_packages (line 90) | def get_packages():
  function get_dict (line 102) | def get_dict():
  function get_environment (line 130) | def get_environment():
  function get_argv (line 134) | def get_argv():
  function get_torch_sysinfo (line 154) | def get_torch_sysinfo():
  function run_git (line 164) | def run_git(path, *args):
  function git_status (line 171) | def git_status(path):
  function get_info_from_repo_path (line 176) | def get_info_from_repo_path(path: Path):
  function get_extensions (line 187) | def get_extensions(*, enabled, fallback_disabled_extensions=None):
  function get_config (line 206) | def get_config():

FILE: modules/textual_inversion/autocrop.py
  function crop_image (line 14) | def crop_image(im, settings):
  function focal_point (line 74) | def focal_point(im, settings):
  function image_face_points (line 142) | def image_face_points(im, settings):
  function image_corner_points (line 200) | def image_corner_points(im, settings):
  function image_entropy_points (line 228) | def image_entropy_points(im, settings):
  function image_entropy (line 260) | def image_entropy(im):
  function centroid (line 269) | def centroid(pois):
  function poi_average (line 275) | def poi_average(pois, settings):
  function is_landscape (line 289) | def is_landscape(w, h):
  function is_portrait (line 293) | def is_portrait(w, h):
  function is_square (line 297) | def is_square(w, h):
  function download_and_cache_models (line 310) | def download_and_cache_models():
  class PointOfInterest (line 320) | class PointOfInterest:
    method __init__ (line 321) | def __init__(self, x, y, weight=1.0, size=10):
    method bounding (line 327) | def bounding(self, size):
  class Settings (line 336) | class Settings:
    method __init__ (line 337) | def __init__(self, crop_width=512, crop_height=512, corner_points_weig...

FILE: modules/textual_inversion/dataset.py
  class DatasetEntry (line 20) | class DatasetEntry:
    method __init__ (line 21) | def __init__(self, filename=None, filename_text=None, latent_dist=None...
  class PersonalizedBase (line 32) | class PersonalizedBase(Dataset):
    method __init__ (line 33) | def __init__(self, data_root, width, height, repeats, flip_p=0.5, plac...
    method create_text (line 152) | def create_text(self, filename_text):
    method __len__ (line 163) | def __len__(self):
    method __getitem__ (line 166) | def __getitem__(self, i):
  class GroupedBatchSampler (line 175) | class GroupedBatchSampler(Sampler):
    method __init__ (line 176) | def __init__(self, data_source: PersonalizedBase, batch_size: int):
    method __len__ (line 188) | def __len__(self):
    method __iter__ (line 191) | def __iter__(self):
  class PersonalizedDataLoader (line 209) | class PersonalizedDataLoader(DataLoader):
    method __init__ (line 210) | def __init__(self, dataset, latent_sampling_method="once", batch_size=...
  class BatchLoader (line 218) | class BatchLoader:
    method __init__ (line 219) | def __init__(self, data):
    method pin_memory (line 230) | def pin_memory(self):
  function collate_wrapper (line 234) | def collate_wrapper(batch):
  class BatchLoaderRandom (line 237) | class BatchLoaderRandom(BatchLoader):
    method __init__ (line 238) | def __init__(self, data):
    method pin_memory (line 241) | def pin_memory(self):
  function collate_wrapper_random (line 244) | def collate_wrapper_random(batch):

FILE: modules/textual_inversion/image_embedding.py
  class EmbeddingEncoder (line 15) | class EmbeddingEncoder(json.JSONEncoder):
    method default (line 16) | def default(self, obj):
  class EmbeddingDecoder (line 22) | class EmbeddingDecoder(json.JSONDecoder):
    method __init__ (line 23) | def __init__(self, *args, **kwargs):
    method object_hook (line 26) | def object_hook(self, d):
  function embedding_to_b64 (line 32) | def embedding_to_b64(data):
  function embedding_from_b64 (line 37) | def embedding_from_b64(data):
  function lcg (line 42) | def lcg(m=2**32, a=1664525, c=1013904223, seed=0):
  function xor_block (line 48) | def xor_block(block):
  function style_block (line 54) | def style_block(block, sequence):
  function insert_image_data_embed (line 72) | def insert_image_data_embed(image, data):
  function crop_black (line 108) | def crop_black(img, tol=0):
  function extract_image_data_embed (line 116) | def extract_image_data_embed(image):
  function caption_image_overlay (line 137) | def caption_image_overlay(srcimage, title, footerLeft, footerMid, footer...

FILE: modules/textual_inversion/learn_schedule.py
  class LearnScheduleIterator (line 4) | class LearnScheduleIterator:
    method __init__ (line 5) | def __init__(self, learn_rate, max_steps, cur_step=0):
    method __iter__ (line 39) | def __iter__(self):
    method __next__ (line 42) | def __next__(self):
  class LearnRateScheduler (line 50) | class LearnRateScheduler:
    method __init__ (line 51) | def __init__(self, learn_rate, max_steps, cur_step=0, verbose=True):
    method step (line 61) | def step(self, step_number):
    method apply (line 72) | def apply(self, optimizer, step_number):

FILE: modules/textual_inversion/saving_settings.py
  function save_settings_to_file (line 52) | def save_settings_to_file(log_directory, all_params):

FILE: modules/textual_inversion/textual_inversion.py
  function list_textual_inversion_templates (line 27) | def list_textual_inversion_templates():
  class Embedding (line 39) | class Embedding:
    method __init__ (line 40) | def __init__(self, vec, name, step=None):
    method save (line 54) | def save(self, filename):
    method checksum (line 73) | def checksum(self):
    method set_hash (line 86) | def set_hash(self, v):
  class DirWithTextualInversionEmbeddings (line 91) | class DirWithTextualInversionEmbeddings:
    method __init__ (line 92) | def __init__(self, path):
    method has_changed (line 96) | def has_changed(self):
    method update (line 104) | def update(self):
  class EmbeddingDatabase (line 111) | class EmbeddingDatabase:
    method __init__ (line 112) | def __init__(self):
    method add_embedding_dir (line 120) | def add_embedding_dir(self, path):
    method clear_embedding_dirs (line 123) | def clear_embedding_dirs(self):
    method register_embedding (line 126) | def register_embedding(self, embedding, model):
    method register_embedding_by_name (line 129) | def register_embedding_by_name(self, embedding, model, name):
    method get_expected_shape (line 152) | def get_expected_shape(self):
    method load_from_file (line 157) | def load_from_file(self, path, filename):
    method load_from_dir (line 195) | def load_from_dir(self, embdir):
    method load_textual_inversion_embeddings (line 212) | def load_textual_inversion_embeddings(self, force_reload=False):
    method find_embedding_at_position (line 245) | def find_embedding_at_position(self, tokens, offset):
  function create_embedding (line 259) | def create_embedding(name, num_vectors_per_token, overwrite_old, init_te...
  function create_embedding_from_data (line 287) | def create_embedding_from_data(data, name, filename='unknown embedding f...
  function write_loss (line 326) | def write_loss(log_directory, filename, step, epoch_len, values):
  function tensorboard_setup (line 350) | def tensorboard_setup(log_directory):
  function tensorboard_add (line 357) | def tensorboard_add(tensorboard_writer, loss, global_step, step, learn_r...
  function tensorboard_add_scaler (line 363) | def tensorboard_add_scaler(tensorboard_writer, tag, value, step):
  function tensorboard_add_image (line 367) | def tensorboard_add_image(tensorboard_writer, tag, pil_image, step):
  function validate_train_inputs (line 376) | def validate_train_inputs(model_name, learn_rate, batch_size, gradient_s...
  function train_embedding (line 400) | def train_embedding(id_task, embedding_name, learn_rate, batch_size, gra...
  function save_embedding (line 691) | def save_embedding(embedding, optimizer, checkpoint, embedding_name, fil...

FILE: modules/textual_inversion/ui.py
  function create_embedding (line 9) | def create_embedding(name, initialization_text, nvpt, overwrite_old):
  function train_embedding (line 17) | def train_embedding(*args):

FILE: modules/timer.py
  class TimerSubcategory (line 5) | class TimerSubcategory:
    method __init__ (line 6) | def __init__(self, timer, category):
    method __enter__ (line 12) | def __enter__(self):
    method __exit__ (line 20) | def __exit__(self, exc_type, exc_val, exc_tb):
  class Timer (line 28) | class Timer:
    method __init__ (line 29) | def __init__(self, print_log=False):
    method elapsed (line 37) | def elapsed(self):
    method add_time_to_record (line 43) | def add_time_to_record(self, category, amount):
    method record (line 49) | def record(self, category, extra_time=0, disable_log=False):
    method subcategory (line 59) | def subcategory(self, name):
    method summary (line 65) | def summary(self):
    method dump (line 78) | def dump(self):
    method reset (line 81) | def reset(self):

FILE: modules/torch_utils.py
  function get_param (line 7) | def get_param(model) -> torch.nn.Parameter:
  function float64 (line 21) | def float64(t: torch.Tensor):

FILE: modules/txt2img.py
  function txt2img_create_processing (line 14) | def txt2img_create_processing(id_task: str, request: gr.Request, prompt:...
  function txt2img_upscale (line 58) | def txt2img_upscale(id_task: str, request: gr.Request, gallery, gallery_...
  function txt2img (line 102) | def txt2img(id_task: str, request: gr.Request, *args):

FILE: modules/ui.py
  function gr_show (line 62) | def gr_show(visible=True):
  function send_gradio_gallery_to_image (line 87) | def send_gradio_gallery_to_image(x):
  function calc_resolution_hires (line 93) | def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, ...
  function resize_from_to_html (line 103) | def resize_from_to_html(width, height, scale_by):
  function process_interrogate (line 113) | def process_interrogate(interrogation_function, mode, ii_input_dir, ii_o...
  function interrogate (line 136) | def interrogate(image):
  function interrogate_deepbooru (line 141) | def interrogate_deepbooru(image):
  function connect_clear_prompt (line 146) | def connect_clear_prompt(button):
  function update_token_counter (line 156) | def update_token_counter(text, steps, styles, *, is_positive=True):
  function update_negative_prompt_token_counter (line 189) | def update_negative_prompt_token_counter(*args):
  function setup_progressbar (line 193) | def setup_progressbar(*args, **kwargs):
  function apply_setting (line 197) | def apply_setting(key, value):
  function create_output_panel (line 230) | def create_output_panel(tabname, outdir, toprow=None):
  function ordered_ui_categories (line 234) | def ordered_ui_categories():
  function create_override_settings_dropdown (line 241) | def create_override_settings_dropdown(tabname, row):
  function create_ui (line 253) | def create_ui():
  function versions_html (line 1178) | def versions_html():
  function setup_ui_api (line 1207) | def setup_ui_api(app):

FILE: modules/ui_checkpoint_merger.py
  function update_interp_description (line 9) | def update_interp_description(value):
  function modelmerger (line 19) | def modelmerger(*args):
  class UiCheckpointMerger (line 29) | class UiCheckpointMerger:
    method __init__ (line 30) | def __init__(self):
    method setup_ui (line 86) | def setup_ui(self, dummy_component, sd_model_checkpoint_component):

FILE: modules/ui_common.py
  function update_generation_info (line 20) | def update_generation_info(generation_info, html_info, img_index):
  function plaintext_to_html (line 32) | def plaintext_to_html(text, classname=None):
  function update_logfile (line 38) | def update_logfile(logfile_path, fields):
  function save_files (line 64) | def save_files(js_data, images, do_make_zip, index):
  class OutputPanel (line 156) | class OutputPanel:
  function create_output_panel (line 164) | def create_output_panel(tabname, outdir, toprow=None):
  function create_refresh_button (line 284) | def create_refresh_button(refresh_component, refresh_method, refreshed_a...
  function setup_dialog (line 312) | def setup_dialog(button_show, dialog, *, button_close=None):

FILE: modules/ui_components.py
  class FormComponent (line 4) | class FormComponent:
    method get_expected_parent (line 5) | def get_expected_parent(self):
  class ToolButton (line 12) | class ToolButton(FormComponent, gr.Button):
    method __init__ (line 15) | def __init__(self, *args, **kwargs):
    method get_block_name (line 19) | def get_block_name(self):
  class ResizeHandleRow (line 23) | class ResizeHandleRow(gr.Row):
    method __init__ (line 26) | def __init__(self, **kwargs):
    method get_block_name (line 31) | def get_block_name(self):
  class FormRow (line 35) | class FormRow(FormComponent, gr.Row):
    method get_block_name (line 38) | def get_block_name(self):
  class FormColumn (line 42) | class FormColumn(FormComponent, gr.Column):
    method get_block_name (line 45) | def get_block_name(self):
  class FormGroup (line 49) | class FormGroup(FormComponent, gr.Group):
    method get_block_name (line 52) | def get_block_name(self):
  class FormHTML (line 56) | class FormHTML(FormComponent, gr.HTML):
    method get_block_name (line 59) | def get_block_name(self):
  class FormColorPicker (line 63) | class FormColorPicker(FormComponent, gr.ColorPicker):
    method get_block_name (line 66) | def get_block_name(self):
  class DropdownMulti (line 70) | class DropdownMulti(FormComponent, gr.Dropdown):
    method __init__ (line 72) | def __init__(self, **kwargs):
    method get_block_name (line 75) | def get_block_name(self):
  class DropdownEditable (line 79) | class DropdownEditable(FormComponent, gr.Dropdown):
    method __init__ (line 81) | def __init__(self, **kwargs):
    method get_block_name (line 84) | def get_block_name(self):
  class InputAccordion (line 88) | class InputAccordion(gr.Checkbox):
    method __init__ (line 96) | def __init__(self, value, **kwargs):
    method extra (line 120) | def extra(self):
    method __enter__ (line 136) | def __enter__(self):
    method __exit__ (line 140) | def __exit__(self, exc_type, exc_val, exc_tb):
    method get_block_name (line 143) | def get_block_name(self):

FILE: modules/ui_extensions.py
  function check_access (line 22) | def check_access():
  function apply_and_restart (line 26) | def apply_and_restart(disable_list, update_list, disable_all):
  function save_config_state (line 59) | def save_config_state(name):
  function restore_config_state (line 76) | def restore_config_state(confirmed, config_state_name, restore_type):
  function check_updates (line 100) | def check_updates(id_task, disable_list):
  function make_commit_link (line 125) | def make_commit_link(commit_hash, remote, text=None):
  function extension_table (line 137) | def extension_table():
  function update_config_states_table (line 194) | def update_config_states_table(state_name):
  function normalize_git_url (line 331) | def normalize_git_url(url):
  function get_extension_dirname_from_url (line 339) | def get_extension_dirname_from_url(url):
  function install_extension_from_url (line 344) | def install_extension_from_url(dirname, url, branch_name=None):
  function install_extension_from_index (line 399) | def install_extension_from_index(url, selected_tags, showing_type, filte...
  function refresh_available_extensions (line 407) | def refresh_available_extensions(url, selected_tags, showing_type, filte...
  function refresh_available_extensions_for_tags (line 421) | def refresh_available_extensions_for_tags(selected_tags, showing_type, f...
  function search_extensions (line 427) | def search_extensions(filter_text, selected_tags, showing_type, filterin...
  function get_date (line 446) | def get_date(info: dict, key):
  function refresh_available_extensions_from_data (line 453) | def refresh_available_extensions_from_data(selected_tags, showing_type, ...
  function preload_extensions_git_metadata (line 539) | def preload_extensions_git_metadata():
  function create_ui (line 544) | def create_ui():

FILE: modules/ui_extra_networks.py
  function allowed_preview_extensions_with_extra (line 25) | def allowed_preview_extensions_with_extra(extra_extensions=None):
  function allowed_preview_extensions (line 29) | def allowed_preview_extensions():
  class ExtraNetworksItem (line 34) | class ExtraNetworksItem:
  function get_tree (line 39) | def get_tree(paths: Union[str, list[str]], items: dict[str, ExtraNetwork...
  function register_page (line 89) | def register_page(page):
  function fetch_file (line 97) | def fetch_file(filename: str = ""):
  function fetch_cover_images (line 114) | def fetch_cover_images(page: str = "", item: str = "", index: int = 0):
  function get_metadata (line 139) | def get_metadata(page: str = "", item: str = ""):
  function get_single_card (line 155) | def get_single_card(page: str = "", tabname: str = "", name: str = ""):
  function add_pages_to_demo (line 173) | def add_pages_to_demo(app):
  function quote_js (line 180) | def quote_js(s):
  class ExtraNetworksPage (line 186) | class ExtraNetworksPage:
    method __init__ (line 187) | def __init__(self, title):
    method refresh (line 207) | def refresh(self):
    method read_user_metadata (line 210) | def read_user_metadata(self, item, use_cache=True):
    method link_preview (line 220) | def link_preview(self, filename):
    method search_terms_from_path (line 225) | def search_terms_from_path(self, filename, possible_directories=None):
    method create_item_html (line 234) | def create_item_html(
    method create_tree_dir_item_html (line 353) | def create_tree_dir_item_html(
    method create_tree_file_item_html (line 405) | def create_tree_file_item_html(self, tabname: str, file_path: str, ite...
    method create_tree_view_html (line 455) | def create_tree_view_html(self, tabname: str) -> str:
    method create_dirs_view_html (line 511) | def create_dirs_view_html(self, tabname: str) -> str:
    method create_card_view_html (line 552) | def create_card_view_html(self, tabname: str, *, none_message) -> str:
    method create_html (line 575) | def create_html(self, tabname, *, empty=False):
    method create_item (line 625) | def create_item(self, name, index=None):
    method list_items (line 628) | def list_items(self):
    method allowed_directories_for_previews (line 631) | def allowed_directories_for_previews(self):
    method get_sort_keys (line 634) | def get_sort_keys(self, path):
    method find_preview (line 647) | def find_preview(self, path):
    method find_embedded_preview (line 660) | def find_embedded_preview(self, path, name, metadata):
    method find_description (line 671) | def find_description(self, path):
    method create_user_metadata_editor (line 686) | def create_user_metadata_editor(self, ui, tabname):
  function initialize (line 690) | def initialize():
  function register_default_pages (line 694) | def register_default_pages():
  class ExtraNetworksUi (line 703) | class ExtraNetworksUi:
    method __init__ (line 704) | def __init__(self):
  function pages_in_preferred_order (line 719) | def pages_in_preferred_order(pages):
  function create_ui (line 735) | def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
  function path_is_parent (line 795) | def path_is_parent(parent_path, child_path):
  function setup_ui (line 802) | def setup_ui(ui, gallery):

FILE: modules/ui_extra_networks_checkpoints.py
  class ExtraNetworksPageCheckpoints (line 8) | class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
    method __init__ (line 9) | def __init__(self):
    method refresh (line 14) | def refresh(self):
    method create_item (line 17) | def create_item(self, name, index=None, enable_filter=True):
    method list_items (line 39) | def list_items(self):
    method allowed_directories_for_previews (line 47) | def allowed_directories_for_previews(self):
    method create_user_metadata_editor (line 50) | def create_user_metadata_editor(self, ui, tabname):

FILE: modules/ui_extra_networks_checkpoints_user_metadata.py
  class CheckpointUserMetadataEditor (line 7) | class CheckpointUserMetadataEditor(ui_extra_networks_user_metadata.UserM...
    method __init__ (line 8) | def __init__(self, ui, tabname, page):
    method save_user_metadata (line 13) | def save_user_metadata(self, name, desc, notes, vae):
    method update_vae (line 21) | def update_vae(self, name):
    method put_values_into_components (line 25) | def put_values_into_components(self, name):
    method create_editor (line 34) | def create_editor(self):

FILE: modules/ui_extra_networks_hypernets.py
  class ExtraNetworksPageHypernetworks (line 8) | class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
    method __init__ (line 9) | def __init__(self):
    method refresh (line 12) | def refresh(self):
    method create_item (line 15) | def create_item(self, name, index=None, enable_filter=True):
    method list_items (line 38) | def list_items(self):
    method allowed_directories_for_previews (line 46) | def allowed_directories_for_previews(self):

FILE: modules/ui_extra_networks_textual_inversion.py
  class ExtraNetworksPageTextualInversion (line 7) | class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksP...
    method __init__ (line 8) | def __init__(self):
    method refresh (line 12) | def refresh(self):
    method create_item (line 15) | def create_item(self, name, index=None, enable_filter=True):
    method list_items (line 36) | def list_items(self):
    method allowed_directories_for_previews (line 44) | def allowed_directories_for_previews(self):

FILE: modules/ui_extra_networks_user_metadata.py
  class UserMetadataEditor (line 11) | class UserMetadataEditor:
    method __init__ (line 13) | def __init__(self, ui, tabname, page):
    method get_user_metadata (line 35) | def get_user_metadata(self, name):
    method create_extra_default_items_in_left_column (line 45) | def create_extra_default_items_in_left_column(self):
    method create_default_editor_elems (line 48) | def create_default_editor_elems(self):
    method create_default_buttons (line 60) | def create_default_buttons(self):
    method get_card_html (line 71) | def get_card_html(self, name):
    method relative_path (line 92) | def relative_path(self, path):
    method get_metadata_table (line 99) | def get_metadata_table(self, name):
    method put_values_into_components (line 118) | def put_values_into_components(self, name):
    method write_user_metadata (line 131) | def write_user_metadata(self, name, metadata):
    method save_user_metadata (line 141) | def save_user_metadata(self, name, desc, notes):
    method setup_save_handler (line 148) | def setup_save_handler(self, button, func, components):
    method create_editor (line 153) | def create_editor(self):
    method create_ui (line 166) | def create_ui(self):
    method save_preview (line 175) | def save_preview(self, index, gallery, name):
    method setup_ui (line 194) | def setup_ui(self, gallery):

FILE: modules/ui_gradio_extensions.py
  function webpath (line 8) | def webpath(fn):
  function javascript_html (line 12) | def javascript_html():
  function css_html (line 31) | def css_html():
  function reload_javascript (line 52) | def reload_javascript():

FILE: modules/ui_loadsave.py
  function radio_choices (line 10) | def radio_choices(comp):  # gradio 3.41 changes choices from list of val...
  class UiLoadsave (line 14) | class UiLoadsave:
    method __init__ (line 17) | def __init__(self, filename):
    method add_component (line 36) | def add_component(self, path, x):
    method add_block (line 127) | def add_block(self, x, path=""):
    method read_from_file (line 141) | def read_from_file(self):
    method write_to_file (line 145) | def write_to_file(self, current_ui_settings):
    method dump_defaults (line 149) | def dump_defaults(self):
    method iter_changes (line 157) | def iter_changes(self, current_ui_settings, values):
    method ui_view (line 184) | def ui_view(self, *values):
    method ui_apply (line 199) | def ui_apply(self, *values):
    method create_ui (line 215) | def create_ui(self):
    method setup_ui (line 231) | def setup_ui(self):

FILE: modules/ui_postprocessing.py
  function create_ui (line 7) | def create_ui():

FILE: modules/ui_prompt_styles.py
  function select_style (line 10) | def select_style(name):
  function save_style (line 21) | def save_style(name, prompt, negative_prompt):
  function delete_style (line 35) | def delete_style(name):
  function materialize_styles (line 45) | def materialize_styles(prompt, negative_prompt, styles):
  function refresh_styles (line 52) | def refresh_styles():
  class UiPromptStyles (line 56) | class UiPromptStyles:
    method __init__ (line 57) | def __init__(self, tabname, main_ui_prompt, main_ui_negative_prompt):
    method setup_apply_button (line 117) | def setup_apply_button(self, button):

FILE: modules/ui_settings.py
  function get_value_for_setting (line 12) | def get_value_for_setting(key):
  function create_setting_component (line 22) | def create_setting_component(key, is_quicksettings=False):
  class UiSettings (line 58) | class UiSettings:
    method run_settings (line 72) | def run_settings(self, *args):
    method run_settings_single (line 91) | def run_settings_single(self, value, key):
    method register_settings (line 102) | def register_settings(self):
    method create_ui (line 105) | def create_ui(self, loadsave, dummy_component):
    method add_quicksettings (line 290) | def add_quicksettings(self):
    method add_functionality (line 296) | def add_functionality(self, demo):
    method search (line 342) | def search(self, text):

FILE: modules/ui_tempdir.py
  function register_tmp_file (line 16) | def register_tmp_file(gradio, filename):
  function check_tmp_file (line 24) | def check_tmp_file(gradio, filename):
  function save_pil_to_file (line 34) | def save_pil_to_file(self, pil_image, dir=None, format="png"):
  function install_ui_tempdir_override (line 59) | def install_ui_tempdir_override():
  function on_tmpdir_changed (line 64) | def on_tmpdir_changed():
  function cleanup_tmpdr (line 73) | def cleanup_tmpdr():
  function is_gradio_temp_path (line 88) | def is_gradio_temp_path(path):

FILE: modules/ui_toprow.py
  class Toprow (line 9) | class Toprow:
    method __init__ (line 38) | def __init__(self, is_img2img, is_compact=False, id_part=None):
    method create_classic_toprow (line 52) | def create_classic_toprow(self):
    method create_inline_toprow_prompts (line 62) | def create_inline_toprow_prompts(self):
    method create_inline_toprow_image (line 74) | def create_inline_toprow_image(self):
    method create_prompts (line 80) | def create_prompts(self):
    method create_submit_box (line 96) | def create_submit_box(self):
    method create_tools_row (line 116) | def create_tools_row(self):
    method create_styles_ui (line 142) | def create_styles_ui(self):

FILE: modules/upscaler.py
  class Upscaler (line 14) | class Upscaler:
    method __init__ (line 26) | def __init__(self, create_dirs=False):
    method do_upscale (line 51) | def do_upscale(self, img: PIL.Image, selected_model: str):
    method upscale (line 54) | def upscale(self, img: PIL.Image, scale, selected_model: str = None):
    method load_model (line 79) | def load_model(self, path: str):
    method find_models (line 82) | def find_models(self, ext_filter=None) -> list:
    method update_status (line 85) | def update_status(self, prompt):
  class UpscalerData (line 89) | class UpscalerData:
    method __init__ (line 96) | def __init__(self, name: str, path: str, upscaler: Upscaler = None, sc...
    method __repr__ (line 104) | def __repr__(self):
  class UpscalerNone (line 108) | class UpscalerNone(Upscaler):
    method load_model (line 112) | def load_model(self, path):
    method do_upscale (line 115) | def do_upscale(self, img, selected_model=None):
    method __init__ (line 118) | def __init__(self, dirname=None):
  class UpscalerLanczos (line 123) | class UpscalerLanczos(Upscaler):
    method do_upscale (line 126) | def do_upscale(self, img, selected_model=None):
    method load_model (line 129) | def load_model(self, _):
    method __init__ (line 132) | def __init__(self, dirname=None):
  class UpscalerNearest (line 138) | class UpscalerNearest(Upscaler):
    method do_upscale (line 141) | def do_upscale(self, img, selected_model=None):
    method load_model (line 144) | def load_model(self, _):
    method __init__ (line 147) | def __init__(self, dirname=None):

FILE: modules/upscaler_utils.py
  function pil_image_to_torch_bgr (line 14) | def pil_image_to_torch_bgr(img: Image.Image) -> torch.Tensor:
  function torch_bgr_to_pil_image (line 22) | def torch_bgr_to_pil_image(tensor: torch.Tensor) -> Image.Image:
  function upscale_pil_patch (line 38) | def upscale_pil_patch(model, img: Image.Image) -> Image.Image:
  function upscale_with_model (line 51) | def upscale_with_model(
  function tiled_upscale_2 (line 91) | def tiled_upscale_2(
  function upscale_2 (line 165) | def upscale_2(

FILE: modules/util.py
  function natural_sort_key (line 8) | def natural_sort_key(s, regex=re.compile('([0-9]+)')):
  function listfiles (line 12) | def listfiles(dirname):
  function html_path (line 17) | def html_path(filename):
  function html (line 21) | def html(filename):
  function walk_files (line 31) | def walk_files(path, allowed_extensions=None):
  function ldm_print (line 54) | def ldm_print(*args, **kwargs):
  function truncate_path (line 61) | def truncate_path(target_path, base_path=cwd):
  class MassFileListerCachedDir (line 71) | class MassFileListerCachedDir:
    method __init__ (line 74) | def __init__(self, dirname):
    method update_entry (line 84) | def update_entry(self, filename):
  class MassFileLister (line 96) | class MassFileLister:
    method __init__ (line 99) | def __init__(self):
    method
Condensed preview — 298 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (2,416K chars).
[
  {
    "path": ".eslintignore",
    "chars": 48,
    "preview": "extensions\nextensions-disabled\nrepositories\nvenv"
  },
  {
    "path": ".eslintrc.js",
    "chars": 3423,
    "preview": "/* global module */\nmodule.exports = {\n    env: {\n        browser: true,\n        es2021: true,\n    },\n    extends: \"esli"
  },
  {
    "path": ".git-blame-ignore-revs",
    "chars": 55,
    "preview": "# Apply ESlint\n9c54b78d9dde5601e916f308d9a9d6953ec39430"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.yml",
    "chars": 4405,
    "preview": "name: Bug Report\ndescription: You think something is broken in the UI\ntitle: \"[Bug]: \"\nlabels: [\"bug-report\"]\n\nbody:\n  -"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/config.yml",
    "chars": 203,
    "preview": "blank_issues_enabled: false\ncontact_links:\n  - name: WebUI Community Support\n    url: https://github.com/AUTOMATIC1111/s"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.yml",
    "chars": 1413,
    "preview": "name: Feature request\ndescription: Suggest an idea for this project\ntitle: \"[Feature Request]: \"\nlabels: [\"enhancement\"]"
  },
  {
    "path": ".github/pull_request_template.md",
    "chars": 577,
    "preview": "## Description\n\n* a simple description of what you're trying to accomplish\n* a summary of changes in code\n* which issues"
  },
  {
    "path": ".github/workflows/on_pull_request.yaml",
    "chars": 1231,
    "preview": "name: Linter\n\non:\n  - push\n  - pull_request\n\njobs:\n  lint-python:\n    name: ruff\n    runs-on: ubuntu-latest\n    if: gith"
  },
  {
    "path": ".github/workflows/run_tests.yaml",
    "chars": 2529,
    "preview": "name: Tests\n\non:\n  - push\n  - pull_request\n\njobs:\n  test:\n    name: tests on CPU with empty model\n    runs-on: ubuntu-la"
  },
  {
    "path": ".github/workflows/warns_merge_master.yml",
    "chars": 424,
    "preview": "name: Pull requests can't target master branch\n\n\"on\":\n  pull_request:\n    types:\n      - opened\n      - synchronize\n    "
  },
  {
    "path": ".gitignore",
    "chars": 573,
    "preview": "__pycache__\n*.ckpt\n*.safetensors\n*.pth\n.DS_Store\n/ESRGAN/*\n/SwinIR/*\n/repositories\n/venv\n/tmp\n/model.ckpt\n/models/**/*\n/"
  },
  {
    "path": ".pylintrc",
    "chars": 119,
    "preview": "# See https://pylint.pycqa.org/en/latest/user_guide/messages/message_control.html\n[MESSAGES CONTROL]\ndisable=C,R,W,E,I\n"
  },
  {
    "path": "CHANGELOG.md",
    "chars": 97076,
    "preview": "## 1.10.1\r\n\r\n### Bug Fixes:\r\n* fix image upscale on cpu ([#16275](https://github.com/AUTOMATIC1111/stable-diffusion-webu"
  },
  {
    "path": "CITATION.cff",
    "chars": 243,
    "preview": "cff-version: 1.2.0\nmessage: \"If you use this software, please cite it as below.\"\nauthors:\n  - given-names: AUTOMATIC1111"
  },
  {
    "path": "CODEOWNERS",
    "chars": 657,
    "preview": "*       @AUTOMATIC1111\r\n\r\n# if you were managing a localization and were removed from this file, this is because\r\n# the "
  },
  {
    "path": "LICENSE.txt",
    "chars": 35240,
    "preview": "                    GNU AFFERO GENERAL PUBLIC LICENSE\r\n                       Version 3, 19 November 2007\r\n\r\n           "
  },
  {
    "path": "README.md",
    "chars": 12924,
    "preview": "# Stable Diffusion web UI\r\nA web interface for Stable Diffusion, implemented using Gradio library.\r\n\r\n![](screenshot.png"
  },
  {
    "path": "_typos.toml",
    "chars": 146,
    "preview": "[default.extend-words]\n# Part of \"RGBa\" (Pillow's pre-multiplied alpha RGB mode)\nBa = \"Ba\"\n# HSA is something AMD uses f"
  },
  {
    "path": "configs/alt-diffusion-inference.yaml",
    "chars": 1913,
    "preview": "model:\n  base_learning_rate: 1.0e-04\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "configs/alt-diffusion-m18-inference.yaml",
    "chars": 1968,
    "preview": "model:\n  base_learning_rate: 1.0e-04\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "configs/instruct-pix2pix.yaml",
    "chars": 2601,
    "preview": "# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).\n# See more de"
  },
  {
    "path": "configs/sd3-inference.yaml",
    "chars": 104,
    "preview": "model:\n  target: modules.models.sd3.sd3_model.SD3Inferencer\n  params:\n    shift: 3\n    state_dict: null\n"
  },
  {
    "path": "configs/sd_xl_inpaint.yaml",
    "chars": 3248,
    "preview": "model:\n  target: sgm.models.diffusion.DiffusionEngine\n  params:\n    scale_factor: 0.13025\n    disable_first_stage_autoca"
  },
  {
    "path": "configs/v1-inference.yaml",
    "chars": 1874,
    "preview": "model:\n  base_learning_rate: 1.0e-04\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "configs/v1-inpainting-inference.yaml",
    "chars": 1992,
    "preview": "model:\n  base_learning_rate: 7.5e-05\n  target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion\n  params:\n    linear_sta"
  },
  {
    "path": "environment-wsl2.yaml",
    "chars": 167,
    "preview": "name: automatic\nchannels:\n  - pytorch\n  - defaults\ndependencies:\n  - python=3.10\n  - pip=23.0\n  - cudatoolkit=11.8\n  - p"
  },
  {
    "path": "extensions-builtin/LDSR/ldsr_model_arch.py",
    "chars": 9759,
    "preview": "import os\nimport gc\nimport time\n\nimport numpy as np\nimport torch\nimport torchvision\nfrom PIL import Image\nfrom einops im"
  },
  {
    "path": "extensions-builtin/LDSR/preload.py",
    "chars": 221,
    "preview": "import os\r\nfrom modules import paths\r\n\r\n\r\ndef preload(parser):\r\n    parser.add_argument(\"--ldsr-models-path\", type=str, "
  },
  {
    "path": "extensions-builtin/LDSR/scripts/ldsr_model.py",
    "chars": 3128,
    "preview": "import os\n\nfrom modules.modelloader import load_file_from_url\nfrom modules.upscaler import Upscaler, UpscalerData\nfrom l"
  },
  {
    "path": "extensions-builtin/LDSR/sd_hijack_autoencoder.py",
    "chars": 11836,
    "preview": "# The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo\n# The VQMo"
  },
  {
    "path": "extensions-builtin/LDSR/sd_hijack_ddpm_v1.py",
    "chars": 67254,
    "preview": "# This script is copied from the compvis/stable-diffusion repo (aka the SD V1 repo)\n# Original filename: ldm/models/diff"
  },
  {
    "path": "extensions-builtin/LDSR/vqvae_quantize.py",
    "chars": 6478,
    "preview": "# Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/t"
  },
  {
    "path": "extensions-builtin/Lora/extra_networks_lora.py",
    "chars": 2647,
    "preview": "from modules import extra_networks, shared\r\nimport networks\r\n\r\n\r\nclass ExtraNetworkLora(extra_networks.ExtraNetwork):\r\n "
  },
  {
    "path": "extensions-builtin/Lora/lora.py",
    "chars": 357,
    "preview": "import networks\r\n\r\nlist_available_loras = networks.list_available_networks\r\n\r\navailable_loras = networks.available_netwo"
  },
  {
    "path": "extensions-builtin/Lora/lora_logger.py",
    "chars": 929,
    "preview": "import sys\nimport copy\nimport logging\n\n\nclass ColoredFormatter(logging.Formatter):\n    COLORS = {\n        \"DEBUG\": \"\\033"
  },
  {
    "path": "extensions-builtin/Lora/lora_patches.py",
    "chars": 2556,
    "preview": "import torch\r\n\r\nimport networks\r\nfrom modules import patches\r\n\r\n\r\nclass LoraPatches:\r\n    def __init__(self):\r\n        s"
  },
  {
    "path": "extensions-builtin/Lora/lyco_helpers.py",
    "chars": 2403,
    "preview": "import torch\r\n\r\n\r\ndef make_weight_cp(t, wa, wb):\r\n    temp = torch.einsum('i j k l, j r -> i r k l', t, wb)\r\n    return "
  },
  {
    "path": "extensions-builtin/Lora/network.py",
    "chars": 7944,
    "preview": "from __future__ import annotations\r\nimport os\r\nfrom collections import namedtuple\r\nimport enum\r\n\r\nimport torch.nn as nn\r"
  },
  {
    "path": "extensions-builtin/Lora/network_full.py",
    "chars": 904,
    "preview": "import network\r\n\r\n\r\nclass ModuleTypeFull(network.ModuleType):\r\n    def create_module(self, net: network.Network, weights"
  },
  {
    "path": "extensions-builtin/Lora/network_glora.py",
    "chars": 1246,
    "preview": "\nimport network\n\nclass ModuleTypeGLora(network.ModuleType):\n    def create_module(self, net: network.Network, weights: n"
  },
  {
    "path": "extensions-builtin/Lora/network_hada.py",
    "chars": 1985,
    "preview": "import lyco_helpers\r\nimport network\r\n\r\n\r\nclass ModuleTypeHada(network.ModuleType):\r\n    def create_module(self, net: net"
  },
  {
    "path": "extensions-builtin/Lora/network_ia3.py",
    "chars": 908,
    "preview": "import network\r\n\r\n\r\nclass ModuleTypeIa3(network.ModuleType):\r\n    def create_module(self, net: network.Network, weights:"
  },
  {
    "path": "extensions-builtin/Lora/network_lokr.py",
    "chars": 2354,
    "preview": "import torch\r\n\r\nimport lyco_helpers\r\nimport network\r\n\r\n\r\nclass ModuleTypeLokr(network.ModuleType):\r\n    def create_modul"
  },
  {
    "path": "extensions-builtin/Lora/network_lora.py",
    "chars": 4116,
    "preview": "import torch\r\n\r\nimport lyco_helpers\r\nimport modules.models.sd3.mmdit\r\nimport network\r\nfrom modules import devices\r\n\r\n\r\nc"
  },
  {
    "path": "extensions-builtin/Lora/network_norm.py",
    "chars": 889,
    "preview": "import network\n\n\nclass ModuleTypeNorm(network.ModuleType):\n    def create_module(self, net: network.Network, weights: ne"
  },
  {
    "path": "extensions-builtin/Lora/network_oft.py",
    "chars": 5247,
    "preview": "import torch\nimport network\nfrom einops import rearrange\n\n\nclass ModuleTypeOFT(network.ModuleType):\n    def create_modul"
  },
  {
    "path": "extensions-builtin/Lora/networks.py",
    "chars": 29205,
    "preview": "from __future__ import annotations\r\nimport gradio as gr\r\nimport logging\r\nimport os\r\nimport re\r\n\r\nimport lora_patches\r\nim"
  },
  {
    "path": "extensions-builtin/Lora/preload.py",
    "chars": 513,
    "preview": "import os\r\nfrom modules import paths\r\nfrom modules.paths_internal import normalized_filepath\r\n\r\n\r\ndef preload(parser):\r\n"
  },
  {
    "path": "extensions-builtin/Lora/scripts/lora_script.py",
    "chars": 4182,
    "preview": "import re\r\n\r\nimport gradio as gr\r\nfrom fastapi import FastAPI\r\n\r\nimport network\r\nimport networks\r\nimport lora  # noqa:F4"
  },
  {
    "path": "extensions-builtin/Lora/ui_edit_user_metadata.py",
    "chars": 8703,
    "preview": "import datetime\r\nimport html\r\nimport random\r\n\r\nimport gradio as gr\r\nimport re\r\n\r\nfrom modules import ui_extra_networks_u"
  },
  {
    "path": "extensions-builtin/Lora/ui_extra_networks_lora.py",
    "chars": 3796,
    "preview": "import os\r\n\r\nimport network\r\nimport networks\r\n\r\nfrom modules import shared, ui_extra_networks\r\nfrom modules.ui_extra_net"
  },
  {
    "path": "extensions-builtin/ScuNET/preload.py",
    "chars": 227,
    "preview": "import os\r\nfrom modules import paths\r\n\r\n\r\ndef preload(parser):\r\n    parser.add_argument(\"--scunet-models-path\", type=str"
  },
  {
    "path": "extensions-builtin/ScuNET/scripts/scunet_model.py",
    "chars": 3095,
    "preview": "import sys\n\nimport PIL.Image\n\nimport modules.upscaler\nfrom modules import devices, errors, modelloader, script_callbacks"
  },
  {
    "path": "extensions-builtin/SwinIR/preload.py",
    "chars": 227,
    "preview": "import os\r\nfrom modules import paths\r\n\r\n\r\ndef preload(parser):\r\n    parser.add_argument(\"--swinir-models-path\", type=str"
  },
  {
    "path": "extensions-builtin/SwinIR/scripts/swinir_model.py",
    "chars": 3767,
    "preview": "import logging\nimport sys\n\nimport torch\nfrom PIL import Image\n\nfrom modules import devices, modelloader, script_callback"
  },
  {
    "path": "extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js",
    "chars": 36827,
    "preview": "onUiLoaded(async() => {\n    const elementIDs = {\n        img2imgTabs: \"#mode_img2img .tab-nav\",\n        inpaint: \"#img2m"
  },
  {
    "path": "extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py",
    "chars": 1961,
    "preview": "import gradio as gr\nfrom modules import shared\n\nshared.options_templates.update(shared.options_section(('canvas_hotkey',"
  },
  {
    "path": "extensions-builtin/canvas-zoom-and-pan/style.css",
    "chars": 1097,
    "preview": ".canvas-tooltip-info {\n  position: absolute;\n  top: 10px;\n  left: 10px;\n  cursor: help;\n  background-color: rgba(0, 0, 0"
  },
  {
    "path": "extensions-builtin/extra-options-section/scripts/extra_options_section.py",
    "chars": 4065,
    "preview": "import math\r\n\r\nimport gradio as gr\r\nfrom modules import scripts, shared, ui_components, ui_settings, infotext_utils, err"
  },
  {
    "path": "extensions-builtin/hypertile/hypertile.py",
    "chars": 14160,
    "preview": "\"\"\"\nHypertile module for splitting attention layers in SD-1.5 U-Net and SD-1.5 VAE\nWarn: The patch works well only if th"
  },
  {
    "path": "extensions-builtin/hypertile/scripts/hypertile_script.py",
    "chars": 7451,
    "preview": "import hypertile\r\nfrom modules import scripts, script_callbacks, shared\r\n\r\n\r\nclass ScriptHypertile(scripts.Script):\r\n   "
  },
  {
    "path": "extensions-builtin/mobile/javascript/mobile.js",
    "chars": 1089,
    "preview": "var isSetupForMobile = false;\n\nfunction isMobile() {\n    for (var tab of [\"txt2img\", \"img2img\"]) {\n        var imageTab "
  },
  {
    "path": "extensions-builtin/postprocessing-for-training/scripts/postprocessing_autosized_crop.py",
    "chars": 3145,
    "preview": "from PIL import Image\r\n\r\nfrom modules import scripts_postprocessing, ui_components\r\nimport gradio as gr\r\n\r\n\r\ndef center_"
  },
  {
    "path": "extensions-builtin/postprocessing-for-training/scripts/postprocessing_caption.py",
    "chars": 971,
    "preview": "from modules import scripts_postprocessing, ui_components, deepbooru, shared\r\nimport gradio as gr\r\n\r\n\r\nclass ScriptPostp"
  },
  {
    "path": "extensions-builtin/postprocessing-for-training/scripts/postprocessing_create_flipped_copies.py",
    "chars": 1150,
    "preview": "from PIL import ImageOps, Image\r\n\r\nfrom modules import scripts_postprocessing, ui_components\r\nimport gradio as gr\r\n\r\n\r\nc"
  },
  {
    "path": "extensions-builtin/postprocessing-for-training/scripts/postprocessing_focal_crop.py",
    "chars": 2392,
    "preview": "\r\nfrom modules import scripts_postprocessing, ui_components, errors\r\nimport gradio as gr\r\n\r\nfrom modules.textual_inversi"
  },
  {
    "path": "extensions-builtin/postprocessing-for-training/scripts/postprocessing_split_oversized.py",
    "chars": 2481,
    "preview": "import math\r\n\r\nfrom modules import scripts_postprocessing, ui_components\r\nimport gradio as gr\r\n\r\n\r\ndef split_pic(image, "
  },
  {
    "path": "extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js",
    "chars": 1714,
    "preview": "// Stable Diffusion WebUI - Bracket checker\n// By Hingashi no Florin/Bwin4L & @akx\n// Counts open and closed brackets (r"
  },
  {
    "path": "extensions-builtin/soft-inpainting/scripts/soft_inpainting.py",
    "chars": 30877,
    "preview": "import numpy as np\nimport gradio as gr\nimport math\nfrom modules.ui_components import InputAccordion\nimport modules.scrip"
  },
  {
    "path": "html/extra-networks-card.html",
    "chars": 360,
    "preview": "<div class=\"card\" style=\"{style}\" onclick=\"{card_clicked}\" data-name=\"{name}\" {sort_keys}>\n\t{background_image}\n\t<div cla"
  },
  {
    "path": "html/extra-networks-copy-path-button.html",
    "chars": 168,
    "preview": "<div class=\"copy-path-button card-button\"\n    title=\"Copy path to clipboard\"\n    onclick=\"extraNetworksCopyCardPath(even"
  },
  {
    "path": "html/extra-networks-edit-item-button.html",
    "chars": 162,
    "preview": "<div class=\"edit-button card-button\"\n    title=\"Edit metadata\"\n    onclick=\"extraNetworksEditUserMetadata(event, '{tabna"
  },
  {
    "path": "html/extra-networks-metadata-button.html",
    "chars": 161,
    "preview": "<div class=\"metadata-button card-button\"\n    title=\"Show internal metadata\"\n    onclick=\"extraNetworksRequestMetadata(ev"
  },
  {
    "path": "html/extra-networks-no-cards.html",
    "chars": 119,
    "preview": "<div class='nocards'>\n<h1>Nothing here. Add some content to the following directories:</h1>\n\n<ul>\n{dirs}\n</ul>\n</div>\n\n"
  },
  {
    "path": "html/extra-networks-pane-dirs.html",
    "chars": 322,
    "preview": "    <div class=\"extra-network-pane-content-dirs\">\r\n        <div id='{tabname}_{extra_networks_tabname}_dirs' class='extr"
  },
  {
    "path": "html/extra-networks-pane-tree.html",
    "chars": 423,
    "preview": "    <div class=\"extra-network-pane-content-tree resize-handle-row\">\r\n        <div id='{tabname}_{extra_networks_tabname}"
  },
  {
    "path": "html/extra-networks-pane.html",
    "chars": 3675,
    "preview": "<div id='{tabname}_{extra_networks_tabname}_pane' class='extra-network-pane {tree_view_div_default_display_class}'>\n    "
  },
  {
    "path": "html/extra-networks-tree-button.html",
    "chars": 912,
    "preview": "<span data-filterable-item-text hidden>{search_terms}</span>\n<div class=\"tree-list-content {subclass}\"\n    type=\"button\""
  },
  {
    "path": "html/footer.html",
    "chars": 532,
    "preview": "<div>\r\n        <a href=\"{api_docs}\">API</a>\r\n         • \r\n        <a href=\"https://github.com/AUTOMATIC1111/stable-diffu"
  },
  {
    "path": "html/licenses.html",
    "chars": 20645,
    "preview": "<style>\r\n    #licenses h2 {font-size: 1.2em; font-weight: bold; margin-bottom: 0.2em;}\r\n    #licenses small {font-size: "
  },
  {
    "path": "javascript/aspectRatioOverlay.js",
    "chars": 3994,
    "preview": "\nlet currentWidth = null;\nlet currentHeight = null;\nlet arFrameTimeout = setTimeout(function() {}, 0);\n\nfunction dimensi"
  },
  {
    "path": "javascript/contextMenus.js",
    "chars": 5658,
    "preview": "\nvar contextMenuInit = function() {\n    let eventListenerApplied = false;\n    let menuSpecs = new Map();\n\n    const uid "
  },
  {
    "path": "javascript/dragdrop.js",
    "chars": 5150,
    "preview": "// allows drag-dropping files into gradio image elements, and also pasting images from clipboard\n\nfunction isValidImageL"
  },
  {
    "path": "javascript/edit-attention.js",
    "chars": 5842,
    "preview": "function keyupEditAttention(event) {\n    let target = event.originalTarget || event.composedPath()[0];\n    if (!target.m"
  },
  {
    "path": "javascript/edit-order.js",
    "chars": 1597,
    "preview": "/* alt+left/right moves text in prompt */\n\nfunction keyupEditOrder(event) {\n    if (!opts.keyedit_move) return;\n\n    let"
  },
  {
    "path": "javascript/extensions.js",
    "chars": 3072,
    "preview": "\nfunction extensions_apply(_disabled_list, _update_list, disable_all) {\n    var disable = [];\n    var update = [];\n    c"
  },
  {
    "path": "javascript/extraNetworks.js",
    "chars": 26954,
    "preview": "function toggleCss(key, css, enable) {\n    var style = document.getElementById(key);\n    if (enable && !style) {\n       "
  },
  {
    "path": "javascript/generationParams.js",
    "chars": 1504,
    "preview": "// attaches listeners to the txt2img and img2img galleries to update displayed generation param text when the image chan"
  },
  {
    "path": "javascript/hints.js",
    "chars": 14565,
    "preview": "// mouseover tooltips for various UI elements\n\nvar titles = {\n    \"Sampling steps\": \"How many times to improve the gener"
  },
  {
    "path": "javascript/hires_fix.js",
    "chars": 886,
    "preview": "\nfunction onCalcResolutionHires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y) {\n    function setInactive(el"
  },
  {
    "path": "javascript/imageMaskFix.js",
    "chars": 1354,
    "preview": "/**\n * temporary fix for https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/668\n * @see https://github.com/g"
  },
  {
    "path": "javascript/imageviewer.js",
    "chars": 9450,
    "preview": "// A full size 'lightbox' preview modal shown when left clicking on gallery previews\nfunction closeModal() {\n    gradioA"
  },
  {
    "path": "javascript/imageviewerGamepad.js",
    "chars": 1844,
    "preview": "let gamepads = [];\n\nwindow.addEventListener('gamepadconnected', (e) => {\n    const index = e.gamepad.index;\n    let isWa"
  },
  {
    "path": "javascript/inputAccordion.js",
    "chars": 2325,
    "preview": "function inputAccordionChecked(id, checked) {\n    var accordion = gradioApp().getElementById(id);\n    accordion.visibleC"
  },
  {
    "path": "javascript/localStorage.js",
    "chars": 534,
    "preview": "\nfunction localSet(k, v) {\n    try {\n        localStorage.setItem(k, v);\n    } catch (e) {\n        console.warn(`Failed "
  },
  {
    "path": "javascript/localization.js",
    "chars": 5708,
    "preview": "\n// localization = {} -- the dict with translations is created by the backend\n\nvar ignore_ids_for_localization = {\n    s"
  },
  {
    "path": "javascript/notification.js",
    "chars": 1676,
    "preview": "// Monitors the gallery and sends a browser notification when the leading image is new.\n\nlet lastHeadImg = null;\n\nlet no"
  },
  {
    "path": "javascript/profilerVisualization.js",
    "chars": 4939,
    "preview": "\nfunction createRow(table, cellName, items) {\n    var tr = document.createElement('tr');\n    var res = [];\n\n    items.fo"
  },
  {
    "path": "javascript/progressbar.js",
    "chars": 6542,
    "preview": "// code related to showing and updating progressbar shown as the image is being made\n\nfunction rememberGallerySelection("
  },
  {
    "path": "javascript/resizeHandle.js",
    "chars": 6623,
    "preview": "(function() {\n    const GRADIO_MIN_WIDTH = 320;\n    const PAD = 16;\n    const DEBOUNCE_TIME = 100;\n    const DOUBLE_TAP_"
  },
  {
    "path": "javascript/settings.js",
    "chars": 2272,
    "preview": "let settingsExcludeTabsFromShowAll = {\n    settings_tab_defaults: 1,\n    settings_tab_sysinfo: 1,\n    settings_tab_actio"
  },
  {
    "path": "javascript/textualInversion.js",
    "chars": 441,
    "preview": "\n\n\nfunction start_training_textual_inversion() {\n    gradioApp().querySelector('#ti_error').innerHTML = '';\n\n    var id "
  },
  {
    "path": "javascript/token-counters.js",
    "chars": 2737,
    "preview": "let promptTokenCountUpdateFunctions = {};\n\nfunction update_txt2img_tokens(...args) {\n    // Called from Gradio\n    updat"
  },
  {
    "path": "javascript/ui.js",
    "chars": 13380,
    "preview": "// various functions for interaction with ui.py not large enough to warrant putting them in separate files\n\nfunction set"
  },
  {
    "path": "javascript/ui_settings_hints.js",
    "chars": 2311,
    "preview": "// various hints and extra info for the settings tab\n\nvar settingsHintsSetup = false;\n\nonOptionsChanged(function() {\n   "
  },
  {
    "path": "launch.py",
    "chars": 1297,
    "preview": "from modules import launch_utils\r\n\r\nargs = launch_utils.args\r\npython = launch_utils.python\r\ngit = launch_utils.git\r\ninde"
  },
  {
    "path": "localizations/Put localization files here.txt",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "modules/api/api.py",
    "chars": 42038,
    "preview": "import base64\nimport io\nimport os\nimport time\nimport datetime\nimport uvicorn\nimport ipaddress\nimport requests\nimport gra"
  },
  {
    "path": "modules/api/models.py",
    "chars": 16555,
    "preview": "import inspect\n\nfrom pydantic import BaseModel, Field, create_model\nfrom typing import Any, Optional, Literal\nfrom infle"
  },
  {
    "path": "modules/cache.py",
    "chars": 4132,
    "preview": "import json\r\nimport os\r\nimport os.path\r\nimport threading\r\n\r\nimport diskcache\r\nimport tqdm\r\n\r\nfrom modules.paths import d"
  },
  {
    "path": "modules/call_queue.py",
    "chars": 5012,
    "preview": "import os.path\r\nfrom functools import wraps\r\nimport html\r\nimport time\r\n\r\nfrom modules import shared, progress, errors, d"
  },
  {
    "path": "modules/cmd_args.py",
    "chars": 18344,
    "preview": "import argparse\r\nimport json\r\nimport os\r\nfrom modules.paths_internal import normalized_filepath, models_path, script_pat"
  },
  {
    "path": "modules/codeformer_model.py",
    "chars": 1911,
    "preview": "from __future__ import annotations\r\n\r\nimport logging\r\n\r\nimport torch\r\n\r\nfrom modules import (\r\n    devices,\r\n    errors,"
  },
  {
    "path": "modules/config_states.py",
    "chars": 5875,
    "preview": "\"\"\"\nSupports saving and restoring webui and extensions from a known working set of commits\n\"\"\"\n\nimport os\nimport json\nim"
  },
  {
    "path": "modules/dat_model.py",
    "chars": 2684,
    "preview": "import os\n\nfrom modules import modelloader, errors\nfrom modules.shared import cmd_opts, opts\nfrom modules.upscaler impor"
  },
  {
    "path": "modules/deepbooru.py",
    "chars": 3039,
    "preview": "import os\nimport re\n\nimport torch\nimport numpy as np\n\nfrom modules import modelloader, paths, deepbooru_model, devices, "
  },
  {
    "path": "modules/deepbooru_model.py",
    "chars": 36341,
    "preview": "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom modules import devices\r\n\r\n# see https://git"
  },
  {
    "path": "modules/devices.py",
    "chars": 8827,
    "preview": "import sys\nimport contextlib\nfrom functools import lru_cache\n\nimport torch\nfrom modules import errors, shared, npu_speci"
  },
  {
    "path": "modules/errors.py",
    "chars": 4585,
    "preview": "import sys\r\nimport textwrap\r\nimport traceback\r\n\r\n\r\nexception_records = []\r\n\r\n\r\ndef format_traceback(tb):\r\n    return [[f"
  },
  {
    "path": "modules/esrgan_model.py",
    "chars": 2188,
    "preview": "from modules import modelloader, devices, errors\r\nfrom modules.shared import opts\r\nfrom modules.upscaler import Upscaler"
  },
  {
    "path": "modules/extensions.py",
    "chars": 11320,
    "preview": "from __future__ import annotations\r\n\r\nimport configparser\r\nimport dataclasses\r\nimport os\r\nimport threading\r\nimport re\r\n\r"
  },
  {
    "path": "modules/extra_networks.py",
    "chars": 7290,
    "preview": "import json\r\nimport os\r\nimport re\r\nimport logging\r\nfrom collections import defaultdict\r\n\r\nfrom modules import errors\r\n\r\n"
  },
  {
    "path": "modules/extra_networks_hypernet.py",
    "chars": 1148,
    "preview": "from modules import extra_networks, shared\r\nfrom modules.hypernetworks import hypernetwork\r\n\r\n\r\nclass ExtraNetworkHypern"
  },
  {
    "path": "modules/extras.py",
    "chars": 12610,
    "preview": "import os\r\nimport re\r\nimport shutil\r\nimport json\r\n\r\n\r\nimport torch\r\nimport tqdm\r\n\r\nfrom modules import shared, images, s"
  },
  {
    "path": "modules/face_restoration.py",
    "chars": 494,
    "preview": "from modules import shared\r\n\r\n\r\nclass FaceRestoration:\r\n    def name(self):\r\n        return \"None\"\r\n\r\n    def restore(se"
  },
  {
    "path": "modules/face_restoration_utils.py",
    "chars": 6443,
    "preview": "from __future__ import annotations\n\nimport logging\nimport os\nfrom functools import cached_property\nfrom typing import TY"
  },
  {
    "path": "modules/fifo_lock.py",
    "chars": 1020,
    "preview": "import threading\nimport collections\n\n\n# reference: https://gist.github.com/vitaliyp/6d54dd76ca2c3cdfc1149d33007dc34a\ncla"
  },
  {
    "path": "modules/gfpgan_model.py",
    "chars": 2097,
    "preview": "from __future__ import annotations\r\n\r\nimport logging\r\nimport os\r\n\r\nimport torch\r\n\r\nfrom modules import (\r\n    devices,\r\n"
  },
  {
    "path": "modules/gitpython_hack.py",
    "chars": 1464,
    "preview": "from __future__ import annotations\n\nimport io\nimport subprocess\n\nimport git\n\n\nclass Git(git.Git):\n    \"\"\"\n    Git subcla"
  },
  {
    "path": "modules/gradio_extensons.py",
    "chars": 2743,
    "preview": "import gradio as gr\r\n\r\nfrom modules import scripts, ui_tempdir, patches\r\n\r\n\r\ndef add_classes_to_gradio_component(comp):\r"
  },
  {
    "path": "modules/hashes.py",
    "chars": 2197,
    "preview": "import hashlib\r\nimport os.path\r\n\r\nfrom modules import shared\r\nimport modules.cache\r\n\r\ndump_cache = modules.cache.dump_ca"
  },
  {
    "path": "modules/hat_model.py",
    "chars": 1685,
    "preview": "import os\r\nimport sys\r\n\r\nfrom modules import modelloader, devices\r\nfrom modules.shared import opts\r\nfrom modules.upscale"
  },
  {
    "path": "modules/hypernetworks/hypernetwork.py",
    "chars": 35924,
    "preview": "import datetime\r\nimport glob\r\nimport html\r\nimport os\r\nimport inspect\r\nfrom contextlib import closing\r\n\r\nimport modules.t"
  },
  {
    "path": "modules/hypernetworks/ui.py",
    "chars": 1520,
    "preview": "import html\r\n\r\nimport gradio as gr\r\nimport modules.hypernetworks.hypernetwork\r\nfrom modules import devices, sd_hijack, s"
  },
  {
    "path": "modules/images.py",
    "chars": 35653,
    "preview": "from __future__ import annotations\r\n\r\nimport datetime\r\nimport functools\r\nimport pytz\r\nimport io\r\nimport math\r\nimport os\r"
  },
  {
    "path": "modules/img2img.py",
    "chars": 11799,
    "preview": "import os\r\nfrom contextlib import closing\r\nfrom pathlib import Path\r\n\r\nimport numpy as np\r\nfrom PIL import Image, ImageO"
  },
  {
    "path": "modules/import_hook.py",
    "chars": 654,
    "preview": "import sys\n\n# this will break any attempt to import xformers which will prevent stability diffusion repo from trying to "
  },
  {
    "path": "modules/infotext_utils.py",
    "chars": 19851,
    "preview": "from __future__ import annotations\r\nimport base64\r\nimport io\r\nimport json\r\nimport os\r\nimport re\r\nimport sys\r\n\r\nimport gr"
  },
  {
    "path": "modules/infotext_versions.py",
    "chars": 1113,
    "preview": "from modules import shared\r\nfrom packaging import version\r\nimport re\r\n\r\n\r\nv160 = version.parse(\"1.6.0\")\r\nv170_tsnr = ver"
  },
  {
    "path": "modules/initialize.py",
    "chars": 5887,
    "preview": "import importlib\r\nimport logging\r\nimport os\r\nimport sys\r\nimport warnings\r\nfrom threading import Thread\r\n\r\nfrom modules.t"
  },
  {
    "path": "modules/initialize_util.py",
    "chars": 8465,
    "preview": "import json\r\nimport os\r\nimport signal\r\nimport sys\r\nimport re\r\n\r\nfrom modules.timer import startup_timer\r\n\r\n\r\ndef gradio_"
  },
  {
    "path": "modules/interrogate.py",
    "chars": 8589,
    "preview": "import os\r\nimport sys\r\nfrom collections import namedtuple\r\nfrom pathlib import Path\r\nimport re\r\n\r\nimport torch\r\nimport t"
  },
  {
    "path": "modules/launch_utils.py",
    "chars": 20319,
    "preview": "# this scripts installs necessary requirements and launches main program in webui.py\r\nimport logging\r\nimport re\r\nimport "
  },
  {
    "path": "modules/localization.py",
    "chars": 1084,
    "preview": "import json\r\nimport os\r\n\r\nfrom modules import errors, scripts\r\n\r\nlocalizations = {}\r\n\r\n\r\ndef list_localizations(dirname)"
  },
  {
    "path": "modules/logging_config.py",
    "chars": 1616,
    "preview": "import logging\r\nimport os\r\n\r\ntry:\r\n    from tqdm import tqdm\r\n\r\n\r\n    class TqdmLoggingHandler(logging.Handler):\r\n      "
  },
  {
    "path": "modules/lowvram.py",
    "chars": 6239,
    "preview": "from collections import namedtuple\r\n\r\nimport torch\r\nfrom modules import devices, shared\r\n\r\nmodule_in_gpu = None\r\ncpu = t"
  },
  {
    "path": "modules/mac_specific.py",
    "chars": 5902,
    "preview": "import logging\n\nimport torch\nfrom torch import Tensor\nimport platform\nfrom modules.sd_hijack_utils import CondFunc\nfrom "
  },
  {
    "path": "modules/masking.py",
    "chars": 3751,
    "preview": "from PIL import Image, ImageFilter, ImageOps\r\n\r\n\r\ndef get_crop_region_v2(mask, pad=0):\r\n    \"\"\"\r\n    Finds a rectangular"
  },
  {
    "path": "modules/memmon.py",
    "chars": 2755,
    "preview": "import threading\nimport time\nfrom collections import defaultdict\n\nimport torch\n\n\nclass MemUsageMonitor(threading.Thread)"
  },
  {
    "path": "modules/modelloader.py",
    "chars": 7022,
    "preview": "from __future__ import annotations\n\nimport importlib\nimport logging\nimport os\nfrom typing import TYPE_CHECKING\nfrom urll"
  },
  {
    "path": "modules/models/diffusion/ddpm_edit.py",
    "chars": 67976,
    "preview": "\"\"\"\nwild mixture of\nhttps://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e316"
  },
  {
    "path": "modules/models/diffusion/uni_pc/__init__.py",
    "chars": 48,
    "preview": "from .sampler import UniPCSampler  # noqa: F401\n"
  },
  {
    "path": "modules/models/diffusion/uni_pc/sampler.py",
    "chars": 3897,
    "preview": "\"\"\"SAMPLING ONLY.\"\"\"\n\nimport torch\n\nfrom .uni_pc import NoiseScheduleVP, model_wrapper, UniPC\nfrom modules import shared"
  },
  {
    "path": "modules/models/diffusion/uni_pc/uni_pc.py",
    "chars": 37903,
    "preview": "import torch\nimport math\nimport tqdm\n\n\nclass NoiseScheduleVP:\n    def __init__(\n            self,\n            schedule='"
  },
  {
    "path": "modules/models/sd3/mmdit.py",
    "chars": 24480,
    "preview": "### This file contains impls for MM-DiT, the core model component of SD3\n\nimport math\nfrom typing import Dict, Optional\n"
  },
  {
    "path": "modules/models/sd3/other_impls.py",
    "chars": 24104,
    "preview": "### This file contains impls for underlying related models (CLIP, T5, etc)\n\nimport torch\nimport math\nfrom torch import n"
  },
  {
    "path": "modules/models/sd3/sd3_cond.py",
    "chars": 8119,
    "preview": "import os\r\nimport safetensors\r\nimport torch\r\nimport typing\r\n\r\nfrom transformers import CLIPTokenizer, T5TokenizerFast\r\n\r"
  },
  {
    "path": "modules/models/sd3/sd3_impls.py",
    "chars": 16415,
    "preview": "### Impls of the SD3 core diffusion model and VAE\n\nimport torch\nimport math\nimport einops\nfrom modules.models.sd3.mmdit "
  },
  {
    "path": "modules/models/sd3/sd3_model.py",
    "chars": 3903,
    "preview": "import contextlib\r\n\r\nimport torch\r\n\r\nimport k_diffusion\r\nfrom modules.models.sd3.sd3_impls import BaseModel, SDVAE, SD3L"
  },
  {
    "path": "modules/ngrok.py",
    "chars": 1146,
    "preview": "import ngrok\n\n# Connect to ngrok for ingress\ndef connect(token, port, options):\n    account = None\n    if token is None:"
  },
  {
    "path": "modules/npu_specific.py",
    "chars": 649,
    "preview": "import importlib\nimport torch\n\nfrom modules import shared\n\n\ndef check_for_npu():\n    if importlib.util.find_spec(\"torch_"
  },
  {
    "path": "modules/options.py",
    "chars": 13089,
    "preview": "import os\r\nimport json\r\nimport sys\r\nfrom dataclasses import dataclass\r\n\r\nimport gradio as gr\r\n\r\nfrom modules import erro"
  },
  {
    "path": "modules/patches.py",
    "chars": 1831,
    "preview": "from collections import defaultdict\r\n\r\n\r\ndef patch(key, obj, field, replacement):\r\n    \"\"\"Replaces a function in a modul"
  },
  {
    "path": "modules/paths.py",
    "chars": 2367,
    "preview": "import os\r\nimport sys\r\nfrom modules.paths_internal import models_path, script_path, data_path, extensions_dir, extension"
  },
  {
    "path": "modules/paths_internal.py",
    "chars": 1679,
    "preview": "\"\"\"this module defines internal paths used by program and is safe to import before dependencies are installed in launch."
  },
  {
    "path": "modules/postprocessing.py",
    "chars": 6640,
    "preview": "import os\r\n\r\nfrom PIL import Image\r\n\r\nfrom modules import shared, images, devices, scripts, scripts_postprocessing, ui_c"
  },
  {
    "path": "modules/processing.py",
    "chars": 81170,
    "preview": "from __future__ import annotations\r\nimport json\r\nimport logging\r\nimport math\r\nimport os\r\nimport sys\r\nimport hashlib\r\nfro"
  },
  {
    "path": "modules/processing_scripts/comments.py",
    "chars": 1743,
    "preview": "from modules import scripts, shared, script_callbacks\r\nimport re\r\n\r\n\r\ndef strip_comments(text):\r\n    text = re.sub('(^|\\"
  },
  {
    "path": "modules/processing_scripts/refiner.py",
    "chars": 2320,
    "preview": "import gradio as gr\r\n\r\nfrom modules import scripts, sd_models\r\nfrom modules.infotext_utils import PasteField\r\nfrom modul"
  },
  {
    "path": "modules/processing_scripts/sampler.py",
    "chars": 2252,
    "preview": "import gradio as gr\r\n\r\nfrom modules import scripts, sd_samplers, sd_schedulers, shared\r\nfrom modules.infotext_utils impo"
  },
  {
    "path": "modules/processing_scripts/seed.py",
    "chars": 5515,
    "preview": "import json\r\n\r\nimport gradio as gr\r\n\r\nfrom modules import scripts, ui, errors\r\nfrom modules.infotext_utils import PasteF"
  },
  {
    "path": "modules/profiling.py",
    "chars": 1347,
    "preview": "import torch\r\n\r\nfrom modules import shared, ui_gradio_extensions\r\n\r\n\r\nclass Profiler:\r\n    def __init__(self):\r\n        "
  },
  {
    "path": "modules/progress.py",
    "chars": 5977,
    "preview": "import base64\r\nimport io\r\nimport time\r\n\r\nimport gradio as gr\r\nfrom pydantic import BaseModel, Field\r\n\r\nfrom modules.shar"
  },
  {
    "path": "modules/prompt_parser.py",
    "chars": 16743,
    "preview": "from __future__ import annotations\r\n\r\nimport re\r\nfrom collections import namedtuple\r\nimport lark\r\n\r\n# a prompt like this"
  },
  {
    "path": "modules/realesrgan_model.py",
    "chars": 4058,
    "preview": "import os\r\n\r\nfrom modules import modelloader, errors\r\nfrom modules.shared import cmd_opts, opts\r\nfrom modules.upscaler i"
  },
  {
    "path": "modules/restart.py",
    "chars": 638,
    "preview": "import os\nfrom pathlib import Path\n\nfrom modules.paths_internal import script_path\n\n\ndef is_restartable() -> bool:\n    \""
  },
  {
    "path": "modules/rng.py",
    "chars": 6516,
    "preview": "import torch\r\n\r\nfrom modules import devices, rng_philox, shared\r\n\r\n\r\ndef randn(seed, shape, generator=None):\r\n    \"\"\"Gen"
  },
  {
    "path": "modules/rng_philox.py",
    "chars": 3087,
    "preview": "\"\"\"RNG imitiating torch cuda randn on CPU. You are welcome.\r\n\r\nUsage:\r\n\r\n```\r\ng = Generator(seed=0)\r\nprint(g.randn(shape"
  },
  {
    "path": "modules/safe.py",
    "chars": 7126,
    "preview": "# this code is adapted from the script contributed by anon from /h/\r\n\r\nimport pickle\r\nimport collections\r\n\r\nimport torch"
  },
  {
    "path": "modules/script_callbacks.py",
    "chars": 22112,
    "preview": "from __future__ import annotations\r\n\r\nimport dataclasses\r\nimport inspect\r\nimport os\r\nfrom typing import Optional, Any\r\n\r"
  },
  {
    "path": "modules/script_loading.py",
    "chars": 1052,
    "preview": "import os\r\nimport importlib.util\r\n\r\nfrom modules import errors\r\n\r\n\r\nloaded_scripts = {}\r\n\r\n\r\ndef load_module(path):\r\n   "
  },
  {
    "path": "modules/scripts.py",
    "chars": 41802,
    "preview": "import os\r\nimport re\r\nimport sys\r\nimport inspect\r\nfrom collections import namedtuple\r\nfrom dataclasses import dataclass\r"
  },
  {
    "path": "modules/scripts_auto_postprocessing.py",
    "chars": 1491,
    "preview": "from modules import scripts, scripts_postprocessing, shared\r\n\r\n\r\nclass ScriptPostprocessingForMainUI(scripts.Script):\r\n "
  },
  {
    "path": "modules/scripts_postprocessing.py",
    "chars": 7226,
    "preview": "import dataclasses\r\nimport os\r\nimport gradio as gr\r\n\r\nfrom modules import errors, shared\r\n\r\n\r\n@dataclasses.dataclass\r\ncl"
  },
  {
    "path": "modules/sd_disable_initialization.py",
    "chars": 11425,
    "preview": "import ldm.modules.encoders.modules\r\nimport open_clip\r\nimport torch\r\nimport transformers.utils.hub\r\n\r\nfrom modules impor"
  },
  {
    "path": "modules/sd_emphasis.py",
    "chars": 2131,
    "preview": "from __future__ import annotations\r\nimport torch\r\n\r\n\r\nclass Emphasis:\r\n    \"\"\"Emphasis class decides how to death with ("
  },
  {
    "path": "modules/sd_hijack.py",
    "chars": 17795,
    "preview": "import torch\r\nfrom torch.nn.functional import silu\r\nfrom types import MethodType\r\n\r\nfrom modules import devices, sd_hija"
  },
  {
    "path": "modules/sd_hijack_checkpoint.py",
    "chars": 1253,
    "preview": "from torch.utils.checkpoint import checkpoint\n\nimport ldm.modules.attention\nimport ldm.modules.diffusionmodules.openaimo"
  },
  {
    "path": "modules/sd_hijack_clip.py",
    "chars": 15725,
    "preview": "import math\r\nfrom collections import namedtuple\r\n\r\nimport torch\r\n\r\nfrom modules import prompt_parser, devices, sd_hijack"
  },
  {
    "path": "modules/sd_hijack_clip_old.py",
    "chars": 3586,
    "preview": "from modules import sd_hijack_clip\r\nfrom modules import shared\r\n\r\n\r\ndef process_text_old(self: sd_hijack_clip.FrozenCLIP"
  },
  {
    "path": "modules/sd_hijack_ip2p.py",
    "chars": 360,
    "preview": "import os.path\n\n\ndef should_hijack_ip2p(checkpoint_info):\n    from modules import sd_models_config\n\n    ckpt_basename = "
  },
  {
    "path": "modules/sd_hijack_open_clip.py",
    "chars": 2620,
    "preview": "import open_clip.tokenizer\r\nimport torch\r\n\r\nfrom modules import sd_hijack_clip, devices\r\nfrom modules.shared import opts"
  },
  {
    "path": "modules/sd_hijack_optimizations.py",
    "chars": 24557,
    "preview": "from __future__ import annotations\r\nimport math\r\nimport psutil\r\nimport platform\r\n\r\nimport torch\r\nfrom torch import einsu"
  },
  {
    "path": "modules/sd_hijack_unet.py",
    "chars": 7267,
    "preview": "import torch\r\nfrom packaging import version\r\nfrom einops import repeat\r\nimport math\r\n\r\nfrom modules import devices\r\nfrom"
  },
  {
    "path": "modules/sd_hijack_utils.py",
    "chars": 1558,
    "preview": "import importlib\r\n\r\n\r\nalways_true_func = lambda *args, **kwargs: True\r\n\r\n\r\nclass CondFunc:\r\n    def __new__(cls, orig_fu"
  },
  {
    "path": "modules/sd_hijack_xlmr.py",
    "chars": 1449,
    "preview": "import torch\r\n\r\nfrom modules import sd_hijack_clip, devices\r\n\r\n\r\nclass FrozenXLMREmbedderWithCustomWords(sd_hijack_clip."
  },
  {
    "path": "modules/sd_models.py",
    "chars": 37536,
    "preview": "import collections\r\nimport importlib\r\nimport os\r\nimport sys\r\nimport threading\r\nimport enum\r\n\r\nimport torch\r\nimport re\r\ni"
  },
  {
    "path": "modules/sd_models_config.py",
    "chars": 5597,
    "preview": "import os\r\n\r\nimport torch\r\n\r\nfrom modules import shared, paths, sd_disable_initialization, devices\r\n\r\nsd_configs_path = "
  },
  {
    "path": "modules/sd_models_types.py",
    "chars": 1284,
    "preview": "from ldm.models.diffusion.ddpm import LatentDiffusion\r\nfrom typing import TYPE_CHECKING\r\n\r\n\r\nif TYPE_CHECKING:\r\n    from"
  },
  {
    "path": "modules/sd_models_xl.py",
    "chars": 5007,
    "preview": "from __future__ import annotations\r\n\r\nimport torch\r\n\r\nimport sgm.models.diffusion\r\nimport sgm.modules.diffusionmodules.d"
  }
]

// ... and 98 more files (download for full content)

About this extraction

This page contains the full source code of the AUTOMATIC1111/stable-diffusion-webui GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 298 files (2.2 MB), approximately 585.1k tokens, and a symbol index with 2616 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!