Repository: timothybrooks/instruct-pix2pix
Branch: main
Commit: 0dffd1eeb026
Files: 115
Total size: 811.3 KB

Directory structure:
instruct-pix2pix/

├── LICENSE
├── README.md
├── configs/
│   ├── generate.yaml
│   └── train.yaml
├── dataset_creation/
│   ├── generate_img_dataset.py
│   ├── generate_txt_dataset.py
│   ├── prepare_dataset.py
│   └── prepare_for_gpt.py
├── edit_app.py
├── edit_cli.py
├── edit_dataset.py
├── environment.yaml
├── main.py
├── metrics/
│   ├── clip_similarity.py
│   └── compute_metrics.py
├── prompt_app.py
├── scripts/
│   ├── download_checkpoints.sh
│   ├── download_data.sh
│   └── download_pretrained_sd.sh
└── stable_diffusion/
    ├── LICENSE
    ├── README.md
    ├── Stable_Diffusion_v1_Model_Card.md
    ├── assets/
    │   ├── results.gif.REMOVED.git-id
    │   ├── stable-samples/
    │   │   ├── img2img/
    │   │   │   ├── upscaling-in.png.REMOVED.git-id
    │   │   │   └── upscaling-out.png.REMOVED.git-id
    │   │   └── txt2img/
    │   │       ├── merged-0005.png.REMOVED.git-id
    │   │       ├── merged-0006.png.REMOVED.git-id
    │   │       └── merged-0007.png.REMOVED.git-id
    │   └── txt2img-preview.png.REMOVED.git-id
    ├── configs/
    │   ├── autoencoder/
    │   │   ├── autoencoder_kl_16x16x16.yaml
    │   │   ├── autoencoder_kl_32x32x4.yaml
    │   │   ├── autoencoder_kl_64x64x3.yaml
    │   │   └── autoencoder_kl_8x8x64.yaml
    │   ├── latent-diffusion/
    │   │   ├── celebahq-ldm-vq-4.yaml
    │   │   ├── cin-ldm-vq-f8.yaml
    │   │   ├── cin256-v2.yaml
    │   │   ├── ffhq-ldm-vq-4.yaml
    │   │   ├── lsun_bedrooms-ldm-vq-4.yaml
    │   │   ├── lsun_churches-ldm-kl-8.yaml
    │   │   └── txt2img-1p4B-eval.yaml
    │   ├── retrieval-augmented-diffusion/
    │   │   └── 768x768.yaml
    │   └── stable-diffusion/
    │       └── v1-inference.yaml
    ├── data/
    │   ├── example_conditioning/
    │   │   └── text_conditional/
    │   │       └── sample_0.txt
    │   ├── imagenet_clsidx_to_label.txt
    │   ├── imagenet_train_hr_indices.p.REMOVED.git-id
    │   ├── imagenet_val_hr_indices.p
    │   └── index_synset.yaml
    ├── environment.yaml
    ├── ldm/
    │   ├── data/
    │   │   ├── __init__.py
    │   │   ├── base.py
    │   │   ├── imagenet.py
    │   │   └── lsun.py
    │   ├── lr_scheduler.py
    │   ├── models/
    │   │   ├── autoencoder.py
    │   │   └── diffusion/
    │   │       ├── __init__.py
    │   │       ├── classifier.py
    │   │       ├── ddim.py
    │   │       ├── ddpm.py
    │   │       ├── ddpm_edit.py
    │   │       ├── dpm_solver/
    │   │       │   ├── __init__.py
    │   │       │   ├── dpm_solver.py
    │   │       │   └── sampler.py
    │   │       └── plms.py
    │   ├── modules/
    │   │   ├── attention.py
    │   │   ├── diffusionmodules/
    │   │   │   ├── __init__.py
    │   │   │   ├── model.py
    │   │   │   ├── openaimodel.py
    │   │   │   └── util.py
    │   │   ├── distributions/
    │   │   │   ├── __init__.py
    │   │   │   └── distributions.py
    │   │   ├── ema.py
    │   │   ├── encoders/
    │   │   │   ├── __init__.py
    │   │   │   └── modules.py
    │   │   ├── image_degradation/
    │   │   │   ├── __init__.py
    │   │   │   ├── bsrgan.py
    │   │   │   ├── bsrgan_light.py
    │   │   │   └── utils_image.py
    │   │   ├── losses/
    │   │   │   ├── __init__.py
    │   │   │   ├── contperceptual.py
    │   │   │   └── vqperceptual.py
    │   │   └── x_transformer.py
    │   └── util.py
    ├── main.py
    ├── models/
    │   ├── first_stage_models/
    │   │   ├── kl-f16/
    │   │   │   └── config.yaml
    │   │   ├── kl-f32/
    │   │   │   └── config.yaml
    │   │   ├── kl-f4/
    │   │   │   └── config.yaml
    │   │   ├── kl-f8/
    │   │   │   └── config.yaml
    │   │   ├── vq-f16/
    │   │   │   └── config.yaml
    │   │   ├── vq-f4/
    │   │   │   └── config.yaml
    │   │   ├── vq-f4-noattn/
    │   │   │   └── config.yaml
    │   │   ├── vq-f8/
    │   │   │   └── config.yaml
    │   │   └── vq-f8-n256/
    │   │       └── config.yaml
    │   └── ldm/
    │       ├── bsr_sr/
    │       │   └── config.yaml
    │       ├── celeba256/
    │       │   └── config.yaml
    │       ├── cin256/
    │       │   └── config.yaml
    │       ├── ffhq256/
    │       │   └── config.yaml
    │       ├── inpainting_big/
    │       │   └── config.yaml
    │       ├── layout2img-openimages256/
    │       │   └── config.yaml
    │       ├── lsun_beds256/
    │       │   └── config.yaml
    │       ├── lsun_churches256/
    │       │   └── config.yaml
    │       ├── semantic_synthesis256/
    │       │   └── config.yaml
    │       ├── semantic_synthesis512/
    │       │   └── config.yaml
    │       └── text2img256/
    │           └── config.yaml
    ├── notebook_helpers.py
    ├── scripts/
    │   ├── download_first_stages.sh
    │   ├── download_models.sh
    │   ├── img2img.py
    │   ├── inpaint.py
    │   ├── knn2img.py
    │   ├── latent_imagenet_diffusion.ipynb.REMOVED.git-id
    │   ├── sample_diffusion.py
    │   ├── tests/
    │   │   └── test_watermark.py
    │   ├── train_searcher.py
    │   └── txt2img.py
    └── setup.py

================================================
FILE CONTENTS
================================================

================================================
FILE: LICENSE
================================================
Copyright 2023 Timothy Brooks, Aleksander Holynski, Alexei A. Efros

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Portions of code and models (such as pretrained checkpoints, which are fine-tuned starting from released Stable Diffusion checkpoints) are derived from the Stable Diffusion codebase (https://github.com/CompVis/stable-diffusion). Further restrictions may apply. Please consult the Stable Diffusion license `stable_diffusion/LICENSE`. Modified code is denoted as such in comments at the start of each file. 


================================================
FILE: README.md
================================================
# InstructPix2Pix: Learning to Follow Image Editing Instructions
### [Project Page](https://www.timothybrooks.com/instruct-pix2pix/) | [Paper](https://arxiv.org/abs/2211.09800) | [Data](http://instruct-pix2pix.eecs.berkeley.edu/)
PyTorch implementation of InstructPix2Pix, an instruction-based image editing model, based on the original [CompVis/stable_diffusion](https://github.com/CompVis/stable-diffusion) repo. <br>

[InstructPix2Pix: Learning to Follow Image Editing Instructions](https://www.timothybrooks.com/instruct-pix2pix/)  
 [Tim Brooks](https://www.timothybrooks.com/)\*,
 [Aleksander Holynski](https://holynski.org/)\*,
 [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros/) <br>
 UC Berkeley <br>
  \*denotes equal contribution  
  
  <img src='https://instruct-pix2pix.timothybrooks.com/teaser.jpg'/>

## TL;DR: quickstart 

Follow the instructions below to download and run InstructPix2Pix on your own images. These instructions have been tested on a GPU with >18GB VRAM. If you don't have a GPU, you may need to change the default configuration, or check out [other ways of using the model](https://github.com/timothybrooks/instruct-pix2pix#other-ways-of-using-instructpix2pix). 

### Set up a conda environment, and download a pretrained model:
```
conda env create -f environment.yaml
conda activate ip2p
bash scripts/download_checkpoints.sh
```

### Edit a single image:
```
python edit_cli.py --input imgs/example.jpg --output imgs/output.jpg --edit "turn him into a cyborg"

# Optionally, you can specify parameters to tune your result:
# python edit_cli.py --steps 100 --resolution 512 --seed 1371 --cfg-text 7.5 --cfg-image 1.2 --input imgs/example.jpg --output imgs/output.jpg --edit "turn him into a cyborg"
```

### Or launch your own interactive editing Gradio app:
```
python edit_app.py 
```
![Edit app](https://github.com/timothybrooks/instruct-pix2pix/blob/main/imgs/edit_app.jpg?raw=true)

_(For advice on how to get the best results by tuning parameters, see the [Tips](https://github.com/timothybrooks/instruct-pix2pix#tips) section)._

## Setup

Install all dependencies with:
```
conda env create -f environment.yaml
```

Download the pretrained models by running:
```
bash scripts/download_checkpoints.sh
```

## Generated Dataset

Our image editing model is trained on a generated dataset consisting of 454,445 examples. Each example contains (1) an input image, (2) an editing instruction, and (3) an output edited image. We provide two versions of the dataset, one in which each pair of edited images is generated 100 times, and the best examples are chosen based on CLIP metrics (Section 3.1.2 in the paper) (`clip-filtered-dataset`), and one in which examples are randomly chosen (`random-sample-dataset`).

For the released version of this dataset, we've additionally filtered prompts and images for NSFW content. After NSFW filtering, the GPT-3 generated dataset contains 451,990 examples. The final image-pair datasets contain:

|  | # of image editing examples | Dataset size |
|--|-----------------------|----------------------- |
| `random-sample-dataset` |451990|727GB|
|  `clip-filtered-dataset` |313010|436GB|

To download one of these datasets, along with the entire NSFW-filtered text data, run the following command with the appropriate dataset name:

```
bash scripts/download_data.sh clip-filtered-dataset
```
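
Each downloaded dataset is organized as one directory per prompt, containing the text triplet in `prompt.json`, image pairs named `{seed}_0.jpg` / `{seed}_1.jpg`, and per-pair CLIP scores in `metadata.jsonl`, with a top-level `seeds.json` listing the kept seeds for each prompt. Below is a minimal sketch for inspecting one example, assuming the downloaded data follows the same layout produced by `dataset_creation/generate_img_dataset.py` and `dataset_creation/prepare_dataset.py`:

```python
# Minimal sketch: inspect one example from a downloaded dataset.
# Assumes the layout written by the dataset-creation scripts in this repo.
import json
from pathlib import Path

from PIL import Image

dataset_dir = Path("data/clip-filtered-dataset")
prompt_list = json.loads(dataset_dir.joinpath("seeds.json").read_text())

prompt_name, seeds = prompt_list[0]                      # first prompt directory and its kept seeds
prompt_dir = dataset_dir.joinpath(prompt_name)
triplet = json.loads(prompt_dir.joinpath("prompt.json").read_text())
print(triplet["caption"], "->", triplet["edit"], "->", triplet["output"])

before = Image.open(prompt_dir.joinpath(f"{seeds[0]}_0.jpg"))  # input image
after = Image.open(prompt_dir.joinpath(f"{seeds[0]}_1.jpg"))   # edited image
```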


## Training InstructPix2Pix

InstructPix2Pix is trained by fine-tuning from an initial Stable Diffusion checkpoint. The first step is to download a Stable Diffusion checkpoint. For our trained models, we used the v1.5 checkpoint as the starting point. To download the same ones we used, you can run the following script:
```
bash scripts/download_pretrained_sd.sh
```
If you'd like to use a different checkpoint, point to it in the config file `configs/train.yaml`, on line 8, after `ckpt_path:`. 

Next, we need to change the config to point to our downloaded (or generated) dataset. If you're using the `clip-filtered-dataset` from above, you can skip this. Otherwise, you may need to edit lines 85 and 94 of the config (`data.params.train.params.path`, `data.params.validation.params.path`). 

Finally, start a training job with the following command:

```
python main.py --name default --base configs/train.yaml --train --gpus 0,1,2,3,4,5,6,7
```


## Creating your own dataset

Our generated dataset of paired images and editing instructions is made in two phases: First, we use GPT-3 to generate text triplets: (a) a caption describing an image, (b) an edit instruction, (c) a caption describing the image after the edit. Then, we turn pairs of captions (before/after the edit) into pairs of images using Stable Diffusion and Prompt-to-Prompt.

### (1) Generate a dataset of captions and instructions

We provide our generated dataset of captions and edit instructions [here](https://instruct-pix2pix.eecs.berkeley.edu/gpt-generated-prompts.jsonl). If you plan to use our captions+instructions, skip to step (2). Otherwise, if you would like to create your own text dataset, please follow steps (1.1-1.3) below. Note that generating very large datasets using GPT-3 can be expensive.

#### (1.1) Manually write a dataset of instructions and captions

The first step of the process is fine-tuning GPT-3. To do this, we made a dataset of 700 examples broadly covering the edits that we might want our model to be able to perform. Our examples are available [here](https://instruct-pix2pix.eecs.berkeley.edu/human-written-prompts.jsonl). These should be diverse and cover a wide range of possible captions and types of edits. Ideally, they should avoid duplication or significant overlap of captions and instructions. It is also important to be mindful of the limitations of Stable Diffusion and Prompt-to-Prompt when writing these examples, such as their inability to perform large spatial transformations (e.g., moving the camera, zooming in, swapping object locations).

Input prompts should closely match the distribution of input prompts used to generate the larger dataset. We sampled the 700 input prompts from the _LAION Improved Aesthetics 6.5+_ dataset and also use this dataset as the source of captions when generating the larger dataset. We found this dataset is quite noisy (many of the captions are overly long and contain irrelevant text). For this reason, we also considered the MSCOCO and LAION-COCO datasets, but ultimately chose _LAION Improved Aesthetics 6.5+_ due to its diversity of content, proper nouns, and artistic mediums. If you choose to use another dataset or combination of datasets as input to GPT-3 when generating examples, we recommend sampling the input prompts from the same distribution when manually writing training examples.

#### (1.2) Finetune GPT-3

The next step is to finetune a large language model on the manually written instructions/outputs to generate edit instructions and edited caption from a new input caption. For this, we finetune GPT-3's Davinci model via the OpenAI API, although other language models could be used.

To prepare training data for GPT-3, first create an OpenAI developer account to access the needed APIs, and [set up the API keys on your local device](https://beta.openai.com/docs/api-reference/introduction). Then run the `dataset_creation/prepare_for_gpt.py` script, which puts the prompts into the correct format by concatenating instructions and captions and adding delimiters and stop sequences.

```bash
python dataset_creation/prepare_for_gpt.py --input-path data/human-written-prompts.jsonl --output-path data/human-written-prompts-for-gpt.jsonl
```
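
Each resulting line pairs the input caption (as the GPT-3 prompt) with the instruction and edited caption (as the completion), joined by the delimiters and stop sequence defined in `dataset_creation/generate_txt_dataset.py`. A hypothetical record looks like this:

```python
# Hypothetical example of one fine-tuning record written by prepare_for_gpt.py.
# Delimiters come from generate_txt_dataset.py:
# DELIMITER_0 = "\n##\n", DELIMITER_1 = "\n%%\n", STOP = "\nEND".
record = {
    "prompt": "A photograph of a dog in a park\n##\n",
    "completion": "make it a cat\n%%\nA photograph of a cat in a park\nEND",
}
```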

Next, finetune GPT-3 via the OpenAI CLI. We provide an example below, but please refer to OpenAI's official documentation, as best practices may change. We trained the Davinci model for a single epoch. You can experiment with smaller, less expensive GPT-3 variants or with open-source language models, although this may negatively affect performance.

```bash
openai api fine_tunes.create -t data/human-written-prompts-for-gpt.jsonl -m davinci --n_epochs 1 --suffix "instruct-pix2pix"
```

You can test out the finetuned GPT-3 model by launching the provided Gradio app:

```bash
python prompt_app.py --openai-api-key OPENAI_KEY --openai-model OPENAI_MODEL_NAME
```

![Prompt app](https://github.com/timothybrooks/instruct-pix2pix/blob/main/imgs/prompt_app.jpg?raw=true)

#### (1.3) Generate a large dataset of captions and instructions

We now use the finetuned GPT-3 model to generate a large dataset. Our dataset cost thousands of dollars to create. See `dataset_creation/generate_txt_dataset.py` for the script that generates these examples. We recommend first generating a small number of examples (by setting a low value of `--num-samples`) and checking that the results are as desired before gradually increasing the scale.

```bash
python dataset_creation/generate_txt_dataset.py --openai-api-key OPENAI_KEY --openai-model OPENAI_MODEL_NAME
```

If you are generating at a very large scale (e.g., 100K+ examples), it will be noticeably faster to generate the dataset with multiple processes running in parallel. This can be accomplished by setting `--num-partitions=N` to the number of parallel jobs and running one process per partition, setting each `--partition` to the corresponding value (see the launcher sketch after the command below).

```bash
python dataset_creation/generate_txt_dataset.py --openai-api-key OPENAI_KEY --openai-model OPENAI_MODEL_NAME --num-partitions=10 --partition=0
```
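
For example, a minimal launcher (an illustrative sketch, not part of the repo) that starts all partitions on one machine could look like this:

```python
# Illustrative sketch: run every partition of generate_txt_dataset.py in parallel.
# Replace OPENAI_KEY / OPENAI_MODEL_NAME with your own values.
import subprocess

num_partitions = 10
procs = [
    subprocess.Popen([
        "python", "dataset_creation/generate_txt_dataset.py",
        "--openai-api-key", "OPENAI_KEY",
        "--openai-model", "OPENAI_MODEL_NAME",
        "--num-partitions", str(num_partitions),
        "--partition", str(i),
    ])
    for i in range(num_partitions)
]
for proc in procs:
    proc.wait()
```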

### (2) Turn paired captions into paired images

The next step is to turn pairs of text captions into pairs of images. For this, we need to copy some pre-trained Stable Diffusion checkpoints to `stable_diffusion/models/ldm/stable-diffusion-v1/`. You may have already done this if you followed the instructions above for training with our provided data, but if not, you can do this by running:

```bash
bash scripts/download_pretrained_sd.sh
```

For our model, we used [checkpoint v1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.ckpt), and the [new autoencoder](https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt), but other models may work as well. If you choose to use other models, make sure to point to the corresponding checkpoints by passing the `--ckpt` and `--vae-ckpt` arguments. Once all checkpoints have been downloaded, we can generate the dataset with the following command:

```
python dataset_creation/generate_img_dataset.py --out_dir data/instruct-pix2pix-dataset-000 --prompts_file path/to/generated_prompts.jsonl
```

This command operates on a single GPU (typically a V100 or A100). To parallelize over many GPUs/machines, set `--n-partitions` to the total number of parallel jobs and `--partition` to the index of each job.

```
python dataset_creation/generate_img_dataset.py --out_dir data/instruct-pix2pix-dataset-000 --prompts_file path/to/generated_prompts.jsonl --n-partitions 100 --partition 0
```

The default parameters match those of our dataset, although in practice you can use a smaller number of steps (e.g., `--steps=25`) to generate high-quality data faster. By default, we generate 100 samples per prompt and use CLIP filtering to keep a max of 4 per prompt. You can experiment with fewer samples per prompt by setting `--n-samples` to a lower value. The command below turns off CLIP filtering entirely and is therefore faster:

```
python dataset_creation/generate_img_dataset.py --out_dir data/instruct-pix2pix-dataset-000 --prompts_file path/to/generated_prompts.jsonl --n-samples 4 --clip-threshold 0 --clip-dir-threshold 0 --clip-img-threshold 0 --n-partitions 100 --partition 0
```

After generating all of the dataset examples, run the command below to create a list of the examples. This is needed for the dataset object to sample examples efficiently without iterating over the entire dataset directory at the start of each training run.

```
python dataset_creation/prepare_dataset.py data/instruct-pix2pix-dataset-000
```
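
The resulting `seeds.json` is a JSON list of `(prompt directory name, seeds)` pairs, roughly structured as follows (directory names and seed values below are hypothetical):

```python
# Structure of seeds.json as written by dataset_creation/prepare_dataset.py.
seeds = [
    ["0000000", ["1073741824", "2147483648"]],  # prompt dir -> seeds of saved image pairs
    ["0000001", ["3221225472"]],
]
```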

## Evaluation

To generate plots like the ones in Figures 8 and 10 in the paper, run the following command:

```
python metrics/compute_metrics.py --ckpt /path/to/your/model.ckpt
```

## Tips

If you're not getting the quality result you want, there may be a few reasons:
1. **Is the image not changing enough?** Your Image CFG weight may be too high. This value dictates how similar the output should be to the input. It's possible your edit requires larger changes from the original image, and your Image CFG weight isn't allowing that. Alternatively, your Text CFG weight may be too low. This value dictates how much to listen to the text instruction. The default Image CFG of 1.5 and Text CFG of 7.5 are a good starting point, but aren't necessarily optimal for each edit (see the sketch of how these weights enter the sampler after this list). Try:
    * Decreasing the Image CFG weight, or
    * Increasing the Text CFG weight
2. Conversely, **is the image changing too much**, such that the details in the original image aren't preserved? Try:
    * Increasing the Image CFG weight, or
    * Decreasing the Text CFG weight
3. Try generating results with different random seeds by setting "Randomize Seed" and running generation multiple times. You can also try setting "Randomize CFG" to sample new Text CFG and Image CFG values each time.
4. Rephrasing the instruction sometimes improves results (e.g., "turn him into a dog" vs. "make him a dog" vs. "as a dog").
5. Increasing the number of steps sometimes improves results.
6. Do faces look weird? The Stable Diffusion autoencoder has a hard time with faces that are small in the image. Try cropping the image so the face takes up a larger portion of the frame.
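
For reference, here is how the two guidance weights enter the sampler; this is a simplified excerpt mirroring `CFGDenoiser.forward` in `edit_cli.py` and `edit_app.py`:

```python
# How Text CFG and Image CFG combine the model's three predictions
# (mirrors CFGDenoiser.forward in edit_cli.py / edit_app.py).
def combine_guidance(out_cond, out_img_cond, out_uncond, text_cfg_scale, image_cfg_scale):
    # out_cond:     conditioned on both the instruction and the input image
    # out_img_cond: conditioned on the input image only
    # out_uncond:   fully unconditional prediction
    return (
        out_uncond
        + text_cfg_scale * (out_cond - out_img_cond)
        + image_cfg_scale * (out_img_cond - out_uncond)
    )
```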

## Comments

- Our codebase is based on the [Stable Diffusion codebase](https://github.com/CompVis/stable-diffusion).

## BibTeX

```
@article{brooks2022instructpix2pix,
  title={InstructPix2Pix: Learning to Follow Image Editing Instructions},
  author={Brooks, Tim and Holynski, Aleksander and Efros, Alexei A},
  journal={arXiv preprint arXiv:2211.09800},
  year={2022}
}
```
## Other ways of using InstructPix2Pix

### InstructPix2Pix on [HuggingFace](https://huggingface.co/spaces/timbrooks/instruct-pix2pix):
> A browser-based version of the demo is available as a [HuggingFace space](https://huggingface.co/spaces/timbrooks/instruct-pix2pix). For this version, you only need a browser, a picture you want to edit, and an instruction! Note that this is a shared online demo, and processing time may be slower during peak utilization. 

### InstructPix2Pix on [Replicate](https://replicate.com/timothybrooks/instruct-pix2pix):
> Replicate provides a production-ready cloud API for running the InstructPix2Pix model. You can run the model from any environment using a simple API call with cURL, Python, JavaScript, or your language of choice. Replicate also provides a web interface for running the model and sharing predictions.

### InstructPix2Pix in [Imaginairy](https://github.com/brycedrennan/imaginAIry#-edit-images-with-instructions-alone-by-instructpix2pix):
> Imaginairy offers another way of easily installing InstructPix2Pix with a single command. It can run on devices without GPUs (like a Macbook!). 
> ```bash
> pip install imaginairy --upgrade
> aimg edit any-image.jpg --gif "turn him into a cyborg" 
> ```
> It also offers an easy way to perform a bunch of edits on an image, and can save edits out to an animated GIF:
> ```
> aimg edit --gif --surprise-me pearl-earring.jpg 
> ```
> <img src="https://raw.githubusercontent.com/brycedrennan/imaginAIry/7c05c3aae2740278978c5e84962b826e58201bac/assets/girl_with_a_pearl_earring_suprise.gif" width="512">

### InstructPix2Pix in [🧨 Diffusers](https://github.com/huggingface/diffusers):

> InstructPix2Pix in Diffusers is a bit more optimized, so it may be faster and more suitable for GPUs with less memory. Below are instructions for installing the library and editing an image: 
> 1. Install diffusers and relevant dependencies:
>
> ```bash
> pip install transformers accelerate torch
>
> pip install git+https://github.com/huggingface/diffusers.git
> ```
> 
> 2. Load the model and edit the image:
>
> ```python
> import torch
> from PIL import Image
> from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
>
> model_id = "timbrooks/instruct-pix2pix"
> pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None)
> pipe.to("cuda")
> pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
>
> # `image` is an RGB PIL.Image, e.g. loaded from disk:
> image = Image.open("imgs/example.jpg").convert("RGB")
> images = pipe("turn him into a cyborg", image=image).images
> images[0]
> ```
> 
> For more information, check the docs [here](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/pix2pix).


================================================
FILE: configs/generate.yaml
================================================
# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
# See more details in LICENSE.

model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm_edit.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: edited
    cond_stage_key: edit
    # image_size: 64
    # image_size: 32
    image_size: 16
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: hybrid
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: true
    load_ema: true

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 0 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 8
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 128
    num_workers: 1
    wrap: false
    validation:
      target: edit_dataset.EditDataset
      params:
        path: data/clip-filtered-dataset
        cache_dir:  data/
        cache_name: data_10k
        split: val
        min_text_sim: 0.2
        min_image_sim: 0.75
        min_direction_sim: 0.2
        max_samples_per_prompt: 1
        min_resize_res: 512
        max_resize_res: 512
        crop_res: 512
        output_as_edit: False
        real_input: True


================================================
FILE: configs/train.yaml
================================================
# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
# See more details in LICENSE.

model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm_edit.LatentDiffusion
  params:
    ckpt_path: stable_diffusion/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: edited
    cond_stage_key: edit
    image_size: 32
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: hybrid
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: true
    load_ema: false

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 0 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 8
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 32
    num_workers: 2
    train:
      target: edit_dataset.EditDataset
      params:
        path: data/clip-filtered-dataset
        split: train
        min_resize_res: 256
        max_resize_res: 256
        crop_res: 256
        flip_prob: 0.5
    validation:
      target: edit_dataset.EditDataset
      params:
        path: data/clip-filtered-dataset
        split: val
        min_resize_res: 256
        max_resize_res: 256
        crop_res: 256

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 2000
        max_images: 2
        increase_log_steps: False

  trainer:
    max_epochs: 2000
    benchmark: True
    accumulate_grad_batches: 4
    check_val_every_n_epoch: 4


================================================
FILE: dataset_creation/generate_img_dataset.py
================================================
import argparse
import json
import sys
from pathlib import Path

import k_diffusion
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange, repeat
from omegaconf import OmegaConf
from PIL import Image
from pytorch_lightning import seed_everything
from tqdm import tqdm

sys.path.append("./")
sys.path.append("./stable_diffusion")

from ldm.modules.attention import CrossAttention
from ldm.util import instantiate_from_config
from metrics.clip_similarity import ClipSimilarity


################################################################################
# Modified K-diffusion Euler ancestral sampler with prompt-to-prompt.
# https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/sampling.py


def append_dims(x, target_dims):
    """Appends dimensions to the end of a tensor until it has target_dims dimensions."""
    dims_to_append = target_dims - x.ndim
    if dims_to_append < 0:
        raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less")
    return x[(...,) + (None,) * dims_to_append]


def to_d(x, sigma, denoised):
    """Converts a denoiser output to a Karras ODE derivative."""
    return (x - denoised) / append_dims(sigma, x.ndim)


def get_ancestral_step(sigma_from, sigma_to):
    """Calculates the noise level (sigma_down) to step down to and the amount
    of noise to add (sigma_up) when doing an ancestral sampling step."""
    sigma_up = min(sigma_to, (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5)
    sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
    return sigma_down, sigma_up


def sample_euler_ancestral(model, x, sigmas, prompt2prompt_threshold=0.0, **extra_args):
    """Ancestral sampling with Euler method steps."""
    s_in = x.new_ones([x.shape[0]])
    for i in range(len(sigmas) - 1):
        prompt_to_prompt = prompt2prompt_threshold > i / (len(sigmas) - 2)
        for m in model.modules():
            if isinstance(m, CrossAttention):
                m.prompt_to_prompt = prompt_to_prompt
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
        d = to_d(x, sigmas[i], denoised)
        # Euler method
        dt = sigma_down - sigmas[i]
        x = x + d * dt
        if sigmas[i + 1] > 0:
            # Make noise the same across all samples in batch.
            x = x + torch.randn_like(x[:1]) * sigma_up
    return x


################################################################################


def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    if vae_ckpt is not None:
        print(f"Loading VAE from {vae_ckpt}")
        vae_sd = torch.load(vae_ckpt, map_location="cpu")["state_dict"]
        sd = {
            k: vae_sd[k[len("first_stage_model.") :]] if k.startswith("first_stage_model.") else v
            for k, v in sd.items()
        }
    model = instantiate_from_config(config.model)
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)
    return model


class CFGDenoiser(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.inner_model = model

    def forward(self, x, sigma, uncond, cond, cfg_scale):
        x_in = torch.cat([x] * 2)
        sigma_in = torch.cat([sigma] * 2)
        cond_in = torch.cat([uncond, cond])
        uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
        return uncond + (cond - uncond) * cfg_scale


def to_pil(image: torch.Tensor) -> Image.Image:
    image = 255.0 * rearrange(image.cpu().numpy(), "c h w -> h w c")
    image = Image.fromarray(image.astype(np.uint8))
    return image


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--out_dir",
        type=str,
        required=True,
        help="Path to output dataset directory.",
    )
    parser.add_argument(
        "--prompts_file",
        type=str,
        required=True,
        help="Path to prompts .jsonl file.",
    )
    parser.add_argument(
        "--ckpt",
        type=str,
        default="stable_diffusion/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt",
        help="Path to stable diffusion checkpoint.",
    )
    parser.add_argument(
        "--vae-ckpt",
        type=str,
        default="stable_diffusion/models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt",
        help="Path to vae checkpoint.",
    )
    parser.add_argument(
        "--steps",
        type=int,
        default=100,
        help="Number of sampling steps.",
    )
    parser.add_argument(
        "--n-samples",
        type=int,
        default=100,
        help="Number of samples to generate per prompt (before CLIP filtering).",
    )
    parser.add_argument(
        "--max-out-samples",
        type=int,
        default=4,
        help="Max number of output samples to save per prompt (after CLIP filtering).",
    )
    parser.add_argument(
        "--n-partitions",
        type=int,
        default=1,
        help="Number of total partitions.",
    )
    parser.add_argument(
        "--partition",
        type=int,
        default=0,
        help="Partition index.",
    )
    parser.add_argument(
        "--min-p2p",
        type=float,
        default=0.1,
        help="Min prompt2prompt threshold (portion of denoising for which to fix self attention maps).",
    )
    parser.add_argument(
        "--max-p2p",
        type=float,
        default=0.9,
        help="Max prompt2prompt threshold (portion of denoising for which to fix self attention maps).",
    )
    parser.add_argument(
        "--min-cfg",
        type=float,
        default=7.5,
        help="Min classifier free guidance scale.",
    )
    parser.add_argument(
        "--max-cfg",
        type=float,
        default=15,
        help="Max classifier free guidance scale.",
    )
    parser.add_argument(
        "--clip-threshold",
        type=float,
        default=0.2,
        help="CLIP threshold for text-image similarity of each image.",
    )
    parser.add_argument(
        "--clip-dir-threshold",
        type=float,
        default=0.2,
        help="Directional CLIP threshold for similarity of change between pairs of text and pairs of images.",
    )
    parser.add_argument(
        "--clip-img-threshold",
        type=float,
        default=0.7,
        help="CLIP threshold for image-image similarity.",
    )
    opt = parser.parse_args()

    global_seed = torch.randint(1 << 32, ()).item()
    print(f"Global seed: {global_seed}")
    seed_everything(global_seed)

    model = load_model_from_config(
        OmegaConf.load("stable_diffusion/configs/stable-diffusion/v1-inference.yaml"),
        ckpt=opt.ckpt,
        vae_ckpt=opt.vae_ckpt,
    )
    model.cuda().eval()
    model_wrap = k_diffusion.external.CompVisDenoiser(model)

    clip_similarity = ClipSimilarity().cuda()

    out_dir = Path(opt.out_dir)
    out_dir.mkdir(exist_ok=True, parents=True)

    with open(opt.prompts_file) as fp:
        prompts = [json.loads(line) for line in fp]

    print(f"Partition index {opt.partition} ({opt.partition + 1} / {opt.n_partitions})")
    prompts = np.array_split(list(enumerate(prompts)), opt.n_partitions)[opt.partition]

    with torch.no_grad(), torch.autocast("cuda"), model.ema_scope():
        uncond = model.get_learned_conditioning(2 * [""])
        sigmas = model_wrap.get_sigmas(opt.steps)

        for i, prompt in tqdm(prompts, desc="Prompts"):
            prompt_dir = out_dir.joinpath(f"{i:07d}")
            prompt_dir.mkdir(exist_ok=True)

            with open(prompt_dir.joinpath("prompt.json"), "w") as fp:
                json.dump(prompt, fp)

            cond = model.get_learned_conditioning([prompt["caption"], prompt["output"]])
            results = {}

            with tqdm(total=opt.n_samples, desc="Samples") as progress_bar:

                while len(results) < opt.n_samples:
                    seed = torch.randint(1 << 32, ()).item()
                    if seed in results:
                        continue
                    torch.manual_seed(seed)

                    x = torch.randn(1, 4, 512 // 8, 512 // 8, device="cuda") * sigmas[0]
                    x = repeat(x, "1 ... -> n ...", n=2)

                    model_wrap_cfg = CFGDenoiser(model_wrap)
                    p2p_threshold = opt.min_p2p + torch.rand(()).item() * (opt.max_p2p - opt.min_p2p)
                    cfg_scale = opt.min_cfg + torch.rand(()).item() * (opt.max_cfg - opt.min_cfg)
                    extra_args = {"cond": cond, "uncond": uncond, "cfg_scale": cfg_scale}
                    samples_ddim = sample_euler_ancestral(model_wrap_cfg, x, sigmas, p2p_threshold, **extra_args)
                    x_samples_ddim = model.decode_first_stage(samples_ddim)
                    x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)

                    x0 = x_samples_ddim[0]
                    x1 = x_samples_ddim[1]

                    clip_sim_0, clip_sim_1, clip_sim_dir, clip_sim_image = clip_similarity(
                        x0[None], x1[None], [prompt["caption"]], [prompt["output"]]
                    )

                    results[seed] = dict(
                        image_0=to_pil(x0),
                        image_1=to_pil(x1),
                        p2p_threshold=p2p_threshold,
                        cfg_scale=cfg_scale,
                        clip_sim_0=clip_sim_0[0].item(),
                        clip_sim_1=clip_sim_1[0].item(),
                        clip_sim_dir=clip_sim_dir[0].item(),
                        clip_sim_image=clip_sim_image[0].item(),
                    )

                    progress_bar.update()

            # CLIP filter to get best samples for each prompt.
            metadata = [
                (result["clip_sim_dir"], seed)
                for seed, result in results.items()
                if result["clip_sim_image"] >= opt.clip_img_threshold
                and result["clip_sim_dir"] >= opt.clip_dir_threshold
                and result["clip_sim_0"] >= opt.clip_threshold
                and result["clip_sim_1"] >= opt.clip_threshold
            ]
            metadata.sort(reverse=True)
            for _, seed in metadata[: opt.max_out_samples]:
                result = results[seed]
                image_0 = result.pop("image_0")
                image_1 = result.pop("image_1")
                image_0.save(prompt_dir.joinpath(f"{seed}_0.jpg"), quality=100)
                image_1.save(prompt_dir.joinpath(f"{seed}_1.jpg"), quality=100)
                with open(prompt_dir.joinpath(f"metadata.jsonl"), "a") as fp:
                    fp.write(f"{json.dumps(dict(seed=seed, **result))}\n")

    print("Done.")


if __name__ == "__main__":
    main()


================================================
FILE: dataset_creation/generate_txt_dataset.py
================================================
from __future__ import annotations

import json
import time
from argparse import ArgumentParser
from pathlib import Path
from typing import Optional

import datasets
import numpy as np
import openai
from tqdm.auto import tqdm


DELIMITER_0 = "\n##\n"
DELIMITER_1 = "\n%%\n"
STOP = "\nEND"


def generate(
    openai_model: str,
    caption: str,
    num_retries: int = 3,
    max_tokens: int = 256,
    temperature: float = 0.7,
    top_p: float = 1.0,
    frequency_penalty: float = 0.1,
    presence_penalty: float = 0.0,
    sleep_on_error: float = 1.0,
) -> Optional[tuple[str, str]]:
    for _ in range(1 + num_retries):
        try:
            response = openai.Completion.create(
                model=openai_model,
                prompt=caption + DELIMITER_0,
                temperature=temperature,
                max_tokens=max_tokens,
                top_p=top_p,
                frequency_penalty=frequency_penalty,
                presence_penalty=presence_penalty,
                stop=[STOP],
            )
        except Exception as e:
            print(e)
            time.sleep(sleep_on_error)
            continue
        output = response["choices"][0]["text"].split(DELIMITER_1)
        if len(output) == 2:
            instruction, edited_caption = output
            results = openai.Moderation.create([instruction, edited_caption])["results"]
            if results[0]["flagged"] or results[1]["flagged"]:
                continue
            if caption.strip().strip(".!?").lower() != edited_caption.strip().strip(".!?").lower():
                return instruction, edited_caption


def main(openai_model: str, num_samples: int, num_partitions: int, partition: int, seed: int):
    dataset = datasets.load_dataset("ChristophSchuhmann/improved_aesthetics_6.5plus", split="train")
    # Other datasets we considered that may be worth trying:
    # dataset = datasets.load_dataset("ChristophSchuhmann/MS_COCO_2017_URL_TEXT", split="train")
    # dataset = datasets.load_dataset("laion/laion-coco", split="train")

    np.random.seed(seed)
    permutation = np.array_split(np.random.permutation(len(dataset)), num_partitions)[partition]
    dataset = dataset[permutation]
    captions = dataset["TEXT"]
    urls = dataset["URL"]
    output_path = f"data/dataset=laion-aesthetics-6.5_model={openai_model}_samples={num_samples}_partition={partition}.jsonl"  # fmt: skip
    print(f"Prompt file path: {output_path}")

    count = 0
    caption_set = set()
    url_set = set()

    if Path(output_path).exists():
        with open(output_path, "r") as f:
            for line in tqdm(f, desc="Resuming from existing prompts"):
                prompt = json.loads(line)
                if prompt["caption"] not in caption_set and prompt["url"] not in url_set:
                    caption_set.add(prompt["caption"])
                    url_set.add(prompt["url"])
                    count += 1

    with open(output_path, "a") as fp:
        with tqdm(total=num_samples - count, desc="Generating instructions and edited captions") as progress_bar:
            for caption, url in zip(captions, urls):
                if caption in caption_set or url in url_set:
                    continue
                if openai.Moderation.create(caption)["results"][0]["flagged"]:
                    continue
                edit_output = generate(openai_model, caption)
                if edit_output is not None:
                    edit, output = edit_output
                    fp.write(f"{json.dumps(dict(caption=caption, edit=edit, output=output, url=url))}\n")
                    count += 1
                    progress_bar.update()
                    caption_set.add(caption)
                    url_set.add(url)
                if count == num_samples:
                    break


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--openai-api-key", required=True, type=str)
    parser.add_argument("--openai-model", required=True, type=str)
    parser.add_argument("--num-samples", default=10000, type=int)
    parser.add_argument("--num-partitions", default=1, type=int)
    parser.add_argument("--partition", default=0, type=int)
    parser.add_argument("--seed", default=0, type=int)
    args = parser.parse_args()
    openai.api_key = args.openai_api_key
    main(args.openai_model, args.num_samples, args.num_partitions, args.partition, args.seed)


================================================
FILE: dataset_creation/prepare_dataset.py
================================================
import json
from argparse import ArgumentParser
from pathlib import Path

from tqdm.auto import tqdm


def main():
    parser = ArgumentParser()
    parser.add_argument("dataset_dir")
    args = parser.parse_args()
    dataset_dir = Path(args.dataset_dir)

    seeds = []
    with tqdm(desc="Listing dataset image seeds") as progress_bar:
        for prompt_dir in dataset_dir.iterdir():
            if prompt_dir.is_dir():
                prompt_seeds = [image_path.name.split("_")[0] for image_path in sorted(prompt_dir.glob("*_0.jpg"))]
                if len(prompt_seeds) > 0:
                    seeds.append((prompt_dir.name, prompt_seeds))
                    progress_bar.update()
    seeds.sort()

    with open(dataset_dir.joinpath("seeds.json"), "w") as f:
        json.dump(seeds, f)


if __name__ == "__main__":
    main()


================================================
FILE: dataset_creation/prepare_for_gpt.py
================================================
import json
from argparse import ArgumentParser

from generate_txt_dataset import DELIMITER_0, DELIMITER_1, STOP


def main(input_path: str, output_path: str):
    with open(input_path) as f:
        prompts = [json.loads(l) for l in f]

    with open(output_path, "w") as f:
        for prompt in prompts:
            prompt_for_gpt = {
                "prompt": f"{prompt['input']}{DELIMITER_0}",
                "completion": f"{prompt['edit']}{DELIMITER_1}{prompt['output']}{STOP}",
            }
            f.write(f"{json.dumps(prompt_for_gpt)}\n")


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--input-path", required=True, type=str)
    parser.add_argument("--output-path", required=True, type=str)
    args = parser.parse_args()
    main(args.input_path, args.output_path)


================================================
FILE: edit_app.py
================================================
from __future__ import annotations

import math
import random
import sys
from argparse import ArgumentParser

import einops
import gradio as gr
import k_diffusion as K
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from omegaconf import OmegaConf
from PIL import Image, ImageOps
from torch import autocast

sys.path.append("./stable_diffusion")

from stable_diffusion.ldm.util import instantiate_from_config


help_text = """
If you're not getting what you want, there may be a few reasons:
1. Is the image not changing enough? Your Image CFG weight may be too high. This value dictates how similar the output should be to the input. It's possible your edit requires larger changes from the original image, and your Image CFG weight isn't allowing that. Alternatively, your Text CFG weight may be too low. This value dictates how much to listen to the text instruction. The default Image CFG of 1.5 and Text CFG of 7.5 are a good starting point, but aren't necessarily optimal for each edit. Try:
    * Decreasing the Image CFG weight, or
    * Increasing the Text CFG weight, or
2. Conversely, is the image changing too much, such that the details in the original image aren't preserved? Try:
    * Increasing the Image CFG weight, or
    * Decreasing the Text CFG weight
3. Try generating results with different random seeds by setting "Randomize Seed" and running generation multiple times. You can also try setting "Randomize CFG" to sample new Text CFG and Image CFG values each time.
4. Rephrasing the instruction sometimes improves results (e.g., "turn him into a dog" vs. "make him a dog" vs. "as a dog").
5. Increasing the number of steps sometimes improves results.
6. Do faces look weird? The Stable Diffusion autoencoder has a hard time with faces that are small in the image. Try:
    * Cropping the image so the face takes up a larger portion of the frame.
"""


example_instructions = [
    "Make it a picasso painting",
    "as if it were by modigliani",
    "convert to a bronze statue",
    "Turn it into an anime.",
    "have it look like a graphic novel",
    "make him gain weight",
    "what would he look like bald?",
    "Have him smile",
    "Put him in a cocktail party.",
    "move him at the beach.",
    "add dramatic lighting",
    "Convert to black and white",
    "What if it were snowing?",
    "Give him a leather jacket",
    "Turn him into a cyborg!",
    "make him wear a beanie",
]


class CFGDenoiser(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.inner_model = model

    def forward(self, z, sigma, cond, uncond, text_cfg_scale, image_cfg_scale):
        cfg_z = einops.repeat(z, "1 ... -> n ...", n=3)
        cfg_sigma = einops.repeat(sigma, "1 ... -> n ...", n=3)
        cfg_cond = {
            "c_crossattn": [torch.cat([cond["c_crossattn"][0], uncond["c_crossattn"][0], uncond["c_crossattn"][0]])],
            "c_concat": [torch.cat([cond["c_concat"][0], cond["c_concat"][0], uncond["c_concat"][0]])],
        }
        out_cond, out_img_cond, out_uncond = self.inner_model(cfg_z, cfg_sigma, cond=cfg_cond).chunk(3)
        return out_uncond + text_cfg_scale * (out_cond - out_img_cond) + image_cfg_scale * (out_img_cond - out_uncond)


def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    if vae_ckpt is not None:
        print(f"Loading VAE from {vae_ckpt}")
        vae_sd = torch.load(vae_ckpt, map_location="cpu")["state_dict"]
        sd = {
            k: vae_sd[k[len("first_stage_model.") :]] if k.startswith("first_stage_model.") else v
            for k, v in sd.items()
        }
    model = instantiate_from_config(config.model)
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)
    return model


def main():
    parser = ArgumentParser()
    parser.add_argument("--resolution", default=512, type=int)
    parser.add_argument("--config", default="configs/generate.yaml", type=str)
    parser.add_argument("--ckpt", default="checkpoints/instruct-pix2pix-00-22000.ckpt", type=str)
    parser.add_argument("--vae-ckpt", default=None, type=str)
    args = parser.parse_args()

    config = OmegaConf.load(args.config)
    model = load_model_from_config(config, args.ckpt, args.vae_ckpt)
    model.eval().cuda()
    model_wrap = K.external.CompVisDenoiser(model)
    model_wrap_cfg = CFGDenoiser(model_wrap)
    null_token = model.get_learned_conditioning([""])
    example_image = Image.open("imgs/example.jpg").convert("RGB")

    def load_example(
        steps: int,
        randomize_seed: bool,
        seed: int,
        randomize_cfg: bool,
        text_cfg_scale: float,
        image_cfg_scale: float,
    ):
        example_instruction = random.choice(example_instructions)
        return [example_image, example_instruction] + generate(
            example_image,
            example_instruction,
            steps,
            randomize_seed,
            seed,
            randomize_cfg,
            text_cfg_scale,
            image_cfg_scale,
        )

    def generate(
        input_image: Image.Image,
        instruction: str,
        steps: int,
        randomize_seed: bool,
        seed: int,
        randomize_cfg: bool,
        text_cfg_scale: float,
        image_cfg_scale: float,
    ):
        seed = random.randint(0, 100000) if randomize_seed else seed
        text_cfg_scale = round(random.uniform(6.0, 9.0), ndigits=2) if randomize_cfg else text_cfg_scale
        image_cfg_scale = round(random.uniform(1.2, 1.8), ndigits=2) if randomize_cfg else image_cfg_scale

        width, height = input_image.size
        factor = args.resolution / max(width, height)
        factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height)
        width = int((width * factor) // 64) * 64
        height = int((height * factor) // 64) * 64
        input_image = ImageOps.fit(input_image, (width, height), method=Image.Resampling.LANCZOS)

        if instruction == "":
            return [input_image, seed]

        with torch.no_grad(), autocast("cuda"), model.ema_scope():
            cond = {}
            cond["c_crossattn"] = [model.get_learned_conditioning([instruction])]
            input_image = 2 * torch.tensor(np.array(input_image)).float() / 255 - 1
            input_image = rearrange(input_image, "h w c -> 1 c h w").to(model.device)
            cond["c_concat"] = [model.encode_first_stage(input_image).mode()]

            uncond = {}
            uncond["c_crossattn"] = [null_token]
            uncond["c_concat"] = [torch.zeros_like(cond["c_concat"][0])]

            sigmas = model_wrap.get_sigmas(steps)

            extra_args = {
                "cond": cond,
                "uncond": uncond,
                "text_cfg_scale": text_cfg_scale,
                "image_cfg_scale": image_cfg_scale,
            }
            torch.manual_seed(seed)
            z = torch.randn_like(cond["c_concat"][0]) * sigmas[0]
            z = K.sampling.sample_euler_ancestral(model_wrap_cfg, z, sigmas, extra_args=extra_args)
            x = model.decode_first_stage(z)
            x = torch.clamp((x + 1.0) / 2.0, min=0.0, max=1.0)
            x = 255.0 * rearrange(x, "1 c h w -> h w c")
            edited_image = Image.fromarray(x.type(torch.uint8).cpu().numpy())

            return [seed, text_cfg_scale, image_cfg_scale, edited_image]

    def reset():
        return [0, "Randomize Seed", 1371, "Fix CFG", 7.5, 1.5, None]

    with gr.Blocks(css="footer {visibility: hidden}") as demo:
        with gr.Row():
            with gr.Column(scale=1, min_width=100):
                generate_button = gr.Button("Generate")
            with gr.Column(scale=1, min_width=100):
                load_button = gr.Button("Load Example")
            with gr.Column(scale=1, min_width=100):
                reset_button = gr.Button("Reset")
            with gr.Column(scale=3):
                instruction = gr.Textbox(lines=1, label="Edit Instruction", interactive=True)

        with gr.Row():
            input_image = gr.Image(label="Input Image", type="pil", interactive=True)
            edited_image = gr.Image(label=f"Edited Image", type="pil", interactive=False)
            input_image.style(height=512, width=512)
            edited_image.style(height=512, width=512)

        with gr.Row():
            steps = gr.Number(value=100, precision=0, label="Steps", interactive=True)
            randomize_seed = gr.Radio(
                ["Fix Seed", "Randomize Seed"],
                value="Randomize Seed",
                type="index",
                show_label=False,
                interactive=True,
            )
            seed = gr.Number(value=1371, precision=0, label="Seed", interactive=True)
            randomize_cfg = gr.Radio(
                ["Fix CFG", "Randomize CFG"],
                value="Fix CFG",
                type="index",
                show_label=False,
                interactive=True,
            )
            text_cfg_scale = gr.Number(value=7.5, label="Text CFG", interactive=True)
            image_cfg_scale = gr.Number(value=1.5, label="Image CFG", interactive=True)

        gr.Markdown(help_text)

        load_button.click(
            fn=load_example,
            inputs=[
                steps,
                randomize_seed,
                seed,
                randomize_cfg,
                text_cfg_scale,
                image_cfg_scale,
            ],
            outputs=[input_image, instruction, seed, text_cfg_scale, image_cfg_scale, edited_image],
        )
        generate_button.click(
            fn=generate,
            inputs=[
                input_image,
                instruction,
                steps,
                randomize_seed,
                seed,
                randomize_cfg,
                text_cfg_scale,
                image_cfg_scale,
            ],
            outputs=[seed, text_cfg_scale, image_cfg_scale, edited_image],
        )
        reset_button.click(
            fn=reset,
            inputs=[],
            outputs=[steps, randomize_seed, seed, randomize_cfg, text_cfg_scale, image_cfg_scale, edited_image],
        )

    demo.queue(concurrency_count=1)
    demo.launch(share=True)


if __name__ == "__main__":
    main()


================================================
FILE: edit_cli.py
================================================
from __future__ import annotations

import math
import random
import sys
from argparse import ArgumentParser

import einops
import k_diffusion as K
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from omegaconf import OmegaConf
from PIL import Image, ImageOps
from torch import autocast

sys.path.append("./stable_diffusion")

from stable_diffusion.ldm.util import instantiate_from_config


class CFGDenoiser(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.inner_model = model

    def forward(self, z, sigma, cond, uncond, text_cfg_scale, image_cfg_scale):
        cfg_z = einops.repeat(z, "1 ... -> n ...", n=3)
        cfg_sigma = einops.repeat(sigma, "1 ... -> n ...", n=3)
        cfg_cond = {
            "c_crossattn": [torch.cat([cond["c_crossattn"][0], uncond["c_crossattn"][0], uncond["c_crossattn"][0]])],
            "c_concat": [torch.cat([cond["c_concat"][0], cond["c_concat"][0], uncond["c_concat"][0]])],
        }
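        # A single batched pass produces three denoiser outputs: fully conditioned
        # (text + image), image-conditioned only, and fully unconditional; the return
        # below recombines them with separate text and image guidance scales.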
        out_cond, out_img_cond, out_uncond = self.inner_model(cfg_z, cfg_sigma, cond=cfg_cond).chunk(3)
        return out_uncond + text_cfg_scale * (out_cond - out_img_cond) + image_cfg_scale * (out_img_cond - out_uncond)


def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    if vae_ckpt is not None:
        print(f"Loading VAE from {vae_ckpt}")
        vae_sd = torch.load(vae_ckpt, map_location="cpu")["state_dict"]
        sd = {
            k: vae_sd[k[len("first_stage_model.") :]] if k.startswith("first_stage_model.") else v
            for k, v in sd.items()
        }
    model = instantiate_from_config(config.model)
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)
    return model


def main():
    parser = ArgumentParser()
    parser.add_argument("--resolution", default=512, type=int)
    parser.add_argument("--steps", default=100, type=int)
    parser.add_argument("--config", default="configs/generate.yaml", type=str)
    parser.add_argument("--ckpt", default="checkpoints/instruct-pix2pix-00-22000.ckpt", type=str)
    parser.add_argument("--vae-ckpt", default=None, type=str)
    parser.add_argument("--input", required=True, type=str)
    parser.add_argument("--output", required=True, type=str)
    parser.add_argument("--edit", required=True, type=str)
    parser.add_argument("--cfg-text", default=7.5, type=float)
    parser.add_argument("--cfg-image", default=1.5, type=float)
    parser.add_argument("--seed", type=int)
    args = parser.parse_args()

    config = OmegaConf.load(args.config)
    model = load_model_from_config(config, args.ckpt, args.vae_ckpt)
    model.eval().cuda()
    model_wrap = K.external.CompVisDenoiser(model)
    model_wrap_cfg = CFGDenoiser(model_wrap)
    null_token = model.get_learned_conditioning([""])

    seed = random.randint(0, 100000) if args.seed is None else args.seed
    input_image = Image.open(args.input).convert("RGB")
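    # Rescale so the longer side is close to --resolution, then snap both sides to
    # multiples of 64, since the model's downsampling stages require dimensions
    # divisible by 64.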
    width, height = input_image.size
    factor = args.resolution / max(width, height)
    factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height)
    width = int((width * factor) // 64) * 64
    height = int((height * factor) // 64) * 64
    input_image = ImageOps.fit(input_image, (width, height), method=Image.Resampling.LANCZOS)

    if args.edit == "":
        input_image.save(args.output)
        return

    with torch.no_grad(), autocast("cuda"), model.ema_scope():
        cond = {}
        cond["c_crossattn"] = [model.get_learned_conditioning([args.edit])]
        input_image = 2 * torch.tensor(np.array(input_image)).float() / 255 - 1
        input_image = rearrange(input_image, "h w c -> 1 c h w").to(model.device)
        cond["c_concat"] = [model.encode_first_stage(input_image).mode()]

        uncond = {}
        uncond["c_crossattn"] = [null_token]
        uncond["c_concat"] = [torch.zeros_like(cond["c_concat"][0])]

        sigmas = model_wrap.get_sigmas(args.steps)

        extra_args = {
            "cond": cond,
            "uncond": uncond,
            "text_cfg_scale": args.cfg_text,
            "image_cfg_scale": args.cfg_image,
        }
        torch.manual_seed(seed)
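        # Sampling starts from Gaussian noise scaled to the largest sigma; k-diffusion's
        # Euler ancestral sampler then denoises it into the edited latent.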
        z = torch.randn_like(cond["c_concat"][0]) * sigmas[0]
        z = K.sampling.sample_euler_ancestral(model_wrap_cfg, z, sigmas, extra_args=extra_args)
        x = model.decode_first_stage(z)
        x = torch.clamp((x + 1.0) / 2.0, min=0.0, max=1.0)
        x = 255.0 * rearrange(x, "1 c h w -> h w c")
        edited_image = Image.fromarray(x.type(torch.uint8).cpu().numpy())
    edited_image.save(args.output)


if __name__ == "__main__":
    main()
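
A minimal usage sketch for edit_cli.py, assuming the default checkpoint has been fetched with scripts/download_checkpoints.sh; the image paths and the edit instruction below are placeholders:

    python edit_cli.py --input input.jpg --output output.jpg --edit "make it look like a watercolor painting"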


================================================
FILE: edit_dataset.py
================================================
from __future__ import annotations

import json
import math
from pathlib import Path
from typing import Any

import numpy as np
import torch
import torchvision
from einops import rearrange
from PIL import Image
from torch.utils.data import Dataset


class EditDataset(Dataset):
    def __init__(
        self,
        path: str,
        split: str = "train",
        splits: tuple[float, float, float] = (0.9, 0.05, 0.05),
        min_resize_res: int = 256,
        max_resize_res: int = 256,
        crop_res: int = 256,
        flip_prob: float = 0.0,
    ):
        assert split in ("train", "val", "test")
        assert sum(splits) == 1
        self.path = path
        self.min_resize_res = min_resize_res
        self.max_resize_res = max_resize_res
        self.crop_res = crop_res
        self.flip_prob = flip_prob

        with open(Path(self.path, "seeds.json")) as f:
            self.seeds = json.load(f)

        split_0, split_1 = {
            "train": (0.0, splits[0]),
            "val": (splits[0], splits[0] + splits[1]),
            "test": (splits[0] + splits[1], 1.0),
        }[split]

        idx_0 = math.floor(split_0 * len(self.seeds))
        idx_1 = math.floor(split_1 * len(self.seeds))
        self.seeds = self.seeds[idx_0:idx_1]

    def __len__(self) -> int:
        return len(self.seeds)

    def __getitem__(self, i: int) -> dict[str, Any]:
        name, seeds = self.seeds[i]
        prompt_dir = Path(self.path, name)
        seed = seeds[torch.randint(0, len(seeds), ()).item()]
        with open(prompt_dir.joinpath("prompt.json")) as fp:
            prompt = json.load(fp)["edit"]

        # {seed}_0.jpg is the input image and {seed}_1.jpg is the edited target.
        image_0 = Image.open(prompt_dir.joinpath(f"{seed}_0.jpg"))
        image_1 = Image.open(prompt_dir.joinpath(f"{seed}_1.jpg"))

        resize_res = torch.randint(self.min_resize_res, self.max_resize_res + 1, ()).item()
        image_0 = image_0.resize((resize_res, resize_res), Image.Resampling.LANCZOS)
        image_1 = image_1.resize((resize_res, resize_res), Image.Resampling.LANCZOS)

        image_0 = rearrange(2 * torch.tensor(np.array(image_0)).float() / 255 - 1, "h w c -> c h w")
        image_1 = rearrange(2 * torch.tensor(np.array(image_1)).float() / 255 - 1, "h w c -> c h w")

        # Stack the image pair along channels so the random crop and flip are applied
        # identically to both, then split them back apart.
        crop = torchvision.transforms.RandomCrop(self.crop_res)
        flip = torchvision.transforms.RandomHorizontalFlip(float(self.flip_prob))
        image_0, image_1 = flip(crop(torch.cat((image_0, image_1)))).chunk(2)

        return dict(edited=image_1, edit=dict(c_concat=image_0, c_crossattn=prompt))


class EditDatasetEval(Dataset):
    def __init__(
        self,
        path: str,
        split: str = "train",
        splits: tuple[float, float, float] = (0.9, 0.05, 0.05),
        res: int = 256,
    ):
        assert split in ("train", "val", "test")
        assert sum(splits) == 1
        self.path = path
        self.res = res

        with open(Path(self.path, "seeds.json")) as f:
            self.seeds = json.load(f)

        split_0, split_1 = {
            "train": (0.0, splits[0]),
            "val": (splits[0], splits[0] + splits[1]),
            "test": (splits[0] + splits[1], 1.0),
        }[split]

        idx_0 = math.floor(split_0 * len(self.seeds))
        idx_1 = math.floor(split_1 * len(self.seeds))
        self.seeds = self.seeds[idx_0:idx_1]

    def __len__(self) -> int:
        return len(self.seeds)

    def __getitem__(self, i: int) -> dict[str, Any]:
        name, seeds = self.seeds[i]
        prompt_dir = Path(self.path, name)
        seed = seeds[torch.randint(0, len(seeds), ()).item()]
        with open(prompt_dir.joinpath("prompt.json")) as fp:
            prompt = json.load(fp)
            edit = prompt["edit"]
            input_prompt = prompt["input"]
            output_prompt = prompt["output"]

        image_0 = Image.open(prompt_dir.joinpath(f"{seed}_0.jpg"))

        resize_res = torch.randint(self.res, self.res + 1, ()).item()
        image_0 = image_0.resize((resize_res, resize_res), Image.Resampling.LANCZOS)

        image_0 = rearrange(2 * torch.tensor(np.array(image_0)).float() / 255 - 1, "h w c -> c h w")

        return dict(image_0=image_0, input_prompt=input_prompt, edit=edit, output_prompt=output_prompt)


================================================
FILE: environment.yaml
================================================
# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
# See more details in LICENSE.

name: ip2p
channels:
  - pytorch
  - defaults
dependencies:
  - python=3.8.5
  - pip=20.3
  - cudatoolkit=11.3
  - pytorch=1.11.0
  - torchvision=0.12.0
  - numpy=1.19.2
  - pip:
    - albumentations==0.4.3
    - datasets==2.8.0
    - diffusers
    - opencv-python==4.1.2.30
    - pudb==2019.2
    - invisible-watermark
    - imageio==2.9.0
    - imageio-ffmpeg==0.4.2
    - pytorch-lightning==1.4.2
    - omegaconf==2.1.1
    - test-tube>=0.7.5
    - streamlit>=0.73.1
    - einops==0.3.0
    - torch-fidelity==0.3.0
    - transformers==4.19.2
    - torchmetrics==0.6.0
    - kornia==0.6
    - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
    - -e git+https://github.com/openai/CLIP.git@main#egg=clip
    - openai
    - gradio
    - seaborn
    - git+https://github.com/crowsonkb/k-diffusion.git
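
A typical way to create and activate this environment, assuming a working conda installation (the environment name "ip2p" comes from the file above):

    conda env create -f environment.yaml
    conda activate ip2p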


================================================
FILE: main.py
================================================
import argparse, os, sys, datetime, glob
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
import json
import pickle

from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, Dataset
from functools import partial
from PIL import Image

import torch.distributed as dist
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.plugins import DDPPlugin

sys.path.append("./stable_diffusion")

from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config


def get_parser(**parser_kwargs):
    def str2bool(v):
        if isinstance(v, bool):
            return v
        if v.lower() in ("yes", "true", "t", "y", "1"):
            return True
        elif v.lower() in ("no", "false", "f", "n", "0"):
            return False
        else:
            raise argparse.ArgumentTypeError("Boolean value expected.")

    parser = argparse.ArgumentParser(**parser_kwargs)
    parser.add_argument(
        "-n",
        "--name",
        type=str,
        const=True,
        default="",
        nargs="?",
        help="postfix for logdir",
    )
    parser.add_argument(
        "-r",
        "--resume",
        type=str,
        const=True,
        default="",
        nargs="?",
        help="resume from logdir or checkpoint in logdir",
    )
    parser.add_argument(
        "-b",
        "--base",
        nargs="*",
        metavar="base_config.yaml",
        help="paths to base configs. Loaded from left-to-right. "
             "Parameters can be overwritten or added with command-line options of the form `--key value`.",
        default=list(),
    )
    parser.add_argument(
        "-t",
        "--train",
        type=str2bool,
        const=True,
        default=False,
        nargs="?",
        help="train",
    )
    parser.add_argument(
        "--no-test",
        type=str2bool,
        const=True,
        default=False,
        nargs="?",
        help="disable test",
    )
    parser.add_argument(
        "-p",
        "--project",
        help="name of new or path to existing project"
    )
    parser.add_argument(
        "-d",
        "--debug",
        type=str2bool,
        nargs="?",
        const=True,
        default=False,
        help="enable post-mortem debugging",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=23,
        help="seed for seed_everything",
    )
    parser.add_argument(
        "-f",
        "--postfix",
        type=str,
        default="",
        help="post-postfix for default name",
    )
    parser.add_argument(
        "-l",
        "--logdir",
        type=str,
        default="logs",
        help="directory for logging dat shit",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="scale base-lr by ngpu * batch_size * n_accumulate",
    )
    return parser


def nondefault_trainer_args(opt):
    parser = argparse.ArgumentParser()
    parser = Trainer.add_argparse_args(parser)
    args = parser.parse_args([])
    return sorted(k for k in vars(args) if getattr(opt, k) != getattr(args, k))


class WrappedDataset(Dataset):
    """Wraps an arbitrary object with __len__ and __getitem__ into a pytorch dataset"""

    def __init__(self, dataset):
        self.data = dataset

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]


def worker_init_fn(_):
    worker_info = torch.utils.data.get_worker_info()

    dataset = worker_info.dataset
    worker_id = worker_info.id

    if isinstance(dataset, Txt2ImgIterableBaseDataset):
        split_size = dataset.num_records // worker_info.num_workers
        # assign this worker its own contiguous shard of the valid sample ids
        dataset.sample_ids = dataset.valid_ids[worker_id * split_size:(worker_id + 1) * split_size]
        current_id = np.random.choice(len(np.random.get_state()[1]), 1)
        return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
    else:
        return np.random.seed(np.random.get_state()[1][0] + worker_id)


class DataModuleFromConfig(pl.LightningDataModule):
    def __init__(self, batch_size, train=None, validation=None, test=None, predict=None,
                 wrap=False, num_workers=None, shuffle_test_loader=False, use_worker_init_fn=False,
                 shuffle_val_dataloader=False):
        super().__init__()
        self.batch_size = batch_size
        self.dataset_configs = dict()
        self.num_workers = num_workers if num_workers is not None else batch_size * 2
        self.use_worker_init_fn = use_worker_init_fn
        if train is not None:
            self.dataset_configs["train"] = train
            self.train_dataloader = self._train_dataloader
        if validation is not None:
            self.dataset_configs["validation"] = validation
            self.val_dataloader = partial(self._val_dataloader, shuffle=shuffle_val_dataloader)
        if test is not None:
            self.dataset_configs["test"] = test
            self.test_dataloader = partial(self._test_dataloader, shuffle=shuffle_test_loader)
        if predict is not None:
            self.dataset_configs["predict"] = predict
            self.predict_dataloader = self._predict_dataloader
        self.wrap = wrap

    def prepare_data(self):
        for data_cfg in self.dataset_configs.values():
            instantiate_from_config(data_cfg)

    def setup(self, stage=None):
        self.datasets = dict(
            (k, instantiate_from_config(self.dataset_configs[k]))
            for k in self.dataset_configs)
        if self.wrap:
            for k in self.datasets:
                self.datasets[k] = WrappedDataset(self.datasets[k])

    def _train_dataloader(self):
        is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
        if is_iterable_dataset or self.use_worker_init_fn:
            init_fn = worker_init_fn
        else:
            init_fn = None
        return DataLoader(self.datasets["train"], batch_size=self.batch_size,
                          num_workers=self.num_workers, shuffle=False if is_iterable_dataset else True,
                          worker_init_fn=init_fn, persistent_workers=True)

    def _val_dataloader(self, shuffle=False):
        if isinstance(self.datasets['validation'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn:
            init_fn = worker_init_fn
        else:
            init_fn = None
        return DataLoader(self.datasets["validation"],
                          batch_size=self.batch_size,
                          num_workers=self.num_workers,
                          worker_init_fn=init_fn,
                          shuffle=shuffle, persistent_workers=True)

    def _test_dataloader(self, shuffle=False):
        is_iterable_dataset = isinstance(self.datasets['test'], Txt2ImgIterableBaseDataset)
        if is_iterable_dataset or self.use_worker_init_fn:
            init_fn = worker_init_fn
        else:
            init_fn = None

        # do not shuffle dataloader for iterable dataset
        shuffle = shuffle and (not is_iterable_dataset)

        return DataLoader(self.datasets["test"], batch_size=self.batch_size,
                          num_workers=self.num_workers, worker_init_fn=init_fn, shuffle=shuffle, persistent_workers=True)

    def _predict_dataloader(self, shuffle=False):
        if isinstance(self.datasets['predict'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn:
            init_fn = worker_init_fn
        else:
            init_fn = None
        return DataLoader(self.datasets["predict"], batch_size=self.batch_size,
                          num_workers=self.num_workers, worker_init_fn=init_fn, persistent_workers=True)


class SetupCallback(Callback):
    def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config):
        super().__init__()
        self.resume = resume
        self.now = now
        self.logdir = logdir
        self.ckptdir = ckptdir
        self.cfgdir = cfgdir
        self.config = config
        self.lightning_config = lightning_config

    def on_keyboard_interrupt(self, trainer, pl_module):
        if trainer.global_rank == 0:
            print("Summoning checkpoint.")
            ckpt_path = os.path.join(self.ckptdir, "last.ckpt")
            trainer.save_checkpoint(ckpt_path)

    def on_pretrain_routine_start(self, trainer, pl_module):
        if trainer.global_rank == 0:
            # Create logdirs and save configs
            # os.makedirs(self.logdir, exist_ok=True)
            # os.makedirs(self.ckptdir, exist_ok=True)
            # os.makedirs(self.cfgdir, exist_ok=True)

            if "callbacks" in self.lightning_config:
                if 'metrics_over_trainsteps_checkpoint' in self.lightning_config['callbacks']:
                    os.makedirs(os.path.join(self.ckptdir, 'trainstep_checkpoints'), exist_ok=True)
            print("Project config")
            print(OmegaConf.to_yaml(self.config))
            OmegaConf.save(self.config,
                           os.path.join(self.cfgdir, "{}-project.yaml".format(self.now)))

            print("Lightning config")
            print(OmegaConf.to_yaml(self.lightning_config))
            OmegaConf.save(OmegaConf.create({"lightning": self.lightning_config}),
                           os.path.join(self.cfgdir, "{}-lightning.yaml".format(self.now)))

def get_world_size():
    if not dist.is_available():
        return 1
    if not dist.is_initialized():
        return 1
    return dist.get_world_size()

def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # serialized to a Tensor
    origin_size = None
    if not isinstance(data, torch.Tensor):
        buffer = pickle.dumps(data)
        storage = torch.ByteStorage.from_buffer(buffer)
        tensor = torch.ByteTensor(storage).to("cuda")
    else:
        origin_size = data.size()
        tensor = data.reshape(-1)

    tensor_type = tensor.dtype

    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.FloatTensor(size=(max_size,)).cuda().to(tensor_type))
    if local_size != max_size:
        padding = torch.FloatTensor(size=(max_size - local_size,)).cuda().to(tensor_type)
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        if origin_size is None:
            buffer = tensor.cpu().numpy().tobytes()[:size]
            data_list.append(pickle.loads(buffer))
        else:
            buffer = tensor[:size]
            data_list.append(buffer)

    if origin_size is not None:
        new_shape = [-1] + list(origin_size[1:])
        resized_list = []
        for data in data_list:
            # assume any difference in tensor size is in the first dimension
            data = data.reshape(new_shape)
            resized_list.append(data)

        return resized_list
    else:
        return data_list

class ImageLogger(Callback):
    def __init__(self, batch_frequency, max_images, clamp=True, increase_log_steps=True,
                 rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False,
                 log_images_kwargs=None):
        super().__init__()
        self.rescale = rescale
        self.batch_freq = batch_frequency
        self.max_images = max_images
        self.logger_log_images = {
            pl.loggers.TestTubeLogger: self._testtube,
        }
        self.log_steps = [2 ** n for n in range(6, int(np.log2(self.batch_freq)) + 1)]
        if not increase_log_steps:
            self.log_steps = [self.batch_freq]
        self.clamp = clamp
        self.disabled = disabled
        self.log_on_batch_idx = log_on_batch_idx
        self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {}
        self.log_first_step = log_first_step

    @rank_zero_only
    def _testtube(self, pl_module, images, batch_idx, split):
        for k in images:
            grid = torchvision.utils.make_grid(images[k])
            grid = (grid + 1.0) / 2.0  # -1,1 -> 0,1; c,h,w

            tag = f"{split}/{k}"
            pl_module.logger.experiment.add_image(
                tag, grid,
                global_step=pl_module.global_step)

    @rank_zero_only
    def log_local(self, save_dir, split, images, prompts,
                  global_step, current_epoch, batch_idx):
        root = os.path.join(save_dir, "images", split)
        names = {"reals": "before", "inputs": "after", "reconstruction": "before-vq", "samples": "after-gen"}
        # print(root)
        for k in images:
            grid = torchvision.utils.make_grid(images[k], nrow=8)
            if self.rescale:
                grid = (grid + 1.0) / 2.0  # -1,1 -> 0,1; c,h,w
            grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
            grid = grid.numpy()
            grid = (grid * 255).astype(np.uint8)
            filename = "gs-{:06}_e-{:06}_b-{:06}_{}.png".format(
                global_step,
                current_epoch,
                batch_idx,
                names[k])
            path = os.path.join(root, filename)
            os.makedirs(os.path.split(path)[0], exist_ok=True)
            # print(path)
            Image.fromarray(grid).save(path)

        filename = "gs-{:06}_e-{:06}_b-{:06}_prompt.json".format(
            global_step,
            current_epoch,
            batch_idx)
        path = os.path.join(root, filename)
        with open(path, "w") as f:
            for p in prompts:
                f.write(f"{json.dumps(p)}\n")

    def log_img(self, pl_module, batch, batch_idx, split="train"):
        check_idx = batch_idx if self.log_on_batch_idx else pl_module.global_step
        if (self.check_frequency(check_idx) and  # batch_idx % self.batch_freq == 0
                hasattr(pl_module, "log_images") and
                callable(pl_module.log_images) and
                self.max_images > 0) or (split == "val" and batch_idx == 0):
            logger = type(pl_module.logger)

            is_train = pl_module.training
            if is_train:
                pl_module.eval()

            with torch.no_grad():
                images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)

            prompts = batch["edit"]["c_crossattn"][:self.max_images]
            prompts = [p for ps in all_gather(prompts) for p in ps]

            for k in images:
                N = min(images[k].shape[0], self.max_images)
                images[k] = images[k][:N]
                images[k] = torch.cat(all_gather(images[k][:N]))
                if isinstance(images[k], torch.Tensor):
                    images[k] = images[k].detach().cpu()
                    if self.clamp:
                        images[k] = torch.clamp(images[k], -1., 1.)

            self.log_local(pl_module.logger.save_dir, split, images, prompts,
                           pl_module.global_step, pl_module.current_epoch, batch_idx)

            logger_log_images = self.logger_log_images.get(logger, lambda *args, **kwargs: None)
            logger_log_images(pl_module, images, pl_module.global_step, split)

            if is_train:
                pl_module.train()

    def check_frequency(self, check_idx):
        if ((check_idx % self.batch_freq) == 0 or (check_idx in self.log_steps)) and (
                check_idx > 0 or self.log_first_step):
            if len(self.log_steps) > 0:
                self.log_steps.pop(0)
            return True
        return False

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        if not self.disabled and (pl_module.global_step > 0 or self.log_first_step):
            self.log_img(pl_module, batch, batch_idx, split="train")

    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        if not self.disabled and pl_module.global_step > 0:
            self.log_img(pl_module, batch, batch_idx, split="val")
        if hasattr(pl_module, 'calibrate_grad_norm'):
            if (pl_module.calibrate_grad_norm and batch_idx % 25 == 0) and batch_idx > 0:
                self.log_gradients(trainer, pl_module, batch_idx=batch_idx)


class CUDACallback(Callback):
    # see https://github.com/SeanNaren/minGPT/blob/master/mingpt/callback.py
    def on_train_epoch_start(self, trainer, pl_module):
        # Reset the memory use counter
        torch.cuda.reset_peak_memory_stats(trainer.root_gpu)
        torch.cuda.synchronize(trainer.root_gpu)
        self.start_time = time.time()

    def on_train_epoch_end(self, trainer, pl_module, outputs):
        torch.cuda.synchronize(trainer.root_gpu)
        max_memory = torch.cuda.max_memory_allocated(trainer.root_gpu) / 2 ** 20
        epoch_time = time.time() - self.start_time

        try:
            max_memory = trainer.training_type_plugin.reduce(max_memory)
            epoch_time = trainer.training_type_plugin.reduce(epoch_time)

            rank_zero_info(f"Average Epoch time: {epoch_time:.2f} seconds")
            rank_zero_info(f"Average Peak memory {max_memory:.2f}MiB")
        except AttributeError:
            pass


if __name__ == "__main__":
    # custom parser to specify config files, train, test and debug mode,
    # postfix, resume.
    # `--key value` arguments are interpreted as arguments to the trainer.
    # `nested.key=value` arguments are interpreted as config parameters.
    # configs are merged from left-to-right followed by command line parameters.

    # model:
    #   base_learning_rate: float
    #   target: path to lightning module
    #   params:
    #       key: value
    # data:
    #   target: main.DataModuleFromConfig
    #   params:
    #      batch_size: int
    #      wrap: bool
    #      train:
    #          target: path to train dataset
    #          params:
    #              key: value
    #      validation:
    #          target: path to validation dataset
    #          params:
    #              key: value
    #      test:
    #          target: path to test dataset
    #          params:
    #              key: value
    # lightning: (optional, has sane defaults and can be specified on cmdline)
    #   trainer:
    #       additional arguments to trainer
    #   logger:
    #       logger to instantiate
    #   modelcheckpoint:
    #       modelcheckpoint to instantiate
    #   callbacks:
    #       callback1:
    #           target: importpath
    #           params:
    #               key: value

    now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")

    # add cwd for convenience and to make classes in this file available when
    # running as `python main.py`
    # (in particular `main.DataModuleFromConfig`)
    sys.path.append(os.getcwd())

    parser = get_parser()
    parser = Trainer.add_argparse_args(parser)

    opt, unknown = parser.parse_known_args()

    assert opt.name
    cfg_fname = os.path.split(opt.base[0])[-1]
    cfg_name = os.path.splitext(cfg_fname)[0]
    nowname = f"{cfg_name}_{opt.name}"
    logdir = os.path.join(opt.logdir, nowname)
    ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")
    resume = False

    if os.path.isfile(ckpt):
        opt.resume_from_checkpoint = ckpt
        base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml")))
        opt.base = base_configs + opt.base
        _tmp = logdir.split("/")
        nowname = _tmp[-1]
        resume = True

    ckptdir = os.path.join(logdir, "checkpoints")
    cfgdir = os.path.join(logdir, "configs")

    os.makedirs(logdir, exist_ok=True)
    os.makedirs(ckptdir, exist_ok=True)
    os.makedirs(cfgdir, exist_ok=True)

    try:
        # init and save configs
        configs = [OmegaConf.load(cfg) for cfg in opt.base]
        cli = OmegaConf.from_dotlist(unknown)
        config = OmegaConf.merge(*configs, cli)

        if resume:
            # By default, when finetuning from Stable Diffusion, we load the EMA-only checkpoint to initialize all weights.
            # If resuming InstructPix2Pix from a finetuning checkpoint, instead load both EMA and non-EMA weights.
            config.model.params.load_ema = True

        lightning_config = config.pop("lightning", OmegaConf.create())
        # merge trainer cli with config
        trainer_config = lightning_config.get("trainer", OmegaConf.create())
        # default to ddp
        trainer_config["accelerator"] = "ddp"
        for k in nondefault_trainer_args(opt):
            trainer_config[k] = getattr(opt, k)
        if not "gpus" in trainer_config:
            del trainer_config["accelerator"]
            cpu = True
        else:
            gpuinfo = trainer_config["gpus"]
            print(f"Running on GPUs {gpuinfo}")
            cpu = False
        trainer_opt = argparse.Namespace(**trainer_config)
        lightning_config.trainer = trainer_config

        # model
        model = instantiate_from_config(config.model)

        # trainer and callbacks
        trainer_kwargs = dict()

        # default logger configs
        default_logger_cfgs = {
            "wandb": {
                "target": "pytorch_lightning.loggers.WandbLogger",
                "params": {
                    "name": nowname,
                    "save_dir": logdir,
                    "id": nowname,
                }
            },
            "testtube": {
                "target": "pytorch_lightning.loggers.TestTubeLogger",
                "params": {
                    "name": "testtube",
                    "save_dir": logdir,
                }
            },
        }
        default_logger_cfg = default_logger_cfgs["wandb"]
        if "logger" in lightning_config:
            logger_cfg = lightning_config.logger
        else:
            logger_cfg = OmegaConf.create()
        logger_cfg = OmegaConf.merge(default_logger_cfg, logger_cfg)
        trainer_kwargs["logger"] = instantiate_from_config(logger_cfg)

        # modelcheckpoint - use TrainResult/EvalResult(checkpoint_on=metric) to
        # specify which metric is used to determine best models
        default_modelckpt_cfg = {
            "target": "pytorch_lightning.callbacks.ModelCheckpoint",
            "params": {
                "dirpath": ckptdir,
                "filename": "{epoch:06}",
                "verbose": True,
                "save_last": True,
            }
        }

        if "modelcheckpoint" in lightning_config:
            modelckpt_cfg = lightning_config.modelcheckpoint
        else:
            modelckpt_cfg = OmegaConf.create()
        modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg)
        print(f"Merged modelckpt-cfg: \n{modelckpt_cfg}")
        if version.parse(pl.__version__) < version.parse('1.4.0'):
            trainer_kwargs["checkpoint_callback"] = instantiate_from_config(modelckpt_cfg)

        # add callback which sets up log directory
        default_callbacks_cfg = {
            "setup_callback": {
                "target": "main.SetupCallback",
                "params": {
                    "resume": opt.resume,
                    "now": now,
                    "logdir": logdir,
                    "ckptdir": ckptdir,
                    "cfgdir": cfgdir,
                    "config": config,
                    "lightning_config": lightning_config,
                }
            },
            "image_logger": {
                "target": "main.ImageLogger",
                "params": {
                    "batch_frequency": 750,
                    "max_images": 4,
                    "clamp": True
                }
            },
            "learning_rate_logger": {
                "target": "main.LearningRateMonitor",
                "params": {
                    "logging_interval": "step",
                    # "log_momentum": True
                }
            },
            "cuda_callback": {
                "target": "main.CUDACallback"
            },
        }
        if version.parse(pl.__version__) >= version.parse('1.4.0'):
            default_callbacks_cfg.update({'checkpoint_callback': modelckpt_cfg})

        if "callbacks" in lightning_config:
            callbacks_cfg = lightning_config.callbacks
        else:
            callbacks_cfg = OmegaConf.create()

        print(
            'Caution: Saving checkpoints every n train steps without deleting. This might require some free space.')
        default_metrics_over_trainsteps_ckpt_dict = {
            'metrics_over_trainsteps_checkpoint': {
                "target": 'pytorch_lightning.callbacks.ModelCheckpoint',
                'params': {
                    "dirpath": os.path.join(ckptdir, 'trainstep_checkpoints'),
                    "filename": "{epoch:06}-{step:09}",
                    "verbose": True,
                    'save_top_k': -1,
                    'every_n_train_steps': 1000,
                    'save_weights_only': True
                }
            }
        }
        default_callbacks_cfg.update(default_metrics_over_trainsteps_ckpt_dict)

        callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg)
        if 'ignore_keys_callback' in callbacks_cfg and hasattr(trainer_opt, 'resume_from_checkpoint'):
            callbacks_cfg.ignore_keys_callback.params['ckpt_path'] = trainer_opt.resume_from_checkpoint
        elif 'ignore_keys_callback' in callbacks_cfg:
            del callbacks_cfg['ignore_keys_callback']

        trainer_kwargs["callbacks"] = [instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg]

        trainer = Trainer.from_argparse_args(trainer_opt, plugins=DDPPlugin(find_unused_parameters=False), **trainer_kwargs)
        trainer.logdir = logdir  ###

        # data
        data = instantiate_from_config(config.data)
        # NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html
        # calling these ourselves should not be necessary but it is.
        # lightning still takes care of proper multiprocessing though
        data.prepare_data()
        data.setup()
        print("#### Data #####")
        for k in data.datasets:
            print(f"{k}, {data.datasets[k].__class__.__name__}, {len(data.datasets[k])}")

        # configure learning rate
        bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate
        if not cpu:
            ngpu = len(lightning_config.trainer.gpus.strip(",").split(','))
        else:
            ngpu = 1
        if 'accumulate_grad_batches' in lightning_config.trainer:
            accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches
        else:
            accumulate_grad_batches = 1
        print(f"accumulate_grad_batches = {accumulate_grad_batches}")
        lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches
        if opt.scale_lr:
            model.learning_rate = accumulate_grad_batches * ngpu * bs * base_lr
            print(
                "Setting learning rate to {:.2e} = {} (accumulate_grad_batches) * {} (num_gpus) * {} (batchsize) * {:.2e} (base_lr)".format(
                    model.learning_rate, accumulate_grad_batches, ngpu, bs, base_lr))
        else:
            model.learning_rate = base_lr
            print("++++ NOT USING LR SCALING ++++")
            print(f"Setting learning rate to {model.learning_rate:.2e}")


        # allow checkpointing via USR1
        def melk(*args, **kwargs):
            # run all checkpoint hooks
            if trainer.global_rank == 0:
                print("Summoning checkpoint.")
                ckpt_path = os.path.join(ckptdir, "last.ckpt")
                trainer.save_checkpoint(ckpt_path)


        def divein(*args, **kwargs):
            if trainer.global_rank == 0:
                import pudb;
                pudb.set_trace()


        import signal

        signal.signal(signal.SIGUSR1, melk)
        signal.signal(signal.SIGUSR2, divein)

        # run
        if opt.train:
            try:
                trainer.fit(model, data)
            except Exception:
                melk()
                raise
        if not opt.no_test and not trainer.interrupted:
            trainer.test(model, data)
    except Exception:
        if opt.debug and trainer.global_rank == 0:
            try:
                import pudb as debugger
            except ImportError:
                import pdb as debugger
            debugger.post_mortem()
        raise
    finally:
        # move newly created debug project to debug_runs
        if opt.debug and not opt.resume and trainer.global_rank == 0:
            dst, name = os.path.split(logdir)
            dst = os.path.join(dst, "debug_runs", name)
            os.makedirs(os.path.split(dst)[0], exist_ok=True)
            os.rename(logdir, dst)
        if trainer.global_rank == 0:
            print(trainer.profiler.summary())
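
One plausible way to launch training with the parser above; the run name is a placeholder, and --gpus is one of the PyTorch Lightning Trainer flags merged in via add_argparse_args:

    python main.py --name my_run --base configs/train.yaml --train --gpus 0,1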


================================================
FILE: metrics/clip_similarity.py
================================================
from __future__ import annotations

import clip
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange


class ClipSimilarity(nn.Module):
    def __init__(self, name: str = "ViT-L/14"):
        super().__init__()
        assert name in ("RN50", "RN101", "RN50x4", "RN50x16", "RN50x64", "ViT-B/32", "ViT-B/16", "ViT-L/14", "ViT-L/14@336px")  # fmt: skip
        self.size = {"RN50x4": 288, "RN50x16": 384, "RN50x64": 448, "ViT-L/14@336px": 336}.get(name, 224)

        self.model, _ = clip.load(name, device="cpu", download_root="./")
        self.model.eval().requires_grad_(False)

        self.register_buffer("mean", torch.tensor((0.48145466, 0.4578275, 0.40821073)))
        self.register_buffer("std", torch.tensor((0.26862954, 0.26130258, 0.27577711)))

    def encode_text(self, text: list[str]) -> torch.Tensor:
        text = clip.tokenize(text, truncate=True).to(next(self.parameters()).device)
        text_features = self.model.encode_text(text)
        text_features = text_features / text_features.norm(dim=1, keepdim=True)
        return text_features

    def encode_image(self, image: torch.Tensor) -> torch.Tensor:  # Input images in range [0, 1].
        image = F.interpolate(image.float(), size=self.size, mode="bicubic", align_corners=False)
        image = image - rearrange(self.mean, "c -> 1 c 1 1")
        image = image / rearrange(self.std, "c -> 1 c 1 1")
        image_features = self.model.encode_image(image)
        image_features = image_features / image_features.norm(dim=1, keepdim=True)
        return image_features

    def forward(
        self, image_0: torch.Tensor, image_1: torch.Tensor, text_0: list[str], text_1: list[str]
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        image_features_0 = self.encode_image(image_0)
        image_features_1 = self.encode_image(image_1)
        text_features_0 = self.encode_text(text_0)
        text_features_1 = self.encode_text(text_1)
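        # sim_0/sim_1: image-caption agreement before/after the edit; sim_direction:
        # CLIP directional similarity between the image change and the caption change;
        # sim_image: how much the edited image still resembles the input.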
        sim_0 = F.cosine_similarity(image_features_0, text_features_0)
        sim_1 = F.cosine_similarity(image_features_1, text_features_1)
        sim_direction = F.cosine_similarity(image_features_1 - image_features_0, text_features_1 - text_features_0)
        sim_image = F.cosine_similarity(image_features_0, image_features_1)
        return sim_0, sim_1, sim_direction, sim_image


================================================
FILE: metrics/compute_metrics.py
================================================
from __future__ import annotations

import math
import random
import sys
from argparse import ArgumentParser

import einops
import k_diffusion as K
import numpy as np
import torch
import torch.nn as nn
from tqdm.auto import tqdm
from einops import rearrange
from omegaconf import OmegaConf
from PIL import Image, ImageOps
from torch import autocast

import json
import matplotlib.pyplot as plt
import seaborn
from pathlib import Path

sys.path.append("./")

from clip_similarity import ClipSimilarity
from edit_dataset import EditDatasetEval

sys.path.append("./stable_diffusion")

from ldm.util import instantiate_from_config


class CFGDenoiser(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.inner_model = model

    def forward(self, z, sigma, cond, uncond, text_cfg_scale, image_cfg_scale):
        cfg_z = einops.repeat(z, "1 ... -> n ...", n=3)
        cfg_sigma = einops.repeat(sigma, "1 ... -> n ...", n=3)
        cfg_cond = {
            "c_crossattn": [torch.cat([cond["c_crossattn"][0], uncond["c_crossattn"][0], uncond["c_crossattn"][0]])],
            "c_concat": [torch.cat([cond["c_concat"][0], cond["c_concat"][0], uncond["c_concat"][0]])],
        }
        out_cond, out_img_cond, out_uncond = self.inner_model(cfg_z, cfg_sigma, cond=cfg_cond).chunk(3)
        return out_uncond + text_cfg_scale * (out_cond - out_img_cond) + image_cfg_scale * (out_img_cond - out_uncond)


def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    if vae_ckpt is not None:
        print(f"Loading VAE from {vae_ckpt}")
        vae_sd = torch.load(vae_ckpt, map_location="cpu")["state_dict"]
        sd = {
            k: vae_sd[k[len("first_stage_model.") :]] if k.startswith("first_stage_model.") else v
            for k, v in sd.items()
        }
    model = instantiate_from_config(config.model)
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)
    return model

class ImageEditor(nn.Module):
    def __init__(self, config, ckpt, vae_ckpt=None):
        super().__init__()
        
        config = OmegaConf.load(config)
        self.model = load_model_from_config(config, ckpt, vae_ckpt)
        self.model.eval().cuda()
        self.model_wrap = K.external.CompVisDenoiser(self.model)
        self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
        self.null_token = self.model.get_learned_conditioning([""])

    def forward(
        self,
        image: torch.Tensor,
        edit: str,
        scale_txt: float = 7.5,
        scale_img: float = 1.0,
        steps: int = 100,
    ) -> torch.Tensor:
        assert image.dim() == 3
        assert image.size(1) % 64 == 0
        assert image.size(2) % 64 == 0
        with torch.no_grad(), autocast("cuda"), self.model.ema_scope():
            cond = {
                "c_crossattn": [self.model.get_learned_conditioning([edit])],
                "c_concat": [self.model.encode_first_stage(image[None]).mode()],
            }
            uncond = {
                "c_crossattn": [self.model.get_learned_conditioning([""])],
                "c_concat": [torch.zeros_like(cond["c_concat"][0])],
            }
            extra_args = {
                "uncond": uncond,
                "cond": cond,
                "image_cfg_scale": scale_img,
                "text_cfg_scale": scale_txt,
            }
            sigmas = self.model_wrap.get_sigmas(steps)
            x = torch.randn_like(cond["c_concat"][0]) * sigmas[0]
            x = K.sampling.sample_euler_ancestral(self.model_wrap_cfg, x, sigmas, extra_args=extra_args)
            x = self.model.decode_first_stage(x)[0]
            return x


def compute_metrics(config,
                    model_path,
                    vae_ckpt,
                    data_path,
                    output_path,
                    scales_img,
                    scales_txt,
                    num_samples=5000,
                    split="test",
                    steps=50,
                    res=512,
                    seed=0):
    editor = ImageEditor(config, model_path, vae_ckpt).cuda()
    clip_similarity = ClipSimilarity().cuda()

    outpath = Path(output_path, f"n={num_samples}_p={split}_s={steps}_r={res}_e={seed}.jsonl")
    Path(output_path).mkdir(parents=True, exist_ok=True)

    for scale_txt in scales_txt:
        for scale_img in scales_img:
            dataset = EditDatasetEval(
                    path=data_path, 
                    split=split, 
                    res=res
                    )
            assert num_samples <= len(dataset)
            print(f'Processing t={scale_txt}, i={scale_img}')
            torch.manual_seed(seed)
            perm = torch.randperm(len(dataset))
            count = 0
            i = 0

            sim_0_avg = 0
            sim_1_avg = 0
            sim_direction_avg = 0
            sim_image_avg = 0

            pbar = tqdm(total=num_samples)
            while count < num_samples:
                
                idx = perm[i].item()
                sample = dataset[idx]
                i += 1

                gen = editor(sample["image_0"].cuda(), sample["edit"], scale_txt=scale_txt, scale_img=scale_img, steps=steps)

                sim_0, sim_1, sim_direction, sim_image = clip_similarity(
                    sample["image_0"][None].cuda(), gen[None].cuda(), [sample["input_prompt"]], [sample["output_prompt"]]
                )
                sim_0_avg += sim_0.item()
                sim_1_avg += sim_1.item()
                sim_direction_avg += sim_direction.item()
                sim_image_avg += sim_image.item()
                count += 1
                pbar.update(1)
            pbar.close()

            sim_0_avg /= count
            sim_1_avg /= count
            sim_direction_avg /= count
            sim_image_avg /= count

            with open(outpath, "a") as f:
                f.write(f"{json.dumps(dict(sim_0=sim_0_avg, sim_1=sim_1_avg, sim_direction=sim_direction_avg, sim_image=sim_image_avg, num_samples=num_samples, split=split, scale_txt=scale_txt, scale_img=scale_img, steps=steps, res=res, seed=seed))}\n")
    return outpath

def plot_metrics(metrics_file, output_path):
    
    with open(metrics_file, 'r') as f:
        data = [json.loads(line) for line in f]
        
    plt.rcParams.update({'font.size': 11.5})
    seaborn.set_style("darkgrid")
    plt.figure(figsize=(20.5 * 0.7, 10.8 * 0.7), dpi=200)

    x = [d["sim_direction"] for d in data]
    y = [d["sim_image"] for d in data]

    plt.plot(x, y, marker='o', linewidth=2, markersize=4)

    plt.xlabel("CLIP Text-Image Direction Similarity", labelpad=10)
    plt.ylabel("CLIP Image Similarity", labelpad=10)

    plt.savefig(Path(output_path) / Path("plot.pdf"), bbox_inches="tight")

def main():
    parser = ArgumentParser()
    parser.add_argument("--resolution", default=512, type=int)
    parser.add_argument("--steps", default=100, type=int)
    parser.add_argument("--config", default="configs/generate.yaml", type=str)
    parser.add_argument("--output_path", default="analysis/", type=str)
    parser.add_argument("--ckpt", default="checkpoints/instruct-pix2pix-00-22000.ckpt", type=str)
    parser.add_argument("--dataset", default="data/clip-filtered-dataset/", type=str)
    parser.add_argument("--vae-ckpt", default=None, type=str)
    args = parser.parse_args()

    scales_img = [1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2]
    scales_txt = [7.5]
    
    metrics_file = compute_metrics(
            args.config,
            args.ckpt, 
            args.vae_ckpt,
            args.dataset, 
            args.output_path, 
            scales_img, 
            scales_txt,
            steps = args.steps,
            )
    
    plot_metrics(metrics_file, args.output_path)
        


if __name__ == "__main__":
    main()
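
One plausible invocation, run from the repository root so the relative sys.path entries above resolve; the values shown simply restate the script's own defaults:

    python metrics/compute_metrics.py --ckpt checkpoints/instruct-pix2pix-00-22000.ckpt --dataset data/clip-filtered-dataset/ --output_path analysis/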


================================================
FILE: prompt_app.py
================================================
from __future__ import annotations

from argparse import ArgumentParser

import datasets
import gradio as gr
import numpy as np
import openai

from dataset_creation.generate_txt_dataset import generate


def main(openai_model: str):
    dataset = datasets.load_dataset("ChristophSchuhmann/improved_aesthetics_6.5plus", split="train")
    captions = dataset[np.random.permutation(len(dataset))]["TEXT"]
    index = 0

    def click_random():
        nonlocal index
        output = captions[index]
        index = (index + 1) % len(captions)
        return output

    def click_generate(input: str):
        if input == "":
            raise gr.Error("Input caption is missing!")
        edit_output = generate(openai_model, input)
        if edit_output is None:
            return "Failed :(", "Failed :("
        return edit_output

    with gr.Blocks(css="footer {visibility: hidden}") as demo:
        txt_input = gr.Textbox(lines=3, label="Input Caption", interactive=True, placeholder="Type image caption here...")  # fmt: skip
        txt_edit = gr.Textbox(lines=1, label="GPT-3 Instruction", interactive=False)
        txt_output = gr.Textbox(lines=3, label="GPT3 Edited Caption", interactive=False)

        with gr.Row():
            clear_btn = gr.Button("Clear")
            random_btn = gr.Button("Random Input")
            generate_btn = gr.Button("Generate Instruction + Edited Caption")

            clear_btn.click(fn=lambda: ("", "", ""), inputs=[], outputs=[txt_input, txt_edit, txt_output])
            random_btn.click(fn=click_random, inputs=[], outputs=[txt_input])
            generate_btn.click(fn=click_generate, inputs=[txt_input], outputs=[txt_edit, txt_output])

    demo.launch(share=True)


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--openai-api-key", required=True, type=str)
    parser.add_argument("--openai-model", required=True, type=str)
    args = parser.parse_args()
    openai.api_key = args.openai_api_key
    main(args.openai_model)
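
A usage sketch for the prompt app; both values are placeholders (the model id should name whichever GPT-3 model dataset_creation/generate_txt_dataset.py expects):

    python prompt_app.py --openai-api-key YOUR_KEY --openai-model YOUR_MODEL_ID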


================================================
FILE: scripts/download_checkpoints.sh
================================================
#!/bin/bash

SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

mkdir -p $SCRIPT_DIR/../checkpoints

curl http://instruct-pix2pix.eecs.berkeley.edu/instruct-pix2pix-00-22000.ckpt -o $SCRIPT_DIR/../checkpoints/instruct-pix2pix-00-22000.ckpt


================================================
FILE: scripts/download_data.sh
================================================
#!/bin/bash

# Make data folder relative to script location
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

mkdir -p $SCRIPT_DIR/../data

# Copy text datasets
wget -q --show-progress http://instruct-pix2pix.eecs.berkeley.edu/gpt-generated-prompts.jsonl -O $SCRIPT_DIR/../data/gpt-generated-prompts.jsonl
wget -q --show-progress http://instruct-pix2pix.eecs.berkeley.edu/human-written-prompts.jsonl -O $SCRIPT_DIR/../data/human-written-prompts.jsonl

# If dataset name isn't provided, exit. 
if [ -z "$1" ]
then 
	exit 0 
fi

# Copy dataset files
mkdir $SCRIPT_DIR/../data/$1
wget -A zip,json -R "index.html*" -q --show-progress -r --no-parent http://instruct-pix2pix.eecs.berkeley.edu/$1/ -nd -P $SCRIPT_DIR/../data/$1/

# Unzip to folders
unzip $SCRIPT_DIR/../data/$1/\*.zip -d $SCRIPT_DIR/../data/$1/

# Cleanup
rm -f $SCRIPT_DIR/../data/$1/*.zip
rm -f $SCRIPT_DIR/../data/$1/*.html
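
One plausible invocation; the dataset name is the script's single argument, and "clip-filtered-dataset" here simply mirrors the default path used by metrics/compute_metrics.py:

    bash scripts/download_data.sh clip-filtered-dataset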


================================================
FILE: scripts/download_pretrained_sd.sh
================================================
#!/bin/bash

SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

mkdir -p $SCRIPT_DIR/../stable_diffusion/models/ldm/stable-diffusion-v1
curl -L https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -o $SCRIPT_DIR/../stable_diffusion/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
curl -L https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt -o $SCRIPT_DIR/../stable_diffusion/models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt


================================================
FILE: stable_diffusion/LICENSE
================================================
Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors

CreativeML Open RAIL-M
dated August 22, 2022

Section I: PREAMBLE

Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation.

Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations.

In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation.

Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI.

This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model.

NOW THEREFORE, You and Licensor agree as follows:

1. Definitions

- "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document.
- "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
- "Output" means the results of operating a Model as embodied in informational content resulting therefrom.
- "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.
- "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.
- "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.
- "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.
- "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model.
- "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator.
- "Third Parties" means individuals or legal entities that are not under common control with Licensor or You.
- "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
- "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model.

Section II: INTELLECTUAL PROPERTY RIGHTS

Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.

2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.
3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed.

Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION

4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:
Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.
You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;
You must cause any modified files to carry prominent notices stating that You changed the files;
You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model.
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.
5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).
6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.

Section IV: OTHER PROVISIONS

7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License, update the Model through electronic means, or modify the Output of the Model based on updates. You shall undertake reasonable efforts to use the latest version of the Model.
8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.
9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.
10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.

END OF TERMS AND CONDITIONS




Attachment A

Use Restrictions

You agree not to use the Model or Derivatives of the Model:
- In any way that violates any applicable national, federal, state, local or international law or regulation;
- For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
- To generate or disseminate verifiably false information and/or content with the purpose of harming others;
- To generate or disseminate personal identifiable information that can be used to harm an individual;
- To defame, disparage or otherwise harass others;
- For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation;
- For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;
- To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
- For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories;
- To provide medical advice and medical results interpretation;
- To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use).


================================================
FILE: stable_diffusion/README.md
================================================
# Stable Diffusion
*Stable Diffusion was made possible thanks to a collaboration with [Stability AI](https://stability.ai/) and [Runway](https://runwayml.com/) and builds upon our previous work:*

[**High-Resolution Image Synthesis with Latent Diffusion Models**](https://ommer-lab.com/research/latent-diffusion-models/)<br/>
[Robin Rombach](https://github.com/rromb)\*,
[Andreas Blattmann](https://github.com/ablattmann)\*,
[Dominik Lorenz](https://github.com/qp-qp),
[Patrick Esser](https://github.com/pesser),
[Björn Ommer](https://hci.iwr.uni-heidelberg.de/Staff/bommer)<br/>
_[CVPR '22 Oral](https://openaccess.thecvf.com/content/CVPR2022/html/Rombach_High-Resolution_Image_Synthesis_With_Latent_Diffusion_Models_CVPR_2022_paper.html) |
[GitHub](https://github.com/CompVis/latent-diffusion) | [arXiv](https://arxiv.org/abs/2112.10752) | [Project page](https://ommer-lab.com/research/latent-diffusion-models/)_

![txt2img-stable2](assets/stable-samples/txt2img/merged-0006.png)
[Stable Diffusion](#stable-diffusion-v1) is a latent text-to-image diffusion
model.
Thanks to a generous compute donation from [Stability AI](https://stability.ai/) and support from [LAION](https://laion.ai/), we were able to train a Latent Diffusion Model on 512x512 images from a subset of the [LAION-5B](https://laion.ai/blog/laion-5b/) database. 
Similar to Google's [Imagen](https://arxiv.org/abs/2205.11487), 
this model uses a frozen CLIP ViT-L/14 text encoder to condition the model on text prompts.
With its 860M UNet and 123M text encoder, the model is relatively lightweight and runs on a GPU with at least 10GB VRAM.
See [this section](#stable-diffusion-v1) below and the [model card](https://huggingface.co/CompVis/stable-diffusion).

  
## Requirements
A suitable [conda](https://conda.io/) environment named `ldm` can be created
and activated with:

```
conda env create -f environment.yaml
conda activate ldm
```

You can also update an existing [latent diffusion](https://github.com/CompVis/latent-diffusion) environment by running

```
conda install pytorch torchvision -c pytorch
pip install transformers==4.19.2 diffusers invisible-watermark
pip install -e .
``` 


## Stable Diffusion v1

Stable Diffusion v1 refers to a specific configuration of the model
architecture that uses a downsampling-factor 8 autoencoder with an 860M UNet
and CLIP ViT-L/14 text encoder for the diffusion model. The model was pretrained on 256x256 images and 
then finetuned on 512x512 images.

*Note: Stable Diffusion v1 is a general text-to-image diffusion model and therefore mirrors biases and (mis-)conceptions that are present
in its training data. 
Details on the training procedure and data, as well as the intended use of the model can be found in the corresponding [model card](Stable_Diffusion_v1_Model_Card.md).*

The weights are available via [the CompVis organization at Hugging Face](https://huggingface.co/CompVis) under [a license which contains specific use-based restrictions to prevent misuse and harm as informed by the model card, but otherwise remains permissive](LICENSE). While commercial use is permitted under the terms of the license, **we do not recommend using the provided weights for services or products without additional safety mechanisms and considerations**, since there are [known limitations and biases](Stable_Diffusion_v1_Model_Card.md#limitations-and-bias) of the weights, and research on safe and ethical deployment of general text-to-image models is an ongoing effort. **The weights are research artifacts and should be treated as such.**

[The CreativeML OpenRAIL M license](LICENSE) is an [Open RAIL M license](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), adapted from the work that [BigScience](https://bigscience.huggingface.co/) and [the RAIL Initiative](https://www.licenses.ai/) are jointly carrying in the area of responsible AI licensing. See also [the article about the BLOOM Open RAIL license](https://bigscience.huggingface.co/blog/the-bigscience-rail-license) on which our license is based.

### Weights

We currently provide the following checkpoints:

- `sd-v1-1.ckpt`: 237k steps at resolution `256x256` on [laion2B-en](https://huggingface.co/datasets/laion/laion2B-en).
  194k steps at resolution `512x512` on [laion-high-resolution](https://huggingface.co/datasets/laion/laion-high-resolution) (170M examples from LAION-5B with resolution `>= 1024x1024`).
- `sd-v1-2.ckpt`: Resumed from `sd-v1-1.ckpt`.
  515k steps at resolution `512x512` on [laion-aesthetics v2 5+](https://laion.ai/blog/laion-aesthetics/) (a subset of laion2B-en with estimated aesthetics score `> 5.0`, and additionally
filtered to images with an original size `>= 512x512`, and an estimated watermark probability `< 0.5`. The watermark estimate is from the [LAION-5B](https://laion.ai/blog/laion-5b/) metadata, the aesthetics score is estimated using the [LAION-Aesthetics Predictor V2](https://github.com/christophschuhmann/improved-aesthetic-predictor)).
- `sd-v1-3.ckpt`: Resumed from `sd-v1-2.ckpt`. 195k steps at resolution `512x512` on "laion-aesthetics v2 5+" and 10\% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598).
- `sd-v1-4.ckpt`: Resumed from `sd-v1-2.ckpt`. 225k steps at resolution `512x512` on "laion-aesthetics v2 5+" and 10\% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598).

Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0,
5.0, 6.0, 7.0, 8.0) and 50 PLMS sampling
steps show the relative improvements of the checkpoints:
![sd evaluation results](assets/v1-variants-scores.jpg)



### Text-to-Image with Stable Diffusion
![txt2img-stable2](assets/stable-samples/txt2img/merged-0005.png)
![txt2img-stable2](assets/stable-samples/txt2img/merged-0007.png)

Stable Diffusion is a latent diffusion model conditioned on the (non-pooled) text embeddings of a CLIP ViT-L/14 text encoder.
We provide a [reference script for sampling](#reference-sampling-script), but
there is also a [diffusers integration](#diffusers-integration), where we
expect to see more active community development.
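
For reference, the "non-pooled" text embeddings mentioned above are simply the per-token hidden states of the CLIP text encoder. A minimal, illustrative sketch using the Hugging Face `transformers` library (not part of this repository) could look like this:

```py
from transformers import CLIPTokenizer, CLIPTextModel

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

tokens = tokenizer(
    ["a photograph of an astronaut riding a horse"],
    padding="max_length", max_length=77, truncation=True, return_tensors="pt",
)
# last_hidden_state holds the per-token (non-pooled) embeddings the UNet
# cross-attends to: shape (batch, 77, 768) for CLIP ViT-L/14.
text_embeddings = text_encoder(**tokens).last_hidden_state
```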

#### Reference Sampling Script

We provide a reference sampling script, which incorporates

- a [Safety Checker Module](https://github.com/CompVis/stable-diffusion/pull/36),
  to reduce the probability of explicit outputs,
- an [invisible watermarking](https://github.com/ShieldMnt/invisible-watermark)
  of the outputs, to help viewers [identify the images as machine-generated](scripts/tests/test_watermark.py).

After [obtaining the `stable-diffusion-v1-*-original` weights](#weights), link them
```
mkdir -p models/ldm/stable-diffusion-v1/
ln -s <path/to/model.ckpt> models/ldm/stable-diffusion-v1/model.ckpt 
```
and sample with
```
python scripts/txt2img.py --prompt "a photograph of an astronaut riding a horse" --plms 
```

By default, this uses a guidance scale of `--scale 7.5`, [Katherine Crowson's implementation](https://github.com/CompVis/latent-diffusion/pull/51) of the [PLMS](https://arxiv.org/abs/2202.09778) sampler, 
and renders images of size 512x512 (which it was trained on) in 50 steps. All supported arguments are listed below (type `python scripts/txt2img.py --help`).


```commandline
usage: txt2img.py [-h] [--prompt [PROMPT]] [--outdir [OUTDIR]] [--skip_grid] [--skip_save] [--ddim_steps DDIM_STEPS] [--plms] [--laion400m] [--fixed_code] [--ddim_eta DDIM_ETA]
                  [--n_iter N_ITER] [--H H] [--W W] [--C C] [--f F] [--n_samples N_SAMPLES] [--n_rows N_ROWS] [--scale SCALE] [--from-file FROM_FILE] [--config CONFIG] [--ckpt CKPT]
                  [--seed SEED] [--precision {full,autocast}]

optional arguments:
  -h, --help            show this help message and exit
  --prompt [PROMPT]     the prompt to render
  --outdir [OUTDIR]     dir to write results to
  --skip_grid           do not save a grid, only individual samples. Helpful when evaluating lots of samples
  --skip_save           do not save individual samples. For speed measurements.
  --ddim_steps DDIM_STEPS
                        number of ddim sampling steps
  --plms                use plms sampling
  --laion400m           uses the LAION400M model
  --fixed_code          if enabled, uses the same starting code across samples
  --ddim_eta DDIM_ETA   ddim eta (eta=0.0 corresponds to deterministic sampling)
  --n_iter N_ITER       sample this often
  --H H                 image height, in pixel space
  --W W                 image width, in pixel space
  --C C                 latent channels
  --f F                 downsampling factor
  --n_samples N_SAMPLES
                        how many samples to produce for each given prompt. A.k.a. batch size
  --n_rows N_ROWS       rows in the grid (default: n_samples)
  --scale SCALE         unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))
  --from-file FROM_FILE
                        if specified, load prompts from this file
  --config CONFIG       path to config which constructs model
  --ckpt CKPT           path to checkpoint of model
  --seed SEED           the seed (for reproducible sampling)
  --precision {full,autocast}
                        evaluate at this precision
```
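
The `--scale` argument implements classifier-free guidance: at each sampling step the unconditional and conditional noise predictions are combined as in the help text above. The samplers under `ldm/models/diffusion/` perform this combination; the following is only an illustrative sketch of the arithmetic:

```py
import torch

def classifier_free_guidance(eps_uncond: torch.Tensor,
                             eps_cond: torch.Tensor,
                             scale: float) -> torch.Tensor:
    # eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty));
    # scale = 1.0 reproduces the purely conditional prediction, while larger
    # values follow the prompt more closely at the cost of sample diversity.
    return eps_uncond + scale * (eps_cond - eps_uncond)
```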
Note: The inference config for all v1 versions is designed to be used with EMA-only checkpoints. 
For this reason `use_ema=False` is set in the configuration; otherwise the code will try to switch from
non-EMA to EMA weights. If you want to examine the effect of EMA vs no EMA, we provide "full" checkpoints
which contain both types of weights. For these, `use_ema=False` will load and use the non-EMA weights.


#### Diffusers Integration

A simple way to download and sample Stable Diffusion is by using the [diffusers library](https://github.com/huggingface/diffusers/tree/main#new--stable-diffusion-is-now-fully-compatible-with-diffusers):
```py
# make sure you're logged in with `huggingface-cli login`
from torch import autocast
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
	"CompVis/stable-diffusion-v1-4", 
	use_auth_token=True
).to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
with autocast("cuda"):
    image = pipe(prompt)["sample"][0]  
    
image.save("astronaut_rides_horse.png")
```


### Image Modification with Stable Diffusion

By using a diffusion-denoising mechanism as first proposed by [SDEdit](https://arxiv.org/abs/2108.01073), the model can be used for different 
tasks such as text-guided image-to-image translation and upscaling. Similar to the txt2img sampling script, 
we provide a script to perform image modification with Stable Diffusion.  

The following describes an example where a rough sketch made in [Pinta](https://www.pinta-project.com/) is converted into a detailed artwork.
```
python scripts/img2img.py --prompt "A fantasy landscape, trending on artstation" --init-img <path-to-img.jpg> --strength 0.8
```
Here, `strength` is a value between 0.0 and 1.0 that controls the amount of noise added to the input image.
Values approaching 1.0 allow for many variations but will also produce images that are not semantically consistent with the input. See the following example.
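
If you prefer the [diffusers library](#diffusers-integration), roughly the same behaviour is exposed through its img2img pipeline. The snippet below is only a sketch (file names are placeholders, and argument names may differ slightly between diffusers versions):

```py
from PIL import Image
from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", use_auth_token=True
).to("cuda")

init_image = Image.open("sketch-mountains-input.jpg").convert("RGB").resize((512, 512))

# strength plays the same role as above: higher values add more noise to the
# input and therefore allow larger deviations from it.
result = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.8,
    guidance_scale=7.5,
)
result.images[0].save("fantasy_landscape.png")
```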

**Input**

![sketch-in](assets/stable-samples/img2img/sketch-mountains-input.jpg)

**Outputs**

![out3](assets/stable-samples/img2img/mountains-3.png)
![out2](assets/stable-samples/img2img/mountains-2.png)

This procedure can, for example, also be used to upscale samples from the base model.


## Comments 

- Our codebase for the diffusion models builds heavily on [OpenAI's ADM codebase](https://github.com/openai/guided-diffusion)
and [https://github.com/lucidrains/denoising-diffusion-pytorch](https://github.com/lucidrains/denoising-diffusion-pytorch). 
Thanks for open-sourcing!

- The implementation of the transformer encoder is from [x-transformers](https://github.com/lucidrains/x-transformers) by [lucidrains](https://github.com/lucidrains?tab=repositories). 


## BibTeX

```
@misc{rombach2021highresolution,
      title={High-Resolution Image Synthesis with Latent Diffusion Models}, 
      author={Robin Rombach and Andreas Blattmann and Dominik Lorenz and Patrick Esser and Björn Ommer},
      year={2021},
      eprint={2112.10752},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```




================================================
FILE: stable_diffusion/Stable_Diffusion_v1_Model_Card.md
================================================
# Stable Diffusion v1 Model Card
This model card focuses on the model associated with the Stable Diffusion model, available [here](https://github.com/CompVis/stable-diffusion).

## Model Details
- **Developed by:** Robin Rombach, Patrick Esser
- **Model type:** Diffusion-based text-to-image generation model
- **Language(s):** English
- **License:** [Proprietary](LICENSE)
- **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses a fixed, pretrained text encoder ([CLIP ViT-L/14](https://arxiv.org/abs/2103.00020)) as suggested in the [Imagen paper](https://arxiv.org/abs/2205.11487).
- **Resources for more information:** [GitHub Repository](https://github.com/CompVis/stable-diffusion), [Paper](https://arxiv.org/abs/2112.10752).
- **Cite as:**

      @InProceedings{Rombach_2022_CVPR,
          author    = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
          title     = {High-Resolution Image Synthesis With Latent Diffusion Models},
          booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
          month     = {June},
          year      = {2022},
          pages     = {10684-10695}
      }

# Uses

## Direct Use 
The model is intended for research purposes only. Possible research areas and
tasks include

- Safe deployment of models which have the potential to generate harmful content.
- Probing and understanding the limitations and biases of generative models.
- Generation of artworks and use in design and other artistic processes.
- Applications in educational or creative tools.
- Research on generative models.

Excluded uses are described below.

### Misuse, Malicious Use, and Out-of-Scope Use
_Note: This section is taken from the [DALLE-MINI model card](https://huggingface.co/dalle-mini/dalle-mini), but applies in the same way to Stable Diffusion v1_.

The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.

#### Out-of-Scope Use
The model was not trained to produce factual or true representations of people or events, and therefore using the model to generate such content is out of scope for its abilities.

#### Misuse and Malicious Use
Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to:

- Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc.
- Intentionally promoting or propagating discriminatory content or harmful stereotypes.
- Impersonating individuals without their consent.
- Sexual content without consent of the people who might see it.
- Mis- and disinformation.
- Representations of egregious violence and gore.
- Sharing of copyrighted or licensed material in violation of its terms of use.
- Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use.

## Limitations and Bias

### Limitations

- The model does not achieve perfect photorealism.
- The model cannot render legible text.
- The model does not perform well on more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere”.
- Faces and people in general may not be generated properly.
- The model was trained mainly with English captions and will not work as well in other languages.
- The autoencoding part of the model is lossy.
- The model was trained on a large-scale dataset
  [LAION-5B](https://laion.ai/blog/laion-5b/) which contains adult material
  and is not fit for product use without additional safety mechanisms and
  considerations.
- No additional measures were used to deduplicate the dataset. As a result, we observe some degree of memorization for images that are duplicated in the training data.
  The training data can be searched at [https://rom1504.github.io/clip-retrieval/](https://rom1504.github.io/clip-retrieval/) to possibly assist in the detection of memorized images.

### Bias
While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases. 
Stable Diffusion v1 was primarily trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/), 
which consists of images that are limited to English descriptions. 
Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for. 
This affects the overall output of the model, as white and western cultures are often set as the default. Further, the 
ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts.
Stable Diffusion v1 mirrors and exacerbates biases to such a degree that viewer discretion must be advised irrespective of the input or its intent.


## Training

**Training Data**
The model developers used the following dataset for training the model:

- LAION-5B and subsets thereof (see next section)

**Training Procedure**
Stable Diffusion v1 is a latent diffusion model which combines an autoencoder with a diffusion model that is trained in the latent space of the autoencoder. During training, 

- Images are encoded through an encoder, which turns images into latent representations. The autoencoder uses a relative downsampling factor of 8 and maps images of shape H x W x 3 to latents of shape H/f x W/f x 4 (see the shape check after this list).
- Text prompts are encoded through a ViT-L/14 text-encoder.
- The non-pooled output of the text encoder is fed into the UNet backbone of the latent diffusion model via cross-attention.
- The loss is a reconstruction objective between the noise that was added to the latent and the prediction made by the UNet.
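
A concrete instance of the shape mapping above, for the Stable Diffusion v1 setting (f = 8; the specific numbers are illustrative only):

```py
# A 512 x 512 x 3 RGB image maps to the 64 x 64 x 4 latent the diffusion UNet sees.
H, W, f = 512, 512, 8
image_shape = (H, W, 3)
latent_shape = (H // f, W // f, 4)
assert latent_shape == (64, 64, 4)
```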

We currently provide the following checkpoints:

- `sd-v1-1.ckpt`: 237k steps at resolution `256x256` on [laion2B-en](https://huggingface.co/datasets/laion/laion2B-en).
  194k steps at resolution `512x512` on [laion-high-resolution](https://huggingface.co/datasets/laion/laion-high-resolution) (170M examples from LAION-5B with resolution `>= 1024x1024`).
- `sd-v1-2.ckpt`: Resumed from `sd-v1-1.ckpt`.
  515k steps at resolution `512x512` on [laion-aesthetics v2 5+](https://laion.ai/blog/laion-aesthetics/) (a subset of laion2B-en with estimated aesthetics score `> 5.0`, and additionally
filtered to images with an original size `>= 512x512`, and an estimated watermark probability `< 0.5`. The watermark estimate is from the [LAION-5B](https://laion.ai/blog/laion-5b/) metadata, the aesthetics score is estimated using the [LAION-Aesthetics Predictor V2](https://github.com/christophschuhmann/improved-aesthetic-predictor)).
- `sd-v1-3.ckpt`: Resumed from `sd-v1-2.ckpt`. 195k steps at resolution `512x512` on "laion-aesthetics v2 5+" and 10\% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598).
- `sd-v1-4.ckpt`: Resumed from `sd-v1-2.ckpt`. 225k steps at resolution `512x512` on "laion-aesthetics v2 5+" and 10\% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598).

- **Hardware:** 32 x 8 x A100 GPUs
- **Optimizer:** AdamW
- **Gradient Accumulations**: 2
- **Batch:** 32 x 8 x 2 x 4 = 2048
- **Learning rate:** warmup to 0.0001 for 10,000 steps and then kept constant

## Evaluation Results 
Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0,
5.0, 6.0, 7.0, 8.0) and 50 PLMS sampling
steps show the relative improvements of the checkpoints:

![pareto](assets/v1-variants-scores.jpg) 

Evaluated using 50 PLMS steps and 10000 random prompts from the COCO2017 validation set at 512x512 resolution. Not optimized for FID scores.

## Environmental Impact

**Stable Diffusion v1** **Estimated Emissions**
Based on the information below, we estimate the following CO2 emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). The hardware, runtime, cloud provider, and compute region were used to estimate the carbon impact.

- **Hardware Type:** A100 PCIe 40GB
- **Hours used:** 150000
- **Cloud Provider:** AWS
- **Compute Region:** US-east
- **Carbon Emitted (Power consumption x Time x Carbon produced based on location of power grid):** 11250 kg CO2 eq. (a rough consistency check follows)
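
As a rough, back-of-the-envelope consistency check (the per-GPU power draw and grid carbon intensity below are assumed values, not figures stated in this card):

```py
gpu_hours = 150_000            # "Hours used" above
avg_power_kw = 0.25            # assumed average draw of an A100 PCIe 40GB
grid_kgco2_per_kwh = 0.3       # assumed carbon intensity of the compute region
print(gpu_hours * avg_power_kw * grid_kgco2_per_kwh)  # 11250.0 kg CO2 eq.
```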

## Citation
    @InProceedings{Rombach_2022_CVPR,
        author    = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
        title     = {High-Resolution Image Synthesis With Latent Diffusion Models},
        booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
        month     = {June},
        year      = {2022},
        pages     = {10684-10695}
    }

*This model card was written by: Robin Rombach and Patrick Esser and is based on the [DALL-E Mini model card](https://huggingface.co/dalle-mini/dalle-mini).*


================================================
FILE: stable_diffusion/assets/results.gif.REMOVED.git-id
================================================
82b6590e670a32196093cc6333ea19e6547d07de

================================================
FILE: stable_diffusion/assets/stable-samples/img2img/upscaling-in.png.REMOVED.git-id
================================================
501c31c21751664957e69ce52cad1818b6d2f4ce

================================================
FILE: stable_diffusion/assets/stable-samples/img2img/upscaling-out.png.REMOVED.git-id
================================================
1c4bb25a779f34d86b2d90e584ac67af91bb1303

================================================
FILE: stable_diffusion/assets/stable-samples/txt2img/merged-0005.png.REMOVED.git-id
================================================
ca0a1af206555f0f208a1ab879e95efedc1b1c5b

================================================
FILE: stable_diffusion/assets/stable-samples/txt2img/merged-0006.png.REMOVED.git-id
================================================
999f3703230580e8c89e9081abd6a1f8f50896d4

================================================
FILE: stable_diffusion/assets/stable-samples/txt2img/merged-0007.png.REMOVED.git-id
================================================
af390acaf601283782d6f479d4cade4d78e30b26

================================================
FILE: stable_diffusion/assets/txt2img-preview.png.REMOVED.git-id
================================================
51ee1c235dfdc63d4c41de7d303d03730e43c33c

================================================
FILE: stable_diffusion/configs/autoencoder/autoencoder_kl_16x16x16.yaml
================================================
model:
  base_learning_rate: 4.5e-6
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: "val/rec_loss"
    embed_dim: 16
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 0.000001
        disc_weight: 0.5

    ddconfig:
      double_z: True
      z_channels: 16
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult: [ 1,1,2,2,4]  # num_down = len(ch_mult)-1
      num_res_blocks: 2
      attn_resolutions: [16]
      dropout: 0.0


data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 12
    wrap: True
    train:
      target: ldm.data.imagenet.ImageNetSRTrain
      params:
        size: 256
        degradation: pil_nearest
    validation:
      target: ldm.data.imagenet.ImageNetSRValidation
      params:
        size: 256
        degradation: pil_nearest

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 1000
        max_images: 8
        increase_log_steps: True

  trainer:
    benchmark: True
    accumulate_grad_batches: 2


================================================
FILE: stable_diffusion/configs/autoencoder/autoencoder_kl_32x32x4.yaml
================================================
model:
  base_learning_rate: 4.5e-6
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: "val/rec_loss"
    embed_dim: 4
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 0.000001
        disc_weight: 0.5

    ddconfig:
      double_z: True
      z_channels: 4
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult: [ 1,2,4,4 ]  # num_down = len(ch_mult)-1
      num_res_blocks: 2
      attn_resolutions: [ ]
      dropout: 0.0

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 12
    wrap: True
    train:
      target: ldm.data.imagenet.ImageNetSRTrain
      params:
        size: 256
        degradation: pil_nearest
    validation:
      target: ldm.data.imagenet.ImageNetSRValidation
      params:
        size: 256
        degradation: pil_nearest

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 1000
        max_images: 8
        increase_log_steps: True

  trainer:
    benchmark: True
    accumulate_grad_batches: 2


================================================
FILE: stable_diffusion/configs/autoencoder/autoencoder_kl_64x64x3.yaml
================================================
model:
  base_learning_rate: 4.5e-6
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: "val/rec_loss"
    embed_dim: 3
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 0.000001
        disc_weight: 0.5

    ddconfig:
      double_z: True
      z_channels: 3
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult: [ 1,2,4 ]  # num_down = len(ch_mult)-1
      num_res_blocks: 2
      attn_resolutions: [ ]
      dropout: 0.0


data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 12
    wrap: True
    train:
      target: ldm.data.imagenet.ImageNetSRTrain
      params:
        size: 256
        degradation: pil_nearest
    validation:
      target: ldm.data.imagenet.ImageNetSRValidation
      params:
        size: 256
        degradation: pil_nearest

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 1000
        max_images: 8
        increase_log_steps: True

  trainer:
    benchmark: True
    accumulate_grad_batches: 2


================================================
FILE: stable_diffusion/configs/autoencoder/autoencoder_kl_8x8x64.yaml
================================================
model:
  base_learning_rate: 4.5e-6
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: "val/rec_loss"
    embed_dim: 64
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 0.000001
        disc_weight: 0.5

    ddconfig:
      double_z: True
      z_channels: 64
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult: [ 1,1,2,2,4,4]  # num_down = len(ch_mult)-1
      num_res_blocks: 2
      attn_resolutions: [16,8]
      dropout: 0.0

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 12
    wrap: True
    train:
      target: ldm.data.imagenet.ImageNetSRTrain
      params:
        size: 256
        degradation: pil_nearest
    validation:
      target: ldm.data.imagenet.ImageNetSRValidation
      params:
        size: 256
        degradation: pil_nearest

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 1000
        max_images: 8
        increase_log_steps: True

  trainer:
    benchmark: True
    accumulate_grad_batches: 2


================================================
FILE: stable_diffusion/configs/latent-diffusion/celebahq-ldm-vq-4.yaml
================================================
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    image_size: 64
    channels: 3
    monitor: val/loss_simple_ema

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 224
        attention_resolutions:
        # note: this isn't actually the resolution but
        # the downsampling factor, i.e. this corresponds to
        # attention on spatial resolution 8,16,32, as the
        # spatial resolution of the latents is 64 for f4
        - 8
        - 4
        - 2
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 3
        - 4
        num_head_channels: 32
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        ckpt_path: models/first_stage_models/vq-f4/model.ckpt
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config: __is_unconditional__
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 48
    num_workers: 5
    wrap: false
    train:
      target: taming.data.faceshq.CelebAHQTrain
      params:
        size: 256
    validation:
      target: taming.data.faceshq.CelebAHQValidation
      params:
        size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False

  trainer:
    benchmark: True

================================================
FILE: stable_diffusion/configs/latent-diffusion/cin-ldm-vq-f8.yaml
================================================
model:
  base_learning_rate: 1.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: class_label
    image_size: 32
    channels: 4
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 256
        attention_resolutions:
        # note: this isn't actually the resolution but
        # the downsampling factor, i.e. this corresponds to
        # attention on spatial resolution 8,16,32, as the
        # spatial resolution of the latents is 32 for f8
        - 4
        - 2
        - 1
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 4
        num_head_channels: 32
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 512
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 4
        n_embed: 16384
        ckpt_path: configs/first_stage_models/vq-f8/model.yaml
        ddconfig:
          double_z: false
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions:
          - 32
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config:
      target: ldm.modules.encoders.modules.ClassEmbedder
      params:
        embed_dim: 512
        key: class_label
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 64
    num_workers: 12
    wrap: false
    train:
      target: ldm.data.imagenet.ImageNetTrain
      params:
        config:
          size: 256
    validation:
      target: ldm.data.imagenet.ImageNetValidation
      params:
        config:
          size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False

  trainer:
    benchmark: True

================================================
FILE: stable_diffusion/configs/latent-diffusion/cin256-v2.yaml
================================================
model:
  base_learning_rate: 0.0001
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: class_label
    image_size: 64
    channels: 3
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss
    use_ema: False
    
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 192
        attention_resolutions:
        - 8
        - 4
        - 2
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 3
        - 5
        num_heads: 1
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 512
    
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    
    cond_stage_config:
      target: ldm.modules.encoders.modules.ClassEmbedder
      params:
        n_classes: 1001
        embed_dim: 512
        key: class_label


================================================
FILE: stable_diffusion/configs/latent-diffusion/ffhq-ldm-vq-4.yaml
================================================
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    image_size: 64
    channels: 3
    monitor: val/loss_simple_ema
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 224
        attention_resolutions:
        # note: this isn't actually the resolution but
        # the downsampling factor, i.e. this corresponds to
        # attention on spatial resolution 8,16,32, as the
        # spatial resolution of the latents is 64 for f4
        - 8
        - 4
        - 2
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 3
        - 4
        num_head_channels: 32
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        ckpt_path: configs/first_stage_models/vq-f4/model.yaml
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config: __is_unconditional__
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 42
    num_workers: 5
    wrap: false
    train:
      target: taming.data.faceshq.FFHQTrain
      params:
        size: 256
    validation:
      target: taming.data.faceshq.FFHQValidation
      params:
        size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False

  trainer:
    benchmark: True

================================================
FILE: stable_diffusion/configs/latent-diffusion/lsun_bedrooms-ldm-vq-4.yaml
================================================
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    image_size: 64
    channels: 3
    monitor: val/loss_simple_ema
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 224
        attention_resolutions:
        # note: this isn't actually the resolution but
        # the downsampling factor, i.e. this corresponds to
        # attention on spatial resolution 8,16,32, as the
        # spatial resolution of the latents is 64 for f4
        - 8
        - 4
        - 2
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 3
        - 4
        num_head_channels: 32
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        ckpt_path: configs/first_stage_models/vq-f4/model.yaml
        embed_dim: 3
        n_embed: 8192
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config: __is_unconditional__
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 48
    num_workers: 5
    wrap: false
    train:
      target: ldm.data.lsun.LSUNBedroomsTrain
      params:
        size: 256
    validation:
      target: ldm.data.lsun.LSUNBedroomsValidation
      params:
        size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False

  trainer:
    benchmark: True

================================================
FILE: stable_diffusion/configs/latent-diffusion/lsun_churches-ldm-kl-8.yaml
================================================
model:
  base_learning_rate: 5.0e-5   # set to target_lr by starting main.py with '--scale_lr False'
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0155
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    loss_type: l1
    first_stage_key: "image"
    cond_stage_key: "image"
    image_size: 32
    channels: 4
    cond_stage_trainable: False
    concat_mode: False
    scale_by_std: True
    monitor: 'val/loss_simple_ema'

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [10000]
        cycle_lengths: [10000000000000]
        f_start: [1.e-6]
        f_max: [1.]
        f_min: [ 1.]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 192
        attention_resolutions: [ 1, 2, 4, 8 ]   # 32, 16, 8, 4
        num_res_blocks: 2
        channel_mult: [ 1,2,2,4,4 ]  # 32, 16, 8, 4, 2
        num_heads: 8
        use_scale_shift_norm: True
        resblock_updown: True

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: "val/rec_loss"
        ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
        ddconfig:
          double_z: True
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [ 1,2,4,4 ]  # num_down = len(ch_mult)-1
          num_res_blocks: 2
          attn_resolutions: [ ]
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config: "__is_unconditional__"

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 96
    num_workers: 5
    wrap: False
    train:
      target: ldm.data.lsun.LSUNChurchesTrain
      params:
        size: 256
    validation:
      target: ldm.data.lsun.LSUNChurchesValidation
      params:
        size: 256

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False


  trainer:
    benchmark: True

================================================
FILE: stable_diffusion/configs/latent-diffusion/txt2img-1p4B-eval.yaml
================================================
model:
  base_learning_rate: 5.0e-05
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.012
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: caption
    image_size: 32
    channels: 4
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions:
        - 4
        - 2
        - 1
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 4
        - 4
        num_heads: 8
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 1280
        use_checkpoint: true
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.BERTEmbedder
      params:
        n_embed: 1280
        n_layer: 32
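
This eval config wires a BERT-style text encoder (context_dim 1280) into the cross-attention UNet, so sampling follows the usual ldm text-to-image path: encode prompts with get_learned_conditioning, run DDIM in the 4x32x32 latent space, and decode with the KL autoencoder. A minimal sampling sketch under those assumptions; the checkpoint path is hypothetical and not taken from this repository, while DDIMSampler, get_learned_conditioning, and decode_first_stage are as listed in the symbol index below:

import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler

config = OmegaConf.load("configs/latent-diffusion/txt2img-1p4B-eval.yaml")
model = instantiate_from_config(config.model)
# Hypothetical checkpoint location; substitute wherever the 1.4B text2img weights actually live.
state = torch.load("models/ldm/text2img-large/model.ckpt", map_location="cpu")["state_dict"]
model.load_state_dict(state, strict=False)
model = model.cuda().eval()

sampler = DDIMSampler(model)
with torch.no_grad():
    uc = model.get_learned_conditioning(4 * [""])        # empty-prompt embedding for classifier-free guidance
    c = model.get_learned_conditioning(4 * ["a painting of a lighthouse at night"])
    z, _ = sampler.sample(S=50, conditioning=c, batch_size=4,
                          shape=[4, 32, 32],             # channels x image_size x image_size from the config
                          unconditional_guidance_scale=5.0,
                          unconditional_conditioning=uc,
                          eta=0.0, verbose=False)
    x = model.decode_first_stage(z)                      # latents back to 256x256 images in [-1, 1]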


================================================
FILE: stable_diffusion/configs/retrieval-augmented-diffusion/768x768.yaml
================================================
model:
  base_learning_rate: 0.0001
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.015
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: jpg
    cond_stage_key: nix
    image_size: 48
    channels: 16
    cond_stage_trainable: false
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_by_std: false
    scale_factor: 0.22765929
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 48
        in_channels: 16
        out_channels: 16
        model_channels: 448
        attention_resolutions:
        - 4
        - 2
        - 1
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 3
        - 4
        use_scale_shift_norm: false
        resblock_updown: false
        num_head_channels: 32
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: true
    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        monitor: val/rec_loss
        embed_dim: 16
        ddconfig:
          double_z: true
          z_channels: 16
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 1
          - 2
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions:
          - 16
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config:
      target: torch.nn.Identity
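
The shape parameters in this retrieval-augmented config are consistent with each other: the KL autoencoder's ch_mult has five entries, so it downsamples len(ch_mult) - 1 = 4 times (f = 16), turning a 768x768 image into the 48x48, 16-channel latent that image_size and channels declare for the UNet. A short check of that arithmetic, for illustration only:

# Latent geometry implied by the config above: f = 2 ** (len(ch_mult) - 1).
ch_mult = [1, 1, 2, 2, 4]
f = 2 ** (len(ch_mult) - 1)          # 16
print(f, 768 // f)                   # 16 48 -> matches image_size: 48 and channels: 16 (z_channels)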

================================================
FILE: stable_diffusion/configs/stable-diffusion/v1-inference.yaml
================================================
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
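
Two parameters in this inference config are easy to misread: image_size: 64 is the latent resolution (a 512x512 image through the f=8 autoencoder above), and scale_factor: 0.18215 is the constant that LatentDiffusion multiplies first-stage latents by before the UNet sees them and divides back out when decoding (get_first_stage_encoding and decode_first_stage in the symbol index below). A small sketch of that round trip, assuming `model` is a LatentDiffusion instantiated from this config with the v1 weights loaded:

import torch

def encode_to_latent(model, image_bchw: torch.Tensor) -> torch.Tensor:
    # encode_first_stage returns the autoencoder posterior; get_first_stage_encoding
    # samples it and applies scale_factor, giving a (B, 4, 64, 64) latent for 512x512 input.
    posterior = model.encode_first_stage(image_bchw)
    return model.get_first_stage_encoding(posterior)

def decode_from_latent(model, z: torch.Tensor) -> torch.Tensor:
    # decode_first_stage undoes scale_factor and runs the autoencoder decoder.
    return model.decode_first_stage(z)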


================================================
FILE: stable_diffusion/data/example_conditioning/text_conditional/sample_0.txt
================================================
A basket of cerries


================================================
FILE: stable_diffusion/data/imagenet_clsidx_to_label.txt
================================================
 0: 'tench, Tinca tinca',
 1: 'goldfish, Carassius auratus',
 2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
 3: 'tiger shark, Galeocerdo cuvieri',
 4: 'hammerhead, hammerhead shark',
 5: 'electric ray, crampfish, numbfish, torpedo',
 6: 'stingray',
 7: 'cock',
 8: 'hen',
 9: 'ostrich, Struthio camelus',
 10: 'brambling, Fringilla montifringilla',
 11: 'goldfinch, Carduelis carduelis',
 12: 'house finch, linnet, Carpodacus mexicanus',
 13: 'junco, snowbird',
 14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
 15: 'robin, American robin, Turdus migratorius',
 16: 'bulbul',
 17: 'jay',
 18: 'magpie',
 19: 'chickadee',
 20: 'water ouzel, dipper',
 21: 'kite',
 22: 'bald eagle, American eagle, Haliaeetus leucocephalus',
 23: 'vulture',
 24: 'great grey owl, great gray owl, Strix nebulosa',
 25: 'European fire salamander, Salamandra salamandra',
 26: 'common newt, Triturus vulgaris',
 27: 'eft',
 28: 'spotted salamander, Ambystoma maculatum',
 29: 'axolotl, mud puppy, Ambystoma mexicanum',
 30: 'bullfrog, Rana catesbeiana',
 31: 'tree frog, tree-frog',
 32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
 33: 'loggerhead, loggerhead turtle, Caretta caretta',
 34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
 35: 'mud turtle',
 36: 'terrapin',
 37: 'box turtle, box tortoise',
 38: 'banded gecko',
 39: 'common iguana, iguana, Iguana iguana',
 40: 'American chameleon, anole, Anolis carolinensis',
 41: 'whiptail, whiptail lizard',
 42: 'agama',
 43: 'frilled lizard, Chlamydosaurus kingi',
 44: 'alligator lizard',
 45: 'Gila monster, Heloderma suspectum',
 46: 'green lizard, Lacerta viridis',
 47: 'African chameleon, Chamaeleo chamaeleon',
 48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
 49: 'African crocodile, Nile crocodile, Crocodylus niloticus',
 50: 'American alligator, Alligator mississipiensis',
 51: 'triceratops',
 52: 'thunder snake, worm snake, Carphophis amoenus',
 53: 'ringneck snake, ring-necked snake, ring snake',
 54: 'hognose snake, puff adder, sand viper',
 55: 'green snake, grass snake',
 56: 'king snake, kingsnake',
 57: 'garter snake, grass snake',
 58: 'water snake',
 59: 'vine snake',
 60: 'night snake, Hypsiglena torquata',
 61: 'boa constrictor, Constrictor constrictor',
 62: 'rock python, rock snake, Python sebae',
 63: 'Indian cobra, Naja naja',
 64: 'green mamba',
 65: 'sea snake',
 66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
 67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
 68: 'sidewinder, horned rattlesnake, Crotalus cerastes',
 69: 'trilobite',
 70: 'harvestman, daddy longlegs, Phalangium opilio',
 71: 'scorpion',
 72: 'black and gold garden spider, Argiope aurantia',
 73: 'barn spider, Araneus cavaticus',
 74: 'garden spider, Aranea diademata',
 75: 'black widow, Latrodectus mactans',
 76: 'tarantula',
 77: 'wolf spider, hunting spider',
 78: 'tick',
 79: 'centipede',
 80: 'black grouse',
 81: 'ptarmigan',
 82: 'ruffed grouse, partridge, Bonasa umbellus',
 83: 'prairie chicken, prairie grouse, prairie fowl',
 84: 'peacock',
 85: 'quail',
 86: 'partridge',
 87: 'African grey, African gray, Psittacus erithacus',
 88: 'macaw',
 89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
 90: 'lorikeet',
 91: 'coucal',
 92: 'bee eater',
 93: 'hornbill',
 94: 'hummingbird',
 95: 'jacamar',
 96: 'toucan',
 97: 'drake',
 98: 'red-breasted merganser, Mergus serrator',
 99: 'goose',
 100: 'black swan, Cygnus atratus',
 101: 'tusker',
 102: 'echidna, spiny anteater, anteater',
 103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
 104: 'wallaby, brush kangaroo',
 105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
 106: 'wombat',
 107: 'jellyfish',
 108: 'sea anemone, anemone',
 109: 'brain coral',
 110: 'flatworm, platyhelminth',
 111: 'nematode, nematode worm, roundworm',
 112: 'conch',
 113: 'snail',
 114: 'slug',
 115: 'sea slug, nudibranch',
 116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
 117: 'chambered nautilus, pearly nautilus, nautilus',
 118: 'Dungeness crab, Cancer magister',
 119: 'rock crab, Cancer irroratus',
 120: 'fiddler crab',
 121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
 122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
 123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
 124: 'crayfish, crawfish, crawdad, crawdaddy',
 125: 'hermit crab',
 126: 'isopod',
 127: 'white stork, Ciconia ciconia',
 128: 'black stork, Ciconia nigra',
 129: 'spoonbill',
 130: 'flamingo',
 131: 'little blue heron, Egretta caerulea',
 132: 'American egret, great white heron, Egretta albus',
 133: 'bittern',
 134: 'crane',
 135: 'limpkin, Aramus pictus',
 136: 'European gallinule, Porphyrio porphyrio',
 137: 'American coot, marsh hen, mud hen, water hen, Fulica americana',
 138: 'bustard',
 139: 'ruddy turnstone, Arenaria interpres',
 140: 'red-backed sandpiper, dunlin, Erolia alpina',
 141: 'redshank, Tringa totanus',
 142: 'dowitcher',
 143: 'oystercatcher, oyster catcher',
 144: 'pelican',
 145: 'king penguin, Aptenodytes patagonica',
 146: 'albatross, mollymawk',
 147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
 148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
 149: 'dugong, Dugong dugon',
 150: 'sea lion',
 151: 'Chihuahua',
 152: 'Japanese spaniel',
 153: 'Maltese dog, Maltese terrier, Maltese',
 154: 'Pekinese, Pekingese, Peke',
 155: 'Shih-Tzu',
 156: 'Blenheim spaniel',
 157: 'papillon',
 158: 'toy terrier',
 159: 'Rhodesian ridgeback',
 160: 'Afghan hound, Afghan',
 161: 'basset, basset hound',
 162: 'beagle',
 163: 'bloodhound, sleuthhound',
 164: 'bluetick',
 165: 'black-and-tan coonhound',
 166: 'Walker hound, Walker foxhound',
 167: 'English foxhound',
 168: 'redbone',
 169: 'borzoi, Russian wolfhound',
 170: 'Irish wolfhound',
 171: 'Italian greyhound',
 172: 'whippet',
 173: 'Ibizan hound, Ibizan Podenco',
 174: 'Norwegian elkhound, elkhound',
 175: 'otterhound, otter hound',
 176: 'Saluki, gazelle hound',
 177: 'Scottish deerhound, deerhound',
 178: 'Weimaraner',
 179: 'Staffordshire bullterrier, Staffordshire bull terrier',
 180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
 181: 'Bedlington terrier',
 182: 'Border terrier',
 183: 'Kerry blue terrier',
 184: 'Irish terrier',
 185: 'Norfolk terrier',
 186: 'Norwich terrier',
 187: 'Yorkshire terrier',
 188: 'wire-haired fox terrier',
 189: 'Lakeland terrier',
 190: 'Sealyham terrier, Sealyham',
 191: 'Airedale, Airedale terrier',
 192: 'cairn, cairn terrier',
 193: 'Australian terrier',
 194: 'Dandie Dinmont, Dandie Dinmont terrier',
 195: 'Boston bull, Boston terrier',
 196: 'miniature schnauzer',
 197: 'giant schnauzer',
 198: 'standard schnauzer',
 199: 'Scotch terrier, Scottish terrier, Scottie',
 200: 'Tibetan terrier, chrysanthemum dog',
 201: 'silky terrier, Sydney silky',
 202: 'soft-coated wheaten terrier',
 203: 'West Highland white terrier',
 204: 'Lhasa, Lhasa apso',
 205: 'flat-coated retriever',
 206: 'curly-coated retriever',
 207: 'golden retriever',
 208: 'Labrador retriever',
 209: 'Chesapeake Bay retriever',
 210: 'German short-haired pointer',
 211: 'vizsla, Hungarian pointer',
 212: 'English setter',
 213: 'Irish setter, red setter',
 214: 'Gordon setter',
 215: 'Brittany spaniel',
 216: 'clumber, clumber spaniel',
 217: 'English springer, English springer spaniel',
 218: 'Welsh springer spaniel',
 219: 'cocker spaniel, English cocker spaniel, cocker',
 220: 'Sussex spaniel',
 221: 'Irish water spaniel',
 222: 'kuvasz',
 223: 'schipperke',
 224: 'groenendael',
 225: 'malinois',
 226: 'briard',
 227: 'kelpie',
 228: 'komondor',
 229: 'Old English sheepdog, bobtail',
 230: 'Shetland sheepdog, Shetland sheep dog, Shetland',
 231: 'collie',
 232: 'Border collie',
 233: 'Bouvier des Flandres, Bouviers des Flandres',
 234: 'Rottweiler',
 235: 'German shepherd, German shepherd dog, German police dog, alsatian',
 236: 'Doberman, Doberman pinscher',
 237: 'miniature pinscher',
 238: 'Greater Swiss Mountain dog',
 239: 'Bernese mountain dog',
 240: 'Appenzeller',
 241: 'EntleBucher',
 242: 'boxer',
 243: 'bull mastiff',
 244: 'Tibetan mastiff',
 245: 'French bulldog',
 246: 'Great Dane',
 247: 'Saint Bernard, St Bernard',
 248: 'Eskimo dog, husky',
 249: 'malamute, malemute, Alaskan malamute',
 250: 'Siberian husky',
 251: 'dalmatian, coach dog, carriage dog',
 252: 'affenpinscher, monkey pinscher, monkey dog',
 253: 'basenji',
 254: 'pug, pug-dog',
 255: 'Leonberg',
 256: 'Newfoundland, Newfoundland dog',
 257: 'Great Pyrenees',
 258: 'Samoyed, Samoyede',
 259: 'Pomeranian',
 260: 'chow, chow chow',
 261: 'keeshond',
 262: 'Brabancon griffon',
 263: 'Pembroke, Pembroke Welsh corgi',
 264: 'Cardigan, Cardigan Welsh corgi',
 265: 'toy poodle',
 266: 'miniature poodle',
 267: 'standard poodle',
 268: 'Mexican hairless',
 269: 'timber wolf, grey wolf, gray wolf, Canis lupus',
 270: 'white wolf, Arctic wolf, Canis lupus tundrarum',
 271: 'red wolf, maned wolf, Canis rufus, Canis niger',
 272: 'coyote, prairie wolf, brush wolf, Canis latrans',
 273: 'dingo, warrigal, warragal, Canis dingo',
 274: 'dhole, Cuon alpinus',
 275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
 276: 'hyena, hyaena',
 277: 'red fox, Vulpes vulpes',
 278: 'kit fox, Vulpes macrotis',
 279: 'Arctic fox, white fox, Alopex lagopus',
 280: 'grey fox, gray fox, Urocyon cinereoargenteus',
 281: 'tabby, tabby cat',
 282: 'tiger cat',
 283: 'Persian cat',
 284: 'Siamese cat, Siamese',
 285: 'Egyptian cat',
 286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
 287: 'lynx, catamount',
 288: 'leopard, Panthera pardus',
 289: 'snow leopard, ounce, Panthera uncia',
 290: 'jaguar, panther, Panthera onca, Felis onca',
 291: 'lion, king of beasts, Panthera leo',
 292: 'tiger, Panthera tigris',
 293: 'cheetah, chetah, Acinonyx jubatus',
 294: 'brown bear, bruin, Ursus arctos',
 295: 'American black bear, black bear, Ursus americanus, Euarctos americanus',
 296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
 297: 'sloth bear, Melursus ursinus, Ursus ursinus',
 298: 'mongoose',
 299: 'meerkat, mierkat',
 300: 'tiger beetle',
 301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
 302: 'ground beetle, carabid beetle',
 303: 'long-horned beetle, longicorn, longicorn beetle',
 304: 'leaf beetle, chrysomelid',
 305: 'dung beetle',
 306: 'rhinoceros beetle',
 307: 'weevil',
 308: 'fly',
 309: 'bee',
 310: 'ant, emmet, pismire',
 311: 'grasshopper, hopper',
 312: 'cricket',
 313: 'walking stick, walkingstick, stick insect',
 314: 'cockroach, roach',
 315: 'mantis, mantid',
 316: 'cicada, cicala',
 317: 'leafhopper',
 318: 'lacewing, lacewing fly',
 319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
 320: 'damselfly',
 321: 'admiral',
 322: 'ringlet, ringlet butterfly',
 323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
 324: 'cabbage butterfly',
 325: 'sulphur butterfly, sulfur butterfly',
 326: 'lycaenid, lycaenid butterfly',
 327: 'starfish, sea star',
 328: 'sea urchin',
 329: 'sea cucumber, holothurian',
 330: 'wood rabbit, cottontail, cottontail rabbit',
 331: 'hare',
 332: 'Angora, Angora rabbit',
 333: 'hamster',
 334: 'porcupine, hedgehog',
 335: 'fox squirrel, eastern fox squirrel, Sciurus niger',
 336: 'marmot',
 337: 'beaver',
 338: 'guinea pig, Cavia cobaya',
 339: 'sorrel',
 340: 'zebra',
 341: 'hog, pig, grunter, squealer, Sus scrofa',
 342: 'wild boar, boar, Sus scrofa',
 343: 'warthog',
 344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
 345: 'ox',
 346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
 347: 'bison',
 348: 'ram, tup',
 349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
 350: 'ibex, Capra ibex',
 351: 'hartebeest',
 352: 'impala, Aepyceros melampus',
 353: 'gazelle',
 354: 'Arabian camel, dromedary, Camelus dromedarius',
 355: 'llama',
 356: 'weasel',
 357: 'mink',
 358: 'polecat, fitch, foulmart, foumart, Mustela putorius',
 359: 'black-footed ferret, ferret, Mustela nigripes',
 360: 'otter',
 361: 'skunk, polecat, wood pussy',
 362: 'badger',
 363: 'armadillo',
 364: 'three-toed sloth, ai, Bradypus tridactylus',
 365: 'orangutan, orang, orangutang, Pongo pygmaeus',
 366: 'gorilla, Gorilla gorilla',
 367: 'chimpanzee, chimp, Pan troglodytes',
 368: 'gibbon, Hylobates lar',
 369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
 370: 'guenon, guenon monkey',
 371: 'patas, hussar monkey, Erythrocebus patas',
 372: 'baboon',
 373: 'macaque',
 374: 'langur',
 375: 'colobus, colobus monkey',
 376: 'proboscis monkey, Nasalis larvatus',
 377: 'marmoset',
 378: 'capuchin, ringtail, Cebus capucinus',
 379: 'howler monkey, howler',
 380: 'titi, titi monkey',
 381: 'spider monkey, Ateles geoffroyi',
 382: 'squirrel monkey, Saimiri sciureus',
 383: 'Madagascar cat, ring-tailed lemur, Lemur catta',
 384: 'indri, indris, Indri indri, Indri brevicaudatus',
 385: 'Indian elephant, Elephas maximus',
 386: 'African elephant, Loxodonta africana',
 387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
 388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
 389: 'barracouta, snoek',
 390: 'eel',
 391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
 392: 'rock beauty, Holocanthus tricolor',
 393: 'anemone fish',
 394: 'sturgeon',
 395: 'gar, garfish, garpike, billfish, Lepisosteus osseus',
 396: 'lionfish',
 397: 'puffer, pufferfish, blowfish, globefish',
 398: 'abacus',
 399: 'abaya',
 400: "academic gown, academic robe, judge's robe",
 401: 'accordion, piano accordion, squeeze box',
 402: 'acoustic guitar',
 403: 'aircraft carrier, carrier, flattop, attack aircraft carrier',
 404: 'airliner',
 405: 'airship, dirigible',
 406: 'altar',
 407: 'ambulance',
 408: 'amphibian, amphibious vehicle',
 409: 'analog clock',
 410: 'apiary, bee house',
 411: 'apron',
 412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
 413: 'assault rifle, assault gun',
 414: 'backpack, back pack, knapsack, packsack, rucksack, haversack',
 415: 'bakery, bakeshop, bakehouse',
 416: 'balance beam, beam',
 417: 'balloon',
 418: 'ballpoint, ballpoint pen, ballpen, Biro',
 419: 'Band Aid',
 420: 'banjo',
 421: 'bannister, banister, balustrade, balusters, handrail',
 422: 'barbell',
 423: 'barber chair',
 424: 'barbershop',
 425: 'barn',
 426: 'barometer',
 427: 'barrel, cask',
 428: 'barrow, garden cart, lawn cart, wheelbarrow',
 429: 'baseball',
 430: 'basketball',
 431: 'bassinet',
 432: 'bassoon',
 433: 'bathing cap, swimming cap',
 434: 'bath towel',
 435: 'bathtub, bathing tub, bath, tub',
 436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
 437: 'beacon, lighthouse, beacon light, pharos',
 438: 'beaker',
 439: 'bearskin, busby, shako',
 440: 'beer bottle',
 441: 'beer glass',
 442: 'bell cote, bell cot',
 443: 'bib',
 444: 'bicycle-built-for-two, tandem bicycle, tandem',
 445: 'bikini, two-piece',
 446: 'binder, ring-binder',
 447: 'binoculars, field glasses, opera glasses',
 448: 'birdhouse',
 449: 'boathouse',
 450: 'bobsled, bobsleigh, bob',
 451: 'bolo tie, bolo, bola tie, bola',
 452: 'bonnet, poke bonnet',
 453: 'bookcase',
 454: 'bookshop, bookstore, bookstall',
 455: 'bottlecap',
 456: 'bow',
 457: 'bow tie, bow-tie, bowtie',
 458: 'brass, memorial tablet, plaque',
 459: 'brassiere, bra, bandeau',
 460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
 461: 'breastplate, aegis, egis',
 462: 'broom',
 463: 'bucket, pail',
 464: 'buckle',
 465: 'bulletproof vest',
 466: 'bullet train, bullet',
 467: 'butcher shop, meat market',
 468: 'cab, hack, taxi, taxicab',
 469: 'caldron, cauldron',
 470: 'candle, taper, wax light',
 471: 'cannon',
 472: 'canoe',
 473: 'can opener, tin opener',
 474: 'cardigan',
 475: 'car mirror',
 476: 'carousel, carrousel, merry-go-round, roundabout, whirligig',
 477: "carpenter's kit, tool kit",
 478: 'carton',
 479: 'car wheel',
 480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
 481: 'cassette',
 482: 'cassette player',
 483: 'castle',
 484: 'catamaran',
 485: 'CD player',
 486: 'cello, violoncello',
 487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
 488: 'chain',
 489: 'chainlink fence',
 490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
 491: 'chain saw, chainsaw',
 492: 'chest',
 493: 'chiffonier, commode',
 494: 'chime, bell, gong',
 495: 'china cabinet, china closet',
 496: 'Christmas stocking',
 497: 'church, church building',
 498: 'cinema, movie theater, movie theatre, movie house, picture palace',
 499: 'cleaver, meat cleaver, chopper',
 500: 'cliff dwelling',
 501: 'cloak',
 502: 'clog, geta, patten, sabot',
 503: 'cocktail shaker',
 504: 'coffee mug',
 505: 'coffeepot',
 506: 'coil, spiral, volute, whorl, helix',
 507: 'combination lock',
 508: 'computer keyboard, keypad',
 509: 'confectionery, confectionary, candy store',
 510: 'container ship, containership, container vessel',
 511: 'convertible',
 512: 'corkscrew, bottle screw',
 513: 'cornet, horn, trumpet, trump',
 514: 'cowboy boot',
 515: 'cowboy hat, ten-gallon hat',
 516: 'cradle',
 517: 'crane',
 518: 'crash helmet',
 519: 'crate',
 520: 'crib, cot',
 521: 'Crock Pot',
 522: 'croquet ball',
 523: 'crutch',
 524: 'cuirass',
 525: 'dam, dike, dyke',
 526: 'desk',
 527: 'desktop computer',
 528: 'dial telephone, dial phone',
 529: 'diaper, nappy, napkin',
 530: 'digital clock',
 531: 'digital watch',
 532: 'dining table, board',
 533: 'dishrag, dishcloth',
 534: 'dishwasher, dish washer, dishwashing machine',
 535: 'disk brake, disc brake',
 536: 'dock, dockage, docking facility',
 537: 'dogsled, dog sled, dog sleigh',
 538: 'dome',
 539: 'doormat, welcome mat',
 540: 'drilling platform, offshore rig',
 541: 'drum, membranophone, tympan',
 542: 'drumstick',
 543: 'dumbbell',
 544: 'Dutch oven',
 545: 'electric fan, blower',
 546: 'electric guitar',
 547: 'electric locomotive',
 548: 'entertainment center',
 549: 'envelope',
 550: 'espresso maker',
 551: 'face powder',
 552: 'feather boa, boa',
 553: 'file, file cabinet, filing cabinet',
 554: 'fireboat',
 555: 'fire engine, fire truck',
 556: 'fire screen, fireguard',
 557: 'flagpole, flagstaff',
 558: 'flute, transverse flute',
 559: 'folding chair',
 560: 'football helmet',
 561: 'forklift',
 562: 'fountain',
 563: 'fountain pen',
 564: 'four-poster',
 565: 'freight car',
 566: 'French horn, horn',
 567: 'frying pan, frypan, skillet',
 568: 'fur coat',
 569: 'garbage truck, dustcart',
 570: 'gasmask, respirator, gas helmet',
 571: 'gas pump, gasoline pump, petrol pump, island dispenser',
 572: 'goblet',
 573: 'go-kart',
 574: 'golf ball',
 575: 'golfcart, golf cart',
 576: 'gondola',
 577: 'gong, tam-tam',
 578: 'gown',
 579: 'grand piano, grand',
 580: 'greenhouse, nursery, glasshouse',
 581: 'grille, radiator grille',
 582: 'grocery store, grocery, food market, market',
 583: 'guillotine',
 584: 'hair slide',
 585: 'hair spray',
 586: 'half track',
 587: 'hammer',
 588: 'hamper',
 589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
 590: 'hand-held computer, hand-held microcomputer',
 591: 'handkerchief, hankie, hanky, hankey',
 592: 'hard disc, hard disk, fixed disk',
 593: 'harmonica, mouth organ, harp, mouth harp',
 594: 'harp',
 595: 'harvester, reaper',
 596: 'hatchet',
 597: 'holster',
 598: 'home theater, home theatre',
 599: 'honeycomb',
 600: 'hook, claw',
 601: 'hoopskirt, crinoline',
 602: 'horizontal bar, high bar',
 603: 'horse cart, horse-cart',
 604: 'hourglass',
 605: 'iPod',
 606: 'iron, smoothing iron',
 607: "jack-o'-lantern",
 608: 'jean, blue jean, denim',
 609: 'jeep, landrover',
 610: 'jersey, T-shirt, tee shirt',
 611: 'jigsaw puzzle',
 612: 'jinrikisha, ricksha, rickshaw',
 613: 'joystick',
 614: 'kimono',
 615: 'knee pad',
 616: 'knot',
 617: 'lab coat, laboratory coat',
 618: 'ladle',
 619: 'lampshade, lamp shade',
 620: 'laptop, laptop computer',
 621: 'lawn mower, mower',
 622: 'lens cap, lens cover',
 623: 'letter opener, paper knife, paperknife',
 624: 'library',
 625: 'lifeboat',
 626: 'lighter, light, igniter, ignitor',
 627: 'limousine, limo',
 628: 'liner, ocean liner',
 629: 'lipstick, lip rouge',
 630: 'Loafer',
 631: 'lotion',
 632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
 633: "loupe, jeweler's loupe",
 634: 'lumbermill, sawmill',
 635: 'magnetic compass',
 636: 'mailbag, postbag',
 637: 'mailbox, letter box',
 638: 'maillot',
 639: 'maillot, tank suit',
 640: 'manhole cover',
 641: 'maraca',
 642: 'marimba, xylophone',
 643: 'mask',
 644: 'matchstick',
 645: 'maypole',
 646: 'maze, labyrinth',
 647: 'measuring cup',
 648: 'medicine chest, medicine cabinet',
 649: 'megalith, megalithic structure',
 650: 'microphone, mike',
 651: 'microwave, microwave oven',
 652: 'military uniform',
 653: 'milk can',
 654: 'minibus',
 655: 'miniskirt, mini',
 656: 'minivan',
 657: 'missile',
 658: 'mitten',
 659: 'mixing bowl',
 660: 'mobile home, manufactured home',
 661: 'Model T',
 662: 'modem',
 663: 'monastery',
 664: 'monitor',
 665: 'moped',
 666: 'mortar',
 667: 'mortarboard',
 668: 'mosque',
 669: 'mosquito net',
 670: 'motor scooter, scooter',
 671: 'mountain bike, all-terrain bike, off-roader',
 672: 'mountain tent',
 673: 'mouse, computer mouse',
 674: 'mousetrap',
 675: 'moving van',
 676: 'muzzle',
 677: 'nail',
 678: 'neck brace',
 679: 'necklace',
 680: 'nipple',
 681: 'notebook, notebook computer',
 682: 'obelisk',
 683: 'oboe, hautboy, hautbois',
 684: 'ocarina, sweet potato',
 685: 'odometer, hodometer, mileometer, milometer',
 686: 'oil filter',
 687: 'organ, pipe organ',
 688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
 689: 'overskirt',
 690: 'oxcart',
 691: 'oxygen mask',
 692: 'packet',
 693: 'paddle, boat paddle',
 694: 'paddlewheel, paddle wheel',
 695: 'padlock',
 696: 'paintbrush',
 697: "pajama, pyjama, pj's, jammies",
 698: 'palace',
 699: 'panpipe, pandean pipe, syrinx',
 700: 'paper towel',
 701: 'parachute, chute',
 702: 'parallel bars, bars',
 703: 'park bench',
 704: 'parking meter',
 705: 'passenger car, coach, carriage',
 706: 'patio, terrace',
 707: 'pay-phone, pay-station',
 708: 'pedestal, plinth, footstall',
 709: 'pencil box, pencil case',
 710: 'pencil sharpener',
 711: 'perfume, essence',
 712: 'Petri dish',
 713: 'photocopier',
 714: 'pick, plectrum, plectron',
 715: 'pickelhaube',
 716: 'picket fence, paling',
 717: 'pickup, pickup truck',
 718: 'pier',
 719: 'piggy bank, penny bank',
 720: 'pill bottle',
 721: 'pillow',
 722: 'ping-pong ball',
 723: 'pinwheel',
 724: 'pirate, pirate ship',
 725: 'pitcher, ewer',
 726: "plane, carpenter's plane, woodworking plane",
 727: 'planetarium',
 728: 'plastic bag',
 729: 'plate rack',
 730: 'plow, plough',
 731: "plunger, plumber's helper",
 732: 'Polaroid camera, Polaroid Land camera',
 733: 'pole',
 734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
 735: 'poncho',
 736: 'pool table, billiard table, snooker table',
 737: 'pop bottle, soda bottle',
 738: 'pot, flowerpot',
 739: "potter's wheel",
 740: 'power drill',
 741: 'prayer rug, prayer mat',
 742: 'printer',
 743: 'prison, prison house',
 744: 'projectile, missile',
 745: 'projector',
 746: 'puck, hockey puck',
 747: 'punching bag, punch bag, punching ball, punchball',
 748: 'purse',
 749: 'quill, quill pen',
 750: 'quilt, comforter, comfort, puff',
 751: 'racer, race car, racing car',
 752: 'racket, racquet',
 753: 'radiator',
 754: 'radio, wireless',
 755: 'radio telescope, radio reflector',
 756: 'rain barrel',
 757: 'recreational vehicle, RV, R.V.',
 758: 'reel',
 759: 'reflex camera',
 760: 'refrigerator, icebox',
 761: 'remote control, remote',
 762: 'restaurant, eating house, eating place, eatery',
 763: 'revolver, six-gun, six-shooter',
 764: 'rifle',
 765: 'rocking chair, rocker',
 766: 'rotisserie',
 767: 'rubber eraser, rubber, pencil eraser',
 768: 'rugby ball',
 769: 'rule, ruler',
 770: 'running shoe',
 771: 'safe',
 772: 'safety pin',
 773: 'saltshaker, salt shaker',
 774: 'sandal',
 775: 'sarong',
 776: 'sax, saxophone',
 777: 'scabbard',
 778: 'scale, weighing machine',
 779: 'school bus',
 780: 'schooner',
 781: 'scoreboard',
 782: 'screen, CRT screen',
 783: 'screw',
 784: 'screwdriver',
 785: 'seat belt, seatbelt',
 786: 'sewing machine',
 787: 'shield, buckler',
 788: 'shoe shop, shoe-shop, shoe store',
 789: 'shoji',
 790: 'shopping basket',
 791: 'shopping cart',
 792: 'shovel',
 793: 'shower cap',
 794: 'shower curtain',
 795: 'ski',
 796: 'ski mask',
 797: 'sleeping bag',
 798: 'slide rule, slipstick',
 799: 'sliding door',
 800: 'slot, one-armed bandit',
 801: 'snorkel',
 802: 'snowmobile',
 803: 'snowplow, snowplough',
 804: 'soap dispenser',
 805: 'soccer ball',
 806: 'sock',
 807: 'solar dish, solar collector, solar furnace',
 808: 'sombrero',
 809: 'soup bowl',
 810: 'space bar',
 811: 'space heater',
 812: 'space shuttle',
 813: 'spatula',
 814: 'speedboat',
 815: "spider web, spider's web",
 816: 'spindle',
 817: 'sports car, sport car',
 818: 'spotlight, spot',
 819: 'stage',
 820: 'steam locomotive',
 821: 'steel arch bridge',
 822: 'steel drum',
 823: 'stethoscope',
 824: 'stole',
 825: 'stone wall',
 826: 'stopwatch, stop watch',
 827: 'stove',
 828: 'strainer',
 829: 'streetcar, tram, tramcar, trolley, trolley car',
 830: 'stretcher',
 831: 'studio couch, day bed',
 832: 'stupa, tope',
 833: 'submarine, pigboat, sub, U-boat',
 834: 'suit, suit of clothes',
 835: 'sundial',
 836: 'sunglass',
 837: 'sunglasses, dark glasses, shades',
 838: 'sunscreen, sunblock, sun blocker',
 839: 'suspension bridge',
 840: 'swab, swob, mop',
 841: 'sweatshirt',
 842: 'swimming trunks, bathing trunks',
 843: 'swing',
 844: 'switch, electric switch, electrical switch',
 845: 'syringe',
 846: 'table lamp',
 847: 'tank, army tank, armored combat vehicle, armoured combat vehicle',
 848: 'tape player',
 849: 'teapot',
 850: 'teddy, teddy bear',
 851: 'television, television system',
 852: 'tennis ball',
 853: 'thatch, thatched roof',
 854: 'theater curtain, theatre curtain',
 855: 'thimble',
 856: 'thresher, thrasher, threshing machine',
 857: 'throne',
 858: 'tile roof',
 859: 'toaster',
 860: 'tobacco shop, tobacconist shop, tobacconist',
 861: 'toilet seat',
 862: 'torch',
 863: 'totem pole',
 864: 'tow truck, tow car, wrecker',
 865: 'toyshop',
 866: 'tractor',
 867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
 868: 'tray',
 869: 'trench coat',
 870: 'tricycle, trike, velocipede',
 871: 'trimaran',
 872: 'tripod',
 873: 'triumphal arch',
 874: 'trolleybus, trolley coach, trackless trolley',
 875: 'trombone',
 876: 'tub, vat',
 877: 'turnstile',
 878: 'typewriter keyboard',
 879: 'umbrella',
 880: 'unicycle, monocycle',
 881: 'upright, upright piano',
 882: 'vacuum, vacuum cleaner',
 883: 'vase',
 884: 'vault',
 885: 'velvet',
 886: 'vending machine',
 887: 'vestment',
 888: 'viaduct',
 889: 'violin, fiddle',
 890: 'volleyball',
 891: 'waffle iron',
 892: 'wall clock',
 893: 'wallet, billfold, notecase, pocketbook',
 894: 'wardrobe, closet, press',
 895: 'warplane, military plane',
 896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
 897: 'washer, automatic washer, washing machine',
 898: 'water bottle',
 899: 'water jug',
 900: 'water tower',
 901: 'whiskey jug',
 902: 'whistle',
 903: 'wig',
 904: 'window screen',
 905: 'window shade',
 906: 'Windsor tie',
 907: 'wine bottle',
 908: 'wing',
 909: 'wok',
 910: 'wooden spoon',
 911: 'wool, woolen, woollen',
 912: 'worm fence, snake fence, snake-rail fence, Virginia fence',
 913: 'wreck',
 914: 'yawl',
 915: 'yurt',
 916: 'web site, website, internet site, site',
 917: 'comic book',
 918: 'crossword puzzle, crossword',
 919: 'street sign',
 920: 'traffic light, traffic signal, stoplight',
 921: 'book jacket, dust cover, dust jacket, dust wrapper',
 922: 'menu',
 923: 'plate',
 924: 'guacamole',
 925: 'consomme',
 926: 'hot pot, hotpot',
 927: 'trifle',
 928: 'ice cream, icecream',
 929: 'ice lolly, lolly, lollipop, popsicle',
 930: 'French loaf',
 931: 'bagel, b
SYMBOL INDEX (849 symbols across 46 files)

FILE: dataset_creation/generate_img_dataset.py
  function append_dims (line 29) | def append_dims(x, target_dims):
  function to_d (line 37) | def to_d(x, sigma, denoised):
  function get_ancestral_step (line 42) | def get_ancestral_step(sigma_from, sigma_to):
  function sample_euler_ancestral (line 50) | def sample_euler_ancestral(model, x, sigmas, prompt2prompt_threshold=0.0...
  function load_model_from_config (line 73) | def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
  class CFGDenoiser (line 97) | class CFGDenoiser(nn.Module):
    method __init__ (line 98) | def __init__(self, model):
    method forward (line 102) | def forward(self, x, sigma, uncond, cond, cfg_scale):
  function to_pil (line 110) | def to_pil(image: torch.Tensor) -> Image.Image:
  function main (line 116) | def main():

FILE: dataset_creation/generate_txt_dataset.py
  function generate (line 20) | def generate(
  function main (line 57) | def main(openai_model: str, num_samples: int, num_partitions: int, parti...

FILE: dataset_creation/prepare_dataset.py
  function main (line 8) | def main():

FILE: dataset_creation/prepare_for_gpt.py
  function main (line 7) | def main(input_path: str, output_path: str):

FILE: edit_app.py
  class CFGDenoiser (line 60) | class CFGDenoiser(nn.Module):
    method __init__ (line 61) | def __init__(self, model):
    method forward (line 65) | def forward(self, z, sigma, cond, uncond, text_cfg_scale, image_cfg_sc...
  function load_model_from_config (line 76) | def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
  function main (line 100) | def main():

FILE: edit_cli.py
  class CFGDenoiser (line 23) | class CFGDenoiser(nn.Module):
    method __init__ (line 24) | def __init__(self, model):
    method forward (line 28) | def forward(self, z, sigma, cond, uncond, text_cfg_scale, image_cfg_sc...
  function load_model_from_config (line 39) | def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
  function main (line 63) | def main():

FILE: edit_dataset.py
  class EditDataset (line 16) | class EditDataset(Dataset):
    method __init__ (line 17) | def __init__(
    method __len__ (line 48) | def __len__(self) -> int:
    method __getitem__ (line 51) | def __getitem__(self, i: int) -> dict[str, Any]:
  class EditDatasetEval (line 75) | class EditDatasetEval(Dataset):
    method __init__ (line 76) | def __init__(
    method __len__ (line 101) | def __len__(self) -> int:
    method __getitem__ (line 104) | def __getitem__(self, i: int) -> dict[str, Any]:

FILE: main.py
  function get_parser (line 30) | def get_parser(**parser_kwargs):
  function nondefault_trainer_args (line 130) | def nondefault_trainer_args(opt):
  class WrappedDataset (line 137) | class WrappedDataset(Dataset):
    method __init__ (line 140) | def __init__(self, dataset):
    method __len__ (line 143) | def __len__(self):
    method __getitem__ (line 146) | def __getitem__(self, idx):
  function worker_init_fn (line 150) | def worker_init_fn(_):
  class DataModuleFromConfig (line 166) | class DataModuleFromConfig(pl.LightningDataModule):
    method __init__ (line 167) | def __init__(self, batch_size, train=None, validation=None, test=None,...
    method prepare_data (line 189) | def prepare_data(self):
    method setup (line 193) | def setup(self, stage=None):
    method _train_dataloader (line 201) | def _train_dataloader(self):
    method _val_dataloader (line 211) | def _val_dataloader(self, shuffle=False):
    method _test_dataloader (line 222) | def _test_dataloader(self, shuffle=False):
    method _predict_dataloader (line 235) | def _predict_dataloader(self, shuffle=False):
  class SetupCallback (line 244) | class SetupCallback(Callback):
    method __init__ (line 245) | def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, light...
    method on_keyboard_interrupt (line 255) | def on_keyboard_interrupt(self, trainer, pl_module):
    method on_pretrain_routine_start (line 261) | def on_pretrain_routine_start(self, trainer, pl_module):
  function get_world_size (line 281) | def get_world_size():
  function all_gather (line 288) | def all_gather(data):
  class ImageLogger (line 351) | class ImageLogger(Callback):
    method __init__ (line 352) | def __init__(self, batch_frequency, max_images, clamp=True, increase_l...
    method _testtube (line 372) | def _testtube(self, pl_module, images, batch_idx, split):
    method log_local (line 383) | def log_local(self, save_dir, split, images, prompts,
    method log_img (line 414) | def log_img(self, pl_module, batch, batch_idx, split="train"):
    method check_frequency (line 450) | def check_frequency(self, check_idx):
    method on_train_batch_end (line 458) | def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch...
    method on_validation_batch_end (line 462) | def on_validation_batch_end(self, trainer, pl_module, outputs, batch, ...
  class CUDACallback (line 470) | class CUDACallback(Callback):
    method on_train_epoch_start (line 472) | def on_train_epoch_start(self, trainer, pl_module):
    method on_train_epoch_end (line 478) | def on_train_epoch_end(self, trainer, pl_module, outputs):
  function melk (line 755) | def melk(*args, **kwargs):
  function divein (line 763) | def divein(*args, **kwargs):

FILE: metrics/clip_similarity.py
  class ClipSimilarity (line 10) | class ClipSimilarity(nn.Module):
    method __init__ (line 11) | def __init__(self, name: str = "ViT-L/14"):
    method encode_text (line 22) | def encode_text(self, text: list[str]) -> torch.Tensor:
    method encode_image (line 28) | def encode_image(self, image: torch.Tensor) -> torch.Tensor:  # Input ...
    method forward (line 36) | def forward(

FILE: metrics/compute_metrics.py
  class CFGDenoiser (line 34) | class CFGDenoiser(nn.Module):
    method __init__ (line 35) | def __init__(self, model):
    method forward (line 39) | def forward(self, z, sigma, cond, uncond, text_cfg_scale, image_cfg_sc...
  function load_model_from_config (line 50) | def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
  class ImageEditor (line 73) | class ImageEditor(nn.Module):
    method __init__ (line 74) | def __init__(self, config, ckpt, vae_ckpt=None):
    method forward (line 84) | def forward(
  function compute_metrics (line 117) | def compute_metrics(config,
  function plot_metrics (line 186) | def plot_metrics(metrics_file, output_path):
  function main (line 205) | def main():

FILE: prompt_app.py
  function main (line 13) | def main(openai_model: str):

FILE: stable_diffusion/ldm/data/base.py
  class Txt2ImgIterableBaseDataset (line 5) | class Txt2ImgIterableBaseDataset(IterableDataset):
    method __init__ (line 9) | def __init__(self, num_records=0, valid_ids=None, size=256):
    method __len__ (line 18) | def __len__(self):
    method __iter__ (line 22) | def __iter__(self):

FILE: stable_diffusion/ldm/data/imagenet.py
  function synset2idx (line 20) | def synset2idx(path_to_yaml="data/index_synset.yaml"):
  class ImageNetBase (line 26) | class ImageNetBase(Dataset):
    method __init__ (line 27) | def __init__(self, config=None):
    method __len__ (line 39) | def __len__(self):
    method __getitem__ (line 42) | def __getitem__(self, i):
    method _prepare (line 45) | def _prepare(self):
    method _filter_relpaths (line 48) | def _filter_relpaths(self, relpaths):
    method _prepare_synset_to_human (line 66) | def _prepare_synset_to_human(self):
    method _prepare_idx_to_synset (line 74) | def _prepare_idx_to_synset(self):
    method _prepare_human_to_integer_label (line 80) | def _prepare_human_to_integer_label(self):
    method _load (line 93) | def _load(self):
  class ImageNetTrain (line 134) | class ImageNetTrain(ImageNetBase):
    method __init__ (line 145) | def __init__(self, process_images=True, data_root=None, **kwargs):
    method _prepare (line 150) | def _prepare(self):
  class ImageNetValidation (line 197) | class ImageNetValidation(ImageNetBase):
    method __init__ (line 211) | def __init__(self, process_images=True, data_root=None, **kwargs):
    method _prepare (line 216) | def _prepare(self):
  class ImageNetSR (line 272) | class ImageNetSR(Dataset):
    method __init__ (line 273) | def __init__(self, size=None,
    method __len__ (line 336) | def __len__(self):
    method __getitem__ (line 339) | def __getitem__(self, i):
  class ImageNetSRTrain (line 375) | class ImageNetSRTrain(ImageNetSR):
    method __init__ (line 376) | def __init__(self, **kwargs):
    method get_base (line 379) | def get_base(self):
  class ImageNetSRValidation (line 386) | class ImageNetSRValidation(ImageNetSR):
    method __init__ (line 387) | def __init__(self, **kwargs):
    method get_base (line 390) | def get_base(self):

FILE: stable_diffusion/ldm/data/lsun.py
  class LSUNBase (line 9) | class LSUNBase(Dataset):
    method __init__ (line 10) | def __init__(self,
    method __len__ (line 36) | def __len__(self):
    method __getitem__ (line 39) | def __getitem__(self, i):
  class LSUNChurchesTrain (line 62) | class LSUNChurchesTrain(LSUNBase):
    method __init__ (line 63) | def __init__(self, **kwargs):
  class LSUNChurchesValidation (line 67) | class LSUNChurchesValidation(LSUNBase):
    method __init__ (line 68) | def __init__(self, flip_p=0., **kwargs):
  class LSUNBedroomsTrain (line 73) | class LSUNBedroomsTrain(LSUNBase):
    method __init__ (line 74) | def __init__(self, **kwargs):
  class LSUNBedroomsValidation (line 78) | class LSUNBedroomsValidation(LSUNBase):
    method __init__ (line 79) | def __init__(self, flip_p=0.0, **kwargs):
  class LSUNCatsTrain (line 84) | class LSUNCatsTrain(LSUNBase):
    method __init__ (line 85) | def __init__(self, **kwargs):
  class LSUNCatsValidation (line 89) | class LSUNCatsValidation(LSUNBase):
    method __init__ (line 90) | def __init__(self, flip_p=0., **kwargs):

FILE: stable_diffusion/ldm/lr_scheduler.py
  class LambdaWarmUpCosineScheduler (line 4) | class LambdaWarmUpCosineScheduler:
    method __init__ (line 8) | def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_...
    method schedule (line 17) | def schedule(self, n, **kwargs):
    method __call__ (line 32) | def __call__(self, n, **kwargs):
  class LambdaWarmUpCosineScheduler2 (line 36) | class LambdaWarmUpCosineScheduler2:
    method __init__ (line 41) | def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths...
    method find_in_interval (line 52) | def find_in_interval(self, n):
    method schedule (line 59) | def schedule(self, n, **kwargs):
    method __call__ (line 77) | def __call__(self, n, **kwargs):
  class LambdaLinearScheduler (line 81) | class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):
    method schedule (line 83) | def schedule(self, n, **kwargs):

FILE: stable_diffusion/ldm/models/autoencoder.py
  class VQModel (line 14) | class VQModel(pl.LightningModule):
    method __init__ (line 15) | def __init__(self,
    method ema_scope (line 64) | def ema_scope(self, context=None):
    method init_from_ckpt (line 78) | def init_from_ckpt(self, path, ignore_keys=list()):
    method on_train_batch_end (line 92) | def on_train_batch_end(self, *args, **kwargs):
    method encode (line 96) | def encode(self, x):
    method encode_to_prequant (line 102) | def encode_to_prequant(self, x):
    method decode (line 107) | def decode(self, quant):
    method decode_code (line 112) | def decode_code(self, code_b):
    method forward (line 117) | def forward(self, input, return_pred_indices=False):
    method get_input (line 124) | def get_input(self, batch, k):
    method training_step (line 142) | def training_step(self, batch, batch_idx, optimizer_idx):
    method validation_step (line 164) | def validation_step(self, batch, batch_idx):
    method _validation_step (line 170) | def _validation_step(self, batch, batch_idx, suffix=""):
    method configure_optimizers (line 197) | def configure_optimizers(self):
    method get_last_layer (line 230) | def get_last_layer(self):
    method log_images (line 233) | def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
    method to_rgb (line 255) | def to_rgb(self, x):
  class VQModelInterface (line 264) | class VQModelInterface(VQModel):
    method __init__ (line 265) | def __init__(self, embed_dim, *args, **kwargs):
    method encode (line 269) | def encode(self, x):
    method decode (line 274) | def decode(self, h, force_not_quantize=False):
  class AutoencoderKL (line 285) | class AutoencoderKL(pl.LightningModule):
    method __init__ (line 286) | def __init__(self,
    method init_from_ckpt (line 313) | def init_from_ckpt(self, path, ignore_keys=list()):
    method encode (line 324) | def encode(self, x):
    method decode (line 330) | def decode(self, z):
    method forward (line 335) | def forward(self, input, sample_posterior=True):
    method get_input (line 344) | def get_input(self, batch, k):
    method training_step (line 351) | def training_step(self, batch, batch_idx, optimizer_idx):
    method validation_step (line 372) | def validation_step(self, batch, batch_idx):
    method configure_optimizers (line 386) | def configure_optimizers(self):
    method get_last_layer (line 397) | def get_last_layer(self):
    method log_images (line 401) | def log_images(self, batch, only_inputs=False, **kwargs):
    method to_rgb (line 417) | def to_rgb(self, x):
  class IdentityFirstStage (line 426) | class IdentityFirstStage(torch.nn.Module):
    method __init__ (line 427) | def __init__(self, *args, vq_interface=False, **kwargs):
    method encode (line 431) | def encode(self, x, *args, **kwargs):
    method decode (line 434) | def decode(self, x, *args, **kwargs):
    method quantize (line 437) | def quantize(self, x, *args, **kwargs):
    method forward (line 442) | def forward(self, x, *args, **kwargs):

FILE: stable_diffusion/ldm/models/diffusion/classifier.py
  function disabled_train (line 22) | def disabled_train(self, mode=True):
  class NoisyLatentImageClassifier (line 28) | class NoisyLatentImageClassifier(pl.LightningModule):
    method __init__ (line 30) | def __init__(self,
    method init_from_ckpt (line 70) | def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
    method load_diffusion (line 88) | def load_diffusion(self):
    method load_classifier (line 95) | def load_classifier(self, ckpt_path, pool):
    method get_x_noisy (line 110) | def get_x_noisy(self, x, t, noise=None):
    method forward (line 120) | def forward(self, x_noisy, t, *args, **kwargs):
    method get_input (line 124) | def get_input(self, batch, k):
    method get_conditioning (line 133) | def get_conditioning(self, batch, k=None):
    method compute_top_k (line 150) | def compute_top_k(self, logits, labels, k, reduction="mean"):
    method on_train_epoch_start (line 157) | def on_train_epoch_start(self):
    method write_logs (line 162) | def write_logs(self, loss, logits, targets):
    method shared_step (line 179) | def shared_step(self, batch, t=None):
    method training_step (line 198) | def training_step(self, batch, batch_idx):
    method reset_noise_accs (line 202) | def reset_noise_accs(self):
    method on_validation_start (line 206) | def on_validation_start(self):
    method validation_step (line 210) | def validation_step(self, batch, batch_idx):
    method configure_optimizers (line 220) | def configure_optimizers(self):
    method log_images (line 238) | def log_images(self, batch, N=8, *args, **kwargs):

FILE: stable_diffusion/ldm/models/diffusion/ddim.py
  class DDIMSampler (line 12) | class DDIMSampler(object):
    method __init__ (line 13) | def __init__(self, model, schedule="linear", **kwargs):
    method register_buffer (line 19) | def register_buffer(self, name, attr):
    method make_schedule (line 25) | def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddi...
    method sample (line 57) | def sample(self,
    method ddim_sampling (line 114) | def ddim_sampling(self, cond, shape,
    method p_sample_ddim (line 166) | def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_origin...
    method stochastic_encode (line 207) | def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
    method decode (line 223) | def decode(self, x_latent, cond, t_start, unconditional_guidance_scale...

FILE: stable_diffusion/ldm/models/diffusion/ddpm.py
  function disabled_train (line 34) | def disabled_train(self, mode=True):
  function uniform_on_device (line 40) | def uniform_on_device(r1, r2, shape, device):
  class DDPM (line 44) | class DDPM(pl.LightningModule):
    method __init__ (line 46) | def __init__(self,
    method register_schedule (line 117) | def register_schedule(self, given_betas=None, beta_schedule="linear", ...
    method ema_scope (line 172) | def ema_scope(self, context=None):
    method init_from_ckpt (line 186) | def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
    method q_mean_variance (line 204) | def q_mean_variance(self, x_start, t):
    method predict_start_from_noise (line 216) | def predict_start_from_noise(self, x_t, t, noise):
    method q_posterior (line 222) | def q_posterior(self, x_start, x_t, t):
    method p_mean_variance (line 231) | def p_mean_variance(self, x, t, clip_denoised: bool):
    method p_sample (line 244) | def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
    method p_sample_loop (line 253) | def p_sample_loop(self, shape, return_intermediates=False):
    method sample (line 268) | def sample(self, batch_size=16, return_intermediates=False):
    method q_sample (line 274) | def q_sample(self, x_start, t, noise=None):
    method get_loss (line 279) | def get_loss(self, pred, target, mean=True):
    method p_losses (line 294) | def p_losses(self, x_start, t, noise=None):
    method forward (line 323) | def forward(self, x, *args, **kwargs):
    method get_input (line 329) | def get_input(self, batch, k):
    method shared_step (line 337) | def shared_step(self, batch):
    method training_step (line 342) | def training_step(self, batch, batch_idx):
    method validation_step (line 358) | def validation_step(self, batch, batch_idx):
    method on_train_batch_end (line 366) | def on_train_batch_end(self, *args, **kwargs):
    method _get_rows_from_list (line 370) | def _get_rows_from_list(self, samples):
    method log_images (line 378) | def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=Non...
    method configure_optimizers (line 415) | def configure_optimizers(self):
  class LatentDiffusion (line 424) | class LatentDiffusion(DDPM):
    method __init__ (line 426) | def __init__(self,
    method make_cond_schedule (line 471) | def make_cond_schedule(self, ):
    method on_train_batch_start (line 478) | def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
    method register_schedule (line 493) | def register_schedule(self,
    method instantiate_first_stage (line 502) | def instantiate_first_stage(self, config):
    method instantiate_cond_stage (line 509) | def instantiate_cond_stage(self, config):
    method _get_denoise_row_from_list (line 530) | def _get_denoise_row_from_list(self, samples, desc='', force_no_decode...
    method get_first_stage_encoding (line 542) | def get_first_stage_encoding(self, encoder_posterior):
    method get_learned_conditioning (line 551) | def get_learned_conditioning(self, c):
    method meshgrid (line 564) | def meshgrid(self, h, w):
    method delta_border (line 571) | def delta_border(self, h, w):
    method get_weighting (line 585) | def get_weighting(self, h, w, Ly, Lx, device):
    method get_fold_unfold (line 601) | def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo...
    method get_input (line 654) | def get_input(self, batch, k, return_first_stage_outputs=False, force_...
    method decode_first_stage (line 706) | def decode_first_stage(self, z, predict_cids=False, force_not_quantize...
    method differentiable_decode_first_stage (line 766) | def differentiable_decode_first_stage(self, z, predict_cids=False, for...
    method encode_first_stage (line 826) | def encode_first_stage(self, x):
    method shared_step (line 865) | def shared_step(self, batch, **kwargs):
    method forward (line 870) | def forward(self, x, c, *args, **kwargs):
    method _rescale_annotations (line 881) | def _rescale_annotations(self, bboxes, crop_coordinates):  # TODO: mov...
    method apply_model (line 891) | def apply_model(self, x_noisy, t, cond, return_ids=False):
    method _predict_eps_from_xstart (line 994) | def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
    method _prior_bpd (line 998) | def _prior_bpd(self, x_start):
    method p_losses (line 1012) | def p_losses(self, x_start, cond, t, noise=None):
    method p_mean_variance (line 1047) | def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codeboo...
    method p_sample (line 1079) | def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
    method progressive_denoising (line 1110) | def progressive_denoising(self, cond, shape, verbose=True, callback=No...
    method p_sample_loop (line 1166) | def p_sample_loop(self, cond, shape, return_intermediates=False,
    method sample (line 1217) | def sample(self, cond, batch_size=16, return_intermediates=False, x_T=...
    method sample_log (line 1235) | def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
    method log_images (line 1251) | def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200,...
    method configure_optimizers (line 1361) | def configure_optimizers(self):
    method to_rgb (line 1386) | def to_rgb(self, x):
  class DiffusionWrapper (line 1395) | class DiffusionWrapper(pl.LightningModule):
    method __init__ (line 1396) | def __init__(self, diff_model_config, conditioning_key):
    method forward (line 1402) | def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
  class Layout2ImgDiffusion (line 1424) | class Layout2ImgDiffusion(LatentDiffusion):
    method __init__ (line 1426) | def __init__(self, cond_stage_key, *args, **kwargs):
    method log_images (line 1430) | def log_images(self, batch, N=8, *args, **kwargs):
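
DDPM.q_sample above implements the standard forward-diffusion closed form x_t = sqrt(a̅_t) * x_0 + sqrt(1 - a̅_t) * eps, with extract_into_tensor gathering the per-timestep scalars for broadcasting. A self-contained sketch of that computation (toy shapes and a toy schedule, independent of the repo):

    import torch

    def extract_into_tensor(a, t, x_shape):
        # Gather per-timestep scalars and reshape so they broadcast over an image batch.
        b = t.shape[0]
        out = a.gather(-1, t)
        return out.reshape(b, *((1,) * (len(x_shape) - 1)))

    # Toy schedule: 1000 linear betas, as make_beta_schedule("linear", ...) would produce.
    betas = torch.linspace(1e-4, 2e-2, 1000)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

    x_start = torch.randn(2, 4, 64, 64)          # clean latents (batch of 2)
    t = torch.randint(0, 1000, (2,))             # one timestep per sample
    noise = torch.randn_like(x_start)

    # q(x_t | x_0): the noisy latent that the denoising UNet sees as input.
    x_noisy = (
        extract_into_tensor(alphas_cumprod.sqrt(), t, x_start.shape) * x_start
        + extract_into_tensor((1.0 - alphas_cumprod).sqrt(), t, x_start.shape) * noise
    )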

FILE: stable_diffusion/ldm/models/diffusion/ddpm_edit.py
  function disabled_train (line 37) | def disabled_train(self, mode=True):
  function uniform_on_device (line 43) | def uniform_on_device(r1, r2, shape, device):
  class DDPM (line 47) | class DDPM(pl.LightningModule):
    method __init__ (line 49) | def __init__(self,
    method register_schedule (line 128) | def register_schedule(self, given_betas=None, beta_schedule="linear", ...
    method ema_scope (line 183) | def ema_scope(self, context=None):
    method init_from_ckpt (line 197) | def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
    method q_mean_variance (line 236) | def q_mean_variance(self, x_start, t):
    method predict_start_from_noise (line 248) | def predict_start_from_noise(self, x_t, t, noise):
    method q_posterior (line 254) | def q_posterior(self, x_start, x_t, t):
    method p_mean_variance (line 263) | def p_mean_variance(self, x, t, clip_denoised: bool):
    method p_sample (line 276) | def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
    method p_sample_loop (line 285) | def p_sample_loop(self, shape, return_intermediates=False):
    method sample (line 300) | def sample(self, batch_size=16, return_intermediates=False):
    method q_sample (line 306) | def q_sample(self, x_start, t, noise=None):
    method get_loss (line 311) | def get_loss(self, pred, target, mean=True):
    method p_losses (line 326) | def p_losses(self, x_start, t, noise=None):
    method forward (line 355) | def forward(self, x, *args, **kwargs):
    method get_input (line 361) | def get_input(self, batch, k):
    method shared_step (line 364) | def shared_step(self, batch):
    method training_step (line 369) | def training_step(self, batch, batch_idx):
    method validation_step (line 385) | def validation_step(self, batch, batch_idx):
    method on_train_batch_end (line 393) | def on_train_batch_end(self, *args, **kwargs):
    method _get_rows_from_list (line 397) | def _get_rows_from_list(self, samples):
    method log_images (line 405) | def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=Non...
    method configure_optimizers (line 442) | def configure_optimizers(self):
  class LatentDiffusion (line 451) | class LatentDiffusion(DDPM):
    method __init__ (line 453) | def __init__(self,
    method make_cond_schedule (line 503) | def make_cond_schedule(self, ):
    method on_train_batch_start (line 510) | def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
    method register_schedule (line 525) | def register_schedule(self,
    method instantiate_first_stage (line 534) | def instantiate_first_stage(self, config):
    method instantiate_cond_stage (line 541) | def instantiate_cond_stage(self, config):
    method _get_denoise_row_from_list (line 562) | def _get_denoise_row_from_list(self, samples, desc='', force_no_decode...
    method get_first_stage_encoding (line 574) | def get_first_stage_encoding(self, encoder_posterior):
    method get_learned_conditioning (line 583) | def get_learned_conditioning(self, c):
    method meshgrid (line 596) | def meshgrid(self, h, w):
    method delta_border (line 603) | def delta_border(self, h, w):
    method get_weighting (line 617) | def get_weighting(self, h, w, Ly, Lx, device):
    method get_fold_unfold (line 633) | def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo...
    method get_input (line 686) | def get_input(self, batch, k, return_first_stage_outputs=False, force_...
    method decode_first_stage (line 719) | def decode_first_stage(self, z, predict_cids=False, force_not_quantize...
    method differentiable_decode_first_stage (line 779) | def differentiable_decode_first_stage(self, z, predict_cids=False, for...
    method encode_first_stage (line 839) | def encode_first_stage(self, x):
    method shared_step (line 878) | def shared_step(self, batch, **kwargs):
    method forward (line 883) | def forward(self, x, c, *args, **kwargs):
    method _rescale_annotations (line 894) | def _rescale_annotations(self, bboxes, crop_coordinates):  # TODO: mov...
    method apply_model (line 904) | def apply_model(self, x_noisy, t, cond, return_ids=False):
    method _predict_eps_from_xstart (line 1007) | def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
    method _prior_bpd (line 1011) | def _prior_bpd(self, x_start):
    method p_losses (line 1025) | def p_losses(self, x_start, cond, t, noise=None):
    method p_mean_variance (line 1060) | def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codeboo...
    method p_sample (line 1092) | def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
    method progressive_denoising (line 1123) | def progressive_denoising(self, cond, shape, verbose=True, callback=No...
    method p_sample_loop (line 1179) | def p_sample_loop(self, cond, shape, return_intermediates=False,
    method sample (line 1230) | def sample(self, cond, batch_size=16, return_intermediates=False, x_T=...
    method sample_log (line 1248) | def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
    method log_images (line 1264) | def log_images(self, batch, N=4, n_row=4, sample=True, ddim_steps=200,...
    method configure_optimizers (line 1375) | def configure_optimizers(self):
    method to_rgb (line 1400) | def to_rgb(self, x):
  class DiffusionWrapper (line 1409) | class DiffusionWrapper(pl.LightningModule):
    method __init__ (line 1410) | def __init__(self, diff_model_config, conditioning_key):
    method forward (line 1416) | def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
  class Layout2ImgDiffusion (line 1438) | class Layout2ImgDiffusion(LatentDiffusion):
    method __init__ (line 1440) | def __init__(self, cond_stage_key, *args, **kwargs):
    method log_images (line 1444) | def log_images(self, batch, N=8, *args, **kwargs):
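
ddpm_edit.py is the InstructPix2Pix-modified copy of ddpm.py: its LatentDiffusion is conditioned on both the encoded input image (channel-concatenated, "c_concat") and the edit instruction (cross-attention, "c_crossattn"). A hedged sketch of calling apply_model with that hybrid conditioning, roughly mirroring how edit_cli.py builds it; `model` is assumed to be a loaded edit checkpoint, and the latent shapes below are assumptions for illustration:

    import torch

    device = next(model.parameters()).device      # `model`: a loaded ddpm_edit.LatentDiffusion (assumption)
    edited_latent = torch.randn(1, 4, 64, 64, device=device)   # noisy target latent
    input_latent = torch.randn(1, 4, 64, 64, device=device)    # encoded source image latent
    t = torch.randint(0, 1000, (1,), device=device)

    cond = {
        "c_concat": [input_latent],                                        # image conditioning, concatenated to x_t
        "c_crossattn": [model.get_learned_conditioning(["make it snowy"])],  # text instruction
    }
    with torch.no_grad():
        eps_pred = model.apply_model(edited_latent, t, cond)   # predicted noise, same shape as the latent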

FILE: stable_diffusion/ldm/models/diffusion/dpm_solver/dpm_solver.py
  class NoiseScheduleVP (line 6) | class NoiseScheduleVP:
    method __init__ (line 7) | def __init__(
    method marginal_log_mean_coeff (line 125) | def marginal_log_mean_coeff(self, t):
    method marginal_alpha (line 138) | def marginal_alpha(self, t):
    method marginal_std (line 144) | def marginal_std(self, t):
    method marginal_lambda (line 150) | def marginal_lambda(self, t):
    method inverse_lambda (line 158) | def inverse_lambda(self, lamb):
  function model_wrapper (line 177) | def model_wrapper(
  class DPM_Solver (line 351) | class DPM_Solver:
    method __init__ (line 352) | def __init__(self, model_fn, noise_schedule, predict_x0=False, thresho...
    method noise_prediction_fn (line 380) | def noise_prediction_fn(self, x, t):
    method data_prediction_fn (line 386) | def data_prediction_fn(self, x, t):
    method model_fn (line 401) | def model_fn(self, x, t):
    method get_time_steps (line 410) | def get_time_steps(self, skip_type, t_T, t_0, N, device):
    method get_orders_and_timesteps_for_singlestep_solver (line 439) | def get_orders_and_timesteps_for_singlestep_solver(self, steps, order,...
    method denoise_to_zero_fn (line 498) | def denoise_to_zero_fn(self, x, s):
    method dpm_solver_first_update (line 504) | def dpm_solver_first_update(self, x, s, t, model_s=None, return_interm...
    method singlestep_dpm_solver_second_update (line 551) | def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s...
    method singlestep_dpm_solver_third_update (line 633) | def singlestep_dpm_solver_third_update(self, x, s, t, r1=1./3., r2=2./...
    method multistep_dpm_solver_second_update (line 755) | def multistep_dpm_solver_second_update(self, x, model_prev_list, t_pre...
    method multistep_dpm_solver_third_update (line 812) | def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev...
    method singlestep_dpm_solver_update (line 859) | def singlestep_dpm_solver_update(self, x, s, t, order, return_intermed...
    method multistep_dpm_solver_update (line 885) | def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list,...
    method dpm_solver_adaptive (line 909) | def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0....
    method sample (line 965) | def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_...
  function interpolate_fn (line 1132) | def interpolate_fn(x, xp, yp):
  function expand_dims (line 1174) | def expand_dims(v, dims):
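
The intended calling pattern for this module is to wrap a noise-prediction model and a discrete noise schedule, then sample in roughly 20 steps; DPMSolverSampler (next file) drives it this way. A sketch with placeholder inputs: unet_eps, cond and uncond below are stand-ins invented for the example, not names from the repo.

    import torch
    from ldm.models.diffusion.dpm_solver.dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver

    def unet_eps(x, t, c):
        # Placeholder standing in for the real UNet's noise prediction.
        return torch.zeros_like(x)

    cond = torch.zeros(1, 77, 768)                 # placeholder text embedding
    uncond = torch.zeros(1, 77, 768)               # placeholder unconditional embedding

    betas = torch.linspace(0.00085, 0.012, 1000)   # assumed: the model's training beta schedule
    ns = NoiseScheduleVP(schedule="discrete", betas=betas)

    model_fn = model_wrapper(
        unet_eps, ns,
        model_type="noise",
        guidance_type="classifier-free",
        condition=cond,
        unconditional_condition=uncond,
        guidance_scale=7.5,
    )
    solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)

    x_T = torch.randn(1, 4, 64, 64)
    x_0 = solver.sample(x_T, steps=20, order=2, skip_type="time_uniform", method="multistep")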

FILE: stable_diffusion/ldm/models/diffusion/dpm_solver/sampler.py
  class DPMSolverSampler (line 8) | class DPMSolverSampler(object):
    method __init__ (line 9) | def __init__(self, model, **kwargs):
    method register_buffer (line 15) | def register_buffer(self, name, attr):
    method sample (line 22) | def sample(self,

FILE: stable_diffusion/ldm/models/diffusion/plms.py
  class PLMSSampler (line 11) | class PLMSSampler(object):
    method __init__ (line 12) | def __init__(self, model, schedule="linear", **kwargs):
    method register_buffer (line 18) | def register_buffer(self, name, attr):
    method make_schedule (line 24) | def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddi...
    method sample (line 58) | def sample(self,
    method plms_sampling (line 115) | def plms_sampling(self, cond, shape,
    method p_sample_plms (line 173) | def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_origin...

FILE: stable_diffusion/ldm/modules/attention.py
  function exists (line 14) | def exists(val):
  function uniq (line 18) | def uniq(arr):
  function default (line 22) | def default(val, d):
  function max_neg_value (line 28) | def max_neg_value(t):
  function init_ (line 32) | def init_(tensor):
  class GEGLU (line 40) | class GEGLU(nn.Module):
    method __init__ (line 41) | def __init__(self, dim_in, dim_out):
    method forward (line 45) | def forward(self, x):
  class FeedForward (line 50) | class FeedForward(nn.Module):
    method __init__ (line 51) | def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
    method forward (line 66) | def forward(self, x):
  function zero_module (line 70) | def zero_module(module):
  function Normalize (line 79) | def Normalize(in_channels):
  class LinearAttention (line 83) | class LinearAttention(nn.Module):
    method __init__ (line 84) | def __init__(self, dim, heads=4, dim_head=32):
    method forward (line 91) | def forward(self, x):
  class SpatialSelfAttention (line 102) | class SpatialSelfAttention(nn.Module):
    method __init__ (line 103) | def __init__(self, in_channels):
    method forward (line 129) | def forward(self, x):
  class CrossAttention (line 155) | class CrossAttention(nn.Module):
    method __init__ (line 156) | def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, ...
    method forward (line 175) | def forward(self, x, context=None, mask=None):
  class BasicTransformerBlock (line 210) | class BasicTransformerBlock(nn.Module):
    method __init__ (line 211) | def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None,...
    method forward (line 222) | def forward(self, x, context=None):
    method _forward (line 225) | def _forward(self, x, context=None):
  class SpatialTransformer (line 232) | class SpatialTransformer(nn.Module):
    method __init__ (line 240) | def __init__(self, in_channels, n_heads, d_head,
    method forward (line 264) | def forward(self, x, context=None):
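
CrossAttention above is the core of the SpatialTransformer blocks: queries come from flattened image features, keys and values from the text context. A small standalone check with dimensions assumed from the SD v1 configs (320-channel features at the first block, 768-dim CLIP context); it only needs the `ldm` package and einops to be importable:

    import torch
    from ldm.modules.attention import CrossAttention

    attn = CrossAttention(query_dim=320, context_dim=768, heads=8, dim_head=40)
    x = torch.randn(1, 64 * 64, 320)          # flattened spatial tokens
    context = torch.randn(1, 77, 768)         # CLIP text embedding (77 tokens)
    out = attn(x, context=context)            # same shape as x: (1, 4096, 320)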

FILE: stable_diffusion/ldm/modules/diffusionmodules/model.py
  function get_timestep_embedding (line 12) | def get_timestep_embedding(timesteps, embedding_dim):
  function nonlinearity (line 33) | def nonlinearity(x):
  function Normalize (line 38) | def Normalize(in_channels, num_groups=32):
  class Upsample (line 42) | class Upsample(nn.Module):
    method __init__ (line 43) | def __init__(self, in_channels, with_conv):
    method forward (line 53) | def forward(self, x):
  class Downsample (line 60) | class Downsample(nn.Module):
    method __init__ (line 61) | def __init__(self, in_channels, with_conv):
    method forward (line 72) | def forward(self, x):
  class ResnetBlock (line 82) | class ResnetBlock(nn.Module):
    method __init__ (line 83) | def __init__(self, *, in_channels, out_channels=None, conv_shortcut=Fa...
    method forward (line 121) | def forward(self, x, temb):
  class LinAttnBlock (line 144) | class LinAttnBlock(LinearAttention):
    method __init__ (line 146) | def __init__(self, in_channels):
  class AttnBlock (line 150) | class AttnBlock(nn.Module):
    method __init__ (line 151) | def __init__(self, in_channels):
    method forward (line 178) | def forward(self, x):
  function make_attn (line 205) | def make_attn(in_channels, attn_type="vanilla"):
  class Model (line 216) | class Model(nn.Module):
    method __init__ (line 217) | def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
    method forward (line 316) | def forward(self, x, t=None, context=None):
    method get_last_layer (line 364) | def get_last_layer(self):
  class Encoder (line 368) | class Encoder(nn.Module):
    method __init__ (line 369) | def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
    method forward (line 434) | def forward(self, x):
  class Decoder (line 462) | class Decoder(nn.Module):
    method __init__ (line 463) | def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
    method forward (line 535) | def forward(self, z):
  class SimpleDecoder (line 571) | class SimpleDecoder(nn.Module):
    method __init__ (line 572) | def __init__(self, in_channels, out_channels, *args, **kwargs):
    method forward (line 594) | def forward(self, x):
  class UpsampleDecoder (line 607) | class UpsampleDecoder(nn.Module):
    method __init__ (line 608) | def __init__(self, in_channels, out_channels, ch, num_res_blocks, reso...
    method forward (line 641) | def forward(self, x):
  class LatentRescaler (line 655) | class LatentRescaler(nn.Module):
    method __init__ (line 656) | def __init__(self, factor, in_channels, mid_channels, out_channels, de...
    method forward (line 680) | def forward(self, x):
  class MergedRescaleEncoder (line 692) | class MergedRescaleEncoder(nn.Module):
    method __init__ (line 693) | def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
    method forward (line 705) | def forward(self, x):
  class MergedRescaleDecoder (line 711) | class MergedRescaleDecoder(nn.Module):
    method __init__ (line 712) | def __init__(self, z_channels, out_ch, resolution, num_res_blocks, att...
    method forward (line 722) | def forward(self, x):
  class Upsampler (line 728) | class Upsampler(nn.Module):
    method __init__ (line 729) | def __init__(self, in_size, out_size, in_channels, out_channels, ch_mu...
    method forward (line 741) | def forward(self, x):
  class Resize (line 747) | class Resize(nn.Module):
    method __init__ (line 748) | def __init__(self, in_channels=None, learned=False, mode="bilinear"):
    method forward (line 763) | def forward(self, x, scale_factor=1.0):
  class FirstStagePostProcessor (line 770) | class FirstStagePostProcessor(nn.Module):
    method __init__ (line 772) | def __init__(self, ch_mult:list, in_channels,
    method instantiate_pretrained (line 807) | def instantiate_pretrained(self, config):
    method encode_with_pretrained (line 816) | def encode_with_pretrained(self,x):
    method forward (line 822) | def forward(self,x):

FILE: stable_diffusion/ldm/modules/diffusionmodules/openaimodel.py
  function convert_module_to_f16 (line 24) | def convert_module_to_f16(x):
  function convert_module_to_f32 (line 27) | def convert_module_to_f32(x):
  class AttentionPool2d (line 32) | class AttentionPool2d(nn.Module):
    method __init__ (line 37) | def __init__(
    method forward (line 51) | def forward(self, x):
  class TimestepBlock (line 62) | class TimestepBlock(nn.Module):
    method forward (line 68) | def forward(self, x, emb):
  class TimestepEmbedSequential (line 74) | class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    method forward (line 80) | def forward(self, x, emb, context=None):
  class Upsample (line 91) | class Upsample(nn.Module):
    method __init__ (line 100) | def __init__(self, channels, use_conv, dims=2, out_channels=None, padd...
    method forward (line 109) | def forward(self, x):
  class TransposedUpsample (line 121) | class TransposedUpsample(nn.Module):
    method __init__ (line 123) | def __init__(self, channels, out_channels=None, ks=5):
    method forward (line 130) | def forward(self,x):
  class Downsample (line 134) | class Downsample(nn.Module):
    method __init__ (line 143) | def __init__(self, channels, use_conv, dims=2, out_channels=None,paddi...
    method forward (line 158) | def forward(self, x):
  class ResBlock (line 163) | class ResBlock(TimestepBlock):
    method __init__ (line 179) | def __init__(
    method forward (line 243) | def forward(self, x, emb):
    method _forward (line 255) | def _forward(self, x, emb):
  class AttentionBlock (line 278) | class AttentionBlock(nn.Module):
    method __init__ (line 285) | def __init__(
    method forward (line 314) | def forward(self, x):
    method _forward (line 318) | def _forward(self, x):
  function count_flops_attn (line 327) | def count_flops_attn(model, _x, y):
  class QKVAttentionLegacy (line 347) | class QKVAttentionLegacy(nn.Module):
    method __init__ (line 352) | def __init__(self, n_heads):
    method forward (line 356) | def forward(self, qkv):
    method count_flops (line 375) | def count_flops(model, _x, y):
  class QKVAttention (line 379) | class QKVAttention(nn.Module):
    method __init__ (line 384) | def __init__(self, n_heads):
    method forward (line 388) | def forward(self, qkv):
    method count_flops (line 409) | def count_flops(model, _x, y):
  class UNetModel (line 413) | class UNetModel(nn.Module):
    method __init__ (line 443) | def __init__(
    method convert_to_fp16 (line 694) | def convert_to_fp16(self):
    method convert_to_fp32 (line 702) | def convert_to_fp32(self):
    method forward (line 710) | def forward(self, x, timesteps=None, context=None, y=None,**kwargs):
  class EncoderUNetModel (line 745) | class EncoderUNetModel(nn.Module):
    method __init__ (line 751) | def __init__(
    method convert_to_fp16 (line 924) | def convert_to_fp16(self):
    method convert_to_fp32 (line 931) | def convert_to_fp32(self):
    method forward (line 938) | def forward(self, x, timesteps):

FILE: stable_diffusion/ldm/modules/diffusionmodules/util.py
  function make_beta_schedule (line 21) | def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_e...
  function make_ddim_timesteps (line 46) | def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_...
  function make_ddim_sampling_parameters (line 63) | def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbos...
  function betas_for_alpha_bar (line 77) | def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.9...
  function extract_into_tensor (line 96) | def extract_into_tensor(a, t, x_shape):
  function checkpoint (line 102) | def checkpoint(func, inputs, params, flag):
  class CheckpointFunction (line 119) | class CheckpointFunction(torch.autograd.Function):
    method forward (line 121) | def forward(ctx, run_function, length, *args):
    method backward (line 131) | def backward(ctx, *output_grads):
  function timestep_embedding (line 151) | def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=Fal...
  function zero_module (line 174) | def zero_module(module):
  function scale_module (line 183) | def scale_module(module, scale):
  function mean_flat (line 192) | def mean_flat(tensor):
  function normalization (line 199) | def normalization(channels):
  class SiLU (line 209) | class SiLU(nn.Module):
    method forward (line 210) | def forward(self, x):
  class GroupNorm32 (line 214) | class GroupNorm32(nn.GroupNorm):
    method forward (line 215) | def forward(self, x):
  function conv_nd (line 218) | def conv_nd(dims, *args, **kwargs):
  function linear (line 231) | def linear(*args, **kwargs):
  function avg_pool_nd (line 238) | def avg_pool_nd(dims, *args, **kwargs):
  class HybridConditioner (line 251) | class HybridConditioner(nn.Module):
    method __init__ (line 253) | def __init__(self, c_concat_config, c_crossattn_config):
    method forward (line 258) | def forward(self, c_concat, c_crossattn):
  function noise_like (line 264) | def noise_like(shape, device, repeat=False):
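
Two helpers above are useful on their own: make_beta_schedule builds the training noise schedule and timestep_embedding produces the sinusoidal time embedding fed to the UNet's time MLP. A quick sketch; the linear_start/linear_end values are the ones used by the v1 configs in this repo:

    import torch
    from ldm.modules.diffusionmodules.util import make_beta_schedule, timestep_embedding

    betas = make_beta_schedule("linear", 1000, linear_start=0.00085, linear_end=0.0120)
    alphas_cumprod = torch.cumprod(1.0 - torch.as_tensor(betas), dim=0)   # cumulative alpha-bar

    t = torch.tensor([0, 500, 999])
    emb = timestep_embedding(t, dim=320)      # sinusoidal embedding, shape (3, 320)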

FILE: stable_diffusion/ldm/modules/distributions/distributions.py
  class AbstractDistribution (line 5) | class AbstractDistribution:
    method sample (line 6) | def sample(self):
    method mode (line 9) | def mode(self):
  class DiracDistribution (line 13) | class DiracDistribution(AbstractDistribution):
    method __init__ (line 14) | def __init__(self, value):
    method sample (line 17) | def sample(self):
    method mode (line 20) | def mode(self):
  class DiagonalGaussianDistribution (line 24) | class DiagonalGaussianDistribution(object):
    method __init__ (line 25) | def __init__(self, parameters, deterministic=False):
    method sample (line 35) | def sample(self):
    method kl (line 39) | def kl(self, other=None):
    method nll (line 53) | def nll(self, sample, dims=[1,2,3]):
    method mode (line 61) | def mode(self):
  function normal_kl (line 65) | def normal_kl(mean1, logvar1, mean2, logvar2):
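
DiagonalGaussianDistribution wraps the KL autoencoder's encoder output: mean and log-variance are stacked along the channel axis, and the class samples, takes the mode, or computes the KL term from them. A standalone sketch with an assumed 8-channel moments tensor (4 latent channels):

    import torch
    from ldm.modules.distributions.distributions import DiagonalGaussianDistribution

    moments = torch.randn(1, 8, 64, 64)        # [mean | logvar] along channels, as the KL autoencoder emits
    posterior = DiagonalGaussianDistribution(moments)

    z = posterior.sample()                     # stochastic latent, shape (1, 4, 64, 64)
    z_det = posterior.mode()                   # deterministic (mean) latent
    kl = posterior.kl()                        # per-sample KL to the standard normal, shape (1,)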

FILE: stable_diffusion/ldm/modules/ema.py
  class LitEma (line 5) | class LitEma(nn.Module):
    method __init__ (line 6) | def __init__(self, model, decay=0.9999, use_num_upates=True):
    method forward (line 25) | def forward(self,model):
    method copy_to (line 46) | def copy_to(self, model):
    method store (line 55) | def store(self, parameters):
    method restore (line 64) | def restore(self, parameters):
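
LitEma keeps an exponential moving average of a module's parameters; DDPM wraps it in the ema_scope context manager. A hedged sketch of the raw API on a toy module (the Linear layer is just a stand-in):

    import torch.nn as nn
    from ldm.modules.ema import LitEma

    net = nn.Linear(4, 4)                       # toy stand-in for the diffusion model
    ema = LitEma(net, decay=0.9999)

    # Training loop: update the shadow weights after each optimizer step.
    for _ in range(10):
        # ... optimizer.step() would go here ...
        ema(net)

    # Evaluation: temporarily swap in the EMA weights, then restore the live ones.
    ema.store(net.parameters())
    ema.copy_to(net)
    # ... run validation / sampling with EMA weights ...
    ema.restore(net.parameters())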

FILE: stable_diffusion/ldm/modules/encoders/modules.py
  class AbstractEncoder (line 12) | class AbstractEncoder(nn.Module):
    method __init__ (line 13) | def __init__(self):
    method encode (line 16) | def encode(self, *args, **kwargs):
  class ClassEmbedder (line 21) | class ClassEmbedder(nn.Module):
    method __init__ (line 22) | def __init__(self, embed_dim, n_classes=1000, key='class'):
    method forward (line 27) | def forward(self, batch, key=None):
  class TransformerEmbedder (line 36) | class TransformerEmbedder(AbstractEncoder):
    method __init__ (line 38) | def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, devic...
    method forward (line 44) | def forward(self, tokens):
    method encode (line 49) | def encode(self, x):
  class BERTTokenizer (line 53) | class BERTTokenizer(AbstractEncoder):
    method __init__ (line 55) | def __init__(self, device="cuda", vq_interface=True, max_length=77):
    method forward (line 63) | def forward(self, text):
    method encode (line 70) | def encode(self, text):
    method decode (line 76) | def decode(self, text):
  class BERTEmbedder (line 80) | class BERTEmbedder(AbstractEncoder):
    method __init__ (line 82) | def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77,
    method forward (line 93) | def forward(self, text):
    method encode (line 101) | def encode(self, text):
  class SpatialRescaler (line 106) | class SpatialRescaler(nn.Module):
    method __init__ (line 107) | def __init__(self,
    method forward (line 125) | def forward(self,x):
    method encode (line 134) | def encode(self, x):
  class FrozenCLIPEmbedder (line 137) | class FrozenCLIPEmbedder(AbstractEncoder):
    method __init__ (line 139) | def __init__(self, version="openai/clip-vit-large-patch14", device="cu...
    method freeze (line 147) | def freeze(self):
    method forward (line 152) | def forward(self, text):
    method encode (line 161) | def encode(self, text):
  class FrozenCLIPTextEmbedder (line 165) | class FrozenCLIPTextEmbedder(nn.Module):
    method __init__ (line 169) | def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n...
    method freeze (line 177) | def freeze(self):
    method forward (line 182) | def forward(self, text):
    method encode (line 189) | def encode(self, text):
  class FrozenClipImageEmbedder (line 197) | class FrozenClipImageEmbedder(nn.Module):
    method __init__ (line 201) | def __init__(
    method preprocess (line 216) | def preprocess(self, x):
    method forward (line 226) | def forward(self, x):
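
FrozenCLIPEmbedder is the text encoder used by the v1 models in this repo: it tokenizes with the Hugging Face CLIP tokenizer, runs the frozen text transformer, and returns per-token embeddings. A sketch, assuming the openai/clip-vit-large-patch14 weights can be fetched; the prompt is just an example instruction:

    import torch
    from ldm.modules.encoders.modules import FrozenCLIPEmbedder

    encoder = FrozenCLIPEmbedder(device="cpu")              # downloads openai/clip-vit-large-patch14 on first use
    with torch.no_grad():
        emb = encoder.encode(["turn him into a cyborg"])    # shape (1, 77, 768): 77 tokens, 768-dim each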

FILE: stable_diffusion/ldm/modules/image_degradation/bsrgan.py
  function modcrop_np (line 29) | def modcrop_np(img, sf):
  function analytic_kernel (line 49) | def analytic_kernel(k):
  function anisotropic_Gaussian (line 65) | def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
  function gm_blur_kernel (line 86) | def gm_blur_kernel(mean, cov, size=15):
  function shift_pixel (line 99) | def shift_pixel(x, sf, upper_left=True):
  function blur (line 128) | def blur(x, k):
  function gen_kernel (line 145) | def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]),...
  function fspecial_gaussian (line 187) | def fspecial_gaussian(hsize, sigma):
  function fspecial_laplacian (line 201) | def fspecial_laplacian(alpha):
  function fspecial (line 210) | def fspecial(filter_type, *args, **kwargs):
  function bicubic_degradation (line 228) | def bicubic_degradation(x, sf=3):
  function srmd_degradation (line 240) | def srmd_degradation(x, k, sf=3):
  function dpsr_degradation (line 262) | def dpsr_degradation(x, k, sf=3):
  function classical_degradation (line 284) | def classical_degradation(x, k, sf=3):
  function add_sharpening (line 299) | def add_sharpening(img, weight=0.5, radius=50, threshold=10):
  function add_blur (line 325) | def add_blur(img, sf=4):
  function add_resize (line 339) | def add_resize(img, sf=4):
  function add_Gaussian_noise (line 369) | def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
  function add_speckle_noise (line 386) | def add_speckle_noise(img, noise_level1=2, noise_level2=25):
  function add_Poisson_noise (line 404) | def add_Poisson_noise(img):
  function add_JPEG_noise (line 418) | def add_JPEG_noise(img):
  function random_crop (line 427) | def random_crop(lq, hq, sf=4, lq_patchsize=64):
  function degradation_bsrgan (line 438) | def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
  function degradation_bsrgan_variant (line 530) | def degradation_bsrgan_variant(image, sf=4, isp_model=None):
  function degradation_bsrgan_plus (line 617) | def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True,...

FILE: stable_diffusion/ldm/modules/image_degradation/bsrgan_light.py
  function modcrop_np (line 29) | def modcrop_np(img, sf):
  function analytic_kernel (line 49) | def analytic_kernel(k):
  function anisotropic_Gaussian (line 65) | def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
  function gm_blur_kernel (line 86) | def gm_blur_kernel(mean, cov, size=15):
  function shift_pixel (line 99) | def shift_pixel(x, sf, upper_left=True):
  function blur (line 128) | def blur(x, k):
  function gen_kernel (line 145) | def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]),...
  function fspecial_gaussian (line 187) | def fspecial_gaussian(hsize, sigma):
  function fspecial_laplacian (line 201) | def fspecial_laplacian(alpha):
  function fspecial (line 210) | def fspecial(filter_type, *args, **kwargs):
  function bicubic_degradation (line 228) | def bicubic_degradation(x, sf=3):
  function srmd_degradation (line 240) | def srmd_degradation(x, k, sf=3):
  function dpsr_degradation (line 262) | def dpsr_degradation(x, k, sf=3):
  function classical_degradation (line 284) | def classical_degradation(x, k, sf=3):
  function add_sharpening (line 299) | def add_sharpening(img, weight=0.5, radius=50, threshold=10):
  function add_blur (line 325) | def add_blur(img, sf=4):
  function add_resize (line 343) | def add_resize(img, sf=4):
  function add_Gaussian_noise (line 373) | def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
  function add_speckle_noise (line 390) | def add_speckle_noise(img, noise_level1=2, noise_level2=25):
  function add_Poisson_noise (line 408) | def add_Poisson_noise(img):
  function add_JPEG_noise (line 422) | def add_JPEG_noise(img):
  function random_crop (line 431) | def random_crop(lq, hq, sf=4, lq_patchsize=64):
  function degradation_bsrgan (line 442) | def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
  function degradation_bsrgan_variant (line 534) | def degradation_bsrgan_variant(image, sf=4, isp_model=None):

FILE: stable_diffusion/ldm/modules/image_degradation/utils_image.py
  function is_image_file (line 29) | def is_image_file(filename):
  function get_timestamp (line 33) | def get_timestamp():
  function imshow (line 37) | def imshow(x, title=None, cbar=False, figsize=None):
  function surf (line 47) | def surf(Z, cmap='rainbow', figsize=None):
  function get_image_paths (line 67) | def get_image_paths(dataroot):
  function _get_paths_from_images (line 74) | def _get_paths_from_images(path):
  function patches_from_image (line 93) | def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
  function imssave (line 112) | def imssave(imgs, img_path):
  function split_imageset (line 125) | def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_si...
  function mkdir (line 153) | def mkdir(path):
  function mkdirs (line 158) | def mkdirs(paths):
  function mkdir_and_rename (line 166) | def mkdir_and_rename(path):
  function imread_uint (line 185) | def imread_uint(path, n_channels=3):
  function imsave (line 203) | def imsave(img, img_path):
  function imwrite (line 209) | def imwrite(img, img_path):
  function read_img (line 220) | def read_img(path):
  function uint2single (line 249) | def uint2single(img):
  function single2uint (line 254) | def single2uint(img):
  function uint162single (line 259) | def uint162single(img):
  function single2uint16 (line 264) | def single2uint16(img):
  function uint2tensor4 (line 275) | def uint2tensor4(img):
  function uint2tensor3 (line 282) | def uint2tensor3(img):
  function tensor2uint (line 289) | def tensor2uint(img):
  function single2tensor3 (line 302) | def single2tensor3(img):
  function single2tensor4 (line 307) | def single2tensor4(img):
  function tensor2single (line 312) | def tensor2single(img):
  function tensor2single3 (line 320) | def tensor2single3(img):
  function single2tensor5 (line 329) | def single2tensor5(img):
  function single32tensor5 (line 333) | def single32tensor5(img):
  function single42tensor4 (line 337) | def single42tensor4(img):
  function tensor2img (line 342) | def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
  function augment_img (line 380) | def augment_img(img, mode=0):
  function augment_img_tensor4 (line 401) | def augment_img_tensor4(img, mode=0):
  function augment_img_tensor (line 422) | def augment_img_tensor(img, mode=0):
  function augment_img_np3 (line 441) | def augment_img_np3(img, mode=0):
  function augment_imgs (line 469) | def augment_imgs(img_list, hflip=True, rot=True):
  function modcrop (line 494) | def modcrop(img_in, scale):
  function shave (line 510) | def shave(img_in, border=0):
  function rgb2ycbcr (line 529) | def rgb2ycbcr(img, only_y=True):
  function ycbcr2rgb (line 553) | def ycbcr2rgb(img):
  function bgr2ycbcr (line 573) | def bgr2ycbcr(img, only_y=True):
  function channel_convert (line 597) | def channel_convert(in_c, tar_type, img_list):
  function calculate_psnr (line 621) | def calculate_psnr(img1, img2, border=0):
  function calculate_ssim (line 642) | def calculate_ssim(img1, img2, border=0):
  function ssim (line 669) | def ssim(img1, img2):
  function cubic (line 700) | def cubic(x):
  function calculate_weights_indices (line 708) | def calculate_weights_indices(in_length, out_length, scale, kernel, kern...
  function imresize (line 766) | def imresize(img, scale, antialiasing=True):
  function imresize_np (line 839) | def imresize_np(img, scale, antialiasing=True):

FILE: stable_diffusion/ldm/modules/losses/contperceptual.py
  class LPIPSWithDiscriminator (line 7) | class LPIPSWithDiscriminator(nn.Module):
    method __init__ (line 8) | def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixello...
    method calculate_adaptive_weight (line 32) | def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
    method forward (line 45) | def forward(self, inputs, reconstructions, posteriors, optimizer_idx,

FILE: stable_diffusion/ldm/modules/losses/vqperceptual.py
  function hinge_d_loss_with_exemplar_weights (line 11) | def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights):
  function adopt_weight (line 20) | def adopt_weight(weight, global_step, threshold=0, value=0.):
  function measure_perplexity (line 26) | def measure_perplexity(predicted_indices, n_embed):
  function l1 (line 35) | def l1(x, y):
  function l2 (line 39) | def l2(x, y):
  class VQLPIPSWithDiscriminator (line 43) | class VQLPIPSWithDiscriminator(nn.Module):
    method __init__ (line 44) | def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0,
    method calculate_adaptive_weight (line 85) | def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
    method forward (line 98) | def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx,

FILE: stable_diffusion/ldm/modules/x_transformer.py
  class AbsolutePositionalEmbedding (line 25) | class AbsolutePositionalEmbedding(nn.Module):
    method __init__ (line 26) | def __init__(self, dim, max_seq_len):
    method init_ (line 31) | def init_(self):
    method forward (line 34) | def forward(self, x):
  class FixedPositionalEmbedding (line 39) | class FixedPositionalEmbedding(nn.Module):
    method __init__ (line 40) | def __init__(self, dim):
    method forward (line 45) | def forward(self, x, seq_dim=1, offset=0):
  function exists (line 54) | def exists(val):
  function default (line 58) | def default(val, d):
  function always (line 64) | def always(val):
  function not_equals (line 70) | def not_equals(val):
  function equals (line 76) | def equals(val):
  function max_neg_value (line 82) | def max_neg_value(tensor):
  function pick_and_pop (line 88) | def pick_and_pop(keys, d):
  function group_dict_by_key (line 93) | def group_dict_by_key(cond, d):
  function string_begins_with (line 102) | def string_begins_with(prefix, str):
  function group_by_key_prefix (line 106) | def group_by_key_prefix(prefix, d):
  function groupby_prefix_and_trim (line 110) | def groupby_prefix_and_trim(prefix, d):
  class Scale (line 117) | class Scale(nn.Module):
    method __init__ (line 118) | def __init__(self, value, fn):
    method forward (line 123) | def forward(self, x, **kwargs):
  class Rezero (line 128) | class Rezero(nn.Module):
    method __init__ (line 129) | def __init__(self, fn):
    method forward (line 134) | def forward(self, x, **kwargs):
  class ScaleNorm (line 139) | class ScaleNorm(nn.Module):
    method __init__ (line 140) | def __init__(self, dim, eps=1e-5):
    method forward (line 146) | def forward(self, x):
  class RMSNorm (line 151) | class RMSNorm(nn.Module):
    method __init__ (line 152) | def __init__(self, dim, eps=1e-8):
    method forward (line 158) | def forward(self, x):
  class Residual (line 163) | class Residual(nn.Module):
    method forward (line 164) | def forward(self, x, residual):
  class GRUGating (line 168) | class GRUGating(nn.Module):
    method __init__ (line 169) | def __init__(self, dim):
    method forward (line 173) | def forward(self, x, residual):
  class GEGLU (line 184) | class GEGLU(nn.Module):
    method __init__ (line 185) | def __init__(self, dim_in, dim_out):
    method forward (line 189) | def forward(self, x):
  class FeedForward (line 194) | class FeedForward(nn.Module):
    method __init__ (line 195) | def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
    method forward (line 210) | def forward(self, x):
  class Attention (line 215) | class Attention(nn.Module):
    method __init__ (line 216) | def __init__(
    method forward (line 268) | def forward(
  class AttentionLayers (line 370) | class AttentionLayers(nn.Module):
    method __init__ (line 371) | def __init__(
    method forward (line 481) | def forward(
  class Encoder (line 541) | class Encoder(AttentionLayers):
    method __init__ (line 542) | def __init__(self, **kwargs):
  class TransformerWrapper (line 548) | class TransformerWrapper(nn.Module):
    method __init__ (line 549) | def __init__(
    method init_ (line 595) | def init_(self):
    method forward (line 598) | def forward(

FILE: stable_diffusion/ldm/util.py
  function log_txt_as_img (line 17) | def log_txt_as_img(wh, xc, size=10):
  function ismap (line 41) | def ismap(x):
  function isimage (line 47) | def isimage(x):
  function exists (line 53) | def exists(x):
  function default (line 57) | def default(val, d):
  function mean_flat (line 63) | def mean_flat(tensor):
  function count_params (line 71) | def count_params(model, verbose=False):
  function instantiate_from_config (line 78) | def instantiate_from_config(config):
  function get_obj_from_str (line 88) | def get_obj_from_str(string, reload=False):
  function _do_parallel_data_prefetch (line 96) | def _do_parallel_data_prefetch(func, Q, data, idx, idx_to_fn=False):
  function parallel_data_prefetch (line 108) | def parallel_data_prefetch(
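
instantiate_from_config is how models and data modules are constructed throughout this codebase: a YAML node names a target class and its params, and the helper imports and builds it. A sketch of the usual checkpoint-loading pattern built on it; the checkpoint path below is an assumption, and stable_diffusion/ is assumed to be the location of the `ldm` package:

    import sys
    import torch
    from omegaconf import OmegaConf

    sys.path.append("./stable_diffusion")                    # so the `ldm` package resolves
    from ldm.util import instantiate_from_config

    config = OmegaConf.load("configs/generate.yaml")         # any model config in this repo
    model = instantiate_from_config(config.model)

    # Assumed checkpoint path; Lightning checkpoints store weights under "state_dict".
    state = torch.load("checkpoints/instruct-pix2pix-00-22000.ckpt", map_location="cpu")
    model.load_state_dict(state["state_dict"], strict=False)
    model.eval()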

FILE: stable_diffusion/main.py
  function get_parser (line 24) | def get_parser(**parser_kwargs):
  function nondefault_trainer_args (line 126) | def nondefault_trainer_args(opt):
  class WrappedDataset (line 133) | class WrappedDataset(Dataset):
    method __init__ (line 136) | def __init__(self, dataset):
    method __len__ (line 139) | def __len__(self):
    method __getitem__ (line 142) | def __getitem__(self, idx):
  function worker_init_fn (line 146) | def worker_init_fn(_):
  class DataModuleFromConfig (line 162) | class DataModuleFromConfig(pl.LightningDataModule):
    method __init__ (line 163) | def __init__(self, batch_size, train=None, validation=None, test=None,...
    method prepare_data (line 185) | def prepare_data(self):
    method setup (line 189) | def setup(self, stage=None):
    method _train_dataloader (line 197) | def _train_dataloader(self):
    method _val_dataloader (line 207) | def _val_dataloader(self, shuffle=False):
    method _test_dataloader (line 218) | def _test_dataloader(self, shuffle=False):
    method _predict_dataloader (line 231) | def _predict_dataloader(self, shuffle=False):
  class SetupCallback (line 240) | class SetupCallback(Callback):
    method __init__ (line 241) | def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, light...
    method on_keyboard_interrupt (line 251) | def on_keyboard_interrupt(self, trainer, pl_module):
    method on_pretrain_routine_start (line 257) | def on_pretrain_routine_start(self, trainer, pl_module):
  class ImageLogger (line 289) | class ImageLogger(Callback):
    method __init__ (line 290) | def __init__(self, batch_frequency, max_images, clamp=True, increase_l...
    method _testtube (line 310) | def _testtube(self, pl_module, images, batch_idx, split):
    method log_local (line 321) | def log_local(self, save_dir, split, images,
    method log_img (line 340) | def log_img(self, pl_module, batch, batch_idx, split="train"):
    method check_frequency (line 372) | def check_frequency(self, check_idx):
    method on_train_batch_end (line 383) | def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch...
    method on_validation_batch_end (line 387) | def on_validation_batch_end(self, trainer, pl_module, outputs, batch, ...
  class CUDACallback (line 395) | class CUDACallback(Callback):
    method on_train_epoch_start (line 397) | def on_train_epoch_start(self, trainer, pl_module):
    method on_train_epoch_end (line 403) | def on_train_epoch_end(self, trainer, pl_module, outputs):
  function melk (line 697) | def melk(*args, **kwargs):
  function divein (line 705) | def divein(*args, **kwargs):

FILE: stable_diffusion/notebook_helpers.py
  function download_models (line 19) | def download_models(mode):
  function load_model_from_config (line 40) | def load_model_from_config(config, ckpt):
  function get_model (line 52) | def get_model(mode):
  function get_custom_cond (line 59) | def get_custom_cond(mode):
  function get_cond_options (line 85) | def get_cond_options(mode):
  function select_cond_path (line 92) | def select_cond_path(mode):
  function get_cond (line 107) | def get_cond(mode, selected_path):
  function visualize_cond_img (line 127) | def visualize_cond_img(path):
  function run (line 131) | def run(model, selected_path, task, custom_steps, resize_enabled=False, ...
  function convsample_ddim (line 188) | def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, n...
  function make_convolutional_sample (line 208) | def make_convolutional_sample(batch, model, mode="vanilla", custom_steps...

FILE: stable_diffusion/scripts/img2img.py
  function chunk (line 23) | def chunk(it, size):
  function load_model_from_config (line 28) | def load_model_from_config(config, ckpt, verbose=False):
  function load_img (line 48) | def load_img(path):
  function main (line 60) | def main():

FILE: stable_diffusion/scripts/inpaint.py
  function make_batch (line 11) | def make_batch(image, mask, device):

FILE: stable_diffusion/scripts/knn2img.py
  function chunk (line 36) | def chunk(it, size):
  function load_model_from_config (line 41) | def load_model_from_config(config, ckpt, verbose=False):
  class Searcher (line 61) | class Searcher(object):
    method __init__ (line 62) | def __init__(self, database, retriever_version='ViT-L/14'):
    method train_searcher (line 75) | def train_searcher(self, k,
    method load_single_file (line 91) | def load_single_file(self, saved_embeddings):
    method load_multi_files (line 96) | def load_multi_files(self, data_archive):
    method load_database (line 104) | def load_database(self):
    method load_retriever (line 123) | def load_retriever(self, version='ViT-L/14', ):
    method load_searcher (line 130) | def load_searcher(self):
    method search (line 135) | def search(self, x, k):
    method __call__ (line 163) | def __call__(self, x, n):

FILE: stable_diffusion/scripts/sample_diffusion.py
  function custom_to_pil (line 15) | def custom_to_pil(x):
  function custom_to_np (line 27) | def custom_to_np(x):
  function logs2pil (line 36) | def logs2pil(logs, keys=["sample"]):
  function convsample (line 54) | def convsample(model, shape, return_intermediates=True,
  function convsample_ddim (line 69) | def convsample_ddim(model, steps, shape, eta=1.0
  function make_convolutional_sample (line 79) | def make_convolutional_sample(model, batch_size, vanilla=False, custom_s...
  function run (line 108) | def run(model, logdir, batch_size=50, vanilla=False, custom_steps=None, ...
  function save_logs (line 143) | def save_logs(logs, path, n_saved=0, key="sample", np_path=None):
  function get_parser (line 162) | def get_parser():
  function load_model_from_config (line 220) | def load_model_from_config(config, sd):
  function load_model (line 228) | def load_model(config, ckpt, gpu, eval_mode):

FILE: stable_diffusion/scripts/tests/test_watermark.py
  function testit (line 6) | def testit(img_path):

FILE: stable_diffusion/scripts/train_searcher.py
  function search_bruteforce (line 12) | def search_bruteforce(searcher):
  function search_partioned_ah (line 16) | def search_partioned_ah(searcher, dims_per_block, aiq_threshold, reorder_k,
  function search_ah (line 24) | def search_ah(searcher, dims_per_block, aiq_threshold, reorder_k):
  function load_datapool (line 28) | def load_datapool(dpath):
  function train_searcher (line 62) | def train_searcher(opt,

FILE: stable_diffusion/scripts/txt2img.py
  function chunk (line 32) | def chunk(it, size):
  function numpy_to_pil (line 37) | def numpy_to_pil(images):
  function load_model_from_config (line 49) | def load_model_from_config(config, ckpt, verbose=False):
  function put_watermark (line 69) | def put_watermark(img, wm_encoder=None):
  function load_replacement (line 77) | def load_replacement(x):
  function check_safety (line 88) | def check_safety(x_image):
  function main (line 98) | def main():

Condensed preview — 115 files, each showing path, character count, and a content snippet (866K chars of structured content in total).
[
  {
    "path": "LICENSE",
    "chars": 1499,
    "preview": "Copyright 2023 Timothy Brooks, Aleksander Holynski, Alexei A. Efros\n\nPermission is hereby granted, free of charge, to an"
  },
  {
    "path": "README.md",
    "chars": 16760,
    "preview": "# InstructPix2Pix: Learning to Follow Image Editing Instructions\n### [Project Page](https://www.timothybrooks.com/instru"
  },
  {
    "path": "configs/generate.yaml",
    "chars": 2614,
    "preview": "# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).\n# See more de"
  },
  {
    "path": "configs/train.yaml",
    "chars": 2931,
    "preview": "# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).\n# See more de"
  },
  {
    "path": "dataset_creation/generate_img_dataset.py",
    "chars": 11145,
    "preview": "import argparse\nimport json\nimport sys\nfrom pathlib import Path\n\nimport k_diffusion\nimport numpy as np\nimport torch\nimpo"
  },
  {
    "path": "dataset_creation/generate_txt_dataset.py",
    "chars": 4407,
    "preview": "from __future__ import annotations\n\nimport json\nimport time\nfrom argparse import ArgumentParser\nfrom pathlib import Path"
  },
  {
    "path": "dataset_creation/prepare_dataset.py",
    "chars": 837,
    "preview": "import json\nfrom argparse import ArgumentParser\nfrom pathlib import Path\n\nfrom tqdm.auto import tqdm\n\n\ndef main():\n    p"
  },
  {
    "path": "dataset_creation/prepare_for_gpt.py",
    "chars": 821,
    "preview": "import json\nfrom argparse import ArgumentParser\n\nfrom generate_txt_dataset import DELIMITER_0, DELIMITER_1, STOP\n\n\ndef m"
  },
  {
    "path": "edit_app.py",
    "chars": 10599,
    "preview": "from __future__ import annotations\n\nimport math\nimport random\nimport sys\nfrom argparse import ArgumentParser\n\nimport ein"
  },
  {
    "path": "edit_cli.py",
    "chars": 4975,
    "preview": "from __future__ import annotations\n\nimport math\nimport random\nimport sys\nfrom argparse import ArgumentParser\n\nimport ein"
  },
  {
    "path": "edit_dataset.py",
    "chars": 4206,
    "preview": "from __future__ import annotations\n\nimport json\nimport math\nfrom pathlib import Path\nfrom typing import Any\n\nimport nump"
  },
  {
    "path": "environment.yaml",
    "chars": 980,
    "preview": "# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).\n# See more de"
  },
  {
    "path": "main.py",
    "chars": 29977,
    "preview": "import argparse, os, sys, datetime, glob\nimport numpy as np\nimport time\nimport torch\nimport torchvision\nimport pytorch_l"
  },
  {
    "path": "metrics/clip_similarity.py",
    "chars": 2380,
    "preview": "from __future__ import annotations\n\nimport clip\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom "
  },
  {
    "path": "metrics/compute_metrics.py",
    "chars": 8188,
    "preview": "from __future__ import annotations\n\nimport math\nimport random\nimport sys\nfrom argparse import ArgumentParser\n\nimport ein"
  },
  {
    "path": "prompt_app.py",
    "chars": 2017,
    "preview": "from __future__ import annotations\n\nfrom argparse import ArgumentParser\n\nimport datasets\nimport gradio as gr\nimport nump"
  },
  {
    "path": "scripts/download_checkpoints.sh",
    "chars": 270,
    "preview": "#!/bin/bash\n\nSCRIPT_DIR=$( cd -- \"$( dirname -- \"${BASH_SOURCE[0]}\" )\" &> /dev/null && pwd )\n\nmkdir -p $SCRIPT_DIR/../ch"
  },
  {
    "path": "scripts/download_data.sh",
    "chars": 917,
    "preview": "#!/bin/bash\n\n# Make data folder relative to script location\nSCRIPT_DIR=$( cd -- \"$( dirname -- \"${BASH_SOURCE[0]}\" )\" &>"
  },
  {
    "path": "scripts/download_pretrained_sd.sh",
    "chars": 570,
    "preview": "#!/bin/bash\n\nSCRIPT_DIR=$( cd -- \"$( dirname -- \"${BASH_SOURCE[0]}\" )\" &> /dev/null && pwd )\n\nmkdir -p $SCRIPT_DIR/../st"
  },
  {
    "path": "stable_diffusion/LICENSE",
    "chars": 14381,
    "preview": "Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors\n\nCreativeML Open RAIL-M\ndated August 22, 2022\n\nSecti"
  },
  {
    "path": "stable_diffusion/README.md",
    "chars": 12439,
    "preview": "# Stable Diffusion\n*Stable Diffusion was made possible thanks to a collaboration with [Stability AI](https://stability.a"
  },
  {
    "path": "stable_diffusion/Stable_Diffusion_v1_Model_Card.md",
    "chars": 9340,
    "preview": "# Stable Diffusion v1 Model Card\nThis model card focuses on the model associated with the Stable Diffusion model, availa"
  },
  {
    "path": "stable_diffusion/assets/results.gif.REMOVED.git-id",
    "chars": 40,
    "preview": "82b6590e670a32196093cc6333ea19e6547d07de"
  },
  {
    "path": "stable_diffusion/assets/stable-samples/img2img/upscaling-in.png.REMOVED.git-id",
    "chars": 40,
    "preview": "501c31c21751664957e69ce52cad1818b6d2f4ce"
  },
  {
    "path": "stable_diffusion/assets/stable-samples/img2img/upscaling-out.png.REMOVED.git-id",
    "chars": 40,
    "preview": "1c4bb25a779f34d86b2d90e584ac67af91bb1303"
  },
  {
    "path": "stable_diffusion/assets/stable-samples/txt2img/merged-0005.png.REMOVED.git-id",
    "chars": 40,
    "preview": "ca0a1af206555f0f208a1ab879e95efedc1b1c5b"
  },
  {
    "path": "stable_diffusion/assets/stable-samples/txt2img/merged-0006.png.REMOVED.git-id",
    "chars": 40,
    "preview": "999f3703230580e8c89e9081abd6a1f8f50896d4"
  },
  {
    "path": "stable_diffusion/assets/stable-samples/txt2img/merged-0007.png.REMOVED.git-id",
    "chars": 40,
    "preview": "af390acaf601283782d6f479d4cade4d78e30b26"
  },
  {
    "path": "stable_diffusion/assets/txt2img-preview.png.REMOVED.git-id",
    "chars": 40,
    "preview": "51ee1c235dfdc63d4c41de7d303d03730e43c33c"
  },
  {
    "path": "stable_diffusion/configs/autoencoder/autoencoder_kl_16x16x16.yaml",
    "chars": 1145,
    "preview": "model:\n  base_learning_rate: 4.5e-6\n  target: ldm.models.autoencoder.AutoencoderKL\n  params:\n    monitor: \"val/rec_loss\""
  },
  {
    "path": "stable_diffusion/configs/autoencoder/autoencoder_kl_32x32x4.yaml",
    "chars": 1140,
    "preview": "model:\n  base_learning_rate: 4.5e-6\n  target: ldm.models.autoencoder.AutoencoderKL\n  params:\n    monitor: \"val/rec_loss\""
  },
  {
    "path": "stable_diffusion/configs/autoencoder/autoencoder_kl_64x64x3.yaml",
    "chars": 1139,
    "preview": "model:\n  base_learning_rate: 4.5e-6\n  target: ldm.models.autoencoder.AutoencoderKL\n  params:\n    monitor: \"val/rec_loss\""
  },
  {
    "path": "stable_diffusion/configs/autoencoder/autoencoder_kl_8x8x64.yaml",
    "chars": 1148,
    "preview": "model:\n  base_learning_rate: 4.5e-6\n  target: ldm.models.autoencoder.AutoencoderKL\n  params:\n    monitor: \"val/rec_loss\""
  },
  {
    "path": "stable_diffusion/configs/latent-diffusion/celebahq-ldm-vq-4.yaml",
    "chars": 2028,
    "preview": "model:\n  base_learning_rate: 2.0e-06\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/configs/latent-diffusion/cin-ldm-vq-f8.yaml",
    "chars": 2360,
    "preview": "model:\n  base_learning_rate: 1.0e-06\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/configs/latent-diffusion/cin256-v2.yaml",
    "chars": 1553,
    "preview": "model:\n  base_learning_rate: 0.0001\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.00"
  },
  {
    "path": "stable_diffusion/configs/latent-diffusion/ffhq-ldm-vq-4.yaml",
    "chars": 2020,
    "preview": "model:\n  base_learning_rate: 2.0e-06\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/configs/latent-diffusion/lsun_bedrooms-ldm-vq-4.yaml",
    "chars": 2024,
    "preview": "model:\n  base_learning_rate: 2.0e-06\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/configs/latent-diffusion/lsun_churches-ldm-kl-8.yaml",
    "chars": 2284,
    "preview": "model:\n  base_learning_rate: 5.0e-5   # set to target_lr by starting main.py with '--scale_lr False'\n  target: ldm.model"
  },
  {
    "path": "stable_diffusion/configs/latent-diffusion/txt2img-1p4B-eval.yaml",
    "chars": 1614,
    "preview": "model:\n  base_learning_rate: 5.0e-05\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/configs/retrieval-augmented-diffusion/768x768.yaml",
    "chars": 1615,
    "preview": "model:\n  base_learning_rate: 0.0001\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.00"
  },
  {
    "path": "stable_diffusion/configs/stable-diffusion/v1-inference.yaml",
    "chars": 1873,
    "preview": "model:\n  base_learning_rate: 1.0e-04\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/data/example_conditioning/text_conditional/sample_0.txt",
    "chars": 20,
    "preview": "A basket of cerries\n"
  },
  {
    "path": "stable_diffusion/data/imagenet_clsidx_to_label.txt",
    "chars": 30563,
    "preview": " 0: 'tench, Tinca tinca',\n 1: 'goldfish, Carassius auratus',\n 2: 'great white shark, white shark, man-eater, man-eating "
  },
  {
    "path": "stable_diffusion/data/imagenet_train_hr_indices.p.REMOVED.git-id",
    "chars": 40,
    "preview": "b8d6d4689d2ecf32147e9cc2f5e6c50e072df26f"
  },
  {
    "path": "stable_diffusion/data/index_synset.yaml",
    "chars": 14890,
    "preview": "0: n01440764\n1: n01443537\n2: n01484850\n3: n01491361\n4: n01494475\n5: n01496331\n6: n01498041\n7: n01514668\n8: n07646067\n9: "
  },
  {
    "path": "stable_diffusion/environment.yaml",
    "chars": 734,
    "preview": "name: ldm\nchannels:\n  - pytorch\n  - defaults\ndependencies:\n  - python=3.8.5\n  - pip=20.3\n  - cudatoolkit=11.3\n  - pytorc"
  },
  {
    "path": "stable_diffusion/ldm/data/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "stable_diffusion/ldm/data/base.py",
    "chars": 693,
    "preview": "from abc import abstractmethod\nfrom torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset\n\n\nclas"
  },
  {
    "path": "stable_diffusion/ldm/data/imagenet.py",
    "chars": 15497,
    "preview": "import os, yaml, pickle, shutil, tarfile, glob\nimport cv2\nimport albumentations\nimport PIL\nimport numpy as np\nimport tor"
  },
  {
    "path": "stable_diffusion/ldm/data/lsun.py",
    "chars": 3274,
    "preview": "import os\nimport numpy as np\nimport PIL\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom torchvision impo"
  },
  {
    "path": "stable_diffusion/ldm/lr_scheduler.py",
    "chars": 3882,
    "preview": "import numpy as np\n\n\nclass LambdaWarmUpCosineScheduler:\n    \"\"\"\n    note: use with a base_lr of 1.0\n    \"\"\"\n    def __in"
  },
  {
    "path": "stable_diffusion/ldm/models/autoencoder.py",
    "chars": 17619,
    "preview": "import torch\nimport pytorch_lightning as pl\nimport torch.nn.functional as F\nfrom contextlib import contextmanager\n\nfrom "
  },
  {
    "path": "stable_diffusion/ldm/models/diffusion/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "stable_diffusion/ldm/models/diffusion/classifier.py",
    "chars": 10276,
    "preview": "import os\nimport torch\nimport pytorch_lightning as pl\nfrom omegaconf import OmegaConf\nfrom torch.nn import functional as"
  },
  {
    "path": "stable_diffusion/ldm/models/diffusion/ddim.py",
    "chars": 12797,
    "preview": "\"\"\"SAMPLING ONLY.\"\"\"\n\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\nfrom functools import partial\n\nfrom ldm.modu"
  },
  {
    "path": "stable_diffusion/ldm/models/diffusion/ddpm.py",
    "chars": 67425,
    "preview": "\"\"\"\nwild mixture of\nhttps://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e316"
  },
  {
    "path": "stable_diffusion/ldm/models/diffusion/ddpm_edit.py",
    "chars": 68306,
    "preview": "\"\"\"\nwild mixture of\nhttps://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e316"
  },
  {
    "path": "stable_diffusion/ldm/models/diffusion/dpm_solver/__init__.py",
    "chars": 37,
    "preview": "from .sampler import DPMSolverSampler"
  },
  {
    "path": "stable_diffusion/ldm/models/diffusion/dpm_solver/dpm_solver.py",
    "chars": 64057,
    "preview": "import torch\nimport torch.nn.functional as F\nimport math\n\n\nclass NoiseScheduleVP:\n    def __init__(\n            self,\n  "
  },
  {
    "path": "stable_diffusion/ldm/models/diffusion/dpm_solver/sampler.py",
    "chars": 2908,
    "preview": "\"\"\"SAMPLING ONLY.\"\"\"\n\nimport torch\n\nfrom .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver\n\n\nclass DPMSolver"
  },
  {
    "path": "stable_diffusion/ldm/models/diffusion/plms.py",
    "chars": 12450,
    "preview": "\"\"\"SAMPLING ONLY.\"\"\"\n\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\nfrom functools import partial\n\nfrom ldm.modu"
  },
  {
    "path": "stable_diffusion/ldm/modules/attention.py",
    "chars": 9174,
    "preview": "# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).\n# See more de"
  },
  {
    "path": "stable_diffusion/ldm/modules/diffusionmodules/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "stable_diffusion/ldm/modules/diffusionmodules/model.py",
    "chars": 33409,
    "preview": "# pytorch_diffusion + derived encoder decoder\nimport math\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom ein"
  },
  {
    "path": "stable_diffusion/ldm/modules/diffusionmodules/openaimodel.py",
    "chars": 34953,
    "preview": "from abc import abstractmethod\nfrom functools import partial\nimport math\nfrom typing import Iterable\n\nimport numpy as np"
  },
  {
    "path": "stable_diffusion/ldm/modules/diffusionmodules/util.py",
    "chars": 9561,
    "preview": "# adopted from\n# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py\n# and\n#"
  },
  {
    "path": "stable_diffusion/ldm/modules/distributions/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "stable_diffusion/ldm/modules/distributions/distributions.py",
    "chars": 2970,
    "preview": "import torch\nimport numpy as np\n\n\nclass AbstractDistribution:\n    def sample(self):\n        raise NotImplementedError()\n"
  },
  {
    "path": "stable_diffusion/ldm/modules/ema.py",
    "chars": 2982,
    "preview": "import torch\nfrom torch import nn\n\n\nclass LitEma(nn.Module):\n    def __init__(self, model, decay=0.9999, use_num_upates="
  },
  {
    "path": "stable_diffusion/ldm/modules/encoders/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "stable_diffusion/ldm/modules/encoders/modules.py",
    "chars": 8154,
    "preview": "import torch\nimport torch.nn as nn\nfrom functools import partial\nimport clip\nfrom einops import rearrange, repeat\nfrom t"
  },
  {
    "path": "stable_diffusion/ldm/modules/image_degradation/__init__.py",
    "chars": 208,
    "preview": "from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr\nfrom ldm.modules.image"
  },
  {
    "path": "stable_diffusion/ldm/modules/image_degradation/bsrgan.py",
    "chars": 25198,
    "preview": "# -*- coding: utf-8 -*-\n\"\"\"\n# --------------------------------------------\n# Super-Resolution\n# ------------------------"
  },
  {
    "path": "stable_diffusion/ldm/modules/image_degradation/bsrgan_light.py",
    "chars": 22238,
    "preview": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport cv2\nimport torch\n\nfrom functools import partial\nimport random\nfrom sci"
  },
  {
    "path": "stable_diffusion/ldm/modules/image_degradation/utils_image.py",
    "chars": 29022,
    "preview": "import os\nimport math\nimport random\nimport numpy as np\nimport torch\nimport cv2\nfrom torchvision.utils import make_grid\nf"
  },
  {
    "path": "stable_diffusion/ldm/modules/losses/__init__.py",
    "chars": 68,
    "preview": "from ldm.modules.losses.contperceptual import LPIPSWithDiscriminator"
  },
  {
    "path": "stable_diffusion/ldm/modules/losses/contperceptual.py",
    "chars": 5581,
    "preview": "import torch\nimport torch.nn as nn\n\nfrom taming.modules.losses.vqperceptual import *  # TODO: taming dependency yes/no?\n"
  },
  {
    "path": "stable_diffusion/ldm/modules/losses/vqperceptual.py",
    "chars": 7941,
    "preview": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom einops import repeat\n\nfrom taming.modules.discrim"
  },
  {
    "path": "stable_diffusion/ldm/modules/x_transformer.py",
    "chars": 20168,
    "preview": "\"\"\"shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers\"\"\"\nimport torch\nfrom torch import "
  },
  {
    "path": "stable_diffusion/ldm/util.py",
    "chars": 5857,
    "preview": "import importlib\n\nimport torch\nimport numpy as np\nfrom collections import abc\nfrom einops import rearrange\nfrom functool"
  },
  {
    "path": "stable_diffusion/main.py",
    "chars": 28229,
    "preview": "import argparse, os, sys, datetime, glob, importlib, csv\nimport numpy as np\nimport time\nimport torch\nimport torchvision\n"
  },
  {
    "path": "stable_diffusion/models/first_stage_models/kl-f16/config.yaml",
    "chars": 909,
    "preview": "model:\n  base_learning_rate: 4.5e-06\n  target: ldm.models.autoencoder.AutoencoderKL\n  params:\n    monitor: val/rec_loss\n"
  },
  {
    "path": "stable_diffusion/models/first_stage_models/kl-f32/config.yaml",
    "chars": 929,
    "preview": "model:\n  base_learning_rate: 4.5e-06\n  target: ldm.models.autoencoder.AutoencoderKL\n  params:\n    monitor: val/rec_loss\n"
  },
  {
    "path": "stable_diffusion/models/first_stage_models/kl-f4/config.yaml",
    "chars": 880,
    "preview": "model:\n  base_learning_rate: 4.5e-06\n  target: ldm.models.autoencoder.AutoencoderKL\n  params:\n    monitor: val/rec_loss\n"
  },
  {
    "path": "stable_diffusion/models/first_stage_models/kl-f8/config.yaml",
    "chars": 889,
    "preview": "model:\n  base_learning_rate: 4.5e-06\n  target: ldm.models.autoencoder.AutoencoderKL\n  params:\n    monitor: val/rec_loss\n"
  },
  {
    "path": "stable_diffusion/models/first_stage_models/vq-f16/config.yaml",
    "chars": 1026,
    "preview": "model:\n  base_learning_rate: 4.5e-06\n  target: ldm.models.autoencoder.VQModel\n  params:\n    embed_dim: 8\n    n_embed: 16"
  },
  {
    "path": "stable_diffusion/models/first_stage_models/vq-f4/config.yaml",
    "chars": 955,
    "preview": "model:\n  base_learning_rate: 4.5e-06\n  target: ldm.models.autoencoder.VQModel\n  params:\n    embed_dim: 3\n    n_embed: 81"
  },
  {
    "path": "stable_diffusion/models/first_stage_models/vq-f4-noattn/config.yaml",
    "chars": 978,
    "preview": "model:\n  base_learning_rate: 4.5e-06\n  target: ldm.models.autoencoder.VQModel\n  params:\n    embed_dim: 3\n    n_embed: 81"
  },
  {
    "path": "stable_diffusion/models/first_stage_models/vq-f8/config.yaml",
    "chars": 1035,
    "preview": "model:\n  base_learning_rate: 4.5e-06\n  target: ldm.models.autoencoder.VQModel\n  params:\n    embed_dim: 4\n    n_embed: 16"
  },
  {
    "path": "stable_diffusion/models/first_stage_models/vq-f8-n256/config.yaml",
    "chars": 1013,
    "preview": "model:\n  base_learning_rate: 4.5e-06\n  target: ldm.models.autoencoder.VQModel\n  params:\n    embed_dim: 4\n    n_embed: 25"
  },
  {
    "path": "stable_diffusion/models/ldm/bsr_sr/config.yaml",
    "chars": 1900,
    "preview": "model:\n  base_learning_rate: 1.0e-06\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/models/ldm/celeba256/config.yaml",
    "chars": 1599,
    "preview": "model:\n  base_learning_rate: 2.0e-06\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/models/ldm/cin256/config.yaml",
    "chars": 1862,
    "preview": "model:\n  base_learning_rate: 1.0e-06\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/models/ldm/ffhq256/config.yaml",
    "chars": 1591,
    "preview": "model:\n  base_learning_rate: 2.0e-06\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/models/ldm/inpainting_big/config.yaml",
    "chars": 1619,
    "preview": "model:\n  base_learning_rate: 1.0e-06\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/models/ldm/layout2img-openimages256/config.yaml",
    "chars": 1924,
    "preview": "model:\n  base_learning_rate: 2.0e-06\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/models/ldm/lsun_beds256/config.yaml",
    "chars": 1601,
    "preview": "model:\n  base_learning_rate: 2.0e-06\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/models/ldm/lsun_churches256/config.yaml",
    "chars": 2018,
    "preview": "model:\n  base_learning_rate: 5.0e-05\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/models/ldm/semantic_synthesis256/config.yaml",
    "chars": 1378,
    "preview": "model:\n  base_learning_rate: 1.0e-06\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/models/ldm/semantic_synthesis512/config.yaml",
    "chars": 1820,
    "preview": "model:\n  base_learning_rate: 1.0e-06\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/models/ldm/text2img256/config.yaml",
    "chars": 1831,
    "preview": "model:\n  base_learning_rate: 2.0e-06\n  target: ldm.models.diffusion.ddpm.LatentDiffusion\n  params:\n    linear_start: 0.0"
  },
  {
    "path": "stable_diffusion/notebook_helpers.py",
    "chars": 10099,
    "preview": "from torchvision.datasets.utils import download_url\nfrom ldm.util import instantiate_from_config\nimport torch\nimport os\n"
  },
  {
    "path": "stable_diffusion/scripts/download_first_stages.sh",
    "chars": 1324,
    "preview": "#!/bin/bash\nwget -O models/first_stage_models/kl-f4/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f4.zip\nwge"
  },
  {
    "path": "stable_diffusion/scripts/download_models.sh",
    "chars": 1681,
    "preview": "#!/bin/bash\nwget -O models/ldm/celeba256/celeba-256.zip https://ommer-lab.com/files/latent-diffusion/celeba.zip\nwget -O "
  },
  {
    "path": "stable_diffusion/scripts/img2img.py",
    "chars": 9181,
    "preview": "\"\"\"make variations of input image\"\"\"\n\nimport argparse, os, sys, glob\nimport PIL\nimport torch\nimport numpy as np\nfrom ome"
  },
  {
    "path": "stable_diffusion/scripts/inpaint.py",
    "chars": 3644,
    "preview": "import argparse, os, sys, glob\nfrom omegaconf import OmegaConf\nfrom PIL import Image\nfrom tqdm import tqdm\nimport numpy "
  },
  {
    "path": "stable_diffusion/scripts/knn2img.py",
    "chars": 13707,
    "preview": "import argparse, os, sys, glob\nimport clip\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom omegaconf import O"
  },
  {
    "path": "stable_diffusion/scripts/latent_imagenet_diffusion.ipynb.REMOVED.git-id",
    "chars": 40,
    "preview": "607f94fc7d3ef6d8d1627017215476d9dfc7ddc4"
  },
  {
    "path": "stable_diffusion/scripts/sample_diffusion.py",
    "chars": 9606,
    "preview": "import argparse, os, sys, glob, datetime, yaml\nimport torch\nimport time\nimport numpy as np\nfrom tqdm import trange\n\nfrom"
  },
  {
    "path": "stable_diffusion/scripts/tests/test_watermark.py",
    "chars": 357,
    "preview": "import cv2\nimport fire\nfrom imwatermark import WatermarkDecoder\n\n\ndef testit(img_path):\n    bgr = cv2.imread(img_path)\n "
  },
  {
    "path": "stable_diffusion/scripts/train_searcher.py",
    "chars": 5807,
    "preview": "import os, sys\nimport numpy as np\nimport scann\nimport argparse\nimport glob\nfrom multiprocessing import cpu_count\nfrom tq"
  },
  {
    "path": "stable_diffusion/scripts/txt2img.py",
    "chars": 11666,
    "preview": "import argparse, os, sys, glob\nimport cv2\nimport torch\nimport numpy as np\nfrom omegaconf import OmegaConf\nfrom PIL impor"
  },
  {
    "path": "stable_diffusion/setup.py",
    "chars": 233,
    "preview": "from setuptools import setup, find_packages\n\nsetup(\n    name='latent-diffusion',\n    version='0.0.1',\n    description=''"
  }
]

// ... and 1 more file (download for full content)

About this extraction

This page indexes the full source code of the timothybrooks/instruct-pix2pix GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction covers 115 files (811.3 KB), approximately 216.4k tokens, and a symbol index of 849 functions, classes, methods, constants, and types. The listing above shows each file's path, character count, and a truncated preview of its contents; the full text is available by copying the output to your clipboard or downloading it as a .txt file. Use it with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input.
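
If you want to consume this index programmatically rather than paste it into a chat, the minimal sketch below (illustrative only, not part of GitExtract) loads the JSON array shown above and separates real source files from the .REMOVED.git-id placeholders, which hold only a 40-character git blob SHA for large assets stripped from the extraction. The filename instruct_pix2pix_index.json is hypothetical and assumes the array has been saved locally.

import json
from pathlib import Path

# Hypothetical local copy of the JSON file index shown above.
INDEX_PATH = Path("instruct_pix2pix_index.json")

def load_index(path: Path) -> list[dict]:
    # Each entry has the keys "path", "chars", and "preview".
    with path.open(encoding="utf-8") as f:
        return json.load(f)

def split_entries(entries: list[dict]) -> tuple[list[dict], list[dict]]:
    # ".REMOVED.git-id" entries are placeholders holding a git blob SHA
    # for binary assets that were removed from the extraction.
    placeholders = [e for e in entries if e["path"].endswith(".REMOVED.git-id")]
    sources = [e for e in entries if not e["path"].endswith(".REMOVED.git-id")]
    return sources, placeholders

if __name__ == "__main__":
    entries = load_index(INDEX_PATH)
    sources, placeholders = split_entries(entries)
    total_chars = sum(e["chars"] for e in sources)
    print(f"{len(sources)} source files, {len(placeholders)} removed-asset placeholders")
    print(f"{total_chars} characters of source text in the index")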

Extracted by GitExtract, a free GitHub repo-to-text converter for AI, built by Nikandr Surkov.
