Repository: brandontrabucco/da-fusion
Branch: main
Commit: ea784ad0f748
Files: 186
Total size: 377.3 KB
Directory structure:
gitextract_8pms0nca/
├── .gitignore
├── .gitmodules
├── LICENSE
├── README.md
├── aggregate_embeddings.py
├── fine_tune.py
├── fine_tune_upstream.py
├── generate_augmentations.py
├── generate_images.py
├── images/
│ └── README.md
├── index.html
├── plot.py
├── plot_masking_ablation.py
├── plot_stacking_ablation.py
├── plot_stratify.py
├── scripts/
│ ├── baseline/
│ │ ├── launch_baseline_coco.sh
│ │ ├── launch_baseline_imagenet.sh
│ │ ├── launch_baseline_pascal.sh
│ │ └── launch_baseline_spurge.sh
│ ├── baseline_randaugment/
│ │ ├── launch_baseline_coco.sh
│ │ ├── launch_baseline_imagenet.sh
│ │ ├── launch_baseline_pascal.sh
│ │ └── launch_baseline_spurge.sh
│ ├── cutmix_ablation/
│ │ ├── launch_baseline_pascal.sh
│ │ ├── launch_real_guidance=0.5_pascal.sh
│ │ └── launch_textual_inversion=1.0-0.75-0.5-0.25_pascal.sh
│ ├── deit_backbone/
│ │ ├── launch_baseline_pascal.sh
│ │ ├── launch_real_guidance=0.5_pascal.sh
│ │ └── launch_textual_inversion=1.0-0.75-0.5-0.25_pascal.sh
│ ├── erase_classes/
│ │ ├── erase_caltech101_part0.sh
│ │ ├── erase_caltech101_part1.sh
│ │ ├── erase_caltech101_part10.sh
│ │ ├── erase_caltech101_part11.sh
│ │ ├── erase_caltech101_part12.sh
│ │ ├── erase_caltech101_part13.sh
│ │ ├── erase_caltech101_part14.sh
│ │ ├── erase_caltech101_part15.sh
│ │ ├── erase_caltech101_part16.sh
│ │ ├── erase_caltech101_part17.sh
│ │ ├── erase_caltech101_part18.sh
│ │ ├── erase_caltech101_part19.sh
│ │ ├── erase_caltech101_part2.sh
│ │ ├── erase_caltech101_part20.sh
│ │ ├── erase_caltech101_part3.sh
│ │ ├── erase_caltech101_part4.sh
│ │ ├── erase_caltech101_part5.sh
│ │ ├── erase_caltech101_part6.sh
│ │ ├── erase_caltech101_part7.sh
│ │ ├── erase_caltech101_part8.sh
│ │ ├── erase_caltech101_part9.sh
│ │ ├── erase_coco_part0.sh
│ │ ├── erase_coco_part1.sh
│ │ ├── erase_coco_part10.sh
│ │ ├── erase_coco_part11.sh
│ │ ├── erase_coco_part12.sh
│ │ ├── erase_coco_part13.sh
│ │ ├── erase_coco_part14.sh
│ │ ├── erase_coco_part15.sh
│ │ ├── erase_coco_part2.sh
│ │ ├── erase_coco_part3.sh
│ │ ├── erase_coco_part4.sh
│ │ ├── erase_coco_part5.sh
│ │ ├── erase_coco_part6.sh
│ │ ├── erase_coco_part7.sh
│ │ ├── erase_coco_part8.sh
│ │ ├── erase_coco_part9.sh
│ │ ├── erase_flowers102_part0.sh
│ │ ├── erase_flowers102_part1.sh
│ │ ├── erase_flowers102_part10.sh
│ │ ├── erase_flowers102_part11.sh
│ │ ├── erase_flowers102_part12.sh
│ │ ├── erase_flowers102_part13.sh
│ │ ├── erase_flowers102_part14.sh
│ │ ├── erase_flowers102_part15.sh
│ │ ├── erase_flowers102_part16.sh
│ │ ├── erase_flowers102_part17.sh
│ │ ├── erase_flowers102_part18.sh
│ │ ├── erase_flowers102_part19.sh
│ │ ├── erase_flowers102_part2.sh
│ │ ├── erase_flowers102_part20.sh
│ │ ├── erase_flowers102_part3.sh
│ │ ├── erase_flowers102_part4.sh
│ │ ├── erase_flowers102_part5.sh
│ │ ├── erase_flowers102_part6.sh
│ │ ├── erase_flowers102_part7.sh
│ │ ├── erase_flowers102_part8.sh
│ │ ├── erase_flowers102_part9.sh
│ │ ├── erase_imagenet_part0.sh
│ │ ├── erase_imagenet_part1.sh
│ │ ├── erase_imagenet_part10.sh
│ │ ├── erase_imagenet_part11.sh
│ │ ├── erase_imagenet_part12.sh
│ │ ├── erase_imagenet_part13.sh
│ │ ├── erase_imagenet_part14.sh
│ │ ├── erase_imagenet_part15.sh
│ │ ├── erase_imagenet_part16.sh
│ │ ├── erase_imagenet_part17.sh
│ │ ├── erase_imagenet_part18.sh
│ │ ├── erase_imagenet_part19.sh
│ │ ├── erase_imagenet_part2.sh
│ │ ├── erase_imagenet_part3.sh
│ │ ├── erase_imagenet_part4.sh
│ │ ├── erase_imagenet_part5.sh
│ │ ├── erase_imagenet_part6.sh
│ │ ├── erase_imagenet_part7.sh
│ │ ├── erase_imagenet_part8.sh
│ │ ├── erase_imagenet_part9.sh
│ │ ├── erase_pascal_part0.sh
│ │ ├── erase_pascal_part1.sh
│ │ ├── erase_pascal_part2.sh
│ │ ├── erase_pascal_part3.sh
│ │ ├── erase_spurge_part0.sh
│ │ └── generate_scripts.py
│ ├── fine_tuning/
│ │ ├── fine_tune_coco.sh
│ │ ├── fine_tune_imagenet.sh
│ │ ├── fine_tune_pascal.sh
│ │ └── fine_tune_spurge.sh
│ ├── fine_tuning_erasure/
│ │ ├── fine_tune_coco.sh
│ │ ├── fine_tune_imagenet.sh
│ │ ├── fine_tune_pascal.sh
│ │ └── fine_tune_spurge.sh
│ ├── masking/
│ │ ├── launch_real_guidance=0-0.5_coco.sh
│ │ ├── launch_real_guidance=0-0.5_pascal.sh
│ │ ├── launch_real_guidance=0.5-0_coco.sh
│ │ ├── launch_real_guidance=0.5-0_pascal.sh
│ │ ├── launch_textual_inversion=0-0.5_coco.sh
│ │ ├── launch_textual_inversion=0-0.5_pascal.sh
│ │ ├── launch_textual_inversion=0.5-0_coco.sh
│ │ └── launch_textual_inversion=0.5-0_pascal.sh
│ ├── num_synthetic/
│ │ ├── launch_real_guidance=0.5_pascal_class_agnostic-20.sh
│ │ ├── launch_real_guidance=0.5_pascal_class_agnostic-5.sh
│ │ ├── launch_textual_inversion=1.0-0.75-0.5-0.25_pascal-20.sh
│ │ └── launch_textual_inversion=1.0-0.75-0.5-0.25_pascal-5.sh
│ ├── real_guidance/
│ │ ├── launch_real_guidance=0.5_coco.sh
│ │ ├── launch_real_guidance=0.5_coco_class_agnostic.sh
│ │ ├── launch_real_guidance=0.5_imagenet.sh
│ │ ├── launch_real_guidance=0.5_imagenet_class_agnostic.sh
│ │ ├── launch_real_guidance=0.5_pascal.sh
│ │ ├── launch_real_guidance=0.5_pascal_class_agnostic.sh
│ │ ├── launch_real_guidance=0.5_spurge.sh
│ │ └── launch_real_guidance=0.5_spurge_class_agnostic.sh
│ ├── real_guidance_erasure/
│ │ ├── launch_real_guidance=0.5_coco.sh
│ │ ├── launch_real_guidance=0.5_imagenet.sh
│ │ ├── launch_real_guidance=0.5_pascal.sh
│ │ └── launch_real_guidance=0.5_spurge.sh
│ ├── real_guidance_randaugment/
│ │ ├── launch_real_guidance=0.5_coco.sh
│ │ ├── launch_real_guidance=0.5_imagenet.sh
│ │ ├── launch_real_guidance=0.5_pascal.sh
│ │ └── launch_real_guidance=0.5_spurge.sh
│ ├── stacking/
│ │ ├── launch_textual_inversion=1.0-0.75-0.5-0.25_coco.sh
│ │ ├── launch_textual_inversion=1.0-0.75-0.5-0.25_imagenet.sh
│ │ ├── launch_textual_inversion=1.0-0.75-0.5-0.25_pascal.sh
│ │ └── launch_textual_inversion=1.0-0.75-0.5-0.25_spurge.sh
│ ├── stacking_erasure/
│ │ ├── launch_textual_inversion=1.0-0.75-0.5-0.25_coco.sh
│ │ ├── launch_textual_inversion=1.0-0.75-0.5-0.25_imagenet.sh
│ │ ├── launch_textual_inversion=1.0-0.75-0.5-0.25_pascal.sh
│ │ └── launch_textual_inversion=1.0-0.75-0.5-0.25_spurge.sh
│ ├── stacking_randaugment/
│ │ ├── launch_textual_inversion=1.0-0.75-0.5-0.25_coco.sh
│ │ ├── launch_textual_inversion=1.0-0.75-0.5-0.25_imagenet.sh
│ │ ├── launch_textual_inversion=1.0-0.75-0.5-0.25_pascal.sh
│ │ └── launch_textual_inversion=1.0-0.75-0.5-0.25_spurge.sh
│ ├── synthetic_prob/
│ │ ├── launch_real_guidance=0.5_pascal_class_agnostic-0.3.sh
│ │ ├── launch_real_guidance=0.5_pascal_class_agnostic-0.7.sh
│ │ ├── launch_textual_inversion=1.0-0.75-0.5-0.25_pascal-0.3.sh
│ │ └── launch_textual_inversion=1.0-0.75-0.5-0.25_pascal-0.7.sh
│ └── textual_inversion/
│ ├── launch_textual_inversion=0.5_coco.sh
│ ├── launch_textual_inversion=0.5_imagenet.sh
│ ├── launch_textual_inversion=0.5_pascal.sh
│ └── launch_textual_inversion=0.5_spurge.sh
├── semantic_aug/
│ ├── __init__.py
│ ├── augmentations/
│ │ ├── __init__.py
│ │ ├── compose.py
│ │ ├── real_guidance.py
│ │ ├── textual_inversion.py
│ │ └── textual_inversion_upstream.py
│ ├── datasets/
│ │ ├── __init__.py
│ │ ├── caltech101.py
│ │ ├── coco.py
│ │ ├── flowers102.py
│ │ ├── imagenet.py
│ │ ├── pascal.py
│ │ └── spurge.py
│ ├── few_shot_dataset.py
│ └── generative_augmentation.py
├── setup.py
└── train_classifier.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# images
*.png
*.jpg
*.pdf
# experiment data
*.out
*.csv
*.pt
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
================================================
FILE: .gitmodules
================================================
[submodule "stable-diffusion"]
path = stable-diffusion
url = https://github.com/CompVis/stable-diffusion.git
[submodule "erasing"]
path = erasing
url = https://github.com/brandontrabucco/erasing.git
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2022 Brandon Trabucco
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
# Effective Data Augmentation With Diffusion Models
[Watch our talk for a quick introduction!](https://www.youtube.com/watch?v=IKDWOOWzwns)
Data augmentation is one of the most prevalent tools in deep learning, underpinning many recent advances. The standard approach to data augmentation combines simple transformations like rotations and flips to generate new images from existing ones. However, current augmentations cannot alter the high-level semantic attributes, such as animal species present in a scene, to enhance the diversity of data. We improve diversity in data augmentation with image-to-image transformations parameterized by pre-trained text-to-image diffusion models. Our method edits images using an off-the-shelf diffusion model, and generalizes to novel visual concepts from a few labelled examples.
[ICLR 2024 Manuscript](https://openreview.net/forum?id=ZWzUA9zeAg) | [Site](https://btrabuc.co/da-fusion) | [Leafy Spurge Dataset](https://leafy-spurge-dataset.github.io)
## Installation
To install the package, first create a `conda` environment.
```bash
conda create -n da-fusion python=3.7 pytorch==1.12.1 torchvision==0.13.1 cudatoolkit=11.6 -c pytorch
conda activate da-fusion
pip install diffusers["torch"] transformers pycocotools pandas matplotlib seaborn scipy
```
Then download and install the source code.
```bash
git clone git@github.com:brandontrabucco/da-fusion.git
pip install -e da-fusion
```
## Datasets
We benchmark DA-Fusion on few-shot image classification problems, including a Leafy Spurge weed recognition task and classification tasks derived from COCO and PASCAL VOC. For the latter two, we label each image with the class corresponding to the largest object in the image.
Custom datasets can be evaluated by subclassing `FewShotDataset` in `semantic_aug/few_shot_dataset.py`; a minimal sketch follows.
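As a rough guide, here is a minimal sketch of such a subclass. The interface is inferred from how the bundled datasets are used in `fine_tune.py` (a `(split, seed, examples_per_class)` constructor, a `class_names` list, and `get_image_by_idx` / `get_label_by_idx` / `get_metadata_by_idx` accessors); the class name `MyDataset`, the folder layout, and the no-argument base constructor call are illustrative assumptions, so consult `semantic_aug/few_shot_dataset.py` for the exact abstract methods.
```python
import glob
import os
import random

from PIL import Image

from semantic_aug.few_shot_dataset import FewShotDataset


class MyDataset(FewShotDataset):
    """Hypothetical dataset reading images from my-data/<split>/<class>/*.jpg."""

    class_names = ["cat", "dog"]  # replace with your own class names

    def __init__(self, split: str = "train", seed: int = 0,
                 examples_per_class: int = None, data_dir: str = "./my-data"):
        super().__init__()  # assumed no-argument base constructor
        rng = random.Random(seed)
        self.examples = []
        for label, name in enumerate(self.class_names):
            paths = sorted(glob.glob(os.path.join(data_dir, split, name, "*.jpg")))
            if examples_per_class is not None:
                # subsample a fixed few-shot split deterministically per seed
                paths = rng.sample(paths, min(examples_per_class, len(paths)))
            self.examples.extend((path, label) for path in paths)

    def __len__(self) -> int:
        return len(self.examples)

    def get_image_by_idx(self, idx: int) -> Image.Image:
        return Image.open(self.examples[idx][0]).convert("RGB")

    def get_label_by_idx(self, idx: int) -> int:
        return self.examples[idx][1]

    def get_metadata_by_idx(self, idx: int) -> dict:
        return dict(name=self.class_names[self.examples[idx][1]])
```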
## Setting Up PASCAL VOC
Data for the PASCAL VOC task is adapted from the [2012 PASCAL VOC Challenge](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar). Once this dataset has been downloaded and extracted, the PASCAL dataset class `semantic_aug/datasets/pascal.py` should be pointed to the downloaded dataset via the `PASCAL_DIR` config variable located [here](https://github.com/brandontrabucco/da-fusion/blob/main/semantic_aug/datasets/pascal.py#L14).
Ensure that `PASCAL_DIR` points to a folder containing `ImageSets`, `JPEGImages`, `SegmentationClass`, and `SegmentationObject` subfolders.
## Setting Up COCO
To setup COCO, first download the [2017 Training Images](http://images.cocodataset.org/zips/train2017.zip), the [2017 Validation Images](http://images.cocodataset.org/zips/val2017.zip), and the [2017 Train/Val Annotations](http://images.cocodataset.org/annotations/annotations_trainval2017.zip). These files should be unzipped into the following directory structure.
```
coco2017/
    train2017/
    val2017/
    annotations/
```
`COCO_DIR` located [here](https://github.com/brandontrabucco/da-fusion/blob/main/semantic_aug/datasets/coco.py#L15) should be updated to point to the location of `coco2017` on your system.
## Setting Up The Spurge Dataset
We are planning to release this dataset in the next few months. Check back for updates!
## Fine-Tuning Tokens
We perform [textual inversion](https://arxiv.org/abs/2208.01618) to adapt Stable Diffusion to the classes present in our few-shot datasets. The implementation in `fine_tune.py` is adapted from the [Diffusers](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py) example.
We wrap this script in a set of `sbatch` scripts located at `scripts/fine_tuning` for distributing experiments on a slurm cluster. These scripts perform multiple runs of Textual Inversion in parallel, subject to the number of available nodes on your slurm cluster.
If `sbatch` is not available on your system, you can run these scripts with `bash` by manually setting the environment variables that slurm would normally provide: `SLURM_ARRAY_TASK_ID` (the index of the current job) and `SLURM_ARRAY_TASK_COUNT` (the total number of jobs). For a single job, set `SLURM_ARRAY_TASK_ID=0` and `SLURM_ARRAY_TASK_COUNT=1`.
## Few-Shot Classification
Code for training image classification models using augmented images from DA-Fusion is located in `train_classifier.py`. This script accepts a number of arguments that control how the classifier is trained:
```bash
python train_classifier.py --logdir pascal-baselines/textual-inversion-0.5 \
--synthetic-dir "aug/textual-inversion-0.5/{dataset}-{seed}-{examples_per_class}" \
--dataset pascal --prompt "a photo of a {name}" \
--aug textual-inversion --guidance-scale 7.5 \
--strength 0.5 --mask 0 --inverted 0 \
--num-synthetic 10 --synthetic-probability 0.5 \
--num-trials 1 --examples-per-class 4
```
This example will train a classifier on the PASCAL VOC task, with 4 images per class, using the prompt `"a photo of a ClassX"` where the special token `ClassX` is fine-tuned (from scratch) with textual inversion. Slurm scripts that reproduce the paper are located in `scripts/textual_inversion`. Results are logged to `.csv` files based on the script argument `--logdir`.
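For intuition, `--synthetic-probability` is the chance that a training example is swapped for one of its `--num-synthetic` augmented copies. The sketch below illustrates this sampling rule; it is an illustration of the flag's meaning, not the actual code in `train_classifier.py`.
```python
import random

def sample_example(real_image, synthetic_copies, synthetic_probability=0.5):
    """Return the real image, or one of its synthetic copies.

    `synthetic_copies` holds the --num-synthetic augmentations generated
    for this example; with probability --synthetic-probability the model
    trains on one of them instead of the original.
    """
    if synthetic_copies and random.random() < synthetic_probability:
        return random.choice(synthetic_copies)
    return real_image
```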
We used a [custom plotting script](https://github.com/brandontrabucco/da-fusion/blob/main/plot.py) to generate the figures in the main paper.
## Citation
If you find our method helpful, consider citing our preprint!
```
@misc{https://doi.org/10.48550/arxiv.2302.07944,
doi = {10.48550/ARXIV.2302.07944},
url = {https://arxiv.org/abs/2302.07944},
author = {Trabucco, Brandon and Doherty, Kyle and Gurinas, Max and Salakhutdinov, Ruslan},
keywords = {Computer Vision and Pattern Recognition (cs.CV), Artificial Intelligence (cs.AI), FOS: Computer and information sciences},
title = {Effective Data Augmentation With Diffusion Models},
publisher = {arXiv},
year = {2023},
copyright = {arXiv.org perpetual, non-exclusive license}
}
```
================================================
FILE: aggregate_embeddings.py
================================================
import torch
import os
import glob
import argparse
from itertools import product
from tqdm import trange
DEFAULT_EMBED_PATH = "{dataset}-tokens/{dataset}-{seed}-{examples_per_class}.pt"
if __name__ == "__main__":
parser = argparse.ArgumentParser("Merge token files")
parser.add_argument("--num-trials", type=int, default=8)
parser.add_argument("--examples-per-class", nargs='+', type=int, default=[1, 2, 4, 8, 16])
parser.add_argument("--embed-path", type=str, default=DEFAULT_EMBED_PATH)
parser.add_argument("--input-path", type=str, default="./fine-tuned")
parser.add_argument("--dataset", type=str, default="pascal",
choices=["spurge", "imagenet", "coco", "pascal"])
args = parser.parse_args()
for seed, examples_per_class in product(
range(args.num_trials), args.examples_per_class):
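        # merge every per-class learned_embeds.bin produced by fine_tune.py for this trial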
path = os.path.join(args.input_path, (
f"{args.dataset}-{seed}-{examples_per_class}/*/learned_embeds.bin"))
merged_dict = dict()
for file in glob.glob(path):
merged_dict.update(torch.load(file))
target_path = args.embed_path.format(
dataset=args.dataset, seed=seed,
examples_per_class=examples_per_class)
os.makedirs(os.path.dirname(target_path), exist_ok=True)
torch.save(merged_dict, target_path)
================================================
FILE: fine_tune.py
================================================
import argparse
import logging
import math
import os
import gc
import shutil
import random
from pathlib import Path
from typing import Optional
from itertools import product
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch.utils.data import Dataset
from semantic_aug.datasets.coco import COCODataset
from semantic_aug.datasets.spurge import SpurgeDataset
from semantic_aug.datasets.imagenet import ImageNetDataset
from semantic_aug.datasets.pascal import PASCALDataset
import datasets
import diffusers
import PIL
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version
from diffusers.utils.import_utils import is_xformers_available
from huggingface_hub import HfFolder, Repository, whoami
# TODO: remove and import from diffusers.utils when the new version of diffusers is released
from packaging import version
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
DATASETS = {
"spurge": SpurgeDataset,
"coco": COCODataset,
"pascal": PASCALDataset,
"imagenet": ImageNetDataset
}
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
PIL_INTERPOLATION = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
PIL_INTERPOLATION = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
# ------------------------------------------------------------------------------
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.10.0.dev0")
logger = get_logger(__name__)
def save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path):
logger.info("Saving embeddings")
learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_id]
learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()}
torch.save(learned_embeds_dict, save_path)
def parse_args():
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save learned_embeds.bin every X updates steps.",
)
parser.add_argument(
"--only_save_embeds",
action="store_true",
default=False,
help="Save only the embeddings for the new concept.",
)
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
parser.add_argument(
"--output_dir",
type=str,
default="./",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
)
parser.add_argument(
"--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
)
parser.add_argument("--num_train_epochs", type=int, default=100)
parser.add_argument(
"--max_train_steps",
type=int,
default=5000,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-4,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--mixed_precision",
type=str,
default="no",
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--checkpointing_steps",
type=int,
default=500,
help=(
"Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
" training using `--resume_from_checkpoint`."
),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help=(
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
),
)
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
)
parser.add_argument("--num-trials", type=int, default=8)
parser.add_argument("--examples-per-class", nargs='+', type=int, default=[1, 2, 4, 8, 16])
parser.add_argument("--dataset", type=str, default="coco",
choices=["spurge", "imagenet", "coco", "pascal"])
parser.add_argument("--unet-ckpt", type=str, default=None)
parser.add_argument("--erase-concepts", action="store_true",
help="erase text inversion concepts first")
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
return args
imagenet_templates_small = [
"a photo of a {}",
"a rendering of a {}",
"a cropped photo of the {}",
"the photo of a {}",
"a photo of a clean {}",
"a photo of a dirty {}",
"a dark photo of the {}",
"a photo of my {}",
"a photo of the cool {}",
"a close-up photo of a {}",
"a bright photo of the {}",
"a cropped photo of a {}",
"a photo of the {}",
"a good photo of the {}",
"a photo of one {}",
"a close-up photo of the {}",
"a rendition of the {}",
"a photo of the clean {}",
"a rendition of a {}",
"a photo of a nice {}",
"a good photo of a {}",
"a photo of the nice {}",
"a photo of the small {}",
"a photo of the weird {}",
"a photo of the large {}",
"a photo of a cool {}",
"a photo of a small {}",
]
imagenet_style_templates_small = [
"a painting in the style of {}",
"a rendering in the style of {}",
"a cropped painting in the style of {}",
"the painting in the style of {}",
"a clean painting in the style of {}",
"a dirty painting in the style of {}",
"a dark painting in the style of {}",
"a picture in the style of {}",
"a cool painting in the style of {}",
"a close-up painting in the style of {}",
"a bright painting in the style of {}",
"a cropped painting in the style of {}",
"a good painting in the style of {}",
"a close-up painting in the style of {}",
"a rendition in the style of {}",
"a nice painting in the style of {}",
"a small painting in the style of {}",
"a weird painting in the style of {}",
"a large painting in the style of {}",
]
class TextualInversionDataset(Dataset):
def __init__(
self,
data_root,
tokenizer,
learnable_property="object", # [object, style]
size=512,
repeats=100,
interpolation="bicubic",
flip_p=0.5,
set="train",
placeholder_token="*",
center_crop=False,
):
self.data_root = data_root
self.tokenizer = tokenizer
self.learnable_property = learnable_property
self.size = size
self.placeholder_token = placeholder_token
self.center_crop = center_crop
self.flip_p = flip_p
self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
self.num_images = len(self.image_paths)
self._length = self.num_images
if set == "train":
self._length = self.num_images * repeats
self.interpolation = {
"linear": PIL_INTERPOLATION["linear"],
"bilinear": PIL_INTERPOLATION["bilinear"],
"bicubic": PIL_INTERPOLATION["bicubic"],
"lanczos": PIL_INTERPOLATION["lanczos"],
}[interpolation]
self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
def __len__(self):
return self._length
def __getitem__(self, i):
example = {}
image = Image.open(self.image_paths[i % self.num_images])
if not image.mode == "RGB":
image = image.convert("RGB")
placeholder_string = self.placeholder_token
text = random.choice(self.templates).format(placeholder_string)
example["input_ids"] = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids[0]
# default to score-sde preprocessing
img = np.array(image).astype(np.uint8)
if self.center_crop:
crop = min(img.shape[0], img.shape[1])
(h, w,) = (
img.shape[0],
img.shape[1],
)
img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]
image = Image.fromarray(img)
image = image.resize((self.size, self.size), resample=self.interpolation)
image = self.flip_transform(image)
image = np.array(image).astype(np.uint8)
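        # scale pixel values from [0, 255] to [-1, 1], as expected by the VAE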
image = (image / 127.5 - 1.0).astype(np.float32)
example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
return example
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
if token is None:
token = HfFolder.get_token()
if organization is None:
username = whoami(token)["name"]
return f"{username}/{model_id}"
else:
return f"{organization}/{model_id}"
def main(args):
logging_dir = os.path.join(args.output_dir, args.logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
logging_dir=logging_dir,
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
with open(os.path.join(args.output_dir, ".gitignore"), "a+") as gitignore:
    # check existing rules first so repeated runs don't duplicate them
    gitignore.seek(0)
    existing = gitignore.read()
    if "step_*" not in existing:
        gitignore.write("step_*\n")
    if "epoch_*" not in existing:
        gitignore.write("epoch_*\n")
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
# Load tokenizer
if args.tokenizer_name:
tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
elif args.pretrained_model_name_or_path:
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
# Load scheduler and models
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
text_encoder = CLIPTextModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
)
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
)
if args.unet_ckpt is not None:
unet.load_state_dict(torch.load(args.unet_ckpt))
print(f"Loaded UNET from {args.unet_ckpt}")
# Add the placeholder token in tokenizer
num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
" `placeholder_token` that is not already in the tokenizer."
)
# Convert the initializer_token, placeholder_token to ids
token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
# Check if initializer_token is a single token or a sequence of tokens
if len(token_ids) > 1:
raise ValueError("The initializer token must be a single token.")
initializer_token_id = token_ids[0]
placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token)
# Resize the token embeddings as we are adding new special tokens to the tokenizer
text_encoder.resize_token_embeddings(len(tokenizer))
# Initialise the newly added placeholder token with the embeddings of the initializer token
token_embeds = text_encoder.get_input_embeddings().weight.data
token_embeds[placeholder_token_id] = token_embeds[initializer_token_id]
# Freeze vae and unet
vae.requires_grad_(False)
unet.requires_grad_(False)
# Freeze all parameters except for the token embeddings in text encoder
text_encoder.text_model.encoder.requires_grad_(False)
text_encoder.text_model.final_layer_norm.requires_grad_(False)
text_encoder.text_model.embeddings.position_embedding.requires_grad_(False)
if args.gradient_checkpointing:
# Keep unet in train mode if we are using gradient checkpointing to save memory.
# The dropout cannot be != 0 so it doesn't matter if we are in eval or train mode.
unet.train()
text_encoder.gradient_checkpointing_enable()
unet.enable_gradient_checkpointing()
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
# Initialize the optimizer
optimizer = torch.optim.AdamW(
text_encoder.get_input_embeddings().parameters(), # only optimize the embeddings
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
# Dataset and DataLoaders creation:
train_dataset = TextualInversionDataset(
data_root=args.train_data_dir,
tokenizer=tokenizer,
size=args.resolution,
placeholder_token=args.placeholder_token,
repeats=args.repeats,
learnable_property=args.learnable_property,
center_crop=args.center_crop,
set="train",
)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
)
# Prepare everything with our `accelerator`.
text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
text_encoder, optimizer, train_dataloader, lr_scheduler
)
# For mixed precision training we cast the text_encoder and vae weights to half-precision
# as these models are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
# Move vae and unet to device and cast to weight_dtype
unet.to(accelerator.device, dtype=weight_dtype)
vae.to(accelerator.device, dtype=weight_dtype)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if accelerator.is_main_process:
accelerator.init_trackers("textual_inversion", config=vars(args))
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
global_step = 0
first_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint != "latest":
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith("checkpoint")]
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
path = dirs[-1]
accelerator.print(f"Resuming from checkpoint {path}")
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split("-")[1])
resume_global_step = global_step * args.gradient_accumulation_steps
first_epoch = resume_global_step // num_update_steps_per_epoch
resume_step = resume_global_step % num_update_steps_per_epoch
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
progress_bar.set_description("Steps")
# keep original embeddings as reference
orig_embeds_params = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight.data.clone()
for epoch in range(first_epoch, args.num_train_epochs):
text_encoder.train()
for step, batch in enumerate(train_dataloader):
# Skip steps until we reach the resumed step
if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
if step % args.gradient_accumulation_steps == 0:
progress_bar.update(1)
continue
with accelerator.accumulate(text_encoder):
# Convert images to latent space
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample().detach()
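                # 0.18215 is the Stable Diffusion VAE latent scaling factor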
latents = latents * 0.18215
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Get the text embedding for conditioning
encoder_hidden_states = text_encoder(batch["input_ids"])[0].to(dtype=weight_dtype)
# Predict the noise residual
model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
# Get the target for loss depending on the prediction type
if noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif noise_scheduler.config.prediction_type == "v_prediction":
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Let's make sure we don't update any embedding weights besides the newly added token
index_no_updates = torch.arange(len(tokenizer)) != placeholder_token_id
with torch.no_grad():
accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[
index_no_updates
] = orig_embeds_params[index_no_updates]
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
if global_step % args.save_steps == 0:
save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin")
save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path)
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
accelerator.wait_for_everyone()
if accelerator.is_main_process:
# Save the newly trained embeddings
save_path = os.path.join(args.output_dir, "learned_embeds.bin")
save_progress(text_encoder, placeholder_token_id,
accelerator, args, save_path)
accelerator.end_training()
accelerator.free_memory()
del accelerator, vae, unet, text_encoder
gc.collect()
torch.cuda.empty_cache()
if __name__ == "__main__":
args = parse_args()
output_dir = args.output_dir
    rank = int(os.environ.pop("RANK", 0))
    world_size = int(os.environ.pop("WORLD_SIZE", 1))
    device_id = rank % torch.cuda.device_count()
    torch.cuda.set_device(device_id)
print(f'Initialized process {rank} / {world_size}')
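    # enumerate every (seed, examples_per_class) trial and shard the grid across workers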
options = product(range(args.num_trials), args.examples_per_class)
options = np.array(list(options))
options = np.array_split(options, world_size)[rank]
for seed, examples_per_class in options.tolist():
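        # export this trial's few-shot images to disk so textual inversion can read them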
os.makedirs(os.path.join(output_dir, "extracted"), exist_ok=True)
train_dataset = DATASETS[
args.dataset](split="train", seed=seed,
examples_per_class=examples_per_class)
for idx in range(len(train_dataset)):
image = train_dataset.get_image_by_idx(idx)
metadata = train_dataset.get_metadata_by_idx(idx)
name = metadata["name"].replace(" ", "_")
path = f"{args.dataset}-{seed}-{examples_per_class}"
path = os.path.join(output_dir, "extracted", path, name, f"{idx}.png")
os.makedirs(os.path.dirname(path), exist_ok=True)
image.save(path)
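        # run one textual inversion job per class, each learning its own placeholder token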
for class_name in train_dataset.class_names:
formatted_name = class_name.replace(" ", "_")
dirname = f"{args.dataset}-{seed}-{examples_per_class}/{formatted_name}"
args = parse_args()
args.seed = seed
args.placeholder_token = f"<{formatted_name}>"
args.initializer_token = "the"
args.train_data_dir = os.path.join(
output_dir, "extracted", dirname)
args.output_dir = os.path.join(
output_dir, "fine-tuned", dirname)
word_name = class_name.replace(" ", "")
if args.erase_concepts: args.unet_ckpt = (
"/projects/rsalakhugroup/btrabucc/esd-models/" +
f"compvis-word_{word_name}-method_full-sg_3-ng_1-iter_1000-lr_1e-05/" +
f"diffusers-word_{word_name}-method_full-sg_3-ng_1-iter_1000-lr_1e-05.pt")
main(args)
shutil.rmtree(args.train_data_dir)
================================================
FILE: fine_tune_upstream.py
================================================
#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import math
import shutil
import os
import gc
import random
import warnings
from pathlib import Path
from itertools import product
import numpy as np
import PIL
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import create_repo, upload_folder
# TODO: remove and import from diffusers.utils when the new version of diffusers is released
from packaging import version
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
DDPMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
StableDiffusionPipeline,
UNet2DConditionModel,
)
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from semantic_aug.datasets.coco import COCODataset
from semantic_aug.datasets.spurge import SpurgeDataset
from semantic_aug.datasets.imagenet import ImageNetDataset
from semantic_aug.datasets.pascal import PASCALDataset
from semantic_aug.datasets.caltech101 import CalTech101Dataset
from semantic_aug.datasets.flowers102 import Flowers102Dataset
DATASETS = {
"spurge": SpurgeDataset,
"coco": COCODataset,
"pascal": PASCALDataset,
"imagenet": ImageNetDataset,
"caltech": CalTech101Dataset,
"flowers": Flowers102Dataset
}
if is_wandb_available():
import wandb
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
PIL_INTERPOLATION = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
PIL_INTERPOLATION = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
# ------------------------------------------------------------------------------
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.20.0.dev0")
logger = get_logger(__name__)
def save_model_card(repo_id: str, images=None, base_model: str = "", repo_folder=None):
img_str = ""
for i, image in enumerate(images):
image.save(os.path.join(repo_folder, f"image_{i}.png"))
img_str += f"\n"
yaml = f"""
---
license: creativeml-openrail-m
base_model: {base_model}
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- textual_inversion
inference: true
---
"""
model_card = f"""
# Textual inversion text2image fine-tuning - {repo_id}
These are textual inversion adaption weights for {base_model}. You can find some example images in the following. \n
{img_str}
"""
with open(os.path.join(repo_folder, "README.md"), "w") as f:
f.write(yaml + model_card)
def log_validation(text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch):
logger.info(
f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
f" {args.validation_prompt}."
)
# create pipeline (note: unet and vae are loaded again in float32)
pipeline = DiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
text_encoder=accelerator.unwrap_model(text_encoder),
tokenizer=tokenizer,
unet=unet,
vae=vae,
safety_checker=None,
revision=args.revision,
torch_dtype=weight_dtype,
)
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
# run inference
generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed)
images = []
for _ in range(args.num_validation_images):
with torch.autocast("cuda"):
image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
images.append(image)
for tracker in accelerator.trackers:
if tracker.name == "tensorboard":
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
if tracker.name == "wandb":
tracker.log(
{
"validation": [
wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images)
]
}
)
del pipeline
torch.cuda.empty_cache()
return images
def save_progress(text_encoder, placeholder_token_ids, accelerator, args, save_path):
logger.info("Saving embeddings")
learned_embeds = (
accelerator.unwrap_model(text_encoder)
.get_input_embeddings()
.weight[min(placeholder_token_ids) : max(placeholder_token_ids) + 1]
)
learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()}
torch.save(learned_embeds_dict, save_path)
def parse_args():
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save learned_embeds.bin every X updates steps.",
)
parser.add_argument(
"--save_as_full_pipeline",
action="store_true",
help="Save the complete stable diffusion pipeline.",
)
parser.add_argument(
"--num_vectors",
type=int,
default=1,
help="How many textual inversion vectors shall be used to learn the concept.",
)
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--placeholder_token",
type=str,
default=None,
help="A token to use as a placeholder for the concept.",
)
parser.add_argument(
"--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
)
parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
parser.add_argument(
"--output_dir",
type=str,
default="text-inversion-model",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution."
)
parser.add_argument(
"--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
)
parser.add_argument("--num_train_epochs", type=int, default=100)
parser.add_argument(
"--max_train_steps",
type=int,
default=5000,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-4,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--lr_num_cycles",
type=int,
default=1,
help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
)
parser.add_argument(
"--dataloader_num_workers",
type=int,
default=0,
help=(
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
),
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--mixed_precision",
type=str,
default="no",
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument(
"--validation_prompt",
type=str,
default=None,
help="A prompt that is used during validation to verify that the model is learning.",
)
parser.add_argument(
"--num_validation_images",
type=int,
default=4,
help="Number of images that should be generated during validation with `validation_prompt`.",
)
parser.add_argument(
"--validation_steps",
type=int,
default=100,
help=(
"Run validation every X steps. Validation consists of running the prompt"
" `args.validation_prompt` multiple times: `args.num_validation_images`"
" and logging the images."
),
)
parser.add_argument(
"--validation_epochs",
type=int,
default=None,
help=(
"Deprecated in favor of validation_steps. Run validation every X epochs. Validation consists of running the prompt"
" `args.validation_prompt` multiple times: `args.num_validation_images`"
" and logging the images."
),
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--checkpointing_steps",
type=int,
default=500,
help=(
"Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
" training using `--resume_from_checkpoint`."
),
)
parser.add_argument(
"--checkpoints_total_limit",
type=int,
default=None,
help=("Max number of checkpoints to store."),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help=(
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
),
)
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
)
parser.add_argument("--num-trials", type=int, default=8)
parser.add_argument("--examples-per-class", nargs='+', type=int, default=[1, 2, 4, 8, 16])
parser.add_argument("--dataset", type=str, default="coco", choices=DATASETS.keys())
parser.add_argument("--unet-ckpt", type=str, default=None)
parser.add_argument("--erase-concepts", action="store_true",
help="erase text inversion concepts first")
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
return args
imagenet_templates_small = [
"a photo of a {}",
"a rendering of a {}",
"a cropped photo of the {}",
"the photo of a {}",
"a photo of a clean {}",
"a photo of a dirty {}",
"a dark photo of the {}",
"a photo of my {}",
"a photo of the cool {}",
"a close-up photo of a {}",
"a bright photo of the {}",
"a cropped photo of a {}",
"a photo of the {}",
"a good photo of the {}",
"a photo of one {}",
"a close-up photo of the {}",
"a rendition of the {}",
"a photo of the clean {}",
"a rendition of a {}",
"a photo of a nice {}",
"a good photo of a {}",
"a photo of the nice {}",
"a photo of the small {}",
"a photo of the weird {}",
"a photo of the large {}",
"a photo of a cool {}",
"a photo of a small {}",
]
imagenet_style_templates_small = [
"a painting in the style of {}",
"a rendering in the style of {}",
"a cropped painting in the style of {}",
"the painting in the style of {}",
"a clean painting in the style of {}",
"a dirty painting in the style of {}",
"a dark painting in the style of {}",
"a picture in the style of {}",
"a cool painting in the style of {}",
"a close-up painting in the style of {}",
"a bright painting in the style of {}",
"a cropped painting in the style of {}",
"a good painting in the style of {}",
"a close-up painting in the style of {}",
"a rendition in the style of {}",
"a nice painting in the style of {}",
"a small painting in the style of {}",
"a weird painting in the style of {}",
"a large painting in the style of {}",
]
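# Illustrative note (not part of the original script): each template above is a
# format string that receives the placeholder token, e.g.
#   random.choice(imagenet_templates_small).format("<airplane>")
# might yield "a photo of a <airplane>", which the tokenizer then encodes.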
class TextualInversionDataset(Dataset):
def __init__(
self,
data_root,
tokenizer,
learnable_property="object", # [object, style]
size=512,
repeats=100,
interpolation="bicubic",
flip_p=0.5,
set="train",
placeholder_token="*",
center_crop=False,
):
self.data_root = data_root
self.tokenizer = tokenizer
self.learnable_property = learnable_property
self.size = size
self.placeholder_token = placeholder_token
self.center_crop = center_crop
self.flip_p = flip_p
self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
self.num_images = len(self.image_paths)
self._length = self.num_images
if set == "train":
self._length = self.num_images * repeats
self.interpolation = {
"linear": PIL_INTERPOLATION["linear"],
"bilinear": PIL_INTERPOLATION["bilinear"],
"bicubic": PIL_INTERPOLATION["bicubic"],
"lanczos": PIL_INTERPOLATION["lanczos"],
}[interpolation]
self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
def __len__(self):
return self._length
def __getitem__(self, i):
example = {}
image = Image.open(self.image_paths[i % self.num_images])
if not image.mode == "RGB":
image = image.convert("RGB")
placeholder_string = self.placeholder_token
text = random.choice(self.templates).format(placeholder_string)
example["input_ids"] = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids[0]
# default to score-sde preprocessing
img = np.array(image).astype(np.uint8)
        if self.center_crop:
            crop = min(img.shape[0], img.shape[1])
            h, w = img.shape[0], img.shape[1]
            img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]
image = Image.fromarray(img)
image = image.resize((self.size, self.size), resample=self.interpolation)
image = self.flip_transform(image)
image = np.array(image).astype(np.uint8)
image = (image / 127.5 - 1.0).astype(np.float32)
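        # (maps pixel values from [0, 255] to [-1, 1], the input range the VAE expects)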
example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
return example
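# Usage sketch (illustrative, not part of the original script; the path and
# token below are hypothetical):
#   dataset = TextualInversionDataset(
#       data_root="extracted/pascal-0-8/airplane", tokenizer=tokenizer,
#       size=512, repeats=100, placeholder_token="<airplane>", set="train")
#   example = dataset[0]  # {"input_ids": LongTensor of shape (77,),
#                         #  "pixel_values": float32 CHW tensor in [-1, 1]}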
def main(args):
logging_dir = os.path.join(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_config=accelerator_project_config,
)
if args.report_to == "wandb":
if not is_wandb_available():
raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
if args.push_to_hub:
repo_id = create_repo(
repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
).repo_id
# Load tokenizer
if args.tokenizer_name:
tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
elif args.pretrained_model_name_or_path:
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
# Load scheduler and models
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
text_encoder = CLIPTextModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
)
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
)
if args.unet_ckpt is not None:
unet.load_state_dict(torch.load(args.unet_ckpt))
print(f"Loaded UNET from {args.unet_ckpt}")
# Add the placeholder token in tokenizer
placeholder_tokens = [args.placeholder_token]
if args.num_vectors < 1:
raise ValueError(f"--num_vectors has to be larger or equal to 1, but is {args.num_vectors}")
# add dummy tokens for multi-vector
additional_tokens = []
for i in range(1, args.num_vectors):
additional_tokens.append(f"{args.placeholder_token}_{i}")
placeholder_tokens += additional_tokens
num_added_tokens = tokenizer.add_tokens(placeholder_tokens)
if num_added_tokens != args.num_vectors:
raise ValueError(
f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
" `placeholder_token` that is not already in the tokenizer."
)
# Convert the initializer_token, placeholder_token to ids
token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
# Check if initializer_token is a single token or a sequence of tokens
if len(token_ids) > 1:
raise ValueError("The initializer token must be a single token.")
initializer_token_id = token_ids[0]
placeholder_token_ids = tokenizer.convert_tokens_to_ids(placeholder_tokens)
# Resize the token embeddings as we are adding new special tokens to the tokenizer
text_encoder.resize_token_embeddings(len(tokenizer))
# Initialise the newly added placeholder token with the embeddings of the initializer token
token_embeds = text_encoder.get_input_embeddings().weight.data
with torch.no_grad():
for token_id in placeholder_token_ids:
token_embeds[token_id] = token_embeds[initializer_token_id].clone()
# Freeze vae and unet
vae.requires_grad_(False)
unet.requires_grad_(False)
# Freeze all parameters except for the token embeddings in text encoder
text_encoder.text_model.encoder.requires_grad_(False)
text_encoder.text_model.final_layer_norm.requires_grad_(False)
text_encoder.text_model.embeddings.position_embedding.requires_grad_(False)
if args.gradient_checkpointing:
        # Keep unet in train mode when using gradient checkpointing, to save memory.
        # Dropout is 0 in this model, so train vs. eval mode does not change the outputs.
unet.train()
text_encoder.gradient_checkpointing_enable()
unet.enable_gradient_checkpointing()
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse("0.0.16"):
                logger.warning(
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
)
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
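        # Illustrative note (not in the original script): with the linear scaling
        # rule above, e.g. learning_rate=5.0e-04, gradient_accumulation_steps=4,
        # train_batch_size=1 and a single process give an effective lr of 2.0e-03.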
# Initialize the optimizer
optimizer = torch.optim.AdamW(
text_encoder.get_input_embeddings().parameters(), # only optimize the embeddings
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
# Dataset and DataLoaders creation:
train_dataset = TextualInversionDataset(
data_root=args.train_data_dir,
tokenizer=tokenizer,
size=args.resolution,
placeholder_token=args.placeholder_token,
repeats=args.repeats,
learnable_property=args.learnable_property,
center_crop=args.center_crop,
set="train",
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers
)
if args.validation_epochs is not None:
        warnings.warn(
            f"FutureWarning: you are logging with validation_epochs={args.validation_epochs}."
            " `validation_epochs` is deprecated in favor of `validation_steps`."
            f" Setting `args.validation_steps` to {args.validation_epochs * len(train_dataset)}.",
FutureWarning,
stacklevel=2,
)
args.validation_steps = args.validation_epochs * len(train_dataset)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
num_training_steps=args.max_train_steps * accelerator.num_processes,
num_cycles=args.lr_num_cycles,
)
# Prepare everything with our `accelerator`.
text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
text_encoder, optimizer, train_dataloader, lr_scheduler
)
    # For mixed precision training we cast all non-trainable weights (vae and unet) to half-precision,
    # as these weights are only used for inference; keeping them in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
# Move vae and unet to device and cast to weight_dtype
unet.to(accelerator.device, dtype=weight_dtype)
vae.to(accelerator.device, dtype=weight_dtype)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# We need to initialize the trackers we use, and also store our configuration.
    # The trackers are initialized automatically on the main process.
if accelerator.is_main_process:
accelerator.init_trackers("textual_inversion", config=vars(args))
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
global_step = 0
first_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint != "latest":
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith("checkpoint")]
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
path = dirs[-1] if len(dirs) > 0 else None
if path is None:
accelerator.print(
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
)
args.resume_from_checkpoint = None
else:
accelerator.print(f"Resuming from checkpoint {path}")
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split("-")[1])
resume_global_step = global_step * args.gradient_accumulation_steps
first_epoch = global_step // num_update_steps_per_epoch
resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
progress_bar.set_description("Steps")
# keep original embeddings as reference
orig_embeds_params = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight.data.clone()
for epoch in range(first_epoch, args.num_train_epochs):
text_encoder.train()
for step, batch in enumerate(train_dataloader):
# Skip steps until we reach the resumed step
if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
if step % args.gradient_accumulation_steps == 0:
progress_bar.update(1)
continue
with accelerator.accumulate(text_encoder):
# Convert images to latent space
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample().detach()
latents = latents * vae.config.scaling_factor
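                # (for Stable Diffusion v1 models, vae.config.scaling_factor is
                # 0.18215, normalizing the latents to roughly unit variance)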
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Get the text embedding for conditioning
encoder_hidden_states = text_encoder(batch["input_ids"])[0].to(dtype=weight_dtype)
# Predict the noise residual
model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
# Get the target for loss depending on the prediction type
if noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif noise_scheduler.config.prediction_type == "v_prediction":
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Let's make sure we don't update any embedding weights besides the newly added token
index_no_updates = torch.ones((len(tokenizer),), dtype=torch.bool)
index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False
with torch.no_grad():
accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[
index_no_updates
] = orig_embeds_params[index_no_updates]
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
images = []
progress_bar.update(1)
global_step += 1
if global_step % args.save_steps == 0:
save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin")
save_progress(text_encoder, placeholder_token_ids, accelerator, args, save_path)
if accelerator.is_main_process:
if global_step % args.checkpointing_steps == 0:
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
if args.checkpoints_total_limit is not None:
checkpoints = os.listdir(args.output_dir)
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
# before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
if len(checkpoints) >= args.checkpoints_total_limit:
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
)
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
accelerator.save_state(save_path)
logger.info(f"Saved state to {save_path}")
if args.validation_prompt is not None and global_step % args.validation_steps == 0:
images = log_validation(
text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch
)
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
# Create the pipeline using the trained modules and save it.
accelerator.wait_for_everyone()
if accelerator.is_main_process:
if args.push_to_hub and not args.save_as_full_pipeline:
logger.warn("Enabling full model saving because --push_to_hub=True was specified.")
save_full_model = True
else:
save_full_model = args.save_as_full_pipeline
if save_full_model:
pipeline = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
text_encoder=accelerator.unwrap_model(text_encoder),
vae=vae,
unet=unet,
tokenizer=tokenizer,
)
pipeline.save_pretrained(args.output_dir)
# Save the newly trained embeddings
save_path = os.path.join(args.output_dir, "learned_embeds.bin")
save_progress(text_encoder, placeholder_token_ids, accelerator, args, save_path)
if args.push_to_hub:
save_model_card(
repo_id,
images=images,
base_model=args.pretrained_model_name_or_path,
repo_folder=args.output_dir,
)
upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
ignore_patterns=["step_*", "epoch_*"],
)
accelerator.end_training()
accelerator.free_memory()
del accelerator, vae, unet, text_encoder
gc.collect()
torch.cuda.empty_cache()
if __name__ == "__main__":
args = parse_args()
output_dir = args.output_dir
rank = int(os.environ.pop("RANK", 0))
world_size = int(os.environ.pop("WORLD_SIZE", 1))
    device_id = rank % torch.cuda.device_count()
    torch.cuda.set_device(device_id)
print(f'Initialized process {rank} / {world_size}')
class_names = DATASETS[args.dataset].class_names
options = list(product(
range(args.num_trials),
args.examples_per_class,
class_names))
print(f"{len(options)} Total Options")
options_idx = np.arange(len(options))
options_idx = np.array_split(options_idx, world_size)[rank]
options = [options[idx] for idx in options_idx]
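    # Illustrative note (not in the original script): np.array_split shards the
    # option list across ranks, e.g. 100 options with world_size=4 give each
    # rank a contiguous block of 25.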
for seed, examples_per_class, class_name in options:
os.makedirs(os.path.join(output_dir, "extracted"), exist_ok=True)
train_dataset = DATASETS[
args.dataset](split="train", seed=seed,
examples_per_class=examples_per_class)
for idx in range(len(train_dataset)):
image = train_dataset.get_image_by_idx(idx)
metadata = train_dataset.get_metadata_by_idx(idx)
if metadata["name"] == class_name:
name = metadata["name"].replace(" ", "_")
path = f"{args.dataset}-{seed}-{examples_per_class}"
path = os.path.join(output_dir, "extracted", path, name, f"{idx}.png")
os.makedirs(os.path.dirname(path), exist_ok=True)
image.save(path)
formatted_name = class_name.replace(" ", "_")
dirname = f"{args.dataset}-{seed}-{examples_per_class}/{formatted_name}"
args = parse_args()
args.seed = seed
args.placeholder_token = f"<{formatted_name}>"
args.train_data_dir = os.path.join(
output_dir, "extracted", dirname)
args.output_dir = os.path.join(
output_dir, "fine-tuned", dirname)
word_name = class_name.replace(" ", "")
if args.erase_concepts: args.unet_ckpt = (
"/projects/rsalakhugroup/btrabucc/esd-models/" +
f"compvis-word_{word_name}-method_full-sg_3-ng_1-iter_1000-lr_1e-05/" +
f"diffusers-word_{word_name}-method_full-sg_3-ng_1-iter_1000-lr_1e-05.pt")
main(args)
shutil.rmtree(args.train_data_dir)
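# Example invocation (illustrative; assuming this script is fine_tune.py, with
# hypothetical flag values; --output_dir comes from the upstream textual
# inversion argument parser defined earlier in this file):
#   python fine_tune.py --dataset=pascal --num-trials=8 \
#       --examples-per-class 1 2 4 8 16 --output_dir=./fine-tuned-tokens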
================================================
FILE: generate_augmentations.py
================================================
from semantic_aug.datasets.coco import COCODataset
from semantic_aug.datasets.spurge import SpurgeDataset
from semantic_aug.datasets.imagenet import ImageNetDataset
from semantic_aug.datasets.pascal import PASCALDataset
from semantic_aug.augmentations.compose import ComposeParallel
from semantic_aug.augmentations.compose import ComposeSequential
from semantic_aug.augmentations.real_guidance import RealGuidance
from semantic_aug.augmentations.textual_inversion import TextualInversion
from diffusers import StableDiffusionPipeline
from itertools import product
from torch import autocast
from PIL import Image
from tqdm import tqdm
import os
import torch
import argparse
import numpy as np
import random
DATASETS = {
"spurge": SpurgeDataset,
"coco": COCODataset,
"pascal": PASCALDataset,
"imagenet": ImageNetDataset
}
COMPOSE = {
"parallel": ComposeParallel,
"sequential": ComposeSequential
}
AUGMENT = {
"real-guidance": RealGuidance,
"textual-inversion": TextualInversion
}
if __name__ == "__main__":
parser = argparse.ArgumentParser("Inference script")
parser.add_argument("--out", type=str, default="real-guidance/")
parser.add_argument("--model-path", type=str, default="CompVis/stable-diffusion-v1-4")
parser.add_argument("--embed-path", type=str, default="erasure-tokens/pascal-tokens/pascal-0-8.pt")
parser.add_argument("--dataset", type=str, default="pascal")
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--examples-per-class", type=int, default=1)
parser.add_argument("--num-synthetic", type=int, default=10)
parser.add_argument("--prompt", type=str, default="a photo of a {name}")
parser.add_argument("--aug", nargs="+", type=str, default=["real-guidance"],
choices=["real-guidance", "textual-inversion"])
parser.add_argument("--guidance-scale", nargs="+", type=float, default=[7.5])
parser.add_argument("--strength", nargs="+", type=float, default=[0.5])
parser.add_argument("--mask", nargs="+", type=int, default=[0], choices=[0, 1])
parser.add_argument("--inverted", nargs="+", type=int, default=[0], choices=[0, 1])
parser.add_argument("--probs", nargs="+", type=float, default=None)
parser.add_argument("--compose", type=str, default="parallel",
choices=["parallel", "sequential"])
parser.add_argument("--class-name", type=str, default=None)
parser.add_argument("--erasure-ckpt-path", type=str, default=None)
args = parser.parse_args()
os.makedirs(args.out, exist_ok=True)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
    aug = COMPOSE[args.compose]([
        AUGMENT[aug_name](
            embed_path=args.embed_path,
            model_path=args.model_path,
            prompt=args.prompt,
            strength=strength,
            guidance_scale=guidance_scale,
            mask=mask,
            inverted=inverted,
            erasure_ckpt_path=args.erasure_ckpt_path
        )
        for (aug_name, guidance_scale,
             strength, mask, inverted) in zip(
            args.aug, args.guidance_scale,
            args.strength, args.mask, args.inverted
        )
    ], probs=args.probs)
train_dataset = DATASETS[
args.dataset](split="train", seed=args.seed,
examples_per_class=args.examples_per_class)
options = product(range(len(train_dataset)), range(args.num_synthetic))
for idx, num in tqdm(list(
options), desc="Generating Augmentations"):
image = train_dataset.get_image_by_idx(idx)
label = train_dataset.get_label_by_idx(idx)
metadata = train_dataset.get_metadata_by_idx(idx)
if args.class_name is not None:
if metadata["name"] != args.class_name: continue
image, label = aug(
image, label, metadata)
name = metadata['name'].replace(" ", "_")
        pil_image = image
        out_path = os.path.join(
            args.out, f"{name}-{idx}-{num}.png")
        pil_image.save(out_path)
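# Example invocation (illustrative values only; every flag is defined in the
# argument parser above):
#   python generate_augmentations.py --dataset=pascal --seed=0 \
#       --examples-per-class=4 --num-synthetic=10 --aug textual-inversion \
#       --guidance-scale 7.5 --strength 0.5 --mask 0 --inverted 0 \
#       --embed-path=tokens/pascal-0-4.pt --out=aug/pascal-0-4/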
================================================
FILE: generate_images.py
================================================
from semantic_aug.augmentations.textual_inversion import TextualInversion
from diffusers import StableDiffusionPipeline
from itertools import product
from torch import autocast
from PIL import Image
from tqdm import trange
import os
import torch
import argparse
import numpy as np
import random
DEFAULT_ERASURE_CKPT = (
"/projects/rsalakhugroup/btrabucc/esd-models/" +
"compvis-word_airplane-method_full-sg_3-ng_1-iter_1000-lr_1e-05/" +
"diffusers-word_airplane-method_full-sg_3-ng_1-iter_1000-lr_1e-05.pt")
if __name__ == "__main__":
parser = argparse.ArgumentParser("Stable Diffusion inference script")
parser.add_argument("--model-path", type=str, default="CompVis/stable-diffusion-v1-4")
parser.add_argument("--embed-path", type=str, default=(
"erasure-tokens/fine-tuned/pascal-0-8/airplane/learned_embeds.bin"))
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--num-generate", type=int, default=10)
parser.add_argument("--prompt", type=str, default="a photo of a <airplane>")
parser.add_argument("--out", type=str, default="erasure-tokens/fine-tuned/pascal-0-8/airplane/")
parser.add_argument("--guidance-scale", type=float, default=7.5)
parser.add_argument("--erasure-ckpt-name", type=str, default=DEFAULT_ERASURE_CKPT)
args = parser.parse_args()
os.makedirs(args.out, exist_ok=True)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
pipe = StableDiffusionPipeline.from_pretrained(
args.model_path, use_auth_token=True,
revision="fp16",
torch_dtype=torch.float16
).to('cuda')
aug = TextualInversion(args.embed_path, model_path=args.model_path)
pipe.tokenizer = aug.pipe.tokenizer
pipe.text_encoder = aug.pipe.text_encoder
pipe.set_progress_bar_config(disable=True)
pipe.safety_checker = None
if args.erasure_ckpt_name is not None:
pipe.unet.load_state_dict(torch.load(
args.erasure_ckpt_name, map_location='cuda'))
for idx in trange(args.num_generate,
desc="Generating Images"):
with autocast('cuda'):
image = pipe(
args.prompt,
guidance_scale=args.guidance_scale
).images[0]
image.save(os.path.join(args.out, f"{idx}.png"))
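# Example invocation (illustrative; the default --embed-path and
# --erasure-ckpt-name above point at cluster-specific files):
#   python generate_images.py --prompt="a photo of a <airplane>" \
#       --num-generate=10 --guidance-scale=7.5 --out=generated/airplane/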
================================================
FILE: images/README.md
================================================
================================================
FILE: index.html
================================================
<!DOCTYPE html><html lang="en" itemscope itemtype="http://schema.org/WebPage"><head><meta charset="utf-8"><script nonce="a9BH-X5QD4qdoD3whwiuBw">var DOCS_timing={}; DOCS_timing['sl']=new Date().getTime();</script><script nonce="a9BH-X5QD4qdoD3whwiuBw">function _DumpException(e) {throw e;}</script><script nonce="a9BH-X5QD4qdoD3whwiuBw">_docs_flag_initialData={"atari-emtpr":false,"atari-ebidm":true,"atari-ebids":true,"atari-edtm":true,"atari-eibrm":false,"atari-ectm":false,"atari-ects":false,"docs-text-elei":false,"docs-text-usc":true,"atari-bae":false,"docs-text-eessmkc":false,"docs-text-emtps":false,"docs-text-etsrdpn":false,"docs-text-etsrds":false,"docs-text-erdfs":false,"docs-text-encps":false,"docs-text-endes":false,"docs-text-escpv":true,"docs-text-ecfs":false,"docs-text-ecis":false,"docs-text-eessips":false,"docs-text-edctzs":false,"docs-text-eetxpc":false,"docs-text-eetxp":false,"docs-text-lns":false,"docs-text-edhcfs":true,"docs-text-ertkmcp":true,"docs-text-ettctvs":false,"docs-text-issermps":false,"docs-text-emscts":false,"docs-etshc":false,"docs-text-tbcb":2.0E7,"docs-text-ftls":true,"docs-efsmsdl":false,"docs-euoftm":false,"docs-text-etb":false,"docs-text-esbefr":false,"docs-text-etof":false,"docs-text-ipi":false,"docs-text-ehlb":false,"docs-text-epa":true,"docs-text-ecls":true,"docs-text-dwit":false,"docs-text-elawp":false,"docs-eec":false,"docs-ecot":"","docs-text-enbcr":false,"docs-text-svofc":false,"docs-sup":"","docs-eldi":false,"docs-dli":false,"docs-liap":"/logImpressions","ilcm":{"eui":"AHKXmL1v_PW0AhcXt6BpBW2jQrg8Oghi_CtVSF_bwn67w6hgFSgUc9r_UTzuuxDr7ST4iTjf_sC4","je":1,"sstu":1703046087531681,"si":"CKa51YiVnYMDFaBLqwIdT8QMkA","gsc":null,"ei":[5703839,5704621,5706832,5706836,5707711,5735808,5737802,5738531,5740816,5743126,5746994,5747263,5748031,5752696,5753331,5754231,5755098,5758825,5760350,5762261,5764270,5765553,5766779,5767853,5770437,5773680,5774096,5774349,5774854,5776519,5777196,5783803,5784949,5784969,5791301,5791784,5792686,5796153,5796475,5797293,14101306,14101502,14101510,14101534,49372444,49375323,49376002,49376338,49378890,49451560,49453046,49472072,49512374,49517792,49612442,49613709,49622832,49623182,49624081,49644024,49765383,49769346,49816166,49822930,49823173,49824164,49833471,49839580,49842864,49924715,50082749,50127541,50166960,50168316,50221729,50266231,50273537,50293697,50335898,50360149,50390166,50492351,50520322,50529112,50533185,50580253,50606356,70979411,70983144,71035309,71085250,71102133,71119967,71152134,71178681,71185179,71197835,71230234,71238955,71241074,71260351,71273598,71286030,71289155,71301339,71330602,71346961,71382647,71396894,71401154,71407394,71444154,71471151,71471883,71480305,71528086,71528606,71530092,71531296,71537707,71558038,71624116,71625589,71641922,71659822,71671627],"crc":0,"cvi":[]},"docs-ccdil":false,"docs-eil":true,"info_params":{"token":"AHL0AtKjtWA7F_ipTj5iNvZa4HtBjmPZSA:1703046087403"},"buildLabel":"editors.sites-viewer-frontend_20231212.02_p0","docs-show_debug_info":false,"atari-jefp":"/_/view/jserror","docs-jern":"view","atari-rhpp":"/_/view","docs-ecuach":false,"docs-cclt":2033,"docs-ecci":true,"docs-esi":false,"docs-efypr":true,"docs-eyprp":false,"docs-eytpgcv":1}; _docs_flag_cek= null ; if (window['DOCS_timing']) {DOCS_timing['ifdld']=new Date().getTime();}</script><meta name="viewport" content="width=device-width, initial-scale=1"><meta http-equiv="X-UA-Compatible" content="IE=edge"><meta name="referrer" content="strict-origin-when-cross-origin"><link rel="icon" 
href="https://ssl.gstatic.com/atari/images/public/favicon.ico"><meta property="og:title" content="DA-Fusion"><meta property="og:type" content="website"><meta property="og:url" content="https://sites.google.com/btrabucco.com/da-fusion/home"><meta property="og:description" content="Paper: arXiv | Code: GitHub
"><meta itemprop="name" content="DA-Fusion"><meta itemprop="description" content="Paper: arXiv | Code: GitHub
"><meta itemprop="url" content="https://sites.google.com/btrabucco.com/da-fusion/home"><meta itemprop="thumbnailUrl" content="https://lh5.googleusercontent.com/mTrJFRXLoOsce_2zNf1rSofnhmI3zU0oBAU09nT9cl5uj_KakpWpEkR99OXH3MQQT_VnP46NNY56Khcl3pMA5ybQ3yAUXw7CpN2Ndh_XJ5VYtGG6eQXe0EfeYnx-4qym3g=w1280"><meta itemprop="image" content="https://lh5.googleusercontent.com/mTrJFRXLoOsce_2zNf1rSofnhmI3zU0oBAU09nT9cl5uj_KakpWpEkR99OXH3MQQT_VnP46NNY56Khcl3pMA5ybQ3yAUXw7CpN2Ndh_XJ5VYtGG6eQXe0EfeYnx-4qym3g=w1280"><meta itemprop="imageUrl" content="https://lh5.googleusercontent.com/mTrJFRXLoOsce_2zNf1rSofnhmI3zU0oBAU09nT9cl5uj_KakpWpEkR99OXH3MQQT_VnP46NNY56Khcl3pMA5ybQ3yAUXw7CpN2Ndh_XJ5VYtGG6eQXe0EfeYnx-4qym3g=w1280"><meta property="og:image" content="https://lh5.googleusercontent.com/mTrJFRXLoOsce_2zNf1rSofnhmI3zU0oBAU09nT9cl5uj_KakpWpEkR99OXH3MQQT_VnP46NNY56Khcl3pMA5ybQ3yAUXw7CpN2Ndh_XJ5VYtGG6eQXe0EfeYnx-4qym3g=w1280"><link href="https://fonts.googleapis.com/css?family=Lato%3A300%2C300italic%2C400%2C400italic%2C700%2C700italic&display=swap" rel="stylesheet" nonce="k7EAwqdvbMKbMpxLLtxMIg"><link href="https://fonts.googleapis.com/css?family=Google+Sans:400,500|Roboto:300,400,500,700|Source+Code+Pro:400,700&display=swap" rel="stylesheet" nonce="k7EAwqdvbMKbMpxLLtxMIg"><link href="https://fonts.googleapis.com/css?family=Lora%3Ai%2Cbi%2C700%2C400%7CSource%20Code%20Pro%3Ai%2Cbi%2C700%2C400&display=swap" rel="stylesheet" nonce="k7EAwqdvbMKbMpxLLtxMIg"><style nonce="k7EAwqdvbMKbMpxLLtxMIg">@media only screen and (max-width: 479px){.jgG6ef{font-size: 17.0pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.jgG6ef{font-size: 17.0pt;}}@media only screen and (min-width: 768px) and (max-width: 1279px){.jgG6ef{font-size: 18.0pt;}}@media only screen and (min-width: 1280px){.jgG6ef{font-size: 18.0pt;}}@media only screen and (max-width: 479px){.RijTuc{font-size: 25.0pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.RijTuc{font-size: 30.0pt;}}@media only screen and (min-width: 768px) and (max-width: 1279px){.RijTuc{font-size: 34.0pt;}}@media only screen and (min-width: 1280px){.RijTuc{font-size: 34.0pt;}}@media only screen and (max-width: 479px){.puwcIf{font-size: 20.0pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.puwcIf{font-size: 22.0pt;}}@media only screen and (min-width: 768px) and (max-width: 1279px){.puwcIf{font-size: 24.0pt;}}@media only screen and (min-width: 1280px){.puwcIf{font-size: 24.0pt;}}</style><link rel="stylesheet" href="https://www.gstatic.com/_/atari/_/ss/k=atari.vw.RdwxJhNMYZs.L.W.O/am=gAE/d=1/rs=AGEqA5k0HgViAOMqGAcxmPPLYhFps6gwmA" data-id="_cl" nonce="k7EAwqdvbMKbMpxLLtxMIg"><script nonce="a9BH-X5QD4qdoD3whwiuBw"></script><title>DA-Fusion</title><style jsname="ptDGoc" nonce="k7EAwqdvbMKbMpxLLtxMIg">.M63kCb{background-color: rgba(255,255,255,1);}.OUGEr{color: rgba(33,33,33,1);}.duRjpb .OUGEr{color: rgba(34,110,147,1);}.JYVBee .OUGEr{color: rgba(34,110,147,1);}.OmQG5e .OUGEr{color: rgba(33,33,33,1);}.iwQgFb{background-color: rgba(0,0,0,0.150000006);}.ySLm4c{font-family: Lato, sans-serif;}.CbiMKe{background-color: rgba(30,108,147,1);}.qeLZfd .zfr3Q{color: rgba(33,33,33,1);}.qeLZfd .qnVSj{color: rgba(33,33,33,1);}.qeLZfd .Glwbz{color: rgba(33,33,33,1);}.qeLZfd .duRjpb{color: rgba(34,110,147,1);}.qeLZfd .qLrapd{color: rgba(34,110,147,1);}.qeLZfd .JYVBee{color: rgba(34,110,147,1);}.qeLZfd .aHM7ed{color: rgba(34,110,147,1);}.qeLZfd .OmQG5e{color: rgba(33,33,33,1);}.qeLZfd .NHD4Gf{color: rgba(33,33,33,1);}.qeLZfd .aw5Odc{color: 
rgba(0,101,128,1);}.qeLZfd .dhtgD:hover{color: rgba(0,0,0,1);}.qeLZfd .dhtgD:visited{color: rgba(0,101,128,1);}.qeLZfd .iwQgFb{background-color: rgba(0,0,0,0.150000006);}.qeLZfd .OUGEr{color: rgba(33,33,33,1);}.qeLZfd .duRjpb .OUGEr{color: rgba(34,110,147,1);}.qeLZfd .JYVBee .OUGEr{color: rgba(34,110,147,1);}.qeLZfd .OmQG5e .OUGEr{color: rgba(33,33,33,1);}.qeLZfd:before{background-color: rgba(242,242,242,1); display: block;}.lQAHbd .zfr3Q{color: rgba(255,255,255,1);}.lQAHbd .qnVSj{color: rgba(255,255,255,1);}.lQAHbd .Glwbz{color: rgba(255,255,255,1);}.lQAHbd .duRjpb{color: rgba(255,255,255,1);}.lQAHbd .qLrapd{color: rgba(255,255,255,1);}.lQAHbd .JYVBee{color: rgba(255,255,255,1);}.lQAHbd .aHM7ed{color: rgba(255,255,255,1);}.lQAHbd .OmQG5e{color: rgba(255,255,255,1);}.lQAHbd .NHD4Gf{color: rgba(255,255,255,1);}.lQAHbd .aw5Odc{color: rgba(255,255,255,1);}.lQAHbd .dhtgD:hover{color: rgba(255,255,255,1);}.lQAHbd .dhtgD:visited{color: rgba(255,255,255,1);}.lQAHbd .iwQgFb{background-color: rgba(255,255,255,0.150000006);}.lQAHbd .OUGEr{color: rgba(255,255,255,1);}.lQAHbd .duRjpb .OUGEr{color: rgba(255,255,255,1);}.lQAHbd .JYVBee .OUGEr{color: rgba(255,255,255,1);}.lQAHbd .OmQG5e .OUGEr{color: rgba(255,255,255,1);}.lQAHbd .CbiMKe{background-color: rgba(255,255,255,1);}.lQAHbd:before{background-color: rgba(30,108,147,1); display: block;}.cJgDec .zfr3Q{color: rgba(255,255,255,1);}.cJgDec .zfr3Q .OUGEr{color: rgba(255,255,255,1);}.cJgDec .qnVSj{color: rgba(255,255,255,1);}.cJgDec .Glwbz{color: rgba(255,255,255,1);}.cJgDec .qLrapd{color: rgba(255,255,255,1);}.cJgDec .aHM7ed{color: rgba(255,255,255,1);}.cJgDec .NHD4Gf{color: rgba(255,255,255,1);}.cJgDec .IFuOkc:before{background-color: rgba(33,33,33,1); opacity: 0; display: block;}.O13XJf{height: 340px; padding-bottom: 60px; padding-top: 60px;}.O13XJf .IFuOkc{background-color: rgba(34,110,147,1); background-image: url(https://ssl.gstatic.com/atari/images/simple-header-blended-small.png);}.O13XJf .IFuOkc:before{background-color: rgba(33,33,33,1); opacity: 0.4; display: block;}.O13XJf .zfr3Q{color: rgba(255,255,255,1);}.O13XJf .qnVSj{color: rgba(255,255,255,1);}.O13XJf .Glwbz{color: rgba(255,255,255,1);}.O13XJf .duRjpb{color: rgba(255,255,255,1);}.O13XJf .qLrapd{color: rgba(255,255,255,1);}.O13XJf .JYVBee{color: rgba(255,255,255,1);}.O13XJf .aHM7ed{color: rgba(255,255,255,1);}.O13XJf .OmQG5e{color: rgba(255,255,255,1);}.O13XJf .NHD4Gf{color: rgba(255,255,255,1);}.tpmmCb .zfr3Q{color: rgba(33,33,33,1);}.tpmmCb .zfr3Q .OUGEr{color: rgba(33,33,33,1);}.tpmmCb .qnVSj{color: rgba(33,33,33,1);}.tpmmCb .Glwbz{color: rgba(33,33,33,1);}.tpmmCb .qLrapd{color: rgba(33,33,33,1);}.tpmmCb .aHM7ed{color: rgba(33,33,33,1);}.tpmmCb .NHD4Gf{color: rgba(33,33,33,1);}.tpmmCb .IFuOkc:before{background-color: rgba(255,255,255,1); display: block;}.tpmmCb .Wew9ke{fill: rgba(33,33,33,1);}.aw5Odc{color: rgba(0,101,128,1);}.dhtgD:hover{color: rgba(0,122,147,1);}.dhtgD:active{color: rgba(0,122,147,1);}.dhtgD:visited{color: rgba(0,101,128,1);}.Zjiec{color: rgba(255,255,255,1); font-family: Lato, sans-serif; font-size: 19pt; font-weight: 300; letter-spacing: 1px; line-height: 1.3; padding-bottom: 62.5px; padding-left: 48px; padding-right: 36px; padding-top: 11.5px;}.XMyrgf{margin-top: 0px; margin-left: 48px; margin-bottom: 24px; margin-right: 24px;}.TlfmSc{color: rgba(255,255,255,1); font-family: Lato, sans-serif; font-size: 15pt; font-weight: 300; line-height: 1.333;}.Mz8gvb{color: rgba(255,255,255,1);}.zDUgLc{background-color: rgba(33,33,33,1);}.QTKDff.chg4Jd:focus{background-color: 
rgba(255,255,255,0.1199999973);}.YTv4We{color: rgba(178,178,178,1);}.YTv4We:hover:before{background-color: rgba(255,255,255,0.1199999973); display: block;}.YTv4We.chg4Jd:focus:before{border-color: rgba(255,255,255,0.3600000143); display: block;}.eWDljc{background-color: rgba(33,33,33,1);}.eWDljc .hDrhEe{padding-left: 8px;}.ZXW7w{color: rgba(255,255,255,1); opacity: 0.26;}.PsKE7e{color: rgba(255,255,255,1); font-family: Lato, sans-serif; font-size: 12pt; font-weight: 300;}.lhZOrc{color: rgba(73,170,212,1);}.hDrhEe:hover{color: rgba(73,170,212,1);}.M9vuGd{color: rgba(73,170,212,1); font-weight: 400;}.jgXgSe:hover{color: rgba(73,170,212,1);}.j10yRb:hover{color: rgba(0,188,212,1);}.j10yRb.chg4Jd:focus:before{border-color: rgba(255,255,255,0.3600000143); display: block;}.tCHXDc{color: rgba(255,255,255,1);}.iWs3gf.chg4Jd:focus{background-color: rgba(255,255,255,0.1199999973);}.wgxiMe{background-color: rgba(33,33,33,1);}.fOU46b .TlfmSc{color: rgba(255,255,255,1);}.fOU46b .KJll8d{background-color: rgba(255,255,255,1);}.fOU46b .Mz8gvb{color: rgba(255,255,255,1);}.fOU46b .Mz8gvb.chg4Jd:focus:before{border-color: rgba(255,255,255,1); display: block;}.fOU46b .qV4dIc{color: rgba(255,255,255,0.8700000048);}.fOU46b .jgXgSe:hover{color: rgba(255,255,255,1);}.fOU46b .M9vuGd{color: rgba(255,255,255,1);}.fOU46b .tCHXDc{color: rgba(255,255,255,0.8700000048);}.fOU46b .iWs3gf.chg4Jd:focus{background-color: rgba(255,255,255,0.1199999973);}.fOU46b .G8QRnc .Mz8gvb{color: rgba(0,0,0,0.8000000119);}.fOU46b .G8QRnc .Mz8gvb.chg4Jd:focus:before{border-color: rgba(0,0,0,0.8000000119); display: block;}.fOU46b .G8QRnc .ZXW7w{color: rgba(0,0,0,0.8000000119);}.fOU46b .G8QRnc .TlfmSc{color: rgba(0,0,0,0.8000000119);}.fOU46b .G8QRnc .KJll8d{background-color: rgba(0,0,0,0.8000000119);}.fOU46b .G8QRnc .qV4dIc{color: rgba(0,0,0,0.6399999857);}.fOU46b .G8QRnc .jgXgSe:hover{color: rgba(0,0,0,0.8199999928);}.fOU46b .G8QRnc .M9vuGd{color: rgba(0,0,0,0.8199999928);}.fOU46b .G8QRnc .tCHXDc{color: rgba(0,0,0,0.6399999857);}.fOU46b .G8QRnc .iWs3gf.chg4Jd:focus{background-color: rgba(0,0,0,0.1199999973);}.fOU46b .usN8rf .Mz8gvb{color: rgba(0,0,0,0.8000000119);}.fOU46b .usN8rf .Mz8gvb.chg4Jd:focus:before{border-color: rgba(0,0,0,0.8000000119); display: block;}.fOU46b .usN8rf .ZXW7w{color: rgba(0,0,0,0.8000000119);}.fOU46b .usN8rf .TlfmSc{color: rgba(0,0,0,0.8000000119);}.fOU46b .usN8rf .KJll8d{background-color: rgba(0,0,0,0.8000000119);}.fOU46b .usN8rf .qV4dIc{color: rgba(0,0,0,0.6399999857);}.fOU46b .usN8rf .jgXgSe:hover{color: rgba(0,0,0,0.8199999928);}.fOU46b .usN8rf .M9vuGd{color: rgba(0,0,0,0.8199999928);}.fOU46b .usN8rf .tCHXDc{color: rgba(0,0,0,0.6399999857);}.fOU46b .usN8rf .iWs3gf.chg4Jd:focus{background-color: rgba(0,0,0,0.1199999973);}.fOU46b .aCIEDd .qV4dIc{color: rgba(33,33,33,1);}.fOU46b .aCIEDd .TlfmSc{color: rgba(33,33,33,1);}.fOU46b .aCIEDd .KJll8d{background-color: rgba(33,33,33,1);}.fOU46b .aCIEDd .ZXW7w{color: rgba(33,33,33,1);}.fOU46b .aCIEDd .jgXgSe:hover{color: rgba(33,33,33,1); opacity: 0.82;}.fOU46b .aCIEDd .Mz8gvb{color: rgba(33,33,33,1);}.fOU46b .aCIEDd .tCHXDc{color: rgba(33,33,33,1);}.fOU46b .aCIEDd .iWs3gf.chg4Jd:focus{background-color: rgba(33,33,33,0.1199999973);}.fOU46b .a3ETed .qV4dIc{color: rgba(255,255,255,1);}.fOU46b .a3ETed .TlfmSc{color: rgba(255,255,255,1);}.fOU46b .a3ETed .KJll8d{background-color: rgba(255,255,255,1);}.fOU46b .a3ETed .ZXW7w{color: rgba(255,255,255,1);}.fOU46b .a3ETed .jgXgSe:hover{color: rgba(255,255,255,1); opacity: 0.82;}.fOU46b .a3ETed .Mz8gvb{color: rgba(255,255,255,1);}.fOU46b 
.a3ETed .tCHXDc{color: rgba(255,255,255,1);}.fOU46b .a3ETed .iWs3gf.chg4Jd:focus{background-color: rgba(255,255,255,0.1199999973);}@media only screen and (min-width: 1280px){.XeSM4.b2Iqye.fOU46b .LBrwzc .tCHXDc{color: rgba(255,255,255,0.8700000048);}}.XeSM4.b2Iqye.fOU46b .LBrwzc .iWs3gf.chg4Jd:focus{background-color: rgba(255,255,255,0.1199999973);}@media only screen and (min-width: 1280px){.KuNac.b2Iqye.fOU46b .tCHXDc{color: rgba(0,0,0,0.6399999857);}}.KuNac.b2Iqye.fOU46b .iWs3gf.chg4Jd:focus{background-color: rgba(0,0,0,0.1199999973);}.fOU46b .zDUgLc{opacity: 0;}.LBrwzc .ZXW7w{color: rgba(0,0,0,1);}.LBrwzc .KJll8d{background-color: rgba(0,0,0,1);}.GBy4H .ZXW7w{color: rgba(255,255,255,1);}.GBy4H .KJll8d{background-color: rgba(255,255,255,1);}.eBSUbc{background-color: rgba(33,33,33,1); color: rgba(0,188,212,0.6999999881);}.BFDQOb:hover{color: rgba(73,170,212,1);}.ImnMyf{background-color: rgba(255,255,255,1); color: rgba(33,33,33,1);}.Vs12Bd{background-color: rgba(242,242,242,1); color: rgba(33,33,33,1);}.S5d9Rd{background-color: rgba(30,108,147,1); color: rgba(255,255,255,1);}.zfr3Q{color: rgba(33,33,33,1); font-family: Lato, sans-serif; font-size: 11pt; font-weight: 400; line-height: 1.6667; margin-top: 12px;}.qnVSj{color: rgba(33,33,33,1);}.Glwbz{color: rgba(33,33,33,1);}.duRjpb{color: rgba(34,110,147,1); font-family: Lato, sans-serif; font-size: 34pt; font-weight: 300; letter-spacing: 0.5px; line-height: 1.2; margin-top: 30px;}.Ap4VC{margin-bottom: -30px;}.qLrapd{color: rgba(34,110,147,1);}.JYVBee{color: rgba(34,110,147,1); font-family: Lato, sans-serif; font-size: 19pt; font-weight: 400; line-height: 1.4; margin-top: 20px;}.CobnVe{margin-bottom: -20px;}.aHM7ed{color: rgba(34,110,147,1);}.OmQG5e{color: rgba(33,33,33,1); font-family: Lato, sans-serif; font-size: 15pt; font-style: normal; font-weight: 400; line-height: 1.25; margin-top: 16px;}.GV3q8e{margin-bottom: -16px;}.NHD4Gf{color: rgba(33,33,33,1);}.LB7kq .duRjpb{font-size: 64pt; letter-spacing: 2px; line-height: 1; margin-top: 40px;}.LB7kq .JYVBee{font-size: 25pt; font-weight: 300; line-height: 1.1; margin-top: 25px;}@media only screen and (max-width: 479px){.LB7kq .duRjpb{font-size: 40pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.LB7kq .duRjpb{font-size: 53pt;}}@media only screen and (max-width: 479px){.LB7kq .JYVBee{font-size: 19pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.LB7kq .JYVBee{font-size: 22pt;}}.O13XJf{height: 340px; padding-bottom: 60px; padding-top: 60px;}@media only screen and (min-width: 480px) and (max-width: 767px){.O13XJf{height: 280px; padding-bottom: 40px; padding-top: 40px;}}@media only screen and (max-width: 479px){.O13XJf{height: 250px; padding-bottom: 30px; padding-top: 30px;}}.SBrW1{height: 520px;}@media only screen and (min-width: 480px) and (max-width: 767px){.SBrW1{height: 520px;}}@media only screen and (max-width: 479px){.SBrW1{height: 400px;}}.Wew9ke{fill: rgba(255,255,255,1);}.gk8rDe{height: 180px; padding-bottom: 32px; padding-top: 60px;}.gk8rDe .zfr3Q{color: rgba(0,0,0,1);}.gk8rDe .duRjpb{color: rgba(34,110,147,1); font-size: 45pt; line-height: 1.1;}.gk8rDe .qLrapd{color: rgba(34,110,147,1);}.gk8rDe .JYVBee{color: rgba(34,110,147,1); font-size: 27pt; line-height: 1.35; margin-top: 15px;}.gk8rDe .aHM7ed{color: rgba(34,110,147,1);}.gk8rDe .OmQG5e{color: rgba(33,33,33,1);}.gk8rDe .NHD4Gf{color: rgba(33,33,33,1);}@media only screen and (max-width: 479px){.gk8rDe .duRjpb{font-size: 30pt;}}@media only screen and (min-width: 480px) and (max-width: 
767px){.gk8rDe .duRjpb{font-size: 38pt;}}@media only screen and (max-width: 479px){.gk8rDe .JYVBee{font-size: 20pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.gk8rDe .JYVBee{font-size: 24pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.gk8rDe{padding-top: 45px;}}@media only screen and (max-width: 479px){.gk8rDe{padding-bottom: 0px; padding-top: 30px;}}.dhtgD{text-decoration: underline;}.JzO0Vc{background-color: rgba(33,33,33,1); font-family: Lato, sans-serif; width: 250px;}@media only screen and (min-width: 1280px){.JzO0Vc{padding-top: 48.5px;}}.TlfmSc{font-family: Lato, sans-serif; font-size: 15pt; font-weight: 300; line-height: 1.333;}.PsKE7e{font-family: Lato, sans-serif; font-size: 12pt;}.IKA38e{line-height: 1.21;}.hDrhEe{padding-bottom: 11.5px; padding-top: 11.5px;}.zDUgLc{opacity: 1;}.QmpIrf{background-color: rgba(30,108,147,1); border-color: rgba(255,255,255,1); color: rgba(255,255,255,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.xkUom{border-color: rgba(30,108,147,1); color: rgba(30,108,147,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.xkUom:hover{background-color: rgba(30,108,147,0.1000000015);}.KjwKmc{color: rgba(30,108,147,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal; line-height: normal;}.KjwKmc:hover{background-color: rgba(30,108,147,0.1000000015);}.lQAHbd .QmpIrf{background-color: rgba(255,255,255,1); border-color: rgba(34,110,147,1); color: rgba(34,110,147,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.lQAHbd .xkUom{border-color: rgba(242,242,242,1); color: rgba(242,242,242,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.lQAHbd .xkUom:hover{background-color: rgba(255,255,255,0.1000000015);}.lQAHbd .KjwKmc{color: rgba(242,242,242,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.lQAHbd .KjwKmc:hover{background-color: rgba(255,255,255,0.1000000015);}.lQAHbd .Mt0nFe{border-color: rgba(255,255,255,0.200000003);}.cJgDec .QmpIrf{background-color: rgba(255,255,255,1); border-color: rgba(34,110,147,1); color: rgba(34,110,147,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.cJgDec .xkUom{border-color: rgba(242,242,242,1); color: rgba(242,242,242,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.cJgDec .xkUom:hover{background-color: rgba(255,255,255,0.1000000015);}.cJgDec .KjwKmc{color: rgba(242,242,242,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.cJgDec .KjwKmc:hover{background-color: rgba(255,255,255,0.1000000015);}.tpmmCb .QmpIrf{background-color: rgba(255,255,255,1); border-color: rgba(34,110,147,1); color: rgba(34,110,147,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.tpmmCb .xkUom{border-color: rgba(30,108,147,1); color: rgba(30,108,147,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.tpmmCb .xkUom:hover{background-color: rgba(30,108,147,0.1000000015);}.tpmmCb .KjwKmc{color: rgba(30,108,147,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.tpmmCb .KjwKmc:hover{background-color: rgba(30,108,147,0.1000000015);}.gk8rDe .QmpIrf{background-color: rgba(30,108,147,1); border-color: rgba(255,255,255,1); color: rgba(255,255,255,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.gk8rDe .xkUom{border-color: rgba(30,108,147,1); color: rgba(30,108,147,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: 
normal;}.gk8rDe .xkUom:hover{background-color: rgba(30,108,147,0.1000000015);}.gk8rDe .KjwKmc{color: rgba(30,108,147,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.gk8rDe .KjwKmc:hover{background-color: rgba(30,108,147,0.1000000015);}.O13XJf .QmpIrf{background-color: rgba(255,255,255,1); border-color: rgba(34,110,147,1); color: rgba(34,110,147,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.O13XJf .xkUom{border-color: rgba(242,242,242,1); color: rgba(242,242,242,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.O13XJf .xkUom:hover{background-color: rgba(255,255,255,0.1000000015);}.O13XJf .KjwKmc{color: rgba(242,242,242,1); font-family: Lato, sans-serif; font-size: 11pt; line-height: normal;}.O13XJf .KjwKmc:hover{background-color: rgba(255,255,255,0.1000000015);}.Y4CpGd{font-family: Lato, sans-serif; font-size: 11pt;}.CMArNe{background-color: rgba(242,242,242,1);}.LBrwzc .TlfmSc{color: rgba(0,0,0,0.8000000119);}.LBrwzc .YTv4We{color: rgba(0,0,0,0.6399999857);}.LBrwzc .YTv4We.chg4Jd:focus:before{border-color: rgba(0,0,0,0.6399999857); display: block;}.LBrwzc .Mz8gvb{color: rgba(0,0,0,0.6399999857);}.LBrwzc .tCHXDc{color: rgba(0,0,0,0.6399999857);}.LBrwzc .iWs3gf.chg4Jd:focus{background-color: rgba(0,0,0,0.1199999973);}.LBrwzc .wgxiMe{background-color: rgba(255,255,255,1);}.LBrwzc .qV4dIc{color: rgba(0,0,0,0.6399999857);}.LBrwzc .M9vuGd{color: rgba(0,0,0,0.8000000119); font-weight: bold;}.LBrwzc .Zjiec{color: rgba(0,0,0,0.8000000119);}.LBrwzc .IKA38e{color: rgba(0,0,0,0.6399999857);}.LBrwzc .lhZOrc.IKA38e{color: rgba(0,0,0,0.8000000119); font-weight: bold;}.LBrwzc .j10yRb:hover{color: rgba(0,0,0,0.8000000119);}.LBrwzc .eBSUbc{color: rgba(0,0,0,0.8000000119);}.LBrwzc .hDrhEe:hover{color: rgba(0,0,0,0.8000000119);}.LBrwzc .jgXgSe:hover{color: rgba(0,0,0,0.8000000119);}.LBrwzc .M9vuGd:hover{color: rgba(0,0,0,0.8000000119);}.LBrwzc .zDUgLc{border-bottom-color: rgba(204,204,204,1); border-bottom-width: 1px; border-bottom-style: solid;}.fOU46b .LBrwzc .M9vuGd{color: rgba(0,0,0,0.8000000119);}.fOU46b .LBrwzc .jgXgSe:hover{color: rgba(0,0,0,0.8000000119);}.fOU46b .LBrwzc .zDUgLc{opacity: 1; border-bottom-style: none;}.fOU46b .LBrwzc .tCHXDc{color: rgba(0,0,0,0.6399999857);}.fOU46b .LBrwzc .iWs3gf.chg4Jd:focus{background-color: rgba(0,0,0,0.1199999973);}.fOU46b .GBy4H .M9vuGd{color: rgba(255,255,255,1);}.fOU46b .GBy4H .jgXgSe:hover{color: rgba(255,255,255,1);}.fOU46b .GBy4H .zDUgLc{opacity: 1;}.fOU46b .GBy4H .tCHXDc{color: rgba(255,255,255,0.8700000048);}.fOU46b .GBy4H .iWs3gf.chg4Jd:focus{background-color: rgba(255,255,255,0.1199999973);}.XeSM4.G9Qloe.fOU46b .LBrwzc .tCHXDc{color: rgba(0,0,0,0.6399999857);}.XeSM4.G9Qloe.fOU46b .LBrwzc .iWs3gf.chg4Jd:focus{background-color: rgba(0,0,0,0.1199999973);}.GBy4H .lhZOrc.IKA38e{color: rgba(255,255,255,1);}.GBy4H .eBSUbc{color: rgba(255,255,255,0.8700000048);}.GBy4H .hDrhEe:hover{color: rgba(255,255,255,1);}.GBy4H .j10yRb:hover{color: rgba(255,255,255,1);}.GBy4H .YTv4We{color: rgba(255,255,255,1);}.GBy4H .YTv4We.chg4Jd:focus:before{border-color: rgba(255,255,255,1); display: block;}.GBy4H .tCHXDc{color: rgba(255,255,255,0.8700000048);}.GBy4H .iWs3gf.chg4Jd:focus{background-color: rgba(255,255,255,0.1199999973);}.GBy4H .jgXgSe:hover{color: rgba(255,255,255,1);}.GBy4H .jgXgSe:hover{color: rgba(255,255,255,1);}.GBy4H .M9vuGd{color: rgba(255,255,255,1);}.GBy4H .M9vuGd:hover{color: rgba(255,255,255,1);}.QcmuFb{padding-left: 20px;}.vDPrib{padding-left: 40px;}.TBDXjd{padding-left: 
60px;}.bYeK8e{padding-left: 80px;}.CuqSDe{padding-left: 100px;}.Havqpe{padding-left: 120px;}.JvDrRe{padding-left: 140px;}.o5lrIf{padding-left: 160px;}.yOJW7c{padding-left: 180px;}.rB8cye{padding-left: 200px;}.RuayVd{padding-right: 20px;}.YzcKX{padding-right: 40px;}.reTV0b{padding-right: 60px;}.vSYeUc{padding-right: 80px;}.PxtZIe{padding-right: 100px;}.ahQMed{padding-right: 120px;}.rzhcXb{padding-right: 140px;}.PBhj0b{padding-right: 160px;}.TlN46c{padding-right: 180px;}.GEdNnc{padding-right: 200px;}.TMjjoe{font-family: Lato, sans-serif; font-size: 9pt; line-height: 1.2; margin-top: 0px;}@media only screen and (min-width: 1280px){.yxgWrb{margin-left: 250px;}}@media only screen and (max-width: 479px){.Zjiec{font-size: 15pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.Zjiec{font-size: 17pt;}}@media only screen and (max-width: 479px){.TlfmSc{font-size: 13pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.TlfmSc{font-size: 14pt;}}@media only screen and (max-width: 479px){.PsKE7e{font-size: 12pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.PsKE7e{font-size: 12pt;}}@media only screen and (max-width: 479px){.duRjpb{font-size: 24pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.duRjpb{font-size: 29pt;}}@media only screen and (max-width: 479px){.JYVBee{font-size: 15pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.JYVBee{font-size: 17pt;}}@media only screen and (max-width: 479px){.OmQG5e{font-size: 13pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.OmQG5e{font-size: 14pt;}}@media only screen and (max-width: 479px){.TlfmSc{font-size: 13pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.TlfmSc{font-size: 14pt;}}@media only screen and (max-width: 479px){.PsKE7e{font-size: 12pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.PsKE7e{font-size: 12pt;}}@media only screen and (max-width: 479px){.TMjjoe{font-size: 9pt;}}@media only screen and (min-width: 480px) and (max-width: 767px){.TMjjoe{font-size: 9pt;}}section[id="h.INITIAL_GRID.hz2sysafyqnv"] .IFuOkc:before{opacity: 0.0;}</style><script nonce="a9BH-X5QD4qdoD3whwiuBw">_at_config = 
[null,"AIzaSyChg3MFqzdi1P5J-YvEyakkSA1yU7HRcDI","897606708560-a63d8ia0t9dhtpdt4i3djab2m42see7o.apps.googleusercontent.com",null,null,null,null,null,null,null,null,null,null,null,"SITES_%s",null,null,null,null,null,null,null,null,null,["AHKXmL1v_PW0AhcXt6BpBW2jQrg8Oghi_CtVSF_bwn67w6hgFSgUc9r_UTzuuxDr7ST4iTjf_sC4",1,"CKa51YiVnYMDFaBLqwIdT8QMkA",1703046087531681,[5703839,5704621,5706832,5706836,5707711,5735808,5737802,5738531,5740816,5743126,5746994,5747263,5748031,5752696,5753331,5754231,5755098,5758825,5760350,5762261,5764270,5765553,5766779,5767853,5770437,5773680,5774096,5774349,5774854,5776519,5777196,5783803,5784949,5784969,5791301,5791784,5792686,5796153,5796475,5797293,14101306,14101502,14101510,14101534,49372444,49375323,49376002,49376338,49378890,49451560,49453046,49472072,49512374,49517792,49612442,49613709,49622832,49623182,49624081,49644024,49765383,49769346,49816166,49822930,49823173,49824164,49833471,49839580,49842864,49924715,50082749,50127541,50166960,50168316,50221729,50266231,50273537,50293697,50335898,50360149,50390166,50492351,50520322,50529112,50533185,50580253,50606356,70979411,70983144,71035309,71085250,71102133,71119967,71152134,71178681,71185179,71197835,71230234,71238955,71241074,71260351,71273598,71286030,71289155,71301339,71330602,71346961,71382647,71396894,71401154,71407394,71444154,71471151,71471883,71480305,71528086,71528606,71530092,71531296,71537707,71558038,71624116,71625589,71641922,71659822,71671627]],"AHL0AtKjtWA7F_ipTj5iNvZa4HtBjmPZSA:1703046087403",null,null,null,0,null,null,null,null,null,null,null,null,null,"https://drive.google.com",null,null,null,null,null,null,null,null,null,0,1,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,"v2internal","https://docs.google.com",null,null,null,null,null,null,"https://sites.google.com/new/?authuser\u003d0",null,null,null,null,null,0,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,1,"",null,null,null,null,null,null,null,null,null,null,null,null,6,null,null,"https://accounts.google.com/o/oauth2/auth","https://accounts.google.com/o/oauth2/postmessageRelay",null,null,null,null,78,"https://sites.google.com/new/?authuser\u003d0\u0026usp\u003dviewer_footer\u0026authuser\u003d0",null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,[],null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,"https://www.gstatic.com/atari/embeds/83a60601c213b72fb19c1855fb0c5f26/intermediate-frame-minified.html",0,null,"v2beta",null,null,null,null,null,null,4,"https://accounts.google.com/o/oauth2/iframe",null,null,null,null,null,null,"https://1678599899-atari-embeds.googleusercontent.com/embeds/16cb204cf3a9d4d223a0a3fd8b0eec5d/inner-frame-minified.html",null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,0,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,"https://sites.google.com/btrabucco.com/da-fusion/home",null,null,null,null,null,null,null
,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,0,null,null,null,null,null,null,0,null,"02c5ul33",null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,0,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,1,null,null,[1703046087543,"editors.sites-viewer-frontend_20231212.02_p0","590108479","0",0,1,""],null,null,null,null,1,null,null,0,null,null,null,null,null,null,null,null,20,500,"https://domains.google.com",null,0,null,null,null,null,null,null,null,null,null,null,null,0,null,null,null,0,null,null,null,null,null,1,1,0,1,0,0,0,0,null,null,null,null,null,"https://www.google.com/calendar/embed",null,1,null,null,0,null,null,null,null,null,null,null,null,null,null,0,null,null,null,null,null,1,1]; window.globals = {"enableAnalytics":true,"webPropertyId":"","showDebug":false,"hashedSiteId":"4cca8ee9fc4742b8704c74180e6f5423fa06ada53ff489f3d38f5c4e925b2faa","normalizedPath":"btrabucco.com/da-fusion/home","pageTitle":"Home"}; function gapiLoaded() {if (globals.gapiLoaded == undefined) {globals.gapiLoaded = true;} else {globals.gapiLoaded();}}window.messages = []; window.addEventListener && window.addEventListener('message', function(e) {if (window.messages && e.data && e.data.magic == 'SHIC') {window.messages.push(e);}});</script><script src="https://apis.google.com/js/client.js?onload=gapiLoaded" nonce="a9BH-X5QD4qdoD3whwiuBw"></script><script nonce="a9BH-X5QD4qdoD3whwiuBw">(function(){}).call(this);
</script><script nonce="a9BH-X5QD4qdoD3whwiuBw">const imageUrl = 'https:\/\/lh6.googleusercontent.com\/_Uox1EaLrhZo7CLGId161i_WEN1vLr3EM6E3rVaNyvhhtk9-uzPHWYOIgsBT8oSsWV15Nj3KknD7kbe7fA7Rukg\x3dw16383';
function bgImgLoaded() {
if (!globals.headerBgImgLoaded) {
globals.headerBgImgLoaded = new Date().getTime();
} else {
globals.headerBgImgLoaded();
}
}
if (imageUrl) {
const img = new Image();
img.src = imageUrl;
img.onload = bgImgLoaded;
globals.headerBgImgExists = true;
} else {
globals.headerBgImgExists = false;
}
</script></head><body dir="ltr" itemscope itemtype="http://schema.org/WebPage" id="yDmH0d" css="yDmH0d"><div jscontroller="pc62j" jsmodel="iTeaXe" jsaction="rcuQ6b:WYd;GvneHb:og1FDd;vbaUQc:uAM5ec;"><div jscontroller="X4BaPc" jsaction="rcuQ6b:WYd;o6xM5b:Pg9eo;HuL2Hd:mHeCvf;VMhF5:FFYy5e;sk3Qmb:HI1Mdd;JIbuQc:rSzFEd(z2EeY),aSaF6e(ilzYPe);"><div jscontroller="o1L5Wb" data-sitename="da-fusion" data-domain="btrabucco.com" data-universe="1" jsmodel="fNFZH" jsaction="Pe9H6d:cZFEp;WMZaJ:VsGN3;hJluRd:UADL7b;zuqEgd:HI9w0;tr6QDd:Y8aXB;MxH79b:xDkBfb;JIbuQc:SPXMTb(uxAMZ);" jsname="G0jgYd"><div jsname="gYwusb" class="p9b27"></div><div jscontroller="RrXLpc" jsname="XeeWQc" role="banner" jsaction="keydown:uiKYid(OH0EC);rcuQ6b:WYd;zuqEgd:ufqpf;JIbuQc:XfTnxb(lfEfFf),AlTiYc(GeGHKb),AlTiYc(m1xNUe),zZlNMe(pZn8Oc);YqO5N:ELcyfe;"><div jsname="bF1uUb" class="BuY5Fd" jsaction="click:xVuwSc;"></div><div jsname="MVsrn" class="TbNlJb "><div role="button" class="U26fgb mUbCce fKz7Od h3nfre M9Bg4d" jscontroller="VXdfxd" jsaction="click:cOuCgd; mousedown:UX7yZ; mouseup:lbsD7e; mouseenter:tfO1Yc; mouseleave:JywGue; focus:AHmuwe; blur:O22p3e; contextmenu:mg9Pef;touchstart:p6p2H; touchmove:FwuNnf; touchend:yfqBxc(preventMouseEvents=true|preventDefault=true); touchcancel:JMtRjd;" jsshadow jsname="GeGHKb" aria-label="Back to site" aria-disabled="false" tabindex="0" data-tooltip="Back to site" data-tooltip-vertical-offset="-12" data-tooltip-horizontal-offset="0"><div class="VTBa7b MbhUzd" jsname="ksKsZd"></div><span jsslot class="xjKiLb"><span class="Ce1Y1c" style="top: -12px"><svg class="V4YR2c" viewBox="0 0 24 24" focusable="false"><path d="M0 0h24v24H0z" fill="none"/><path d="M20 11H7.83l5.59-5.59L12 4l-8 8 8 8 1.41-1.41L7.83 13H20v-2z"/></svg></span></span></div><div class="E2UJ5" jsname="M6JdT"><div class="rFrNMe b7AJhc zKHdkd" jscontroller="pxq3x" jsaction="clickonly:KjsqPd; focus:Jt1EX; blur:fpfTEe; input:Lg5SV" jsshadow jsname="OH0EC" aria-expanded="true"><div class="aCsJod oJeWuf"><div class="aXBtI I0VJ4d Wic03c"><span jsslot class="A37UZe qgcB3c iHd5yb"><div role="button" class="U26fgb mUbCce fKz7Od i3PoXe M9Bg4d" jscontroller="VXdfxd" jsaction="click:cOuCgd; mousedown:UX7yZ; mouseup:lbsD7e; mouseenter:tfO1Yc; mouseleave:JywGue; focus:AHmuwe; blur:O22p3e; contextmenu:mg9Pef;touchstart:p6p2H; touchmove:FwuNnf; touchend:yfqBxc(preventMouseEvents=true|preventDefault=true); touchcancel:JMtRjd;" jsshadow jsname="lfEfFf" aria-label="Search" aria-disabled="false" tabindex="0" data-tooltip="Search" data-tooltip-vertical-offset="-12" data-tooltip-horizontal-offset="0"><div class="VTBa7b MbhUzd" jsname="ksKsZd"></div><span jsslot class="xjKiLb"><span class="Ce1Y1c" style="top: -12px"><svg class="vu8Pwe" viewBox="0 0 24 24" focusable="false"><path d="M15.5 14h-.79l-.28-.27C15.41 12.59 16 11.11 16 9.5 16 5.91 13.09 3 9.5 3S3 5.91 3 9.5 5.91 16 9.5 16c1.61 0 3.09-.59 4.23-1.57l.27.28v.79l5 4.99L20.49 19l-4.99-5zm-6 0C7.01 14 5 11.99 5 9.5S7.01 5 9.5 5 14 7.01 14 9.5 11.99 14 9.5 14z"/><path d="M0 0h24v24H0z" fill="none"/></svg></span></span></div><div class="EmVfjc SKShhf" data-loadingmessage="Loading…" jscontroller="qAKInc" jsaction="animationend:kWijWc;dyRcpb:dyRcpb" jsname="aZ2wEe"><div class="Cg7hO" aria-live="assertive" jsname="vyyg5"></div><div jsname="Hxlbvc" class="xu46lf"><div class="ir3uv uWlRce co39ub"><div class="xq3j6 ERcjC"><div class="X6jHbb GOJTSe"></div></div><div class="HBnAAc"><div class="X6jHbb GOJTSe"></div></div><div class="xq3j6 dj3yTd"><div class="X6jHbb GOJTSe"></div></div></div><div class="ir3uv GFoASc 
Cn087"><div class="xq3j6 ERcjC"><div class="X6jHbb GOJTSe"></div></div><div class="HBnAAc"><div class="X6jHbb GOJTSe"></div></div><div class="xq3j6 dj3yTd"><div class="X6jHbb GOJTSe"></div></div></div><div class="ir3uv WpeOqd hfsr6b"><div class="xq3j6 ERcjC"><div class="X6jHbb GOJTSe"></div></div><div class="HBnAAc"><div class="X6jHbb GOJTSe"></div></div><div class="xq3j6 dj3yTd"><div class="X6jHbb GOJTSe"></div></div></div><div class="ir3uv rHV3jf EjXFBf"><div class="xq3j6 ERcjC"><div class="X6jHbb GOJTSe"></div></div><div class="HBnAAc"><div class="X6jHbb GOJTSe"></div></div><div class="xq3j6 dj3yTd"><div class="X6jHbb GOJTSe"></div></div></div></div></div><div role="button" class="U26fgb mUbCce fKz7Od JyJRXe M9Bg4d" jscontroller="VXdfxd" jsaction="click:cOuCgd; mousedown:UX7yZ; mouseup:lbsD7e; mouseenter:tfO1Yc; mouseleave:JywGue; focus:AHmuwe; blur:O22p3e; contextmenu:mg9Pef;touchstart:p6p2H; touchmove:FwuNnf; touchend:yfqBxc(preventMouseEvents=true|preventDefault=true); touchcancel:JMtRjd;" jsshadow jsname="m1xNUe" aria-label="Back to site" aria-disabled="false" tabindex="0" data-tooltip="Back to site" data-tooltip-vertical-offset="-12" data-tooltip-horizontal-offset="0"><div class="VTBa7b MbhUzd" jsname="ksKsZd"></div><span jsslot class="xjKiLb"><span class="Ce1Y1c" style="top: -12px"><svg class="V4YR2c" viewBox="0 0 24 24" focusable="false"><path d="M0 0h24v24H0z" fill="none"/><path d="M20 11H7.83l5.59-5.59L12 4l-8 8 8 8 1.41-1.41L7.83 13H20v-2z"/></svg></span></span></div></span><div class="Xb9hP"><input type="search" class="whsOnd zHQkBf" jsname="YPqjbf" autocomplete="off" tabindex="0" aria-label="Search this site" value="" aria-disabled="false" autofocus role="combobox" data-initial-value=""/><div jsname="LwH6nd" class="ndJi5d snByac" aria-hidden="true">Search this site</div></div><span jsslot class="A37UZe sxyYjd MQL3Ob"><div role="button" class="U26fgb mUbCce fKz7Od Kk06A M9Bg4d" jscontroller="VXdfxd" jsaction="click:cOuCgd; mousedown:UX7yZ; mouseup:lbsD7e; mouseenter:tfO1Yc; mouseleave:JywGue; focus:AHmuwe; blur:O22p3e; contextmenu:mg9Pef;touchstart:p6p2H; touchmove:FwuNnf; touchend:yfqBxc(preventMouseEvents=true|preventDefault=true); touchcancel:JMtRjd;" jsshadow jsname="pZn8Oc" aria-label="Clear search" aria-disabled="false" tabindex="0" data-tooltip="Clear search" data-tooltip-vertical-offset="-12" data-tooltip-horizontal-offset="0"><div class="VTBa7b MbhUzd" jsname="ksKsZd"></div><span jsslot class="xjKiLb"><span class="Ce1Y1c" style="top: -12px"><svg class="fAUEUd" viewBox="0 0 24 24" focusable="false"><path d="M19 6.41L17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12z"></path><path d="M0 0h24v24H0z" fill="none"></path></svg></span></span></div></span><div class="i9lrp mIZh1c"></div><div jsname="XmnwAc" class="OabDMe cXrdqd"></div></div></div><div class="LXRPh"><div jsname="ty6ygf" class="ovnfwe Is7Fhb"></div></div></div></div></div></div></div><div jsname="tiN4bf"><style nonce="k7EAwqdvbMKbMpxLLtxMIg">.rrJNTc{opacity: 0;}.bKy5e{pointer-events: none; position: absolute; top: 0;}</style><div class="bKy5e"><div class="rrJNTc" tabindex="-1"><div class="VfPpkd-dgl2Hf-ppHlrf-sM5MNb" data-is-touch-wrapper="true"><button class="VfPpkd-LgbsSe VfPpkd-LgbsSe-OWXEXe-dgl2Hf LjDxcd XhPA0b LQeN7 WsSUlf jz7fPb" jscontroller="soHxf" jsaction="click:cOuCgd; mousedown:UX7yZ; mouseup:lbsD7e; mouseenter:tfO1Yc; mouseleave:JywGue; touchstart:p6p2H; touchmove:FwuNnf; touchend:yfqBxc; touchcancel:JMtRjd; focus:AHmuwe; blur:O22p3e; 
contextmenu:mg9Pef;mlnRJb:fLiPzd;" data-idom-class="LjDxcd XhPA0b LQeN7 WsSUlf jz7fPb" jsname="z2EeY" tabindex="0"><div class="VfPpkd-Jh9lGc"></div><div class="VfPpkd-J1Ukfc-LhBDec"></div><div class="VfPpkd-RLmnJb"></div><span jsname="V67aGc" class="VfPpkd-vQzf8d">Skip to main content</span></button></div><div class="VfPpkd-dgl2Hf-ppHlrf-sM5MNb" data-is-touch-wrapper="true"><button class="VfPpkd-LgbsSe VfPpkd-LgbsSe-OWXEXe-dgl2Hf LjDxcd XhPA0b LQeN7 WsSUlf br90J" jscontroller="soHxf" jsaction="click:cOuCgd; mousedown:UX7yZ; mouseup:lbsD7e; mouseenter:tfO1Yc; mouseleave:JywGue; touchstart:p6p2H; touchmove:FwuNnf; touchend:yfqBxc; touchcancel:JMtRjd; focus:AHmuwe; blur:O22p3e; contextmenu:mg9Pef;mlnRJb:fLiPzd;" data-idom-class="LjDxcd XhPA0b LQeN7 WsSUlf br90J" jsname="ilzYPe" tabindex="0"><div class="VfPpkd-Jh9lGc"></div><div class="VfPpkd-J1Ukfc-LhBDec"></div><div class="VfPpkd-RLmnJb"></div><span jsname="V67aGc" class="VfPpkd-vQzf8d">Skip to navigation</span></button></div></div></div><div class="M63kCb N63NQ"></div><div class="QZ3zWd"><div class="fktJzd AKpWA fOU46b yMcSQd Ly6Unf G9Qloe KuNac XxIgdb" jsname="UzWXSb" data-uses-custom-theme="false" data-legacy-theme-name="QualityBasics" data-legacy-theme-font-kit="Light" data-legacy-theme-color-kit="Blue" jscontroller="Md9ENb" jsaction="gsiSmd:Ffcznf;yj5fUd:cpPetb;HNXL3:q0Vyke;e2SXKd:IPDu5e;BdXpgd:nhk7K;rcuQ6b:WYd;"><header id="atIdViewHeader"><div class="BbxBP G8QRnc K5Zlne" jsname="WA9qLc" jscontroller="RQOkef" jsaction="rcuQ6b:ywL4Jf;VbOlFf:ywL4Jf;FaOgy:ywL4Jf; keydown:Hq2uPe; wheel:Ut4Ahc;" data-top-navigation="true" data-is-preview="false"><div class="VLoccc K5Zlne ELAV1d U8eYrb" jsname="rtFGi"><div class="Pvc6xe"><div jsname="I8J07e" class="TlfmSc YSH9J"><a class="GAuSPc" jsname="jIujaf" href="/btrabucco.com/da-fusion/home?authuser=0"><span class="QTKDff">DA-Fusion</span></a></div></div><div jsname="mADGA" class="zDUgLc"></div></div></div></header><div role="main" tabindex="-1" class="UtePc RCETm" dir="ltr"><section id="h.INITIAL_GRID.hz2sysafyqnv" class="yaqOZd LB7kq cJgDec tpmmCb O13XJf KEFykf" style=""><div class="Nu95r"><div class="IFuOkc" style="background-position: center center; background-image: url(https://lh6.googleusercontent.com/_Uox1EaLrhZo7CLGId161i_WEN1vLr3EM6E3rVaNyvhhtk9-uzPHWYOIgsBT8oSsWV15Nj3KknD7kbe7fA7Rukg=w16383); background-size: cover;" jsname="LQX2Vd"></div></div><div class="mYVXT"><div class="LS81yb VICjCf j5pSsc db35Fc" tabindex="-1"><div class="hJDwNd-AhqUyc-uQSCkd Ft7HRd-AhqUyc-uQSCkd purZT-AhqUyc-II5mzb ZcASvf-AhqUyc-II5mzb pSzOP-AhqUyc-qWD73c Ktthjf-AhqUyc-qWD73c JNdkSc SQVYQc yYI8W HQwdzb"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"><div class="oKdM2c ZZyype Kzv0Me"><div id="h.INITIAL_GRID.t5bg9a27xpls" class="hJDwNd-AhqUyc-uQSCkd Ft7HRd-AhqUyc-uQSCkd jXK9ad D2fZ2 zu5uec OjCsFc dmUFtb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd mGzaTb Depvyb baZpAe lkHyyc"><h2 id="h.e91iktvgsmnf" dir="ltr" class="zfr3Q JYVBee CDt4Ke " style="background-color: transparent; border-bottom: none; border-left: none; border-right: none; border-top: none; line-height: 1.2; margin-bottom: 0.0pt; margin-top: 0.0pt; padding-bottom: 0.0pt; padding-left: 0.0pt; padding-right: 0.0pt; padding-top: 0.0pt; text-align: center;"><span class="RijTuc C9DxTc " style="color: #000000; vertical-align: baseline;">Effective Data Augmentation With Diffusion Models</span></h2><br><h2 id="h.24ch5go1zjca" dir="ltr" class="zfr3Q JYVBee CDt4Ke " 
style="background-color: transparent; border-bottom: none; border-left: none; border-right: none; border-top: none; line-height: 1.2; margin-bottom: 0.0pt; margin-top: 0.0pt; padding-bottom: 0.0pt; padding-left: 0.0pt; padding-right: 0.0pt; padding-top: 0.0pt; text-align: center;"><span class="jgG6ef C9DxTc " style="color: #000000; font-family: Lato, Arial; font-variant: normal; font-weight: 400; vertical-align: baseline;">Brandon Trabucco ¹ , </span><span class="jgG6ef C9DxTc " style="color: #000000; font-family: Lato, Arial; font-weight: 400; vertical-align: baseline;">Kyle Doherty</span><span class="jgG6ef C9DxTc " style="color: #000000; font-family: Lato, Arial; font-variant: normal; font-weight: 400; vertical-align: baseline;"> ² , </span><span class="jgG6ef C9DxTc " style="color: #000000; font-family: Lato, Arial; font-weight: 400; vertical-align: baseline;">Max Gurinas</span><span class="jgG6ef C9DxTc " style="color: #000000; font-family: Lato, Arial; font-variant: normal; font-weight: 400; vertical-align: baseline;"> ³ ,</span><span class="jgG6ef C9DxTc " style="color: #000000; font-family: Lato, Arial; font-weight: 400; vertical-align: baseline;"> </span><span class="jgG6ef C9DxTc " style="color: #000000; font-family: Lato, Arial; font-variant: normal; font-weight: 400; vertical-align: baseline;">Ruslan Salakhutdinov ¹</span></h2><h2 id="h.dkyz64wd01w9" dir="ltr" class="zfr3Q JYVBee CDt4Ke " style="background-color: transparent; border-bottom: none; border-left: none; border-right: none; border-top: none; line-height: 1.2; margin-bottom: 20.0pt; margin-top: 0.0pt; padding-bottom: 0.0pt; padding-left: 0.0pt; padding-right: 0.0pt; padding-top: 0.0pt; text-align: center;"><span class="jgG6ef C9DxTc " style="color: #000000; font-variant: normal; vertical-align: baseline;">¹ Carnegie Mellon University ,</span><span class="jgG6ef C9DxTc " style="color: #000000; vertical-align: baseline;"> </span><span class="jgG6ef C9DxTc " style="color: #000000; font-variant: normal; vertical-align: baseline;">² </span><span class="jgG6ef C9DxTc " style="color: #000000; vertical-align: baseline;">MPG Ranch</span><span class="jgG6ef C9DxTc " style="color: #000000; font-variant: normal; vertical-align: baseline;"> , ³ University of </span><span class="jgG6ef C9DxTc " style="color: #000000; vertical-align: baseline;">Chicago Laboratory Schools</span></h2></div></div></div></div><div class="oKdM2c ZZyype"><div id="h.662c5a9b377f41bf_8" class="hJDwNd-AhqUyc-uQSCkd Ft7HRd-AhqUyc-uQSCkd jXK9ad D2fZ2 zu5uec wHaque g5GTcb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd OWlOyc baZpAe"><div jscontroller="VYKRW" jsaction="rcuQ6b:rcuQ6b;"><div class="WIdY2d M1aSXe"><div jsname="WXxXjd" style="padding-top: 45.4939341421%"></div><iframe jsname="L5Fo6c" class="YMEQtf" sandbox="allow-scripts allow-popups allow-forms allow-same-origin allow-popups-to-escape-sandbox allow-downloads allow-modals allow-storage-access-by-user-activation" frameborder="0" aria-label="YouTube Video, Effective Data Augmentation With Diffusion Models [NeurIPS 2023]" src="https://www.youtube.com/embed/IKDWOOWzwns?embed_config=%7B%22gws%22:1%7D" allowfullscreen></iframe></div></div></div></div></div></div></div></div></div></div></div><div class="DnLU4" jsaction="JIbuQc:v5IJLd(ipHvib);"><div role="button" class="U26fgb mUbCce fKz7Od HqAAld Wew9ke M9Bg4d" jscontroller="VXdfxd" jsaction="click:cOuCgd; mousedown:UX7yZ; mouseup:lbsD7e; mouseenter:tfO1Yc; mouseleave:JywGue; focus:AHmuwe; blur:O22p3e; contextmenu:mg9Pef;touchstart:p6p2H; 
touchmove:FwuNnf; touchend:yfqBxc(preventMouseEvents=true|preventDefault=true); touchcancel:JMtRjd;" jsshadow jsname="ipHvib" aria-label="Scroll down" aria-disabled="false" tabindex="0"><div class="VTBa7b MbhUzd" jsname="ksKsZd"></div><span jsslot class="xjKiLb"><span class="Ce1Y1c" style="top: -12px"><svg class="XE8yyf" viewBox="0 0 24 24" focusable="false"><path d="M7.41 7.84L12 12.42l4.59-4.58L18 9.25l-6 6-6-6z"/><path d="M0-.75h24v24H0z" fill="none"/></svg></span></span></div></div></section><section id="h.662c5a9b377f41bf_13" class="yaqOZd"><div class="IFuOkc"></div><div class="mYVXT"><div class="LS81yb VICjCf j5pSsc db35Fc" tabindex="-1"><div class="hJDwNd-AhqUyc-uQSCkd Ft7HRd-AhqUyc-uQSCkd purZT-AhqUyc-II5mzb ZcASvf-AhqUyc-II5mzb pSzOP-AhqUyc-qWD73c Ktthjf-AhqUyc-qWD73c JNdkSc SQVYQc"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"><div class="oKdM2c ZZyype Kzv0Me"><div id="h.662c5a9b377f41bf_16" class="hJDwNd-AhqUyc-uQSCkd Ft7HRd-AhqUyc-uQSCkd jXK9ad D2fZ2 zu5uec OjCsFc dmUFtb wHaque g5GTcb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd mGzaTb Depvyb baZpAe"><p dir="ltr" class="zfr3Q CDt4Ke " style="text-align: center;"><span class="C9DxTc " style="font-family: Lora, Arial; font-size: 13.999999999999998pt; font-variant: normal; font-weight: 400; vertical-align: baseline;">Paper: </span><a class="XqQF9c" href="https://arxiv.org/abs/2302.07944" target="_blank" style="color: inherit; text-decoration: none;"><span class="C9DxTc aw5Odc " style="font-family: Lora, Arial; font-size: 13.999999999999998pt; font-weight: 400; text-decoration: underline; vertical-align: baseline;">arXiv</span></a><span class="C9DxTc " style="font-family: Lora, Arial; font-size: 13.999999999999998pt; font-weight: 400; vertical-align: baseline;"> | </span><span class="C9DxTc " style="font-family: Lora, Arial; font-size: 13.999999999999998pt; font-variant: normal; font-weight: 400; vertical-align: baseline;">Code: </span><a class="XqQF9c" href="https://github.com/brandontrabucco/da-fusion" target="_blank" style="color: inherit; text-decoration: none;"><span class="C9DxTc aw5Odc " style="font-family: Lora, Arial; font-size: 13.999999999999998pt; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline;">GitHub</span></a></p></div></div></div></div></div></div></div></div></div></section><section id="h.662c5a9b377f41bf_49" class="yaqOZd"><div class="IFuOkc"></div><div class="mYVXT"><div class="LS81yb VICjCf j5pSsc db35Fc" tabindex="-1"><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div><div class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO purZT-AhqUyc-II5mzb ZcASvf-AhqUyc-II5mzb pSzOP-AhqUyc-qWD73c Ktthjf-AhqUyc-qWD73c JNdkSc SQVYQc"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"><div class="oKdM2c ZZyype Kzv0Me"><div id="h.662c5a9b377f41bf_45" class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO jXK9ad D2fZ2 zu5uec OjCsFc dmUFtb wHaque g5GTcb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd baZpAe"><div class="t3iYD"><img src="https://lh5.googleusercontent.com/mTrJFRXLoOsce_2zNf1rSofnhmI3zU0oBAU09nT9cl5uj_KakpWpEkR99OXH3MQQT_VnP46NNY56Khcl3pMA5ybQ3yAUXw7CpN2Ndh_XJ5VYtGG6eQXe0EfeYnx-4qym3g=w1280" class="CENy8b" 
role="img"></div></div></div></div></div></div></div></div><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div></div></div></section><section id="h.662c5a9b377f41bf_33" class="yaqOZd"><div class="IFuOkc"></div><div class="mYVXT"><div class="LS81yb VICjCf j5pSsc db35Fc" tabindex="-1"><div class="hJDwNd-AhqUyc-uQSCkd Ft7HRd-AhqUyc-uQSCkd purZT-AhqUyc-II5mzb ZcASvf-AhqUyc-II5mzb pSzOP-AhqUyc-qWD73c Ktthjf-AhqUyc-qWD73c JNdkSc SQVYQc"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"><div class="oKdM2c ZZyype Kzv0Me"><div id="h.662c5a9b377f41bf_36" class="hJDwNd-AhqUyc-uQSCkd Ft7HRd-AhqUyc-uQSCkd jXK9ad D2fZ2 zu5uec OjCsFc dmUFtb wHaque g5GTcb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd mGzaTb Depvyb baZpAe"><p dir="ltr" class="zfr3Q CDt4Ke " style="text-align: center;"><span class="puwcIf C9DxTc " style="font-family: Lora, Arial; font-weight: 400; vertical-align: baseline;">Abstract</span></p></div></div></div></div></div></div></div></div></div></section><section id="h.662c5a9b377f41bf_37" class="yaqOZd"><div class="IFuOkc"></div><div class="mYVXT"><div class="LS81yb VICjCf j5pSsc db35Fc" tabindex="-1"><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div><div class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO purZT-AhqUyc-II5mzb ZcASvf-AhqUyc-II5mzb pSzOP-AhqUyc-qWD73c Ktthjf-AhqUyc-qWD73c JNdkSc SQVYQc"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"><div class="oKdM2c ZZyype Kzv0Me"><div id="h.662c5a9b377f41bf_40" class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO jXK9ad D2fZ2 zu5uec OjCsFc dmUFtb wHaque g5GTcb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd mGzaTb Depvyb baZpAe"><p dir="ltr" class="zfr3Q CDt4Ke " style=""><span class="C9DxTc " style="font-family: Lora, Arial; font-size: 13.999999999999998pt; font-weight: 400; vertical-align: baseline;">Data augmentation is one of the most prevalent tools in deep learning, underpinning many recent advances, including those from classification, generative models, and representation learning. The standard approach to data augmentation combines simple transformations like rotations and flips to generate new images from existing ones. However, these new images lack diversity along key semantic axes present in the data. Current augmentations cannot alter the high-level semantic attributes, such as animal species present in a scene, to enhance the diversity of data. We address the lack of diversity in data augmentation with image-to-image transformations parameterized by pre-trained text-to-image diffusion models. Our method edits images to change their semantics using an off-the-shelf diffusion model, and generalizes to novel visual concepts from a few labelled examples. 
We evaluate our approach on few-shot image classification tasks, and on a real-world weed recognition task, and observe an improvement in accuracy in tested domains.</span></p></div></div></div></div></div></div></div><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div></div></div></section><section id="h.662c5a9b377f41bf_70" class="yaqOZd WxWicb"><div class="IFuOkc"></div><div class="mYVXT"><div class="LS81yb VICjCf j5pSsc db35Fc" tabindex="-1"><div class="hJDwNd-AhqUyc-uQSCkd Ft7HRd-AhqUyc-uQSCkd purZT-AhqUyc-II5mzb ZcASvf-AhqUyc-II5mzb pSzOP-AhqUyc-qWD73c Ktthjf-AhqUyc-qWD73c JNdkSc SQVYQc"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"><div class="oKdM2c ZZyype Kzv0Me"><div id="h.662c5a9b377f41bf_73" class="hJDwNd-AhqUyc-uQSCkd Ft7HRd-AhqUyc-uQSCkd jXK9ad D2fZ2 zu5uec OjCsFc dmUFtb wHaque g5GTcb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd baZpAe"><div class="iwQgFb" role="presentation"></div></div></div></div></div></div></div></div></div></div></section><section id="h.662c5a9b377f41bf_53" class="yaqOZd"><div class="IFuOkc"></div><div class="mYVXT"><div class="LS81yb VICjCf j5pSsc db35Fc" tabindex="-1"><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div><div class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO purZT-AhqUyc-II5mzb ZcASvf-AhqUyc-II5mzb pSzOP-AhqUyc-qWD73c Ktthjf-AhqUyc-qWD73c JNdkSc SQVYQc yYI8W HQwdzb"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"><div class="oKdM2c ZZyype Kzv0Me"><div id="h.662c5a9b377f41bf_50" class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO jXK9ad D2fZ2 zu5uec OjCsFc dmUFtb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd baZpAe"><div class="t3iYD"><img src="https://lh3.googleusercontent.com/MQzaRK1DmU7bpbAy8w8VaxuiTyOC9stf_T3oUm8JeCb6A-fVONJw7DwhnYA1wmxVubLNwWI8L13GaxNVh-FakS-3QFUfvzVX4Vcj_jFFgIvvwRQxGQtnOzLIWT-kMUAODA=w1280" class="CENy8b" role="img"></div></div></div></div></div><div class="oKdM2c ZZyype"><div id="h.662c5a9b377f41bf_58" class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO jXK9ad D2fZ2 zu5uec wHaque g5GTcb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd mGzaTb Depvyb baZpAe"><p dir="ltr" class="zfr3Q CDt4Ke " style=""><span class="C9DxTc " style="font-family: Lora, Arial; font-size: 13.999999999999998pt; font-weight: 400; vertical-align: baseline;">Our augmentation adapts to the images in your datasets by learning pseudo-prompts <y> for each class.</span></p></div></div></div></div></div></div></div><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div></div></div></section><section id="h.662c5a9b377f41bf_65" class="yaqOZd"><div class="IFuOkc"></div><div class="mYVXT"><div class="LS81yb VICjCf j5pSsc db35Fc" tabindex="-1"><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" 
jsname="F57UId"></div></div></div><div class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO purZT-AhqUyc-II5mzb ZcASvf-AhqUyc-II5mzb pSzOP-AhqUyc-qWD73c Ktthjf-AhqUyc-qWD73c JNdkSc SQVYQc yYI8W HQwdzb"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"><div class="oKdM2c ZZyype Kzv0Me"><div id="h.662c5a9b377f41bf_62" class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO jXK9ad D2fZ2 zu5uec OjCsFc dmUFtb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd baZpAe"><div class="t3iYD"><img src="https://lh4.googleusercontent.com/XTw2snXXe905NI7eSTxEQHvq9QA9lwmZuQ-flQdk0QnidV8a91SPJi8-bgthp61ATMIGnOIfAsh3ighbbgOl7vAsdNDOjUW3ibrbhxgca156DY8O_PsO4cnI_c0cW98N0A=w1280" class="CENy8b" role="img"></div></div></div></div></div><div class="oKdM2c ZZyype"><div id="h.662c5a9b377f41bf_69" class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO jXK9ad D2fZ2 zu5uec wHaque g5GTcb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd mGzaTb Depvyb baZpAe"><p dir="ltr" class="zfr3Q CDt4Ke " style=""><span class="C9DxTc " style="font-family: Lora, Arial; font-size: 13.999999999999998pt; font-weight: 400; vertical-align: baseline;">We generate augmentations using the structural layout of real images as a guide.</span></p></div></div></div></div></div></div></div><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div></div></div></section><section id="h.662c5a9b377f41bf_77" class="yaqOZd"><div class="IFuOkc"></div><div class="mYVXT"><div class="LS81yb VICjCf j5pSsc db35Fc" tabindex="-1"><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div><div class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO purZT-AhqUyc-II5mzb ZcASvf-AhqUyc-II5mzb pSzOP-AhqUyc-qWD73c Ktthjf-AhqUyc-qWD73c JNdkSc SQVYQc yYI8W HQwdzb"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"><div class="oKdM2c ZZyype Kzv0Me"><div id="h.662c5a9b377f41bf_74" class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO jXK9ad D2fZ2 zu5uec OjCsFc dmUFtb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd baZpAe"><div class="t3iYD"><img src="https://lh3.googleusercontent.com/WXNoTzEQOxwqLa97-pXeGSb2DdHX-ajfq-dKVasSOib1F8FZQhYjGmmB10WWQ88AlCq0cr5MNDI-E2uRPgLkoaOagOnHnfwsVk32Hx-wYWUIMnqIys0DIKpLejbuTRwPKw=w1280" class="CENy8b" role="img"></div></div></div></div></div><div class="oKdM2c ZZyype"><div id="h.662c5a9b377f41bf_81" class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO jXK9ad D2fZ2 zu5uec wHaque g5GTcb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd mGzaTb Depvyb baZpAe"><p dir="ltr" class="zfr3Q CDt4Ke " style=""><span class="C9DxTc " style="font-family: Lora, Arial; font-size: 13.999999999999998pt; font-weight: 400; vertical-align: baseline;">Generations from DA-Fusion </span><span class="C9DxTc " style="font-family: Lora, Arial; font-size: 13.999999999999998pt; font-variant: normal; font-weight: 400; vertical-align: baseline;">preserve</span><span class="C9DxTc " style="font-family: Lora, Arial; font-size: 13.999999999999998pt; font-weight: 400; vertical-align: baseline;"> the layout of trees, but produce different structural elements.</span></p></div></div></div></div></div></div></div><div 
class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div></div></div></section><section id="h.662c5a9b377f41bf_82" class="yaqOZd WxWicb"><div class="IFuOkc"></div><div class="mYVXT"><div class="LS81yb VICjCf j5pSsc db35Fc" tabindex="-1"><div class="hJDwNd-AhqUyc-uQSCkd Ft7HRd-AhqUyc-uQSCkd purZT-AhqUyc-II5mzb ZcASvf-AhqUyc-II5mzb pSzOP-AhqUyc-qWD73c Ktthjf-AhqUyc-qWD73c JNdkSc SQVYQc"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"><div class="oKdM2c ZZyype Kzv0Me"><div id="h.662c5a9b377f41bf_85" class="hJDwNd-AhqUyc-uQSCkd Ft7HRd-AhqUyc-uQSCkd jXK9ad D2fZ2 zu5uec OjCsFc dmUFtb wHaque g5GTcb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd baZpAe"><div class="iwQgFb" role="presentation"></div></div></div></div></div></div></div></div></div></div></section><section id="h.662c5a9b377f41bf_92" class="yaqOZd"><div class="IFuOkc"></div><div class="mYVXT"><div class="LS81yb VICjCf j5pSsc db35Fc" tabindex="-1"><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div><div class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO purZT-AhqUyc-II5mzb ZcASvf-AhqUyc-II5mzb pSzOP-AhqUyc-qWD73c Ktthjf-AhqUyc-qWD73c JNdkSc SQVYQc yYI8W HQwdzb"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"><div class="oKdM2c ZZyype Kzv0Me"><div id="h.662c5a9b377f41bf_89" class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO jXK9ad D2fZ2 zu5uec OjCsFc dmUFtb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd baZpAe"><div class="t3iYD"><img src="https://lh5.googleusercontent.com/wHPXgt9yfYbDB8WmAw8gUzU9kdH-UKUpRYXgJuh_woA8r1_DZo5wjJksSPYNRk47RgL5-o3ErD54W85nmNNBcydnCj27HwYyGHuDk2t3aKQM0lDJ8NvgutvaIIwf0FeyfQ=w1280" class="CENy8b" role="img"></div></div></div></div></div><div class="oKdM2c ZZyype"><div id="h.662c5a9b377f41bf_102" class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO jXK9ad D2fZ2 zu5uec wHaque g5GTcb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd mGzaTb Depvyb baZpAe"><p dir="ltr" class="zfr3Q CDt4Ke " style=""><span class="C9DxTc " style="font-family: Lora, Arial; font-size: 13.999999999999998pt; font-weight: 400; vertical-align: baseline;">We see strong performance across seven few-shot classification tasks.</span></p></div></div></div></div></div></div></div><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div></div></div></section><section id="h.662c5a9b377f41bf_107" class="yaqOZd"><div class="IFuOkc"></div><div class="mYVXT"><div class="LS81yb VICjCf j5pSsc db35Fc" tabindex="-1"><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div><div class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO purZT-AhqUyc-II5mzb ZcASvf-AhqUyc-II5mzb pSzOP-AhqUyc-qWD73c Ktthjf-AhqUyc-qWD73c JNdkSc SQVYQc"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" 
jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"><div class="oKdM2c ZZyype Kzv0Me"><div id="h.662c5a9b377f41bf_104" class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO jXK9ad D2fZ2 zu5uec OjCsFc dmUFtb wHaque g5GTcb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd baZpAe"><div class="t3iYD"><img src="https://lh3.googleusercontent.com/J9GoHlhEIz90S_RcvjLHn5FsBXOHn24U7VhqKQBCb9V6SJSjMpqMV1MX7BSD-UwEsm3R9t9C2uzvfUp7oRxQ_vVjLnGm22Wkjja5rM4hn2lDbjnJjjzGx5dsS--So6I76g=w1280" class="CENy8b" role="img"></div></div></div></div></div></div></div></div><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div></div></div></section><section id="h.afc85eb394d9d41_3" class="yaqOZd"><div class="IFuOkc"></div><div class="mYVXT"><div class="LS81yb VICjCf j5pSsc db35Fc" tabindex="-1"><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div><div class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO purZT-AhqUyc-II5mzb ZcASvf-AhqUyc-II5mzb pSzOP-AhqUyc-qWD73c Ktthjf-AhqUyc-qWD73c JNdkSc SQVYQc"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"><div class="oKdM2c ZZyype Kzv0Me"><div id="h.afc85eb394d9d41_0" class="hJDwNd-AhqUyc-EehZO Ft7HRd-AhqUyc-EehZO jXK9ad D2fZ2 zu5uec OjCsFc dmUFtb wHaque g5GTcb"><div class="jXK9ad-SmKAyb"><div class="tyJCtd mGzaTb Depvyb baZpAe"><p dir="ltr" class="zfr3Q CDt4Ke " style="background-clip: padding-box; background-color: rgba(0,0,0,0.0590000004); border-bottom: none; border-left: solid #ffffff1f 0.75pt; border-right: solid #ffffff1f 0.75pt; border-top: solid #ffffff1f 0.75pt; margin-bottom: 0.0pt; margin-top: 0.0pt; padding-bottom: 0.0pt; padding-left: 4.0pt; padding-right: 4.0pt; padding-top: 4.0pt;"><span class="C9DxTc " style="color: #24292e; font-family: 'Source Code Pro', Arial; font-size: 10.0pt; font-weight: 400; vertical-align: baseline;">@misc{https://doi.org/10.48550/arxiv.2302.07944,</span></p><p dir="ltr" class="zfr3Q CDt4Ke " style="background-clip: padding-box; background-color: rgba(0,0,0,0.0590000004); border-bottom: none; border-left: solid #ffffff1f 0.75pt; border-right: solid #ffffff1f 0.75pt; border-top: none; margin-bottom: 0.0pt; margin-top: 0.0pt; padding-bottom: 0.0pt; padding-left: 4.0pt; padding-right: 4.0pt; padding-top: 0.0pt;"><span class="C9DxTc " style="color: #24292e; font-family: 'Source Code Pro', Arial; font-size: 10.0pt; font-weight: 400; vertical-align: baseline;"> doi = {10.48550/ARXIV.2302.07944},</span></p><p dir="ltr" class="zfr3Q CDt4Ke " style="background-clip: padding-box; background-color: rgba(0,0,0,0.0590000004); border-bottom: none; border-left: solid #ffffff1f 0.75pt; border-right: solid #ffffff1f 0.75pt; border-top: none; margin-bottom: 0.0pt; margin-top: 0.0pt; padding-bottom: 0.0pt; padding-left: 4.0pt; padding-right: 4.0pt; padding-top: 0.0pt;"><span class="C9DxTc " style="color: #24292e; font-family: 'Source Code Pro', Arial; font-size: 10.0pt; font-weight: 400; vertical-align: baseline;"> url = {https://arxiv.org/abs/2302.07944},</span></p><p dir="ltr" class="zfr3Q CDt4Ke " style="background-clip: padding-box; background-color: rgba(0,0,0,0.0590000004); border-bottom: none; border-left: solid 
#ffffff1f 0.75pt; border-right: solid #ffffff1f 0.75pt; border-top: none; margin-bottom: 0.0pt; margin-top: 0.0pt; padding-bottom: 0.0pt; padding-left: 4.0pt; padding-right: 4.0pt; padding-top: 0.0pt;"><span class="C9DxTc " style="color: #24292e; font-family: 'Source Code Pro', Arial; font-size: 10.0pt; font-weight: 400; vertical-align: baseline;"> author = {Trabucco, Brandon and Doherty, Kyle and Gurinas, Max and Salakhutdinov, Ruslan},</span></p><p dir="ltr" class="zfr3Q CDt4Ke " style="background-clip: padding-box; background-color: rgba(0,0,0,0.0590000004); border-bottom: none; border-left: solid #ffffff1f 0.75pt; border-right: solid #ffffff1f 0.75pt; border-top: none; margin-bottom: 0.0pt; margin-top: 0.0pt; padding-bottom: 0.0pt; padding-left: 4.0pt; padding-right: 4.0pt; padding-top: 0.0pt;"><span class="C9DxTc " style="color: #24292e; font-family: 'Source Code Pro', Arial; font-size: 10.0pt; font-weight: 400; vertical-align: baseline;"> keywords = {Computer Vision and Pattern Recognition (cs.CV), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},</span></p><p dir="ltr" class="zfr3Q CDt4Ke " style="background-clip: padding-box; background-color: rgba(0,0,0,0.0590000004); border-bottom: none; border-left: solid #ffffff1f 0.75pt; border-right: solid #ffffff1f 0.75pt; border-top: none; margin-bottom: 0.0pt; margin-top: 0.0pt; padding-bottom: 0.0pt; padding-left: 4.0pt; padding-right: 4.0pt; padding-top: 0.0pt;"><span class="C9DxTc " style="color: #24292e; font-family: 'Source Code Pro', Arial; font-size: 10.0pt; font-weight: 400; vertical-align: baseline;"> title = {Effective Data Augmentation With Diffusion Models},</span></p><p dir="ltr" class="zfr3Q CDt4Ke " style="background-clip: padding-box; background-color: rgba(0,0,0,0.0590000004); border-bottom: none; border-left: solid #ffffff1f 0.75pt; border-right: solid #ffffff1f 0.75pt; border-top: none; margin-bottom: 0.0pt; margin-top: 0.0pt; padding-bottom: 0.0pt; padding-left: 4.0pt; padding-right: 4.0pt; padding-top: 0.0pt;"><span class="C9DxTc " style="color: #24292e; font-family: 'Source Code Pro', Arial; font-size: 10.0pt; font-weight: 400; vertical-align: baseline;"> publisher = {arXiv},</span></p><p dir="ltr" class="zfr3Q CDt4Ke " style="background-clip: padding-box; background-color: rgba(0,0,0,0.0590000004); border-bottom: none; border-left: solid #ffffff1f 0.75pt; border-right: solid #ffffff1f 0.75pt; border-top: none; margin-bottom: 0.0pt; margin-top: 0.0pt; padding-bottom: 0.0pt; padding-left: 4.0pt; padding-right: 4.0pt; padding-top: 0.0pt;"><span class="C9DxTc " style="color: #24292e; font-family: 'Source Code Pro', Arial; font-size: 10.0pt; font-weight: 400; vertical-align: baseline;"> year = {2023},</span></p><p dir="ltr" class="zfr3Q CDt4Ke " style="background-clip: padding-box; background-color: rgba(0,0,0,0.0590000004); border-bottom: none; border-left: solid #ffffff1f 0.75pt; border-right: solid #ffffff1f 0.75pt; border-top: none; margin-bottom: 0.0pt; margin-top: 0.0pt; padding-bottom: 0.0pt; padding-left: 4.0pt; padding-right: 4.0pt; padding-top: 0.0pt;"><span class="C9DxTc " style="color: #24292e; font-family: 'Source Code Pro', Arial; font-size: 10.0pt; font-weight: 400; vertical-align: baseline;"> copyright = {arXiv.org perpetual, non-exclusive license}</span></p><p dir="ltr" class="zfr3Q CDt4Ke " style="background-clip: padding-box; background-color: rgba(0,0,0,0.0590000004); border-bottom: solid #ffffff1f 0.75pt; border-left: solid #ffffff1f 
0.75pt; border-right: solid #ffffff1f 0.75pt; border-top: none; margin-bottom: 0.0pt; margin-top: 0.0pt; padding-bottom: 4.0pt; padding-left: 4.0pt; padding-right: 4.0pt; padding-top: 0.0pt;"><span class="C9DxTc " style="color: #24292e; font-family: 'Source Code Pro', Arial; font-size: 10.0pt; font-weight: 400; vertical-align: baseline;">}</span></p><br></div></div></div></div></div></div></div><div class="hJDwNd-AhqUyc-R6PoUb Ft7HRd-AhqUyc-R6PoUb JNdkSc SQVYQc L6cTce-purZT L6cTce-pSzOP"><div class="JNdkSc-SmKAyb LkDMRd"><div class="" jscontroller="sGwD4d" jsaction="zXBUYb:zTPCnb;zQF9Uc:Qxe3nd;" jsname="F57UId"></div></div></div></div></div></section></div><div class="Xpil1b xgQ6eb"></div><footer jsname="yePe5c"></footer><div jscontroller="j1RDQb" jsaction="rcuQ6b:rcuQ6b;MxH79b:JdcaS;FaOgy:XuHpsb;" class="dZA9kd ynRLnc" data-last-updated-at-time="1703046058114" data-is-preview="false"><div role="button" class="U26fgb JRtysb WzwrXb I12f0b K2mXPb zXBiaf ynRLnc" jscontroller="iSvg6e" jsaction="click:cOuCgd; mousedown:UX7yZ; mouseup:lbsD7e; mouseenter:tfO1Yc; mouseleave:JywGue; focus:AHmuwe; blur:O22p3e; contextmenu:mg9Pef;touchstart:p6p2H; touchmove:FwuNnf; touchend:yfqBxc(preventMouseEvents=true|preventDefault=true); touchcancel:JMtRjd;;keydown:I481le;" jsshadow jsname="Bg3gkf" aria-label="Site actions" aria-disabled="false" tabindex="0" aria-haspopup="true" aria-expanded="false" data-menu-corner="bottom-start" data-anchor-corner="top-start"><div class="NWlf3e MbhUzd" jsname="ksKsZd"></div><span jsslot class="MhXXcc oJeWuf"><span class="Lw7GHd snByac"><svg width="24" height="24" viewBox="0 0 24 24" focusable="false" class=" NMm5M"><path d="M11 17h2v-6h-2v6zm1-15C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm0 18c-4.41 0-8-3.59-8-8s3.59-8 8-8 8 3.59 8 8-3.59 8-8 8zM11 9h2V7h-2v2z"/></svg></span></span><div jsname="xl07Ob" style="display:none" aria-hidden="true"><div class="JPdR6b hVNH5c" jscontroller="uY3Nvd" jsaction="IpSVtb:TvD9Pc;fEN2Ze:xzS4ub;frq95c:LNeFm;cFpp9e:J9oOtd; click:H8nU8b; mouseup:H8nU8b; keydown:I481le; keypress:Kr2w4b; blur:O22p3e; focus:H8nU8b" role="menu" tabindex="0" style="position:fixed"><div class="XvhY1d" jsaction="mousedown:p8EH2c; touchstart:p8EH2c;"><div class="JAPqpe K0NPx"><span jsslot class="z80M1 FeRvI" jsaction="click:o6ZaF(preventDefault=true); mousedown:lAhnzb; mouseup:Osgxgf; mouseenter:SKyDAe; mouseleave:xq3APb;touchstart:jJiBRc; touchmove:kZeBdd; touchend:VfAz8(preventMouseEvents=true)" jsname="j7LFlb" aria-label="Admin" role="menuitem" tabindex="-1"><div class="aBBjbd MbhUzd" jsname="ksKsZd"></div><div class="uyYuVb oJeWuf" jscontroller="Uw6ODe" jsaction="JIbuQc:sA9Jl;" jsmodel="IlVkp" data-admin-details-url="/v/showsitedetails/btrabucco.com/da-fusion" data-request-edit-access-url="null"><div class="jO7h3c">Admin</div></div></span><span jsslot class="z80M1 FeRvI" jsaction="click:o6ZaF(preventDefault=true); mousedown:lAhnzb; mouseup:Osgxgf; mouseenter:SKyDAe; mouseleave:xq3APb;touchstart:jJiBRc; touchmove:kZeBdd; touchend:VfAz8(preventMouseEvents=true)" jsname="j7LFlb" data-disabled-tooltip="Contact is not available in preview mode" aria-label="Contact" role="menuitem" tabindex="-1"><div class="aBBjbd MbhUzd" jsname="ksKsZd"></div><div class="uyYuVb oJeWuf" jscontroller="j3gDVb" jsaction="JIbuQc:sGCPHc;" jsmodel="Rta7Nb" data-normalized-path="btrabucco.com/da-fusion/home"><div class="jO7h3c">Contact</div></div></span><span jsslot class="z80M1 FeRvI" jsaction="click:o6ZaF(preventDefault=true); mousedown:lAhnzb; mouseup:Osgxgf; 
mouseenter:SKyDAe; mouseleave:xq3APb;touchstart:jJiBRc; touchmove:kZeBdd; touchend:VfAz8(preventMouseEvents=true)" jsname="j7LFlb" aria-label="Page details" role="menuitem" tabindex="-1"><div class="aBBjbd MbhUzd" jsname="ksKsZd"></div><div class="uyYuVb oJeWuf" jsaction="JIbuQc:hriXLd;" jsname="Rg8K2c"><div class="jO7h3c">Page details</div></div></span></div></div></div></div></div></div><div jscontroller="j1RDQb" jsaction="focusin:gBxDVb(srlkmf); focusout:zvXhGb(srlkmf); click:ro2KTd(psdQ5e);JIbuQc:DSypkd(Bg3gkf);MxH79b:JdcaS;rcuQ6b:rcuQ6b;" class="LqzjUe ynRLnc" data-last-updated-at-time="1703046058114" data-is-preview="false"><div jsname="psdQ5e" class="Q0cSn"></div><div jsname="bN97Pc" class="hBW7Hb"><div role="button" class="U26fgb mUbCce fKz7Od kpPxtd QMuaBc M9Bg4d" jscontroller="VXdfxd" jsaction="click:cOuCgd; mousedown:UX7yZ; mouseup:lbsD7e; mouseenter:tfO1Yc; mouseleave:JywGue; focus:AHmuwe; blur:O22p3e; contextmenu:mg9Pef;touchstart:p6p2H; touchmove:FwuNnf; touchend:yfqBxc(preventMouseEvents=true|preventDefault=true); touchcancel:JMtRjd;" jsshadow jsname="Bg3gkf" aria-label="Site actions" aria-disabled="false" tabindex="-1" aria-hidden="true"><div class="VTBa7b MbhUzd" jsname="ksKsZd"></div><span jsslot class="xjKiLb"><span class="Ce1Y1c" style="top: -12px"><svg width="24" height="24" viewBox="0 0 24 24" focusable="false" class=" NMm5M"><path d="M11 17h2v-6h-2v6zm1-15C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm0 18c-4.41 0-8-3.59-8-8s3.59-8 8-8 8 3.59 8 8-3.59 8-8 8zM11 9h2V7h-2v2z"/></svg></span></span></div><div jsname="srlkmf" class="hUphyc"><div class="YkaBSd"><div class="iBkmkf"><span>Page updated</span> <span jsname="CFIm1b" class="dji00c" jsaction="AHmuwe:eGiyHb; mouseover:eGiyHb;" tabindex="0" role="contentinfo"></span></div></div><div class="YkaBSd" jsmodel="IlVkp" jscontroller="Uw6ODe" jsaction="click:sA9Jl" data-admin-details-url="/v/showsitedetails/btrabucco.com/da-fusion" data-request-edit-access-url="null"><div role="button" class="U26fgb kpPxtd J7BuEb" jsshadow jsname="lV5oke" aria-disabled="false" tabindex="0">Admin</div></div><div class="YkaBSd" jscontroller="j3gDVb" jsmodel="Rta7Nb" jsaction="click:sGCPHc;" data-normalized-path="btrabucco.com/da-fusion/home"><div role="button" class="U26fgb kpPxtd J7BuEb" jsshadow aria-label="Contact " aria-disabled="false" tabindex="0">Contact</div></div></div></div></div><div jsname="kdb7zb"><div jscontroller="kklOXe" jsmodel="nbZU0e" jsaction="rcuQ6b:rcuQ6b;FaOgy:nkegzf;BU3dg:U3QbAf;HRy4zb:Z8zbSc;" class="Pt0Du TSZdd"><div class="mF4yBc" jsname="LgbsSe"><div jscontroller="TW9Rvc" jsaction="rcuQ6b:WYd;"><div role="presentation" class="U26fgb XHsn7e MAXCNe M9Bg4d" jscontroller="VXdfxd" jsaction="click:cOuCgd; mousedown:UX7yZ; mouseup:lbsD7e; mouseenter:tfO1Yc; mouseleave:JywGue; focus:AHmuwe; blur:O22p3e; contextmenu:mg9Pef;" jsshadow jsname="LgbsSe" aria-label="Edit this page" aria-disabled="false" data-tooltip="Edit this page" data-tooltip-vertical-offset="-12" data-tooltip-horizontal-offset="0"><a class="FKF6mc TpQm9d" href="/u/0/d/1H2xR56eRCUmVUkDfo9SMNUBwpShtZDZa/p/1j1n5utR-JyBSVQIkKZNN9mWgOndcixik/edit?authuser=0&usp=edit_published_site" aria-label="Edit this page"><div class="HaXdpb wb61gb"></div><div class="HRp7vf MbhUzd" jsname="ksKsZd"></div><span jsslot class="Ip8zfc"><svg class="EI709d" viewBox="0 0 24 24" fill="currentColor" focusable="false"><path d="M3 17.25V21h3.75L17.81 9.94l-3.75-3.75L3 17.25zM20.71 7.04c.39-.39.39-1.02 0-1.41l-2.34-2.34c-.39-.39-1.02-.39-1.41 0l-1.83 1.83 3.75 3.75 
1.83-1.83z"/><path d="M0 0h24v24H0z" fill="none"/></svg></span></a></div></div></div></div></div></div></div></div><script nonce="a9BH-X5QD4qdoD3whwiuBw">DOCS_timing['cov']=new Date().getTime();</script><script src="https://www.gstatic.com/_/atari/_/js/k=atari.vw.en.e1fAekHULl4.O/am=gAE/d=1/rs=AGEqA5mst-EBQJicLg6789_EIxD3O9iTkA/m=view" id="base-js" nonce="a9BH-X5QD4qdoD3whwiuBw"></script></div></div><div jscontroller="YV8yqd" jsaction="rcuQ6b:npT2md"></div></body></html>
================================================
FILE: plot.py
================================================
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import seaborn as sns
import os
import glob
import argparse
import math


def pretty(text):
    """Convert a string into a consistent format for
    presentation in a matplotlib pyplot:
    this version looks like: One Two Three Four
    """
    text = text.replace("_", " ")
    text = text.replace("-", " ")
    text = text.replace("/", " ")
    text = text.strip()
    prev_c = None
    out_str = []
    for c in text:
        # insert a space at lowercase-to-uppercase boundaries (camelCase)
        if prev_c is not None and \
                prev_c.islower() and c.isupper():
            out_str.append(" ")
            prev_c = " "
        # capitalize the first letter of every word
        if prev_c is None or prev_c == " ":
            c = c.upper()
        out_str.append(c)
        prev_c = c
    return "".join(out_str)


if __name__ == "__main__":

    parser = argparse.ArgumentParser("Few-Shot Baseline")

    parser.add_argument("--logdirs", nargs="+", type=str, default=[
        "./spurge-baselines", "./pascal-baselines",
        "./coco-baselines", "./imagenet-baselines"])
    parser.add_argument("--datasets", nargs="+", type=str,
                        default=["Spurge", "Pascal", "COCO", "ImageNet"])
    parser.add_argument("--method-dirs", nargs="+", type=str,
                        default=["baseline", "real-guidance", "ours"])
    parser.add_argument("--method-names", nargs="+", type=str,
                        default=["Baseline", "Real Guidance (He et al., 2022)",
                                 "MBDA (Ours)"])
    parser.add_argument("--name", type=str, default="visualization")
    parser.add_argument("--rows", type=int, default=1)

    args = parser.parse_args()

    combined_dataframe = []

    # collect validation accuracy from every trial CSV in every method
    # subdirectory of each logdir
    for logdir, dataset in zip(args.logdirs, args.datasets):

        for bname in os.listdir(logdir):

            bpath = os.path.join(logdir, bname)
            if not os.path.isdir(bpath):
                continue

            files = list(glob.glob(os.path.join(bpath, "*.csv")))
            if len(files) == 0:
                continue

            data = pd.concat([pd.read_csv(x, index_col=0)
                              for x in files], ignore_index=True)
            data = data[(data["metric"] == "Accuracy") &
                        (data["split"] == "Validation")]

            # for each examples_per_class, keep the rows from the epoch
            # whose mean validation accuracy across trials is highest
            def select_by_epoch(df):
                selected_row = df.loc[df["value"].idxmax()]
                return data[(data["epoch"] == selected_row["epoch"]) &
                            (data["examples_per_class"] ==
                             selected_row["examples_per_class"])]

            best = data.groupby(["examples_per_class", "epoch"])
            best = best["value"].mean().to_frame('value').reset_index()
            best = best.groupby("examples_per_class").apply(select_by_epoch)

            best["method"] = bname
            best["dataset"] = dataset
            combined_dataframe.append(best)

    matplotlib.rc('font', family='Times New Roman', serif='cm10')
    matplotlib.rc('mathtext', fontset='cm')
    plt.rcParams['text.usetex'] = False

    combined_dataframe = pd.concat(
        combined_dataframe, ignore_index=True)

    # order the methods for plotting according to --method-dirs
    combined_dataframe = pd.concat([combined_dataframe[
        combined_dataframe['method'] == n] for n in args.method_dirs])

    color_palette = sns.color_palette(n_colors=len(args.method_dirs))

    legend_rows = int(math.ceil(len(args.method_names) / len(args.datasets)))
    columns = int(math.ceil(len(args.datasets) / args.rows))

    fig, axs = plt.subplots(
        args.rows, columns,
        figsize=(6 * columns, 4 * args.rows + (
            2.0 if legend_rows == 1 else
            2.5 if legend_rows == 2 else 3)))

    for i, dataset in enumerate(args.datasets):

        results = combined_dataframe
        if dataset not in ["all", "All", "Overall"]:
            results = results[results["dataset"] == dataset]

        axis = sns.lineplot(x="examples_per_class", y="value", hue="method",
                            data=results, errorbar=('ci', 68),
                            linewidth=4, palette=color_palette,
                            ax=(axs[i // columns, i % columns]
                                if args.rows > 1 and len(args.datasets) > 1
                                else axs[i] if len(args.datasets) > 1 else axs))

        if i == 0:
            handles, labels = axis.get_legend_handles_labels()
        axis.legend([], [], frameon=False)

        axis.set(xlabel=None)
        axis.set(ylabel=None)

        axis.spines['right'].set_visible(False)
        axis.spines['top'].set_visible(False)

        axis.xaxis.set_ticks_position('bottom')
        axis.yaxis.set_ticks_position('left')

        axis.yaxis.set_tick_params(labelsize=16)
        axis.xaxis.set_tick_params(labelsize=16)

        # only the bottom row of panels gets an x-axis label
        if i // columns == args.rows - 1:
            axis.set_xlabel("Examples Per Class", fontsize=24,
                            fontweight='bold', labelpad=12)
        axis.set_ylabel("Accuracy (Val)", fontsize=24,
                        fontweight='bold', labelpad=12)

        axis.set_title(dataset, fontsize=24, fontweight='bold', pad=12)
        axis.grid(color='grey', linestyle='dotted', linewidth=2)

    legend = fig.legend(handles, args.method_names,
                        loc="lower center", prop={'size': 24, 'weight': 'bold'},
                        ncol=min(len(args.method_names), len(args.datasets)))

    for i, legend_object in enumerate(legend.legendHandles):
        legend_object.set_linewidth(4.0)
        legend_object.set_color(color_palette[i])

    plt.tight_layout(pad=1.0)
    fig.subplots_adjust(hspace=0.3)
    fig.subplots_adjust(bottom=(
        0.25 if legend_rows == 1 else
        0.35 if legend_rows == 2 else 0.4) / args.rows + 0.05)

    plt.savefig(f"{args.name}.pdf")
    plt.savefig(f"{args.name}.png")
================================================
FILE: plot_masking_ablation.py
================================================
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
from collections import defaultdict
from itertools import product
import os
import glob
import argparse
import math
def pretty(text):
"""Convert a string into a consistent format for
presentation in a matplotlib pyplot:
this version looks like: One Two Three Four
"""
text = text.replace("_", " ")
text = text.replace("-", " ")
text = text.replace("/", " ")
text = text.strip()
prev_c = None
out_str = []
for c in text:
if prev_c is not None and \
prev_c.islower() and c.isupper():
out_str.append(" ")
prev_c = " "
if prev_c is None or prev_c == " ":
c = c.upper()
out_str.append(c)
prev_c = c
return "".join(out_str)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Few-Shot Baseline")
parser.add_argument("--logdirs", nargs="+", type=str,
default=["./pascal-baselines", "./coco-baselines"])
parser.add_argument("--datasets", nargs="+", type=str,
default=["Pascal", "COCO"])
parser.add_argument("--method-dirs", nargs="+", type=str,
default=["textual-inversion-0.5",
"textual-inversion-mask-0-0.5",
"textual-inversion-mask-0.5-0"])
parser.add_argument("--baseline-dirs", nargs="+", type=str,
default=["real-guidance-0.5-cap",
"real-guidance-mask-0-0.5",
"real-guidance-mask-0.5-0"])
parser.add_argument("--method-names", nargs="+", type=str,
default=["Original",
"Masked Foreground",
"Masked Background"])
parser.add_argument("--name", type=str, default="masking-results")
parser.add_argument("--rows", type=int, default=1)
parser.add_argument("--num-trials", type=int, default=8)
parser.add_argument("--no-legend", action="store_true")
args = parser.parse_args()
combined_dataframe = []
for logdir, dataset in zip(args.logdirs, args.datasets):
for bname in os.listdir(logdir):
bpath = os.path.join(logdir, bname)
if not os.path.isdir(bpath):
continue
files = list(glob.glob(os.path.join(bpath, "*.csv")))
if len(files) == 0:
continue
data = pd.concat([pd.read_csv(x, index_col=0)
for x in files], ignore_index=True)
data = data[(data["metric"] == "Accuracy") &
(data[ "split"] == "Validation")]
def select_by_epoch(df):
selected_row = df.loc[df["value"].idxmax()]
return data[(data["epoch"] == selected_row["epoch"]) &
(data[ "examples_per_class"] ==
selected_row["examples_per_class"])]
best = data.groupby(["examples_per_class", "epoch"])
best = best["value"].mean().to_frame('value').reset_index()
best = best.groupby("examples_per_class").apply(
select_by_epoch
)
best["method"] = bname
best["dataset"] = dataset
combined_dataframe.append(best)
matplotlib.rc('font', family='Times New Roman', serif='cm10')
matplotlib.rc('mathtext', fontset='cm')
plt.rcParams['text.usetex'] = False
combined_dataframe = pd.concat(
combined_dataframe, ignore_index=True)
combined_dataframe = pd.concat([
combined_dataframe[combined_dataframe['method'] == n]
for n in (args.method_dirs + args.baseline_dirs)])
color_palette = sns.color_palette(n_colors=len(args.method_dirs))
legend_rows = int(math.ceil(len(args.method_names) / (2 * len(args.datasets))))
columns = int(math.ceil(2 * len(args.datasets) / args.rows))
fig, axs = plt.subplots(
args.rows, columns,
figsize=(6 * columns, 4 * args.rows + ((
2.0 if legend_rows == 1 else
2.5 if legend_rows == 2 else 3
) if not args.no_legend else 1.0)))
baseline_performance = defaultdict(list)
for dataset in args.datasets:
for seed in range(args.num_trials):
for bi, method in enumerate(args.baseline_dirs):
results = combined_dataframe[
(combined_dataframe["dataset"] == dataset) &
(combined_dataframe["method"] == method) &
(combined_dataframe["seed"] == seed)
]
for examples in [1, 2, 4, 8, 16]:
value = results[results["examples_per_class"]
== examples]["value"].to_numpy()
if value.size > 0: baseline_performance[
(dataset, bi, examples)].append(value[0])
cumulative_value = 0.0
invalid = False
for examples_a, examples_b in zip([1, 2, 4, 8], [2, 4, 8, 16]):
value_a = results[results["examples_per_class"] == examples_a]["value"].to_numpy()
value_b = results[results["examples_per_class"] == examples_b]["value"].to_numpy()
if value_a.size > 0 and value_b.size > 0:
cumulative_value += ((value_a + value_b) / 2) * (examples_b - examples_a)
                    else:
                        invalid = True
                if not invalid:
                    baseline_performance[
                        (dataset, bi)].append(cumulative_value[0])
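    # For each method, record accuracy gained over its index-matched baseline
    # (averaged over seeds) at the same examples_per_class, plus the gained
    # AUC (method AUC minus mean baseline AUC), one record per seed.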
performance_df = []
performance_auc_df = []
for dataset in args.datasets:
for seed in range(args.num_trials):
for bi, method in enumerate(args.method_dirs):
results = combined_dataframe[
(combined_dataframe["dataset"] == dataset) &
(combined_dataframe["method"] == method) &
(combined_dataframe["seed"] == seed)
]
for examples in [1, 2, 4, 8, 16]:
if (dataset, bi, examples) not in baseline_performance: continue
value = results[results["examples_per_class"]
== examples]["value"].to_numpy()
baseline_value = np.mean(
baseline_performance[(dataset, bi, examples)])
                    if value.size > 0:
                        performance_df.append(dict(
                            dataset=dataset,
                            method=method,
                            seed=seed,
                            examples_per_class=examples,
                            value=value[0] - baseline_value,
                        ))
if (dataset, bi) not in baseline_performance: continue
valid = 0
cumulative_value = -np.mean(baseline_performance[(dataset, bi)])
for examples_a, examples_b in zip([1, 2, 4, 8], [2, 4, 8, 16]):
value_a = results[results["examples_per_class"] == examples_a]["value"].to_numpy()
value_b = results[results["examples_per_class"] == examples_b]["value"].to_numpy()
if value_a.size > 0 and value_b.size > 0:
cumulative_value += ((value_a + value_b) / 2) * (examples_b - examples_a)
valid += 1
if valid == 4:
performance_auc_df.append(dict(
dataset=dataset,
method=method,
seed=seed,
value=cumulative_value[0],
))
performance_df = pd.DataFrame.from_records(performance_df)
performance_auc_df = pd.DataFrame.from_records(performance_auc_df)
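    # Min-max normalize scores within each dataset so an "Overall" panel can
    # compare methods across datasets on a common [0, 1] scale.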
for dataset in args.datasets:
df = performance_df.loc[performance_df["dataset"] == dataset]
if df.size == 0: continue
acc_max = df["value"].to_numpy().max()
acc_min = df["value"].to_numpy().min()
performance_df.loc[
performance_df["dataset"] == dataset,
"normalized_value"
] = (df["value"] - acc_min) / (acc_max - acc_min)
df = performance_auc_df.loc[performance_auc_df["dataset"] == dataset]
if df.size == 0: continue
acc_max = df["value"].to_numpy().max()
acc_min = df["value"].to_numpy().min()
performance_auc_df.loc[
performance_auc_df["dataset"] == dataset,
"normalized_value"
] = (df["value"] - acc_min) / (acc_max - acc_min)
for i, (style, dataset) in enumerate(
product(["line", "bar"], args.datasets)):
results = performance_df[
performance_df["dataset"] == dataset] \
if dataset != "Overall" else performance_df
results_auc = performance_auc_df[
performance_auc_df["dataset"] == dataset] \
if dataset != "Overall" else performance_auc_df
if style == "line":
axis = sns.lineplot(
y="normalized_value" if dataset == "Overall" else "value",
x="examples_per_class", hue="method",
data=results, errorbar=('ci', 68),
linewidth=4, palette=color_palette,
ax=(axs[i // columns, i % columns]
if args.rows > 1 and len(args.datasets) > 1
else axs[i] if len(args.datasets) > 1 else axs))
if i == 0: handles, labels = axis.get_legend_handles_labels()
axis.legend([],[], frameon=False)
axis.set(xlabel=None)
axis.set(ylabel=None)
axis.spines['right'].set_visible(False)
axis.spines['top'].set_visible(False)
axis.xaxis.set_ticks_position('bottom')
axis.yaxis.set_ticks_position('left')
axis.yaxis.set_tick_params(labelsize=16)
axis.xaxis.set_tick_params(labelsize=16)
if i // columns == args.rows - 1:
axis.set_xlabel("Examples Per Class", fontsize=24,
fontweight='bold', labelpad=12)
axis.set_ylabel("Normalized Score" if dataset == "Overall"
else "Gained Accuracy (Val)", fontsize=24,
fontweight='bold', labelpad=12)
elif style == "bar":
axis = sns.barplot(
y="normalized_value" if dataset == "Overall" else "value",
x="method", data=results_auc, errorbar=('ci', 68),
linewidth=4, palette=color_palette,
ax=(axs[i // columns, i % columns]
if args.rows > 1 and len(args.datasets) > 1
else axs[i] if len(args.datasets) > 1 else axs))
if i == 0: handles, labels = axis.get_legend_handles_labels()
axis.legend([],[], frameon=False)
axis.set(xlabel=None)
axis.set(ylabel=None)
axis.spines['right'].set_visible(False)
axis.spines['top'].set_visible(False)
axis.xaxis.set_ticks_position('bottom')
axis.yaxis.set_ticks_position('left')
axis.yaxis.set_tick_params(labelsize=16)
axis.xaxis.set_ticklabels([])
acc_max = results_auc["normalized_value" if dataset == "Overall" else "value"].to_numpy().max()
acc_min = results_auc["normalized_value" if dataset == "Overall" else "value"].to_numpy().min()
axis.set_ylim(max(0, acc_min), acc_max)
axis.set_ylabel("Normalized Score" if dataset == "Overall"
else "Gained AUC (Val)", fontsize=24,
fontweight='bold', labelpad=12)
axis.set_title(dataset, fontsize=24, fontweight='bold', pad=12)
axis.grid(color='grey', linestyle='dotted', linewidth=2)
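    # Shared figure legend across panels; Legend.legendHandles is deprecated
    # in favor of legend_handles on newer matplotlib releases.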
if not args.no_legend:
legend = fig.legend(handles, [x for x in args.method_names],
loc="lower center", prop={'size': 24, 'weight': 'bold'},
ncol=min(len(args.method_names), 2 * len(args.datasets)))
for i, legend_object in enumerate(legend.legendHandles):
legend_object.set_linewidth(4.0)
legend_object.set_color(color_palette[i])
plt.tight_layout(pad=1.0)
fig.subplots_adjust(hspace=0.3)
if not args.no_legend:
fig.subplots_adjust(bottom=(
0.25 if legend_rows == 1 else
0.35 if legend_rows == 2 else 0.4
) / args.rows + 0.05)
plt.savefig(f"{args.name}.pdf")
plt.savefig(f"{args.name}.png")
================================================
FILE: plot_stacking_ablation.py
================================================
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import seaborn as sns
import os
import glob
import argparse
import math
def pretty(text):
"""Convert a string into a consistent format for
presentation in a matplotlib pyplot:
this version looks like: One Two Three Four
"""
text = text.replace("_", " ")
text = text.replace("-", " ")
text = text.replace("/", " ")
text = text.strip()
prev_c = None
out_str = []
for c in text:
if prev_c is not None and \
prev_c.islower() and c.isupper():
out_str.append(" ")
prev_c = " "
if prev_c is None or prev_c == " ":
c = c.upper()
out_str.append(c)
prev_c = c
return "".join(out_str)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Few-Shot Baseline")
parser.add_argument("--logdirs", nargs="+", type=str, default=[
"./spurge-baselines", "./pascal-baselines", "./coco-baselines"])
parser.add_argument("--datasets", nargs="+", type=str,
default=["Spurge", "Pascal", "COCO", "Overall"])
parser.add_argument("--method-dirs", nargs="+", type=str,
default=["textual-inversion-0.5", "textual-inversion-1.0-0.75-0.5-0.25"])
parser.add_argument("--baseline-dir", type=str, default="baseline")
parser.add_argument("--method-names", nargs="+", type=str,
default=["Model-Based Data Augmentation (k = 1)",
"Model-Based Data Augmentation (k = 4)"])
parser.add_argument("--name", type=str, default="stacking-results-bar")
parser.add_argument("--rows", type=int, default=1)
parser.add_argument("--num-trials", type=int, default=8)
parser.add_argument("--no-legend", action="store_true")
args = parser.parse_args()
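    # Same aggregation as the other plotting scripts: merge per-trial CSVs
    # and keep the best epoch per examples-per-class setting.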
combined_dataframe = []
for logdir, dataset in zip(
args.logdirs, args.datasets):
for bname in os.listdir(logdir):
bpath = os.path.join(logdir, bname)
if not os.path.isdir(bpath):
continue
files = list(glob.glob(os.path.join(bpath, "*.csv")))
if len(files) == 0:
continue
data = pd.concat([pd.read_csv(x, index_col=0)
for x in files], ignore_index=True)
            data = data[(data["metric"] == "Accuracy") &
                        (data["split"] == "Validation")]
def select_by_epoch(df):
selected_row = df.loc[df["value"].idxmax()]
                return data[(data["epoch"] == selected_row["epoch"]) &
                            (data["examples_per_class"] ==
                             selected_row["examples_per_class"])]
best = data.groupby(["examples_per_class", "epoch"])
best = best["value"].mean().to_frame('value').reset_index()
best = best.groupby("examples_per_class").apply(
select_by_epoch
)
best["method"] = bname
best["dataset"] = dataset
combined_dataframe.append(best)
matplotlib.rc('font', family='Times New Roman', serif='cm10')
matplotlib.rc('mathtext', fontset='cm')
plt.rcParams['text.usetex'] = False
combined_dataframe = pd.concat(
combined_dataframe, ignore_index=True)
combined_dataframe = pd.concat([combined_dataframe[
combined_dataframe['method'] == n] for n in args.method_dirs + [args.baseline_dir]])
color_palette = sns.color_palette(n_colors=len(args.method_dirs))
legend_rows = int(math.ceil(len(args.method_names) / len(args.datasets)))
columns = int(math.ceil(len(args.datasets) / args.rows))
fig, axs = plt.subplots(
args.rows, columns,
figsize=(6 * columns, 3.5 * args.rows + ((
2.0 if legend_rows == 1 else
2.5 if legend_rows == 2 else 3
) if not args.no_legend else 1.0)))
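    # Average each dataset's baseline AUC across seeds (trapezoid rule over
    # examples_per_class); "Overall" gets no logdir from zip(), so its
    # baseline stays 0 and its panel is built from normalized scores instead.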
auc_df = []
baseline_performance = {dataset: 0 for dataset in args.datasets}
for dataset in args.datasets:
for seed in range(args.num_trials):
results = combined_dataframe[
(combined_dataframe["dataset"] == dataset) &
(combined_dataframe["method"] == args.baseline_dir) &
(combined_dataframe["seed"] == seed)
]
cumulative_value = 0.0
invalid = False
for examples_a, examples_b in zip([1, 2, 4, 8], [2, 4, 8, 16]):
value_a = results[results["examples_per_class"] == examples_a]["value"].to_numpy()
value_b = results[results["examples_per_class"] == examples_b]["value"].to_numpy()
if value_a.size > 0 and value_b.size > 0:
cumulative_value += ((value_a + value_b) / 2) * (examples_b - examples_a)
                else:
                    invalid = True
if not invalid:
baseline_performance[dataset] += \
cumulative_value[0] / args.num_trials
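    # Gained AUC for each method and seed: the method's AUC minus the
    # seed-averaged baseline AUC of the same dataset.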
for dataset in args.datasets:
for method in args.method_dirs:
for seed in range(args.num_trials):
results = combined_dataframe[
(combined_dataframe["dataset"] == dataset) &
(combined_dataframe["method"] == method) &
(combined_dataframe["seed"] == seed)
]
if dataset not in baseline_performance: continue
cumulative_value = -baseline_performance[dataset]
invalid = False
for examples_a, examples_b in zip([1, 2, 4, 8], [2, 4, 8, 16]):
value_a = results[results["examples_per_class"] == examples_a]["value"].to_numpy()
value_b = results[results["examples_per_class"] == examples_b]["value"].to_numpy()
if value_a.size > 0 and value_b.size > 0:
cumulative_value += ((value_a + value_b) / 2) * (examples_b - examples_a)
                    else:
                        invalid = True
if not invalid:
auc_df.append(
dict(
dataset=dataset,
method=method,
seed=seed,
value=cumulative_value[0],
)
)
combined_dataframe = pd.DataFrame.from_records(auc_df)
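    # Min-max normalize gained AUC within each dataset for the "Overall" panel.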
for dataset in args.datasets:
df = combined_dataframe.loc[combined_dataframe["dataset"] == dataset]
if df.size == 0: continue
acc_max = df["value"].to_numpy().max()
acc_min = df["value"].to_numpy().min()
combined_dataframe.loc[
combined_dataframe["dataset"] == dataset,
"normalized_value"
] = (df["value"] - acc_min) / (acc_max - acc_min)
for i, dataset in enumerate(args.datasets):
results = combined_dataframe[combined_dataframe[
"dataset"] == dataset] if dataset != "Overall" else combined_dataframe
axis = sns.barplot(
y="normalized_value" if dataset == "Overall" else "value",
x="method", data=results, errorbar=('ci', 68),
linewidth=4, palette=color_palette,
ax=(axs[i // columns, i % columns]
if args.rows > 1 and len(args.datasets) > 1
else axs[i] if len(args.datasets) > 1 else axs))
if i == 0: handles, labels = axis.get_legend_handles_labels()
axis.legend([],[], frameon=False)
axis.set(xlabel=None)
axis.set(ylabel=None)
axis.spines['right'].set_visible(False)
axis.spines['top'].set_visible(False)
axis.xaxis.set_ticks_position('bottom')
axis.yaxis.set_ticks_position('left')
axis.yaxis.set_tick_params(labelsize=16)
axis.xaxis.set_ticklabels([])
acc_max = results["normalized_value" if dataset == "Overall" else "value"].to_numpy().max()
acc_min = results["normalized_value" if dataset == "Overall" else "value"].to_numpy().min()
axis.set_ylim(max(0, acc_min), acc_max)
axis.set_ylabel("Normalized Score" if dataset == "Overall"
else "Gained AUC (Val)", fontsize=24,
fontweight='bold', labelpad=12)
axis.set_title(dataset, fontsize=24, fontweight='bold', pad=12)
axis.grid(color='grey', linestyle='dotted', linewidth=2)
if not args.no_legend:
legend = fig.legend([x for x in args.method_names],
loc="lower center", prop={'size': 24, 'weight': 'bold'},
ncol=min(len(args.method_names), len(args.datasets)))
for i, legend_object in enumerate(legend.legendHandles):
legend_object.set_linewidth(4.0)
legend_object.set_color(color_palette[i])
plt.tight_layout(pad=1.0)
fig.subplots_adjust(hspace=0.3)
if not args.no_legend:
fig.subplots_adjust(bottom=(
0.20 if legend_rows == 1 else
0.30 if legend_rows == 2 else 0.35
) / args.rows + 0.05)
plt.savefig(f"{args.name}.pdf")
plt.savefig(f"{args.name}.png")
================================================
FILE: plot_stratify.py
================================================
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import seaborn as sns
import os
import glob
import argparse
def pretty(text):
"""Convert a string into a consistent format for
presentation in a matplotlib pyplot:
this version looks like: One Two Three Four
"""
text = text.replace("_", " ")
text = text.replace("-", " ")
text = text.replace("/", " ")
text = text.strip()
prev_c = None
out_str = []
for c in text:
if prev_c is not None and \
prev_c.islower() and c.isupper():
out_str.append(" ")
prev_c = " "
if prev_c is None or prev_c == " ":
            c = c.upper()
        out_str.append(c)
        prev_c = c
    return "".join(out_str)
SYMBOL INDEX (92 symbols across 19 files)
FILE: fine_tune.py
function save_progress (line 78) | def save_progress(text_encoder, placeholder_token_id, accelerator, args,...
function parse_args (line 85) | def parse_args():
class TextualInversionDataset (line 329) | class TextualInversionDataset(Dataset):
method __init__ (line 330) | def __init__(
method __len__ (line 369) | def __len__(self):
method __getitem__ (line 372) | def __getitem__(self, i):
function get_full_repo_name (line 412) | def get_full_repo_name(model_id: str, organization: Optional[str] = None...
function main (line 422) | def main(args):
FILE: fine_tune_upstream.py
function save_model_card (line 106) | def save_model_card(repo_id: str, images=None, base_model=str, repo_fold...
function log_validation (line 134) | def log_validation(text_encoder, tokenizer, unet, vae, args, accelerator...
function save_progress (line 180) | def save_progress(text_encoder, placeholder_token_ids, accelerator, args...
function parse_args (line 191) | def parse_args():
class TextualInversionDataset (line 500) | class TextualInversionDataset(Dataset):
method __init__ (line 501) | def __init__(
method __len__ (line 540) | def __len__(self):
method __getitem__ (line 543) | def __getitem__(self, i):
function main (line 586) | def main(args):
FILE: plot.py
function pretty (line 12) | def pretty(text):
function select_by_epoch (line 81) | def select_by_epoch(df):
FILE: plot_masking_ablation.py
function pretty (line 15) | def pretty(text):
function select_by_epoch (line 94) | def select_by_epoch(df):
FILE: plot_stacking_ablation.py
function pretty (line 12) | def pretty(text):
function select_by_epoch (line 86) | def select_by_epoch(df):
FILE: plot_stratify.py
function pretty (line 11) | def pretty(text):
function select_by_epoch (line 75) | def select_by_epoch(df):
FILE: semantic_aug/augmentations/compose.py
class ComposeSequential (line 16) | class ComposeSequential(GenerativeAugmentation):
method __init__ (line 18) | def __init__(self, augs: List[GenerativeAugmentation],
method forward (line 27) | def forward(self, image: Image.Image, label: int,
class ComposeParallel (line 38) | class ComposeParallel(GenerativeAugmentation):
method __init__ (line 40) | def __init__(self, augs: List[GenerativeAugmentation],
method forward (line 49) | def forward(self, image: Image.Image, label: int,
FILE: semantic_aug/augmentations/real_guidance.py
class RealGuidance (line 18) | class RealGuidance(GenerativeAugmentation):
method __init__ (line 22) | def __init__(self, model_path: str = "CompVis/stable-diffusion-v1-4",
method forward (line 64) | def forward(self, image: Image.Image, label: int,
FILE: semantic_aug/augmentations/textual_inversion.py
function load_embeddings (line 27) | def load_embeddings(embed_path: str,
function format_name (line 59) | def format_name(name):
class TextualInversion (line 63) | class TextualInversion(GenerativeAugmentation):
method __init__ (line 67) | def __init__(self, embed_path: str,
method forward (line 118) | def forward(self, image: Image.Image, label: int,
FILE: semantic_aug/augmentations/textual_inversion_upstream.py
function format_name (line 27) | def format_name(name, num_tokens: int = 1):
class TextualInversion (line 38) | class TextualInversion(GenerativeAugmentation):
method __init__ (line 42) | def __init__(self, embed_path: str,
method forward (line 94) | def forward(self, image: Image.Image, label: int,
FILE: semantic_aug/datasets/caltech101.py
class CalTech101Dataset (line 19) | class CalTech101Dataset(FewShotDataset):
method __init__ (line 43) | def __init__(self, *args, split: str = "train", seed: int = 0,
method __len__ (line 116) | def __len__(self):
method get_image_by_idx (line 120) | def get_image_by_idx(self, idx: int) -> Image.Image:
method get_label_by_idx (line 124) | def get_label_by_idx(self, idx: int) -> int:
method get_metadata_by_idx (line 128) | def get_metadata_by_idx(self, idx: int) -> dict:
FILE: semantic_aug/datasets/coco.py
class COCODataset (line 26) | class COCODataset(FewShotDataset):
method __init__ (line 45) | def __init__(self, *args, split: str = "train", seed: int = 0,
method __len__ (line 139) | def __len__(self):
method get_image_by_idx (line 143) | def get_image_by_idx(self, idx: int) -> torch.Tensor:
method get_label_by_idx (line 147) | def get_label_by_idx(self, idx: int) -> torch.Tensor:
method get_metadata_by_idx (line 151) | def get_metadata_by_idx(self, idx: int) -> Dict:
FILE: semantic_aug/datasets/flowers102.py
class Flowers102Dataset (line 20) | class Flowers102Dataset(FewShotDataset):
method __init__ (line 128) | def __init__(self, *args, split: str = "train", seed: int = 0,
method __len__ (line 203) | def __len__(self):
method get_image_by_idx (line 207) | def get_image_by_idx(self, idx: int) -> Image.Image:
method get_label_by_idx (line 211) | def get_label_by_idx(self, idx: int) -> int:
method get_metadata_by_idx (line 215) | def get_metadata_by_idx(self, idx: int) -> dict:
FILE: semantic_aug/datasets/imagenet.py
class ImageNetDataset (line 27) | class ImageNetDataset(FewShotDataset):
method __init__ (line 52) | def __init__(self, *args, split: str = "train", seed: int = 0,
method __len__ (line 147) | def __len__(self):
method get_image_by_idx (line 151) | def get_image_by_idx(self, idx: int) -> torch.Tensor:
method get_label_by_idx (line 155) | def get_label_by_idx(self, idx: int) -> torch.Tensor:
method get_metadata_by_idx (line 159) | def get_metadata_by_idx(self, idx: int) -> Dict:
FILE: semantic_aug/datasets/pascal.py
class PASCALDataset (line 26) | class PASCALDataset(FewShotDataset):
method __init__ (line 35) | def __init__(self, *args, split: str = "train", seed: int = 0,
method __len__ (line 142) | def __len__(self):
method get_image_by_idx (line 146) | def get_image_by_idx(self, idx: int) -> Image.Image:
method get_label_by_idx (line 150) | def get_label_by_idx(self, idx: int) -> int:
method get_metadata_by_idx (line 154) | def get_metadata_by_idx(self, idx: int) -> dict:
FILE: semantic_aug/datasets/spurge.py
class SpurgeDataset (line 20) | class SpurgeDataset(FewShotDataset):
method __init__ (line 25) | def __init__(self, *args, data_dir: str = DEFAULT_DATA_DIR,
method __len__ (line 93) | def __len__(self):
method get_image_by_idx (line 97) | def get_image_by_idx(self, idx: int) -> torch.Tensor:
method get_label_by_idx (line 101) | def get_label_by_idx(self, idx: int) -> torch.Tensor:
method get_metadata_by_idx (line 105) | def get_metadata_by_idx(self, idx: int) -> Any:
FILE: semantic_aug/few_shot_dataset.py
class FewShotDataset (line 17) | class FewShotDataset(Dataset):
method __init__ (line 22) | def __init__(self, examples_per_class: int = None,
method get_image_by_idx (line 45) | def get_image_by_idx(self, idx: int) -> Image.Image:
method get_label_by_idx (line 50) | def get_label_by_idx(self, idx: int) -> int:
method get_metadata_by_idx (line 55) | def get_metadata_by_idx(self, idx: int) -> dict:
method generate_augmentations (line 59) | def generate_augmentations(self, num_repeats: int):
method __getitem__ (line 82) | def __getitem__(self, idx: int) -> Tuple[torch.Tensor, int]:
FILE: semantic_aug/generative_augmentation.py
class GenerativeAugmentation (line 10) | class GenerativeAugmentation(nn.Module, abc.ABC):
method forward (line 13) | def forward(self, image: Image.Image, label: int,
FILE: train_classifier.py
function run_experiment (line 67) | def run_experiment(examples_per_class: int = 0,
class ClassificationModel (line 320) | class ClassificationModel(nn.Module):
method __init__ (line 322) | def __init__(self, num_classes: int, backbone: str = "resnet50"):
method forward (line 340) | def forward(self, image):
Condensed preview — 186 files, each showing path, character count, and a content snippet (416K chars of structured content in total).
[
{
"path": ".gitignore",
"chars": 1863,
"preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# images\n*.png\n*.jpg\n*.pdf\n\n# experiment data"
},
{
"path": ".gitmodules",
"chars": 203,
"preview": "[submodule \"stable-diffusion\"]\n\tpath = stable-diffusion\n\turl = https://github.com/CompVis/stable-diffusion.git\n[submodul"
},
{
"path": "LICENSE",
"chars": 1072,
"preview": "MIT License\n\nCopyright (c) 2022 Brandon Trabucco\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "README.md",
"chars": 6029,
"preview": "# Effective Data Augmentation With Diffusion Models\n\n[![Watch Effective Data Augmentation With Diffusion Models On YouTu"
},
{
"path": "aggregate_embeddings.py",
"chars": 1374,
"preview": "import torch\nimport os\nimport glob\nimport argparse\nfrom itertools import product\nfrom tqdm import trange\n\n\nDEFAULT_EMBED"
},
{
"path": "fine_tune.py",
"chars": 31152,
"preview": "import argparse\nimport logging\nimport math\nimport os\nimport gc\nimport shutil\nimport random\nfrom pathlib import Path\nfrom"
},
{
"path": "fine_tune_upstream.py",
"chars": 41745,
"preview": "#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2023 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under"
},
{
"path": "generate_augmentations.py",
"chars": 4135,
"preview": "from semantic_aug.datasets.coco import COCODataset\nfrom semantic_aug.datasets.spurge import SpurgeDataset\nfrom semantic_"
},
{
"path": "generate_images.py",
"chars": 2362,
"preview": "from semantic_aug.augmentations.textual_inversion import TextualInversion\nfrom diffusers import StableDiffusionPipeline\n"
},
{
"path": "images/README.md",
"chars": 0,
"preview": ""
},
{
"path": "index.html",
"chars": 79503,
"preview": "\n<!DOCTYPE html><html lang=\"en\" itemscope itemtype=\"http://schema.org/WebPage\"><head><meta charset=\"utf-8\"><script nonce"
},
{
"path": "plot.py",
"chars": 5861,
"preview": "import matplotlib.pyplot as plt\nimport matplotlib\nimport pandas as pd\nimport seaborn as sns\n\nimport os\nimport glob\nimpor"
},
{
"path": "plot_masking_ablation.py",
"chars": 12879,
"preview": "import matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom coll"
},
{
"path": "plot_stacking_ablation.py",
"chars": 9293,
"preview": "import matplotlib.pyplot as plt\nimport matplotlib\nimport pandas as pd\nimport seaborn as sns\n\nimport os\nimport glob\nimpor"
},
{
"path": "plot_stratify.py",
"chars": 5355,
"preview": "import matplotlib.pyplot as plt\nimport matplotlib\nimport pandas as pd\nimport seaborn as sns\n\nimport os\nimport glob\nimpor"
},
{
"path": "scripts/baseline/launch_baseline_coco.sh",
"chars": 611,
"preview": "#!/bin/bash\n#SBATCH --job-name=coco\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH "
},
{
"path": "scripts/baseline/launch_baseline_imagenet.sh",
"chars": 623,
"preview": "#!/bin/bash\n#SBATCH --job-name=imagenet\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBA"
},
{
"path": "scripts/baseline/launch_baseline_pascal.sh",
"chars": 617,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/baseline/launch_baseline_spurge.sh",
"chars": 617,
"preview": "#!/bin/bash\n#SBATCH --job-name=spurge\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/baseline_randaugment/launch_baseline_coco.sh",
"chars": 641,
"preview": "#!/bin/bash\n#SBATCH --job-name=coco\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH "
},
{
"path": "scripts/baseline_randaugment/launch_baseline_imagenet.sh",
"chars": 653,
"preview": "#!/bin/bash\n#SBATCH --job-name=imagenet\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBA"
},
{
"path": "scripts/baseline_randaugment/launch_baseline_pascal.sh",
"chars": 647,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/baseline_randaugment/launch_baseline_spurge.sh",
"chars": 647,
"preview": "#!/bin/bash\n#SBATCH --job-name=spurge\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/cutmix_ablation/launch_baseline_pascal.sh",
"chars": 639,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/cutmix_ablation/launch_real_guidance=0.5_pascal.sh",
"chars": 876,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/cutmix_ablation/launch_textual_inversion=1.0-0.75-0.5-0.25_pascal.sh",
"chars": 1067,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/deit_backbone/launch_baseline_pascal.sh",
"chars": 668,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/deit_backbone/launch_real_guidance=0.5_pascal.sh",
"chars": 903,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/deit_backbone/launch_textual_inversion=1.0-0.75-0.5-0.25_pascal.sh",
"chars": 1094,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/erase_classes/erase_caltech101_part0.sh",
"chars": 542,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part1.sh",
"chars": 529,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part10.sh",
"chars": 538,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part11.sh",
"chars": 528,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part12.sh",
"chars": 533,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part13.sh",
"chars": 535,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part14.sh",
"chars": 528,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part15.sh",
"chars": 534,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part16.sh",
"chars": 537,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part17.sh",
"chars": 544,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part18.sh",
"chars": 538,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part19.sh",
"chars": 545,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part2.sh",
"chars": 536,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part20.sh",
"chars": 503,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part3.sh",
"chars": 537,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part4.sh",
"chars": 542,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part5.sh",
"chars": 544,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part6.sh",
"chars": 540,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part7.sh",
"chars": 531,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part8.sh",
"chars": 544,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_caltech101_part9.sh",
"chars": 545,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part0.sh",
"chars": 532,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part1.sh",
"chars": 528,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part10.sh",
"chars": 529,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part11.sh",
"chars": 527,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part12.sh",
"chars": 529,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part13.sh",
"chars": 535,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part14.sh",
"chars": 530,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part15.sh",
"chars": 540,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part2.sh",
"chars": 541,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part3.sh",
"chars": 517,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part4.sh",
"chars": 530,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part5.sh",
"chars": 531,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part6.sh",
"chars": 538,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part7.sh",
"chars": 550,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part8.sh",
"chars": 525,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_coco_part9.sh",
"chars": 527,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part0.sh",
"chars": 577,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part1.sh",
"chars": 557,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part10.sh",
"chars": 542,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part11.sh",
"chars": 560,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part12.sh",
"chars": 573,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part13.sh",
"chars": 555,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part14.sh",
"chars": 536,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part15.sh",
"chars": 548,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part16.sh",
"chars": 544,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part17.sh",
"chars": 546,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part18.sh",
"chars": 548,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part19.sh",
"chars": 549,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part2.sh",
"chars": 554,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part20.sh",
"chars": 519,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part3.sh",
"chars": 575,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part4.sh",
"chars": 558,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part5.sh",
"chars": 570,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part6.sh",
"chars": 564,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part7.sh",
"chars": 566,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part8.sh",
"chars": 556,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_flowers102_part9.sh",
"chars": 552,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part0.sh",
"chars": 548,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part1.sh",
"chars": 541,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part10.sh",
"chars": 549,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part11.sh",
"chars": 541,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part12.sh",
"chars": 541,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part13.sh",
"chars": 539,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part14.sh",
"chars": 545,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part15.sh",
"chars": 542,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part16.sh",
"chars": 535,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part17.sh",
"chars": 541,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part18.sh",
"chars": 546,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part19.sh",
"chars": 553,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part2.sh",
"chars": 538,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part3.sh",
"chars": 554,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part4.sh",
"chars": 545,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part5.sh",
"chars": 562,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part6.sh",
"chars": 554,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part7.sh",
"chars": 543,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part8.sh",
"chars": 541,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_imagenet_part9.sh",
"chars": 539,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_pascal_part0.sh",
"chars": 527,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_pascal_part1.sh",
"chars": 515,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_pascal_part2.sh",
"chars": 534,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_pascal_part3.sh",
"chars": 534,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/erase_spurge_part0.sh",
"chars": 510,
"preview": "#!/bin/bash\n#SBATCH --job-name=erase\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH"
},
{
"path": "scripts/erase_classes/generate_scripts.py",
"chars": 1764,
"preview": "from semantic_aug.datasets.coco import COCODataset\nfrom semantic_aug.datasets.spurge import SpurgeDataset\nfrom semantic_"
},
{
"path": "scripts/fine_tuning/fine_tune_coco.sh",
"chars": 903,
"preview": "#!/bin/bash\n#SBATCH --job-name=f-tune\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8,matrix"
},
{
"path": "scripts/fine_tuning/fine_tune_imagenet.sh",
"chars": 907,
"preview": "#!/bin/bash\n#SBATCH --job-name=f-tune\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8,matrix"
},
{
"path": "scripts/fine_tuning/fine_tune_pascal.sh",
"chars": 881,
"preview": "#!/bin/bash\n#SBATCH --job-name=f-tune\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8,matrix"
},
{
"path": "scripts/fine_tuning/fine_tune_spurge.sh",
"chars": 881,
"preview": "#!/bin/bash\n#SBATCH --job-name=f-tune\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8,matrix"
},
{
"path": "scripts/fine_tuning_erasure/fine_tune_coco.sh",
"chars": 934,
"preview": "#!/bin/bash\n#SBATCH --job-name=f-tune\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8,matrix"
},
{
"path": "scripts/fine_tuning_erasure/fine_tune_imagenet.sh",
"chars": 938,
"preview": "#!/bin/bash\n#SBATCH --job-name=f-tune\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8,matrix"
},
{
"path": "scripts/fine_tuning_erasure/fine_tune_pascal.sh",
"chars": 912,
"preview": "#!/bin/bash\n#SBATCH --job-name=f-tune\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8,matrix"
},
{
"path": "scripts/fine_tuning_erasure/fine_tune_spurge.sh",
"chars": 911,
"preview": "#!/bin/bash\n#SBATCH --job-name=f-tune\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8,matrix"
},
{
"path": "scripts/masking/launch_real_guidance=0-0.5_coco.sh",
"chars": 886,
"preview": "#!/bin/bash\n#SBATCH --job-name=coco\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH "
},
{
"path": "scripts/masking/launch_real_guidance=0-0.5_pascal.sh",
"chars": 892,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/masking/launch_real_guidance=0.5-0_coco.sh",
"chars": 886,
"preview": "#!/bin/bash\n#SBATCH --job-name=coco\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH "
},
{
"path": "scripts/masking/launch_real_guidance=0.5-0_pascal.sh",
"chars": 892,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/masking/launch_textual_inversion=0-0.5_coco.sh",
"chars": 910,
"preview": "#!/bin/bash\n#SBATCH --job-name=coco\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH "
},
{
"path": "scripts/masking/launch_textual_inversion=0-0.5_pascal.sh",
"chars": 916,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/masking/launch_textual_inversion=0.5-0_coco.sh",
"chars": 910,
"preview": "#!/bin/bash\n#SBATCH --job-name=coco\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH "
},
{
"path": "scripts/masking/launch_textual_inversion=0.5-0_pascal.sh",
"chars": 916,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/num_synthetic/launch_real_guidance=0.5_pascal_class_agnostic-20.sh",
"chars": 887,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/num_synthetic/launch_real_guidance=0.5_pascal_class_agnostic-5.sh",
"chars": 884,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/num_synthetic/launch_textual_inversion=1.0-0.75-0.5-0.25_pascal-20.sh",
"chars": 1086,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/num_synthetic/launch_textual_inversion=1.0-0.75-0.5-0.25_pascal-5.sh",
"chars": 1083,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/real_guidance/launch_real_guidance=0.5_coco.sh",
"chars": 845,
"preview": "#!/bin/bash\n#SBATCH --job-name=coco\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH "
},
{
"path": "scripts/real_guidance/launch_real_guidance=0.5_coco_class_agnostic.sh",
"chars": 841,
"preview": "#!/bin/bash\n#SBATCH --job-name=coco\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH "
},
{
"path": "scripts/real_guidance/launch_real_guidance=0.5_imagenet.sh",
"chars": 857,
"preview": "#!/bin/bash\n#SBATCH --job-name=imagenet\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBA"
},
{
"path": "scripts/real_guidance/launch_real_guidance=0.5_imagenet_class_agnostic.sh",
"chars": 853,
"preview": "#!/bin/bash\n#SBATCH --job-name=imagenet\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBA"
},
{
"path": "scripts/real_guidance/launch_real_guidance=0.5_pascal.sh",
"chars": 851,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/real_guidance/launch_real_guidance=0.5_pascal_class_agnostic.sh",
"chars": 847,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/real_guidance/launch_real_guidance=0.5_spurge.sh",
"chars": 860,
"preview": "#!/bin/bash\n#SBATCH --job-name=spurge\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/real_guidance/launch_real_guidance=0.5_spurge_class_agnostic.sh",
"chars": 847,
"preview": "#!/bin/bash\n#SBATCH --job-name=spurge\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/real_guidance_erasure/launch_real_guidance=0.5_coco.sh",
"chars": 928,
"preview": "#!/bin/bash\n#SBATCH --job-name=coco\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH "
},
{
"path": "scripts/real_guidance_erasure/launch_real_guidance=0.5_imagenet.sh",
"chars": 940,
"preview": "#!/bin/bash\n#SBATCH --job-name=imagenet\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBA"
},
{
"path": "scripts/real_guidance_erasure/launch_real_guidance=0.5_pascal.sh",
"chars": 934,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/real_guidance_erasure/launch_real_guidance=0.5_spurge.sh",
"chars": 943,
"preview": "#!/bin/bash\n#SBATCH --job-name=spurge\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/real_guidance_randaugment/launch_real_guidance=0.5_coco.sh",
"chars": 954,
"preview": "#!/bin/bash\n#SBATCH --job-name=coco\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH "
},
{
"path": "scripts/real_guidance_randaugment/launch_real_guidance=0.5_imagenet.sh",
"chars": 966,
"preview": "#!/bin/bash\n#SBATCH --job-name=imagenet\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBA"
},
{
"path": "scripts/real_guidance_randaugment/launch_real_guidance=0.5_pascal.sh",
"chars": 960,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/real_guidance_randaugment/launch_real_guidance=0.5_spurge.sh",
"chars": 969,
"preview": "#!/bin/bash\n#SBATCH --job-name=spurge\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/stacking/launch_textual_inversion=1.0-0.75-0.5-0.25_coco.sh",
"chars": 1032,
"preview": "#!/bin/bash\n#SBATCH --job-name=coco\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH "
},
{
"path": "scripts/stacking/launch_textual_inversion=1.0-0.75-0.5-0.25_imagenet.sh",
"chars": 1044,
"preview": "#!/bin/bash\n#SBATCH --job-name=imagenet\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBA"
},
{
"path": "scripts/stacking/launch_textual_inversion=1.0-0.75-0.5-0.25_pascal.sh",
"chars": 1038,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/stacking/launch_textual_inversion=1.0-0.75-0.5-0.25_spurge.sh",
"chars": 1038,
"preview": "#!/bin/bash\n#SBATCH --job-name=spurge\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/stacking_erasure/launch_textual_inversion=1.0-0.75-0.5-0.25_coco.sh",
"chars": 1205,
"preview": "#!/bin/bash\n#SBATCH --job-name=coco\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH "
},
{
"path": "scripts/stacking_erasure/launch_textual_inversion=1.0-0.75-0.5-0.25_imagenet.sh",
"chars": 1217,
"preview": "#!/bin/bash\n#SBATCH --job-name=imagenet\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBA"
},
{
"path": "scripts/stacking_erasure/launch_textual_inversion=1.0-0.75-0.5-0.25_pascal.sh",
"chars": 1211,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/stacking_erasure/launch_textual_inversion=1.0-0.75-0.5-0.25_spurge.sh",
"chars": 1211,
"preview": "#!/bin/bash\n#SBATCH --job-name=spurge\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/stacking_randaugment/launch_textual_inversion=1.0-0.75-0.5-0.25_coco.sh",
"chars": 1231,
"preview": "#!/bin/bash\n#SBATCH --job-name=coco\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH "
},
{
"path": "scripts/stacking_randaugment/launch_textual_inversion=1.0-0.75-0.5-0.25_imagenet.sh",
"chars": 1243,
"preview": "#!/bin/bash\n#SBATCH --job-name=imagenet\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBA"
},
{
"path": "scripts/stacking_randaugment/launch_textual_inversion=1.0-0.75-0.5-0.25_pascal.sh",
"chars": 1237,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/stacking_randaugment/launch_textual_inversion=1.0-0.75-0.5-0.25_spurge.sh",
"chars": 1237,
"preview": "#!/bin/bash\n#SBATCH --job-name=spurge\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/synthetic_prob/launch_real_guidance=0.5_pascal_class_agnostic-0.3.sh",
"chars": 855,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/synthetic_prob/launch_real_guidance=0.5_pascal_class_agnostic-0.7.sh",
"chars": 855,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/synthetic_prob/launch_textual_inversion=1.0-0.75-0.5-0.25_pascal-0.3.sh",
"chars": 1046,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/synthetic_prob/launch_textual_inversion=1.0-0.75-0.5-0.25_pascal-0.7.sh",
"chars": 1046,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/textual_inversion/launch_textual_inversion=0.5_coco.sh",
"chars": 857,
"preview": "#!/bin/bash\n#SBATCH --job-name=coco\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATCH "
},
{
"path": "scripts/textual_inversion/launch_textual_inversion=0.5_imagenet.sh",
"chars": 869,
"preview": "#!/bin/bash\n#SBATCH --job-name=imagenet\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBA"
},
{
"path": "scripts/textual_inversion/launch_textual_inversion=0.5_pascal.sh",
"chars": 863,
"preview": "#!/bin/bash\n#SBATCH --job-name=pascal\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "scripts/textual_inversion/launch_textual_inversion=0.5_spurge.sh",
"chars": 863,
"preview": "#!/bin/bash\n#SBATCH --job-name=spurge\n#SBATCH --exclude=matrix-1-12,matrix-0-24,matrix-1-4,matrix-2-13,matrix-1-8\n#SBATC"
},
{
"path": "semantic_aug/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "semantic_aug/augmentations/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "semantic_aug/augmentations/compose.py",
"chars": 1610,
"preview": "from semantic_aug.generative_augmentation import GenerativeAugmentation\nfrom diffusers import StableDiffusionImg2ImgPipe"
},
{
"path": "semantic_aug/augmentations/real_guidance.py",
"chars": 4120,
"preview": "from semantic_aug.generative_augmentation import GenerativeAugmentation\nfrom diffusers import StableDiffusionImg2ImgPipe"
},
{
"path": "semantic_aug/augmentations/textual_inversion.py",
"chars": 5907,
"preview": "from semantic_aug.generative_augmentation import GenerativeAugmentation\nfrom diffusers import StableDiffusionImg2ImgPipe"
},
{
"path": "semantic_aug/augmentations/textual_inversion_upstream.py",
"chars": 5173,
"preview": "from semantic_aug.generative_augmentation import GenerativeAugmentation\nfrom diffusers import StableDiffusionImg2ImgPipe"
},
{
"path": "semantic_aug/datasets/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "semantic_aug/datasets/caltech101.py",
"chars": 5399,
"preview": "from semantic_aug.few_shot_dataset import FewShotDataset\nfrom semantic_aug.generative_augmentation import GenerativeAugm"
},
{
"path": "semantic_aug/datasets/coco.py",
"chars": 6273,
"preview": "from semantic_aug.few_shot_dataset import FewShotDataset\nfrom semantic_aug.generative_augmentation import GenerativeAugm"
},
{
"path": "semantic_aug/datasets/flowers102.py",
"chars": 6599,
"preview": "from semantic_aug.few_shot_dataset import FewShotDataset\nfrom semantic_aug.generative_augmentation import GenerativeAugm"
},
{
"path": "semantic_aug/datasets/imagenet.py",
"chars": 6641,
"preview": "from semantic_aug.few_shot_dataset import FewShotDataset\nfrom semantic_aug.generative_augmentation import GenerativeAugm"
},
{
"path": "semantic_aug/datasets/pascal.py",
"chars": 5835,
"preview": "from semantic_aug.few_shot_dataset import FewShotDataset\nfrom semantic_aug.generative_augmentation import GenerativeAugm"
},
{
"path": "semantic_aug/datasets/spurge.py",
"chars": 3910,
"preview": "from semantic_aug.few_shot_dataset import FewShotDataset\nfrom semantic_aug.generative_augmentation import GenerativeAugm"
},
{
"path": "semantic_aug/few_shot_dataset.py",
"chars": 2840,
"preview": "from semantic_aug.generative_augmentation import GenerativeAugmentation\nfrom typing import Any, Tuple\nfrom torch.utils.d"
},
{
"path": "semantic_aug/generative_augmentation.py",
"chars": 358,
"preview": "from torch.utils.data import Dataset\nfrom typing import Any, Tuple\nfrom PIL import Image\n\nimport torch.nn as nn\nimport t"
},
{
"path": "setup.py",
"chars": 1535,
"preview": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nURL = 'https://github.com/brandontrabucco/semantic-a"
},
{
"path": "train_classifier.py",
"chars": 16376,
"preview": "from semantic_aug.datasets.coco import COCODataset\nfrom semantic_aug.datasets.spurge import SpurgeDataset\nfrom semantic_"
}
]
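A note on the core interface
Most of the semantic_aug modules listed above (compose.py, real_guidance.py, textual_inversion.py, the dataset files, and few_shot_dataset.py) import GenerativeAugmentation from semantic_aug/generative_augmentation.py, which at 358 characters is small enough to be a bare abstract base class. The sketch below is a reconstruction from that file's preview alone (imports of torch.utils.data.Dataset, typing.Any/Tuple, PIL.Image, and torch.nn); only the class name and those imports are confirmed by the source, and the forward signature is an assumption.

import abc
from typing import Any, Tuple

import torch.nn as nn
from PIL import Image


class GenerativeAugmentation(nn.Module, abc.ABC):
    """Shared interface that real_guidance.py and textual_inversion.py
    both appear to implement (each imports this class, per the previews)."""

    @abc.abstractmethod
    def forward(self, image: Image.Image, label: Any) -> Tuple[Image.Image, Any]:
        # Hypothetical signature: map a real image and its label to an
        # augmented (diffusion-generated) image and a possibly-updated label.
        raise NotImplementedError

Under this reading, few_shot_dataset.py would invoke the augmentation when serving examples and compose.py would chain several GenerativeAugmentation instances in sequence; both details are inferred from the file names, not from the truncated previews.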
About this extraction
This page contains the full source code of the brandontrabucco/da-fusion GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction covers 186 files (377.3 KB, approximately 122.5k tokens) and includes a symbol index of 92 functions, classes, methods, constants, and types. It can be supplied as context to OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts plain-text input.
Extracted by GitExtract, a free GitHub-repository-to-text converter for AI, built by Nikandr Surkov.