Showing preview only (949K chars total). Download the full file or copy to clipboard to get everything.
Repository: HanxunH/Unlearnable-Examples
Branch: main
Commit: f347155ee23b
Files: 236
Total size: 876.9 KB
Directory structure:
gitextract_b3y44xt6/
├── .gitattributes
├── .gitignore
├── CITATION.cff
├── LICENSE
├── QuickStart.ipynb
├── README.md
├── collect_results.py
├── configs/
│ ├── cifar10/
│ │ ├── dense121.yaml
│ │ ├── resnet18.yaml
│ │ ├── resnet18_add-uniform-noise-aug.yaml
│ │ ├── resnet18_add-uniform-noise.yaml
│ │ ├── resnet18_augement.yaml
│ │ ├── resnet18_augmentation.yaml
│ │ ├── resnet18_classpoison.yaml
│ │ ├── resnet18_classpoison_targeted.yaml
│ │ ├── resnet18_cutmix.yaml
│ │ ├── resnet18_cutout.yaml
│ │ ├── resnet18_denoise.yaml
│ │ ├── resnet18_madrys.yaml
│ │ ├── resnet18_mixup.yaml
│ │ ├── resnet50.yaml
│ │ ├── toy_cifar.yaml
│ │ └── toy_cifar_madrys.yaml
│ ├── cifar100/
│ │ ├── dense121.yaml
│ │ ├── resnet18.yaml
│ │ ├── resnet18_madrys.yaml
│ │ └── resnet50.yaml
│ ├── cifar101/
│ │ └── resnet18.yaml
│ ├── face/
│ │ └── InceptionResnet.yaml
│ ├── imagenet-mini/
│ │ ├── dense121.yaml
│ │ ├── resnet18.yaml
│ │ └── resnet50.yaml
│ ├── svhn/
│ │ ├── dense121.yaml
│ │ ├── resnet18.yaml
│ │ ├── resnet18_madrys.yaml
│ │ └── resnet50.yaml
│ └── tiny-imagenet/
│ ├── dense121.yaml
│ ├── resnet18.yaml
│ └── resnet50.yaml
├── dataset.py
├── evaluator.py
├── fast_autoaugment/
│ ├── .gitignore
│ ├── FastAutoAugment/
│ │ ├── __init__.py
│ │ ├── archive.py
│ │ ├── aug_mixup.py
│ │ ├── augmentations.py
│ │ ├── common.py
│ │ ├── data.py
│ │ ├── imagenet.py
│ │ ├── lr_scheduler.py
│ │ ├── metrics.py
│ │ ├── networks/
│ │ │ ├── __init__.py
│ │ │ ├── efficientnet_pytorch/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── condconv.py
│ │ │ │ ├── model.py
│ │ │ │ └── utils.py
│ │ │ ├── pyramidnet.py
│ │ │ ├── resnet.py
│ │ │ ├── shakedrop.py
│ │ │ ├── shakeshake/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── shake_resnet.py
│ │ │ │ ├── shake_resnext.py
│ │ │ │ └── shakeshake.py
│ │ │ └── wideresnet.py
│ │ ├── safe_shell_exec.py
│ │ ├── search.py
│ │ ├── tf_port/
│ │ │ ├── __init__.py
│ │ │ ├── rmsprop.py
│ │ │ └── tpu_bn.py
│ │ ├── train.py
│ │ └── train_dist.py
│ ├── LICENSE
│ ├── README.md
│ ├── __init__.py
│ ├── archive.py
│ ├── confs/
│ │ ├── efficientnet_b0.yaml
│ │ ├── efficientnet_b0_condconv.yaml
│ │ ├── efficientnet_b1.yaml
│ │ ├── efficientnet_b2.yaml
│ │ ├── efficientnet_b3.yaml
│ │ ├── efficientnet_b4.yaml
│ │ ├── pyramid272_cifar.yaml
│ │ ├── resnet200.yaml
│ │ ├── resnet50.yaml
│ │ ├── resnet50_mixup.yaml
│ │ ├── shake26_2x112d_cifar.yaml
│ │ ├── shake26_2x32d_cifar.yaml
│ │ ├── shake26_2x96d_cifar.yaml
│ │ ├── wresnet28x10_cifar.yaml
│ │ ├── wresnet28x10_svhn.yaml
│ │ └── wresnet40x2_cifar.yaml
│ └── requirements.txt
├── madrys.py
├── main.py
├── models/
│ ├── DenseNet.py
│ ├── ResNet.py
│ ├── ToyModel.py
│ ├── __init__.py
│ ├── download.py
│ └── inception_resnet_v1.py
├── perturbation.py
├── requirements.txt
├── scripts/
│ ├── cifar10/
│ │ ├── min-max-noise/
│ │ │ ├── classwise-noise/
│ │ │ │ ├── exp_setting.sh
│ │ │ │ ├── search_perturbation_noise.sh
│ │ │ │ ├── submit.sh
│ │ │ │ ├── train.sh
│ │ │ │ └── train.slurm
│ │ │ └── samplewise-noise/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ ├── min-min-noise/
│ │ │ ├── classwise-noise/
│ │ │ │ ├── exp_setting.sh
│ │ │ │ ├── search_perturbation_noise.sh
│ │ │ │ ├── submit.sh
│ │ │ │ ├── train.sh
│ │ │ │ └── train.slurm
│ │ │ └── samplewise-noise/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ └── random-noise/
│ │ ├── classwise-noise/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ └── samplewise-noise/
│ │ ├── exp_setting.sh
│ │ ├── search_perturbation_noise.sh
│ │ ├── submit.sh
│ │ ├── train.sh
│ │ └── train.slurm
│ ├── cifar10-extension/
│ │ └── min-min-noise/
│ │ ├── classwise-noise-2/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ ├── classwise-noise-eps=16/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ ├── classwise-noise-eps=24/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ ├── classwise-noise-random-patch16/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ ├── classwise-noise-random-patch24/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ ├── classwise-noise-random-patch8/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ ├── classwise-noise-transfer-tiny-imagenet/
│ │ │ ├── exp_setting.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ ├── samplewise-noise-eps=16/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ ├── samplewise-noise-eps=24/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ ├── samplewise-noise-random-patch16/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ ├── samplewise-noise-random-patch24/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ └── samplewise-noise-random-patch8/
│ │ ├── exp_setting.sh
│ │ ├── search_perturbation_noise.sh
│ │ ├── submit.sh
│ │ ├── train.sh
│ │ └── train.slurm
│ ├── cifar100/
│ │ └── min-min-noise/
│ │ ├── classwise-noise/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ └── samplewise-noise/
│ │ ├── exp_setting.sh
│ │ ├── search_perturbation_noise.sh
│ │ ├── submit.sh
│ │ ├── train.sh
│ │ └── train.slurm
│ ├── cifar101/
│ │ ├── exp_setting.sh
│ │ └── train.sh
│ ├── face/
│ │ └── min-min-noise/
│ │ ├── exp_setting.sh
│ │ ├── search_perturbation_noise.sh
│ │ ├── train.sh
│ │ ├── train.slurm
│ │ ├── train_clean.sh
│ │ ├── train_clean.slurm
│ │ ├── train_protected.sh
│ │ └── train_protected.slurm
│ ├── imagenet-mini/
│ │ └── min-min-noise/
│ │ ├── classwise-noise/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ └── samplewise-noise/
│ │ ├── exp_setting.sh
│ │ ├── search_perturbation_noise.sh
│ │ ├── submit.sh
│ │ ├── train.sh
│ │ └── train.slurm
│ ├── svhn/
│ │ └── min-min-noise/
│ │ ├── classwise-noise/
│ │ │ ├── exp_setting.sh
│ │ │ ├── search_perturbation_noise.sh
│ │ │ ├── submit.sh
│ │ │ ├── train.sh
│ │ │ └── train.slurm
│ │ └── samplewise-noise/
│ │ ├── exp_setting.sh
│ │ ├── search_perturbation_noise.sh
│ │ ├── submit.sh
│ │ ├── train.sh
│ │ └── train.slurm
│ └── tiny-imagenet/
│ └── min-min-noise/
│ └── classwise-noise/
│ ├── exp_setting.sh
│ └── search_perturbation_noise.sh
├── toolbox.py
├── trainer.py
└── util.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitattributes
================================================
# Auto detect text files and perform LF normalization
* text=auto
================================================
FILE: .gitignore
================================================
__pycache__
*.pyc
.DS_Store
.ipynb_checkpoints
experiments/
test_exp/
pretrained_checkpoints/
exp_results.json
plots/
================================================
FILE: CITATION.cff
================================================
cff-version: 1.2.0
message: "If you use this software, please cite it as below."
authors:
- family-names: "Huang"
given-names: "Hanxun"
orcid: "https://orcid.org/0000-0002-2793-6680"
- family-names: "Ma"
given-names: "Xingjun"
orcid: "https://orcid.org/0000-0003-2099-4973"
- family-names: "Erfani"
given-names: "Sarah"
orcid: "https://orcid.org/0000-0003-0885-0643"
- family-names: "Bailey"
given-names: "James"
orcid: "https://orcid.org/0000-0002-3769-3811"
- family-names: "Wang"
given-names: "Yisen"
title: "Unlearnable Examples: Making Personal Data Unexploitable"
version: 0.0.1
date-released: 2021-01-15
url: "https://github.com/HanxunH/Unlearnable-Examples"
preferred-citation:
type: conference-paper
title: "Unlearnable Examples: Making Personal Data Unexploitable"
authors:
- family-names: "Huang"
given-names: "Hanxun"
orcid: "https://orcid.org/0000-0002-2793-6680"
- family-names: "Ma"
given-names: "Xingjun"
orcid: "https://orcid.org/0000-0003-2099-4973"
- family-names: "Erfani"
given-names: "Sarah"
orcid: "https://orcid.org/0000-0003-0885-0643"
- family-names: "Bailey"
given-names: "James"
orcid: "https://orcid.org/0000-0002-3769-3811"
- family-names: "Wang"
given-names: "Yisen"
collection-title: "ICLR"
year: 2021
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2021 HanxunH
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: QuickStart.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<h2>Quick Start: Creating Sample-wise Unlearnable Examples</h2>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<h3>Prepare Data</h3>"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Files already downloaded and verified\n",
"Files already downloaded and verified\n"
]
}
],
"source": [
"import torch\n",
"import torchvision\n",
"from torch.utils.data import DataLoader\n",
"from torchvision import datasets, transforms\n",
"\n",
"# Prepare Dataset\n",
"train_transform = [\n",
" transforms.ToTensor()\n",
"]\n",
"test_transform = [\n",
" transforms.ToTensor()\n",
"]\n",
"train_transform = transforms.Compose(train_transform)\n",
"test_transform = transforms.Compose(test_transform)\n",
"\n",
"clean_train_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\n",
"clean_test_dataset = datasets.CIFAR10(root='../datasets', train=False, download=True, transform=test_transform)\n",
"\n",
"clean_train_loader = DataLoader(dataset=clean_train_dataset, batch_size=512,\n",
" shuffle=False, pin_memory=True,\n",
" drop_last=False, num_workers=12)\n",
"clean_test_loader = DataLoader(dataset=clean_test_dataset, batch_size=512,\n",
" shuffle=False, pin_memory=True,\n",
" drop_last=False, num_workers=12)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<h3>Prepare Model</h3>"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from models.ResNet import ResNet18\n",
"import toolbox\n",
"\n",
"torch.backends.cudnn.enabled = True\n",
"torch.backends.cudnn.benchmark = True\n",
"\n",
"base_model = ResNet18()\n",
"base_model = base_model.cuda()\n",
"criterion = torch.nn.CrossEntropyLoss()\n",
"optimizer = torch.optim.SGD(params=base_model.parameters(), lr=0.1, weight_decay=0.0005, momentum=0.9)\n",
"\n",
"noise_generator = toolbox.PerturbationTool(epsilon=0.03137254901960784, num_steps=20, step_size=0.0031372549019607846)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<h3>Generate Error-Minimizing Noise</h3>"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 98/98 [03:05<00:00, 1.89s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy 8.13\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 98/98 [03:06<00:00, 1.91s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy 11.89\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 98/98 [03:07<00:00, 1.91s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy 31.45\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 98/98 [03:07<00:00, 1.91s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy 67.06\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 98/98 [03:08<00:00, 1.92s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy 88.17\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 98/98 [03:07<00:00, 1.91s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy 68.22\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 98/98 [03:07<00:00, 1.91s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy 53.30\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 98/98 [03:08<00:00, 1.92s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy 96.87\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 98/98 [03:07<00:00, 1.92s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy 97.75\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 98/98 [03:07<00:00, 1.91s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy 99.72\n"
]
}
],
"source": [
"from tqdm import tqdm\n",
"\n",
"noise = torch.zeros([50000, 3, 32, 32])\n",
"data_iter = iter(clean_train_loader)\n",
"condition = True\n",
"train_idx = 0\n",
"\n",
"while condition:\n",
" # optimize theta for M steps\n",
" base_model.train()\n",
" for param in base_model.parameters():\n",
" param.requires_grad = True\n",
" for j in range(0, 10):\n",
" try:\n",
" (images, labels) = next(data_iter)\n",
" except:\n",
" train_idx = 0\n",
" data_iter = iter(clean_train_loader)\n",
" (images, labels) = next(data_iter)\n",
" \n",
" for i, _ in enumerate(images):\n",
" # Update noise to images\n",
" images[i] += noise[train_idx]\n",
" train_idx += 1\n",
" images, labels = images.cuda(), labels.cuda()\n",
" base_model.zero_grad()\n",
" optimizer.zero_grad()\n",
" logits = base_model(images)\n",
" loss = criterion(logits, labels)\n",
" loss.backward()\n",
" torch.nn.utils.clip_grad_norm_(base_model.parameters(), 5.0)\n",
" optimizer.step()\n",
" \n",
" # Perturbation over entire dataset\n",
" idx = 0\n",
" for param in base_model.parameters():\n",
" param.requires_grad = False\n",
" for i, (images, labels) in tqdm(enumerate(clean_train_loader), total=len(clean_train_loader)):\n",
" batch_start_idx, batch_noise = idx, []\n",
" for i, _ in enumerate(images):\n",
" # Update noise to images\n",
" batch_noise.append(noise[idx])\n",
" idx += 1\n",
" batch_noise = torch.stack(batch_noise).cuda()\n",
" \n",
" # Update sample-wise perturbation\n",
" base_model.eval()\n",
" images, labels = images.cuda(), labels.cuda()\n",
" perturb_img, eta = noise_generator.min_min_attack(images, labels, base_model, optimizer, criterion, \n",
" random_noise=batch_noise)\n",
" for i, delta in enumerate(eta):\n",
" noise[batch_start_idx+i] = delta.clone().detach().cpu()\n",
" \n",
" # Eval stop condition\n",
" eval_idx, total, correct = 0, 0, 0\n",
" for i, (images, labels) in enumerate(clean_train_loader):\n",
" for i, _ in enumerate(images):\n",
" # Update noise to images\n",
" images[i] += noise[eval_idx]\n",
" eval_idx += 1\n",
" images, labels = images.cuda(), labels.cuda()\n",
" with torch.no_grad():\n",
" logits = base_model(images)\n",
" _, predicted = torch.max(logits.data, 1)\n",
" total += labels.size(0)\n",
" correct += (predicted == labels).sum().item()\n",
" acc = correct / total\n",
" print('Accuracy %.2f' % (acc*100))\n",
" if acc > 0.99:\n",
" condition=False \n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[[[ 2.5098e-02, 3.1373e-02, 2.8235e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" [-9.4118e-03, 1.8824e-02, -1.2549e-02, ..., -2.8235e-02,\n",
" -2.8235e-02, -2.8235e-02],\n",
" [-2.1961e-02, -3.1373e-02, -3.1373e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" ...,\n",
" [ 3.1370e-03, -2.1961e-02, -2.8235e-02, ..., 6.2747e-03,\n",
" 1.2549e-02, 2.5098e-02],\n",
" [-3.1370e-03, 9.4119e-03, 9.4119e-03, ..., 2.3842e-07,\n",
" -2.5098e-02, -3.1373e-02],\n",
" [-3.1370e-03, 3.1375e-03, -1.5686e-02, ..., -6.2742e-03,\n",
" -2.3842e-07, -1.5686e-02]],\n",
"\n",
" [[-3.1373e-02, -5.9605e-08, -2.8235e-02, ..., 3.1371e-03,\n",
" -1.5686e-02, -2.1961e-02],\n",
" [-3.1373e-02, 1.2549e-02, 6.2745e-03, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" [ 3.1373e-02, 2.5098e-02, 2.5098e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" ...,\n",
" [-3.1373e-02, 1.8823e-02, 9.4115e-03, ..., -1.8824e-02,\n",
" 9.4117e-03, 2.1961e-02],\n",
" [ 3.1370e-03, -6.2745e-03, -9.4119e-03, ..., 3.1373e-02,\n",
" 2.5098e-02, 2.8235e-02],\n",
" [ 1.2549e-02, 2.1961e-02, 1.5686e-02, ..., 1.2549e-02,\n",
" 2.5098e-02, 3.1373e-02]],\n",
"\n",
" [[ 3.1373e-02, -2.5098e-02, -2.5098e-02, ..., -2.8235e-02,\n",
" -2.8235e-02, -2.8235e-02],\n",
" [ 3.1373e-02, 0.0000e+00, 1.2549e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [-1.8823e-02, 0.0000e+00, -3.1373e-02, ..., -1.8824e-02,\n",
" -1.8824e-02, 2.8235e-02],\n",
" ...,\n",
" [-2.1961e-02, -2.8235e-02, -2.5098e-02, ..., -3.1373e-02,\n",
" -2.7451e-02, -2.8235e-02],\n",
" [ 3.1373e-02, 3.1372e-03, 3.1373e-02, ..., -3.1373e-02,\n",
" -1.2549e-02, -1.8824e-02],\n",
" [ 9.4117e-03, 1.5686e-02, -2.8235e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02]]],\n",
"\n",
"\n",
" [[[-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 2.5098e-02],\n",
" ...,\n",
" [-2.3842e-07, 6.2742e-03, -1.8824e-02, ..., -2.5098e-02,\n",
" -2.5098e-02, -2.5098e-02],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., -2.8235e-02,\n",
" -2.8235e-02, -2.8235e-02],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., -2.8235e-02,\n",
" -2.8235e-02, -2.8235e-02]],\n",
"\n",
" [[ 3.1373e-02, 3.1373e-02, 3.1373e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" [ 3.1373e-02, 3.1373e-02, 3.1373e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" [-1.2549e-02, -3.1373e-02, 1.8824e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" ...,\n",
" [ 1.2549e-02, 6.2747e-03, 2.5098e-02, ..., 2.5098e-02,\n",
" 1.2549e-02, 3.1373e-02],\n",
" [ 3.1373e-02, 3.1373e-02, 3.1373e-02, ..., 2.8235e-02,\n",
" 2.8235e-02, 2.8235e-02],\n",
" [ 2.8235e-02, 3.1373e-02, 3.1373e-02, ..., 2.8235e-02,\n",
" 2.8235e-02, 2.8235e-02]],\n",
"\n",
" [[-2.1960e-02, -3.1373e-02, -3.1373e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [-3.1373e-02, -1.2549e-02, -3.1373e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" [-1.8824e-02, 2.5098e-02, 1.8824e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" ...,\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., -2.8235e-02,\n",
" -2.8235e-02, -2.8235e-02],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., -2.8235e-02,\n",
" -2.8235e-02, -2.8235e-02],\n",
" [ 1.2549e-02, 6.2746e-03, -3.1373e-02, ..., -2.8235e-02,\n",
" -3.1373e-02, -3.1373e-02]]],\n",
"\n",
"\n",
" [[[-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., -3.1373e-02,\n",
" 1.0980e-02, 1.0980e-02],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., 3.1372e-03,\n",
" 3.1372e-03, 3.1372e-03],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., 7.0588e-03,\n",
" 7.0588e-03, 7.0588e-03],\n",
" ...,\n",
" [ 5.9605e-08, -6.2745e-03, -1.2549e-02, ..., -1.2549e-02,\n",
" -2.5098e-02, -6.2745e-03],\n",
" [-2.5098e-02, -1.8824e-02, 6.2746e-03, ..., 1.2549e-02,\n",
" 1.2549e-02, 6.2746e-03],\n",
" [-1.2549e-02, -1.8824e-02, 3.1373e-02, ..., 5.9605e-08,\n",
" 5.9605e-08, -6.2745e-03]],\n",
"\n",
" [[ 3.1372e-03, 1.0980e-02, 1.0980e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [ 3.1372e-03, 3.1372e-03, 3.1372e-03, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [-2.5098e-02, -1.4902e-02, 7.0588e-03, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" ...,\n",
" [ 6.2745e-03, 1.2549e-02, 1.2549e-02, ..., -6.2746e-03,\n",
" -6.2746e-03, -5.9605e-08],\n",
" [ 1.8824e-02, 2.5098e-02, -5.9605e-08, ..., -1.2549e-02,\n",
" 1.2549e-02, 2.5098e-02],\n",
" [-6.2746e-03, 3.1373e-02, -3.1373e-02, ..., -5.9605e-08,\n",
" -6.2746e-03, 2.8235e-02]],\n",
"\n",
" [[-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., 1.0980e-02,\n",
" 1.0980e-02, 1.0980e-02],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., 3.1372e-03,\n",
" 3.1372e-03, 3.1372e-03],\n",
" [-6.2747e-03, -3.1373e-02, -3.1373e-02, ..., 7.0588e-03,\n",
" 7.0588e-03, 7.0588e-03],\n",
" ...,\n",
" [-6.2745e-03, -6.2745e-03, -6.2745e-03, ..., -2.8235e-02,\n",
" -2.8235e-02, -2.5098e-02],\n",
" [ 1.2549e-02, 1.8824e-02, 1.2549e-02, ..., -2.1961e-02,\n",
" -2.5098e-02, -2.8235e-02],\n",
" [-2.5098e-02, -3.1373e-02, -1.8824e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -1.8824e-02]]],\n",
"\n",
"\n",
" ...,\n",
"\n",
"\n",
" [[[ 6.2745e-03, 1.2549e-02, -3.1373e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" [-4.4703e-08, -2.8235e-02, -3.1373e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" [-2.5098e-02, -2.5098e-02, -2.5098e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" ...,\n",
" [ 1.2549e-02, 6.2746e-03, -6.2745e-03, ..., -6.2745e-03,\n",
" -6.2745e-03, -6.2745e-03],\n",
" [-6.2745e-03, -6.2745e-03, 5.9605e-08, ..., -2.2352e-08,\n",
" -1.4901e-08, -2.3529e-03],\n",
" [-3.1373e-02, -3.1373e-02, -1.8824e-02, ..., -2.5098e-02,\n",
" -2.5098e-02, -2.5098e-02]],\n",
"\n",
" [[-1.2549e-02, 1.8823e-02, 3.1373e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [ 3.1373e-02, 3.1373e-02, 3.1373e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [ 1.2549e-02, 1.2549e-02, -6.2742e-03, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" ...,\n",
" [-1.8824e-02, 6.2745e-03, 6.2745e-03, ..., 6.2745e-03,\n",
" 6.2745e-03, 6.2745e-03],\n",
" [-5.9605e-08, -5.9605e-08, -5.9605e-08, ..., -5.9605e-08,\n",
" 6.2745e-03, 1.2549e-02],\n",
" [ 3.1373e-02, 2.1961e-02, 1.2549e-02, ..., 2.5098e-02,\n",
" 2.5098e-02, 2.5098e-02]],\n",
"\n",
" [[ 1.2549e-02, -3.1373e-02, -3.1373e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" [ 3.1373e-02, -3.1375e-03, 3.1373e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" [ 3.1373e-02, 1.4902e-02, 3.1373e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" ...,\n",
" [-6.2745e-03, -6.2745e-03, -6.2745e-03, ..., -2.5098e-02,\n",
" -2.8235e-02, -2.5098e-02],\n",
" [ 5.9605e-08, -1.5686e-02, -1.2549e-02, ..., -2.8235e-02,\n",
" -2.5098e-02, -6.2746e-03],\n",
" [-1.8824e-02, -2.8235e-02, -2.8235e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02]]],\n",
"\n",
"\n",
" [[[ 3.1373e-02, 3.1373e-02, 3.1373e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [ 3.1373e-02, 3.1373e-02, 3.1373e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" ...,\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., 3.1370e-03,\n",
" -2.3842e-07, -2.3842e-07],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., -1.8824e-02,\n",
" -1.8824e-02, -1.8824e-02],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., -1.8824e-02,\n",
" -1.8824e-02, -1.8824e-02]],\n",
"\n",
" [[-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" [ 3.1373e-02, 3.1373e-02, 3.1373e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 3.1373e-02],\n",
" ...,\n",
" [ 3.1373e-02, 3.1373e-02, 3.1373e-02, ..., 9.4119e-03,\n",
" -9.4115e-03, 1.2549e-02],\n",
" [ 3.1373e-02, 3.1373e-02, 3.1373e-02, ..., 1.8824e-02,\n",
" 1.8824e-02, 1.8824e-02],\n",
" [ 3.1373e-02, 3.1373e-02, 3.1373e-02, ..., 1.8824e-02,\n",
" 1.8824e-02, 1.8824e-02]],\n",
"\n",
" [[ 3.1373e-02, 3.1373e-02, 3.1373e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [ 3.1372e-02, -3.1373e-02, 3.1372e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" ...,\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., -1.8824e-02,\n",
" -1.8824e-02, 2.8235e-02],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., -1.8824e-02,\n",
" -1.8824e-02, -1.8824e-02],\n",
" [-3.1373e-02, -3.1373e-02, -3.1373e-02, ..., -1.8824e-02,\n",
" -1.8824e-02, -1.8824e-02]]],\n",
"\n",
"\n",
" [[[ 3.1373e-02, 3.1373e-02, 3.1373e-02, ..., -2.5098e-02,\n",
" -2.5098e-02, -3.1373e-02],\n",
" [ 3.1373e-02, 3.1373e-02, 2.8235e-02, ..., -2.5098e-02,\n",
" -2.5098e-02, -2.5098e-02],\n",
" [ 3.1372e-02, 1.8823e-02, -3.1373e-02, ..., -2.5098e-02,\n",
" -2.5098e-02, -2.5098e-02],\n",
" ...,\n",
" [-2.5098e-02, -2.5098e-02, -2.5098e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [-2.5098e-02, -2.5098e-02, -2.5098e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1372e-02],\n",
" [-3.1373e-02, -3.1373e-02, -2.5098e-02, ..., 2.8235e-02,\n",
" 2.8235e-02, 2.8235e-02]],\n",
"\n",
" [[-3.1373e-02, -3.1373e-02, -2.8235e-02, ..., 2.5098e-02,\n",
" 2.5098e-02, 3.1373e-02],\n",
" [-3.1373e-02, -3.1373e-02, -2.3842e-07, ..., 2.5098e-02,\n",
" 2.5098e-02, 3.1373e-02],\n",
" [-6.2742e-03, 2.5098e-02, 3.1373e-02, ..., 2.5098e-02,\n",
" 2.5098e-02, 2.5098e-02],\n",
" ...,\n",
" [ 2.5098e-02, 2.5098e-02, 2.5098e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 2.8235e-02],\n",
" [ 2.5098e-02, 2.5098e-02, 2.5098e-02, ..., 3.1373e-02,\n",
" 3.1373e-02, 2.8235e-02],\n",
" [ 3.1373e-02, 3.1373e-02, 3.1373e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02]],\n",
"\n",
" [[ 1.8823e-02, 2.8235e-02, 6.2742e-03, ..., -2.5098e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [-1.2549e-02, -3.1373e-02, -3.1373e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [-2.5098e-02, -2.5098e-02, -2.5098e-02, ..., -2.5098e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" ...,\n",
" [-2.5098e-02, -2.5098e-02, -2.5098e-02, ..., -2.8235e-02,\n",
" -2.8235e-02, -2.8235e-02],\n",
" [-2.5098e-02, -2.5098e-02, -2.5098e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02],\n",
" [-3.1373e-02, -2.5098e-02, -2.5098e-02, ..., -3.1373e-02,\n",
" -3.1373e-02, -3.1373e-02]]]])\n"
]
}
],
"source": [
"# Examine the noise\n",
"print(noise)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<h3>Create Unlearnable Dataset</h3>"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Files already downloaded and verified\n",
"Files already downloaded and verified\n"
]
}
],
"source": [
"import numpy as np\n",
"\n",
"# Add standard augmentation\n",
"train_transform = [\n",
" transforms.RandomCrop(32, padding=4),\n",
" transforms.RandomHorizontalFlip(),\n",
" transforms.ToTensor()\n",
"]\n",
"train_transform = transforms.Compose(train_transform)\n",
"clean_train_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\n",
"unlearnable_train_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\n",
"\n",
"perturb_noise = noise.mul(255).clamp_(0, 255).permute(0, 2, 3, 1).to('cpu').numpy()\n",
"unlearnable_train_dataset.data = unlearnable_train_dataset.data.astype(np.float32)\n",
"for i in range(len(unlearnable_train_dataset)):\n",
" unlearnable_train_dataset.data[i] += perturb_noise[i]\n",
" unlearnable_train_dataset.data[i] = np.clip(unlearnable_train_dataset.data[i], a_min=0, a_max=255)\n",
"unlearnable_train_dataset.data = unlearnable_train_dataset.data.astype(np.uint8)\n",
"\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<h3>Visualize Clean Images, Error-Minimizing Noise, Unlearnable Images</h3>"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAg8AAAIGCAYAAADTKmxqAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy86wFpkAAAACXBIWXMAAAxOAAAMTgF/d4wjAACJgklEQVR4nO39eXBd53XmC699RswACRDgAcFBFAfNkgnSpq1YouK4k5sm7cSx05E8qROblPvzze0rqb/4VnWVqKpcdXxL4r1JutwidbvlfK1I125JTot0bMuWTdmOZYuDBmviJIIkiAOCBDEcTGfc3x9K6+Zdz4Lw7kOAJJTnV+Uqv0trv3ufvfd5+PK8D9cKwjAMhRBCCCHEk9ilvgBCCCGEzC+4eCCEEEJIJLh4IIQQQkgkuHgghBBCSCS4eCCEEEJIJLh4IIQQQkgkuHgghBBCSCTmfPFw5MgR+chHPiJr1qyRDRs2yOuvvz7XpySEEELIHBLMdZGo3/zN35QvfOELcuedd8qTTz4pX//612Xfvn3veUw6nZZFixbN5WURQggh5D04e/as5PN587/N6eJhYGBAVq1aJefPn5dEIiFhGEomk5Gf//znsmrVqmmP6+rqkt7e3rm6LEIIIYTMwHv9WTyn2xanTp2STCYjiURCRESCIJBly5bJyZMnnbwdO3ZIV1fXu/8bGxuby8sihBBCyAVwWRgm7777bunt7X33fw0NDZf6kgghhBAyDXO6eFi6dKlks1kplUoiIhKGoZw8eVKWLVs2l6clhBBCyBwyp4uH9vZ2WbdunTz22GMiIvLUU09JV1fXe/odCCGEEHJ5M+f/2uLQoUNy5513yuDgoDQ1Ncmjjz4q119//XseQ8MkIYQQcml5rz+LE3N98rVr18oLL7ww16chhBBCyEVizhcPc0Z5CmNBcPGvY0asH3aKMx8V4mepqLmsnFBKOFnZjYVFvKZYJQ6xuNrVCit4nM8PV0GVz6XaH8Ws4yqVCiZWys4w1b7Ya/7gOfV5hn2vbB7Q4pnX7JFzkxGrVnG8XoWMEcvOfFiwxuN8hzDHY9P37Mwp0ub9mmvNuxz1TgRv3sx6985R7ufRemflvBPTmocaGJZm1jytdyKoeZYGWnhpnpGC89dgklF2QWtewkPvRESkvWXay5uJy+JfWxBCCCFk/sDFAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjF/DZPzmpnXbIFhFgq0eaeMxqB4DI0yei7DZylBYJgMi+78FcMsFMQMk5Ey78TjaMacSyyzkmlguiwNtpchy42YLtVy7mJcyD9lFv+FuTWVx1+rfuQx9bBHTptHzvzG7++oWqcsDbQM4XFxNc88zkPztN6JoOYFQR3ObRi0vTTPeu/yM7/X5sxayy6C3vGXB0IIIYREgosHQgghhESCiwdCCCGERIKLB0IIIYREgobJS4Au/hVYlRQDy/joxhKG/6U4iZU3C1OT7jiPxqDClFGZUq0tFyxoxRTLiaRTjM9XbdVJH6y5Y5axU4wqbF6cdoefXoIpT1Y59VxiGR9b1HjYcy79usyi668hh7HjTe6zGvac64Qa+x7X4plXzdw+xTltUu6wYlRuvAz/Omh9yyxTo87Ueidi/4FVzLuap/VOxFPzQrx5HXXqux3zNOrqvLk2Z+v5Db2zn0T1XIavGiGEEEIuZ7h4IIQQQkgkuHgghBBCSCTmr+fBtwhGlZ0Zq74GfT7LzhBTtz20OqBhrDDh7uX9/PmfQM6xt96A2MT4uDMeH0dfxOjIOMTa2tqd8Wc//0XIWdSBnQwvtp9Bx3zP72HXMHnwt9pnTvq0x0SGL+JenwsYNmItM4xF0AAgIjKixj4FoQzM3dTgGSPY7abILiPnPmsyZzRsZFix2cLnfL7nb1Hj9b4XUVF/17Pec5+uj1X/ldH6wnh01TX/mLHeGDdWyKN3wdK8XqV5E4Oob+Pj6A/JjQw547a2ZZCjNW+hoXdz6m
fw/XOuyi6eFwJ/eSCEEEJIJLh4IIQQQkgkuHgghBBCSCS4eCCEEEJIJOavYXKeYNmJQu3UM5JiZQyOnj/vjJ/6fx6HnOLEBMTqa2rduRMpyMkX0cAUj7n92woFNCLNpoHRx9zqU3DKyrEIYrPlIDprxBa5w2rNkbNJixHTFYs8zJEWwc6dRnSLx5H3e8Zcho0vjfZ+vpM3O7R4zG3lWOjjqge/xyJ5DF3kvyLqJxN6OvVi6kitdyK25sWHXGNlfboF5zY1z9XKeOw05BQKw24g6IScqg2MPjplFhCs8h8GzJre/eN0szobIYQQQt73cPFACCGEkEhw8UAIIYSQSHDxQAghhJBI0DA556BJpVxRsVLZ4yiRs2f6nXHS6JzW3LIA51Lmy3wRq62lkkmI1dSk1DgNORWjEqa++liVRh3fbpz6GrwrTKrKnrO5ku77393xjlmcu2qqrB7pQ7BtG8R23tsNsW23q3qKVoFJD540viG3GhUte7xMm0iLGg8bOSs8cnzmfv/hPpuypWbhzJqn9U7E1ry6jhZnHBuPQ85UcQxilUaleS2GAbXF1bxSCg3piSnjj9GYZWb1QGueWUXU0Fwvgya7ahJCCCHkEsLFAyGEEEIiwcUDIYQQQiJBz8Os49FVU+1rWb6BiYkcxI4dOeKMx8cwZ83V10IsHrh7gK+9+SbkpBL4KiRULGn4IooFLEoTqH3JeBz3IGPG3qXe9TSLPVlFqdQ49OksaB1YNT0Q6Vw7W3NXiS7+JDLHm+14z7fh62nkVfcQzKallU9AqBhzz9fjOX+LR44uSuVzzOxi7GFf9L8O+nTVxBwfzdN6JzKd5rneGq13ItNonrourXciqHl5Q+8KcfSQ1ZXUg/DQOxHx8zxY+Ggeu2oSQggh5FLCxQMhhBBCIsHFAyGEEEIiwcUDIYQQQiLx/jJMVtndDLpcCpr1TNNPuYTHVdziJzHD9BcvubHx4UHIOfCLn0Nsz3e+44zPZgcgp/k3boHY+Pi4M17Y3gY5FeM6mxvdbpx1SVxrBmWji58qgpWIY8EU655rR08lrMHjymiGCgLXfIUZYhZIKVdbNOVTyjj6EUy5t8djnhbPmOZGI7bCY55Z/LafVt+HzirNWMGoYfRqnD1n1/PqnWqWUSOrESIr1LjFOGpYjV/xvCbr8fmhvmuB9Xe/mf8+aHW5DA1905pXEUPvBIs96e6YcaNDsI/mab0TERnJ4vPrWN/qzq30TkSkswk1byLlXpfWOxHUPFPvTCei0hZDy8xnpfMMvZPAQ7csbWORKEIIIYRcSrh4IIQQQkgkuHgghBBCSCS4eCCEEEJIJILQLON3aenq6pLe3t73TqoYxhWzitfMWOY9bS0JTAMKGoi0iXJqDA0+lSm3Itk3//MjkPPiL/8BYufPugbJRBw/b31dPcTKZdfUVFtbBzk33ngDxGpqXfNOU2MT5GQWd0JsxRUrnXFzEx7X0toKsUTSNVYWKujwKxnPSnfRtLpqWq95Rd2X+sUZyLEInlLz93gdhrRUmWPFtAsP/WGzShjo+3mHkfW4EdMVAvHd6Au3QmyJT/vN/RgKu2dMkXsD7P75pBxwxotmPrs/6vXxl2CteVXqnWHws3vj6qihd0ZsasLVPK13In6ap/VORKShiNpVX+eaCstlrIRbW4tmyBtvdMvAphagqbFGaZ6pd0tXQqxjgXtcg6F3kjI6byrNm/DQOytm6t04mltn0rz3+rOYvzwQQgghJBJcPBBCCCEkElw8EEIIISQS768iUVUzc4EUq0iUFZoYHXbGP/je9yDn+uuuc8btmQ7IueZ67I6ZPe3uo8UNj8fIufMQS6iulseOHYOc4SEs2vLbv/M7zvj73/97yCkUCxC77ZZbnfHiVtwxbjS8Gdded70zblmK+3FJ47hCyd2bLYV4X8pGTD8/nPkyxeqYOcceB0DvxRp1cnLhEzNOs9eIbTHsDfv3b3bGB2QP5GwzjvMrN3UAIrPqcdBccp
fZzHo3XUwzMTYMMa15Wu9E/DRP652ISL2leb2u5sXrsbDSsWOHIHb2l33OePNtvwM5P3rO1bxJQ+82/YtbIaY1z0fvRFDzfPROBDXP1Ls0hi5E8/jLAyGEEEIiwcUDIYQQQiLBxQMhhBBCIsHFAyGEEEIiQcPkNOgaOIFRdCMMsegGFizCuRtb3C5+V19/NeQs6lgAsVcPukVF3nj9NcgZVYZNEZEFTa7Drr0NC5akjYIl117lFlE5cfwo5Lx9Es2XA2fcoiKrliyGnL63j0DsXM/bzrg5sxByrrz2GogtWXGlM07XoNEqH6KJqlyeve6Ns0bLDOPpYhcZuHPeJsAH1fgeTMGaTbJTVXfq69TFpkSkD5+7YE0fYmD3uHUfqtVB0y5Y5I613on4aZ7WOxFb884Xhp3xghp0FDebmue+L1dei/1OD7/tatmxk6iBA1ksoqQ1z0fvRFDzfPROBDXvYugdf3kghBBCSCS4eCCEEEJIJLh4IIQQQkgk3l+eh4rV3qXKqVSDq3gM94umpiYgNjYy7IxvuB4LpNSk3b28Yn4KcibHxvGiVHOukyeOQ8rCetz3XdDsxsolbFSTTGIzmWTCXVuWStiMrLEeG860LWxR58c9z6bVuG9XyI0549Ghc5Dz+vM/gdjpNw874yUrr4KcjqVXQKyuET0VPjz46Zlz7n2yqqmxwdUJI2dVlXNXyTppgNhBiGwyjrSa7iiPQx9m3Lceg1uhMdZ9eOD9aJY4MOoWB9rTOAY5c0llPcbWW925vCabMVD1XwetBldx5YSYKs6sdyKoeVrvRDw1z2hG6KN5Wu9ERMol9AAk467moQKKxEsjznhRPTbmamtogdiCJlfzfPROBDXPR+9EUPOWLUK9k0XV6d108JcHQgghhESCiwdCCCGERIKLB0IIIYREgosHQgghhETi/WWYrBbDmBOPBzPmTE2gqfFkj2vo6VqCVWrCvGs8LEygEak4iTFt6FncioVPrl6zBmIbP/ghZ/zcj34EOdn+sxAbH3MNPS1NaEQaGkRTY026xhk3NmHRlnzM+Hxx93Vc2ITmpNB4DhMFN3bi9dchp/dYD8Talq90xjcYpkofDny4qsPsYk+veOTMItoK+XgOzZFb7sDjtOfvQOdeyLk3g4awTVm30+ZWwzCZ2YyxUHshux+CnO14mNyhLqsBvbsit2FIf2uNS5IH4e4ZBa9kK0R2Bsr8edG7bBp6Z5aJcvN89E4ENU/rnYiteRUVa0RPpXTVo+atUZr3IaV3IrbmDSjNy49hR+JmpXlDw6h3LXU1EGusczUvHxp6V8Q/fhemXM2z9K5cwFj/AVfzztbgF2vB8i6IXfMvq9M8Ef7yQAghhJCIcPFACCGEkEhw8UAIIYSQSHDxQAghhJBI0DApIoVCAWLHj7pVvBa2oFmwVEAjkK5u9rPn90LO0MCAMz5hmI5ScVzXxQPXKHPFkiWQ09HaBrFxVclsbBQ7EqYSeL6XX3LrCA5kz0CO0VhUAvVaxVNYhTLdiK9eqt51syUqWAlTd/oTEUmU3ItI5fF55o3Y4OAAxPxwr+uJF/6NkfPIzNN8rMrTzyEHjAqM9+7GPKxx58cecZ/xnlwWctCyaWAZHy32utbOsYeMlp0G+vNZdSnRYrjXyFoLkfWh4RL1Iq3GqD8+FMKZ9U4ENc9H70RQ87TeifhpntY7EZGlV6LmtS9xNW+8iE9rrICaF693z7f/Daybeuq8q3mTxl+380nUsnjKfUHTFXyrU2lDy5TmWXoXL6HopvOuITOfR4PmyPgIxC4E/vJACCGEkEhw8UAIIYSQSHDxQAghhJBI/PPzPMRwvTQxhvthP1f7ds1GZ8hUAju1JQJ3j6pcxP3Fk8fedsZH3noTcjoXd0DsmqvdYihvvfEa5JzJ4h7y0qVLnfHZs+hdiCfwVfjlC//gjBcvxv3GmiT6GZJJt2hKsYz7dmEczxeq2xkzvBJi7IPGVDfVemOfsMbYr60LjPmr4E
H5BgbvcT0P9/7Ps3KqWadx+05nfP99t2OSVT8odL8P63DrW7YbRoEn1Pjww5hzYKtxQusagFEM3eZe56zWY1rjXlRwaJo8wL1ZYdVXpT0QIlIxfBBK83z0TgQ1z0fvRFDztN6JiBx5+S2Ida5wdUrrnYjIW29gAbgzx192xiuU3omInD2F+/1x5VX45Zl/gBytebbeYRG8YsG9V2m8daB3IiJJrXmG3kkF/QzxGreEYLqAhatis6R37843q7MRQggh5H0PFw+EEEIIiQQXD4QQQgiJBBcPhBBCCInE/DVMxgwHVaUEobDiFtQYH0ZT1dCZ0xBb3OKahT60ETu1hQFeQynvtoI79CaaIU82uAab1gVY2Gl4BA0+x467xqNQd/4UkeZ6dK6Njgw746ZGNPg0NS6A2GB8yBkX8miOqmuox7laFjrjiqAzKGYYV2PqfiZieJxVNCUU9xlbr0ZNLRqISoHuU+rLzMe1PKkC66s8lUHFyzxoGK2MwlVj9x1wxg/dvx1y7nkInYBGLSkvtujAXWjeO2R08bwKauzcBzlvNaGpGUs0VckavOn+BslZIqbM14beSQwLCI1NuJrno3ciqHmp/Mx6JyJy6E1Xp84k0DA5tgC7Yw6fcYtJ9Rp/PKWLeA26UNWk0jsRkXZT81qc8cAY6o3WPEvvWmvws6SU5gUeeieCmhfIJORM1qD+JNVUs6t3NvzlgRBCCCGR4OKBEEIIIZHg4oEQQgghkeDigRBCCCGRmL+GydCqyoaxiRHX9Pf6q69CzuToMMQGsq6p6PgR7DqXy6H5cvHijDNOGqY/3eExlcZKcTmjA9qQMgLV1eFxV117NcQO7n/JGccTaJxpa8eKlomkaw4aGcXPO2J06HztNbcK3NDQMOQ0NGCXufq6OveaWtDkVJvCz5yuUbE43vPaOjQQVSrVVfa79/9WgRYjaZYMkkYBRpNAMiqClUZFtkOkSedhitzzkFE+0gvr/ipnl9FC8wmPtpqZPfdDrHMMY148jOZL2ebO5eVRnXPU/Yzh/R0fH4KY1jwfvRMR6VGalz+L3//MYv3eiaSUObCQx6qXoaF5pbKreUMjWAm33tS8a5zxwf1oUo8n0Eja1t7int/o9Ks1b2QQ9e6N1w5AbHSo3xmbeldfB7G2DlfzAh+9EwHNm029mw7+8kAIIYSQSHDxQAghhJBIcPFACCGEkEjMY8+DUQTH8EEMnj/vjOvrschHm9Exs6yKn3QsXgw5C1uxOEhFdXhc3NUFOYszrr/A2sdraMQ9q9zYsDO2bB8v/OpFiOULRWe8ZvVVkFMxlpFFcYvQDI3ifur1110HsVG1T3jy5EnISRhdPPW+4OlUCo8zl7vujvSY0TUwZewddl2x0hkv/41/YU2OtMwwnkXusmLm1qX2OFimC8sHMTNNeDs9sZwCeu93v5GD3RQ12U3Y4bUprO7ziVTplbjoaM2bWe9EUPN89E5EpF1pXtHQu3IFdbhlQbs7vgL9VPGFhuaddzVP652IyKTxSv3kFVfz8mERctasQc2biLver+IkatLQqFuk6frrcZ4hw/s2MeBqXuL8zHonIlJ3xtU8H70TQc3z0TsRkeXrPTXPgL88EEIIISQSXDwQQgghJBJcPBBCCCEkElw8EEIIISQSF2yYnJqakj/6oz+SN954Q2pra6W9vV3+03/6T7Jq1SoZGBiQL3zhC3Ls2DFJp9PyjW98Q2655ZbZuG4JDbegZc+amHKNQBXLZWh0Nzt92i2aMjWFhiKLJUtcI1dzSwvkXH2N2+vvyhUrIOfQEWzZ99//7jvqmrDj2pBhFsyoa6oxOsONjmCbxMFR13xVNjo1trZhR9CODtcg9Ytf/AJykkksVLVp0yZ3njbLkIrFXiYmxp1xz4snIKe/vx9iP/6Hf3DGv7v1f4Uckxa/tEtLtebB2cSq9qTfaywy1LAbv4937HK/t/u34swHNxmnQ2/g7JHzqGZlnT83NnOOQagMkj56J2Jono
feifhpntY7EdQ8rXcifpqn9e6da5pZ87TeiYjUNGDX4NGRCWc8OIhm0/KUq3mtLTPrnYjIL15wNc9H70RQ80y9Gx+HmNa8/jOG3v38HyD2u1/w1DyDWfnlYevWrXLo0CF55ZVX5JOf/KR86UtfEhGRr33ta7Jx40Y5cuSIPProo3LHHXdIsYhOWEIIIYTMHy548VBTUyO/+7u/K8E/rmY3btwoPT09IiLy7W9/W+66651/bLZhwwbp7OyU559//kJPSQghhJBLyKx7Hv7yL/9SPvnJT8rg4KAUi0VZ/E/+rfCKFSvMf/O/Y8cO6erqevd/Y2P4EzohhBBCLg9mdfHwwAMPyNGjR+U//If/EOm4u+++W3p7e9/9n1U8gxBCCCGXB7NWYfLBBx+Up59+Wn70ox9JXV2d1NXVSSKRkP7+/nd/fejp6ZFly5bNyvlCo0NYEGAsGXc/4suvvw45Vy5fCrHuDRucccLo1DgyPIwXpsxI/Vk0rvWecs0tH7l5I+TE0lhd8Qff/74zTqexCuXAEHbj7Otzu9ONj6PpyDI+VVSFyYqgeSfb1wexM2fOOuMf//inkBMzlq25nGt8WtKJVT3jcTxQd+OcKhQgp86oLHrqzABehAd/9VvuuMfI2TG7DewuKQ2W8RF8uVWWoTQadmK9PhFtD8zu2QkZa0fvgNhY7nEV2eJ3XapRYsNePY/I7Qfc83VmjIfeacydUYbFbX6XJEVtfJxZ70RQ83z0TgQ1z0fvRFDztN6J+Gme1jsRkXTK0LzzbtfOvtOogeNjaEBf0uZqXqpcgpyi0rzBU9jp89SwYU58zjUnxmJomMyNov9vSbureXHBa6qva4HYVM7Vxbo4ms1PDdnfrGqZlV8eduzYIU888YT88Ic/lJZ/4rT9zGc+Iw8//E5T4X379snp06fl1ltvnY1TEkIIIeQSccG/PPT29so999wjK1eulNtuu01ERNLptPzqV7+Sr3/96/L5z39eVq9eLalUSh577DHzn6wQQgghZP5wwYuHrq4us+aCyDv//vXZZ5+90FMQQggh5DJi3nbVtJYr1iKmTpkvEwn85eOnxj8fve4qt7BJZyduXtbW1kKsbdEiZ3z40FuQM5V3i5MUi3nIKRnFUIaH3T2r/izu2b9x+G2IFUru3toHP3gj5JSLuLdWLrvegY5FuI/20ksvQUzvT69ahd3cFhlzNTW5G+A//slPICeZwle2ubnZGXdm8FnV1GCXudVrsXiND9eot2/YKNezTo0PVnWm2UVfk4jIFrW3v9toaLl7L8Yyn3DHgbfnQe894167D1haSmS0yTJa71LjbiPH8kG4NyYQ9FOg2lifBffaRdS/JtvmZ5AJxN3vD0MsFqT1TgQ1z0fvRFDzfPROBDVP652ISHHS0LxJV/MmzuIe/UAvat7Rw26BK613IiIf/CB2/x1Q/6pP652ISMcit7jUKy9ZXWDx+3/d0uXOeNEifGObDV/bL37kap6P3omIdGYWOmNL765fsxxiFwLLUxNCCCEkElw8EEIIISQSXDwQQgghJBJcPBBCCCEkEvPWMGlh2Y4WLnSNJL/5W78FOb9KYQGosVG30MipU6cgJ2ZUOppUpp/hUSxYcmD/i+65cmgMWnUlOtfyedfQc+bMOcgp5dEsFFOFXMbO4zWtvmIFxDasv8kZ79uHtr/Dh49B7IMfdE1p69ejSa1UsgyabiyZQnNrpYKdPetVAahF7Wji0mZMEZHGVuyQVw3aQCki8r+pAj7JrevxwJ1zZ6O0DIUHAsss6BoDt1uTPYzl4nerj/wJ48iHZRjnCv+LM7RqKGWNdpE5db57jZx9IV7n4fvdgkXZ3CcgZ3c3Poed9+51xmuzeO/gEgy/5n7DaNn5kFtYzboHPmgDpYhIpg6f/G/+hqt5Pnongprno3ciqHmv//xFyPmvhhlSa15xEg2MvSOoeWOB+9yDFL4cgxODEFtxtWsc71Z6JyLyotK8V0+i3n3kg/hu3N
DlxgqW3oUYiykPpa13dRDTmldn6F2z0QH5QuAvD4QQQgiJBBcPhBBCCIkEFw+EEEIIiQQXD4QQQgiJxLw1TBp+KRGjwmQ84X7EugbsrtjaitUOO5UB5cCBA5Bjtg5X5sTC1BSkFFUFtF+/9mvISafQFDOhumEWi2iO/P0tmyGWSrr3YGwczVFf+dKfQKylpdEZv/LSK5BTMqq5TRmfWTMwgJXiMqqi3dlzQ5BTV4+V0waHzjvj31p5BeSMjOBn1l08fRn2yHlZjSuGKa/qlftuDN2nXs/urGUf1tUWRXZvds17mzvvh5yYVyNKtP3dJUa5yuC/YMwH+MJ/ElI2BP8Vj1P+wc2HGyFlzUN42K6t7ruxdS++K2Od7v1cswmNc9l1qBu7ut08fCqeWG0BEijpCxpdzfPROxHUvIa0oXd5VOKy8kIWDbPgQUPzUkrzzhfQjDnloXkpo3/S2PgwxLYqzdN6JyLystI8S+/GpvA5VAJXpwYGzkOO1jsRkb5z7nvWXI9VKM8MYafmj115izMeHEO9GyxU2fl2GvjLAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjF/PQ+h5XrA4ieB2iyNJ/C4q6+7AWK9b7ud4VZesQRyhs4bhUdWdDnjM2exE91Hb/tNZ2x5BOIBPpqBs/3O+KqrroSc7g9eD7ETPW6nzeVXYAe9ZBK9BK/9+g1nXCxgwRKrNJf2IEgSn8tUBfdBS2op2750GeToQlIiIsMTbjGZl14/Ajkpo+DU8hV4/3ywumhqWtRtqXaV/vA2jG3bZT2Hma9JBP0w27vd9/oT243DjK31Zzrd4Bbv7VT3AwW5rUbOIx7zXAOR0Ubcs9Y0Yh0pm4PuXH0ZNEY8sUn5Q4zKXOh4EJGD2qCy0++aQt2J0ngPajBWSrnFpHz0TkTkyoz7bkycx5t31YpVEDtz1tXFj97225AzPIVFoiq1ruYNDPZDzrUrroXYzTe43Uy13omIrM1gR0mteVrvRCzNwy/DWBa9WemrlX4bepcwRGGZ6sZZLOM9PzeBxbN++aarean07OnddPCXB0IIIYREgosHQgghhESCiwdCCCGERIKLB0IIIYREYv4aJiuGiyvAtVA85pr1ClNY5GPgHBofJybHnfF116LJsGIUP0nUu93MgjR2vus9fcIZl8YnICddh4aX8QnXPLN+/Qcg58ixNyF29uwZZ3zFlVhEKZFCw+TwqFukZWQEi7YkU1jEJIi79zxh5AyNomHq1tWrnfFDn/wDyBkcRHNS7+leZ9xsdJRbe9VVEFvSuRRiPnxJv3r4Gojo5n9VNrTbZrXHNNf8lolSEaLp16ev55cNL+aWrP4+HPKYybikrWiODDyqJt0n/xvEGsf2GpmbIl+TRed2fBC7lddz7nqk/iMp/eJZ7wGak0sFV/N89E5EpFtpnq13+GIHaVdLegfx3SgN4vuaVqZmrXciIh/+F1dDTGue1jsRW/MaKu51ar0TQc1LptCUG4vjZ9G66KN3Iqh5g+fRNHr0HHZ41po3m3o3HfzlgRBCCCGR4OKBEEIIIZHg4oEQQgghkeDigRBCCCGRmLeGydDqKOeRl06jMXDpCjTT1Ne466pjh1+DnPExLKu3oN01pV2xGo0rf3TH59x5cmimOfjiPpx74UJn3NTcDDlTRTT9dC1d4YzjRjXJt45gVcZf/OpFZ/ziSy9DTtlYf5ZV9c8gjubPDR/aCLEbPrDOGWcMg8/KK7FT4weDD7nnC9DhZ70viQQaOavC+hZVaZDUBNsx9owR26Keg+U5NIpVemHVe3wiPOyMcznDVYm+VZHTqjNjI3ZqbMjdAzHdf/CAYMXH3V/GjqBblOcufNzo9GmYPWNQSfAu47iLC7zDxi0PjQqIWvN89E5E5HWleVOG3rW3o3Z1Ks37vWs/Bzk+mqf1TkQk1YGal465mtdVsxhy4vV4X9466WreL375IuS8ePBlZ1wOjS7CIRomg3jZGW+49SbIueHD6yC2oN
OtUNwZwyq7NwQfxPMpzZtTvftH+MsDIYQQQiLBxQMhhBBCIsHFAyGEEEIi8b7yPIQVo1CO2gs6d05X7xH51S+eh9i6G9xiJB/oxn2mKaNoypkzbgGWV175NeSMnB9wxletNQpQGeu65oWtzvjc+WHIuf56LBx1btC9pu99/4eQc+JkL8QmJt0ufslaLHh11RW4fxpTRaKWLFsBObfceivE4qpITMUoeFMp43OPxd17FTM2g4OYUTzHmGs+8IlLfQEiosv3BFZDS4/b2yAPQuyh7ejO6O5zO08ekHsh5w7D6IFlhg5jkldH0kuP1jwfvRNBzfPROxHUPB+9ExF5WWme1jsRP83Teidia961N7iap/VOROQ7z1qad9oZT2L9OalNuF2Rl61CvQvqUVta16xwxj56JyJSURXnKmV8nlrvRFDzLobe8ZcHQgghhESCiwdCCCGERIKLB0IIIYREgosHQgghhERi3homLaziQJOTbgGR3BhaqPbtx3543378MWe87gYs9vQvfvu3IXbVda55Z/kqNCKdO+OaE48fPQo5J0+chJjuGjo+iQWhFrVj97+RnNu1s//secgZz2PxkyXLXXPQH/6rP4Kc66+9AWL5YsEZtxjFXuKpWohVlMMuZhgmjUcsYejeF8sXZK2SY7Hq1s5Pevjrlqtr2FDVmd6P6O8fFnaSDBZkymTdcbgTUgxz5GUA1lWSdTqmK2BNQ2xKR/BFzIeoCVrzfPROBDXPR+9EUPO03ol4ap7RJdlH87TeiUynea5Oda1YDjmf/UNX867qRr2bUnongprno3ciqHk+eieCmhezNNC4nxcCf3kghBBCSCS4eCCEEEJIJLh4IIQQQkgkuHgghBBCSCTeV4ZJi1zOdSed7OmBnHQazSxnB4ed8X//79+FnFdewU6bN3W73SJv2oCVKa+7brUzvvm3Pg45UxPgjpJQdOc0XPudOnUaYpOTrhmyvrEFchZ0LIHYp/7wD53xhz70YchJBlglLVBGxDBmdLk0l606z6/yn0/dtIoxl29nVmCzO+wzDG+frm5mCferwAbrGvGz7FePfYOnCU90wVWjG6i+JBGRNaqp5SZscikH+2Y+/ZhkIbat2vafFxv1aO57BM3Kmw9shVinTts+K6cXEZGhHDo0teZZepc7MwyxH3zrOWd86Feod9d9GLvjXvchV/O03omI3PybM2te3HjPU5OG5h1RlSIN87eP5mm9ExHZoDTPR+9EUPP89G66mJrbCrrFgM0qu/GAFSYJIYQQcgnh4oEQQgghkeDigRBCCCGRmLeeh5hRPcPa0Wlrczdxr732WsgZPIdd2M6e6XdzsqcgZ4HR9e2tw27xk18efAVyFi6oc8Y3XIfXlB/HYijlsjsulMqQM6i8GiIiA+rzfeoPPgM5K6/GYlZLV17pjE2LQAULOYn2OJieh5mfX7WWBAt7KqMroQef3n0hVzIDYAHw832sV5YV8/NuxlDwzMxz32PEnr9bBfT4fcbDBzC2ZfeXnXHmQLdxpGE+6bPyPKid+V1oS6FpRWuepXe53n6IDfa6mmfp3dHXsNjT/l+6mqf1TkRkzfqZNS+P8ia5hKF548POWOudiJ/mab0TMTTIR++MmI/eWecLsJGpJI0YFkjE3wUs39eFwF8eCCGEEBIJLh4IIYQQEgkuHgghhBASCS4eCCGEEBKJeWuYtEwqgWEI0ZHWVjT9/O6//JcQ0ybGfb/4KeQcOXwIYvWBe0vLAXZzO93ndo8bPj8EObnhEYjlJ9xucZbdbzSHvQWnVNGUGz+wDnLaly6DWEHfvAreX6tTmy6aUrGelYd3J6gYn9BwGfnMVTHmqrpIlGFYAqr8ZgVb3PHOEJ/V1kbDvefTUnIPhp5Rj+8TIRYZel4a8UCvgjPz9+8mDx+4zxlv3X0fJumGoIYh1brnZp4P6kU39c7w82nN89E7EZH9SvNOvnoCcrTeiaDmab0TETn7LJYeG54adsb5CfzOVqQGYq
M59wup9U7ET/NA70RA83z0TgQ1z0ejRERiBfczp0ppPF/Vemc4UC+A+fvtJoQQQsglgYsHQgghhESCiwdCCCGERIKLB0IIIYREYh4bJo1OZoYBLtBmPcNRlA7QlHKFqja2bCl2nTx1Eo1AL+4/6Izzr7wKOYnANbNs6MaKc0NDaKL8/ve+74wHzqGpsmXBYoh99Dd/yxlnupZDTslw4QTKnZhI4usSN0yU2shlFWCzzJ6hOl/MOtAyTEIAj6sYB1bsVncz8qBqrNdi5LR8xx2f+T3M+f94nGubHITY1nuM+3J/debPLeq+hMEdRhaWodQdXn0rYcIdx0aURpXNC6g2WnVRvSqqQFrmSOPzhYbf1YuiunvGTYkb35lKwtU8H70TQc07+wFL796AmNY8rXciIhu60cA4NORWufz+9/ZCzmgvat7i2hZn/PGP/BbkZNoMzctrAyrez4Z6V/MSHnonIhKqLpc+eiciEguVxpr/MMBAad5s6t108JcHQgghhESCiwdCCCGERIKLB0IIIYREIgirrpQzd3R1dUlvb++lvgxCCCHkny3v9Wcxf3kghBBCSCS4eCCEEEJIJLh4IIQQQkgkuHgghBBCSCS4eCCEEEJIJLh4IIQQQkgkuHgghBBCSCS4eCCEEEJIJLh4IIQQQkgk5m1XzTMDp73y5rKApjW3z/mMpo9Vn88nZ9ZugdXk0up8OUtYXeeq/Sg+z+qKzpVec1ld9Mj7HPPF+4k73LUJMnbehUdt9ZobOTM6s+b9c9M7K29Wb4G69rnUOxHUPPuzGH9sF1SsggemjMkynZ0Rrs6FvzwQQgghJBJcPBBCCCEkElw8EEIIISQS89bzEHhupGGetYk081zW/rtUzDOq88/sQbA+ymXY7NSmutvphfWMrVi1XhBC3iGDofBxFdiEOU1qPOZ3Nr3L3Od3mJfm2Tn63ff7goLmeejdO9cw8779vNW8OdS7d6ZKuuNYEnLCKQ+9My50qvrLMuEvD4QQQgiJBBcPhBBCCIkEFw+EEEIIiQQXD4QQQgiJxLw1TM6mucaea3YKMvnMXTEKevieb6bz+x7nhWVyspxPs2UgsgqyVGmOrFTQ7VXtfTmtLmutkTM2D7xfFpk+NA9mslZmbsa5rPIzfWvc8WGvq/Kl0Yipz2OccHTrFogFBzfNyhVZQJEoT5KT7ktVsJJqZp5nLvXOzrO+jzMb0MOyry7OdH4RiXtNhegiUdXqXRlDNSUjL6Yv1HSpAvozz6beTQd/eSCEEEJIJLh4IIQQQkgkuHgghBBCSCTmreeh2mJBNj57gL4b+bNTsKjaz2Ltdel75XvvdMy3MJcP1vliMXct63sH/JrzzN61d6p9yJy1Bg/UXrux0d23GWPbxTUYWHaDbiOWOaACxoGdRi2kjBVUHDCqGHVX209HzbXL+DBZw7qgL6HT8C5sPYA+jMxu9wTdWbzpwcFqXQgXl3jgGhpqQqPsj0cloHIMvy8Fn+pHVrGnksd3z/h++hSJsiwPceO4ctn9PprF5Qz9Lqr7YBVWgrkM70LcsCXo2oDGLZcgVgsxH8272Ho3HfzlgRBCCCGR4OKBEEIIIZHg4oEQQgghkeDigRBCCCGRmDXD5KOPPip//Md/LN/5znfk937v92RgYEC+8IUvyLFjxySdTss3vvENueWWW2brdCaWkaRa4whONbOh0H/u2TFD2vPg59XHaWPidHOB0dKY2zIZ+RXKQnRe1fZXz/PNnqnoYQx1u47FsBtdh7tFuxxFsp1mRSb3OMvjqHyA3cZHw7OJZD1aOvYZBaH0Veas+kyziG5YeXgN5uw1Ymu63U+9dTcaJjsN0+aWXREu7hLh3a1SxYrWXPYZ3JHl+vMoSlX2KAhlYZm/S2YBKPcDhiEe56N5PkbLMI7nLxkFqEJlJA1KOHfC+CzV/oF8KUyUs/LLQ09PjzzyyCOycePGd2Nf+9rXZOPGjXLkyBF59NFH5Y477pBi0XptCSGEEDKfuO
DFQ6VSkS996Uvy13/915JOp9+Nf/vb35a77rpLREQ2bNggnZ2d8vzzz1/o6QghhBByibngxcOOHTvk5ptvlu7u//e3v8HBQSkWi7J48eJ3YytWrJCTJ09OO0dXV9e7/xsb0z9SEkIIIeRy4YI8D6+99po89dRT8tOf/vSCLuLuu++Wu++++91xV1fXBc1HCCGEkLnjghYPP/vZz6Snp0dWr14tIiL9/f2ydetWuf/++yWRSEh/f/+7vz709PTIsmXLLvyK/5ngY2AcHh6GHCvW3NzijOvq6iAnmUxGur7/QcV0bblDXwOjNkgFhslp1jp2Xgj3qesyqi3uzKxzxnu2HIScPbN5TQo82zR0umbIBtP5aDgRoT0lHjfmZaKcuTunlWb/NonXeXCN2zHz3nvwqE17jZgyTM6xH9STvDvUZQxFJG+UYAzVV7vKpprVO5gtjK92qIyVgdFVd3hoGGNK85pbWiCnas1Tn9lH70TQXBqmMKdYRmNnPHRvzEUoFFk1F7Rt8ZWvfEWy2az09PRIT0+PbNy4UXbt2iVf+cpX5DOf+Yw8/PA7LvR9+/bJ6dOn5dZbb52ViyaEEELIpWPOelt8/etfl89//vOyevVqSaVS8thjj1X9t1tCCCGEXD7M6uJh79697/7/jo4OefbZZ2dzekIIIYRcBszfrpq68onYRYw05naf2eHNYy4rxaO4lD6sYhQ1CY39TP2vUJ599geQ8/xe/OewV165yhlfe+21kLNs2XKItbYudMaNTU2Qk0wbvyapvUq9lyki5j5hALtofkWwdKEqszueVTzLuO8+BNvd8Toj56C/6+AS4+7mrzmMHSa7+7CKUlZVnNpitMdcZ5hBNuhnswVSpMFyg+giW31YTKsxi9WzNqtiXY0ZPO7BeVAQSkQkn3bf4YolQJaW6fGc6h0GrcNMzVM6MZabhJxnd/8IYj/d+3NnvOZK9L6svfYaiGnNa21FfWtsbnDGyZTxR6ZZgEp9PqOQVK2hiwEUofIszAf3fPb0bjpYnpoQQgghkeDigRBCCCGR4OKBEEIIIZHg4oEQQgghkZi3hslYBdc9FctIooyHlcAw6liWHqtAkT7OKBiCdhc8X0LFSpUC5JwZPg+xX7zwC2d87Lgu1CMyOT4Oscf+f//VGdfW1EKOVcDrhhtucMbX3XA9HrdiKcTaO9qdcWMjltiprcVr0PfKKsgSVtsl1Xg3SpVyVXNp5os10mJf6BoIu7eh8TE4YLSdtCpjeQDfNdOsaLgovbBahK53RjmjKNWYbIfY5VEUykVrno/eiaDmzaXeieD3WOvdOxeFmnfuzIgzfuGFX0HO6ePHcapx11j5/yi9E/HTPK13Iqh5tt61QayxMe2Mawy9E+PPIq15l6Pe/Q/4ywMhhBBCIsHFAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjFvDZNWczOz8lY4s2HSqsblZ1OZuWJXYFxoYdI1+Bw9cQxyfvhzrBR58KWXnHF780LIqatDY05MrRGHzqMZ8/wgxl779WvOuP6ZBsjpWo7t02+80TUZXXvtdZCzatUqiC3OLHbP14wV3+IprGipu43qscg01dU8u32+n9mgO6Ae+ISRhZUbdQfLnGGgbJTH8TBdaa/FmHrQiIHR8Q4jBw3E0qAqTJrtONcasbfU+NK3N9Svq4/eiaDmzaXeiaDmab0TETliaN7PfuRqntY7EVvzauvcq9d6J+KneVrvRETqn6l3xl3LF0GO1jsR1DwfvRMRqW92zeY+emfFLobe8ZcHQgghhESCiwdCCCGERIKLB0IIIYREYt56HsrGfp9ZVEjt/Vh7gjFjfyheKc14DdYOUqXizlWYmoKcX7/8sjP+1tNPQs4vX8LSQ2NjbgGounQacoKysd+vioPEjIIw1j6azps09i6PHsG9y+PHe5zx88//HHJWrlwBsRtvuskZX7lmNeQsWYpFWhYtcvch6+rqICeZSkHMqKfjRUNOez/MjXQ343KsOm
Rg3xMsrHT7Xtdf8MRe9BuEhgVBnlDjQex2KPImhtBuAzyOlyl71Oc5gCmSCfBCt6q/V92x2zhwl9KNPVYHxJnxdVOUtX/LLNo0c4feudQ7EdQ8rXcifpqn9U5EpC5maF5B7fcbxZBszcurHPwsk5NFZ3z0yCjkHMr2QKz5V67m+eidiMhqpXlXLDb0biHGtObNpt5NB395IIQQQkgkuHgghBBCSCS4eCCEEEJIJLh4IIQQQkgkgtByEF5iurq6pLe39z1z+vqzEAtDNMqE4sasIlEJ47ikMgKVSmgoGjc6WA4NDTljbR4UEXnm71z31b6DWAxlKIfur4mpPMQ05mqw7D5iH3OkiEgy6RYoSSSxYEkijcYcgfnxFSuXrYIzbl5DExaJWrUaTZRXX3O1M77iiisgZ6lhtOxoc42WH9rwEeOaDALdvXEJpOTcGkrSeQinmdlmOR33GbH7q57NZY0R24Sh3O3OMJOzHKFGN84n3Hc/e49VlOpiY31m7dB8yMhRny/Ee7D1NuMoVf9tq6cCn+93q2eFIRqYK4JaNlmrulx66J0Iap6P3omg5mm9E/HTPFPvQvT4x/T9K+tKZCJBEfU7VuceqPVOBDXPT+9EtJb56J0Iat6q1VdBztXXYFGqK1asdMZL21HvFrdhgasPblhnXNf/y3v9WcxfHgghhBASCS4eCCGEEBIJLh4IIYQQEgkuHgghhBASiXlbYbJkmE0sIyB0ojOMQZMTExAbHHaNQNk+NGhaRpJjx9yKi6+9/gbkHD1y3BmPDqM50qpMqT9M2SgZFsTQLKSP8+nKJoKGqUnjmkpYcA2oqamBWNEwMJVKbjW3wfPDkHOm/yzEXn35187Y6iy68sorIXbDNW7nO2/DJHSZfBgyGg/vcca5LXsgxyybuEffUN/6g9s982aJzWpsVZPciaGtW7c44ydyunulyFij1eWyOjLaqGr4Ogd+hrH2H6vD9mLOrWr8kPbRTodHtUwL/Y0JAvy7n2UaT08o87ehd8PDGMv29TtjH70TQc3TeiciUhgqQiym5KUmRHOipXkJrXmGKb6QNrpTqnGphNo5OeVeZ2kUr9uiJulqXtEw3JemcK7BPvcmnOnBPxte/SV+ZxYozfPRO5GZDZPvBX95IIQQQkgkuHgghBBCSCS4eCCEEEJIJOZtkajXjh+BmNUtrph3C6nkRoYh59Tbb0Ps7bfczdKenhOQMzyMc5096+7Jnzx1CnImxt19rUoR9ynLRqyoOuaV4n7FnhqU5yCRQKuL9RqUy+41jBtdNUtlPC4Wd/cO6+vrIadQKOBcJfd8Vu2VRAL3LmvVfl+xgHuJoeGRqa91O9GdOHkST2gRbJ05R+9rb0LPjOwxiihdbO+CptUKGpv5g7o/pWV68LhPlgnBB8PaY9RHmkUsQwMWBwO0N8Sa6oCfBB9VmuejdyKoebbeob5pzfPROxHUPK13In6aVwSXh0gpjp85pqpENdQ0Q46teer8ZbwmrXk+eieCmuejdyKoeYkEzl1bhzGteT56JyJyaAbNY5EoQgghhMwaXDwQQgghJBJcPBBCCCEkElw8EEIIISQS87ZI1Nlz5zBYRqNcfsLtXXj+7ADkHH+7B2Kn+93580U06oQxNO/pvEQSCxbV1LprtjCGxiBJ4PnG8spoaSz9Ekl8pOl02hnHDRNOWEGDTT6vutoZvq5YDM+nzUlWRznruJoafT/RMWkZiJLKRFlnGIPihqmpaq/wM4/MmJJTRrm9Rs6mwCgclXO7YzZiY9G5xWrceo8R26XGuX1VnrATQ2OGuVQ7JA1z20VHvT6+5bxmmGZazg4pzTP0bqqIvVoHlOad6uuBnNNDpyGWL7qFo8IY3nOdIyKSSLp3QuvdO3MZXSYTbt5YHnNszXM1VuudyHSa5+ppPo+mRgndnJhRhM/wYkq57N4Xw8cuNTXWG+PGEgnMSTbgnzuxpKt5s6p308BfHgghhBASCS4eCCGEEBIJLh4IIYQQEg
kuHgghhBASiXlrmJzKDUIsYZQkTAauyadtIba0q7saO5BdsazLGU9MjEPOyMgIxMbHr3LGo6PYdnLo/HlnfPg13fpPZPgczl0YcQ1EUyV0t5XKaL6sqNtSb3SdrBjdRpM17utR24hGxFIZ77nuvllbi+eLG8YjXYUtmURjkNWhU8esLqlWFbiGpuqqGwZW1UBIqmpqLLh42dV//Ue23+0MH3zofkjZutswhGov5DOY0rTGOF9wGRgkZ43qHurwlKt5lt7FjPu0QGle2tC7TqV3Iqh5lt6tUXongpqn9U7ET/MqI0YHZEPzgqKrNw0BduOsq0Pdr8Rd3SgH+Mdh0yJX83z0TkQkoTTPR+9EUPN89E4ENc/Wu0UQuxD4ywMhhBBCIsHFAyGEEEIiwcUDIYQQQiIxbz0PtRXcZ0oaBZICvQcYx/3GeD3uYzU16KIbuD8+MY4x3dXS2ns6fdotyHL6xHHIyfbj3l5CFT9JVIwiMQU8bvnK5c74Ax+4AY8zuvHpbnWBUekkN4pekNffeNMZL+7ogJy2tjaI9ff3O+PGRry/1n6fvudHjmDH1bExLJ7T3on7vF7A64L7qSJ4vvmB7378Fmf01/egz6TRKi6l0L05Lw1/a8S2qHGV3T8tctVNrTXPS+9EQPN89E4ENc9H70RQ87TeiUyneapAUtrojlnB4oBTMVfzMtcshxxL8yaV5mm9E0HN89E7EdQ8W++wYGFj40JnXFODfjHrnmvNM/Uug/fzQuAvD4QQQgiJBBcPhBBCCIkEFw+EEEIIiQQXD4QQQgiJxLw1TJbyaNCqTaNxraw6z4UhGopKRTQL6s6QsQQWHglDLPJRUdPX1aHJaHTELZpyOouGouEcFpeKxd1raGk2Wi7GsbDKB25yC7ncessHIWdiAg02gTZMGoWPhoeHIJYbdT/P8uVYnOT669HANDKywhlbRaJKJSyCpU2UsZh2pIn0GJ1Tr7umOsNkQ859z8YEqxo1yOGq5taMNc6t8bIh92VnnPPs4tk5utcZnzCuc1u1F5XzMKA2ZSDjvoY+iD2hxtZTWWfdYnUJO42cbjW2pvHyQnp6VEtTrubVpmbWOxHUvFJhZr0TQc3z0TsR1DytdyIip/tQ86bOusZHrXciIi2txguqOhDfdBMWrrrFQ/OCwDBMKs0bPmPo3QAWLFzetdQZ23qHRbdA8wy9qzdM47UFV/NOGHq3/soqDeLTwF8eCCGEEBIJLh4IIYQQEgkuHgghhBASCS4eCCGEEBKJeWuYtMx7FcO9U6m4hpNyBQ0oImgyDFSXybCMc9em0dBTqahqbsbZ+k+7lRTPnMGqafkifsBywTVDXbl2JeRcfy3Gbrp2lTNe2IjX3WQYO4NA3QPD2TU1hmaosSHXVNR09SrI6ViI52trUR3sDLPQ5CSavZqb3cppx1tw7pFGNBmt6MSqbz407nXNrGuyholL+/k6MSWLnj9Zo7yeh63jqix2aDUD3f3QWjfQjN0OpQW7Yzaq60SLqsiYdZ1ghvQzhOqjGkfxjNuNLqnbvWY3gMua+Tqrr0Hp55gMym6F2cqUoXchfmfKOhYaelfCWBh35/fROxHUPK13IiIjJy3Nc+cvF/AZr+28HmLXX+dqntY7EZGFDYbm1bpPTOudCGqepXelId0qVqT9atecuHwhmr/LLQtwLqV5tt5hNdCeFnf+UeNlXNFpGZGrh788EEIIISQSXDwQQgghJBJcPBBCCCEkEvPW81BTm4ZYxegyGSo/gy58JCJSY+zlxVTrxJjhsUikcB8rDN35yyW8pvNnXU+AsQUpobGsi8Xc3cTaNJ5/QVM9xJrrVTdOY2/P2u8rltwufpUy5gxmjQIwPWed8dnlmHOk7hjEpqbc89XV495ea2srxMaH3H3BU2/jHmsixPclZcR8OP0J/TJgkSgoR3QrZnx2K8Z2b5p5b/2ZnRjb/Lw7fl5XMBKRNUbxpb7Ovc748P9htMJ8HHtfrtnleiP6HsJN1g
bzo+hrwFJSYw/fgbE7dJdLpHEUjwM/gWXOaHxkxrlN/wZguUoM9rtGFt8+pi0p5dspY4fHiiEm4FUyii/FEihwWvPSIepNIsSrL4y5mjd2CgsrlY0PXdQdiAM8X22A+rYg7XaibI6jBylRwLmCsluUqmgU2KrE3Zild8eyZyHWdc7Nqz1i6F0eO0NrzWttQ70rGT6II32u5pXSxp+P8er0bjr4ywMhhBBCIsHFAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjFvDZMxY9mjzYoiInFVTSqIYdmmuNG9MRa6x1UqhhHJOF9ZFZMqF9GEMzXuGnWsa7LOV19f6+YYBp/XX3sNYotaXbdXIY/d8YaGsTOcjo0MG90qj+FxEzm30MmvfvFrvM5X0EBUVp+5sRGNTx0d7Xhcyb3nr//6bchpbESz4D88/5Iz/uN/AykmYC1rQENhTpvUDuAL+4RlwlP1mG41CkltwppNEuxVObu/jEnykBFTbMdnfDiLZsXHYSq/EkmYtQuT7jIOvMtyOrrsXoexA6rIFvbdFDmwBa/hoGFmBdRhXzZMgGOG0RrKcK33OJeg5oWG4TdmmAyDmGuwq00YelfGC60os3nK6DpZESxUFSjNK6CvU+IBmqF1sb7aevz+J8tYBOvoa6874y6j82Ysj1qp9c3SQK15lt7lz+Efo7/6yRFn/PqL2EW0bJhbG9vcazf1bhzv+eu/dt9sU+9+/CbE/tjnPZ8G/vJACCGEkEhw8UAIIYSQSHDxQAghhJBIcPFACCGEkEjMW8NkIoHrntDoYRkEKs+opBgzOsOJ7jJndew0zDslZRaaGEe30PioW3ovb5h5yoYjtKQMhfkprDQ2MTIBsb0//ZUzbmzEKm253AjExsbd7pFjOTzfpFFFMFSWwjMDWJWtP0TjkW6VmkgMQMrhoz14mBpbZtPxSTSJDp5/Ea/BhwZtu7NseIYRUDGKxRVFtDdxt5FjeAdz4j4r3w6PuRZ33Lgdc4xilXCGRr/mmHPKloN+McAwoBqFL2cRVSJ0q9/JEkqtrS63EhhdgwNXF1OW3hnfGTB/G3pXLKJOTYy73zWtdyIiU4bmVZTmhcY15aewKuOk0rznld6J+Gme1jsR1DxL7xJooZYRpXnDHnonInK219W8k4kePAxnAs0bnkC9e3HwZ8aR1cNfHgghhBASCS4eCCGEEBIJLh4IIYQQEol563mIW54HYy+vErr7dEWjaFOlYMTUnlyhgHtIk0Z3s7Exd0P6TD92XJtSx9XobnkiMlnG8xVVh06r02fTwjaITUy4+2ETk+hv0AVaREQkcK+roRGL0tTV43Oob3b3FyuWX8Sj6Bb4VcTe5w10ITBjV9A6ny4u5Utu55KZk25XY8sTgHVcRJ5Q48eNykemC8Ht4pkzO33ei6GblDdjL87diGWNRMZ0dZlOzDENG7qg1myaJayKN7rTpXXvfK5z5s6b1ePneYilXC2zPAGVJH4/tOZZxeUqExgrTLkaZOsdxs70u/v9k5PDkFOTwu/opOpyWdK+MxGJB+h5aF7ofpEmJtD3NWGZFbTmGXra0OhqoK13VpE/PcbJwxA1XmuZj96JoObZeje7vxXwlwdCCCGERIKLB0IIIYREgosHQgghhESCiwdCCCGERGLeGiZjIZpUioYRqKRMcaUSFjopF4xiT8osNDGJJpz8VB5iZWXymZxEg0/jQrfATmtbM+RMnUOjZaZ9oTP+4IduhJyWRiwPVC67phvLaGkbc9yx0fxTwsAwQ6pucZaxyyhTIzF1wlgMX0/LRAlzG91ODY+RnO7tn3EuiyZlhjQaX0pWB3yrNt3hDk/fgVWOGnNGrEEZ+g5DivQZHsolVsUZheXr3JLb4Ywft+pkWR5KDwLfe6UY7TMKc213Y+bU8LAEvJc547Nobyn2VrWn7lOvp2XXtCgm3S+gNlCLiJQK+H0sKs0z9a6C5r1S2dW8Qhn1rpLA46ZKriG7oRPvesMwat6I0ry2zE
LIueEW1LzF9e78ZeP776N5eUNatOZZepcsG/o2ZRjQ4ZrwomJJpXlWsUDjT+0SmM0xp1q9mw7+8kAIIYSQSHDxQAghhJBIcPFACCGEkEhw8UAIIYSQSMxbw2RgVOwKKrgWSirTXTxpVB8M8DaUg6QzTiRSkJNPoYFIV//KGwam5r4hdZBh8Cuiqakh7V7TTR+4FnLaF6IRKVAmTsscqTvoWQSGYbISoGkrFHcuy8BYsTr76fP5dEkVqyobktQtCUWkvrbK11+9QlnTUuhTOXHm48xalj6GwrUeOSa6NKbImGyH2BPqGp6o+nyzR5Nl0Jy5uekcYzzj4PGqZtKa56N3Iqh5PnonIlJQmhcYeldjOPOalObVnhmCHB/N03onInLTupk1T+udyOxpno/eiaDm+eidCGqej969cw0utt5hheALgb88EEIIISQSXDwQQgghJBJcPBBCCCEkErPiecjn83LPPffID37wA6mpqZEbb7xRHnvsMTly5Ih88YtflHPnzklzc7N885vflGuvxT2rarC6Y1r7WhprvyhmVD8KY+6+WTKJ+28JY19Jz19Tgx0zK2o/LB7Ha2pduADnVuPaWpy7uRk3xGNGERON5UvQXUrLxt5eJcAiMeWK6v5nPBZdSMo6XxgavhaPIlEW1vNL16CPxYuc5VVwaTD9DDMzVmWBpGppkLvdQO4eIwu7cV7s65y/WN6XT6ix3354UXX/9dE7EdSkmGFe0nongt8ZH70TQc3TeicyjeYtcMutGbYIL83z0TsR1DyrK7PWPB+9e2dud+yjd+8cp3wts6p3hmntApiVxcPXvvY1CYJADh8+LEEQSH//O5Wstm3bJlu3bpU777xTnnzySbnzzjtl3759s3FKQgghhFwiLnjxMD4+Lv/5P/9n6e3tfXcVunjxYhkYGJD9+/fLs88+KyIif/AHfyBf/epX5ejRo7Jq1aoLPS0hhBBCLhEX7Hk4duyYLFy4UB544AFZv369fPSjH5XnnntOTp06JZlM5t2fuoIgkGXLlsnJkydhjh07dkhXV9e7/xsb8/lnboQQQgi5FFzw4qFUKsmJEyfkmmuukf3798tf/dVfyb/6V//KbEA1HXfffbf09va++7+Ghur2iwkhhBAy91zwtsWyZcskFovJZz/7WRER+cAHPiBXXHGFnDhxQrLZrJRKJUkkEhKGoZw8eVKWLVt2wRctMk2BjwBNKUGgi3UYpr+SEVPzx4y2bEazSCMPj4sr49GClhbIWbYMY+XypDOura3F85sGG/dCLZOTRTlUHUnLuCBMpA0Tji7aZHW5NG9edWYhPb9P8Zd/PINnnkvD4Z+oyG2Q06i7THZDiuQM02FDzh3PrjHRWJTnHnSGO43umPca13DRDZPqvniX5ar2Or1MsfoYvALrPjU0PohBD8oF9b5aemeYKCsxZfozvh+WBsaUqdFP7965in+K1jsRkQXNiyG2YkmLMy5WsLiUn+ZZfw7M/F3XeieCXZhNvTPugTaz+uidiEjg8fd5S0/L+vmZXtrq9G46LviXh7a2NvnYxz4mP/jBD0RE5Pjx43L8+HG5+eabZd26dfLYY4+JiMhTTz0lXV1d9DsQQggh85xZ+dcWDz/8sPzJn/yJ/Nmf/ZnEYjHZuXOnLFmyRHbu3Cl33nmnPPDAA9LU1CSPPvrobJyOEEIIIZeQWVk8rFy5Un7yE/1TrsjatWvlhRdemI1TEEIIIeQygRUmCSGEEBKJedtVMxbDdU9oVfFS5pKi0a0yMAwoukKXafqzOsOpMRhZBKukLViAnTA7OrBFYF/2lHtNhgnHui86y/osFnqupNF5L4gZc4UehknDDOl5WcZc2tjlV2HOqvrmw+j69e75jU6UoTwx4zxNb2FsTD/2HOaIUQRypxp/2TIKNo5CKLhfv7F4wsYGPO4Ta5rcwEE83WnjEjq1y3CNkfQQhrap8eOHMKfhLmMuxSYjpu+diEgG7Jd4D+Ru9yY/scd6WMiWzS
pgfF6LuopbETUMsdqhVXWyoDSvlJhZ796ZX5n+inic9bfP5JireQuCFsjJ1OH97FjkVpg8kTUqN04YmlfnXkUCb4vEPKpxVgxNmhD3vgQlq1yuYZjU9y40OqAaU2GW1QkTzzdRcquPxipo7ExO4jO+EPjLAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjFvPQ9WIaBCcQpi2geRTBkd5YwubHrPytpHt6poxpVPwNqD1Hv0+YLhwzCKmoyPu59vYhI/r4X2Llhz+1QEtY6rlI2NO713aJgZrPsJHg6rrZ4BeiqsbpxGN9Uqi6bAUQ24az4m21VkLeRsWo9FhTJq/JBR+civ7pGvgcTdp7dqI20yjho9rM0KhyHHvE79eQ5qA4CIbMFKVQ8qU4VZZsmjcpR97yzXw1Yz02GXO9ziW5FKHefredCaV8DGwqaPJ5lS3/+S8d0z9vKT6vtY9tA7EZFQ7dNb3728dfFaF8dR38qG5mlXQGBck5fmGRaoGvVttzqEgt6J358fVlfNSqA7/VqdN41nrH4HSBjXFJvl3wr4ywMhhBBCIsHFAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjFvDZNWMaR4DM2QgYrF41YHNAPLCKiPs7qbaVNTASuWTExMqHlw7oaGeojpj2zNbRmmQnVNlnnIJ2aZhUxDqDIZWZ33rAJbFdUlMBQ0xVpmIZ8cq6letQYiNBVaxku32lOuE02Ae+5rgpjsdYePGLWmjHJFEG00jJZmCaOZm0eKyG6IPKEqVT3UoMs4iTxjzNQJ5kTDMGkYDw+vcW/EbigbZbNF+TjXmOZILPIFV2C17PQyY84e2ggYM/TO0kAdiscmIEeMAlB5PY/1/S/jHyFal8Yn8HyGVxA0LxZDU+WYYbScVN/3hGGmj1sdeoOZjZ2V0L0LUx56JyKSVJpn/LEjlcD641dpoGchu0DlWc1O4zRMEkIIIeRSwsUDIYQQQiLBxQMhhBBCIjFvPQ9W0Y2E0dxFFxqyPA+xON6Gstpbsxo5JVO6oIdIqegeZxWzWrCgxRlnMpjT1bUEYuMTw864pRkbapWMxl9ldU0xq2CSR2EVywVi+TUqat/O8kr4FIkKQ7wvVuGoQG3waU+JiMjg4HmIjeXGcX4Pdiv3wKYtuNvdqCwOjdjnTB48vB9i93ZucsYND+Jme9MYugn2q5pNa+7A89l47NR342S3H3DHfUZRpc8aXonNm9zz3dNnnP8wxjLKl3BgC/oUDhjVlvaucS+0c2835Ox8wrgGy+NwiclX3O+D1czK8hLFlechFPRTlSuTOJf2WKRq8aIM80JJaZ7WO5HpNK/LGY9PjEBOe3M7nq/o6ne+qN0atgegNubqjaXxgegGiegpqQgWriqq51Ap4dypilGYT2ueh96JoObNpt5NB395IIQQQkgkuHgghBBCSCS4eCCEEEJIJLh4IIQQQkgk5q1hMp6qgVilgiYcbS4JDbNgsYQGG+0DssyCFcMslJtwjUd5w8A4MHDWGZ8fQmNQKFgMpVhyYwWjG2fFWg9qx5RljvTpRGcYH+MhHleG52CYfhIzd6KLVdAQZpoo1TMuVLB4Vm8/lkgaN6smzcy2Pu1+RHfdPWq8fe19mPOEEYMjLbYbsfVq/DBkhJI1jtPFq9BQKM9bx7ncZ8x9H7SPFAn26KJQm2acW0Sk8aBratx90Mrabh2pxvo+TUNGmVKzVjGr6rqyVkvQqN/zmfVORKS2oAoWlVD2rQa2+useGAa/XA6/RAWleWcHzkDO0NAQnlBpXsnQ5aJRGC8l7p8FhqfRLFQXi7n6YhbKUzeh3ioMWDE0MHT/HAiNP2krhpYFFffiQ0NfLfen1rzZ1Lvp4C8PhBBCCIkEFw+EEEIIiQQXD4QQQgiJBBcPhBBCCInEvDVMWl3ZQmstFCrDpHFcuWIZn2buKFkuoYGoWHJNMGPjWO2w93S/M07XoDFw8PwAxM6fH3TGPT0nIaelCdskxnSnNsiYprOoB6
HxIPS9Ms2mRqysDZPG8wyNZ6XnD41udYUyuqgmS34d66rhIbnVHR9CI2KjYbg73bR95skzRg74FfGu3zvzzLI1g7FGOQCxnOqG2bjJONDsq+mSOeRxUSIiyiBpfWPNdzjjdhs9uPkRSNm7FjueHsgoU6xRsfOAOuPtfXhVD6FnVMbUs/L97qGJe2a9ExGZSLrvfjnA976mhNUjdYfecgkNfsUSapfWvKNK70RE0jX4fRxQmnfO0MBjPb0Qa2q6xhnHBCv/mlpiqtB746N378ztmjgL0KNUJG90zNSVfkOrC7Tx2CfTruaNGno3Ost6x18eCCGEEBIJLh4IIYQQEgkuHgghhBASiSDUlXkuA7q6uqS3F/e2CCGEEHJxeK8/i/nLAyGEEEIiwcUDIYQQQiLBxQMhhBBCIsHFAyGEEEIiwcUDIYQQQiLBxQMhhBBCIsHFAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjFvu2p+77u/gFhTA3aULBannHEhj93NxOhu9sCDX3fGvW8fg5wNq5ZDLKm6oqUS2N2svjbtjONx7ExXU5OGWCJwi4EmjK5wqQbsjtdQq46rjEOOVApGyO2OFysVISeowfM1tdS585SxG18Q4GcuqW54BWnF64zXQUjf4kqA91x37BQRKRXdz/OV//0v8HwGYbDVGe+RrZCT/bLbOvHAFmzL+Pimmc91u9HN8Q5phFhG3C6QjTm/dpW5TWPO+PB2zOnOGQfepsbYmFKsPptAJ35nt/0tpm3a5o4fwialcrixE2LbD7iJjbLbuIb3vEIREbnnwZlz8KnY6Pu5u9O6wchPlObVNrRATr44ibGS6uwbn1nvRER633Q1b6Ohd+kY/v0zrb6QWu9ERGKG5qVa3Ly00W80kZ9Z85pqjeM8NG8ixA7IiUlXI0IPvRNBzYt56J0Ial5YN7PeiaDmmXpXQP3+yn1+mmfBXx4IIYQQEgkuHgghhBASCS4eCCGEEBKJeet5+OHPfwmxRAn3Dq9btcgZr7xqI+Rks1mIDY+4cy1sqIecoIL7bxUJ3GuK4y2OxdxYuhb3tZIxy4Pg+jeGRvDzNk/ivlZrh7uXWFsTQE6qBvfkYk3udZWncE9QBM9Xybv76IWpMciRcglChZJ7XeUUfr5E0yqIhUn3OgP8eBIW8H6GRYz5scsZbRY84fPi7uXv2uQ7t7sB/8Tjxn74XgytXeOaDp7oxHt+yNiUN54MYm3m7/c5cGYacn5OgSe0VcHwWIhsgkhfp2tWaNz7OOTsehCfH16W4eDIuc/Gz7kgstd4Nj788OcvO+NE6SzkXKX0TkRk2Y2u5vnonYjIwlZX8yoB6l0+jvcuWefqW6kWNbDG2MuPKc3LK70TERkcNzQvdDWosc7wi9UbmpdwNa8hgdcEmlcx9K6Cz7MwrvJK6PsqxNF/V067ny9RM7PeiaDmmXpXrlbvbPjLAyGEEEIiwcUDIYQQQiLBxQMhhBBCIsHFAyGEEEIiMW8Nk+dG0aTSmjgDsZa4WxykNLEScsbH0bgymXcNfUEFi24U0T8EBVJCowDV2UHX6NQ2gQamyuR5iA0OuWaakmGKKSbxfOVB1/iUrK2BnEymGWKrrrveGSfq10BOEKKBqKBMTb3Hj0JOdmAYYukG9xrC8ijkxGrxuSdq3M9XrKAZ0yKdRmOVD00N253x7Q89DDnbwj3O+FDDAchZi49dRH7ijPpkG2Q8ssU6Tk9mTW6VbTIvogrWGTGjkpM6X6Pll8RbJRl16d2W53A7Hrjmvnud8a7NeA+yuftwLnheOHemwb2InGHUnU16R88549bkAOS0JLAYUmnS1bzxMUPvpvDvkVrzikZxonQSg6EqXqf1TsRP87TeifhpntY7ET/NW3XV9ZCTaHQ1LygbemeYTU9PuprXd+4c5Gi9ExEJK67m+eidiEgxVJpnvIvV6t108JcHQgghhESCiwdCCCGERIKLB0IIIYREgosHQgghhERi3homV63ogFhdAU
2G+bEhZzzU3ws5xSJ29hPVwTKWwAqMReP2xUJ3PXb0DJqasmdOOOOPduHcY6NoDBqecM06qRiaIycreE3xEbdKWmkEK7d1tKEJJxW450vXYhe4ZM1CiJUbm5zx+CR+lpEpdPTE0m7ltLEJND4mjEptFdV9LzBMqqkUmoWCmOEA8+COe5R5rg/NX7og4kP3Y+vG3YaBsTuz3hlnrY6PhqHwgDIi9lleRQ9zpG2zNL4fos2zloPRms01knYblSIPGId1qrxHjJx13YYZco97oHVbOq3rVA7NPuM5ZDvdnDFr8tDDkOpptNSaZ+ndeG4IYoUeV/OKeeN5ltBEqTtfFouG3hl//zw66Gqe1jsRP83TeicyjeaV3OuKD2El3NKQoXktrualjGq56ZSrecmUoXe1TRDTmjc8ZuhdCitFjk26mpeYNPTO6BoaqPsym3o3HfzlgRBCCCGR4OKBEEIIIZHg4oEQQgghkZi3nofiBO7tNdXjntX4qLv/taANq9KMDmIxonLe3bPqaGuBnPo03r5c3t07fKsPN3Wbk2rPKsQ1XBG3tSSpCrKEFTwuFseY7uzZ2tECOYuX4aZuqTDpjCvnT0FOvLULYjVqD3DRkmWQM2Ls5WmLw8TIIOTECnhjGowOnZpkAp9VxfBGeKE26jOduK+9RXXH3J3B9yBrtIbcBRELfFZZuAYs6JWRzTgV7MkbvgHjqjJy0BnbvTEPQ2STnsfwEhww3n1t87DKXXUb3gXdQNJoKClinM/HH5JRV2E8Yr9aXZavxaCYdzWvqQH1Lj+B+/2pTvfpjOYMvaugL6ljYYszrq8z9K6IXom3+t0bAXonIhIYmqdsAcm0UYDK0rykG0sk8TpNzVvp3vhSOAk5lZyreabeNaPnYdEVruaNFGfWOxGRiXFX82LGHwSm3qm0pNHNuWq9mwb+8kAIIYSQSHDxQAghhJBIcPFACCGEkEhw8UAIIYSQSMxbw2RDEot+1MfRQFSMuWaW+gVYXGribez6NjXlGoESgiaVugDNLGPKQDQxiSacwpRrnhlsxQJNrXVY5CNRcg0vYRnPn8I6TtLQ4hYjueKqqyCnub0Nr3PYLS4TjmFRmkoBDVpNi65wxvVNrZCTMC60VHbXshMVLCTTGMPueHFVzEZiWJAlncbjiiV8X3zY3+2aBS1T3B4dtOoQbTmNsZ23uYcdNqyB3dsh1KnKUoWyA4/L7oFQ0ODOv7vT6h5pTKUKR23CFBkzCkeBhdI0K1pF29RcxnGPGDPpbpyGh1O6N2Fsj57fqoGlH6pVS8t4fJbZ04eGGlfzfPRORKS+2dW8ibdGIGfK6CycaHI1r66CBsaxPB43Me5qXiFEs+BgC2peR62reZWy0SHY0LykkhKtdyJ+mqf1TgQ1z0fvRETqW1zNM/XOMI1OxFwta0waepdCXZS4q3nputnTu+ngLw+EEEIIiQQXD4QQQgiJBBcPhBBCCIkEFw+EEEIIicS8NUx2NmMsnccqabUd65zx4iXLIWfkR89DbLLgGoHi2pUjIqHh2mpX1cZWtGIXtpd73C5zJ8fxw7QtboFYXahMm2X8vIkAzUkLFy92xouXr4Sc2jo0GcZLrjmonB+HnJjRqa1Udo05DbVojkoYXd9yZ4edcZ1xXL1hJE3WuM+mEuJnyZfQ8BoahiUfDuqKgJ4VAoG+JRBak3PHuUYsW7hGtuNcygi41agwmc2sgNiu7Fed8UPZO/AyjW6RRmNPA8tBWC0ecxllLrPa6bgJc9BGKiKBOp9VQjOn5jZNlcY12eU4Z6RzgTs29W7JOogtXu5q3sheQ+8qaHyMq+9VaHSdbG9Ag+bKFlfzDp7Erponc6iLWvOScaOrrofmLexYDDmLl82sefEimiG15sWCmfVOBDUvYehW7vQwxLTm1dfPrHciqHn5oqF3sdn9rYC/PBBCCCEkElw8EEIIISQSXDwQQgghJBLz1vPQvhiLfkwMY2GM1szVzvjQG29AznM/3Quxph
p3r6nW2KNHd4FIbcy9pWs7sSTMiXNuUapDZ3KQs6JjEcSWqr3EhNEFLpjCuVoWuXNllmFRk6Y6XEeOnHU/c38/FtNqaMf9xRq1bzc0jHuJuUn0JZw9487fuXgp5DQ24d53qJuU4tSmv6EwhYXGvMjdpwIPeRyExogGjy6MjYLPM2vEtOlhl1GJKGtWLHIv4oDshZS+vm3G+dRcnVh9yfx8XiWSDPOALrJl+DAkZ1Wc8jQizHBUg9EBtWpPh358nh6I9g5X83z0TkTk0Ouu5j33k72Q05RAfdOa56N3IiKru9xnfPw86sahs1ioakWm3RkvbTIK5VmaV3BvaEsramema2bNG0nj+fqz7rU3GH4KrXciIkNDrublJgy9M/RUa15j48x6J4KaZ+rdZJV6Nw385YEQQgghkeDigRBCCCGR4OKBEEIIIZHg4oEQQgghkZi3hsnWzhsg1tJmmIUOv+mM/8s3/2/IyQ8NQOyGJW7HtZRVRCVmdDdTxpVOw1C4cbVrXDncdxJyOhYtgFgq5T6uqUksFtK+BIuv1La4n+X0GeyO+eYUunD2vfiyMw4LaLj5l7+NhY5iNa6Ra8+zP4acH790DGLLFrjOscWL8P7GjS5zYUVde8UokKJzRCQsYMEZH+7b5Lr1soZ57/GHPCpHdaIZqq9zrYpgsSeRrUbMPc70KhqlndYcdo2OazPY6bNv9z6IdWbdHpZrDuyGnMPbsZUodOg0/IxjOePqGzY5w/CwX5mqoOEtZ5yTDUYWPocGfa8MUyNYRKss/uRLa+sHnHFLo6F3J96EmNa8/Iihd8uxq24qpgrlxdEYKIYEdqqidBvzqBuHTxma1+4aHVMB/vE0NYGF6trbXM2rbcbPYmrepKsJ+371MuSERffaTb0zNGnP93/hjH/80uuQo/VORGRxq3tD4wkPvRMRUV2fZ1PvpoO/PBBCCCEkElw8EEIIISQSXDwQQgghJBJcPBBCCCEkEvPWMJlMolPn9V+/ArG/+Mv/yxlP5IYgZ93yLojVpZQZMYHd1IKEcftUbKqEHddaVWWzm1dhRbSOdjT9LO5odcanjr4NOal67HK3ZOU1zjg3gVXavvnUDyG2703X1HTLWqwOePoUmq8mw1pnPDiMFRFXZ1ogdtP1rpltQTOaRhPGc9fl1UoFrIVXCY36eFV21bxPt9WENpsiDz/i3qtsZgvk7N6yCSfPuNUc93T+LaRs7kYj4q6Ma5i0Ojfmcvj8shnXGBhsx0p4mx5B81Xnw192xn2bscrmGvRQii6v2G1Uy8wYDtQgdD/Q/oO6yqfI8xnsCLrmPjdm1YS8pxO7TG5XfsnDa6wjldvT7LyJoUZ9FZ5Gy0bV2fflXx+EnP9T6Z2IyJjSvOuXr4Ccurj1XXANkjErJ47vhta81lo87uY1huYpY7fWOxGRU8cMzWtw78uSZddAjql533I1b/+baOLUmnf6+DnImexCnRoccqtHrja6JHffiObdpgZ3rkRiZr0TESmVXH2rVKx6oOyqSQghhJBLCBcPhBBCCIkEFw+EEEIIicSseB7+/u//Xv79v//3UqlUpFQqyb/7d/9OvvjFL8rAwIB84QtfkGPHjkk6nZZvfOMbcsstt8zGKeXJp78Dse/+4HsQGxwcdMY3rV4NObFa3FeqKO9CKYb7TMkk3r5c3t1bGx08BTldTRVnvLCtA3IWpvGa2pqb3fGHcW94amwYYjW1bre4xZ14vv/lTz4DsTcOuXuABaO4zLkRrPLz9oBbmKe9uRZyll6Nz6F2gdtVLzR8CpUSFoAScfddrRoqFSOYMHwsPgTQ0nG/kbXdGWWy90LG1l3W7O7ce7rXQ0b2PvQ8BBk3ZtSfku4G9Dx073HHmx+xilthUaq+u9T59mNhp75NRoGrve5w7YHbISX3EB63657POuPsdrx5fY2fhZgu5JQxPAj3Go0+D292x/cZ1ax0marb8bEYvVRF+jLVVZN68mnXRPLdH/w95Gi9E0HNqz
X0Lmn4t4rqr5bJJP5dM1dEL8HokKt5Wu9ERFqbUYMWp93ve2czdrlcvPFaiBWmhp1xQy0e17UMz/f//ZKrea+/1Qs5YyP9zvjc5CjkvP36yxBrX+hq3tKMoXet7RDTmlcJZ9a7dxLdYWDoXSpVnd5NxwUvHsIwlM997nOyd+9eueGGG6Snp0euuuoq+dSnPiVf+9rXZOPGjfL9739f9u3bJ7//+78vx48fN82OhBBCCJkfzMq2RRAEMjw8LCIio6Oj0traKul0Wr797W/LXXfdJSIiGzZskM7OTnn+eXQ2E0IIIWT+cMG/PARBIN/61rfkU5/6lNTX18vQ0JA8/fTTksvlpFgsyuJ/Uud8xYoVcvIk/nOYHTt2yI4dO94dj40ZBe8JIYQQcllwwb88lEol+fM//3N5+umn5cSJE/Lcc8/J5z//eSmZe9M2d999t/T29r77v4YG699UE0IIIeRy4IJ/eXj55Zelr6/vXSPkhg0bpKurS1599VVJJBLS39//7q8PPT09smzZsgs9pYiI/B9/9ZcQq6nBDmQ3X3eVM+5saYGcimGGrISuAyUZR7NJTQpvX98Z1yy0uA6LRC1b6nZmq2tB48ziwWGIpQLXTLPw4/8T5MQD/Cyj512jY30TGhE3rL8eYksXuYu4V17FuU8OYNGtQ8dUx8wSdsKrazQMUw3uvYoZhlTdtVREpJh3u8VZHeXiVRaEsujLaHMgmhr7VDfMPd1obu3rXgexbKdrw+sTdOHtMYpEZUQVpTLMeweMuUQZA8UwD0rf/UbQgxwWMQIyOzG0G82lWe3ZtK7TDD7uztNgzG10G9X43IE95jUha9RzwNJWNn/xV/+nM66pwXf6w0rvRETaleaZeqcddyJSqzSvpt7Qux40hC+ucb/Hy5ZgJ8q6hah5C08PqwtAnWrz0Lzz59HYnQ5wrhs+7HYpbW/Dv7S+8parJSfPGXr3FnYIlpLbkbSuCf8cWGzEQPM89E7E0DyjoJcx1QVxwYq6dOlSyWaz8uab77SCPXr0qBw7dkzWrl0rn/nMZ+Thhx8WEZF9+/bJ6dOn5dZbb73QUxJCCCHkEnLBvzx0dHTIrl275A//8A8lFotJpVKR//gf/6MsW7ZMvv71r8vnP/95Wb16taRSKXnsscf4Ly0IIYSQec6s1Hm4/fbb5fbb8d9rd3R0yLPPPjsbpyCEEELIZQIrTBJCCCEkEvO2q2bbwmaIXbMczZhL2tzObGnDhJdKopUkVP6TkrHOyuWw2lisMOyMly3DznCNbap7XAYNRQsKaPpLKlNMXRorqbW0YyWzZJ17vv7Tb0LO0AhWivvVPtfw9tYRrMAmtQshdPSMW+VucmoKckqJ0xD72AK3Hl+ihCausvH8YjE3Vq7gv/Qpl41YhH8R9E85cJ9rsLsfG2ZKX0bVNhxDw2Qmh0a9rLj3PJuxXHhGd0zDC+lxmOiKlnL6J5gSYvVICfQ/p57ZdGhdxLos1mA8+MzjEIP2lMZx3cZl6vaifWJ8vs4n8DDVbVQyZovQmQnwwRyW6nxfC1pdQ9/aVagbHRnUm7SqHplKzKx3IiKlmKt5PnongprXaHUINjXP7Y6ZNDpD1qXrIdbSvtI9ru4M5PSfPgSxoUE39quX0OD71hFlCE3jZzl6GksLTE66hslSiN04P9aI9yCRcjWvnJ5Z70RQ82ZT76aDvzwQQgghJBJcPBBCCCEkElw8EEIIISQS89bz8BvXY3e1plosElWnOokljcJORkM5SagCKeUCFvQIy7iPtbjT7abW2IzejKamBc64ZWET5NQkr4RY6ry755iqw6ImNUbBqc62pc74/Ggecr79nSch9vZJt6PcZAn3SgfOvw6xvgF3z7G2Fj/fuQn0MwyqDp11RqXRWAWPExUrFXGvtGT4IKRszOXBti2fmDkJtrr3QMpBY4++QXkCoB6ViN2q0SoA5QEWSMKCVyKbILIvdL0SB4wqSp3GtetLz+BtkfvvMy5hz1pnuN
M4DnwRIiJyjxr73aeturjUM+h5gGJd1nMxvAQiqr+PZ/WeD69zC0CZemd0itWalzD+yqj1TgQ1LyzOrHciqHla70Sm0zzXw6X1TkSkrg7PV1vjat7SZUshZ3QA9fvvvuN2Zn7rVD/kTBbd+zJ6/g3I0XonIlKrOnuO5dBTNj6I72uswfVUxIpYEEoC1K1S2dW82dS76eAvD4QQQgiJBBcPhBBCCIkEFw+EEEIIiQQXD4QQQgiJxLw1TObHhyGWK+DHKabdXhr19WjCa6xFE4423SVjaG5ZvawRYhVlsIkbhZzq691CJ7XG+WuaFkHs6GiPM2555QjkJOuxSFTDYreQ09oPYJGa1946AbEXX3nMGcdq0fhUDvCe19S1OONiCQ2MgdHlLpZ2DWAFw+ATTGFBlri4zziZSkFOjWEkEyvmgVGfyEC/Z1gkSjrRvDemDJONfZbBz3AiNrrPtLHRr2hTY07dz77DkNO9FmP6KWS349xZwyyoP83WLHbV3LneKMiUdT/PQ7evgZQ+4xZLxj1jt3HvrK6Wu8T9bj9keDE37XWf8d61+G7utQpzdaIG+ZCfHHbGudLMeieCmmfqnfEd1Zpn6l0Jv0NxZRbUeiciUltjaF69q3la70REWl/Bd3H5R1Y544YO1KlVG38DYle+fdwZ73/lv0KO1rzxwDAiGibOnCpwVYwbemcYXmPhhDPOj05AjtY7EZFkjat5s6l308FfHgghhBASCS4eCCGEEBIJLh4IIYQQEgkuHgghhBASiXlrmDzZdwpi4eQgxFJJ11xSSaF5R+eIiCxe4FZA+51b10FOXQ2WhssNnXUDcTTvJZVZKJXGamt1Tditcslq14gU/OjnkPPTl3sg9naH272twehM2XL8LYjdHnerm/0sPwQ5iRa8zsyiDmdcl8IqaR+6HitoLs64hqmJMTQLFSbQhReE7ho4NMr6xWL4jMtW9TYfctgRFFElEA+gUW+NPIRT96k8w5yZs4yBfa6hMOdl6hQBY2cnmv6sIpfYh9LAo3Lits5tELvbOOGevSqgqzuKfZ0+3I8+QChW6TN3J5hkRRoNo2VjbpMKeEwuIidPu5UMbb3D4yqxOpVj6F0LatDv3OZqXl29oXeDZyEmSdUdM42GQh/N03on4qd5Wu9E/DTv9+tRD35WcDWv1GHoXVcHxLTmWXrX0omm+Imcq3kVD70TEQkDN8/Uu1KVejcN/OWBEEIIIZHg4oEQQgghkeDigRBCCCGRmLeehw033Aix4QEsdDQ1NuzmjOM+eqKEHc/aF7j7UR0ZrPYSGscV826sYNQUCWLumi1dh5ueyRTu961c5XZcC+vrIOel7/4EYm8fec0ZJ4yuk59ub4XYrV++yxl/qDQOOf01RhGsJrerXm0SO9pNjo7gXANT7nEpXNsmk7ivXCy585fy2DV0KsT9vkq5uj3A026DR+nbehvk7Lnd3U99qBv3i/tkK07erTt2HvK7KG+PwwwY++/ogphbdljBTXN3Piw7JN4+BHcevFMNlg+iyg6oN691Ne/8ABaJG8tPQWx40tW8hGBOezvuv3d0uZrno3ciqHla70Sm0TxVUE/rnYif5mm9E/HTPK13Iqh5Pnongprno3ciIrVp914lUzPrnQhq3lRl9vRuOvjLAyGEEEIiwcUDIYQQQiLBxQMhhBBCIsHFAyGEEEIiMW8Nk+0L0eDX1oLd1KbyrlmoaBQGyo+dh9jSTnf++nosdJII0DxTUoals4M4d6HgXkMlNKrpxLAD2tSUW3GmtgtNnJ/5X/4NxP6wwS1skq9g4ZHmRjTm1KjulCv7jkFO19g5iBWKrnlnbASL2Zw/i6bGiip0EsTxviSNykMJda/KMeO5GJ95KpidtXPnLrTcbc7ucsYHMmjGkm6s9pRVj9QqTpRbawTJZYDRLXMXhrZuNSpHeaA1r60FC95NGAbGSfV9zE8aeteBelqvitklEjPrnQhqntY7ET/N03on4qd5Wu9E/DRP65
0Iap6P3omg5vnonQhqno/eiaDmlYziUlM1s/tbAX95IIQQQkgkuHgghBBCSCS4eCCEEEJIJLh4IIQQQkgk5q1hMh5H00gqgbG6OtcIFBhmk9ERrFomMbcimdWJLp1GA1FDi1v9a2gUqzLmC+7cJaNLWiVA80654BqISsNo3qltxs+3cFGXM25bgBXRjKJlMjXsVkUr5NH4VCxiCc1y3p0sLKN5J8SQlEvuXBXjvlhU1HGhYY5KGu9LkMZ77ENnp2tjDDdjTt8u1yC5uQ+7sj6y8wDEZIs7tBpoZg1j3iH1kTc9gsd1ftnomHlAzbUHUiS7HWPGlfuhG4l+0shZVe3kc8caw7m6Ro03G4Uju2+3ZquifKWIJJWZLp5ATaqpQ2N3pc5990cGDL1LYQXGVK07v6l3BRSOoXOu5mm9ExEplQzNy7vfx3IMDZM+mqf1TkSkrdbQPDWeymEVSK15Pnongprno3cihuZV8DitdyIiUnJPUFuDepcMq9O76eAvD4QQQgiJBBcPhBBCCIkEFw+EEEIIicS89TyEMdzvq8SNjSXd0c1IqavDPeTRUXeTMzeG+1rNrViwpE4VoUrV1EBOWCm/51hEpGTsa+Un3LknRrHYS3IY9wnHi+5jXpC50ut8ufP9znhqGDd+E8YblIq7e2uWzySAHUeRoFJSY6MYirF5WC67G4NG+RmzOFhg+CB82HKf2/kyK/geHDi9U0V240TbDlZ1frPPpfrQe4yGnda7L+vduRrWe52teu6Zzclmh4xh4Ni8zR0ftnwtyp9y7xrjTpn2BveEviWjJpPuQ47F8csXS6MuSuh+P+oWzKx3Iqh5PnonIpJqdjUvDFHfwpiheQn3+58fw7l9NE/rnYif5mm9E0HN89E7EdQ8U++MlstBQRW8s/QuQCNEoKRs0uigWa3eTQd/eSCEEEJIJLh4IIQQQkgkuHgghBBCSCS4eCCEEEJIJOatYVIsE47Rqa2izIgxIyeZwttQUWa9/n4001yxGtsbNje4nT1rkjh3Mumu2WriaJwpTQ5DTMpuZ7ZEHI0zpQLar0bPnnLG+RIaZ8IiHjd5/oR7emPuunosviLKtFUpoJEsYZh3Qm2sNIo9xQzTnw5Vysa7UTbucR473flwoLvTI+t+dQFWjlUCShnXLPenGNWIfDDncvE3R2rzXJXXNIugnc+OaXZj/S7p3OeOPW6dZ5KI31UZKJNhGLP0Do1yWvN89E4ENc9H70RQ87TeiXhqXhm/nz6ap/VOxE/ztN6JoOb56J0Iap6ld8mUYSRXAjdpFIkKrcJRSvNmU++mg788EEIIISQSXDwQQgghJBJcPBBCCCEkEu8rz0Mgxh6SKiBk7YeXjQZMtWm3eczQ+SHIGR0Zhli86O51lYpTkFMquntP5Tw2z6ptaIVYoeR+liljH72iq4WISEw11EkYxoGpIl7D8Dl3HzttFOFqaaqHWDLmvlal0gTkFI39tzBwjyuHxv6tURgnsDYBFZWKtXloGhFmpLNv5rZQ2hVh9FUS6TP2vrUNwjgwW+WWuR9YXSojd+M1yFXOuNuwPBwwrlOHrI9iuSf0bdktX8akg/hcujNuIS5r7vUe99P3Ov2o8kilXYHxXQ+M91xrXrk4s96JiAydczXP1DvDz6Q1T+udyDSaV+9qXsH4fKbmhW6e1jsRP83TeieCmrfY0rsKatJQwdW84iTeg3yAx2nNuxz0bjr4ywMhhBBCIsHFAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjF/DZNG0Y1Ad9AUkXjCLeARltC4EjdMKZUa10A0NYHd3E6++SLEamTSGU+cweJSxbg79/joMOQ0NKJhMqlauk1NYtGm7CCaE+vb3HvQGsPCLoMnjkDsjRd+6oxXr14JOR2L2yFWmBxxxuO5UcgZH0cjaVhy70s8bhRfMYxA2jgWxDAnXWd0Ny3PbDyyOGDVdpoJwyOXsWpNeXjpMoZjEkPGRKbT8j41NsygmU9gTB/lae
LUV+VrHTygrr1T9mDSOpztopauqtaP5ltcKq3e87ihd0a34bDgap6pd4KGSa15J1839C42CTGtecUA5x4fHoFYgzJMJpOGydFD8+rzeA98NE/rnYjI6itdzetoR72bLONnGR93NW98cma9E0HN89E7EdS82dS76eAvD4QQQgiJBBcPhBBCCIkEFw+EEEIIiQQXD4QQQgiJxLw1TMYEzR9WV80gcGNBAs00Md3NUUTStW4sP4TnO/n6SxCriRWd8cQEVkks1ix0xuOjaLipLMauaKmaWmdsVdTs7RvGaxp2K8ydP3EYr+n82xCLqw59tQ3YUS6VSkMsp6pxDg8O4PkqTRCLaROs0VXT8paFId4rPNCq1FY0Ej0Ye0YF7sCcRjU2zJFZw86X6VujIkaJSSOU2aUCm/U8ItJpxGSbe00HDEOa5Trcqi9im5Hk0X00/FsjiE85m8Hr8jqfnsqaRj8rE/9+o3OF7o4ZWt8Po7pikHI1z9Q7w4CuNe/krw29q+B3aKLg6kaxbiHkjI+igbCyyL2GVKIFcgIZhFhvn/suar0TmU7z3OPiFaPyZpNrkEzVoG7ljG6cWvOKZQ+9EwHNuyz0bhr4ywMhhBBCIsHFAyGEEEIiwcUDIYQQQiIxbz0PqVQKYpUS7gWFFTemi0aJ2PtKyaQq1lHbBjn5EhY/Wtju7u+N9vZCztlzbvGVs/1nIGfZlVYxK7eoSFMLFpJqrDsLscnxYWc8OIx7dGEJN4Mb29zCPO2ZJZATGPunhSl3rqGzuE9ZShl7gEnlKzGK7sSMp1Usqb084zjrOotW5zkP9t3rVonatRn32p/YpCNWX80GiOQazf6bLmuNTfq1+vlh1aZO0V4Nkb4+dbOyeE2S+yyE1jy+0z1bp1E5qw99All1HzKdVmEnvPbD+l5143vQAL0335ltRkw7hX4Oxn2ZLbw8FyJ1oet5KhjdasMYaqDWPFPv0kZBtjpX8/LlmfVOBDXvrFG47mzW0Lxla51xPIXf2aa6RRBrTLnPanJoGHJMzSu7GtvYhvrWvqjLGQeGl6BgFK4aGnA1z0vvREC7tM9FRKRYNLwL6jirkFSRRaIIIYQQcinh4oEQQgghkeDigRBCCCGR4OKBEEIIIZGYt4ZJq4FdMmWYIUOVWcECImGAppRy2TUeJevQoHnVhk9DrHPlNc646wwaJl/95U+c8bmBU5AzcgYNPtr0FxqGqeZ6vM5U3I2V6tG8MzE6BLHRnFvoZGIIO4RWxrEA1NlTR51xsYhPK1ZjPSsdMYrgGM8qrrqNhmU8rlzEYl2VEsZ86NztFoXavhsNfluVh3LP5k2Qs2szdrDsPqgCGatdpdH5Elx3mJPpu9+Yyu2q2fkIpuS6sZBTY7drRDycWQs5YtSk2pR1TY2bdhmGxsP4mQ91up/niQfxsGzmJxiU23SWkWOhTbCersZqpvYkEPd9rTe6zoYlfPdLRVfzwqShd4bZPKlM6VfdMrPeiaDmvfoLfC7nTqOWjPS7mhdksItvWBiHWHPaLVSXCrBwXakWC9zlR1Xny2HsnDx51v0slXE0NJ81TPHFvPscYtafTfhHkWjNCwx7azyGf2xrzSvnDb2rVKd308FfHgghhBASCS4eCCGEEBIJLh4IIYQQEgkuHgghhBASiflrmDQck5b/JKb8JqFRVTBuVPFKJF1z4lQJ11nnh7CCXvD2EXdctwByWpde5Yxzh9AcqU2HIiLlinudY2NY8S2fx88SxtzqeCmj2mKsCatVFhOuiWpyCs1KYwNoQBsfcY1HQRzvgVXmToesLqmWyVFXEZW4YSgynnu1K+dMdreKoJkuk3WNVd0H0Gi1+f7NEMuC0RGNj5vXYSXF7CZVufF5o9riActoqR2aSNY67MAhZ9hnVIW0qmriVeHkVgXETVk3uu02nHv3OnR79qnXs9N4VruNEpP6ETdaZSg9ioHebfgzs9oEu2XmeUREgtA1AobGGxwzXHhJ9f0IDd9cIo13XW
ve+DnUu8HyEYjFlOYt6boKcnKHTmPs1FvOOG6Y28fGhiFWzrvXGQ/qIKcuiZpXampxxlrvREQqSvPGBvChTw2fg1gs7s5tFP4UQ94kVF1RTb0rG5MpzYuXUO+C2S0wyV8eCCGEEBINLh4IIYQQEgkuHgghhBASiXnreShPYYGkomGEqKlxO9HFjG6c8bjhAYi5c9UahTn6ew5D7O1fjzjjVBN2gatNu2u21gWYs2jpKjyuwd1LjCex8MjAKBY6qRTdPbKKtddmOEbaFrkd8xZ1tENOrBW7jY6PuXuA2TO4VzplmVZCVczG2POsWOXBVLGcWGjsCRrHhebu+szs2ewWiTog90HO/Z1u0aRw13pjppk9AbtPo5cg1mn4FMI97jgwNtK34Ab8l5UXY2vWqGBk2Rmy7ufLGjkH9mOsGxoX4vn2G/elU8+f3Qs5W0z7hntHt+40On1a196nPCS5PZCz83kVMOaxSlJtFtQNH6am3EJuxRB1q6YGu3/WpZQHII7vfdLYlA9r3LwT/Xjdbx4ZgZjWvNoQ/47augB1Y4HSPK13IiKBoXkJpXkFY3O/bDSi1Fktiwx/mtK8WGsL5DSPoedBa96wWfDOKFhYcmOVCh6XELwHMVXkq2zoa8oS/guAvzwQQgghJBJcPBBCCCEkElw8EEIIISQSXDwQQgghJBLz1jBpdRtLxHAtBFmGcyZIGAYUZaKMgb1GJN2yEGL1DW7HyliA15RQ3fHaFuA8MV34SEQmR9zCKsm6FrymNBpCJydVoRHjPlUKRle9Fve+TJzHwi6FHHbjrGl0jUeN9djlrjiJ5yuV1eto+HsSRhCMjzGjg53xppcMM5IP27e7hY06+9BMt1nc2JbN6KY70IkmSigSZXZg3IUhuC1GB01d20pEHsm5Zs89xgm7d6GBsbHP/TyHuvG5HOzEklD373cvYt2B7ZDTmdkKsT5tMtyEcx82Gl+OyQ43EFqVnWbutGllbLhdBSw/mvWK6cJ1M579HSoJ98CEZcKL5yE2Ja7mJZN4o0oJNF8m1Pma0jPrnQhqntY7EZG2FjSJV9TXNjeCdz1Zh9eQqHUNoZUJQzsNc2K5OOWMm2pqIGd45KQzLkyiQbSmCY2WyVZX8+I+emeQMCpJWUbvSqBunlEQaqpQnd5NB395IIQQQkgkuHgghBBCSCS4eCCEEEJIJLh4IIQQQkgk5q9hMoUmPKNwmoTKVBSU0LhSrmC3SEm4tyaWxFuVNKqdaaOl5aFa1HWFM+7IoOFm4sxxiFXyblXNdALNn2XDYBNThtDAMFpVDMNUJdXizp3G44xGm1LbtMId1xuVIk+ewVioXD5GlbSwbFVqUzlGNdDAMK4GRndRH7KZrBobTkRtPNSdFEUkK2i0xDKFM5v5vAmtUpFuJcqc0eVy71bDtakKGY4FRtfJcBvGut17ddBo/nnQrMCoO5DeY+RYuNVA4WUR8XMsVlucbxaL+gU1yoTnoXciIkHgGhbLRTT9SYjfmZj648FH70TwI2u9ExHpWGxpnvuuV0pYRTgdMzRPXVYsgXpjap76/lfq0Uharndf9KkCmiq13omg5lVOYudk0DsR0DwfvRNBzZtNvZsO/vJACCGEkEhw8UAIIYSQSHDxQAghhJBIeHke/vRP/1SeeeYZOXHihLz00kty0003iYjIkSNH5Itf/KKcO3dOmpub5Zvf/KZce+21M/632aBieBcCw5egC5bE4tZ6yejCVnbnD4wNxsDoUqY7dIZxvKbatuXOOHPdRshJ3YhXOTXmFmQ623sEcop92MqwVHaLoSSNfcp4Au9LvuDmpVevg5wubJMojW0dzvh8/ynIOd77JJ6v6HaiixlFnKwt5EB11Qwr+DzDMsaKBSyo4wN2YbTaObqxTBY39zNWvSLlcch2G0k+++jmPr7hn9DeBbMqlVF9SZMzrtM6zKtOjXVjtD/E8IuMWZ4Odd/7sOukdGKnTZ+PbBXd8uNWd2g0QLWoqMJxPnonIh
LTf0c0vh/lvKGnSvN89E4ENU/rnYhI5npD825yx1rvRKbRvNOu5pXKRsE7D83TeieCmmfqXWsHxLTmHT+BHq98Ad+7mPI8mHqnq2mJSKg6ic6m3k2H1y8Pn/70p+XnP/+5LF/uvgTbtm2TrVu3yuHDh+XP/uzP5M477/T6b4QQQgiZv3gtHm655Rbp6upyYgMDA7J//3753Oc+JyIif/AHfyCnTp2So0ePvud/I4QQQsj8pmrPw6lTpySTyUjiH/9JYxAEsmzZMjl58uR7/jeLHTt2SFdX17v/GxszfkYkhBBCyGXBZWGYvPvuu6W3t/fd/zU0GPuShBBCCLksqLpI1NKlSyWbzUqpVJJEIiFhGMrJkydl2bJl0tTUNO1/my3KBezUJoZJJFQemHgSu04mUkYXRlVsKW5U5qgYxhxt1osblpfTR153xlMjWLSlta0VYoWiWyBl5Pw5yCmWDVOT6qIZlrGIiuVkK+TOO+MjL/0Ccs70oFmoodW99tzwecgpTWGxl6QyPpaNIirlovXc3VipYhSlKmEsNIpQ+ZA57C5uzTJOa9e6OYaXb51huOtUkx3IGrN7hLqN8x0wjst8WQXvMopZPWwUbdJNO7caC/4c/oJoWRo12FNTRFRnz0w3Gjv3WPdK3ePNe9C4uk02QSzULkajEFiQ1QW1rDfBKJ4lz+uTeVHW3XELht6hvIHmJWRmvRMRiZfcWKVo6F2A1xBPu8edfu11yDE1r8HVjUIcNWKkz9A8dZ1x4+/EYXFmzSuMoE4decHVvIE21Lv6BajVuSF3rnIOP0uj8RxKJaXVRSyUJQFqoNa80NC7RJV6Nx1V//LQ3t4u69atk8cee0xERJ566inp6uqSVatWved/I4QQQsj8xuuXh23btsl3v/td6e/vl9/+7d+WxsZGOXr0qOzcuVPuvPNOeeCBB6SpqUkeffTRd495r/9GCCGEkPmL1+Jh586dZnzt2rXywgsvRP5vhBBCCJm/XBaGSUIIIYTMH4KwWtfYHNLV1SW9vb2X+jIIIYSQf7a815/F/OWBEEIIIZHg4oEQQgghkeDigRBCCCGR4OKBEEIIIZHg4oEQQgghkeDigRBCCCGR4OKBEEIIIZHg4oEQQgghkbgsi0Sl02lZtGiRjI2NsT33RYb3/OLDe37x4T2/+PCeX3wu9J6fPXtW8vm8+d8uy8XD/4CVJi8+vOcXH97ziw/v+cWH9/ziM5f3nNsWhBBCCIkEFw+EEEIIicRlvXi4++67L/Ul/LOD9/ziw3t+8eE9v/jwnl985vKeX9aeB0IIIYRcflzWvzwQQggh5PKDiwdCCCGERIKLB0IIIYRE4rJcPBw5ckQ+8pGPyJo1a2TDhg3y+uuvX+pLet8xNTUlv/d7vydr1qyRG2+8UT7+8Y/L0aNHRURkYGBAfud3fkdWr14t1113nfz0pz+9xFf7/uLRRx+VIAjk7/7u70SE93uuyefz8tWvflVWr14t119/vXzuc58TEerMXPH3f//3sm7dOrnpppvkuuuuk7/5m78REb7ns8mf/umfyooVKyQIAnn55Zffjb/XOz3r73t4GXLbbbeFjz76aBiGYfjf/tt/C9evX39pL+h9yOTkZPjd7343rFQqYRiG4V//9V+Ht956axiGYfiv//W/Du+7774wDMPwxRdfDJcsWRIWCoVLdKXvL44fPx5++MMfDjdu3Bh+5zvfCcOQ93uu+bf/9t+GX/3qV99917PZbBiG1Jm5oFKphAsWLAhfeeWVMAzfed/T6XQ4OjrK93wWef7558NTp06Fy5cvD1966aV34+/1Ts/2+37ZLR7OnDkTNjY2hsViMQzDd17Gjo6O8MiRI5f4yt7f7Nu3L1y+fHkYhmFYX1//rsCGYRhu2LAh/OEPf3iJruz9Q7lcDj/2sY+F+/fvD2+99dZ3Fw+833PH2NhY2NjYGI6MjDhx6szcUKlUwoULF4bPP/98GIZh+Morr4SdnZ1hPp/nez4H/N
PFw3u903Pxvl922xanTp2STCYjiURCRESCIJBly5bJyZMnL/GVvb/5y7/8S/nkJz8pg4ODUiwWZfHixe/+txUrVvD+zwI7duyQm2++Wbq7u9+N8X7PLceOHZOFCxfKAw88IOvXr5ePfvSj8txzz1Fn5oggCORb3/qWfOpTn5Lly5fLb/zGb8jf/M3fSC6X43s+x7zXOz0X73tiVq6azGseeOABOXr0qDz33HMyOTl5qS/nfclrr70mTz31FPd5LzKlUklOnDgh11xzjfzFX/yFvPTSS/Lxj39cvvvd717qS3tfUiqV5M///M/l6aeflltuuUX27dsnn/jEJ5x9efL+4LL75WHp0qWSzWalVCqJiEgYhnLy5ElZtmzZJb6y9ycPPvigPP300/K9731P6urqpLW1VRKJhPT397+b09PTw/t/gfzsZz+Tnp4eWb16taxYsUJ++ctfytatW+Xb3/427/ccsmzZMonFYvLZz35WREQ+8IEPyBVXXCEnTpygzswBL7/8svT19cktt9wiIiIbNmyQrq4uefXVV/mezzHv9WfnXPy5etktHtrb22XdunXy2GOPiYjIU089JV1dXbJq1apLfGXvP3bs2CFPPPGE/PCHP5SWlpZ345/5zGfk4YcfFhGRffv2yenTp+XWW2+9RFf5/uArX/mKZLNZ6enpkZ6eHtm4caPs2rVLvvKVr/B+zyFtbW3ysY99TH7wgx+IiMjx48fl+PHjcvPNN1Nn5oD/8YfUm2++KSIiR48elWPHjsnatWv5ns8x7/Vn55z8uXpBbo054q233go3btwYrl69Ouzu7g5fffXVS31J7ztOnToViki4cuXK8MYbbwxvvPHG8IMf/GAYhmHY398ffvzjHw9XrVoVXnPNNeGPf/zjS3y17z/+qWGS93tuOXbsWLhp06bwuuuuC2+44YbwySefDMOQOjNXPP744+/e6+uuuy7827/92zAM+Z7PJlu3bg2XLFkSxuPxsL29PbzyyivDMHzvd3q233f2tiCEEEJIJC67bQtCCCGEXN5w8UAIIYSQSHDxQAghhJBIcPFACCGEkEhw8UAIIYSQSHDxQAghhJBIcPFACCGEkEhw8UAIIYSQSPz/Abkqwnqwvep0AAAAAElFTkSuQmCC\n",
"text/plain": [
"<Figure size 640x640 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import random\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib\n",
"%matplotlib inline\n",
"\n",
"def imshow(img):\n",
" fig = plt.figure(figsize=(8, 8), dpi=80, facecolor='w', edgecolor='k')\n",
" npimg = img.numpy()\n",
" plt.imshow(np.transpose(npimg, (1, 2, 0)))\n",
" plt.show()\n",
" \n",
"def get_pairs_of_imgs(idx):\n",
" clean_img = clean_train_dataset.data[idx]\n",
" unlearnable_img = unlearnable_train_dataset.data[idx]\n",
" clean_img = torchvision.transforms.functional.to_tensor(clean_img)\n",
" unlearnable_img = torchvision.transforms.functional.to_tensor(unlearnable_img)\n",
"\n",
" x = noise[idx]\n",
" x_min = torch.min(x)\n",
" x_max = torch.max(x)\n",
" noise_norm = (x - x_min) / (x_max - x_min)\n",
" noise_norm = torch.clamp(noise_norm, 0, 1)\n",
" return [clean_img, noise_norm, unlearnable_img]\n",
" \n",
    "selected_idx = [random.randint(0, 49999) for _ in range(3)]\n",
"img_grid = []\n",
"for idx in selected_idx:\n",
" img_grid += get_pairs_of_imgs(idx)\n",
" \n",
"\n",
"imshow(torchvision.utils.make_grid(torch.stack(img_grid), nrow=3, pad_value=255))\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<h3>Train ResNet18 on Unlearnable Dataset</h3>"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 36.99 Loss: 1.73: 100%|██████████| 391/391 [00:20<00:00, 19.17it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 35.16\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 76.95 Loss: 0.65: 100%|██████████| 391/391 [00:20<00:00, 19.51it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 22.21\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 93.06 Loss: 0.21: 100%|██████████| 391/391 [00:20<00:00, 19.51it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 23.69\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 95.13 Loss: 0.15: 100%|██████████| 391/391 [00:20<00:00, 19.37it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 25.79\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 96.16 Loss: 0.12: 100%|██████████| 391/391 [00:20<00:00, 19.23it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 20.87\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 96.78 Loss: 0.10: 100%|██████████| 391/391 [00:19<00:00, 19.56it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 19.92\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 96.89 Loss: 0.10: 100%|██████████| 391/391 [00:19<00:00, 19.65it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 19.44\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 97.22 Loss: 0.08: 100%|██████████| 391/391 [00:20<00:00, 19.45it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 19.08\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 97.35 Loss: 0.08: 100%|██████████| 391/391 [00:20<00:00, 19.47it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 22.07\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 97.58 Loss: 0.07: 100%|██████████| 391/391 [00:20<00:00, 19.37it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 17.37\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 97.89 Loss: 0.07: 100%|██████████| 391/391 [00:20<00:00, 19.43it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 20.82\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 97.85 Loss: 0.07: 100%|██████████| 391/391 [00:19<00:00, 19.56it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 18.45\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 98.05 Loss: 0.06: 100%|██████████| 391/391 [00:19<00:00, 19.59it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 19.74\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 98.18 Loss: 0.06: 100%|██████████| 391/391 [00:20<00:00, 19.30it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 19.36\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 98.30 Loss: 0.05: 100%|██████████| 391/391 [00:19<00:00, 19.55it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 22.84\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 98.53 Loss: 0.05: 100%|██████████| 391/391 [00:20<00:00, 19.44it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 22.93\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 98.61 Loss: 0.04: 100%|██████████| 391/391 [00:20<00:00, 19.52it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 16.04\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 99.00 Loss: 0.03: 100%|██████████| 391/391 [00:20<00:00, 19.43it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 17.80\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 99.08 Loss: 0.03: 100%|██████████| 391/391 [00:20<00:00, 19.33it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 22.51\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 99.22 Loss: 0.02: 100%|██████████| 391/391 [00:20<00:00, 19.32it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 23.77\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 99.44 Loss: 0.02: 100%|██████████| 391/391 [00:20<00:00, 19.22it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 23.28\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 99.57 Loss: 0.02: 100%|██████████| 391/391 [00:20<00:00, 19.13it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 19.66\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 99.60 Loss: 0.01: 100%|██████████| 391/391 [00:19<00:00, 19.64it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 25.13\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 99.76 Loss: 0.01: 100%|██████████| 391/391 [00:20<00:00, 19.43it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 23.19\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 99.83 Loss: 0.01: 100%|██████████| 391/391 [00:19<00:00, 19.55it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 22.05\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 99.89 Loss: 0.01: 100%|██████████| 391/391 [00:19<00:00, 19.56it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 22.94\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 99.88 Loss: 0.00: 100%|██████████| 391/391 [00:20<00:00, 19.42it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 23.66\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 99.94 Loss: 0.00: 100%|██████████| 391/391 [00:20<00:00, 19.44it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 23.19\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 99.95 Loss: 0.00: 100%|██████████| 391/391 [00:20<00:00, 19.53it/s]\n",
" 0%| | 0/391 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 22.83\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Acc 99.94 Loss: 0.00: 100%|██████████| 391/391 [00:20<00:00, 19.46it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clean Accuracy 23.60\n",
"\n"
]
}
],
"source": [
"from util import AverageMeter\n",
"\n",
"model = ResNet18()\n",
"model = model.cuda()\n",
"criterion = torch.nn.CrossEntropyLoss()\n",
"optimizer = torch.optim.SGD(params=model.parameters(), lr=0.1, weight_decay=0.0005, momentum=0.9)\n",
"scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=30, eta_min=0)\n",
"\n",
"unlearnable_loader = DataLoader(dataset=unlearnable_train_dataset, batch_size=128,\n",
" shuffle=True, pin_memory=True,\n",
" drop_last=False, num_workers=12)\n",
"\n",
"\n",
"for epoch in range(30):\n",
" # Train\n",
" model.train()\n",
" acc_meter = AverageMeter()\n",
" loss_meter = AverageMeter()\n",
" pbar = tqdm(unlearnable_loader, total=len(unlearnable_loader))\n",
" for images, labels in pbar:\n",
" images, labels = images.cuda(), labels.cuda()\n",
" model.zero_grad()\n",
" optimizer.zero_grad()\n",
" logits = model(images)\n",
" loss = criterion(logits, labels)\n",
" loss.backward()\n",
" torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)\n",
" optimizer.step()\n",
" \n",
" _, predicted = torch.max(logits.data, 1)\n",
" acc = (predicted == labels).sum().item()/labels.size(0)\n",
" acc_meter.update(acc)\n",
" loss_meter.update(loss.item())\n",
" pbar.set_description(\"Acc %.2f Loss: %.2f\" % (acc_meter.avg*100, loss_meter.avg))\n",
" scheduler.step()\n",
" # Eval\n",
" model.eval()\n",
" correct, total = 0, 0\n",
" for i, (images, labels) in enumerate(clean_test_loader):\n",
" images, labels = images.cuda(), labels.cuda()\n",
" with torch.no_grad():\n",
" logits = model(images)\n",
" _, predicted = torch.max(logits.data, 1)\n",
" total += labels.size(0)\n",
" correct += (predicted == labels).sum().item()\n",
" acc = correct / total\n",
" tqdm.write('Clean Accuracy %.2f\\n' % (acc*100))\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.2"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
================================================
FILE: README.md
================================================
# Unlearnable Examples
Code for ICLR2021 Spotlight Paper ["Unlearnable Examples: Making Personal Data Unexploitable"](https://openreview.net/forum?id=iAmZUo0DxC0) by Hanxun Huang, Xingjun Ma, Sarah Monazam Erfani, James Bailey, Yisen Wang.
## Quick Start
##### Use the QuickStart.ipynb notebook for a quick start.
In the notebook, you can find the minimal implementation for generating sample-wise unlearnable examples on CIFAR-10.
Please remove `mlconfig` from `models/__init__.py` if you are only using the notebook and copy-paste the model to the notebook.
## Experiments in the paper.
Check the `scripts` folder for the `*.sh` script corresponding to each experiment.
## Sample-wise noise for unlearnable example on CIFAR-10
##### Generate noise for unlearnable examples
```console
python3 perturbation.py --config_path configs/cifar10 \
--exp_name path/to/your/experiment/folder \
--version resnet18 \
--train_data_type CIFAR10 \
--noise_shape 50000 3 32 32 \
--epsilon 8 \
--num_steps 20 \
--step_size 0.8 \
--attack_type min-min \
--perturb_type samplewise \
--universal_stop_error 0.01
```
##### Train on unlearnable examples and eval on clean test
```console
python3 -u main.py --version resnet18 \
--exp_name path/to/your/experiment/folder \
--config_path configs/cifar10 \
--train_data_type PoisonCIFAR10 \
--poison_rate 1.0 \
--perturb_type samplewise \
--perturb_tensor_filepath path/to/your/experiment/folder/perturbation.pt \
--train
```
## Class-wise noise for unlearnable example on CIFAR-10
##### Generate noise for unlearnable examples
```console
python3 perturbation.py --config_path configs/cifar10 \
--exp_name path/to/your/experiment/folder \
--version resnet18 \
--train_data_type CIFAR10 \
--noise_shape 10 3 32 32 \
--epsilon 8 \
--num_steps 1 \
--step_size 0.8 \
--attack_type min-min \
--perturb_type classwise \
--universal_train_target 'train_subset' \
--universal_stop_error 0.1 \
--use_subset
```
##### Train on unlearnable examples and eval on clean test
```console
python3 -u main.py --version resnet18 \
--exp_name path/to/your/experiment/folder \
--config_path configs/cifar10 \
--train_data_type PoisonCIFAR10 \
--poison_rate 1.0 \
--perturb_type classwise \
--perturb_tensor_filepath path/to/your/experiment/folder/perturbation.pt \
--train
```
---
## Cite Our Work
```
@inproceedings{huang2021unlearnable,
title={Unlearnable Examples: Making Personal Data Unexploitable},
author={Hanxun Huang and Xingjun Ma and Sarah Monazam Erfani and James Bailey and Yisen Wang},
booktitle={ICLR},
year={2021}
}
```
================================================
FILE: collect_results.py
================================================
import argparse
import collections
import json
import os
import numpy as np
import dataset
import mlconfig
import models
import torch
import util
from evaluator import Evaluator
from tabulate import tabulate
# Command-line interface (no options defined yet; parsing still rejects unknown flags).
parser = argparse.ArgumentParser(description='ClasswiseNoise')
args = parser.parse_args()
# Select the compute device once at import time; the module-level `device`
# is used by load_results() below when instantiating models.
if torch.cuda.is_available():
    torch.backends.cudnn.enabled = True
    # benchmark=True lets cuDNN pick the fastest convolution algorithms
    # (non-deterministic, but fine for result collection).
    torch.backends.cudnn.benchmark = True
    device = torch.device('cuda')
    device_list = [torch.cuda.get_device_name(i) for i in range(0, torch.cuda.device_count())]
    print("GPU List: %s" % (device_list))
else:
    device = torch.device('cpu')
print("PyTorch Version: %s" % (torch.__version__))
def load_results(targt_exp, model_name):
    """Load the training ENV dict of a finished run, or None when unavailable.

    Returns None when either the config YAML or the checkpoint .pth is
    missing, or when the checkpointed epoch does not match the configured
    number of epochs (i.e. the run did not finish). Any numpy arrays stored
    under 'cm_history' are converted to plain lists so the result can be
    serialized to JSON later.
    """
    config_file = os.path.join(targt_exp, model_name + '.yaml')
    checkpoint_path_file = os.path.join(targt_exp, 'checkpoints', model_name)
    have_config = os.path.isfile(config_file)
    have_checkpoint = os.path.isfile(checkpoint_path_file + '.pth')
    if not (have_config and have_checkpoint):
        return None
    config = mlconfig.load(config_file)
    config.set_immutable()
    model = config.model().to(device)
    checkpoints = util.load_model(filename=checkpoint_path_file, model=model,
                                  optimizer=None, scheduler=None)
    # Ignore runs that stopped before the configured final epoch.
    if config.epochs != checkpoints['epoch']:
        return None
    env = checkpoints['ENV']
    if 'cm_history' in env:
        env['cm_history'] = [entry.tolist() if isinstance(entry, np.ndarray) else entry
                             for entry in env['cm_history']]
    return env
if __name__ == '__main__':
    # Experiment folders to summarise. Each folder is expected to contain
    # poison_train_<rate>/<model_name>/ sub-directories produced by main.py.
    # FIX: a duplicated 'min-min_classwise/CIFAR10-eps=24-se=0.01' entry was
    # removed so each experiment is loaded and printed exactly once.
    exp_names = [
        'experiments/cifar10/random_samplewise/CIFAR10-eps=8',
        'experiments/cifar10/min-max_samplewise/CIFAR10-eps=8-se=0.9-base_version=resnet18',
        'experiments/cifar10/min-min_samplewise/CIFAR10-eps=8-se=0.1-base_version=resnet18',
        'experiments/cifar10/min-min_samplewise/CIFAR10-eps=8-se=0.01-base_version=resnet18',
        'experiments/cifar100/min-min_samplewise/CIFAR100-eps=8-se=0.3-base_version=resnet18',
        'experiments/cifar100/min-min_samplewise/CIFAR100-eps=8-se=0.01-base_version=resnet18',
        'experiments/svhn/min-min_samplewise/SVHN-eps=8-se=0.1-base_version=resnet18',
        'experiments/svhn/min-min_samplewise/SVHN-eps=8-se=0.01-base_version=resnet18',
        'experiments/imagenet-mini/min-min_samplewise/ImageNetMini-eps=16-se=0.1-base_version=resnet18',
        'experiments/cifar10/random_classwise/CIFAR10-eps=8/',
        'experiments/cifar10/min-max_classwise/CIFAR10-eps=8-se=0.8-base_version=resnet18',
        'experiments/cifar10/min-min_classwise/CIFAR10-eps=8-se=0.1-base_version=resnet18',
        'experiments/cifar10/min-min_classwise/CIFAR10-eps=8-se=0.01-base_version=resnet18',
        'experiments/cifar100/min-min_classwise/CIFAR100-eps=16-se=0.1-base_version=resnet18',
        'experiments/cifar100/min-min_classwise/CIFAR100-eps=8-se=0.01-base_version=resnet18',
        'experiments/svhn/min-min_classwise/SVHN-eps=8-se=0.1-base_version=resnet18',
        'experiments/svhn/min-min_classwise/SVHN-eps=8-se=0.01-base_version=resnet18',
        'experiments/imagenet-mini/min-min_classwise/ImageNetMini-eps=16-se=0.1-base_version=resnet18',
        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=16-se=0.1-base_version=resnet18',
        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=24-se=0.01-base_version=resnet18',
        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=24-se=0.1-base_version=resnet18',
        'experiments/cifar10-extension/min-min_samplewise/CIFAR10-eps=16-se=0.1-base_version=resnet18',
        'experiments/cifar10-extension/min-min_samplewise/CIFAR10-eps=16-se=0.01-base_version=resnet18',
        'experiments/cifar10-extension/min-min_samplewise/CIFAR10-eps=24-se=0.1-base_version=resnet18',
        'experiments/cifar10-extension/min-min_samplewise/CIFAR10-eps=24-se=0.01-base_version=resnet18',
        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=8-se=0.1-base_version=resnet18-2noise',
        'experiments/cifar10-extension/min-min_classwise/TinyImageNet-eps=16-se=0.1-base_version=resnet18',
        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=8-se=0.1-base_version=resnet18-random8',
        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=8-se=0.1-base_version=resnet18-random16',
        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=8-se=0.1-base_version=resnet18-random24',
        'experiments/cifar10-extension/min-min_samplewise/CIFAR10-eps=8-se=0.1-base_version=resnet18-random8',
        'experiments/cifar10-extension/min-min_samplewise/CIFAR10-eps=8-se=0.1-base_version=resnet18-random16',
        'experiments/cifar10-extension/min-min_samplewise/CIFAR10-eps=8-se=0.1-base_version=resnet18-random24',
    ]
    # Guard against future accidental duplicates while preserving order.
    exp_names = list(dict.fromkeys(exp_names))
    # Model/config variants evaluated for each experiment folder.
    model_list = [
        'resnet18',
        'resnet50',
        'dense121',
        'resnet18_augmentation',
        'resnet18_madrys',
        'resnet18_classpoison',
        'resnet18_classpoison_targeted',
        'resnet18_add-uniform-noise',
        'resnet18_add-uniform-noise-aug',
        'resnet18_cutout',
        'resnet18_cutmix',
        'resnet18_mixup',
    ]
    poison_rate_list = [0.0, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0]
    exp_results = {}
    for exp_name in exp_names:
        print(exp_name)
        table_data_header = ['Model'] + poison_rate_list
        table_data = [model_list]  # first column of the (pre-transpose) table
        exp_results[exp_name] = {}
        for poison_rate in poison_rate_list:
            target_dir = os.path.join(exp_name, 'poison_train_%.1f' % poison_rate)
            temp_list = []
            exp_results[exp_name][poison_rate] = {}
            for model_name in model_list:
                rs_env = load_results(os.path.join(target_dir, model_name), model_name)
                exp_results[exp_name][poison_rate][model_name] = rs_env
                if rs_env is not None:
                    # 'curren_acc' (sic) is the key the trainer writes into ENV.
                    temp_list.append('%.2f' % rs_env['curren_acc'])
                else:
                    temp_list.append('..')  # placeholder for missing/unfinished runs
            table_data.append(temp_list)
        # Transpose so rows are models and columns are poison rates.
        table_data = list(map(list, zip(*table_data)))
        print('=' * 40 + 'Results' + '=' * 40)
        print(tabulate(table_data, headers=table_data_header, floatfmt=".2f", stralign="left", numalign="left"))
        print('=' * (80 + len('Results')) + '\n')
    # Persist the raw per-run ENV dicts alongside the printed tables.
    with open('exp_results.json', 'w') as outfile:
        json.dump(exp_results, outfile)
================================================
FILE: configs/cifar10/dense121.yaml
================================================
# DenseNet-121 on clean CIFAR-10 (smaller batch to fit DenseNet memory use).
num_classes: 10
epochs: 60
grad_clip: 5.0
log_frequency: 100
model:
  name: DenseNet121
  num_classes: 10
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs  # resolved by mlconfig to the top-level 'epochs'
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 96
  eval_batch_size: 128
================================================
FILE: configs/cifar10/resnet18.yaml
================================================
# ResNet-18 on CIFAR-10: the baseline training configuration.
num_classes: 10
epochs: 60
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 10
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs  # resolved by mlconfig to the top-level 'epochs'
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/cifar10/resnet18_add-uniform-noise-aug.yaml
================================================
# ResNet-18 on CIFAR-10 with additive uniform input noise AND FastAutoAugment.
num_classes: 10
epochs: 60
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 10
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
  add_uniform_noise: True  # add uniform random noise to training images
  fa: True                 # enable FastAutoAugment policies
================================================
FILE: configs/cifar10/resnet18_add-uniform-noise.yaml
================================================
# ResNet-18 on CIFAR-10 with additive uniform input noise (no extra augmentation).
num_classes: 10
epochs: 60
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 10
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
  add_uniform_noise: True  # add uniform random noise to training images
================================================
FILE: configs/cifar10/resnet18_augement.yaml
================================================
# ResNet-18 on CIFAR-10 with FastAutoAugment.
# NOTE(review): content is identical to resnet18_augmentation.yaml and the
# filename ('augement') looks like a typo — confirm which one is referenced.
num_classes: 10
epochs: 60
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 10
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
  fa: True  # enable FastAutoAugment policies
================================================
FILE: configs/cifar10/resnet18_augmentation.yaml
================================================
# ResNet-18 on CIFAR-10 with FastAutoAugment (data-augmentation defense variant).
num_classes: 10
epochs: 60
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 10
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
  fa: True  # enable FastAutoAugment policies
================================================
FILE: configs/cifar10/resnet18_classpoison.yaml
================================================
# ResNet-18 on CIFAR-10 where poisoning is applied class-wise (all classes).
num_classes: 10
epochs: 60
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 10
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
  poison_classwise: True  # select poisoned samples per class instead of globally
================================================
FILE: configs/cifar10/resnet18_classpoison_targeted.yaml
================================================
# ResNet-18 on CIFAR-10 with class-wise poisoning restricted to selected classes.
num_classes: 10
epochs: 60
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 10
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
  poison_classwise: True
  poison_classwise_idx: [0, 1, 8, 9]  # only these class indices are poisoned
================================================
FILE: configs/cifar10/resnet18_cutmix.yaml
================================================
# ResNet-18 on CIFAR-10 trained with CutMix (soft-label cross-entropy loss).
num_classes: 10
epochs: 60
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 10
criterion:
  name: CutMixCrossEntropyLoss  # cross-entropy that accepts mixed one-hot targets
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
  use_cutmix: True
================================================
FILE: configs/cifar10/resnet18_cutout.yaml
================================================
# ResNet-18 on CIFAR-10 trained with Cutout augmentation.
num_classes: 10
epochs: 60
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 10
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
  use_cutout: True  # appends a Cutout transform to the train pipeline
================================================
FILE: configs/cifar10/resnet18_denoise.yaml
================================================
# ResNet-18 on CIFAR-10 with an image-denoising preprocessing defense.
num_classes: 10
epochs: 60
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 10
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
  img_denoise: True  # denoise training images before use
================================================
FILE: configs/cifar10/resnet18_madrys.yaml
================================================
# ResNet-18 on CIFAR-10 with Madry-style adversarial training (PGD).
num_classes: 10
epochs: 100
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 10
criterion:
  name: MadrysLoss
  epsilon: 0.03137254901   # 8/255 L-inf budget
  perturb_steps: 10
  step_size: 0.00784313725 # 2/255 per PGD step
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: MultiStepLR
  milestones: [75, 90, 100]
  gamma: 0.1
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/cifar10/resnet18_mixup.yaml
================================================
# ResNet-18 on CIFAR-10 trained with MixUp.
num_classes: 10
epochs: 60
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 10
criterion:
  # Soft-label cross-entropy reused for MixUp's mixed one-hot targets —
  # NOTE(review): confirm sharing CutMixCrossEntropyLoss here is intentional.
  name: CutMixCrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
  use_mixup: True
================================================
FILE: configs/cifar10/resnet50.yaml
================================================
# ResNet-50 on clean CIFAR-10.
num_classes: 10
epochs: 60
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet50
  num_classes: 10
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/cifar10/toy_cifar.yaml
================================================
# Small ToyModel on CIFAR-10 (used for quick demos; lower LR than the ResNets).
num_classes: 10
epochs: 80
grad_clip: 5.0
log_frequency: 50
model:
  name: ToyModel
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.025
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/cifar10/toy_cifar_madrys.yaml
================================================
# ToyModel on CIFAR-10 with Madry-style adversarial training.
num_classes: 10
epochs: 60
grad_clip: 5.0
log_frequency: 50
model:
  name: ToyModel
criterion:
  name: MadrysLoss
  epsilon: 0.03137254901   # 8/255 L-inf budget
  perturb_steps: 10
  step_size: 0.00784313725 # 2/255 per PGD step
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: MultiStepLR
  # NOTE(review): all milestones exceed epochs (60), so the LR never decays
  # during this run — confirm this is intentional.
  milestones: [75, 90, 100]
  gamma: 0.1
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/cifar100/dense121.yaml
================================================
# DenseNet-121 on clean CIFAR-100.
num_classes: 100
epochs: 100
grad_clip: 5.0
log_frequency: 100
model:
  name: DenseNet121
  num_classes: 100
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-5
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 96
  eval_batch_size: 128
================================================
FILE: configs/cifar100/resnet18.yaml
================================================
# ResNet-18 on clean CIFAR-100.
num_classes: 100
epochs: 100
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 100
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-5
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/cifar100/resnet18_madrys.yaml
================================================
# ResNet-18 on CIFAR-100 with Madry-style adversarial training.
num_classes: 100
epochs: 100
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 100
criterion:
  name: MadrysLoss
  epsilon: 0.03137254901   # 8/255 L-inf budget
  perturb_steps: 10
  step_size: 0.00784313725 # 2/255 per PGD step
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: MultiStepLR
  milestones: [75, 90, 100]
  gamma: 0.1
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/cifar100/resnet50.yaml
================================================
# ResNet-50 on clean CIFAR-100.
num_classes: 100
epochs: 100
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet50
  num_classes: 100
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-5
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/cifar101/resnet18.yaml
================================================
# ResNet-18 for the 101-class CIFAR-10 + extra-class experiment (PoisonCIFAR101).
num_classes: 101
epochs: 100
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 101
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-5
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/face/InceptionResnet.yaml
================================================
# InceptionResnetV1 for face recognition
# (10575 identities — presumably CASIA-WebFace; confirm against the dataset).
num_classes: 10575
epochs: 50
grad_clip: 5.0
log_frequency: 100
model:
  name: InceptionResnetV1
  num_classes: $num_classes  # resolved by mlconfig to the top-level value
  # pretrained: casia-webface
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.05
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: MultiStepLR
  milestones: [30, 40]
  gamma: 0.1
dataset:
  name: DatasetGenerator
  train_batch_size: 96
  eval_batch_size: 128
================================================
FILE: configs/imagenet-mini/dense121.yaml
================================================
# torchvision densenet121 (lowercase name) on the 100-class ImageNet-Mini subset.
num_classes: 100
epochs: 100
grad_clip: 5.0
log_frequency: 100
model:
  name: densenet121
  num_classes: 100
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-5
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 96
  eval_batch_size: 128
================================================
FILE: configs/imagenet-mini/resnet18.yaml
================================================
# torchvision resnet18 (lowercase name) on the 100-class ImageNet-Mini subset.
num_classes: 100
epochs: 100
grad_clip: 5.0
log_frequency: 100
model:
  name: resnet18
  num_classes: 100
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-5
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/imagenet-mini/resnet50.yaml
================================================
# torchvision resnet50 (lowercase name) on the 100-class ImageNet-Mini subset.
num_classes: 100
epochs: 100
grad_clip: 5.0
log_frequency: 100
model:
  name: resnet50
  num_classes: 100
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-5
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/svhn/dense121.yaml
================================================
# DenseNet-121 on SVHN (short 30-epoch schedule).
num_classes: 10
epochs: 30
grad_clip: 5.0
log_frequency: 100
model:
  name: DenseNet121
  num_classes: 10
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 96
  eval_batch_size: 128
================================================
FILE: configs/svhn/resnet18.yaml
================================================
# ResNet-18 on SVHN (short 30-epoch schedule).
num_classes: 10
epochs: 30
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 10
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/svhn/resnet18_madrys.yaml
================================================
# ResNet-18 on SVHN with Madry-style adversarial training.
# FIX: top-level num_classes was 100 (copy-paste from the CIFAR-100 config);
# SVHN has 10 classes, matching model.num_classes below.
num_classes: 10
epochs: 100
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 10
criterion:
  name: MadrysLoss
  epsilon: 0.03137254901   # 8/255 L-inf budget
  perturb_steps: 10
  step_size: 0.00784313725 # 2/255 per PGD step
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: MultiStepLR
  milestones: [75, 90, 100]
  gamma: 0.1
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/svhn/resnet50.yaml
================================================
# ResNet-50 on SVHN (short 30-epoch schedule).
num_classes: 10
epochs: 30
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet50
  num_classes: 10
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-4
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/tiny-imagenet/dense121.yaml
================================================
# DenseNet-121 for the 'TinyImageNet' setup — NOTE(review): dataset.py loads
# full torchvision ImageNet (1000 classes) for this type, hence 1000 here.
num_classes: 1000
epochs: 100
grad_clip: 5.0
log_frequency: 100
model:
  name: DenseNet121
  num_classes: 1000
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-5
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 96
  eval_batch_size: 128
================================================
FILE: configs/tiny-imagenet/resnet18.yaml
================================================
# ResNet-18 for the 'TinyImageNet' setup — NOTE(review): dataset.py loads
# full torchvision ImageNet (1000 classes) for this type, hence 1000 here.
num_classes: 1000
epochs: 100
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet18
  num_classes: 1000
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-5
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: configs/tiny-imagenet/resnet50.yaml
================================================
# ResNet-50 for the 'TinyImageNet' setup — NOTE(review): dataset.py loads
# full torchvision ImageNet (1000 classes) for this type, hence 1000 here.
num_classes: 1000
epochs: 100
grad_clip: 5.0
log_frequency: 100
model:
  name: ResNet50
  num_classes: 1000
criterion:
  name: CrossEntropyLoss
optimizer:
  name: SGD
  lr: 0.1
  weight_decay: 5.e-5
  momentum: 0.9
scheduler:
  name: CosineAnnealingLR
  T_max: $epochs
  eta_min: 0.0
dataset:
  name: DatasetGenerator
  train_batch_size: 128
  eval_batch_size: 128
================================================
FILE: dataset.py
================================================
import copy
import os
import collections
import numpy as np
import torch
import util
import random
import mlconfig
import pandas
from util import onehot, rand_bbox
from torch.utils.data.dataset import Dataset
from functools import partial
from PIL import Image, ImageFilter
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from fast_autoaugment.FastAutoAugment.archive import fa_reduced_cifar10
from fast_autoaugment.FastAutoAugment.augmentations import apply_augment
# Module-level device used by the dataset utilities below (GPU when available).
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
# Datasets
# Per-dataset torchvision transform pipelines. Each entry maps a dataset-type
# name (the DatasetGenerator train/test_data_type strings) to a
# 'train_transform' and 'test_transform' list, which DatasetGenerator wraps
# with transforms.Compose.
transform_options = {
    "CIFAR10": {
        "train_transform": [transforms.RandomCrop(32, padding=4),
                            transforms.RandomHorizontalFlip(),
                            transforms.ToTensor()],
        "test_transform": [transforms.ToTensor()]},
    "CIFAR100": {
        "train_transform": [transforms.RandomCrop(32, padding=4),
                            transforms.RandomHorizontalFlip(),
                            transforms.RandomRotation(20),
                            transforms.ToTensor()],
        "test_transform": [transforms.ToTensor()]},
    "SVHN": {
        # Digits are orientation-sensitive, so no flips/crops for SVHN.
        "train_transform": [transforms.ToTensor()],
        "test_transform": [transforms.ToTensor()]},
    "ImageNet": {
        "train_transform": [transforms.RandomResizedCrop(224),
                            transforms.RandomHorizontalFlip(),
                            transforms.ColorJitter(brightness=0.4,
                                                   contrast=0.4,
                                                   saturation=0.4,
                                                   hue=0.2),
                            transforms.ToTensor()],
        "test_transform": [transforms.Resize(256),
                           transforms.CenterCrop(224),
                           transforms.ToTensor()]},
    "TinyImageNet": {
        # Images are downscaled to 32x32 so CIFAR-style models can be reused.
        "train_transform": [transforms.CenterCrop(256),
                            transforms.Resize((32, 32)),
                            transforms.RandomHorizontalFlip(),
                            transforms.ToTensor()],
        "test_transform": [transforms.Resize((32, 32)),
                           transforms.ToTensor()]},
    'CatDog': {
        "train_transform": [transforms.Resize((32, 32)),
                            transforms.ToTensor()],
        "test_transform": [transforms.Resize((32, 32)),
                           transforms.ToTensor()]},
    'CelebA': {
        "train_transform": [transforms.CenterCrop((128, 128)),
                            transforms.RandomHorizontalFlip(),
                            transforms.ToTensor()],
        "test_transform": [transforms.CenterCrop((128, 128)),
                           transforms.ToTensor()]},
    'FaceScrub': {
        "train_transform": [transforms.RandomHorizontalFlip(),
                            transforms.ToTensor()],
        "test_transform": [transforms.Resize((128, 128)),
                           transforms.ToTensor()]},
    'WebFace': {
        "train_transform": [transforms.RandomHorizontalFlip(),
                            transforms.ToTensor()],
        "test_transform": [transforms.ToTensor()]},
}
# Poisoned/derived dataset types reuse the transforms of their base datasets.
transform_options['PoisonCIFAR10'] = transform_options['CIFAR10']
transform_options['PoisonCIFAR100'] = transform_options['CIFAR100']
# NOTE(review): PoisonCIFAR101 maps to CIFAR100-style transforms — confirm intentional.
transform_options['PoisonCIFAR101'] = transform_options['CIFAR100']
transform_options['PoisonSVHN'] = transform_options['SVHN']
transform_options['ImageNetMini'] = transform_options['ImageNet']
transform_options['PoisonImageNetMini'] = transform_options['ImageNet']
transform_options['CelebAMini'] = transform_options['CelebA']
@mlconfig.register
class DatasetGenerator():
def __init__(self, train_batch_size=128, eval_batch_size=256, num_of_workers=4,
train_data_path='../datasets/', train_data_type='CIFAR10', seed=0,
test_data_path='../datasets/', test_data_type='CIFAR10', fa=False,
no_train_augments=False, poison_rate=1.0, perturb_type='classwise',
perturb_tensor_filepath=None, patch_location='center', img_denoise=False,
add_uniform_noise=False, poison_classwise=False, poison_classwise_idx=None,
use_cutout=None, use_cutmix=False, use_mixup=False):
np.random.seed(seed)
self.train_batch_size = train_batch_size
self.eval_batch_size = eval_batch_size
self.num_of_workers = num_of_workers
self.seed = seed
self.train_data_type = train_data_type
self.test_data_type = test_data_type
self.train_data_path = train_data_path
self.test_data_path = test_data_path
train_transform = transform_options[train_data_type]['train_transform']
test_transform = transform_options[test_data_type]['test_transform']
train_transform = transforms.Compose(train_transform)
test_transform = transforms.Compose(test_transform)
if no_train_augments:
train_transform = test_transform
if fa:
# FastAutoAugment
train_transform.transforms.insert(0, Augmentation(fa_reduced_cifar10()))
elif use_cutout is not None:
print('Using Cutout')
train_transform.transforms.append(Cutout(16))
# Training Datasets
if train_data_type == 'CIFAR10':
num_of_classes = 10
train_dataset = datasets.CIFAR10(root=train_data_path, train=True,
download=True, transform=train_transform)
elif train_data_type == 'PoisonCIFAR10':
num_of_classes = 10
train_dataset = PoisonCIFAR10(root=train_data_path, transform=train_transform,
poison_rate=poison_rate, perturb_type=perturb_type,
patch_location=patch_location, seed=seed, img_denoise=img_denoise,
perturb_tensor_filepath=perturb_tensor_filepath,
add_uniform_noise=add_uniform_noise,
poison_classwise=poison_classwise,
poison_classwise_idx=poison_classwise_idx)
elif train_data_type == 'CIFAR100':
num_of_classes = 100
train_dataset = datasets.CIFAR100(root=train_data_path, train=True,
download=True, transform=train_transform)
elif train_data_type == 'PoisonCIFAR100':
num_of_classes = 100
train_dataset = PoisonCIFAR100(root=train_data_path, transform=train_transform,
poison_rate=poison_rate, perturb_type=perturb_type,
patch_location=patch_location, seed=seed, img_denoise=img_denoise,
perturb_tensor_filepath=perturb_tensor_filepath,
add_uniform_noise=add_uniform_noise,
poison_classwise=poison_classwise)
elif train_data_type == 'PoisonCIFAR101':
num_of_classes = 101
poison_cifar10 = PoisonCIFAR10(root=train_data_path, transform=train_transform,
poison_rate=poison_rate, perturb_type=perturb_type,
patch_location=patch_location, seed=seed, img_denoise=img_denoise,
perturb_tensor_filepath=perturb_tensor_filepath,
add_uniform_noise=add_uniform_noise,
poison_classwise=poison_classwise,
poison_classwise_idx=poison_classwise_idx)
train_dataset = PoisonCIFAR101(train_data_path, split='poison_train',
transform=train_transform, seed=0,
poisn_cifar10_data=poison_cifar10)
elif train_data_type == 'SVHN':
num_of_classes = 10
train_dataset = datasets.SVHN(root=train_data_path, split='train',
download=True, transform=train_transform)
elif train_data_type == 'PoisonSVHN':
num_of_classes = 10
train_dataset = PoisonSVHN(root=train_data_path, split='train', transform=train_transform,
poison_rate=poison_rate, perturb_type=perturb_type,
patch_location=patch_location, seed=seed, img_denoise=img_denoise,
perturb_tensor_filepath=perturb_tensor_filepath,
add_uniform_noise=add_uniform_noise,
poison_classwise=poison_classwise)
elif train_data_type == 'TinyImageNet':
num_of_classes = 1000
train_dataset = datasets.ImageNet(root=train_data_path, split='train',
transform=train_transform)
elif train_data_type == 'ImageNetMini':
num_of_classes = 100
train_dataset = ImageNetMini(root=train_data_path, split='train',
transform=train_transform)
elif train_data_type == 'PoisonImageNetMini':
num_of_classes = 100
train_dataset = PoisonImageNetMini(root=train_data_path, split='train', seed=seed,
transform=train_transform, poison_rate=poison_rate,
perturb_tensor_filepath=perturb_tensor_filepath)
elif train_data_type == 'CatDog':
train_dataset = CatDogDataset(root=train_data_path, split='train',
transform=train_transform)
elif train_data_type == 'CelebAMini':
train_dataset = CelebAMini(root=train_data_path, split="all",
target_type="identity", transform=train_transform)
test_dataset = CelebAMini(root=train_data_path, split="all",
target_type="identity", transform=test_transform)
elif train_data_type == 'WebFace':
train_dataset = datasets.ImageFolder(root=train_data_path, transform=train_transform)
test_dataset = datasets.ImageFolder(root=test_data_path, transform=test_transform)
elif train_data_type == 'CelebA':
train_dataset = datasets.CelebA(root=train_data_path, split="all",
target_type="identity", transform=train_transform)
test_dataset = datasets.CelebA(root=train_data_path, split="all",
target_type="identity", transform=test_transform)
else:
raise('Training Dataset type %s not implemented' % train_data_type)
# Test Datset
if test_data_type == 'CIFAR10':
test_dataset = datasets.CIFAR10(root=test_data_path, train=False,
download=True, transform=test_transform)
elif test_data_type == 'PoisonCIFAR10':
test_dataset = PoisonCIFAR10(root=test_data_path, train=False, transform=test_transform,
poison_rate=poison_rate, perturb_type=perturb_type,
patch_location=patch_location, seed=seed, img_denoise=img_denoise,
perturb_tensor_filepath=perturb_tensor_filepath,
add_uniform_noise=add_uniform_noise,
poison_classwise=poison_classwise,
poison_classwise_idx=poison_classwise_idx)
elif test_data_type == 'CIFAR100':
test_dataset = datasets.CIFAR100(root=test_data_path, train=False,
download=True, transform=test_transform)
elif test_data_type == 'PoisonCIFAR100':
test_dataset = PoisonCIFAR100(root=test_data_path, train=False, transform=test_transform,
poison_rate=poison_rate, perturb_type=perturb_type,
patch_location=patch_location, seed=seed, img_denoise=img_denoise,
perturb_tensor_filepath=perturb_tensor_filepath,
add_uniform_noise=add_uniform_noise,
poison_classwise=poison_classwise)
elif test_data_type == 'PoisonCIFAR101':
test_dataset = PoisonCIFAR101(test_data_path, split='test',
transform=test_transform, seed=0,
poisn_cifar10_data=poison_cifar10)
elif test_data_type == 'SVHN':
test_dataset = datasets.SVHN(root=test_data_path, split='test',
download=True, transform=test_transform)
elif test_data_type == 'PoisonSVHN':
test_dataset = PoisonSVHN(root=test_data_path, split='test', transform=test_transform,
poison_rate=poison_rate, perturb_type=perturb_type,
patch_location=patch_location, seed=seed, img_denoise=img_denoise,
perturb_tensor_filepath=perturb_tensor_filepath,
add_uniform_noise=add_uniform_noise,
poison_classwise=poison_classwise)
elif test_data_type == 'ImageNetMini':
test_dataset = ImageNetMini(root=test_data_path, split='val',
transform=test_transform)
elif test_data_type == 'TinyImageNet':
test_dataset = datasets.ImageNet(root=test_data_path, split='val',
transform=test_transform)
elif test_data_type == 'PoisonImageNetMini':
test_dataset = PoisonImageNetMini(root=test_data_path, split='val', seed=0,
transform=test_transform, poison_rate=poison_rate,
perturb_tensor_filepath=perturb_tensor_filepath)
elif test_data_type == 'CatDog':
# Cat Dog only used for transfer exp, no test dataset
test_dataset = CatDogDataset(root=train_data_path, split='train',
transform=train_transform)
elif test_data_type == 'CelebAMini' or 'CelebA':
pass
elif test_data_type == 'FaceScrub' or test_data_type == 'WebFace':
pass
else:
raise('Test Dataset type %s not implemented' % test_data_type)
if use_cutmix:
train_dataset = CutMix(dataset=train_dataset, num_class=num_of_classes)
elif use_mixup:
train_dataset = MixUp(dataset=train_dataset, num_class=num_of_classes)
self.datasets = {
'train_dataset': train_dataset,
'test_dataset': test_dataset,
}
return
def getDataLoader(self, train_shuffle=True, train_drop_last=True):
    """Build fresh DataLoaders for the stored train/test datasets.

    Args:
        train_shuffle: whether the training loader reshuffles each epoch.
        train_drop_last: whether the training loader drops a trailing
            partial batch.

    Returns:
        dict with 'train_dataset' and 'test_dataset' DataLoader entries.
    """
    train_loader = DataLoader(dataset=self.datasets['train_dataset'],
                              batch_size=self.train_batch_size,
                              shuffle=train_shuffle,
                              pin_memory=True,
                              drop_last=train_drop_last,
                              num_workers=self.num_of_workers)
    # The test loader is deterministic: no shuffling, no dropped batches.
    test_loader = DataLoader(dataset=self.datasets['test_dataset'],
                             batch_size=self.eval_batch_size,
                             shuffle=False,
                             pin_memory=True,
                             drop_last=False,
                             num_workers=self.num_of_workers)
    return {'train_dataset': train_loader, 'test_dataset': test_loader}
def _split_validation_set(self, train_portion, train_shuffle=True, train_drop_last=True):
    """Split the training data into train/valid subsets and build loaders.

    Uses sklearn's stratified train_test_split so class proportions are
    preserved in both subsets. Seeding np.random here makes the split
    reproducible (sklearn draws from numpy's global RNG when no
    random_state is supplied — TODO confirm this is the intended seeding
    mechanism rather than an explicit random_state).

    Args:
        train_portion: fraction of training data kept in the train subset;
            the remainder becomes the validation subset.
        train_shuffle: whether the train loaders reshuffle every epoch.
        train_drop_last: whether the train loaders drop a trailing partial
            batch.

    Returns:
        dict of DataLoaders: 'train_dataset', 'test_dataset',
        'train_subset', 'valid_subset'.
    """
    np.random.seed(self.seed)
    # Deep-copy the dataset twice so each subset owns independent
    # data/target containers that can be overwritten below.
    train_subset = copy.deepcopy(self.datasets['train_dataset'])
    valid_subset = copy.deepcopy(self.datasets['train_dataset'])
    if self.train_data_type == 'ImageNet' or self.train_data_type == 'ImageNetMini' or self.train_data_type == 'TinyImageNet' or self.train_data_type == 'PoisonImageNetMini':
        # ImageFolder-style datasets keep (path, class_id) pairs in .samples.
        data, targets = list(zip(*self.datasets['train_dataset'].samples))
        datasplit = train_test_split(data, targets, test_size=1-train_portion,
                                     train_size=train_portion, shuffle=True, stratify=targets)
        train_D, valid_D, train_L, valid_L = datasplit
        print('Train Labels: ', np.array(train_L))
        print('Valid Labels: ', np.array(valid_L))
        train_subset.samples = list(zip(train_D, train_L))
        valid_subset.samples = list(zip(valid_D, valid_L))
    elif self.train_data_type == 'SVHN':
        # SVHN stores its labels under .labels instead of .targets.
        data, targets = self.datasets['train_dataset'].data, self.datasets['train_dataset'].labels
        datasplit = train_test_split(data, targets, test_size=1-train_portion,
                                     train_size=train_portion, shuffle=True, stratify=targets)
        train_D, valid_D, train_L, valid_L = datasplit
        print('Train Labels: ', np.array(train_L))
        print('Valid Labels: ', np.array(valid_L))
        train_subset.data = np.array(train_D)
        valid_subset.data = np.array(valid_D)
        train_subset.labels = train_L
        valid_subset.labels = valid_L
    else:
        # Default path: CIFAR-style datasets with .data / .targets.
        datasplit = train_test_split(self.datasets['train_dataset'].data,
                                     self.datasets['train_dataset'].targets,
                                     test_size=1-train_portion, train_size=train_portion,
                                     shuffle=True, stratify=self.datasets['train_dataset'].targets)
        train_D, valid_D, train_L, valid_L = datasplit
        print('Train Labels: ', np.array(train_L))
        print('Valid Labels: ', np.array(valid_L))
        train_subset.data = np.array(train_D)
        valid_subset.data = np.array(valid_D)
        train_subset.targets = train_L
        valid_subset.targets = valid_L
    self.datasets['train_subset'] = train_subset
    self.datasets['valid_subset'] = valid_subset
    # NOTE(review): debug print of the entire datasets dict — consider
    # removing or demoting to a logger call.
    print(self.datasets)
    data_loaders = {}
    data_loaders['train_dataset'] = DataLoader(dataset=self.datasets['train_dataset'],
                                               batch_size=self.train_batch_size,
                                               shuffle=train_shuffle, pin_memory=True,
                                               drop_last=train_drop_last, num_workers=self.num_of_workers)
    data_loaders['test_dataset'] = DataLoader(dataset=self.datasets['test_dataset'],
                                              batch_size=self.eval_batch_size,
                                              shuffle=False, pin_memory=True,
                                              drop_last=False, num_workers=self.num_of_workers)
    data_loaders['train_subset'] = DataLoader(dataset=self.datasets['train_subset'],
                                              batch_size=self.train_batch_size,
                                              shuffle=train_shuffle, pin_memory=True,
                                              drop_last=train_drop_last, num_workers=self.num_of_workers)
    data_loaders['valid_subset'] = DataLoader(dataset=self.datasets['valid_subset'],
                                              batch_size=self.eval_batch_size,
                                              shuffle=False, pin_memory=True,
                                              drop_last=False, num_workers=self.num_of_workers)
    return data_loaders
def patch_noise_extend_to_img(noise, image_size=[32, 32, 3], patch_location='center'):
    """Place a (possibly smaller) noise patch onto a zero, image-sized canvas.

    Args:
        noise: HxWxC numpy array holding the perturbation patch.
        image_size: [height, width, channels] of the target image.
        patch_location: 'center' to center the patch, or 'random' for a
            random position fully inside the image.

    Returns:
        float32 numpy array of shape image_size, zero everywhere except
        where the patch is placed.

    Raises:
        ValueError: if patch_location is neither 'center' nor 'random'.
    """
    h, w, c = image_size[0], image_size[1], image_size[2]
    mask = np.zeros((h, w, c), np.float32)
    x_len, y_len = noise.shape[0], noise.shape[1]
    if patch_location == 'center' or (h == w == x_len == y_len):
        # A full-size patch always behaves as 'center'.
        x = h // 2
        y = w // 2
    elif patch_location == 'random':
        # NOTE(review): x indexes rows (height) but is bounded by w, and
        # vice versa; this only matters for non-square images — confirm.
        x = np.random.randint(x_len // 2, w - x_len // 2)
        y = np.random.randint(y_len // 2, h - y_len // 2)
    else:
        # Fixed: `raise('...')` raised a TypeError in Python 3 (strings are
        # not exceptions); raise a proper ValueError instead.
        raise ValueError('Invalid patch location')
    x1 = np.clip(x - x_len // 2, 0, h)
    x2 = np.clip(x + x_len // 2, 0, h)
    y1 = np.clip(y - y_len // 2, 0, w)
    y2 = np.clip(y + y_len // 2, 0, w)
    mask[x1: x2, y1: y2, :] = noise
    return mask
class PoisonCIFAR10(datasets.CIFAR10):
    """CIFAR-10 with additive 'unlearnable example' perturbations.

    A fraction (`poison_rate`) of samples — or of classes, when
    `poison_classwise` is set — receives an additive perturbation loaded
    from `perturb_tensor_filepath`. The stored tensor may be 4-D (one
    noise per sample/class) or 5-D (multiple candidate noise sets; one is
    drawn at random per poisoned sample).
    """

    def __init__(self, root, train=True, transform=None, target_transform=None,
                 download=False, poison_rate=1.0, perturb_tensor_filepath=None,
                 seed=0, perturb_type='classwise', patch_location='center', img_denoise=False,
                 add_uniform_noise=False, poison_classwise=False, poison_classwise_idx=None):
        super(PoisonCIFAR10, self).__init__(root=root, train=train, download=download, transform=transform, target_transform=target_transform)
        # Fixed: `seed` was accepted but never used, so poison-target
        # selection was not reproducible (PoisonCIFAR101 does seed).
        np.random.seed(seed)
        self.perturb_tensor = torch.load(perturb_tensor_filepath, map_location=device)
        if len(self.perturb_tensor.shape) == 4:
            # (N, C, H, W) -> (N, H, W, C), rescaled to [0, 255] to match self.data.
            self.perturb_tensor = self.perturb_tensor.mul(255).clamp_(0, 255).permute(0, 2, 3, 1).to('cpu').numpy()
        else:
            # (K, N, C, H, W) -> (K, N, H, W, C): K candidate noise sets.
            self.perturb_tensor = self.perturb_tensor.mul(255).clamp_(0, 255).permute(0, 1, 3, 4, 2).to('cpu').numpy()
        self.patch_location = patch_location
        self.img_denoise = img_denoise
        self.data = self.data.astype(np.float32)
        # Check Shape
        target_dim = self.perturb_tensor.shape[0] if len(self.perturb_tensor.shape) == 4 else self.perturb_tensor.shape[1]
        if perturb_type == 'samplewise' and target_dim != len(self):
            # Fixed: raising a plain string is a TypeError in Python 3.
            raise ValueError('Poison Perturb Tensor size not match for samplewise')
        elif perturb_type == 'classwise' and target_dim != 10:
            raise ValueError('Poison Perturb Tensor size not match for classwise')
        # Random Select Poison Targets
        self.poison_samples = collections.defaultdict(lambda: False)
        self.poison_class = []
        if poison_classwise:
            targets = list(range(0, 10))
            if poison_classwise_idx is None:
                self.poison_class = sorted(np.random.choice(targets, int(len(targets) * poison_rate), replace=False).tolist())
            else:
                self.poison_class = poison_classwise_idx
            self.poison_samples_idx = [i for i, label in enumerate(self.targets) if label in self.poison_class]
        else:
            targets = list(range(0, len(self)))
            self.poison_samples_idx = sorted(np.random.choice(targets, int(len(targets) * poison_rate), replace=False).tolist())
        for idx in self.poison_samples_idx:
            self.poison_samples[idx] = True
            if len(self.perturb_tensor.shape) == 5:
                # Multiple candidate noise sets: draw one per sample.
                # NOTE(review): uses the unseeded `random` module — confirm
                # whether this draw should also follow `seed`.
                perturb_id = random.choice(range(self.perturb_tensor.shape[0]))
                perturb_tensor = self.perturb_tensor[perturb_id]
            else:
                perturb_tensor = self.perturb_tensor
            if perturb_type == 'samplewise':
                # Sample Wise poison
                noise = perturb_tensor[idx]
            elif perturb_type == 'classwise':
                # Class Wise Poison
                noise = perturb_tensor[self.targets[idx]]
            noise = patch_noise_extend_to_img(noise, [32, 32, 3], patch_location=self.patch_location)
            if add_uniform_noise:
                noise += np.random.uniform(0, 8, (32, 32, 3))
            self.data[idx] = self.data[idx] + noise
            self.data[idx] = np.clip(self.data[idx], a_min=0, a_max=255)
        self.data = self.data.astype(np.uint8)
        # Removed: debug print that dumped the entire perturbation tensor.
        print('add_uniform_noise: ', add_uniform_noise)
        print(self.perturb_tensor.shape)
        print('Poison samples: %d/%d' % (len(self.poison_samples), len(self)))
class PoisonCIFAR100(datasets.CIFAR100):
    """CIFAR-100 with additive 'unlearnable example' perturbations.

    Mirrors PoisonCIFAR10 for 100 classes: a fraction (`poison_rate`) of
    samples or classes receives an additive perturbation loaded from
    `perturb_tensor_filepath`.
    """

    def __init__(self, root, train=True, transform=None, target_transform=None,
                 download=False, poison_rate=1.0, perturb_tensor_filepath=None,
                 seed=0, perturb_type='classwise', patch_location='center', img_denoise=False,
                 add_uniform_noise=False, poison_classwise=False):
        super(PoisonCIFAR100, self).__init__(root=root, train=train, download=download, transform=transform, target_transform=target_transform)
        # Fixed: `seed` was accepted but never used, so poison-target
        # selection was not reproducible.
        np.random.seed(seed)
        self.perturb_tensor = torch.load(perturb_tensor_filepath, map_location=device)
        # (N, C, H, W) -> (N, H, W, C), rescaled to [0, 255] to match self.data.
        self.perturb_tensor = self.perturb_tensor.mul(255).clamp_(0, 255).permute(0, 2, 3, 1).to('cpu').numpy()
        self.patch_location = patch_location
        self.img_denoise = img_denoise
        self.data = self.data.astype(np.float32)
        # Check Shape
        if perturb_type == 'samplewise' and self.perturb_tensor.shape[0] != len(self):
            # Fixed: raising a plain string is a TypeError in Python 3.
            raise ValueError('Poison Perturb Tensor size not match for samplewise')
        elif perturb_type == 'classwise' and self.perturb_tensor.shape[0] != 100:
            raise ValueError('Poison Perturb Tensor size not match for classwise')
        # Random Select Poison Targets
        self.poison_samples = collections.defaultdict(lambda: False)
        self.poison_class = []
        if poison_classwise:
            targets = list(range(0, 100))
            self.poison_class = sorted(np.random.choice(targets, int(len(targets) * poison_rate), replace=False).tolist())
            self.poison_samples_idx = [i for i, label in enumerate(self.targets) if label in self.poison_class]
        else:
            targets = list(range(0, len(self)))
            self.poison_samples_idx = sorted(np.random.choice(targets, int(len(targets) * poison_rate), replace=False).tolist())
        for idx in self.poison_samples_idx:
            self.poison_samples[idx] = True
            if perturb_type == 'samplewise':
                # Sample Wise poison
                noise = self.perturb_tensor[idx]
            elif perturb_type == 'classwise':
                # Class Wise Poison
                noise = self.perturb_tensor[self.targets[idx]]
            noise = patch_noise_extend_to_img(noise, [32, 32, 3], patch_location=self.patch_location)
            if add_uniform_noise:
                # Fixed: the uniform noise previously REPLACED the
                # perturbation (`noise = ...`), silently discarding it.
                # It is now added, matching PoisonCIFAR10's behavior.
                noise += np.random.uniform(0, 8, (32, 32, 3))
            self.data[idx] += noise
            self.data[idx] = np.clip(self.data[idx], 0, 255)
        self.data = self.data.astype(np.uint8)
        print('add_uniform_noise: ', add_uniform_noise)
        print(self.perturb_tensor.shape)
        print('Poison samples: %d/%d' % (len(self.poison_samples), len(self)))
class PoisonCIFAR101(datasets.VisionDataset):
    """CIFAR-100 extended with a 101st class of CIFAR-10 'ship' images.

    For `split='poison_train'` the extra ship images come from the supplied
    (poisoned) CIFAR-10 dataset; for `split='test'` they come from the
    clean CIFAR-10 test set. The extra samples are labelled 100.
    The `poisn_cifar10_data` parameter name (sic) is kept for backward
    compatibility with existing callers.
    """

    def __init__(self, root, split='poison_train', transform=None, target_transform=None,
                 poisn_cifar10_data=None, seed=0):
        np.random.seed(seed)
        self.transform = transform
        # Fixed: target_transform was accepted but silently discarded; it
        # is now stored and applied in __getitem__.
        self.target_transform = target_transform
        self.root = root
        if split == 'poison_train':
            self.clean_cifar100 = datasets.CIFAR100(root=root, train=True, download=True, transform=None)
            cifar10 = poisn_cifar10_data
            cifar10_sample_count = 500
        elif split == 'test':
            self.clean_cifar100 = datasets.CIFAR100(root=root, train=False, download=True, transform=None)
            cifar10 = datasets.CIFAR10(root=root, train=False, download=True, transform=None)
            cifar10_sample_count = 100
        else:
            # Fixed: an unknown split previously fell through to a NameError
            # on `cifar10`; fail fast with a clear message instead.
            raise ValueError('Unknown split: %s' % split)
        self.data, self.targets = self.clean_cifar100.data, self.clean_cifar100.targets
        print(self.clean_cifar100.class_to_idx)
        # Add Ship samples of CIFAR10 (CIFAR-10 class id 8) as new class 100.
        ship_idx = np.where(np.array(cifar10.targets) == 8)[0]
        selected_idx = np.random.choice(ship_idx, cifar10_sample_count, replace=False)
        extra_samples, extra_targets = [], []
        for idx in selected_idx:
            extra_samples.append(cifar10.data[idx])
            extra_targets.append(100)
        self.data = np.concatenate((self.data, np.array(extra_samples)))
        self.targets = self.targets + extra_targets
        # The appended ship images are, by construction, the poisoned part.
        self.poison_samples_idx = np.array(range(len(self.clean_cifar100), len(self)))
        self.poison_class = [100]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        img, target = self.data[index], self.targets[index]
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
class PoisonSVHN(datasets.SVHN):
    """SVHN with additive 'unlearnable example' perturbations.

    Mirrors PoisonCIFAR10; note SVHN stores images channel-first
    (N, 3, 32, 32) and labels under `.labels`, and the perturbation
    tensor is used without permutation to match that layout.
    """

    def __init__(self, root, split='train', transform=None, target_transform=None,
                 download=False, poison_rate=1.0, perturb_tensor_filepath=None,
                 seed=0, perturb_type='classwise', patch_location='center', img_denoise=False,
                 add_uniform_noise=False, poison_classwise=False):
        super(PoisonSVHN, self).__init__(root=root, split=split, download=download, transform=transform, target_transform=target_transform)
        # Fixed: `seed` was accepted but never used.
        np.random.seed(seed)
        self.perturb_tensor = torch.load(perturb_tensor_filepath, map_location=device)
        # Rescale to [0, 255]; layout kept channel-first to match self.data.
        self.perturb_tensor = self.perturb_tensor.mul(255).clamp_(0, 255).to('cpu').numpy()
        self.patch_location = patch_location
        self.img_denoise = img_denoise
        # Check Shape
        if perturb_type == 'samplewise' and self.perturb_tensor.shape[0] != len(self):
            # Fixed: raising a plain string is a TypeError in Python 3.
            raise ValueError('Poison Perturb Tensor size not match for samplewise')
        elif perturb_type == 'classwise' and self.perturb_tensor.shape[0] != 10:
            raise ValueError('Poison Perturb Tensor size not match for classwise')
        self.data = self.data.astype(np.float32)
        # Random Select Poison Targets
        self.poison_samples = collections.defaultdict(lambda: False)
        self.poison_class = []
        if poison_classwise:
            targets = list(range(0, 10))
            self.poison_class = sorted(np.random.choice(targets, int(len(targets) * poison_rate), replace=False).tolist())
            self.poison_samples_idx = [i for i, label in enumerate(self.labels) if label in self.poison_class]
        else:
            targets = list(range(0, len(self)))
            self.poison_samples_idx = sorted(np.random.choice(targets, int(len(targets) * poison_rate), replace=False).tolist())
        for idx in self.poison_samples_idx:
            self.poison_samples[idx] = True
            if perturb_type == 'samplewise':
                # Sample Wise poison
                noise = self.perturb_tensor[idx]
            elif perturb_type == 'classwise':
                # Class Wise Poison
                noise = self.perturb_tensor[self.labels[idx]]
            if add_uniform_noise:
                # Fixed two bugs: (a) the uniform noise previously REPLACED
                # the perturbation instead of augmenting it; (b) its
                # hard-coded (32, 32, 3) shape did not match SVHN's
                # channel-first data. Add out-of-place so the stored
                # perturb_tensor (which `noise` views) is not mutated.
                noise = noise + np.random.uniform(0, 8, noise.shape)
            self.data[idx] += noise
            self.data[idx] = np.clip(self.data[idx], 0, 255)
        self.data = self.data.astype(np.uint8)
        print('add_uniform_noise: ', add_uniform_noise)
        print(self.perturb_tensor.shape)
        print('Poison samples: %d/%d' % (len(self.poison_samples), len(self)))
class ImageNetMini(datasets.ImageNet):
    """ImageNet restricted to its first 100 classes (class ids 0-99)."""

    def __init__(self, root, split='train', **kwargs):
        super(ImageNetMini, self).__init__(root, split=split, **kwargs)
        # Keep only the samples whose class id is in the first 100 classes.
        kept = [(path, cls_id) for path, cls_id in self.imgs if cls_id <= 99]
        self.new_images = kept
        self.new_targets = [cls_id for _, cls_id in kept]
        self.imgs = self.new_images
        self.targets = self.new_targets
        self.samples = self.imgs
        print(len(self.samples))
        print(len(self.targets))
        return
class PoisonImageNetMini(ImageNetMini):
    """ImageNetMini variant that adds class-wise perturbation noise to a
    random fraction of samples at load time."""

    def __init__(self, root, split, poison_rate=1.0, seed=0,
                 perturb_tensor_filepath=None, **kwargs):
        super(PoisonImageNetMini, self).__init__(root=root, split=split, **kwargs)
        # Seed numpy so the poisoned-sample selection is reproducible.
        np.random.seed(seed)
        self.poison_rate = poison_rate
        self.perturb_tensor = torch.load(perturb_tensor_filepath)
        # (N, C, H, W) -> (N, H, W, C); mul(255) rescales to pixel units
        # (presumably the stored tensor is in [0, 1] — TODO confirm).
        self.perturb_tensor = self.perturb_tensor.mul(255).clamp_(0, 255).permute(0, 2, 3, 1).to('cpu').numpy()
        # Random Select Poison Targets
        targets = list(range(0, len(self)))
        self.poison_samples_idx = sorted(np.random.choice(targets, int(len(targets) * poison_rate), replace=False).tolist())
        # poison_samples maps sample index -> poisoned? (defaults to False).
        self.poison_samples = collections.defaultdict(lambda: False)
        self.poison_class = []
        for idx in self.poison_samples_idx:
            self.poison_samples[idx] = True
        print(self.perturb_tensor.shape)
        print('Poison samples: %d/%d' % (len(self.poison_samples), len(self)))

    def __getitem__(self, index):
        path, target = self.samples[index]
        sample = self.loader(path)
        # Crop to 224x224 BEFORE poisoning so the noise aligns with the
        # cropped view; note this adds randomness on top of self.transform.
        sample = np.array(transforms.RandomResizedCrop(224)(sample)).astype(np.float32)
        if self.poison_samples[index]:
            # Class-wise poison: all samples of a class share one noise.
            noise = self.perturb_tensor[target]
            sample = sample + noise
            sample = np.clip(sample, 0, 255)
        sample = sample.astype(np.uint8)
        sample = Image.fromarray(sample).convert('RGB')
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target
class Augmentation(object):
    """Applies one randomly chosen augmentation policy to an image.

    A policy is a sequence of (name, probability, level) operations; each
    operation in the chosen policy fires independently with its own
    probability.
    """

    def __init__(self, policies):
        self.policies = policies

    def __call__(self, img):
        # Draw a single policy, then apply each of its ops stochastically.
        chosen = random.choice(self.policies)
        for op_name, prob, magnitude in chosen:
            if random.random() <= prob:
                img = apply_augment(img, op_name, magnitude)
        return img
class CatDogDataset(datasets.VisionDataset):
    """Binary cat(0)/dog(1) dataset with labels parsed from filename prefixes.

    Image files are expected directly under <root>/<split>/ with names
    beginning 'cat' or 'dog' (e.g. 'cat.123.jpg').
    """

    def __init__(self, root, split='train', transform=None, target_transform=None):
        self.root = root
        self.split = split
        self.transform = transform
        self.target_transform = target_transform
        self.img_file_names = os.listdir(os.path.join(root, split))

    def __len__(self):
        return len(self.img_file_names)

    def __getitem__(self, index):
        filename = self.img_file_names[index]
        # The first three characters of the filename encode the class.
        label = filename[:3]
        if label == 'cat':
            label = 0
        elif label == 'dog':
            label = 1
        else:
            # Fixed: raising a plain string is a TypeError in Python 3;
            # raise a proper exception and include the offending filename.
            raise ValueError('Unknown label for file: %s' % filename)
        with open(os.path.join(self.root, self.split, filename), 'rb') as f:
            img = Image.open(f).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            label = self.target_transform(label)
        return img, label
class CelebAMini(datasets.CelebA):
    """CelebA identity-classification subset keeping only identities with
    id < num_of_classes.

    Note: the `download` parameter is accepted for API compatibility but
    the parent class is always constructed with download=False.
    """

    def __init__(self, root, split="train", target_type="attr", transform=None,
                 target_transform=None, download=False, num_of_classes=1000):
        super(CelebAMini, self).__init__(root=root, split=split, target_type=target_type,
                                         transform=transform, target_transform=target_transform,
                                         download=False)
        split_map = {
            "train": 0,
            "valid": 1,
            "test": 2,
            "all": None,
        }
        split_ = split_map[datasets.utils.verify_str_arg(split.lower(), "split", ("train", "valid", "test", "all"))]
        fn = partial(os.path.join, self.root, self.base_folder)
        # Fixed: delim_whitespace=True is deprecated (removed in pandas 3.0);
        # sep=r"\s+" is the documented equivalent.
        splits = pandas.read_csv(fn("list_eval_partition.txt"), sep=r"\s+", header=None, index_col=0)
        identity = pandas.read_csv(fn("identity_CelebA.txt"), sep=r"\s+", header=None, index_col=0)
        # Restrict to the requested split, then to the first num_of_classes
        # identities.
        mask = slice(None) if split_ is None else (splits[1] == split_)
        identity = identity[mask]
        identity = identity[identity[1] < num_of_classes]
        self.filename = identity.index.values
        self.identity = identity.values
        # Removed: debug print that dumped the entire identity array.

    def __len__(self):
        # One retained identity row per image.
        return len(self.identity)

    def __getitem__(self, index):
        filename = self.filename[index]
        target = self.identity[index][0]
        X = Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", filename))
        if self.transform is not None:
            X = self.transform(X)
        return X, target
class Cutout(object):
    """Zeroes out one randomly placed square region of a CHW image tensor.

    Args:
        length: side length (pixels) of the square to mask; the square is
            clipped at the image border. The input tensor is modified
            in place and also returned.
    """

    def __init__(self, length):
        self.length = length

    def __call__(self, img):
        height, width = img.size(1), img.size(2)
        # Pick a random center for the cutout square.
        cy = np.random.randint(height)
        cx = np.random.randint(width)
        half = self.length // 2
        top = np.clip(cy - half, 0, height)
        bottom = np.clip(cy + half, 0, height)
        left = np.clip(cx - half, 0, width)
        right = np.clip(cx + half, 0, width)
        mask = np.ones((height, width), np.float32)
        mask[top: bottom, left: right] = 0.
        # Broadcast the 2-D mask across all channels, in place.
        img *= torch.from_numpy(mask).expand_as(img)
        return img
class CutMix(Dataset):
    """Dataset wrapper applying CutMix: pastes a random box from a second
    sample into the image and mixes the one-hot labels by the box's area
    fraction.

    Args:
        dataset: wrapped dataset yielding (image_tensor, int_label).
        num_class: number of classes for one-hot encoding.
        num_mix: number of independent mixing attempts per item.
        beta: Beta-distribution parameter; <= 0 disables mixing.
        prob: probability that each mixing attempt fires.
    """

    def __init__(self, dataset, num_class, num_mix=2, beta=1.0, prob=0.5):
        self.dataset = dataset
        self.num_class = num_class
        self.num_mix = num_mix
        self.beta = beta
        self.prob = prob

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img, label = self.dataset[index]
        label_onehot = onehot(self.num_class, label)
        for _ in range(self.num_mix):
            roll = np.random.rand(1)
            # Skip this attempt with probability (1 - prob), or entirely
            # when beta disables mixing.
            if self.beta <= 0 or roll > self.prob:
                continue
            lam = np.random.beta(self.beta, self.beta)
            partner_idx = random.choice(range(len(self)))
            partner_img, partner_label = self.dataset[partner_idx]
            partner_onehot = onehot(self.num_class, partner_label)
            # Paste the partner's box into this image, then reweight lam by
            # the actual pasted-area fraction.
            bbx1, bby1, bbx2, bby2 = rand_bbox(img.size(), lam)
            img[:, bbx1:bbx2, bby1:bby2] = partner_img[:, bbx1:bbx2, bby1:bby2]
            box_area = (bbx2 - bbx1) * (bby2 - bby1)
            lam = 1 - (box_area / (img.size()[-1] * img.size()[-2]))
            label_onehot = label_onehot * lam + partner_onehot * (1. - lam)
        return img, label_onehot
class MixUp(Dataset):
    """Dataset wrapper applying MixUp: convex combination of two samples
    and of their one-hot labels.

    Args:
        dataset: wrapped dataset yielding (image_tensor, int_label).
        num_class: number of classes for one-hot encoding.
        num_mix: number of independent mixing attempts per item.
        beta: Beta-distribution parameter; <= 0 disables mixing.
        prob: probability that each mixing attempt fires.
    """

    def __init__(self, dataset, num_class, num_mix=2, beta=1.0, prob=0.5):
        self.dataset = dataset
        self.num_class = num_class
        self.num_mix = num_mix
        self.beta = beta
        self.prob = prob

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img, label = self.dataset[index]
        label_onehot = onehot(self.num_class, label)
        for _ in range(self.num_mix):
            roll = np.random.rand(1)
            # Skip this attempt with probability (1 - prob), or entirely
            # when beta disables mixing.
            if self.beta <= 0 or roll > self.prob:
                continue
            lam = np.random.beta(self.beta, self.beta)
            partner_idx = random.choice(range(len(self)))
            partner_img, partner_label = self.dataset[partner_idx]
            partner_onehot = onehot(self.num_class, partner_label)
            img = img * lam + partner_img * (1 - lam)
            label_onehot = label_onehot * lam + partner_onehot * (1. - lam)
        return img, label_onehot
================================================
FILE: evaluator.py
================================================
import time
import models
import torch
import torch.optim as optim
import util
from torch.autograd import Variable
# Select the compute device once at import time: prefer CUDA when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Evaluator():
    """Evaluates a model on the test split, accumulating loss, top-1/top-5
    accuracy and a per-class confusion matrix; also provides a white-box
    PGD robustness check."""

    def __init__(self, data_loader, logger, config):
        # Running statistics; recreated by _reset_stats().
        self.loss_meters = util.AverageMeter()
        self.acc_meters = util.AverageMeter()
        self.acc5_meters = util.AverageMeter()
        self.criterion = torch.nn.CrossEntropyLoss()
        self.data_loader = data_loader
        self.logger = logger
        # Default to logging every 100 batches when unset in config.
        self.log_frequency = config.log_frequency if config.log_frequency is not None else 100
        self.config = config
        self.current_acc = 0
        self.current_acc_top5 = 0
        # confusion_matrix[true, predicted] counts evaluated samples.
        self.confusion_matrix = torch.zeros(config.num_classes, config.num_classes)
        return

    def _reset_stats(self):
        # Discard all accumulated meters and the confusion matrix.
        self.loss_meters = util.AverageMeter()
        self.acc_meters = util.AverageMeter()
        self.acc5_meters = util.AverageMeter()
        self.confusion_matrix = torch.zeros(self.config.num_classes, self.config.num_classes)
        return

    def eval(self, epoch, model):
        """Run one full pass over data_loader['test_dataset'] in eval mode,
        logging per-batch statistics via the configured logger."""
        model.eval()
        for i, (images, labels) in enumerate(self.data_loader["test_dataset"]):
            start = time.time()
            log_payload = self.eval_batch(images=images, labels=labels, model=model)
            end = time.time()
            time_used = end - start
            display = util.log_display(epoch=epoch,
                                       global_step=i,
                                       time_elapse=time_used,
                                       **log_payload)
            if self.logger is not None:
                self.logger.info(display)
        return

    def eval_batch(self, images, labels, model):
        """Evaluate one batch without gradients; updates the meters and the
        confusion matrix and returns a dict of batch/averaged metrics."""
        images, labels = images.to(device, non_blocking=True), labels.to(device, non_blocking=True)
        with torch.no_grad():
            pred = model(images)
            loss = self.criterion(pred, labels)
            acc, acc5 = util.accuracy(pred, labels, topk=(1, 5))
            _, preds = torch.max(pred, 1)
            # Tally (true, predicted) pairs into the confusion matrix.
            for t, p in zip(labels.view(-1), preds.view(-1)):
                self.confusion_matrix[t.long(), p.long()] += 1
        self.loss_meters.update(loss.item(), n=images.size(0))
        self.acc_meters.update(acc.item(), n=images.size(0))
        self.acc5_meters.update(acc5.item(), n=images.size(0))
        payload = {"acc": acc.item(),
                   "acc_avg": self.acc_meters.avg,
                   "acc5": acc5.item(),
                   "acc5_avg": self.acc5_meters.avg,
                   "loss": loss.item(),
                   "loss_avg": self.loss_meters.avg}
        return payload

    def _pgd_whitebox(self, model, X, y, random_start=True, epsilon=0.031, num_steps=20, step_size=0.003):
        """L-infinity PGD attack on batch (X, y).

        Returns (clean_correct_count, adversarial_correct_count) for the
        batch. Assumes inputs live in [0, 1] — the projection below clamps
        to that range.
        """
        model.eval()
        out = model(X)
        acc = (out.data.max(1)[1] == y.data).float().sum()
        X_pgd = Variable(X.data, requires_grad=True)
        if random_start:
            # Start from a uniformly random point inside the epsilon ball.
            random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).to(device)
            X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)
        for _ in range(num_steps):
            # The SGD optimizer is only used to zero X_pgd's gradient; the
            # actual update is the manual sign-ascent below.
            opt = optim.SGD([X_pgd], lr=1e-3)
            opt.zero_grad()
            with torch.enable_grad():
                loss = torch.nn.CrossEntropyLoss()(model(X_pgd), y)
            loss.backward()
            # Ascend the loss, then project back into the epsilon ball and
            # into the valid [0, 1] pixel range.
            eta = step_size * X_pgd.grad.data.sign()
            X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
            eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
            X_pgd = Variable(X.data + eta, requires_grad=True)
            X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)
        acc_pgd = (model(X_pgd).data.max(1)[1] == y.data).float().sum()
        return acc.item(), acc_pgd.item()
================================================
FILE: fast_autoaugment/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
================================================
FILE: fast_autoaugment/FastAutoAugment/__init__.py
================================================
================================================
FILE: fast_autoaugment/FastAutoAugment/archive.py
================================================
# Policy found on CIFAR-10 and CIFAR-100
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
from .augmentations import get_augment, augment_list
def arsaug_policy():
    """Hand-tuned ARS-Aug policy set for CIFAR.

    Returns a list of 35 policies; each policy is a list of
    (op_name, probability, magnitude) pairs with magnitudes already
    normalized to [0, 1].
    """
    exp0_0 = [
        [('Solarize', 0.66, 0.34), ('Equalize', 0.56, 0.61)],
        [('Equalize', 0.43, 0.06), ('AutoContrast', 0.66, 0.08)],
        [('Color', 0.72, 0.47), ('Contrast', 0.88, 0.86)],
        [('Brightness', 0.84, 0.71), ('Color', 0.31, 0.74)],
        [('Rotate', 0.68, 0.26), ('TranslateX', 0.38, 0.88)]]
    exp0_1 = [
        [('TranslateY', 0.88, 0.96), ('TranslateY', 0.53, 0.79)],
        [('AutoContrast', 0.44, 0.36), ('Solarize', 0.22, 0.48)],
        [('AutoContrast', 0.93, 0.32), ('Solarize', 0.85, 0.26)],
        [('Solarize', 0.55, 0.38), ('Equalize', 0.43, 0.48)],
        [('TranslateY', 0.72, 0.93), ('AutoContrast', 0.83, 0.95)]]
    exp0_2 = [
        [('Solarize', 0.43, 0.58), ('AutoContrast', 0.82, 0.26)],
        [('TranslateY', 0.71, 0.79), ('AutoContrast', 0.81, 0.94)],
        [('AutoContrast', 0.92, 0.18), ('TranslateY', 0.77, 0.85)],
        [('Equalize', 0.71, 0.69), ('Color', 0.23, 0.33)],
        [('Sharpness', 0.36, 0.98), ('Brightness', 0.72, 0.78)]]
    exp0_3 = [
        [('Equalize', 0.74, 0.49), ('TranslateY', 0.86, 0.91)],
        [('TranslateY', 0.82, 0.91), ('TranslateY', 0.96, 0.79)],
        [('AutoContrast', 0.53, 0.37), ('Solarize', 0.39, 0.47)],
        [('TranslateY', 0.22, 0.78), ('Color', 0.91, 0.65)],
        [('Brightness', 0.82, 0.46), ('Color', 0.23, 0.91)]]
    exp0_4 = [
        [('Cutout', 0.27, 0.45), ('Equalize', 0.37, 0.21)],
        [('Color', 0.43, 0.23), ('Brightness', 0.65, 0.71)],
        [('ShearX', 0.49, 0.31), ('AutoContrast', 0.92, 0.28)],
        [('Equalize', 0.62, 0.59), ('Equalize', 0.38, 0.91)],
        [('Solarize', 0.57, 0.31), ('Equalize', 0.61, 0.51)]]
    exp0_5 = [
        [('TranslateY', 0.29, 0.35), ('Sharpness', 0.31, 0.64)],
        [('Color', 0.73, 0.77), ('TranslateX', 0.65, 0.76)],
        [('ShearY', 0.29, 0.74), ('Posterize', 0.42, 0.58)],
        [('Color', 0.92, 0.79), ('Equalize', 0.68, 0.54)],
        [('Sharpness', 0.87, 0.91), ('Sharpness', 0.93, 0.41)]]
    exp0_6 = [
        [('Solarize', 0.39, 0.35), ('Color', 0.31, 0.44)],
        [('Color', 0.33, 0.77), ('Color', 0.25, 0.46)],
        [('ShearY', 0.29, 0.74), ('Posterize', 0.42, 0.58)],
        [('AutoContrast', 0.32, 0.79), ('Cutout', 0.68, 0.34)],
        [('AutoContrast', 0.67, 0.91), ('AutoContrast', 0.73, 0.83)]]
    return exp0_0 + exp0_1 + exp0_2 + exp0_3 + exp0_4 + exp0_5 + exp0_6
def autoaug2arsaug(f):
    """Decorator converting AutoAugment-style integer levels (0-10) into
    the normalized [0, 1] magnitudes used by this codebase.

    Each op's level is first mapped onto its native parameter range via
    the per-op lambdas below, then rescaled by that op's (low, high)
    range as reported by get_augment.
    """
    def autoaug():
        # Ops without an entry keep their level unchanged (identity map).
        mapper = defaultdict(lambda: lambda x: x)
        mapper.update({
            'ShearX': lambda x: float_parameter(x, 0.3),
            'ShearY': lambda x: float_parameter(x, 0.3),
            'TranslateX': lambda x: int_parameter(x, 10),
            'TranslateY': lambda x: int_parameter(x, 10),
            'Rotate': lambda x: int_parameter(x, 30),
            'Solarize': lambda x: 256 - int_parameter(x, 256),
            'Posterize2': lambda x: 4 - int_parameter(x, 4),
            'Contrast': lambda x: float_parameter(x, 1.8) + .1,
            'Color': lambda x: float_parameter(x, 1.8) + .1,
            'Brightness': lambda x: float_parameter(x, 1.8) + .1,
            'Sharpness': lambda x: float_parameter(x, 1.8) + .1,
            'CutoutAbs': lambda x: int_parameter(x, 20)
        })

        def low_high(name, prev_value):
            # Normalize prev_value into [0, 1] using the op's native range.
            _, low, high = get_augment(name)
            return float(prev_value - low) / (high - low)

        converted = []
        for policy in f():
            converted.append([(name, pr, low_high(name, mapper[name](level)))
                              for name, pr, level in policy])
        return converted
    return autoaug
@autoaug2arsaug
def autoaug_paper_cifar10():
    """The 25 CIFAR-10 policies from the AutoAugment paper, with raw
    integer levels (converted to normalized magnitudes by the
    autoaug2arsaug decorator)."""
    return [
        [('Invert', 0.1, 7), ('Contrast', 0.2, 6)],
        [('Rotate', 0.7, 2), ('TranslateXAbs', 0.3, 9)],
        [('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)],
        [('ShearY', 0.5, 8), ('TranslateYAbs', 0.7, 9)],
        [('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)],
        [('ShearY', 0.2, 7), ('Posterize2', 0.3, 7)],
        [('Color', 0.4, 3), ('Brightness', 0.6, 7)],
        [('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)],
        [('Equalize', 0.6, 5), ('Equalize', 0.5, 1)],
        [('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)],
        [('Color', 0.7, 7), ('TranslateXAbs', 0.5, 8)],
        [('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)],
        [('TranslateYAbs', 0.4, 3), ('Sharpness', 0.2, 6)],
        [('Brightness', 0.9, 6), ('Color', 0.2, 6)],
        [('Solarize', 0.5, 2), ('Invert', 0.0, 3)],
        [('Equalize', 0.2, 0), ('AutoContrast', 0.6, 0)],
        [('Equalize', 0.2, 8), ('Equalize', 0.6, 4)],
        [('Color', 0.9, 9), ('Equalize', 0.6, 6)],
        [('AutoContrast', 0.8, 4), ('Solarize', 0.2, 8)],
        [('Brightness', 0.1, 3), ('Color', 0.7, 0)],
        [('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)],
        [('TranslateYAbs', 0.9, 9), ('TranslateYAbs', 0.7, 9)],
        [('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)],
        [('Equalize', 0.8, 8), ('Invert', 0.1, 3)],
        [('TranslateYAbs', 0.7, 9), ('AutoContrast', 0.9, 1)],
    ]
@autoaug2arsaug
def autoaug_policy():
    """AutoAugment policies found on Cifar.

    Returns the concatenation of three experiment groups (exp0*, exp1*,
    exp2*), each policy being a list of (op_name, probability, level)
    pairs; levels are converted to normalized magnitudes by the
    autoaug2arsaug decorator.
    """
    exp0_0 = [
        [('Invert', 0.1, 7), ('Contrast', 0.2, 6)],
        [('Rotate', 0.7, 2), ('TranslateXAbs', 0.3, 9)],
        [('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)],
        [('ShearY', 0.5, 8), ('TranslateYAbs', 0.7, 9)],
        [('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)]]
    exp0_1 = [
        [('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)],
        [('TranslateYAbs', 0.9, 9), ('TranslateYAbs', 0.7, 9)],
        [('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)],
        [('Equalize', 0.8, 8), ('Invert', 0.1, 3)],
        [('TranslateYAbs', 0.7, 9), ('AutoContrast', 0.9, 1)]]
    exp0_2 = [
        [('Solarize', 0.4, 5), ('AutoContrast', 0.0, 2)],
        [('TranslateYAbs', 0.7, 9), ('TranslateYAbs', 0.7, 9)],
        [('AutoContrast', 0.9, 0), ('Solarize', 0.4, 3)],
        [('Equalize', 0.7, 5), ('Invert', 0.1, 3)],
        [('TranslateYAbs', 0.7, 9), ('TranslateYAbs', 0.7, 9)]]
    exp0_3 = [
        [('Solarize', 0.4, 5), ('AutoContrast', 0.9, 1)],
        [('TranslateYAbs', 0.8, 9), ('TranslateYAbs', 0.9, 9)],
        [('AutoContrast', 0.8, 0), ('TranslateYAbs', 0.7, 9)],
        [('TranslateYAbs', 0.2, 7), ('Color', 0.9, 6)],
        [('Equalize', 0.7, 6), ('Color', 0.4, 9)]]
    exp1_0 = [
        [('ShearY', 0.2, 7), ('Posterize2', 0.3, 7)],
        [('Color', 0.4, 3), ('Brightness', 0.6, 7)],
        [('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)],
        [('Equalize', 0.6, 5), ('Equalize', 0.5, 1)],
        [('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)]]
    exp1_1 = [
        [('Brightness', 0.3, 7), ('AutoContrast', 0.5, 8)],
        [('AutoContrast', 0.9, 4), ('AutoContrast', 0.5, 6)],
        [('Solarize', 0.3, 5), ('Equalize', 0.6, 5)],
        [('TranslateYAbs', 0.2, 4), ('Sharpness', 0.3, 3)],
        [('Brightness', 0.0, 8), ('Color', 0.8, 8)]]
    exp1_2 = [
        [('Solarize', 0.2, 6), ('Color', 0.8, 6)],
        [('Solarize', 0.2, 6), ('AutoContrast', 0.8, 1)],
        [('Solarize', 0.4, 1), ('Equalize', 0.6, 5)],
        [('Brightness', 0.0, 0), ('Solarize', 0.5, 2)],
        [('AutoContrast', 0.9, 5), ('Brightness', 0.5, 3)]]
    exp1_3 = [
        [('Contrast', 0.7, 5), ('Brightness', 0.0, 2)],
        [('Solarize', 0.2, 8), ('Solarize', 0.1, 5)],
        [('Contrast', 0.5, 1), ('TranslateYAbs', 0.2, 9)],
        [('AutoContrast', 0.6, 5), ('TranslateYAbs', 0.0, 9)],
        [('AutoContrast', 0.9, 4), ('Equalize', 0.8, 4)]]
    exp1_4 = [
        [('Brightness', 0.0, 7), ('Equalize', 0.4, 7)],
        [('Solarize', 0.2, 5), ('Equalize', 0.7, 5)],
        [('Equalize', 0.6, 8), ('Color', 0.6, 2)],
        [('Color', 0.3, 7), ('Color', 0.2, 4)],
        [('AutoContrast', 0.5, 2), ('Solarize', 0.7, 2)]]
    exp1_5 = [
        [('AutoContrast', 0.2, 0), ('Equalize', 0.1, 0)],
        [('ShearY', 0.6, 5), ('Equalize', 0.6, 5)],
        [('Brightness', 0.9, 3), ('AutoContrast', 0.4, 1)],
        [('Equalize', 0.8, 8), ('Equalize', 0.7, 7)],
        [('Equalize', 0.7, 7), ('Solarize', 0.5, 0)]]
    exp1_6 = [
        [('Equalize', 0.8, 4), ('TranslateYAbs', 0.8, 9)],
        [('TranslateYAbs', 0.8, 9), ('TranslateYAbs', 0.6, 9)],
        [('TranslateYAbs', 0.9, 0), ('TranslateYAbs', 0.5, 9)],
        [('AutoContrast', 0.5, 3), ('Solarize', 0.3, 4)],
        [('Solarize', 0.5, 3), ('Equalize', 0.4, 4)]]
    exp2_0 = [
        [('Color', 0.7, 7), ('TranslateXAbs', 0.5, 8)],
        [('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)],
        [('TranslateYAbs', 0.4, 3), ('Sharpness', 0.2, 6)],
        [('Brightness', 0.9, 6), ('Color', 0.2, 8)],
        [('Solarize', 0.5, 2), ('Invert', 0.0, 3)]]
    exp2_1 = [
        [('AutoContrast', 0.1, 5), ('Brightness', 0.0, 0)],
        [('CutoutAbs', 0.2, 4), ('Equalize', 0.1, 1)],
        [('Equalize', 0.7, 7), ('AutoContrast', 0.6, 4)],
        [('Color', 0.1, 8), ('ShearY', 0.2, 3)],
        [('ShearY', 0.4, 2), ('Rotate', 0.7, 0)]]
    exp2_2 = [
        [('ShearY', 0.1, 3), ('AutoContrast', 0.9, 5)],
        [('TranslateYAbs', 0.3, 6), ('CutoutAbs', 0.3, 3)],
        [('Equalize', 0.5, 0), ('Solarize', 0.6, 6)],
        [('AutoContrast', 0.3, 5), ('Rotate', 0.2, 7)],
        [('Equalize', 0.8, 2), ('Invert', 0.4, 0)]]
    exp2_3 = [
        [('Equalize', 0.9, 5), ('Color', 0.7, 0)],
        [('Equalize', 0.1, 1), ('ShearY', 0.1, 3)],
        [('AutoContrast', 0.7, 3), ('Equalize', 0.7, 0)],
        [('Brightness', 0.5, 1), ('Contrast', 0.1, 7)],
        [('Contrast', 0.1, 4), ('Solarize', 0.6, 5)]]
    exp2_4 = [
        [('Solarize', 0.2, 3), ('ShearX', 0.0, 0)],
        [('TranslateXAbs', 0.3, 0), ('TranslateXAbs', 0.6, 0)],
        [('Equalize', 0.5, 9), ('TranslateYAbs', 0.6, 7)],
        [('ShearX', 0.1, 0), ('Sharpness', 0.5, 1)],
        [('Equalize', 0.8, 6), ('Invert', 0.3, 6)]]
    exp2_5 = [
        [('AutoContrast', 0.3, 9), ('CutoutAbs', 0.5, 3)],
        [('ShearX', 0.4, 4), ('AutoContrast', 0.9, 2)],
        [('ShearX', 0.0, 3), ('Posterize2', 0.0, 3)],
        [('Solarize', 0.4, 3), ('Color', 0.2, 4)],
        [('Equalize', 0.1, 4), ('Equalize', 0.7, 6)]]
    exp2_6 = [
        [('Equalize', 0.3, 8), ('AutoContrast', 0.4, 3)],
        [('Solarize', 0.6, 4), ('AutoContrast', 0.7, 6)],
        [('AutoContrast', 0.2, 9), ('Brightness', 0.4, 8)],
        [('Equalize', 0.1, 0), ('Equalize', 0.0, 6)],
        [('Equalize', 0.8, 4), ('Equalize', 0.0, 4)]]
    exp2_7 = [
        [('Equalize', 0.5, 5), ('AutoContrast', 0.1, 2)],
        [('Solarize', 0.5, 5), ('AutoContrast', 0.9, 5)],
        [('AutoContrast', 0.6, 1), ('AutoContrast', 0.7, 8)],
        [('Equalize', 0.2, 0), ('AutoContrast', 0.1, 2)],
        [('Equalize', 0.6, 9), ('Equalize', 0.4, 4)]]
    exp0s = exp0_0 + exp0_1 + exp0_2 + exp0_3
    exp1s = exp1_0 + exp1_1 + exp1_2 + exp1_3 + exp1_4 + exp1_5 + exp1_6
    exp2s = exp2_0 + exp2_1 + exp2_2 + exp2_3 + exp2_4 + exp2_5 + exp2_6 + exp2_7
    return exp0s + exp1s + exp2s
# Augmentation levels are expressed on an integer scale of 0..PARAMETER_MAX.
PARAMETER_MAX = 10


def float_parameter(level, maxval):
    # Linearly scale `level` from [0, PARAMETER_MAX] to [0.0, maxval].
    return float(level) * maxval / PARAMETER_MAX


def int_parameter(level, maxval):
    # Same scaling as float_parameter, truncated toward zero.
    return int(float_parameter(level, maxval))
def no_duplicates(f):
    """Decorator for zero-argument policy factories.

    Wraps `f` so that its returned policy list is passed through
    remove_deplicates before being handed to the caller.
    """
    def wrap_remove_duplicates():
        return remove_deplicates(f())
    return wrap_remove_duplicates


def remove_deplicates(policies):
    """Drop policies whose sequence of op names has already been seen.

    Two policies count as duplicates when their ops carry the same names in
    the same order; the probability and magnitude fields (op[1], op[2]) are
    ignored. The first occurrence of each name sequence is kept, and the
    original relative order is preserved.
    """
    seen = set()
    deduped = []
    for ops in policies:
        # Signature is the underscore-joined op names, e.g. "Equalize_Rotate".
        signature = '_'.join(op[0] for op in ops)
        if signature not in seen:
            seen.add(signature)
            deduped.append(ops)
    return deduped
def fa_reduced_cifar10():
p = [[["Contrast", 0.8320659688593578, 0.49884310562180767], ["TranslateX", 0.41849883971249136, 0.394023086494538]], [["Color", 0.3500483749890918, 0.43355143929883955], ["Color", 0.5120716140300229, 0.7508299643325016]], [["Rotate", 0.9447932604389472, 0.29723465088990375], ["Sharpness", 0.1564936149799504, 0.47169309978091745]], [["Rotate", 0.5430015349185097, 0.6518626678905443], ["Color", 0.5694844928020679, 0.3494533005430269]], [["AutoContrast", 0.5558922032451064, 0.783136004977799], ["TranslateY", 0.683914191471972, 0.7597025305860181]], [["TranslateX", 0.03489224481658926, 0.021025488042663354], ["Equalize", 0.4788637403857401, 0.3535481281496117]], [["Sharpness", 0.6428916269794158, 0.22791511918580576], ["Contrast", 0.016014045073950323, 0.26811312269487575]], [["Rotate", 0.2972727228410451, 0.7654251516829896], ["AutoContrast", 0.16005809254943348, 0.5380523650108116]], [["Contrast", 0.5823671057717301, 0.7521166301398389], ["TranslateY", 0.9949449214751978, 0.9612671341689751]], [["Equalize", 0.8372126687702321, 0.6944127225621206], ["Rotate", 0.25393282929784755, 0.3261658365286546]], [["Invert", 0.8222011603194572, 0.6597915864008403], ["Posterize", 0.31858707654447327, 0.9541013715579584]], [["Sharpness", 0.41314621282107045, 0.9437344470879956], ["Cutout", 0.6610495837889337, 0.674411664255093]], [["Contrast", 0.780121736705407, 0.40826152397463156], ["Color", 0.344019192125256, 0.1942922781355767]], [["Rotate", 0.17153139555621344, 0.798745732456474], ["Invert", 0.6010555860501262, 0.320742172554767]], [["Invert", 0.26816063450777416, 0.27152062163148327], ["Equalize", 0.6786829200236982, 0.7469412443514213]], [["Contrast", 0.3920564414367518, 0.7493644582838497], ["TranslateY", 0.8941657805606704, 0.6580846856375955]], [["Equalize", 0.875509207399372, 0.9061130537645283], ["Cutout", 0.4940280679087308, 0.7896229623628276]], [["Contrast", 0.3331423298065147, 0.7170041362529597], ["ShearX", 0.7425484291842793, 0.5285117152426109]], [["Equalize", 
0.97344237365026, 0.4745759720473106], ["TranslateY", 0.055863458430295276, 0.9625142022954672]], [["TranslateX", 0.6810614083109192, 0.7509937355495521], ["TranslateY", 0.3866463019475701, 0.5185481505576112]], [["Sharpness", 0.4751529944753671, 0.550464012488733], ["Cutout", 0.9472914750534814, 0.5584925992985023]], [["Contrast", 0.054606784909375095, 0.17257080196712182], ["Cutout", 0.6077026782754803, 0.7996504165944938]], [["ShearX", 0.328798428243695, 0.2769563264079157], ["Cutout", 0.9037632437023772, 0.4915809476763595]], [["Cutout", 0.6891202672363478, 0.9951490996172914], ["Posterize", 0.06532762462628705, 0.4005246609075227]], [["TranslateY", 0.6908583592523334, 0.725612120376128], ["Rotate", 0.39907735501746666, 0.36505798032223147]], [["TranslateX", 0.10398364107399072, 0.5913918470536627], ["Rotate", 0.7169811539340365, 0.8283850670648724]], [["ShearY", 0.9526373530768361, 0.4482347365639251], ["Contrast", 0.4203947336351471, 0.41526799558953864]], [["Contrast", 0.24894431199700073, 0.09578870500994707], ["Solarize", 0.2273713345927395, 0.6214942914963707]], [["TranslateX", 0.06331228870032912, 0.8961907489444944], ["Cutout", 0.5110007859958743, 0.23704875994050723]], [["Cutout", 0.3769183548846172, 0.6560944580253987], ["TranslateY", 0.7201924599434143, 0.4132476526938319]], [["Invert", 0.6707431156338866, 0.11622795952464149], ["Posterize", 0.12075972752370845, 0.18024933294172307]], [["Color", 0.5010057264087142, 0.5277767327434318], ["Rotate", 0.9486115946366559, 0.31485546630220784]], [["ShearX", 0.31741302466630406, 0.1991215806270692], ["Invert", 0.3744727015523084, 0.6914113986757578]], [["Brightness", 0.40348479064392617, 0.8924182735724888], ["Brightness", 0.1973098763857779, 0.3939288933689655]], [["Color", 0.01208688664030888, 0.6055693000885217], ["Equalize", 0.433259451147881, 0.420711137966155]], [["Cutout", 0.2620018360076487, 0.11594468278143644], ["Rotate", 0.1310401567856766, 0.7244318146544101]], [["ShearX", 0.15249651845933576, 
0.35277277071866986], ["Contrast", 0.28221794032094016, 0.42036586509397444]], [["Brightness", 0.8492912150468908, 0.26386920887886056], ["Solarize", 0.8764208056263386, 0.1258195122766067]], [["ShearX", 0.8537058239675831, 0.8415101816171269], ["AutoContrast", 0.23958568830416294, 0.9889049529564014]], [["Rotate", 0.6463207930684552, 0.8750192129056532], ["Contrast", 0.6865032211768652, 0.8564981333033417]], [["Equalize", 0.8877190311811044, 0.7370995897848609], ["TranslateX", 0.9979660314391368, 0.005683998913244781]], [["Color", 0.6420017551677819, 0.6225337265571229], ["Solarize", 0.8344504978566362, 0.8332856969941151]], [["ShearX", 0.7439332981992567, 0.9747608698582039], ["Equalize", 0.6259189804002959, 0.028017478098245174]], [["TranslateY", 0.39794770293366843, 0.8482966537902709], ["Rotate", 0.9312935630405351, 0.5300586925826072]], [["Cutout", 0.8904075572021911, 0.3522934742068766], ["Equalize", 0.6431186289473937, 0.9930577962126151]], [["Contrast", 0.9183553386089476, 0.44974266209396685], ["TranslateY", 0.8193684583123862, 0.9633741156526566]], [["ShearY", 0.616078299924283, 0.19219314358924766], ["Solarize", 0.1480945914138868, 0.05922109541654652]], [["Solarize", 0.25332455064128157, 0.18853037431947994], ["ShearY", 0.9518390093954243, 0.14603930044061142]], [["Color", 0.8094378664335412, 0.37029830225408433], ["Contrast", 0.29504113617467465, 0.065096365468442]], [["AutoContrast", 0.7075167558685455, 0.7084621693458267], ["Sharpness", 0.03555539453323875, 0.5651948313888351]], [["TranslateY", 0.5969982600930229, 0.9857264201029572], ["Rotate", 0.9898628564873607, 0.1985685534926911]], [["Invert", 0.14915939942810352, 0.6595839632446547], ["Posterize", 0.768535289994361, 0.5997358684618563]], [["Equalize", 0.9162691815967111, 0.3331035307653627], ["Color", 0.8169118187605557, 0.7653910258006366]], [["Rotate", 0.43489185299530897, 0.752215269135173], ["Brightness", 0.1569828560334806, 0.8002808712857853]], [["Invert", 0.931876215328345, 
0.029428644395760872], ["Equalize", 0.6330036052674145, 0.7235531014288485]], [["ShearX", 0.5216138393704968, 0.849272958911589], ["AutoContrast", 0.19572688655120263, 0.9786551568639575]], [["ShearX", 0.9899586208275011, 0.22580547500610293], ["Brightness", 0.9831311903178727, 0.5055159610855606]], [["Brightness", 0.29179117009211486, 0.48003584672937294], ["Solarize", 0.7544252317330058, 0.05806581735063043]], [["AutoContrast", 0.8919800329537786, 0.8511261613698553], ["Contrast", 0.49199446084551035, 0.7302297140181429]], [["Cutout", 0.7079723710644835, 0.032565015538375874], ["AutoContrast", 0.8259782090388609, 0.7860708789468442]], [["Posterize", 0.9980262659801914, 0.6725084224935673], ["ShearY", 0.6195568269664682, 0.5444170291816751]], [["Posterize", 0.8687351834713217, 0.9978004914422602], ["Equalize", 0.4532646848325955, 0.6486748015710573]], [["Contrast", 0.2713928776950594, 0.15255249557027806], ["ShearY", 0.9276834387970199, 0.5266542862333478]], [["AutoContrast", 0.5240786618055582, 0.9325642258930253], ["Cutout", 0.38448627892037357, 0.21219415055662394]], [["TranslateX", 0.4299517937295352, 0.20133751201386152], ["TranslateX", 0.6753468310276597, 0.6985621035400441]], [["Rotate", 0.4006472499103597, 0.6704748473357586], ["Equalize", 0.674161668148079, 0.6528530101705237]], [["Equalize", 0.9139902833674455, 0.9015103149680278], ["Sharpness", 0.7289667720691948, 0.7623606352376232]], [["Cutout", 0.5911267429414259, 0.5953141187177585], ["Rotate", 0.5219064817468504, 0.11085141355857986]], [["TranslateX", 0.3620095133946267, 0.26194039409492476], ["Rotate", 0.3929841359545597, 0.4913406720338047]], [["Invert", 0.5175298901458896, 0.001661410821811482], ["Invert", 0.004656581318332242, 0.8157622192213624]], [["AutoContrast", 0.013609693335051465, 0.9318651749409604], ["Invert", 0.8980844358979592, 0.2268511862780368]], [["ShearY", 0.7717126261142194, 0.09975547983707711], ["Equalize", 0.7808494401429572, 0.4141412091009955]], [["TranslateX", 
0.5878675721341552, 0.29813268038163376], ["Posterize", 0.21257276051591356, 0.2837285296666412]], [["Brightness", 0.4268335108566488, 0.4723784991635417], ["Cutout", 0.9386262901570471, 0.6597686851494288]], [["ShearX", 0.8259423807590159, 0.6215304795389204], ["Invert", 0.6663365779667443, 0.7729669184580387]], [["ShearY", 0.4801338723951297, 0.5220145420100984], ["Solarize", 0.9165803796596582, 0.04299335502862134]], [["Color", 0.17621114853558817, 0.7092601754635434], ["ShearX", 0.9014406936728542, 0.6028711944367818]], [["Rotate", 0.13073284972300658, 0.9088831512880851], ["ShearX", 0.4228105332316806, 0.7985249783662675]], [["Brightness", 0.9182753692730031, 0.0063635477774044436], ["Color", 0.4279825602663798, 0.28727149118585327]], [["Equalize", 0.578218285372267, 0.9611758542158054], ["Contrast", 0.5471552264150691, 0.8819635504027596]], [["Brightness", 0.3208589067274543, 0.45324733565167497], ["Solarize", 0.5218455808633233, 0.5946097503647126]], [["Equalize", 0.3790381278653, 0.8796082535775276], ["Solarize", 0.4875526773149246, 0.5186585878052613]], [["ShearY", 0.12026461479557571, 0.1336953429068397], ["Posterize", 0.34373988646025766, 0.8557727670803785]], [["Cutout", 0.2396745247507467, 0.8123036135209865], ["Equalize", 0.05022807681008945, 0.6648492261984383]], [["Brightness", 0.35226676470748264, 0.5950011514888855], ["Rotate", 0.27555076067000894, 0.9170063321486026]], [["ShearX", 0.320224630647278, 0.9683584649071976], ["Invert", 0.6905585196648905, 0.5929115667894518]], [["Color", 0.9941395717559652, 0.7474441679798101], ["Sharpness", 0.7559998478658021, 0.6656052889626682]], [["ShearY", 0.4004220568345669, 0.5737646992826074], ["Equalize", 0.9983495213746147, 0.8307907033362303]], [["Color", 0.13726809242038207, 0.9378850119950549], ["Equalize", 0.9853362454752445, 0.42670264496554156]], [["Invert", 0.13514636153298576, 0.13516363849081958], ["Sharpness", 0.2031189356693901, 0.6110226359872745]], [["TranslateX", 0.7360305209630797, 
0.41849698571655614], ["Contrast", 0.8972161549144564, 0.7820296625565641]], [["Color", 0.02713118828682548, 0.717110684828096], ["TranslateY", 0.8118759006836348, 0.9120098002024992]], [["Sharpness", 0.2915428949403711, 0.7630303724396518], ["Solarize", 0.22030536162851078, 0.38654526772661757]], [["Equalize", 0.9949114839538582, 0.7193630656062793], ["AutoContrast", 0.00889496657931299, 0.2291400476524672]], [["Rotate", 0.7120948976490488, 0.7804359309791055], ["Cutout", 0.10445418104923654, 0.8022999156052766]], [["Equalize", 0.7941710117902707, 0.8648170634288153], ["Invert", 0.9235642581144047, 0.23810725859722381]], [["Cutout", 0.3669397998623156, 0.42612815083245004], ["Solarize", 0.5896322046441561, 0.40525016166956795]], [["Color", 0.8389858785714184, 0.4805764176488667], ["Rotate", 0.7483931487048825, 0.4731174601400677]], [["Sharpness", 0.19006538611394763, 0.9480745790240234], ["TranslateY", 0.13904429049439282, 0.04117685330615939]], [["TranslateY", 0.9958097661701637, 0.34853788612580905], ["Cutout", 0.2235829624082113, 0.3737887095480745]], [["ShearX", 0.635453761342424, 0.6063917273421382], ["Posterize", 0.8738297843709666, 0.4893042590265556]], [["Brightness", 0.7907245198402727, 0.7082189713070691], ["Color", 0.030313003541849737, 0.6927897798493439]], [["Cutout", 0.6965622481073525, 0.8103522907758203], ["ShearY", 0.6186794303078708, 0.28640671575703547]], [["ShearY", 0.43734910588450226, 0.32549342535621517], ["ShearX", 0.08154980987651872, 0.3286764923112455]], [["AutoContrast", 0.5262462005050853, 0.8175584582465848], ["Contrast", 0.8683217097363655, 0.548776281479276]], [["ShearY", 0.03957878500311707, 0.5102350637943197], ["Rotate", 0.13794708520303778, 0.38035687712954236]], [["Sharpness", 0.634288567312677, 0.6387948309075822], ["AutoContrast", 0.13437288694693272, 0.7150448869023095]], [["Contrast", 0.5198339640088544, 0.9409429390321714], ["Cutout", 0.09489154903321972, 0.6228488803821982]], [["Equalize", 0.8955909061806043, 
0.7727336527163008], ["AutoContrast", 0.6459479564441762, 0.7065467781139214]], [["Invert", 0.07214420843537739, 0.15334721382249505], ["ShearX", 0.9242027778363903, 0.5809187849982554]], [["Equalize", 0.9144084379856188, 0.9457539278608998], ["Sharpness", 0.14337499858300173, 0.5978054365425495]], [["Posterize", 0.18894269796951202, 0.14676331276539045], ["Equalize", 0.846204299950047, 0.0720601838168885]], [["Contrast", 0.47354445405741163, 0.1793650330107468], ["Solarize", 0.9086106327264657, 0.7578807802091502]], [["AutoContrast", 0.11805466892967886, 0.6773620948318575], ["TranslateX", 0.584222568299264, 0.9475693349391936]], [["Brightness", 0.5833017701352768, 0.6892593824176294], ["AutoContrast", 0.9073141314561828, 0.5823085733964589]], [["TranslateY", 0.5711231614144834, 0.6436240447620021], ["Contrast", 0.21466964050052473, 0.8042843954486391]], [["Contrast", 0.22967904487976765, 0.2343103109298762], ["Invert", 0.5502897289159286, 0.386181060792375]], [["Invert", 0.7008423439928628, 0.4234003051405053], ["Rotate", 0.77270460187611, 0.6650852696828039]], [["Invert", 0.050618322309703534, 0.24277027926683614], ["TranslateX", 0.789703489736613, 0.5116446685339312]], [["Color", 0.363898083076868, 0.7870323584210503], ["ShearY", 0.009608425513626617, 0.6188625018465327]], [["TranslateY", 0.9447601615216088, 0.8605867115798349], ["Equalize", 0.24139180127003634, 0.9587337957930782]], [["Equalize", 0.3968589440144503, 0.626206375426996], ["Solarize", 0.3215967960673186, 0.826785464835443]], [["TranslateX", 0.06947339047121326, 0.016705969558222122], ["Contrast", 0.6203392406528407, 0.6433525559906872]], [["Solarize", 0.2479835265518212, 0.6335009955617831], ["Sharpness", 0.6260191862978083, 0.18998095149428562]], [["Invert", 0.9818841924943431, 0.03252098144087934], ["TranslateY", 0.9740718042586802, 0.32038951753031475]], [["Solarize", 0.8795784664090814, 0.7014953994354041], ["AutoContrast", 0.8508018319577783, 0.09321935255338443]], [["Color", 
0.8067046326105318, 0.13732893832354054], ["Contrast", 0.7358549680271418, 0.7880588355974301]], [["Posterize", 0.5005885536838065, 0.7152229305267599], ["ShearX", 0.6714249591308944, 0.7732232697859908]], [["TranslateY", 0.5657943483353953, 0.04622399873706862], ["AutoContrast", 0.2787442688649845, 0.567024378767143]], [["ShearY", 0.7589839214283295, 0.041071003934029404], ["Equalize", 0.3719852873722692, 0.43285778682687326]], [["Posterize", 0.8841266183653291, 0.42441306955476366], ["Cutout", 0.06578801759412933, 0.5961125797961526]], [["Rotate", 0.4057875004314082, 0.20241115848366442], ["AutoContrast", 0.19331542807918067, 0.7175484678480565]], [["Contrast", 0.20331327116693088, 0.17135387852218742], ["Cutout", 0.6282459410351067, 0.6690015305529187]], [["ShearX", 0.4309850328306535, 0.99321178125828], ["AutoContrast", 0.01809604030453338, 0.693838277506365]], [["Rotate", 0.24343531125298268, 0.5326412444169899], ["Sharpness", 0.8663989992597494, 0.7643990609130789]], [["Rotate", 0.9785019204622459, 0.8941922576710696], ["ShearY", 0.3823185048761075, 0.9258854046017292]], [["ShearY", 0.5502613342963388, 0.6193478797355644], ["Sharpness", 0.2212116534610532, 0.6648232390110979]], [["TranslateY", 0.43222920981513757, 0.5657636397633089], ["ShearY", 0.9153733286073634, 0.4868521171273169]], [["Posterize", 0.12246560519738336, 0.9132288825898972], ["Cutout", 0.6058471327881816, 0.6426901876150983]], [["Color", 0.3693970222695844, 0.038929141432555436], ["Equalize", 0.6228052875653781, 0.05064436511347281]], [["Color", 0.7172600331356893, 0.2824542634766688], ["Color", 0.425293116261649, 0.1796441283313972]], [["Cutout", 0.7539608428122959, 0.9896141728228921], ["Solarize", 0.17811081117364758, 0.9064195503634402]], [["AutoContrast", 0.6761242607012717, 0.6484842446399923], ["AutoContrast", 0.1978135076901828, 0.42166879492601317]], [["ShearY", 0.25901666379802524, 0.4770778270322449], ["Solarize", 0.7640963173407052, 0.7548463227094349]], [["TranslateY", 
0.9222487731783499, 0.33658389819616463], ["Equalize", 0.9159112511468139, 0.8877136302394797]], [["TranslateX", 0.8994836977137054, 0.11036053676846591], ["Sharpness", 0.9040333410652747, 0.007266095214664592]], [["Invert", 0.627758632524958, 0.8075245097227242], ["Color", 0.7525387912148516, 0.05950239294733184]], [["TranslateX", 0.43505193292761857, 0.38108822876120796], ["TranslateY", 0.7432578052364004, 0.685678116134759]], [["Contrast", 0.9293507582470425, 0.052266842951356196], ["Posterize", 0.45187123977747456, 0.8228290399726782]], [["ShearX", 0.07240786542746291, 0.8945667925365756], ["Brightness", 0.5305443506561034, 0.12025274552427578]], [["Invert", 0.40157564448143335, 0.5364745514006678], ["Posterize", 0.3316124671813876, 0.43002413237035997]], [["ShearY", 0.7152314630009072, 0.1938339083417453], ["Invert", 0.14102478508140615, 0.41047623580174253]], [["Equalize", 0.19862832613849246, 0.5058521685279254], ["Sharpness", 0.16481208629549782, 0.29126323102770557]], [["Equalize", 0.6951591703541872, 0.7294822018800076], ["ShearX", 0.8726656726111219, 0.3151484225786487]], [["Rotate", 0.17234370554263745, 0.9356543193000078], ["TranslateX", 0.4954374070084091, 0.05496727345849217]], [["Contrast", 0.347405480122842, 0.831553005022885], ["ShearX", 0.28946367213071134, 0.11905898704394013]], [["Rotate", 0.28096672507990683, 0.16181284050307398], ["Color", 0.6554918515385365, 0.8739728050797386]], [["Solarize", 0.05408073374114053, 0.5357087283758337], ["Posterize", 0.42457175211495335, 0.051807130609045515]], [["TranslateY", 0.6216669362331361, 0.9691341207381867], ["Rotate", 0.9833579358130944, 0.12227426932415297]], [["AutoContrast", 0.7572619475282892, 0.8062834082727393], ["Contrast", 0.1447865402875591, 0.40242646573228436]], [["Rotate", 0.7035658783466086, 0.9840285268256428], ["Contrast", 0.04613961510519471, 0.7666683217450163]], [["TranslateX", 0.4580462177951252, 0.6448678609474686], ["AutoContrast", 0.14845695613708987, 0.1581134188537895]], 
[["Color", 0.06795037145259564, 0.9115552821158709], ["TranslateY", 0.9972953449677655, 0.6791016521791214]], [["Cutout", 0.3586908443690823, 0.11578558293480945], ["Color", 0.49083981719164294, 0.6924851425917189]], [["Brightness", 0.7994717831637873, 0.7887316255321768], ["Posterize", 0.01280463502435425, 0.2799086732858721]], [["ShearY", 0.6733451536131859, 0.8122332639516706], ["AutoContrast", 0.20433889615637357, 0.29023346867819966]], [["TranslateY", 0.709913512385177, 0.6538196931503809], ["Invert", 0.06629795606579203, 0.40913219547548296]], [["Sharpness", 0.4704559834362948, 0.4235993305308414], ["Equalize", 0.7578132044306966, 0.9388824249397175]], [["AutoContrast", 0.5281702802395268, 0.8077253610116979], ["Equalize", 0.856446858814119, 0.0479755681647559]], [["Color", 0.8244145826797791, 0.038409264586238945], ["Equalize", 0.4933123249234237, 0.8251940933672189]], [["TranslateX", 0.23949314158035084, 0.13576027004706692], ["ShearX", 0.8547563771688399, 0.8309262160483606]], [["Cutout", 0.4655680937486001, 0.2819807000622825], ["Contrast", 0.8439552665937905, 0.4843617871587037]], [["TranslateX", 0.19142454476784831, 0.7516148119169537], ["AutoContrast", 0.8677128351329768, 0.34967990912346336]], [["Contrast", 0.2997868299880966, 0.919508054854469], ["AutoContrast", 0.3003418493384957, 0.812314984368542]], [["Invert", 0.1070424236198183, 0.614674386498809], ["TranslateX", 0.5010973510899923, 0.20828478805259465]], [["Contrast", 0.6775882415611454, 0.6938564815591685], ["Cutout", 0.4814634264207498, 0.3086844939744179]], [["TranslateY", 0.939427105020265, 0.02531043619423201], ["Contrast", 0.793754257944812, 0.6676072472565451]], [["Sharpness", 0.09833672397575444, 0.5937214638292085], ["Rotate", 0.32530675291753763, 0.08302275740932441]], [["Sharpness", 0.3096455511562728, 0.6726732004553959], ["TranslateY", 0.43268997648796537, 0.8755012330217743]], [["ShearY", 0.9290771880324833, 0.22114736271319912], ["Equalize", 0.5520199288501478, 
0.34269650332060553]], [["AutoContrast", 0.39763980746649374, 0.4597414582725454], ["Contrast", 0.941507852412761, 0.24991270562477041]], [["Contrast", 0.19419400547588095, 0.9127524785329233], ["Invert", 0.40544905179551727, 0.770081532844878]], [["Invert", 0.30473757368608334, 0.23534811781828846], ["Cutout", 0.26090722356706686, 0.5478390909877727]], [["Posterize", 0.49434361308057373, 0.05018423270527428], ["Color", 0.3041910676883317, 0.2603810415446437]], [["Invert", 0.5149061746764011, 0.9507449210221298], ["TranslateY", 0.4458076521892904, 0.8235358255774426]], [["Cutout", 0.7900006753351625, 0.905578861382507], ["Cutout", 0.6707153655762056, 0.8236715672258502]], [["Solarize", 0.8750534386579575, 0.10337670467100568], ["Posterize", 0.6102379615481381, 0.9264503915416868]], [["ShearY", 0.08448689377082852, 0.13981233725811626], ["TranslateX", 0.13979689669329498, 0.768774869872818]], [["TranslateY", 0.35752572266759985, 0.22827299847812488], ["Solarize", 0.3906957174236011, 0.5663314388307709]], [["ShearY", 0.29155240367061563, 0.8427516352971683], ["ShearX", 0.988825367441916, 0.9371258864857649]], [["Posterize", 0.3470780859769458, 0.5467686612321239], ["Rotate", 0.5758606274160093, 0.8843838082656007]], [["Cutout", 0.07825368363221841, 0.3230799425855425], ["Equalize", 0.2319163865298529, 0.42133965674727325]], [["Invert", 0.41972172597448654, 0.34618622513582953], ["ShearX", 0.33638469398198834, 0.9098575535928108]], [["Invert", 0.7322652233340448, 0.7747502957687412], ["Cutout", 0.9643121397298106, 0.7983335094634907]], [["TranslateY", 0.30039942808098496, 0.229018798182827], ["TranslateY", 0.27009499739380194, 0.6435577237846236]], [["Color", 0.38245274994070644, 0.7030758568461645], ["ShearX", 0.4429321461666281, 0.6963787864044149]], [["AutoContrast", 0.8432798685515605, 0.5775214369578088], ["Brightness", 0.7140899735355927, 0.8545854720117658]], [["Rotate", 0.14418935535613786, 0.5637968282213426], ["Color", 0.7115231912479835, 
0.32584796564566776]], [["Sharpness", 0.4023501062807533, 0.4162097130412771], ["Brightness", 0.5536372686153666, 0.03004023273348777]], [["TranslateX", 0.7526053265574295, 0.5365938133399961], ["Cutout", 0.07914142706557492, 0.7544953091603148]], [["TranslateY", 0.6932934644882822, 0.5302211727137424], ["Invert", 0.5040606028391255, 0.6074863635108957]], [["Sharpness", 0.5013938602431629, 0.9572417724333157], ["TranslateY", 0.9160516359783026, 0.41798927975391675]], [["ShearY", 0.5130018836722556, 0.30209438428424185], ["Color", 0.15017170588500262, 0.20653495360587826]], [["TranslateX", 0.5293300090022314, 0.6407011888285266], ["Rotate", 0.4809817860439001, 0.3537850070371702]], [["Equalize", 0.42243081336551014, 0.1347
gitextract_b3y44xt6/ ├── .gitattributes ├── .gitignore ├── CITATION.cff ├── LICENSE ├── QuickStart.ipynb ├── README.md ├── collect_results.py ├── configs/ │ ├── cifar10/ │ │ ├── dense121.yaml │ │ ├── resnet18.yaml │ │ ├── resnet18_add-uniform-noise-aug.yaml │ │ ├── resnet18_add-uniform-noise.yaml │ │ ├── resnet18_augement.yaml │ │ ├── resnet18_augmentation.yaml │ │ ├── resnet18_classpoison.yaml │ │ ├── resnet18_classpoison_targeted.yaml │ │ ├── resnet18_cutmix.yaml │ │ ├── resnet18_cutout.yaml │ │ ├── resnet18_denoise.yaml │ │ ├── resnet18_madrys.yaml │ │ ├── resnet18_mixup.yaml │ │ ├── resnet50.yaml │ │ ├── toy_cifar.yaml │ │ └── toy_cifar_madrys.yaml │ ├── cifar100/ │ │ ├── dense121.yaml │ │ ├── resnet18.yaml │ │ ├── resnet18_madrys.yaml │ │ └── resnet50.yaml │ ├── cifar101/ │ │ └── resnet18.yaml │ ├── face/ │ │ └── InceptionResnet.yaml │ ├── imagenet-mini/ │ │ ├── dense121.yaml │ │ ├── resnet18.yaml │ │ └── resnet50.yaml │ ├── svhn/ │ │ ├── dense121.yaml │ │ ├── resnet18.yaml │ │ ├── resnet18_madrys.yaml │ │ └── resnet50.yaml │ └── tiny-imagenet/ │ ├── dense121.yaml │ ├── resnet18.yaml │ └── resnet50.yaml ├── dataset.py ├── evaluator.py ├── fast_autoaugment/ │ ├── .gitignore │ ├── FastAutoAugment/ │ │ ├── __init__.py │ │ ├── archive.py │ │ ├── aug_mixup.py │ │ ├── augmentations.py │ │ ├── common.py │ │ ├── data.py │ │ ├── imagenet.py │ │ ├── lr_scheduler.py │ │ ├── metrics.py │ │ ├── networks/ │ │ │ ├── __init__.py │ │ │ ├── efficientnet_pytorch/ │ │ │ │ ├── __init__.py │ │ │ │ ├── condconv.py │ │ │ │ ├── model.py │ │ │ │ └── utils.py │ │ │ ├── pyramidnet.py │ │ │ ├── resnet.py │ │ │ ├── shakedrop.py │ │ │ ├── shakeshake/ │ │ │ │ ├── __init__.py │ │ │ │ ├── shake_resnet.py │ │ │ │ ├── shake_resnext.py │ │ │ │ └── shakeshake.py │ │ │ └── wideresnet.py │ │ ├── safe_shell_exec.py │ │ ├── search.py │ │ ├── tf_port/ │ │ │ ├── __init__.py │ │ │ ├── rmsprop.py │ │ │ └── tpu_bn.py │ │ ├── train.py │ │ └── train_dist.py │ ├── LICENSE │ ├── README.md │ ├── __init__.py │ 
├── archive.py │ ├── confs/ │ │ ├── efficientnet_b0.yaml │ │ ├── efficientnet_b0_condconv.yaml │ │ ├── efficientnet_b1.yaml │ │ ├── efficientnet_b2.yaml │ │ ├── efficientnet_b3.yaml │ │ ├── efficientnet_b4.yaml │ │ ├── pyramid272_cifar.yaml │ │ ├── resnet200.yaml │ │ ├── resnet50.yaml │ │ ├── resnet50_mixup.yaml │ │ ├── shake26_2x112d_cifar.yaml │ │ ├── shake26_2x32d_cifar.yaml │ │ ├── shake26_2x96d_cifar.yaml │ │ ├── wresnet28x10_cifar.yaml │ │ ├── wresnet28x10_svhn.yaml │ │ └── wresnet40x2_cifar.yaml │ └── requirements.txt ├── madrys.py ├── main.py ├── models/ │ ├── DenseNet.py │ ├── ResNet.py │ ├── ToyModel.py │ ├── __init__.py │ ├── download.py │ └── inception_resnet_v1.py ├── perturbation.py ├── requirements.txt ├── scripts/ │ ├── cifar10/ │ │ ├── min-max-noise/ │ │ │ ├── classwise-noise/ │ │ │ │ ├── exp_setting.sh │ │ │ │ ├── search_perturbation_noise.sh │ │ │ │ ├── submit.sh │ │ │ │ ├── train.sh │ │ │ │ └── train.slurm │ │ │ └── samplewise-noise/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ ├── min-min-noise/ │ │ │ ├── classwise-noise/ │ │ │ │ ├── exp_setting.sh │ │ │ │ ├── search_perturbation_noise.sh │ │ │ │ ├── submit.sh │ │ │ │ ├── train.sh │ │ │ │ └── train.slurm │ │ │ └── samplewise-noise/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ └── random-noise/ │ │ ├── classwise-noise/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ └── samplewise-noise/ │ │ ├── exp_setting.sh │ │ ├── search_perturbation_noise.sh │ │ ├── submit.sh │ │ ├── train.sh │ │ └── train.slurm │ ├── cifar10-extension/ │ │ └── min-min-noise/ │ │ ├── classwise-noise-2/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ ├── classwise-noise-eps=16/ │ │ │ ├── 
exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ ├── classwise-noise-eps=24/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ ├── classwise-noise-random-patch16/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ ├── classwise-noise-random-patch24/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ ├── classwise-noise-random-patch8/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ ├── classwise-noise-transfer-tiny-imagenet/ │ │ │ ├── exp_setting.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ ├── samplewise-noise-eps=16/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ ├── samplewise-noise-eps=24/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ ├── samplewise-noise-random-patch16/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ ├── samplewise-noise-random-patch24/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ └── samplewise-noise-random-patch8/ │ │ ├── exp_setting.sh │ │ ├── search_perturbation_noise.sh │ │ ├── submit.sh │ │ ├── train.sh │ │ └── train.slurm │ ├── cifar100/ │ │ └── min-min-noise/ │ │ ├── classwise-noise/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ └── samplewise-noise/ │ │ ├── exp_setting.sh │ │ ├── search_perturbation_noise.sh │ │ ├── submit.sh │ │ 
├── train.sh │ │ └── train.slurm │ ├── cifar101/ │ │ ├── exp_setting.sh │ │ └── train.sh │ ├── face/ │ │ └── min-min-noise/ │ │ ├── exp_setting.sh │ │ ├── search_perturbation_noise.sh │ │ ├── train.sh │ │ ├── train.slurm │ │ ├── train_clean.sh │ │ ├── train_clean.slurm │ │ ├── train_protected.sh │ │ └── train_protected.slurm │ ├── imagenet-mini/ │ │ └── min-min-noise/ │ │ ├── classwise-noise/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ └── samplewise-noise/ │ │ ├── exp_setting.sh │ │ ├── search_perturbation_noise.sh │ │ ├── submit.sh │ │ ├── train.sh │ │ └── train.slurm │ ├── svhn/ │ │ └── min-min-noise/ │ │ ├── classwise-noise/ │ │ │ ├── exp_setting.sh │ │ │ ├── search_perturbation_noise.sh │ │ │ ├── submit.sh │ │ │ ├── train.sh │ │ │ └── train.slurm │ │ └── samplewise-noise/ │ │ ├── exp_setting.sh │ │ ├── search_perturbation_noise.sh │ │ ├── submit.sh │ │ ├── train.sh │ │ └── train.slurm │ └── tiny-imagenet/ │ └── min-min-noise/ │ └── classwise-noise/ │ ├── exp_setting.sh │ └── search_perturbation_noise.sh ├── toolbox.py ├── trainer.py └── util.py
SYMBOL INDEX (431 symbols across 41 files)
FILE: collect_results.py
function load_results (line 28) | def load_results(targt_exp, model_name):
FILE: dataset.py
class DatasetGenerator (line 89) | class DatasetGenerator():
method __init__ (line 90) | def __init__(self, train_batch_size=128, eval_batch_size=256, num_of_w...
method getDataLoader (line 273) | def getDataLoader(self, train_shuffle=True, train_drop_last=True):
method _split_validation_set (line 288) | def _split_validation_set(self, train_portion, train_shuffle=True, tra...
function patch_noise_extend_to_img (line 354) | def patch_noise_extend_to_img(noise, image_size=[32, 32, 3], patch_locat...
class PoisonCIFAR10 (line 376) | class PoisonCIFAR10(datasets.CIFAR10):
method __init__ (line 377) | def __init__(self, root, train=True, transform=None, target_transform=...
class PoisonCIFAR100 (line 441) | class PoisonCIFAR100(datasets.CIFAR100):
method __init__ (line 442) | def __init__(self, root, train=True, transform=None, target_transform=...
class PoisonCIFAR101 (line 496) | class PoisonCIFAR101(datasets.VisionDataset):
method __init__ (line 497) | def __init__(self, root, split='poison_train', transform=None, target_...
method __len__ (line 525) | def __len__(self):
method __getitem__ (line 528) | def __getitem__(self, index):
class PoisonSVHN (line 536) | class PoisonSVHN(datasets.SVHN):
method __init__ (line 537) | def __init__(self, root, split='train', transform=None, target_transfo...
class ImageNetMini (line 591) | class ImageNetMini(datasets.ImageNet):
method __init__ (line 592) | def __init__(self, root, split='train', **kwargs):
class PoisonImageNetMini (line 608) | class PoisonImageNetMini(ImageNetMini):
method __init__ (line 609) | def __init__(self, root, split, poison_rate=1.0, seed=0,
method __getitem__ (line 628) | def __getitem__(self, index):
class Augmentation (line 648) | class Augmentation(object):
method __init__ (line 649) | def __init__(self, policies):
method __call__ (line 652) | def __call__(self, img):
class CatDogDataset (line 662) | class CatDogDataset(datasets.VisionDataset):
method __init__ (line 663) | def __init__(self, root, split='train', transform=None, target_transfo...
method __len__ (line 670) | def __len__(self):
method __getitem__ (line 673) | def __getitem__(self, index):
class CelebAMini (line 695) | class CelebAMini(datasets.CelebA):
method __init__ (line 696) | def __init__(self, root, split="train", target_type="attr", transform=...
method __len__ (line 721) | def __len__(self):
method __getitem__ (line 724) | def __getitem__(self, index):
class Cutout (line 733) | class Cutout(object):
method __init__ (line 734) | def __init__(self, length):
method __call__ (line 737) | def __call__(self, img):
class CutMix (line 755) | class CutMix(Dataset):
method __init__ (line 756) | def __init__(self, dataset, num_class, num_mix=2, beta=1.0, prob=0.5):
method __getitem__ (line 763) | def __getitem__(self, index):
method __len__ (line 786) | def __len__(self):
class MixUp (line 790) | class MixUp(Dataset):
method __init__ (line 791) | def __init__(self, dataset, num_class, num_mix=2, beta=1.0, prob=0.5):
method __getitem__ (line 798) | def __getitem__(self, index):
method __len__ (line 819) | def __len__(self):
FILE: evaluator.py
class Evaluator (line 15) | class Evaluator():
method __init__ (line 16) | def __init__(self, data_loader, logger, config):
method _reset_stats (line 30) | def _reset_stats(self):
method eval (line 37) | def eval(self, epoch, model):
method eval_batch (line 52) | def eval_batch(self, images, labels, model):
method _pgd_whitebox (line 73) | def _pgd_whitebox(self, model, X, y, random_start=True, epsilon=0.031,...
FILE: fast_autoaugment/FastAutoAugment/archive.py
function arsaug_policy (line 11) | def arsaug_policy():
function autoaug2arsaug (line 59) | def autoaug2arsaug(f):
function autoaug_paper_cifar10 (line 91) | def autoaug_paper_cifar10():
function autoaug_policy (line 122) | def autoaug_policy():
function float_parameter (line 248) | def float_parameter(level, maxval):
function int_parameter (line 252) | def int_parameter(level, maxval):
function no_duplicates (line 256) | def no_duplicates(f):
function remove_deplicates (line 264) | def remove_deplicates(policies):
function fa_reduced_cifar10 (line 281) | def fa_reduced_cifar10():
function fa_resnet50_rimagenet (line 286) | def fa_resnet50_rimagenet():
function fa_reduced_svhn (line 291) | def fa_reduced_svhn():
function policy_decoder (line 296) | def policy_decoder(augment, num_policy, num_op):
FILE: fast_autoaugment/FastAutoAugment/aug_mixup.py
function mixup (line 13) | def mixup(data, targets, alpha):
class CrossEntropyMixUpLabelSmooth (line 26) | class CrossEntropyMixUpLabelSmooth(torch.nn.Module):
method __init__ (line 27) | def __init__(self, num_classes, epsilon, reduction='mean'):
method forward (line 31) | def forward(self, input, target1, target2, lam): # pylint: disable=re...
FILE: fast_autoaugment/FastAutoAugment/augmentations.py
function ShearX (line 13) | def ShearX(img, v): # [-0.3, 0.3]
function ShearY (line 20) | def ShearY(img, v): # [-0.3, 0.3]
function TranslateX (line 27) | def TranslateX(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
function TranslateY (line 35) | def TranslateY(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
function TranslateXAbs (line 43) | def TranslateXAbs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
function TranslateYAbs (line 50) | def TranslateYAbs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
function Rotate (line 57) | def Rotate(img, v): # [-30, 30]
function AutoContrast (line 64) | def AutoContrast(img, _):
function Invert (line 68) | def Invert(img, _):
function Equalize (line 72) | def Equalize(img, _):
function Flip (line 76) | def Flip(img, _): # not from the paper
function Solarize (line 80) | def Solarize(img, v): # [0, 256]
function Posterize (line 85) | def Posterize(img, v): # [4, 8]
function Posterize2 (line 91) | def Posterize2(img, v): # [0, 4]
function Contrast (line 97) | def Contrast(img, v): # [0.1,1.9]
function Color (line 102) | def Color(img, v): # [0.1,1.9]
function Brightness (line 107) | def Brightness(img, v): # [0.1,1.9]
function Sharpness (line 112) | def Sharpness(img, v): # [0.1,1.9]
function Cutout (line 117) | def Cutout(img, v): # [0, 60] => percentage: [0, 0.2]
function CutoutAbs (line 126) | def CutoutAbs(img, v): # [0, 60] => percentage: [0, 0.2]
function SamplePairing (line 147) | def SamplePairing(imgs): # [0, 0.4]
function augment_list (line 156) | def augment_list(for_autoaug=True): # 16 oeprations and their ranges
function get_augment (line 188) | def get_augment(name):
function apply_augment (line 192) | def apply_augment(img, name, level):
class Lighting (line 197) | class Lighting(object):
method __init__ (line 200) | def __init__(self, alphastd, eigval, eigvec):
method __call__ (line 205) | def __call__(self, img):
FILE: fast_autoaugment/FastAutoAugment/common.py
function get_logger (line 10) | def get_logger(name, level=logging.DEBUG):
function add_filehandler (line 21) | def add_filehandler(logger, filepath, level=logging.DEBUG):
class EMA (line 28) | class EMA:
method __init__ (line 29) | def __init__(self, mu):
method state_dict (line 33) | def state_dict(self):
method __len__ (line 36) | def __len__(self):
method __call__ (line 39) | def __call__(self, module, step=None):
FILE: fast_autoaugment/FastAutoAugment/data.py
function get_dataloaders (line 37) | def get_dataloaders(dataset, batch, dataroot, split=0.15, split_idx=0, m...
class CutoutDefault (line 228) | class CutoutDefault(object):
method __init__ (line 232) | def __init__(self, length):
method __call__ (line 235) | def __call__(self, img):
class Augmentation (line 253) | class Augmentation(object):
method __init__ (line 254) | def __init__(self, policies):
method __call__ (line 257) | def __call__(self, img):
class EfficientNetRandomCrop (line 267) | class EfficientNetRandomCrop:
method __init__ (line 268) | def __init__(self, imgsize, min_covered=0.1, aspect_ratio_range=(3./4,...
method __call__ (line 280) | def __call__(self, img):
class EfficientNetCenterCrop (line 323) | class EfficientNetCenterCrop:
method __init__ (line 324) | def __init__(self, imgsize):
method __call__ (line 327) | def __call__(self, img):
class SubsetSampler (line 348) | class SubsetSampler(Sampler):
method __init__ (line 355) | def __init__(self, indices):
method __iter__ (line 358) | def __iter__(self):
method __len__ (line 361) | def __len__(self):
FILE: fast_autoaugment/FastAutoAugment/imagenet.py
class ImageNet (line 28) | class ImageNet(torchvision.datasets.ImageFolder):
method __init__ (line 52) | def __init__(self, root, split='train', download=False, **kwargs):
method download (line 102) | def download(self):
method meta_file (line 134) | def meta_file(self):
method _load_meta_file (line 137) | def _load_meta_file(self):
method _save_meta_file (line 143) | def _save_meta_file(self, wnid_to_class, val_wnids):
method _verify_split (line 146) | def _verify_split(self, split):
method valid_splits (line 154) | def valid_splits(self):
method split_folder (line 158) | def split_folder(self):
method extra_repr (line 161) | def extra_repr(self):
function extract_tar (line 165) | def extract_tar(src, dest=None, gzip=None, delete=False):
function download_and_extract_tar (line 181) | def download_and_extract_tar(url, download_root, extract_root=None, file...
function parse_devkit (line 195) | def parse_devkit(root):
function parse_meta (line 202) | def parse_meta(devkit_root, path='data', filename='meta.mat'):
function parse_val_groundtruth (line 217) | def parse_val_groundtruth(devkit_root, path='data',
function prepare_train_folder (line 224) | def prepare_train_folder(folder):
function prepare_val_folder (line 229) | def prepare_val_folder(folder, wnids):
function _splitexts (line 239) | def _splitexts(root):
FILE: fast_autoaugment/FastAutoAugment/lr_scheduler.py
function adjust_learning_rate_resnet (line 6) | def adjust_learning_rate_resnet(optimizer):
class MultiStepLR_HotFix (line 20) | class MultiStepLR_HotFix(MultiStepLR):
method __init__ (line 21) | def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1):
FILE: fast_autoaugment/FastAutoAugment/metrics.py
function accuracy (line 10) | def accuracy(output, target, topk=(1,)):
class CrossEntropyLabelSmooth (line 26) | class CrossEntropyLabelSmooth(torch.nn.Module):
method __init__ (line 27) | def __init__(self, num_classes, epsilon, reduction='mean'):
method forward (line 34) | def forward(self, input, target): # pylint: disable=redefined-builtin
class Accumulator (line 49) | class Accumulator:
method __init__ (line 50) | def __init__(self):
method add (line 53) | def add(self, key, value):
method add_dict (line 56) | def add_dict(self, dict):
method __getitem__ (line 60) | def __getitem__(self, item):
method __setitem__ (line 63) | def __setitem__(self, key, value):
method get_dict (line 66) | def get_dict(self):
method items (line 69) | def items(self):
method __str__ (line 72) | def __str__(self):
method __truediv__ (line 75) | def __truediv__(self, other):
class SummaryWriterDummy (line 88) | class SummaryWriterDummy:
method __init__ (line 89) | def __init__(self, log_dir):
method add_scalar (line 92) | def add_scalar(self, *args, **kwargs):
FILE: fast_autoaugment/FastAutoAugment/networks/__init__.py
function get_model (line 19) | def get_model(conf, num_class=10, local_rank=-1):
function num_class (line 93) | def num_class(dataset):
FILE: fast_autoaugment/FastAutoAugment/networks/efficientnet_pytorch/condconv.py
function _ntuple (line 13) | def _ntuple(n):
function _is_static_pad (line 27) | def _is_static_pad(kernel_size, stride=1, dilation=1, **_):
function _get_padding (line 31) | def _get_padding(kernel_size, stride=1, dilation=1, **_):
function _calc_same_pad (line 36) | def _calc_same_pad(i: int, k: int, s: int, d: int):
function conv2d_same (line 40) | def conv2d_same(
function get_padding_value (line 52) | def get_padding_value(padding, kernel_size, **kwargs):
function get_condconv_initializer (line 75) | def get_condconv_initializer(initializer, num_experts, expert_shape):
class CondConv2d (line 86) | class CondConv2d(nn.Module):
method __init__ (line 94) | def __init__(self, in_channels, out_channels, kernel_size=3,
method reset_parameters (line 128) | def reset_parameters(self):
method forward (line 145) | def forward(self, x, routing_weights):
method forward_legacy (line 175) | def forward_legacy(self, x, routing_weights):
FILE: fast_autoaugment/FastAutoAugment/networks/efficientnet_pytorch/model.py
class RoutingFn (line 18) | class RoutingFn(nn.Linear):
class MBConvBlock (line 22) | class MBConvBlock(nn.Module):
method __init__ (line 34) | def __init__(self, block_args, global_params, norm_layer=None):
method _is_condconv (line 79) | def _is_condconv(self):
method forward (line 82) | def forward(self, inputs, drop_connect_rate=None):
method set_swish (line 124) | def set_swish(self):
class EfficientNet (line 129) | class EfficientNet(nn.Module):
method __init__ (line 142) | def __init__(self, blocks_args=None, global_params=None, norm_layer=No...
method set_swish (line 193) | def set_swish(self):
method extract_features (line 199) | def extract_features(self, inputs):
method forward (line 217) | def forward(self, inputs):
method from_name (line 231) | def from_name(cls, model_name, override_params=None, norm_layer=None, ...
method from_pretrained (line 237) | def from_pretrained(cls, model_name, num_classes=1000):
method get_image_size (line 244) | def get_image_size(cls, model_name):
method _check_model_name_is_valid (line 250) | def _check_model_name_is_valid(cls, model_name, also_need_pretrained_w...
FILE: fast_autoaugment/FastAutoAugment/networks/efficientnet_pytorch/utils.py
class SwishImplementation (line 38) | class SwishImplementation(torch.autograd.Function):
method forward (line 40) | def forward(ctx, i):
method backward (line 46) | def backward(ctx, grad_output):
class MemoryEfficientSwish (line 52) | class MemoryEfficientSwish(nn.Module):
method forward (line 53) | def forward(self, x):
function round_filters (line 57) | def round_filters(filters, global_params):
function round_repeats (line 72) | def round_repeats(repeats, global_params):
function drop_connect (line 80) | def drop_connect(inputs, drop_p, training):
function get_same_padding_conv2d (line 101) | def get_same_padding_conv2d(image_size=None, condconv_num_expert=1):
class Conv2dDynamicSamePadding (line 112) | class Conv2dDynamicSamePadding(nn.Conv2d):
method __init__ (line 115) | def __init__(self, in_channels, out_channels, kernel_size, stride=1, d...
method forward (line 119) | def forward(self, x):
class Conv2dStaticSamePadding (line 131) | class Conv2dStaticSamePadding(nn.Conv2d):
method __init__ (line 134) | def __init__(self, in_channels, out_channels, kernel_size, image_size=...
method forward (line 151) | def forward(self, x):
class Identity (line 157) | class Identity(nn.Module):
method __init__ (line 158) | def __init__(self, ):
method forward (line 161) | def forward(self, input):
function efficientnet_params (line 170) | def efficientnet_params(model_name):
class BlockDecoder (line 186) | class BlockDecoder(object):
method _decode_block_string (line 190) | def _decode_block_string(block_string):
method _encode_block_string (line 219) | def _encode_block_string(block):
method decode (line 236) | def decode(string_list):
method encode (line 250) | def encode(blocks_args):
function efficientnet (line 263) | def efficientnet(width_coefficient=None, depth_coefficient=None, dropout...
function get_model_params (line 298) | def get_model_params(model_name, override_params, condconv_num_expert=1):
function load_pretrained_weights (line 325) | def load_pretrained_weights(model, model_name, load_fc=True):
FILE: fast_autoaugment/FastAutoAugment/networks/pyramidnet.py
function conv3x3 (line 8) | def conv3x3(in_planes, out_planes, stride=1):
class BasicBlock (line 15) | class BasicBlock(nn.Module):
method __init__ (line 18) | def __init__(self, inplanes, planes, stride=1, downsample=None, p_shak...
method forward (line 30) | def forward(self, x):
class Bottleneck (line 63) | class Bottleneck(nn.Module):
method __init__ (line 66) | def __init__(self, inplanes, planes, stride=1, downsample=None, p_shak...
method forward (line 81) | def forward(self, x):
class PyramidNet (line 120) | class PyramidNet(nn.Module):
method __init__ (line 122) | def __init__(self, dataset, depth, alpha, num_classes, bottleneck=True):
method pyramidal_make_layer (line 199) | def pyramidal_make_layer(self, block, block_depth, stride=1):
method forward (line 216) | def forward(self, x):
FILE: fast_autoaugment/FastAutoAugment/networks/resnet.py
function conv3x3 (line 7) | def conv3x3(in_planes, out_planes, stride=1):
class BasicBlock (line 13) | class BasicBlock(nn.Module):
method __init__ (line 16) | def __init__(self, inplanes, planes, stride=1, downsample=None):
method forward (line 27) | def forward(self, x):
class Bottleneck (line 46) | class Bottleneck(nn.Module):
method __init__ (line 49) | def __init__(self, inplanes, planes, stride=1, downsample=None):
method forward (line 63) | def forward(self, x):
class ResNet (line 84) | class ResNet(nn.Module):
method __init__ (line 85) | def __init__(self, dataset, depth, num_classes, bottleneck=False):
method _make_layer (line 134) | def _make_layer(self, block, planes, blocks, stride=1):
method forward (line 151) | def forward(self, x):
FILE: fast_autoaugment/FastAutoAugment/networks/shakedrop.py
class ShakeDropFunction (line 9) | class ShakeDropFunction(torch.autograd.Function):
method forward (line 12) | def forward(ctx, x, training=True, p_drop=0.5, alpha_range=[-1, 1]):
method backward (line 26) | def backward(ctx, grad_output):
class ShakeDrop (line 37) | class ShakeDrop(nn.Module):
method __init__ (line 39) | def __init__(self, p_drop=0.5, alpha_range=[-1, 1]):
method forward (line 44) | def forward(self, x):
FILE: fast_autoaugment/FastAutoAugment/networks/shakeshake/shake_resnet.py
class ShakeBlock (line 12) | class ShakeBlock(nn.Module):
method __init__ (line 14) | def __init__(self, in_ch, out_ch, stride=1):
method forward (line 22) | def forward(self, x):
method _make_branch (line 29) | def _make_branch(self, in_ch, out_ch, stride=1):
class ShakeResNet (line 39) | class ShakeResNet(nn.Module):
method __init__ (line 41) | def __init__(self, depth, w_base, label):
method forward (line 65) | def forward(self, x):
method _make_layer (line 76) | def _make_layer(self, n_units, in_ch, out_ch, stride=1):
FILE: fast_autoaugment/FastAutoAugment/networks/shakeshake/shake_resnext.py
class ShakeBottleNeck (line 12) | class ShakeBottleNeck(nn.Module):
method __init__ (line 14) | def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
method forward (line 22) | def forward(self, x):
method _make_branch (line 29) | def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
class ShakeResNeXt (line 41) | class ShakeResNeXt(nn.Module):
method __init__ (line 43) | def __init__(self, depth, w_base, cardinary, label):
method forward (line 67) | def forward(self, x):
method _make_layer (line 78) | def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
FILE: fast_autoaugment/FastAutoAugment/networks/shakeshake/shakeshake.py
class ShakeShake (line 9) | class ShakeShake(torch.autograd.Function):
method forward (line 12) | def forward(ctx, x1, x2, training=True):
method backward (line 21) | def backward(ctx, grad_output):
class Shortcut (line 29) | class Shortcut(nn.Module):
method __init__ (line 31) | def __init__(self, in_ch, out_ch, stride):
method forward (line 38) | def forward(self, x):
FILE: fast_autoaugment/FastAutoAugment/networks/wideresnet.py
function conv3x3 (line 7) | def conv3x3(in_planes, out_planes, stride=1):
function conv_init (line 11) | def conv_init(m):
class WideBasic (line 21) | class WideBasic(nn.Module):
method __init__ (line 22) | def __init__(self, in_planes, planes, dropout_rate, stride=1):
method forward (line 36) | def forward(self, x):
class WideResNet (line 44) | class WideResNet(nn.Module):
method __init__ (line 45) | def __init__(self, depth, widen_factor, dropout_rate, num_classes):
method _wide_layer (line 64) | def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
method forward (line 74) | def forward(self, x):
FILE: fast_autoaugment/FastAutoAugment/safe_shell_exec.py
function terminate_executor_shell_and_children (line 29) | def terminate_executor_shell_and_children(pid):
function forward_stream (line 63) | def forward_stream(src_fd, dst_stream, prefix, index):
function execute (line 90) | def execute(command, env=None, stdout=None, stderr=None, index=None, eve...
FILE: fast_autoaugment/FastAutoAugment/search.py
function step_w_log (line 32) | def step_w_log(self):
function _get_path (line 56) | def _get_path(dataset, model, tag):
function train_model (line 61) | def train_model(config, dataroot, augment, cv_ratio_test, cv_fold, save_...
function eval_tta (line 70) | def eval_tta(config, augment, reporter):
FILE: fast_autoaugment/FastAutoAugment/tf_port/rmsprop.py
class RMSpropTF (line 5) | class RMSpropTF(Optimizer):
method __init__ (line 30) | def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, momentum=0, ...
method __setstate__ (line 45) | def __setstate__(self, state):
method load_state_dict (line 50) | def load_state_dict(self, state_dict):
method step (line 54) | def step(self, closure=None):
FILE: fast_autoaugment/FastAutoAugment/tf_port/tpu_bn.py
class TpuBatchNormalization (line 8) | class TpuBatchNormalization(nn.Module):
method __init__ (line 10) | def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
method _reduce_avg (line 24) | def _reduce_avg(self, t):
method forward (line 28) | def forward(self, input):
FILE: fast_autoaugment/FastAutoAugment/train.py
function run_epoch (line 35) | def run_epoch(model, loader, loss_fn, optimizer, desc_default='', epoch=...
function train_and_eval (line 110) | def train_and_eval(tag, dataroot, test_ratio=0.0, cv_fold=0, reporter=No...
FILE: fast_autoaugment/FastAutoAugment/train_dist.py
function _exec_command (line 15) | def _exec_command(command):
function execute_function_multithreaded (line 29) | def execute_function_multithreaded(fn,
FILE: fast_autoaugment/archive.py
function arsaug_policy (line 11) | def arsaug_policy():
function autoaug2arsaug (line 59) | def autoaug2arsaug(f):
function autoaug_paper_cifar10 (line 91) | def autoaug_paper_cifar10():
function autoaug_policy (line 122) | def autoaug_policy():
function float_parameter (line 248) | def float_parameter(level, maxval):
function int_parameter (line 252) | def int_parameter(level, maxval):
function no_duplicates (line 256) | def no_duplicates(f):
function remove_deplicates (line 264) | def remove_deplicates(policies):
function fa_reduced_cifar10 (line 281) | def fa_reduced_cifar10():
function fa_resnet50_rimagenet (line 286) | def fa_resnet50_rimagenet():
function fa_reduced_svhn (line 291) | def fa_reduced_svhn():
function policy_decoder (line 296) | def policy_decoder(augment, num_policy, num_op):
FILE: madrys.py
class MadrysLoss (line 12) | class MadrysLoss(nn.Module):
method __init__ (line 13) | def __init__(self, step_size=0.007, epsilon=0.031, perturb_steps=10, d...
method forward (line 21) | def forward(self, model, x_natural, y, optimizer):
FILE: main.py
function train (line 77) | def train(starting_epoch, model, optimizer, scheduler, criterion, traine...
function main (line 142) | def main():
FILE: models/DenseNet.py
class Bottleneck (line 12) | class Bottleneck(nn.Module):
method __init__ (line 13) | def __init__(self, in_planes, growth_rate):
method forward (line 20) | def forward(self, x):
class Transition (line 27) | class Transition(nn.Module):
method __init__ (line 28) | def __init__(self, in_planes, out_planes):
method forward (line 33) | def forward(self, x):
class DenseNet (line 39) | class DenseNet(nn.Module):
method __init__ (line 40) | def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_...
method _make_dense_layers (line 71) | def _make_dense_layers(self, block, in_planes, nblock):
method forward (line 78) | def forward(self, x):
function DenseNet121 (line 90) | def DenseNet121(num_classes=10):
function DenseNet169 (line 94) | def DenseNet169(num_classes=10):
function DenseNet201 (line 98) | def DenseNet201(num_classes=10):
function DenseNet161 (line 102) | def DenseNet161(num_classes=10):
function densenet_cifar (line 106) | def densenet_cifar():
FILE: models/ResNet.py
class BasicBlock (line 7) | class BasicBlock(nn.Module):
method __init__ (line 10) | def __init__(self, in_planes, planes, stride=1):
method forward (line 24) | def forward(self, x):
class Bottleneck (line 32) | class Bottleneck(nn.Module):
method __init__ (line 35) | def __init__(self, in_planes, planes, stride=1):
method forward (line 51) | def forward(self, x):
class ResNet (line 60) | class ResNet(nn.Module):
method __init__ (line 61) | def __init__(self, block, num_blocks, num_classes=10):
method _make_layer (line 72) | def _make_layer(self, block, planes, num_blocks, stride):
method forward (line 80) | def forward(self, x):
function ResNet18 (line 92) | def ResNet18(num_classes=10):
function ResNet34 (line 96) | def ResNet34(num_classes=10):
function ResNet50 (line 100) | def ResNet50(num_classes=10):
function ResNet101 (line 104) | def ResNet101(num_classes=10):
function ResNet152 (line 108) | def ResNet152(num_classes=10):
function test (line 112) | def test():
class BasicConv2d (line 118) | class BasicConv2d(nn.Module):
method __init__ (line 119) | def __init__(self, in_planes, out_planes, kernel_size, stride, padding...
method forward (line 134) | def forward(self, x):
class Block35 (line 141) | class Block35(nn.Module):
method __init__ (line 143) | def __init__(self, scale=1.0):
method forward (line 164) | def forward(self, x):
class Block17 (line 175) | class Block17(nn.Module):
method __init__ (line 177) | def __init__(self, scale=1.0):
method forward (line 193) | def forward(self, x):
class Block8 (line 203) | class Block8(nn.Module):
method __init__ (line 205) | def __init__(self, scale=1.0, noReLU=False):
method forward (line 223) | def forward(self, x):
class Mixed_6a (line 234) | class Mixed_6a(nn.Module):
method __init__ (line 236) | def __init__(self):
method forward (line 249) | def forward(self, x):
class Mixed_7a (line 257) | class Mixed_7a(nn.Module):
method __init__ (line 259) | def __init__(self):
method forward (line 280) | def forward(self, x):
class InceptionResnetV1 (line 289) | class InceptionResnetV1(nn.Module):
method __init__ (line 290) | def __init__(self, num_classes=10575, face_features=512, dropout_prob=...
method forward (line 337) | def forward(self, x):
FILE: models/ToyModel.py
class ConvBrunch (line 5) | class ConvBrunch(nn.Module):
method __init__ (line 6) | def __init__(self, in_planes, out_planes, kernel_size=3):
method forward (line 14) | def forward(self, x):
class ToyModel (line 18) | class ToyModel(nn.Module):
method __init__ (line 19) | def __init__(self, num_classes=10):
method forward (line 31) | def forward(self, x):
FILE: models/__init__.py
class FocalLoss (line 39) | class FocalLoss(nn.Module):
method __init__ (line 40) | def __init__(self, gamma=0, eps=1e-7):
method forward (line 46) | def forward(self, input, target):
function cross_entropy (line 53) | def cross_entropy(input, target, size_average=True):
class CutMixCrossEntropyLoss (line 75) | class CutMixCrossEntropyLoss(torch.nn.Module):
method __init__ (line 76) | def __init__(self, size_average=True):
method forward (line 80) | def forward(self, input, target):
FILE: models/download.py
class tqdm (line 16) | class tqdm(object): # type: ignore
method __init__ (line 18) | def __init__(self, total=None, disable=False,
method update (line 25) | def update(self, n):
method __enter__ (line 36) | def __enter__(self):
method __exit__ (line 39) | def __exit__(self, exc_type, exc_val, exc_tb):
function download_url_to_file (line 46) | def download_url_to_file(url, dst, hash_prefix=None, progress=True):
FILE: models/inception_resnet_v1.py
class BasicConv2d (line 11) | class BasicConv2d(nn.Module):
method __init__ (line 12) | def __init__(self, in_planes, out_planes, kernel_size, stride, padding...
method forward (line 19) | def forward(self, x):
class Block35 (line 26) | class Block35(nn.Module):
method __init__ (line 28) | def __init__(self, scale=1.0):
method forward (line 49) | def forward(self, x):
class Block17 (line 60) | class Block17(nn.Module):
method __init__ (line 62) | def __init__(self, scale=1.0):
method forward (line 78) | def forward(self, x):
class Block8 (line 88) | class Block8(nn.Module):
method __init__ (line 90) | def __init__(self, scale=1.0, noReLU=False):
method forward (line 108) | def forward(self, x):
class Mixed_6a (line 119) | class Mixed_6a(nn.Module):
method __init__ (line 121) | def __init__(self):
method forward (line 134) | def forward(self, x):
class Mixed_7a (line 142) | class Mixed_7a(nn.Module):
method __init__ (line 144) | def __init__(self):
method forward (line 165) | def forward(self, x):
class InceptionResnetV1 (line 174) | class InceptionResnetV1(nn.Module):
method __init__ (line 190) | def __init__(self, pretrained=None, classify=False, num_classes=None, ...
method forward (line 252) | def forward(self, x):
function load_weights (line 283) | def load_weights(mdl, name):
function get_torch_home (line 309) | def get_torch_home():
FILE: perturbation.py
function train (line 88) | def train(starting_epoch, model, optimizer, scheduler, criterion, traine...
function universal_perturbation_eval (line 122) | def universal_perturbation_eval(noise_generator, random_noise, data_load...
function universal_perturbation (line 143) | def universal_perturbation(noise_generator, trainer, evaluator, model, c...
function samplewise_perturbation_eval (line 236) | def samplewise_perturbation_eval(random_noise, data_loader, model, eval_...
function sample_wise_perturbation (line 265) | def sample_wise_perturbation(noise_generator, trainer, evaluator, model,...
function main (line 395) | def main():
FILE: toolbox.py
class PerturbationTool (line 11) | class PerturbationTool():
method __init__ (line 12) | def __init__(self, seed=0, epsilon=0.03137254901, num_steps=20, step_s...
method random_noise (line 19) | def random_noise(self, noise_shape=[10, 3, 32, 32]):
method min_min_attack (line 23) | def min_min_attack(self, images, labels, model, optimizer, criterion, ...
method min_max_attack (line 51) | def min_max_attack(self, images, labels, model, optimizer, criterion, ...
method _patch_noise_extend_to_img (line 77) | def _patch_noise_extend_to_img(self, noise, image_size=[3, 32, 32], pa...
FILE: trainer.py
class Trainer (line 12) | class Trainer():
method __init__ (line 13) | def __init__(self, criterion, data_loader, logger, config, global_step=0,
method _reset_stats (line 27) | def _reset_stats(self):
method train (line 32) | def train(self, epoch, model, criterion, optimizer, random_noise=None):
method train_batch (line 55) | def train_batch(self, images, labels, model, optimizer):
FILE: util.py
function _patch_noise_extend_to_img (line 16) | def _patch_noise_extend_to_img(noise, image_size=[3, 32, 32], patch_loca...
function setup_logger (line 38) | def setup_logger(name, log_file, level=logging.INFO):
function log_display (line 52) | def log_display(epoch, global_step, time_elapse, **kwargs):
function accuracy (line 64) | def accuracy(output, target, topk=(1,)):
function save_model (line 79) | def save_model(filename, epoch, model, optimizer, scheduler, save_best=F...
function load_model (line 96) | def load_model(filename, model, optimizer, scheduler, **kwargs):
function count_parameters_in_MB (line 108) | def count_parameters_in_MB(model):
function build_dirs (line 112) | def build_dirs(path):
class AverageMeter (line 118) | class AverageMeter(object):
method __init__ (line 121) | def __init__(self):
method reset (line 124) | def reset(self):
method update (line 131) | def update(self, val, n=1):
function onehot (line 139) | def onehot(size, target):
function rand_bbox (line 145) | def rand_bbox(size, lam):
Condensed preview — 236 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (950K chars).
[
{
"path": ".gitattributes",
"chars": 66,
"preview": "# Auto detect text files and perform LF normalization\n* text=auto\n"
},
{
"path": ".gitignore",
"chars": 118,
"preview": "__pycache__\n*.pyc\n.DS_Store\n.ipynb_checkpoints\nexperiments/\ntest_exp/\npretrained_checkpoints/\nexp_results.json\nplots/\n"
},
{
"path": "CITATION.cff",
"chars": 1336,
"preview": "cff-version: 1.2.0\nmessage: \"If you use this software, please cite it as below.\"\nauthors:\n- family-names: \"Huang\"\n give"
},
{
"path": "LICENSE",
"chars": 1064,
"preview": "MIT License\n\nCopyright (c) 2021 HanxunH\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof"
},
{
"path": "QuickStart.ipynb",
"chars": 87709,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"<h2>Quick Start: Creating Sample-wi"
},
{
"path": "README.md",
"chars": 4468,
"preview": "# Unlearnable Examples\n\nCode for ICLR2021 Spotlight Paper [\"Unlearnable Examples: Making Personal Data Unexploitable \"]("
},
{
"path": "collect_results.py",
"chars": 6763,
"preview": "import argparse\nimport collections\nimport json\nimport os\nimport numpy as np\nimport dataset\nimport mlconfig\nimport models"
},
{
"path": "configs/cifar10/dense121.yaml",
"chars": 367,
"preview": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: DenseNet121\n num_classes: 10\n\ncriterion:\n "
},
{
"path": "configs/cifar10/resnet18.yaml",
"chars": 365,
"preview": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 10\n\ncriterion:\n na"
},
{
"path": "configs/cifar10/resnet18_add-uniform-noise-aug.yaml",
"chars": 402,
"preview": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 10\n\ncriterion:\n na"
},
{
"path": "configs/cifar10/resnet18_add-uniform-noise.yaml",
"chars": 391,
"preview": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 10\n\ncriterion:\n na"
},
{
"path": "configs/cifar10/resnet18_augement.yaml",
"chars": 376,
"preview": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 10\n\ncriterion:\n na"
},
{
"path": "configs/cifar10/resnet18_augmentation.yaml",
"chars": 376,
"preview": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 10\n\ncriterion:\n na"
},
{
"path": "configs/cifar10/resnet18_classpoison.yaml",
"chars": 390,
"preview": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 10\n\ncriterion:\n na"
},
{
"path": "configs/cifar10/resnet18_classpoison_targeted.yaml",
"chars": 427,
"preview": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 10\n\ncriterion:\n na"
},
{
"path": "configs/cifar10/resnet18_cutmix.yaml",
"chars": 390,
"preview": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 10\n\ncriterion:\n na"
},
{
"path": "configs/cifar10/resnet18_cutout.yaml",
"chars": 384,
"preview": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 10\n\ncriterion:\n na"
},
{
"path": "configs/cifar10/resnet18_denoise.yaml",
"chars": 385,
"preview": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 10\n\ncriterion:\n na"
},
{
"path": "configs/cifar10/resnet18_madrys.yaml",
"chars": 435,
"preview": "num_classes: 10\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 10\n\ncriterion:\n n"
},
{
"path": "configs/cifar10/resnet18_mixup.yaml",
"chars": 389,
"preview": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 10\n\ncriterion:\n na"
},
{
"path": "configs/cifar10/resnet50.yaml",
"chars": 365,
"preview": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet50\n num_classes: 10\n\ncriterion:\n na"
},
{
"path": "configs/cifar10/toy_cifar.yaml",
"chars": 348,
"preview": "num_classes: 10\nepochs: 80\ngrad_clip: 5.0\nlog_frequency: 50\n\nmodel:\n name: ToyModel\n\ncriterion:\n name: CrossEntropyLos"
},
{
"path": "configs/cifar10/toy_cifar_madrys.yaml",
"chars": 415,
"preview": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 50\n\nmodel:\n name: ToyModel\n\ncriterion:\n name: MadrysLoss\n ep"
},
{
"path": "configs/cifar100/dense121.yaml",
"chars": 370,
"preview": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: DenseNet121\n num_classes: 100\n\ncriterion"
},
{
"path": "configs/cifar100/resnet18.yaml",
"chars": 368,
"preview": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 100\n\ncriterion:\n "
},
{
"path": "configs/cifar100/resnet18_madrys.yaml",
"chars": 437,
"preview": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 100\n\ncriterion:\n "
},
{
"path": "configs/cifar100/resnet50.yaml",
"chars": 368,
"preview": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet50\n num_classes: 100\n\ncriterion:\n "
},
{
"path": "configs/cifar101/resnet18.yaml",
"chars": 368,
"preview": "num_classes: 101\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 101\n\ncriterion:\n "
},
{
"path": "configs/face/InceptionResnet.yaml",
"chars": 415,
"preview": "num_classes: 10575\nepochs: 50\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: InceptionResnetV1\n num_classes: $num_cl"
},
{
"path": "configs/imagenet-mini/dense121.yaml",
"chars": 370,
"preview": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: densenet121\n num_classes: 100\n\ncriterion"
},
{
"path": "configs/imagenet-mini/resnet18.yaml",
"chars": 368,
"preview": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: resnet18\n num_classes: 100\n\ncriterion:\n "
},
{
"path": "configs/imagenet-mini/resnet50.yaml",
"chars": 368,
"preview": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: resnet50\n num_classes: 100\n\ncriterion:\n "
},
{
"path": "configs/svhn/dense121.yaml",
"chars": 367,
"preview": "num_classes: 10\nepochs: 30\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: DenseNet121\n num_classes: 10\n\ncriterion:\n "
},
{
"path": "configs/svhn/resnet18.yaml",
"chars": 365,
"preview": "num_classes: 10\nepochs: 30\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 10\n\ncriterion:\n na"
},
{
"path": "configs/svhn/resnet18_madrys.yaml",
"chars": 436,
"preview": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 10\n\ncriterion:\n "
},
{
"path": "configs/svhn/resnet50.yaml",
"chars": 365,
"preview": "num_classes: 10\nepochs: 30\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet50\n num_classes: 10\n\ncriterion:\n na"
},
{
"path": "configs/tiny-imagenet/dense121.yaml",
"chars": 372,
"preview": "num_classes: 1000\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: DenseNet121\n num_classes: 1000\n\ncriteri"
},
{
"path": "configs/tiny-imagenet/resnet18.yaml",
"chars": 370,
"preview": "num_classes: 1000\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet18\n num_classes: 1000\n\ncriterion:"
},
{
"path": "configs/tiny-imagenet/resnet50.yaml",
"chars": 370,
"preview": "num_classes: 1000\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n name: ResNet50\n num_classes: 1000\n\ncriterion:"
},
{
"path": "dataset.py",
"chars": 40433,
"preview": "import copy\nimport os\nimport collections\nimport numpy as np\nimport torch\nimport util\nimport random\nimport mlconfig\nimpor"
},
{
"path": "evaluator.py",
"chars": 3864,
"preview": "import time\n\nimport models\nimport torch\nimport torch.optim as optim\nimport util\nfrom torch.autograd import Variable\n\nif "
},
{
"path": "fast_autoaugment/.gitignore",
"chars": 1203,
"preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packagi"
},
{
"path": "fast_autoaugment/FastAutoAugment/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "fast_autoaugment/FastAutoAugment/archive.py",
"chars": 177845,
"preview": "# Policy found on CIFAR-10 and CIFAR-100\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __f"
},
{
"path": "fast_autoaugment/FastAutoAugment/aug_mixup.py",
"chars": 1098,
"preview": "\"\"\"\nReference :\n- https://github.com/hysts/pytorch_image_classification/blob/master/augmentations/mixup.py\n- https://git"
},
{
"path": "fast_autoaugment/FastAutoAugment/augmentations.py",
"chars": 5554,
"preview": "# code in this file is adpated from rpmcruz/autoaugment\n# https://github.com/rpmcruz/autoaugment/blob/master/transformat"
},
{
"path": "fast_autoaugment/FastAutoAugment/common.py",
"chars": 1535,
"preview": "import copy\nimport logging\nimport warnings\n\nformatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(me"
},
{
"path": "fast_autoaugment/FastAutoAugment/data.py",
"chars": 16434,
"preview": "import logging\n\nimport numpy as np\nimport os\n\nimport math\nimport random\nimport torch\nimport torchvision\nfrom PIL import "
},
{
"path": "fast_autoaugment/FastAutoAugment/imagenet.py",
"chars": 9218,
"preview": "from __future__ import print_function\nimport os\nimport shutil\nimport torch\n\nARCHIVE_DICT = {\n 'train': {\n 'url"
},
{
"path": "fast_autoaugment/FastAutoAugment/lr_scheduler.py",
"chars": 816,
"preview": "import torch\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom theconf import Config as C\n\n\ndef adjust_learning_rate"
},
{
"path": "fast_autoaugment/FastAutoAugment/metrics.py",
"chars": 2562,
"preview": "import copy\n\nimport torch\nimport numpy as np\nfrom collections import defaultdict\n\nfrom torch import nn\n\n\ndef accuracy(ou"
},
{
"path": "fast_autoaugment/FastAutoAugment/networks/__init__.py",
"chars": 4357,
"preview": "import torch\n\nfrom torch import nn\nfrom torch.nn import DataParallel\nfrom torch.nn.parallel import DistributedDataParall"
},
{
"path": "fast_autoaugment/FastAutoAugment/networks/efficientnet_pytorch/__init__.py",
"chars": 178,
"preview": "__version__ = \"0.5.1\"\nfrom .model import EfficientNet, RoutingFn\nfrom .utils import (\n GlobalParams,\n BlockArgs,\n "
},
{
"path": "fast_autoaugment/FastAutoAugment/networks/efficientnet_pytorch/condconv.py",
"chars": 8122,
"preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch._six import container_abcs\n\nfrom itertools"
},
{
"path": "fast_autoaugment/FastAutoAugment/networks/efficientnet_pytorch/model.py",
"chars": 10884,
"preview": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom functools import partial\nfrom .utils import"
},
{
"path": "fast_autoaugment/FastAutoAugment/networks/efficientnet_pytorch/utils.py",
"chars": 13493,
"preview": "\"\"\"\nThis file contains helper functions for building the model and for loading model parameters.\nThese helper functions "
},
{
"path": "fast_autoaugment/FastAutoAugment/networks/pyramidnet.py",
"chars": 9400,
"preview": "import torch\nimport torch.nn as nn\nimport math\n\nfrom FastAutoAugment.networks.shakedrop import ShakeDrop\n\n\ndef conv3x3(i"
},
{
"path": "fast_autoaugment/FastAutoAugment/networks/resnet.py",
"chars": 6346,
"preview": "# Original code: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n\nimport torch.nn as nn\nimpor"
},
{
"path": "fast_autoaugment/FastAutoAugment/networks/shakedrop.py",
"chars": 1454,
"preview": "# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import V"
},
{
"path": "fast_autoaugment/FastAutoAugment/networks/shakeshake/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "fast_autoaugment/FastAutoAugment/networks/shakeshake/shake_resnet.py",
"chars": 2748,
"preview": "# -*- coding: utf-8 -*-\n\nimport math\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom FastAutoAugment.networ"
},
{
"path": "fast_autoaugment/FastAutoAugment/networks/shakeshake/shake_resnext.py",
"chars": 3094,
"preview": "# -*- coding: utf-8 -*-\n\nimport math\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom FastAutoAugment.networ"
},
{
"path": "fast_autoaugment/FastAutoAugment/networks/shakeshake/shakeshake.py",
"chars": 1413,
"preview": "# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import V"
},
{
"path": "fast_autoaugment/FastAutoAugment/networks/wideresnet.py",
"chars": 2956,
"preview": "import torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nimport numpy as np\n\n\ndef conv3x3(in_p"
},
{
"path": "fast_autoaugment/FastAutoAugment/safe_shell_exec.py",
"chars": 6147,
"preview": "# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "fast_autoaugment/FastAutoAugment/search.py",
"chars": 13382,
"preview": "import copy\nimport os\nimport sys\nimport time\nfrom collections import OrderedDict, defaultdict\n\nimport torch\n\nimport nump"
},
{
"path": "fast_autoaugment/FastAutoAugment/tf_port/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "fast_autoaugment/FastAutoAugment/tf_port/rmsprop.py",
"chars": 4416,
"preview": "import torch\nfrom torch.optim.optimizer import Optimizer\n\n\nclass RMSpropTF(Optimizer):\n r\"\"\"Implements RMSprop algori"
},
{
"path": "fast_autoaugment/FastAutoAugment/tf_port/tpu_bn.py",
"chars": 2870,
"preview": "import torch\nfrom torch.nn import BatchNorm2d\nfrom torch.nn.parameter import Parameter\nimport torch.distributed as dist\n"
},
{
"path": "fast_autoaugment/FastAutoAugment/train.py",
"chars": 16601,
"preview": "import pathlib\nimport sys\n\nsys.path.append(str(pathlib.Path(__file__).parent.parent.absolute()))\n\nimport itertools\nimpor"
},
{
"path": "fast_autoaugment/FastAutoAugment/train_dist.py",
"chars": 4850,
"preview": "import pathlib\nimport sys\n\nsys.path.append(str(pathlib.Path(__file__).parent.parent.absolute()))\n\nimport time\nimport os\n"
},
{
"path": "fast_autoaugment/LICENSE",
"chars": 1066,
"preview": "MIT License\n\nCopyright (c) 2019 Ildoo Kim\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\n"
},
{
"path": "fast_autoaugment/README.md",
"chars": 7920,
"preview": "# Fast AutoAugment **(Accepted at NeurIPS 2019)**\n\nOfficial [Fast AutoAugment](https://arxiv.org/abs/1905.00397) impleme"
},
{
"path": "fast_autoaugment/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "fast_autoaugment/archive.py",
"chars": 177860,
"preview": "# Policy found on CIFAR-10 and CIFAR-100\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __f"
},
{
"path": "fast_autoaugment/confs/efficientnet_b0.yaml",
"chars": 419,
"preview": "model:\n type: efficientnet-b0\n condconv_num_expert: 1 # if this is greater than 1(eg. 4), it activates condconv.\ndata"
},
{
"path": "fast_autoaugment/confs/efficientnet_b0_condconv.yaml",
"chars": 431,
"preview": "model:\n type: efficientnet-b0\n condconv_num_expert: 8 # if this is greater than 1(eg. 4), it activates condconv.\ndata"
},
{
"path": "fast_autoaugment/confs/efficientnet_b1.yaml",
"chars": 420,
"preview": "model:\n type: efficientnet-b1\n condconv_num_expert: 1 # if this is greater than 1(eg. 4), it activates condconv.\ndata"
},
{
"path": "fast_autoaugment/confs/efficientnet_b2.yaml",
"chars": 420,
"preview": "model:\n type: efficientnet-b2\n condconv_num_expert: 1 # if this is greater than 1(eg. 4), it activates condconv.\ndata"
},
{
"path": "fast_autoaugment/confs/efficientnet_b3.yaml",
"chars": 419,
"preview": "model:\n type: efficientnet-b3\n condconv_num_expert: 1 # if this is greater than 1(eg. 4), it activates condconv.\ndata"
},
{
"path": "fast_autoaugment/confs/efficientnet_b4.yaml",
"chars": 419,
"preview": "model:\n type: efficientnet-b4\n condconv_num_expert: 1 # if this is greater than 1(eg. 4), it activates condconv.\ndata"
},
{
"path": "fast_autoaugment/confs/pyramid272_cifar.yaml",
"chars": 288,
"preview": "model:\n type: pyramid\n depth: 272\n alpha: 200\n bottleneck: True\ndataset: cifar10\naug: fa_reduced_cifar10\ncutout: 16\n"
},
{
"path": "fast_autoaugment/confs/resnet200.yaml",
"chars": 255,
"preview": "model:\n type: resnet200\ndataset: imagenet\naug: fa_reduced_imagenet\ncutout: 0\nbatch: 64\nepoch: 270\nlr: 0.025\nlr_schedule"
},
{
"path": "fast_autoaugment/confs/resnet50.yaml",
"chars": 254,
"preview": "model:\n type: resnet50\ndataset: imagenet\naug: fa_reduced_imagenet\ncutout: 0\nbatch: 128\nepoch: 270\nlr: 0.05\nlr_schedule:"
},
{
"path": "fast_autoaugment/confs/resnet50_mixup.yaml",
"chars": 281,
"preview": "model:\n type: resnet50\ndataset: imagenet\naug: fa_reduced_imagenet\ncutout: 0\nbatch: 128\nepoch: 270\nlr: 0.05\nlr_schedule:"
},
{
"path": "fast_autoaugment/confs/shake26_2x112d_cifar.yaml",
"chars": 254,
"preview": "model:\n type: shakeshake26_2x112d\ndataset: cifar10\naug: fa_reduced_cifar10\ncutout: 16\nbatch: 128\nepoch: 1800\nlr: 0.01\nl"
},
{
"path": "fast_autoaugment/confs/shake26_2x32d_cifar.yaml",
"chars": 253,
"preview": "model:\n type: shakeshake26_2x32d\ndataset: cifar10\naug: fa_reduced_cifar10\ncutout: 16\nbatch: 128\nepoch: 1800\nlr: 0.01\nlr"
},
{
"path": "fast_autoaugment/confs/shake26_2x96d_cifar.yaml",
"chars": 253,
"preview": "model:\n type: shakeshake26_2x96d\ndataset: cifar10\naug: fa_reduced_cifar10\ncutout: 16\nbatch: 128\nepoch: 1800\nlr: 0.01\nlr"
},
{
"path": "fast_autoaugment/confs/wresnet28x10_cifar.yaml",
"chars": 245,
"preview": "model:\n type: wresnet28_10\ndataset: cifar10\naug: fa_reduced_cifar10\ncutout: 16\nbatch: 128\nepoch: 200\nlr: 0.1\nlr_schedul"
},
{
"path": "fast_autoaugment/confs/wresnet28x10_svhn.yaml",
"chars": 240,
"preview": "model:\n type: wresnet28_10\ndataset: svhn\naug: fa_reduced_svhn\ncutout: 20\nbatch: 128\nepoch: 200\nlr: 0.01\nlr_schedule:\n "
},
{
"path": "fast_autoaugment/confs/wresnet40x2_cifar.yaml",
"chars": 244,
"preview": "model:\n type: wresnet40_2\ndataset: cifar10\naug: fa_reduced_cifar10\ncutout: 16\nbatch: 128\nepoch: 200\nlr: 0.1\nlr_schedule"
},
{
"path": "fast_autoaugment/requirements.txt",
"chars": 316,
"preview": "git+https://github.com/wbaek/theconf@de32022f8c0651a043dc812d17194cdfd62066e8\ngit+https://github.com/ildoonet/pytorch-gr"
},
{
"path": "madrys.py",
"chars": 1784,
"preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport models\nfrom torch.autograd import Variable\nif "
},
{
"path": "main.py",
"chars": 10164,
"preview": "import argparse\nimport datetime\nimport os\nimport shutil\nimport time\nimport numpy as np\nimport dataset\nimport mlconfig\nim"
},
{
"path": "models/DenseNet.py",
"chars": 3654,
"preview": "'''\nhttps://github.com/kuangliu/pytorch-cifar\nDenseNet in PyTorch.\n'''\nimport math\n\nimport torch\nimport torch.nn as nn\ni"
},
{
"path": "models/ResNet.py",
"chars": 11455,
"preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\n\nclass BasicBlock(nn.Module):\n expans"
},
{
"path": "models/ToyModel.py",
"chars": 1151,
"preview": "import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ConvBrunch(nn.Module):\n def __init__(self, in_planes, o"
},
{
"path": "models/__init__.py",
"chars": 2796,
"preview": "import mlconfig\nimport torch\nimport torch.nn as nn\nimport torchvision\n\nfrom . import DenseNet, ResNet, ToyModel, incepti"
},
{
"path": "models/download.py",
"chars": 3720,
"preview": "import hashlib\nimport os\nimport shutil\nimport sys\nimport tempfile\n\nfrom urllib.request import urlopen, Request\n\ntry:\n "
},
{
"path": "models/inception_resnet_v1.py",
"chars": 10719,
"preview": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom .download import download_url_to_file\nif tor"
},
{
"path": "perturbation.py",
"chars": 24083,
"preview": "import argparse\nimport collections\nimport datetime\nimport os\nimport shutil\nimport time\nimport dataset\nimport mlconfig\nim"
},
{
"path": "requirements.txt",
"chars": 27,
"preview": "torch\ntorchvision\nmlconfig\n"
},
{
"path": "scripts/cifar10/min-max-noise/classwise-noise/exp_setting.sh",
"chars": 614,
"preview": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=P"
},
{
"path": "scripts/cifar10/min-max-noise/classwise-noise/search_perturbation_noise.sh",
"chars": 1079,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n# Remove previous files\necho $exp_path\n\n# Search Universal Pertu"
},
{
"path": "scripts/cifar10/min-max-noise/classwise-noise/submit.sh",
"chars": 959,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10/min-max-noise/classwise-noise/train.sh",
"chars": 829,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10/min-max-noise/classwise-noise/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10/min-max-noise/samplewise-noise/exp_setting.sh",
"chars": 617,
"preview": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=P"
},
{
"path": "scripts/cifar10/min-max-noise/samplewise-noise/search_perturbation_noise.sh",
"chars": 1042,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n# Remove previous files\necho $exp_path\n\n# Search Universal Pertu"
},
{
"path": "scripts/cifar10/min-max-noise/samplewise-noise/submit.sh",
"chars": 995,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10/min-max-noise/samplewise-noise/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10/min-max-noise/samplewise-noise/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10/min-min-noise/classwise-noise/exp_setting.sh",
"chars": 623,
"preview": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_datase"
},
{
"path": "scripts/cifar10/min-min-noise/classwise-noise/search_perturbation_noise.sh",
"chars": 1081,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10/min-min-noise/classwise-noise/submit.sh",
"chars": 1478,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n \"resne"
},
{
"path": "scripts/cifar10/min-min-noise/classwise-noise/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10/min-min-noise/classwise-noise/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10/min-min-noise/samplewise-noise/exp_setting.sh",
"chars": 618,
"preview": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=P"
},
{
"path": "scripts/cifar10/min-min-noise/samplewise-noise/search_perturbation_noise.sh",
"chars": 1044,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10/min-min-noise/samplewise-noise/submit.sh",
"chars": 1454,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n \"resne"
},
{
"path": "scripts/cifar10/min-min-noise/samplewise-noise/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10/min-min-noise/samplewise-noise/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10/random-noise/classwise-noise/exp_setting.sh",
"chars": 557,
"preview": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=P"
},
{
"path": "scripts/cifar10/random-noise/classwise-noise/search_perturbation_noise.sh",
"chars": 1044,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10/random-noise/classwise-noise/submit.sh",
"chars": 1205,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10/random-noise/classwise-noise/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10/random-noise/classwise-noise/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10/random-noise/samplewise-noise/exp_setting.sh",
"chars": 560,
"preview": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=P"
},
{
"path": "scripts/cifar10/random-noise/samplewise-noise/search_perturbation_noise.sh",
"chars": 1044,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10/random-noise/samplewise-noise/submit.sh",
"chars": 1016,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10/random-noise/samplewise-noise/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10/random-noise/samplewise-noise/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-2/exp_setting.sh",
"chars": 651,
"preview": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_datase"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-2/search_perturbation_noise.sh",
"chars": 1081,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-2/submit.sh",
"chars": 1093,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-2/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-2/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=16/exp_setting.sh",
"chars": 651,
"preview": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_datase"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=16/search_perturbation_noise.sh",
"chars": 1081,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=16/submit.sh",
"chars": 1012,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=16/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=16/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=24/exp_setting.sh",
"chars": 651,
"preview": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_datase"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=24/search_perturbation_noise.sh",
"chars": 1081,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=24/submit.sh",
"chars": 1013,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=24/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=24/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch16/exp_setting.sh",
"chars": 759,
"preview": "#!/usr/bin/env bash\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_t"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch16/search_perturbation_noise.sh",
"chars": 1162,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch16/submit.sh",
"chars": 1095,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch16/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch16/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch24/exp_setting.sh",
"chars": 759,
"preview": "#!/usr/bin/env bash\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_t"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch24/search_perturbation_noise.sh",
"chars": 1162,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch24/submit.sh",
"chars": 1095,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch24/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch24/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch8/exp_setting.sh",
"chars": 758,
"preview": "#!/usr/bin/env bash\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_t"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch8/search_perturbation_noise.sh",
"chars": 1162,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch8/submit.sh",
"chars": 1095,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch8/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch8/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-transfer-tiny-imagenet/exp_setting.sh",
"chars": 663,
"preview": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_datase"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-transfer-tiny-imagenet/submit.sh",
"chars": 1057,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n \"resne"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-transfer-tiny-imagenet/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10-extension/min-min-noise/classwise-noise-transfer-tiny-imagenet/train.slurm",
"chars": 1374,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=16/exp_setting.sh",
"chars": 646,
"preview": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=P"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=16/search_perturbation_noise.sh",
"chars": 1083,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=16/submit.sh",
"chars": 1317,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=16/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=16/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=24/exp_setting.sh",
"chars": 646,
"preview": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=P"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=24/search_perturbation_noise.sh",
"chars": 1083,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=24/submit.sh",
"chars": 1348,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=24/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=24/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch16/exp_setting.sh",
"chars": 760,
"preview": "#!/usr/bin/env bash\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_t"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch16/search_perturbation_noise.sh",
"chars": 1165,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch16/submit.sh",
"chars": 1095,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch16/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch16/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch24/exp_setting.sh",
"chars": 760,
"preview": "#!/usr/bin/env bash\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_t"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch24/search_perturbation_noise.sh",
"chars": 1165,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch24/submit.sh",
"chars": 1095,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch24/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch24/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch8/exp_setting.sh",
"chars": 759,
"preview": "#!/usr/bin/env bash\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_t"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch8/search_perturbation_noise.sh",
"chars": 1165,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch8/submit.sh",
"chars": 1095,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n # \"res"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch8/train.sh",
"chars": 812,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch8/train.slurm",
"chars": 1370,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar100/min-min-noise/classwise-noise/exp_setting.sh",
"chars": 628,
"preview": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar100\nexport dataset_type=CIFAR100\nexport poison_data"
},
{
"path": "scripts/cifar100/min-min-noise/classwise-noise/search_perturbation_noise.sh",
"chars": 1152,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar100/min-min-noise/classwise-noise/submit.sh",
"chars": 1024,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n \"resne"
},
{
"path": "scripts/cifar100/min-min-noise/classwise-noise/train.sh",
"chars": 890,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
},
{
"path": "scripts/cifar100/min-min-noise/classwise-noise/train.slurm",
"chars": 1448,
"preview": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"pu"
},
{
"path": "scripts/cifar100/min-min-noise/samplewise-noise/exp_setting.sh",
"chars": 644,
"preview": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar100\nexport dataset_type=CIFAR100\nexport poison_dataset_type"
},
{
"path": "scripts/cifar100/min-min-noise/samplewise-noise/search_perturbation_noise.sh",
"chars": 1186,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Per"
},
{
"path": "scripts/cifar100/min-min-noise/samplewise-noise/submit.sh",
"chars": 1327,
"preview": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n \"resnet18\"\n \"resne"
},
{
"path": "scripts/cifar100/min-min-noise/samplewise-noise/train.sh",
"chars": 890,
"preview": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_p"
}
]
// ... and 36 more files (download for full content)
About this extraction
This page contains the full source code of the HanxunH/Unlearnable-Examples GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 236 files (876.9 KB), approximately 321.0k tokens, and a symbol index with 431 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.