[
  {
    "path": ".gitattributes",
    "content": "# Auto detect text files and perform LF normalization\n* text=auto\n"
  },
  {
    "path": ".gitignore",
    "content": "__pycache__\n*.pyc\n.DS_Store\n.ipynb_checkpoints\nexperiments/\ntest_exp/\npretrained_checkpoints/\nexp_results.json\nplots/\n"
  },
  {
    "path": "CITATION.cff",
    "content": "cff-version: 1.2.0\nmessage: \"If you use this software, please cite it as below.\"\nauthors:\n- family-names: \"Huang\"\n  given-names: \"Hanxun\"\n  orcid: \"https://orcid.org/0000-0002-2793-6680\"\n- family-names: \"Ma\"\n  given-names: \"Xingjun\"\n  orcid: \"https://orcid.org/0000-0003-2099-4973\"\n- family-names: \"Erfani\"\n  given-names: \"Sarah\"\n  orcid: \"https://orcid.org/0000-0003-0885-0643\"\n- family-names: \"Bailey\"\n  given-names: \"James\"\n  orcid: \"https://orcid.org/0000-0002-3769-3811\"\n- family-names: \"Wang\"\n  given-names: \"Yisen\"\ntitle: \"Unlearnable Examples: Making Personal Data Unexploitable\"\nversion: 0.0.1\ndate-released: 2021-01-15\nurl: \"https://github.com/HanxunH/Unlearnable-Examples\"\npreferred-citation:\n  type: conference-paper\n  title: \"Unlearnable Examples: Making Personal Data Unexploitable\"\n  authors:\n    - family-names: \"Huang\"\n      given-names: \"Hanxun\"\n      orcid: \"https://orcid.org/0000-0002-2793-6680\"\n    - family-names: \"Ma\"\n      given-names: \"Xingjun\"\n      orcid: \"https://orcid.org/0000-0003-2099-4973\"\n    - family-names: \"Erfani\"\n      given-names: \"Sarah\"\n      orcid: \"https://orcid.org/0000-0003-0885-0643\"\n    - family-names: \"Bailey\"\n      given-names: \"James\"\n      orcid: \"https://orcid.org/0000-0002-3769-3811\"\n    - family-names: \"Wang\"\n      given-names: \"Yisen\"\n  collection-title: \"ICLR\"\n  year: 2021\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2021 HanxunH\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "QuickStart.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"<h2>Quick Start: Creating Sample-wise Unlearnable Examples</h2>\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"<h3>Prepare Data</h3>\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 1,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Files already downloaded and verified\\n\",\n      \"Files already downloaded and verified\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"import torch\\n\",\n    \"import torchvision\\n\",\n    \"from torch.utils.data import DataLoader\\n\",\n    \"from torchvision import datasets, transforms\\n\",\n    \"\\n\",\n    \"# Prepare Dataset\\n\",\n    \"train_transform = [\\n\",\n    \"    transforms.ToTensor()\\n\",\n    \"]\\n\",\n    \"test_transform = [\\n\",\n    \"    transforms.ToTensor()\\n\",\n    \"]\\n\",\n    \"train_transform = transforms.Compose(train_transform)\\n\",\n    \"test_transform = transforms.Compose(test_transform)\\n\",\n    \"\\n\",\n    \"clean_train_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\\n\",\n    \"clean_test_dataset = datasets.CIFAR10(root='../datasets', train=False, download=True, transform=test_transform)\\n\",\n    \"\\n\",\n    \"clean_train_loader = DataLoader(dataset=clean_train_dataset, batch_size=512,\\n\",\n    \"                                shuffle=False, pin_memory=True,\\n\",\n    \"                                drop_last=False, num_workers=12)\\n\",\n    \"clean_test_loader = DataLoader(dataset=clean_test_dataset, batch_size=512,\\n\",\n    \"                                shuffle=False, pin_memory=True,\\n\",\n    \"                                drop_last=False, num_workers=12)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   
\"metadata\": {},\n   \"source\": [\n    \"<h3>Prepare Model</h3>\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 2,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from models.ResNet import ResNet18\\n\",\n    \"import toolbox\\n\",\n    \"\\n\",\n    \"torch.backends.cudnn.enabled = True\\n\",\n    \"torch.backends.cudnn.benchmark = True\\n\",\n    \"\\n\",\n    \"base_model = ResNet18()\\n\",\n    \"base_model = base_model.cuda()\\n\",\n    \"criterion = torch.nn.CrossEntropyLoss()\\n\",\n    \"optimizer = torch.optim.SGD(params=base_model.parameters(), lr=0.1, weight_decay=0.0005, momentum=0.9)\\n\",\n    \"\\n\",\n    \"noise_generator = toolbox.PerturbationTool(epsilon=0.03137254901960784, num_steps=20, step_size=0.0031372549019607846)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"<h3>Generate Error-Minimizing Noise</h3>\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 3,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"100%|██████████| 98/98 [03:05<00:00,  1.89s/it]\\n\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Accuracy 8.13\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"100%|██████████| 98/98 [03:06<00:00,  1.91s/it]\\n\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Accuracy 11.89\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"100%|██████████| 98/98 [03:07<00:00,  1.91s/it]\\n\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Accuracy 31.45\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     
\"output_type\": \"stream\",\n     \"text\": [\n      \"100%|██████████| 98/98 [03:07<00:00,  1.91s/it]\\n\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Accuracy 67.06\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"100%|██████████| 98/98 [03:08<00:00,  1.92s/it]\\n\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Accuracy 88.17\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"100%|██████████| 98/98 [03:07<00:00,  1.91s/it]\\n\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Accuracy 68.22\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"100%|██████████| 98/98 [03:07<00:00,  1.91s/it]\\n\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Accuracy 53.30\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"100%|██████████| 98/98 [03:08<00:00,  1.92s/it]\\n\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Accuracy 96.87\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"100%|██████████| 98/98 [03:07<00:00,  1.92s/it]\\n\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Accuracy 97.75\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"100%|██████████| 98/98 [03:07<00:00,  1.91s/it]\\n\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     
\"text\": [\n      \"Accuracy 99.72\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"from tqdm import tqdm\\n\",\n    \"\\n\",\n    \"noise = torch.zeros([50000, 3, 32, 32])\\n\",\n    \"data_iter = iter(clean_train_loader)\\n\",\n    \"condition = True\\n\",\n    \"train_idx = 0\\n\",\n    \"\\n\",\n    \"while condition:\\n\",\n    \"    # optimize theta for M steps\\n\",\n    \"    base_model.train()\\n\",\n    \"    for param in base_model.parameters():\\n\",\n    \"        param.requires_grad = True\\n\",\n    \"    for j in range(0, 10):\\n\",\n    \"        try:\\n\",\n    \"            (images, labels) = next(data_iter)\\n\",\n    \"        except:\\n\",\n    \"            train_idx = 0\\n\",\n    \"            data_iter = iter(clean_train_loader)\\n\",\n    \"            (images, labels) = next(data_iter)\\n\",\n    \"        \\n\",\n    \"        for i, _ in enumerate(images):\\n\",\n    \"            # Update noise to images\\n\",\n    \"            images[i] += noise[train_idx]\\n\",\n    \"            train_idx += 1\\n\",\n    \"        images, labels = images.cuda(), labels.cuda()\\n\",\n    \"        base_model.zero_grad()\\n\",\n    \"        optimizer.zero_grad()\\n\",\n    \"        logits = base_model(images)\\n\",\n    \"        loss = criterion(logits, labels)\\n\",\n    \"        loss.backward()\\n\",\n    \"        torch.nn.utils.clip_grad_norm_(base_model.parameters(), 5.0)\\n\",\n    \"        optimizer.step()\\n\",\n    \"    \\n\",\n    \"    # Perturbation over entire dataset\\n\",\n    \"    idx = 0\\n\",\n    \"    for param in base_model.parameters():\\n\",\n    \"        param.requires_grad = False\\n\",\n    \"    for i, (images, labels) in tqdm(enumerate(clean_train_loader), total=len(clean_train_loader)):\\n\",\n    \"        batch_start_idx, batch_noise = idx, []\\n\",\n    \"        for i, _ in enumerate(images):\\n\",\n    \"            # Update noise to images\\n\",\n    \"            batch_noise.append(noise[idx])\\n\",\n  
  \"            idx += 1\\n\",\n    \"        batch_noise = torch.stack(batch_noise).cuda()\\n\",\n    \"        \\n\",\n    \"        # Update sample-wise perturbation\\n\",\n    \"        base_model.eval()\\n\",\n    \"        images, labels = images.cuda(), labels.cuda()\\n\",\n    \"        perturb_img, eta = noise_generator.min_min_attack(images, labels, base_model, optimizer, criterion, \\n\",\n    \"                                                          random_noise=batch_noise)\\n\",\n    \"        for i, delta in enumerate(eta):\\n\",\n    \"            noise[batch_start_idx+i] = delta.clone().detach().cpu()\\n\",\n    \"        \\n\",\n    \"    # Eval stop condition\\n\",\n    \"    eval_idx, total, correct = 0, 0, 0\\n\",\n    \"    for i, (images, labels) in enumerate(clean_train_loader):\\n\",\n    \"        for i, _ in enumerate(images):\\n\",\n    \"            # Update noise to images\\n\",\n    \"            images[i] += noise[eval_idx]\\n\",\n    \"            eval_idx += 1\\n\",\n    \"        images, labels = images.cuda(), labels.cuda()\\n\",\n    \"        with torch.no_grad():\\n\",\n    \"            logits = base_model(images)\\n\",\n    \"            _, predicted = torch.max(logits.data, 1)\\n\",\n    \"            total += labels.size(0)\\n\",\n    \"            correct += (predicted == labels).sum().item()\\n\",\n    \"    acc = correct / total\\n\",\n    \"    print('Accuracy %.2f' % (acc*100))\\n\",\n    \"    if acc > 0.99:\\n\",\n    \"        condition=False      \\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 4,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"tensor([[[[ 2.5098e-02,  3.1373e-02,  2.8235e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          [-9.4118e-03,  1.8824e-02, -1.2549e-02,  ..., -2.8235e-02,\\n\",\n      \"           -2.8235e-02, 
-2.8235e-02],\\n\",\n      \"          [-2.1961e-02, -3.1373e-02, -3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [ 3.1370e-03, -2.1961e-02, -2.8235e-02,  ...,  6.2747e-03,\\n\",\n      \"            1.2549e-02,  2.5098e-02],\\n\",\n      \"          [-3.1370e-03,  9.4119e-03,  9.4119e-03,  ...,  2.3842e-07,\\n\",\n      \"           -2.5098e-02, -3.1373e-02],\\n\",\n      \"          [-3.1370e-03,  3.1375e-03, -1.5686e-02,  ..., -6.2742e-03,\\n\",\n      \"           -2.3842e-07, -1.5686e-02]],\\n\",\n      \"\\n\",\n      \"         [[-3.1373e-02, -5.9605e-08, -2.8235e-02,  ...,  3.1371e-03,\\n\",\n      \"           -1.5686e-02, -2.1961e-02],\\n\",\n      \"          [-3.1373e-02,  1.2549e-02,  6.2745e-03,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          [ 3.1373e-02,  2.5098e-02,  2.5098e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [-3.1373e-02,  1.8823e-02,  9.4115e-03,  ..., -1.8824e-02,\\n\",\n      \"            9.4117e-03,  2.1961e-02],\\n\",\n      \"          [ 3.1370e-03, -6.2745e-03, -9.4119e-03,  ...,  3.1373e-02,\\n\",\n      \"            2.5098e-02,  2.8235e-02],\\n\",\n      \"          [ 1.2549e-02,  2.1961e-02,  1.5686e-02,  ...,  1.2549e-02,\\n\",\n      \"            2.5098e-02,  3.1373e-02]],\\n\",\n      \"\\n\",\n      \"         [[ 3.1373e-02, -2.5098e-02, -2.5098e-02,  ..., -2.8235e-02,\\n\",\n      \"           -2.8235e-02, -2.8235e-02],\\n\",\n      \"          [ 3.1373e-02,  0.0000e+00,  1.2549e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [-1.8823e-02,  0.0000e+00, -3.1373e-02,  ..., -1.8824e-02,\\n\",\n      \"           -1.8824e-02,  2.8235e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [-2.1961e-02, -2.8235e-02, -2.5098e-02,  ..., 
-3.1373e-02,\\n\",\n      \"           -2.7451e-02, -2.8235e-02],\\n\",\n      \"          [ 3.1373e-02,  3.1372e-03,  3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"           -1.2549e-02, -1.8824e-02],\\n\",\n      \"          [ 9.4117e-03,  1.5686e-02, -2.8235e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02]]],\\n\",\n      \"\\n\",\n      \"\\n\",\n      \"        [[[-3.1373e-02, -3.1373e-02, -3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  2.5098e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [-2.3842e-07,  6.2742e-03, -1.8824e-02,  ..., -2.5098e-02,\\n\",\n      \"           -2.5098e-02, -2.5098e-02],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ..., -2.8235e-02,\\n\",\n      \"           -2.8235e-02, -2.8235e-02],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ..., -2.8235e-02,\\n\",\n      \"           -2.8235e-02, -2.8235e-02]],\\n\",\n      \"\\n\",\n      \"         [[ 3.1373e-02,  3.1373e-02,  3.1373e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          [ 3.1373e-02,  3.1373e-02,  3.1373e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          [-1.2549e-02, -3.1373e-02,  1.8824e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [ 1.2549e-02,  6.2747e-03,  2.5098e-02,  ...,  2.5098e-02,\\n\",\n      \"            1.2549e-02,  3.1373e-02],\\n\",\n      \"          [ 3.1373e-02,  3.1373e-02,  3.1373e-02,  ...,  2.8235e-02,\\n\",\n      \"            2.8235e-02,  2.8235e-02],\\n\",\n      \"          [ 2.8235e-02,  
3.1373e-02,  3.1373e-02,  ...,  2.8235e-02,\\n\",\n      \"            2.8235e-02,  2.8235e-02]],\\n\",\n      \"\\n\",\n      \"         [[-2.1960e-02, -3.1373e-02, -3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [-3.1373e-02, -1.2549e-02, -3.1373e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          [-1.8824e-02,  2.5098e-02,  1.8824e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ..., -2.8235e-02,\\n\",\n      \"           -2.8235e-02, -2.8235e-02],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ..., -2.8235e-02,\\n\",\n      \"           -2.8235e-02, -2.8235e-02],\\n\",\n      \"          [ 1.2549e-02,  6.2746e-03, -3.1373e-02,  ..., -2.8235e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02]]],\\n\",\n      \"\\n\",\n      \"\\n\",\n      \"        [[[-3.1373e-02, -3.1373e-02, -3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"            1.0980e-02,  1.0980e-02],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ...,  3.1372e-03,\\n\",\n      \"            3.1372e-03,  3.1372e-03],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ...,  7.0588e-03,\\n\",\n      \"            7.0588e-03,  7.0588e-03],\\n\",\n      \"          ...,\\n\",\n      \"          [ 5.9605e-08, -6.2745e-03, -1.2549e-02,  ..., -1.2549e-02,\\n\",\n      \"           -2.5098e-02, -6.2745e-03],\\n\",\n      \"          [-2.5098e-02, -1.8824e-02,  6.2746e-03,  ...,  1.2549e-02,\\n\",\n      \"            1.2549e-02,  6.2746e-03],\\n\",\n      \"          [-1.2549e-02, -1.8824e-02,  3.1373e-02,  ...,  5.9605e-08,\\n\",\n      \"            5.9605e-08, -6.2745e-03]],\\n\",\n      \"\\n\",\n      \"         [[ 3.1372e-03,  1.0980e-02,  1.0980e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, 
-3.1373e-02],\\n\",\n      \"          [ 3.1372e-03,  3.1372e-03,  3.1372e-03,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [-2.5098e-02, -1.4902e-02,  7.0588e-03,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [ 6.2745e-03,  1.2549e-02,  1.2549e-02,  ..., -6.2746e-03,\\n\",\n      \"           -6.2746e-03, -5.9605e-08],\\n\",\n      \"          [ 1.8824e-02,  2.5098e-02, -5.9605e-08,  ..., -1.2549e-02,\\n\",\n      \"            1.2549e-02,  2.5098e-02],\\n\",\n      \"          [-6.2746e-03,  3.1373e-02, -3.1373e-02,  ..., -5.9605e-08,\\n\",\n      \"           -6.2746e-03,  2.8235e-02]],\\n\",\n      \"\\n\",\n      \"         [[-3.1373e-02, -3.1373e-02, -3.1373e-02,  ...,  1.0980e-02,\\n\",\n      \"            1.0980e-02,  1.0980e-02],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ...,  3.1372e-03,\\n\",\n      \"            3.1372e-03,  3.1372e-03],\\n\",\n      \"          [-6.2747e-03, -3.1373e-02, -3.1373e-02,  ...,  7.0588e-03,\\n\",\n      \"            7.0588e-03,  7.0588e-03],\\n\",\n      \"          ...,\\n\",\n      \"          [-6.2745e-03, -6.2745e-03, -6.2745e-03,  ..., -2.8235e-02,\\n\",\n      \"           -2.8235e-02, -2.5098e-02],\\n\",\n      \"          [ 1.2549e-02,  1.8824e-02,  1.2549e-02,  ..., -2.1961e-02,\\n\",\n      \"           -2.5098e-02, -2.8235e-02],\\n\",\n      \"          [-2.5098e-02, -3.1373e-02, -1.8824e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -1.8824e-02]]],\\n\",\n      \"\\n\",\n      \"\\n\",\n      \"        ...,\\n\",\n      \"\\n\",\n      \"\\n\",\n      \"        [[[ 6.2745e-03,  1.2549e-02, -3.1373e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          [-4.4703e-08, -2.8235e-02, -3.1373e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          
[-2.5098e-02, -2.5098e-02, -2.5098e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [ 1.2549e-02,  6.2746e-03, -6.2745e-03,  ..., -6.2745e-03,\\n\",\n      \"           -6.2745e-03, -6.2745e-03],\\n\",\n      \"          [-6.2745e-03, -6.2745e-03,  5.9605e-08,  ..., -2.2352e-08,\\n\",\n      \"           -1.4901e-08, -2.3529e-03],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -1.8824e-02,  ..., -2.5098e-02,\\n\",\n      \"           -2.5098e-02, -2.5098e-02]],\\n\",\n      \"\\n\",\n      \"         [[-1.2549e-02,  1.8823e-02,  3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [ 3.1373e-02,  3.1373e-02,  3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [ 1.2549e-02,  1.2549e-02, -6.2742e-03,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [-1.8824e-02,  6.2745e-03,  6.2745e-03,  ...,  6.2745e-03,\\n\",\n      \"            6.2745e-03,  6.2745e-03],\\n\",\n      \"          [-5.9605e-08, -5.9605e-08, -5.9605e-08,  ..., -5.9605e-08,\\n\",\n      \"            6.2745e-03,  1.2549e-02],\\n\",\n      \"          [ 3.1373e-02,  2.1961e-02,  1.2549e-02,  ...,  2.5098e-02,\\n\",\n      \"            2.5098e-02,  2.5098e-02]],\\n\",\n      \"\\n\",\n      \"         [[ 1.2549e-02, -3.1373e-02, -3.1373e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          [ 3.1373e-02, -3.1375e-03,  3.1373e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          [ 3.1373e-02,  1.4902e-02,  3.1373e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [-6.2745e-03, -6.2745e-03, -6.2745e-03,  ..., -2.5098e-02,\\n\",\n      \"           
-2.8235e-02, -2.5098e-02],\\n\",\n      \"          [ 5.9605e-08, -1.5686e-02, -1.2549e-02,  ..., -2.8235e-02,\\n\",\n      \"           -2.5098e-02, -6.2746e-03],\\n\",\n      \"          [-1.8824e-02, -2.8235e-02, -2.8235e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02]]],\\n\",\n      \"\\n\",\n      \"\\n\",\n      \"        [[[ 3.1373e-02,  3.1373e-02,  3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [ 3.1373e-02,  3.1373e-02,  3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ...,  3.1370e-03,\\n\",\n      \"           -2.3842e-07, -2.3842e-07],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ..., -1.8824e-02,\\n\",\n      \"           -1.8824e-02, -1.8824e-02],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ..., -1.8824e-02,\\n\",\n      \"           -1.8824e-02, -1.8824e-02]],\\n\",\n      \"\\n\",\n      \"         [[-3.1373e-02, -3.1373e-02, -3.1373e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          [ 3.1373e-02,  3.1373e-02,  3.1373e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  3.1373e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [ 3.1373e-02,  3.1373e-02,  3.1373e-02,  ...,  9.4119e-03,\\n\",\n      \"           -9.4115e-03,  1.2549e-02],\\n\",\n      \"          [ 3.1373e-02,  3.1373e-02,  3.1373e-02,  ...,  1.8824e-02,\\n\",\n      \"            1.8824e-02,  1.8824e-02],\\n\",\n      \"          [ 3.1373e-02,  3.1373e-02,  3.1373e-02,  ...,  
1.8824e-02,\\n\",\n      \"            1.8824e-02,  1.8824e-02]],\\n\",\n      \"\\n\",\n      \"         [[ 3.1373e-02,  3.1373e-02,  3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [ 3.1372e-02, -3.1373e-02,  3.1372e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ..., -1.8824e-02,\\n\",\n      \"           -1.8824e-02,  2.8235e-02],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ..., -1.8824e-02,\\n\",\n      \"           -1.8824e-02, -1.8824e-02],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -3.1373e-02,  ..., -1.8824e-02,\\n\",\n      \"           -1.8824e-02, -1.8824e-02]]],\\n\",\n      \"\\n\",\n      \"\\n\",\n      \"        [[[ 3.1373e-02,  3.1373e-02,  3.1373e-02,  ..., -2.5098e-02,\\n\",\n      \"           -2.5098e-02, -3.1373e-02],\\n\",\n      \"          [ 3.1373e-02,  3.1373e-02,  2.8235e-02,  ..., -2.5098e-02,\\n\",\n      \"           -2.5098e-02, -2.5098e-02],\\n\",\n      \"          [ 3.1372e-02,  1.8823e-02, -3.1373e-02,  ..., -2.5098e-02,\\n\",\n      \"           -2.5098e-02, -2.5098e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [-2.5098e-02, -2.5098e-02, -2.5098e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [-2.5098e-02, -2.5098e-02, -2.5098e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1372e-02],\\n\",\n      \"          [-3.1373e-02, -3.1373e-02, -2.5098e-02,  ...,  2.8235e-02,\\n\",\n      \"            2.8235e-02,  2.8235e-02]],\\n\",\n      \"\\n\",\n      \"         [[-3.1373e-02, -3.1373e-02, -2.8235e-02,  ...,  2.5098e-02,\\n\",\n      \"            2.5098e-02,  3.1373e-02],\\n\",\n      \"          
[-3.1373e-02, -3.1373e-02, -2.3842e-07,  ...,  2.5098e-02,\\n\",\n      \"            2.5098e-02,  3.1373e-02],\\n\",\n      \"          [-6.2742e-03,  2.5098e-02,  3.1373e-02,  ...,  2.5098e-02,\\n\",\n      \"            2.5098e-02,  2.5098e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [ 2.5098e-02,  2.5098e-02,  2.5098e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  2.8235e-02],\\n\",\n      \"          [ 2.5098e-02,  2.5098e-02,  2.5098e-02,  ...,  3.1373e-02,\\n\",\n      \"            3.1373e-02,  2.8235e-02],\\n\",\n      \"          [ 3.1373e-02,  3.1373e-02,  3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02]],\\n\",\n      \"\\n\",\n      \"         [[ 1.8823e-02,  2.8235e-02,  6.2742e-03,  ..., -2.5098e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [-1.2549e-02, -3.1373e-02, -3.1373e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [-2.5098e-02, -2.5098e-02, -2.5098e-02,  ..., -2.5098e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          ...,\\n\",\n      \"          [-2.5098e-02, -2.5098e-02, -2.5098e-02,  ..., -2.8235e-02,\\n\",\n      \"           -2.8235e-02, -2.8235e-02],\\n\",\n      \"          [-2.5098e-02, -2.5098e-02, -2.5098e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02],\\n\",\n      \"          [-3.1373e-02, -2.5098e-02, -2.5098e-02,  ..., -3.1373e-02,\\n\",\n      \"           -3.1373e-02, -3.1373e-02]]]])\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"# Examine the noise\\n\",\n    \"print(noise)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"<h3>Create Unlearnable Dataset</h3>\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 5,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": 
[\n      \"Files already downloaded and verified\\n\",\n      \"Files already downloaded and verified\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"\\n\",\n    \"# Add standard augmentation\\n\",\n    \"train_transform = [\\n\",\n    \"    transforms.RandomCrop(32, padding=4),\\n\",\n    \"    transforms.RandomHorizontalFlip(),\\n\",\n    \"    transforms.ToTensor()\\n\",\n    \"]\\n\",\n    \"train_transform = transforms.Compose(train_transform)\\n\",\n    \"clean_train_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\\n\",\n    \"unlearnable_train_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\\n\",\n    \"\\n\",\n    \"perturb_noise = noise.mul(255).clamp_(0, 255).permute(0, 2, 3, 1).to('cpu').numpy()\\n\",\n    \"unlearnable_train_dataset.data = unlearnable_train_dataset.data.astype(np.float32)\\n\",\n    \"for i in range(len(unlearnable_train_dataset)):\\n\",\n    \"    unlearnable_train_dataset.data[i] += perturb_noise[i]\\n\",\n    \"    unlearnable_train_dataset.data[i] = np.clip(unlearnable_train_dataset.data[i], a_min=0, a_max=255)\\n\",\n    \"unlearnable_train_dataset.data = unlearnable_train_dataset.data.astype(np.uint8)\\n\",\n    \"\\n\",\n    \"\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"<h3>Visualize Clean Images, Error-Minimizing Noise, Unlearnable Images</h3>\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 6,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\\n\"\n     ]\n    },\n    {\n     \"data\": {\n      \"image/png\": 
\"iVBORw0KGgoAAAANSUhEUgAAAg8AAAIGCAYAAADTKmxqAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy86wFpkAAAACXBIWXMAAAxOAAAMTgF/d4wjAACJgklEQVR4nO39eXBd53XmC699RswACRDgAcFBFAfNkgnSpq1YouK4k5sm7cSx05E8qROblPvzze0rqb/4VnWVqKpcdXxL4r1JutwidbvlfK1I125JTot0bMuWTdmOZYuDBmviJIIkiAOCBDEcTGfc3x9K6+Zdz4Lw7kOAJJTnV+Uqv0trv3ufvfd5+PK8D9cKwjAMhRBCCCHEk9ilvgBCCCGEzC+4eCCEEEJIJLh4IIQQQkgkuHgghBBCSCS4eCCEEEJIJLh4IIQQQkgkuHgghBBCSCTmfPFw5MgR+chHPiJr1qyRDRs2yOuvvz7XpySEEELIHBLMdZGo3/zN35QvfOELcuedd8qTTz4pX//612Xfvn3veUw6nZZFixbN5WURQggh5D04e/as5PN587/N6eJhYGBAVq1aJefPn5dEIiFhGEomk5Gf//znsmrVqmmP6+rqkt7e3rm6LEIIIYTMwHv9WTyn2xanTp2STCYjiURCRESCIJBly5bJyZMnnbwdO3ZIV1fXu/8bGxuby8sihBBCyAVwWRgm7777bunt7X33fw0NDZf6kgghhBAyDXO6eFi6dKlks1kplUoiIhKGoZw8eVKWLVs2l6clhBBCyBwyp4uH9vZ2WbdunTz22GMiIvLUU09JV1fXe/odCCGEEHJ5M+f/2uLQoUNy5513yuDgoDQ1Ncmjjz4q119//XseQ8MkIYQQcml5rz+LE3N98rVr18oLL7ww16chhBBCyEVizhcPc0Z5CmNBcPGvY0asH3aKMx8V4mepqLmsnFBKOFnZjYVFvKZYJQ6xuNrVCit4nM8PV0GVz6XaH8Ws4yqVCiZWys4w1b7Ya/7gOfV5hn2vbB7Q4pnX7JFzkxGrVnG8XoWMEcvOfFiwxuN8hzDHY9P37Mwp0ub9mmvNuxz1TgRv3sx6985R7ufRemflvBPTmocaGJZm1jytdyKoeZYGWnhpnpGC89dgklF2QWtewkPvRESkvWXay5uJy+JfWxBCCCFk/sDFAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjF/DZPzmpnXbIFhFgq0eaeMxqB4DI0yei7DZylBYJgMi+78FcMsFMQMk5Ey78TjaMacSyyzkmlguiwNtpchy42YLtVy7mJcyD9lFv+FuTWVx1+rfuQx9bBHTptHzvzG7++oWqcsDbQM4XFxNc88zkPztN6JoOYFQR3ObRi0vTTPeu/yM7/X5sxayy6C3vGXB0IIIYREgosHQgghhESCiwdCCCGERIKLB0IIIYREgobJS4Au/hVYlRQDy/joxhKG/6U4iZU3C1OT7jiPxqDClFGZUq0tFyxoxRTLiaRTjM9XbdVJH6y5Y5axU4wqbF6cdoefXoIpT1Y59VxiGR9b1HjYcy79usyi668hh7HjTe6zGvac64Qa+x7X4plXzdw+xTltUu6wYlRuvAz/Omh9yyxTo87Ueidi/4FVzLuap/VOxFPzQrx5HXXqux3zNOrqvLk2Z+v5Db2zn0T1XIavGiGEEEIuZ7h4IIQQQkgkuHgghBBCSCTmr+fBtwhGlZ0Zq74GfT7LzhBTtz20OqBhrDDh7uX9/PmfQM6xt96A2MT4uDMeH0dfxOjIOMTa2tqd8Wc//0XIWdSBnQwvtp9Bx3zP72HXMHnwt9pnTvq0x0SGL+JenwsYNmItM4xF0AAgIjKixj4FoQzM3dTgGSPY7abILiPnPmsyZzRsZFix2cLnfL7nb1Hj9b4XUVF/17Pec5+uj1X/ldH6wnh01TX/mLHeGDdWyKN3wdK8XqV5E4Oob+Pj6A/JjQw547a2ZZCjNW+hoXdz6mfw/XOuyi6eFw
J/eSCEEEJIJLh4IIQQQkgkuHgghBBCSCS4eCCEEEJIJOavYXKeYNmJQu3UM5JiZQyOnj/vjJ/6fx6HnOLEBMTqa2rduRMpyMkX0cAUj7n92woFNCLNpoHRx9zqU3DKyrEIYrPlIDprxBa5w2rNkbNJixHTFYs8zJEWwc6dRnSLx5H3e8Zcho0vjfZ+vpM3O7R4zG3lWOjjqge/xyJ5DF3kvyLqJxN6OvVi6kitdyK25sWHXGNlfboF5zY1z9XKeOw05BQKw24g6IScqg2MPjplFhCs8h8GzJre/eN0szobIYQQQt73cPFACCGEkEhw8UAIIYSQSHDxQAghhJBI0DA556BJpVxRsVLZ4yiRs2f6nXHS6JzW3LIA51Lmy3wRq62lkkmI1dSk1DgNORWjEqa++liVRh3fbpz6GrwrTKrKnrO5ku77393xjlmcu2qqrB7pQ7BtG8R23tsNsW23q3qKVoFJD540viG3GhUte7xMm0iLGg8bOSs8cnzmfv/hPpuypWbhzJqn9U7E1ry6jhZnHBuPQ85UcQxilUaleS2GAbXF1bxSCg3piSnjj9GYZWb1QGueWUXU0Fwvgya7ahJCCCHkEsLFAyGEEEIiwcUDIYQQQiJBz8Os49FVU+1rWb6BiYkcxI4dOeKMx8cwZ83V10IsHrh7gK+9+SbkpBL4KiRULGn4IooFLEoTqH3JeBz3IGPG3qXe9TSLPVlFqdQ49OksaB1YNT0Q6Vw7W3NXiS7+JDLHm+14z7fh62nkVfcQzKallU9AqBhzz9fjOX+LR44uSuVzzOxi7GFf9L8O+nTVxBwfzdN6JzKd5rneGq13ItNonrourXciqHl5Q+8KcfSQ1ZXUg/DQOxHx8zxY+Ggeu2oSQggh5FLCxQMhhBBCIsHFAyGEEEIiwcUDIYQQQiLx/jJMVtndDLpcCpr1TNNPuYTHVdziJzHD9BcvubHx4UHIOfCLn0Nsz3e+44zPZgcgp/k3boHY+Pi4M17Y3gY5FeM6mxvdbpx1SVxrBmWji58qgpWIY8EU655rR08lrMHjymiGCgLXfIUZYhZIKVdbNOVTyjj6EUy5t8djnhbPmOZGI7bCY55Z/LafVt+HzirNWMGoYfRqnD1n1/PqnWqWUSOrESIr1LjFOGpYjV/xvCbr8fmhvmuB9Xe/mf8+aHW5DA1905pXEUPvBIs96e6YcaNDsI/mab0TERnJ4vPrWN/qzq30TkSkswk1byLlXpfWOxHUPFPvTCei0hZDy8xnpfMMvZPAQ7csbWORKEIIIYRcSrh4IIQQQkgkuHgghBBCSCS4eCCEEEJIJILQLON3aenq6pLe3t73TqoYxhWzitfMWOY9bS0JTAMKGoi0iXJqDA0+lSm3Itk3//MjkPPiL/8BYufPugbJRBw/b31dPcTKZdfUVFtbBzk33ngDxGpqXfNOU2MT5GQWd0JsxRUrnXFzEx7X0toKsUTSNVYWKujwKxnPSnfRtLpqWq95Rd2X+sUZyLEInlLz93gdhrRUmWPFtAsP/WGzShjo+3mHkfW4EdMVAvHd6Au3QmyJT/vN/RgKu2dMkXsD7P75pBxwxotmPrs/6vXxl2CteVXqnWHws3vj6qihd0ZsasLVPK13In6ap/VORKShiNpVX+eaCstlrIRbW4tmyBtvdMvAphagqbFGaZ6pd0tXQqxjgXtcg6F3kjI6byrNm/DQOytm6t04mltn0rz3+rOYvzwQQgghJBJcPBBCCCEkElw8EEIIISQS768iUVUzc4EUq0iUFZoYHXbGP/je9yDn+uuuc8btmQ7IueZ67I6ZPe3uo8UNj8fIufMQS6iulseOHYOc4SEs2vLbv/M7zvj73/97yCkUCxC77ZZbnfHiVtwxbjS8Gdded70zblmK+3FJ47hCyd2bLYV4X8pGTD8/nPkyxeqYOcceB0DvxRp1cnLhEzNOs9eIbTHsDfv3b3bGB2QP5GwzjvMrN3UAIrPqcdBccpfZzHo3XUwzMT
YMMa15Wu9E/DRP652ISL2leb2u5sXrsbDSsWOHIHb2l33OePNtvwM5P3rO1bxJQ+82/YtbIaY1z0fvRFDzfPROBDXP1Ls0hi5E8/jLAyGEEEIiwcUDIYQQQiLBxQMhhBBCIsHFAyGEEEIiQcPkNOgaOIFRdCMMsegGFizCuRtb3C5+V19/NeQs6lgAsVcPukVF3nj9NcgZVYZNEZEFTa7Drr0NC5akjYIl117lFlE5cfwo5Lx9Es2XA2fcoiKrliyGnL63j0DsXM/bzrg5sxByrrz2GogtWXGlM07XoNEqH6KJqlyeve6Ns0bLDOPpYhcZuHPeJsAH1fgeTMGaTbJTVXfq69TFpkSkD5+7YE0fYmD3uHUfqtVB0y5Y5I613on4aZ7WOxFb884Xhp3xghp0FDebmue+L1dei/1OD7/tatmxk6iBA1ksoqQ1z0fvRFDzfPROBDXvYugdf3kghBBCSCS4eCCEEEJIJLh4IIQQQkgk3l+eh4rV3qXKqVSDq3gM94umpiYgNjYy7IxvuB4LpNSk3b28Yn4KcibHxvGiVHOukyeOQ8rCetz3XdDsxsolbFSTTGIzmWTCXVuWStiMrLEeG860LWxR58c9z6bVuG9XyI0549Ghc5Dz+vM/gdjpNw874yUrr4KcjqVXQKyuET0VPjz46Zlz7n2yqqmxwdUJI2dVlXNXyTppgNhBiGwyjrSa7iiPQx9m3Lceg1uhMdZ9eOD9aJY4MOoWB9rTOAY5c0llPcbWW925vCabMVD1XwetBldx5YSYKs6sdyKoeVrvRDw1z2hG6KN5Wu9ERMol9AAk467moQKKxEsjznhRPTbmamtogdiCJlfzfPROBDXPR+9EUPOWLUK9k0XV6d108JcHQgghhESCiwdCCCGERIKLB0IIIYREgosHQgghhETi/WWYrBbDmBOPBzPmTE2gqfFkj2vo6VqCVWrCvGs8LEygEak4iTFt6FncioVPrl6zBmIbP/ghZ/zcj34EOdn+sxAbH3MNPS1NaEQaGkRTY026xhk3NmHRlnzM+Hxx93Vc2ITmpNB4DhMFN3bi9dchp/dYD8Talq90xjcYpkofDny4qsPsYk+veOTMItoK+XgOzZFb7sDjtOfvQOdeyLk3g4awTVm30+ZWwzCZ2YyxUHshux+CnO14mNyhLqsBvbsit2FIf2uNS5IH4e4ZBa9kK0R2Bsr8edG7bBp6Z5aJcvN89E4ENU/rnYiteRUVa0RPpXTVo+atUZr3IaV3IrbmDSjNy49hR+JmpXlDw6h3LXU1EGusczUvHxp6V8Q/fhemXM2z9K5cwFj/AVfzztbgF2vB8i6IXfMvq9M8Ef7yQAghhJCIcPFACCGEkEhw8UAIIYSQSHDxQAghhJBI0DApIoVCAWLHj7pVvBa2oFmwVEAjkK5u9rPn90LO0MCAMz5hmI5ScVzXxQPXKHPFkiWQ09HaBrFxVclsbBQ7EqYSeL6XX3LrCA5kz0CO0VhUAvVaxVNYhTLdiK9eqt51syUqWAlTd/oTEUmU3ItI5fF55o3Y4OAAxPxwr+uJF/6NkfPIzNN8rMrTzyEHjAqM9+7GPKxx58cecZ/xnlwWctCyaWAZHy32utbOsYeMlp0G+vNZdSnRYrjXyFoLkfWh4RL1Iq3GqD8+FMKZ9U4ENc9H70RQ87TeifhpntY7EZGlV6LmtS9xNW+8iE9rrICaF693z7f/Daybeuq8q3mTxl+380nUsnjKfUHTFXyrU2lDy5TmWXoXL6HopvOuITOfR4PmyPgIxC4E/vJACCGEkEhw8UAIIYSQSHDxQAghhJBI/PPzPMRwvTQxhvthP1f7ds1GZ8hUAju1JQJ3j6pcxP3Fk8fedsZH3noTcjoXd0DsmqvdYihvvfEa5JzJ4h7y0qVLnfHZs+hdiCfwVfjlC//gjBcvxv3GmiT6GZJJt2hKsYz7dmEczxeq2xkzvBJi7IPGVDfVemOfsMbYr60LjPmr4EH5BgbvcT0P9/
7Ps3KqWadx+05nfP99t2OSVT8odL8P63DrW7YbRoEn1Pjww5hzYKtxQusagFEM3eZe56zWY1rjXlRwaJo8wL1ZYdVXpT0QIlIxfBBK83z0TgQ1z0fvRFDztN6JiBx5+S2Ida5wdUrrnYjIW29gAbgzx192xiuU3omInD2F+/1x5VX45Zl/gBytebbeYRG8YsG9V2m8daB3IiJJrXmG3kkF/QzxGreEYLqAhatis6R37843q7MRQggh5H0PFw+EEEIIiQQXD4QQQgiJBBcPhBBCCInE/DVMxgwHVaUEobDiFtQYH0ZT1dCZ0xBb3OKahT60ETu1hQFeQynvtoI79CaaIU82uAab1gVY2Gl4BA0+x467xqNQd/4UkeZ6dK6Njgw746ZGNPg0NS6A2GB8yBkX8miOqmuox7laFjrjiqAzKGYYV2PqfiZieJxVNCUU9xlbr0ZNLRqISoHuU+rLzMe1PKkC66s8lUHFyzxoGK2MwlVj9x1wxg/dvx1y7nkInYBGLSkvtujAXWjeO2R08bwKauzcBzlvNaGpGUs0VckavOn+BslZIqbM14beSQwLCI1NuJrno3ciqHmp/Mx6JyJy6E1Xp84k0DA5tgC7Yw6fcYtJ9Rp/PKWLeA26UNWk0jsRkXZT81qc8cAY6o3WPEvvWmvws6SU5gUeeieCmhfIJORM1qD+JNVUs6t3NvzlgRBCCCGR4OKBEEIIIZHg4oEQQgghkeDigRBCCCGRmL+GydCqyoaxiRHX9Pf6q69CzuToMMQGsq6p6PgR7DqXy6H5cvHijDNOGqY/3eExlcZKcTmjA9qQMgLV1eFxV117NcQO7n/JGccTaJxpa8eKlomkaw4aGcXPO2J06HztNbcK3NDQMOQ0NGCXufq6OveaWtDkVJvCz5yuUbE43vPaOjQQVSrVVfa79/9WgRYjaZYMkkYBRpNAMiqClUZFtkOkSedhitzzkFE+0gvr/ipnl9FC8wmPtpqZPfdDrHMMY148jOZL2ebO5eVRnXPU/Yzh/R0fH4KY1jwfvRMR6VGalz+L3//MYv3eiaSUObCQx6qXoaF5pbKreUMjWAm33tS8a5zxwf1oUo8n0Eja1t7int/o9Ks1b2QQ9e6N1w5AbHSo3xmbeldfB7G2DlfzAh+9EwHNm029mw7+8kAIIYSQSHDxQAghhJBIcPFACCGEkEjMY8+DUQTH8EEMnj/vjOvrschHm9Exs6yKn3QsXgw5C1uxOEhFdXhc3NUFOYszrr/A2sdraMQ9q9zYsDO2bB8v/OpFiOULRWe8ZvVVkFMxlpFFcYvQDI3ifur1110HsVG1T3jy5EnISRhdPPW+4OlUCo8zl7vujvSY0TUwZewddl2x0hkv/41/YU2OtMwwnkXusmLm1qX2OFimC8sHMTNNeDs9sZwCeu93v5GD3RQ12U3Y4bUprO7ziVTplbjoaM2bWe9EUPN89E5EpF1pXtHQu3IFdbhlQbs7vgL9VPGFhuaddzVP652IyKTxSv3kFVfz8mERctasQc2biLver+IkatLQqFuk6frrcZ4hw/s2MeBqXuL8zHonIlJ3xtU8H70TQc3z0TsRkeXrPTXPgL88EEIIISQSXDwQQgghJBJcPBBCCCEkElw8EEIIISQSF2yYnJqakj/6oz+SN954Q2pra6W9vV3+03/6T7Jq1SoZGBiQL3zhC3Ls2DFJp9PyjW98Q2655ZbZuG4JDbegZc+amHKNQBXLZWh0Nzt92i2aMjWFhiKLJUtcI1dzSwvkXH2N2+vvyhUrIOfQEWzZ99//7jvqmrDj2pBhFsyoa6oxOsONjmCbxMFR13xVNjo1trZhR9CODtcg9Ytf/AJykkksVLVp0yZ3njbLkIrFXiYmxp1xz4snIKe/vx9iP/6Hf3DGv7v1f4Uckxa/tEtLtebB2cSq9qTfaywy1LAbv4937HK/t/u34swHNxmnQ2/g7JHzqGZlnT83NnOOQagMkj56J2JonofeifhpntY7Ed
Q8rXcifpqn9e6da5pZ87TeiYjUNGDX4NGRCWc8OIhm0/KUq3mtLTPrnYjIL15wNc9H70RQ80y9Gx+HmNa8/jOG3v38HyD2u1/w1DyDWfnlYevWrXLo0CF55ZVX5JOf/KR86UtfEhGRr33ta7Jx40Y5cuSIPProo3LHHXdIsYhOWEIIIYTMHy548VBTUyO/+7u/K8E/rmY3btwoPT09IiLy7W9/W+66651/bLZhwwbp7OyU559//kJPSQghhJBLyKx7Hv7yL/9SPvnJT8rg4KAUi0VZ/E/+rfCKFSvMf/O/Y8cO6erqevd/Y2P4EzohhBBCLg9mdfHwwAMPyNGjR+U//If/EOm4u+++W3p7e9/9n1U8gxBCCCGXB7NWYfLBBx+Up59+Wn70ox9JXV2d1NXVSSKRkP7+/nd/fejp6ZFly5bNyvlCo0NYEGAsGXc/4suvvw45Vy5fCrHuDRucccLo1DgyPIwXpsxI/Vk0rvWecs0tH7l5I+TE0lhd8Qff/74zTqexCuXAEHbj7Otzu9ONj6PpyDI+VVSFyYqgeSfb1wexM2fOOuMf//inkBMzlq25nGt8WtKJVT3jcTxQd+OcKhQgp86oLHrqzABehAd/9VvuuMfI2TG7DewuKQ2W8RF8uVWWoTQadmK9PhFtD8zu2QkZa0fvgNhY7nEV2eJ3XapRYsNePY/I7Qfc83VmjIfeacydUYbFbX6XJEVtfJxZ70RQ83z0TgQ1z0fvRFDztN6J+Gme1jsRkXTK0LzzbtfOvtOogeNjaEBf0uZqXqpcgpyi0rzBU9jp89SwYU58zjUnxmJomMyNov9vSbureXHBa6qva4HYVM7Vxbo4ms1PDdnfrGqZlV8eduzYIU888YT88Ic/lJZ/4rT9zGc+Iw8//E5T4X379snp06fl1ltvnY1TEkIIIeQSccG/PPT29so999wjK1eulNtuu01ERNLptPzqV7+Sr3/96/L5z39eVq9eLalUSh577DHzn6wQQgghZP5wwYuHrq4us+aCyDv//vXZZ5+90FMQQggh5DJi3nbVtJYr1iKmTpkvEwn85eOnxj8fve4qt7BJZyduXtbW1kKsbdEiZ3z40FuQM5V3i5MUi3nIKRnFUIaH3T2r/izu2b9x+G2IFUru3toHP3gj5JSLuLdWLrvegY5FuI/20ksvQUzvT69ahd3cFhlzNTW5G+A//slPICeZwle2ubnZGXdm8FnV1GCXudVrsXiND9eot2/YKNezTo0PVnWm2UVfk4jIFrW3v9toaLl7L8Yyn3DHgbfnQe894167D1haSmS0yTJa71LjbiPH8kG4NyYQ9FOg2lifBffaRdS/JtvmZ5AJxN3vD0MsFqT1TgQ1z0fvRFDzfPROBDVP652ISHHS0LxJV/MmzuIe/UAvat7Rw26BK613IiIf/CB2/x1Q/6pP652ISMcit7jUKy9ZXWDx+3/d0uXOeNEifGObDV/bL37kap6P3omIdGYWOmNL765fsxxiFwLLUxNCCCEkElw8EEIIISQSXDwQQgghJBJcPBBCCCEkEvPWMGlh2Y4WLnSNJL/5W78FOb9KYQGosVG30MipU6cgJ2ZUOppUpp/hUSxYcmD/i+65cmgMWnUlOtfyedfQc+bMOcgp5dEsFFOFXMbO4zWtvmIFxDasv8kZ79uHtr/Dh49B7IMfdE1p69ejSa1UsgyabiyZQnNrpYKdPetVAahF7Wji0mZMEZHGVuyQVw3aQCki8r+pAj7JrevxwJ1zZ6O0DIUHAsss6BoDt1uTPYzl4nerj/wJ48iHZRjnCv+LM7RqKGWNdpE5db57jZx9IV7n4fvdgkXZ3CcgZ3c3Poed9+51xmuzeO/gEgy/5n7DaNn5kFtYzboHPmgDpYhIpg6f/G/+hqt5Pnongprno3ciqHmv//xFyPmvhhlSa15xEg2MvSOoeWOB+9yDFL4cgxODEFtxtWsc71Z6JyLyotK8V0+i3n3kg/hu3NDlxgqW3oUYiy
kPpa13dRDTmldn6F2z0QH5QuAvD4QQQgiJBBcPhBBCCIkEFw+EEEIIiQQXD4QQQgiJxLw1TBp+KRGjwmQ84X7EugbsrtjaitUOO5UB5cCBA5Bjtg5X5sTC1BSkFFUFtF+/9mvISafQFDOhumEWi2iO/P0tmyGWSrr3YGwczVFf+dKfQKylpdEZv/LSK5BTMqq5TRmfWTMwgJXiMqqi3dlzQ5BTV4+V0waHzjvj31p5BeSMjOBn1l08fRn2yHlZjSuGKa/qlftuDN2nXs/urGUf1tUWRXZvds17mzvvh5yYVyNKtP3dJUa5yuC/YMwH+MJ/ElI2BP8Vj1P+wc2HGyFlzUN42K6t7ruxdS++K2Od7v1cswmNc9l1qBu7ut08fCqeWG0BEijpCxpdzfPROxHUvIa0oXd5VOKy8kIWDbPgQUPzUkrzzhfQjDnloXkpo3/S2PgwxLYqzdN6JyLystI8S+/GpvA5VAJXpwYGzkOO1jsRkb5z7nvWXI9VKM8MYafmj115izMeHEO9GyxU2fl2GvjLAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjF/PQ+h5XrA4ieB2iyNJ/C4q6+7AWK9b7ud4VZesQRyhs4bhUdWdDnjM2exE91Hb/tNZ2x5BOIBPpqBs/3O+KqrroSc7g9eD7ETPW6nzeVXYAe9ZBK9BK/9+g1nXCxgwRKrNJf2IEgSn8tUBfdBS2op2750GeToQlIiIsMTbjGZl14/Ajkpo+DU8hV4/3ywumhqWtRtqXaV/vA2jG3bZT2Hma9JBP0w27vd9/oT243DjK31Zzrd4Bbv7VT3AwW5rUbOIx7zXAOR0Ubcs9Y0Yh0pm4PuXH0ZNEY8sUn5Q4zKXOh4EJGD2qCy0++aQt2J0ngPajBWSrnFpHz0TkTkyoz7bkycx5t31YpVEDtz1tXFj97225AzPIVFoiq1ruYNDPZDzrUrroXYzTe43Uy13omIrM1gR0mteVrvRCzNwy/DWBa9WemrlX4bepcwRGGZ6sZZLOM9PzeBxbN++aarean07OnddPCXB0IIIYREgosHQgghhESCiwdCCCGERIKLB0IIIYREYv4aJiuGiyvAtVA85pr1ClNY5GPgHBofJybHnfF116LJsGIUP0nUu93MgjR2vus9fcIZl8YnICddh4aX8QnXPLN+/Qcg58ixNyF29uwZZ3zFlVhEKZFCw+TwqFukZWQEi7YkU1jEJIi79zxh5AyNomHq1tWrnfFDn/wDyBkcRHNS7+leZ9xsdJRbe9VVEFvSuRRiPnxJv3r4Gojo5n9VNrTbZrXHNNf8lolSEaLp16ev55cNL+aWrP4+HPKYybikrWiODDyqJt0n/xvEGsf2GpmbIl+TRed2fBC7lddz7nqk/iMp/eJZ7wGak0sFV/N89E5EpFtpnq13+GIHaVdLegfx3SgN4vuaVqZmrXciIh/+F1dDTGue1jsRW/MaKu51ar0TQc1LptCUG4vjZ9G66KN3Iqh5g+fRNHr0HHZ41po3m3o3HfzlgRBCCCGR4OKBEEIIIZHg4oEQQgghkeDigRBCCCGRmLeGydDqKOeRl06jMXDpCjTT1Ne466pjh1+DnPExLKu3oN01pV2xGo0rf3TH59x5cmimOfjiPpx74UJn3NTcDDlTRTT9dC1d4YzjRjXJt45gVcZf/OpFZ/ziSy9DTtlYf5ZV9c8gjubPDR/aCLEbPrDOGWcMg8/KK7FT4weDD7nnC9DhZ70viQQaOavC+hZVaZDUBNsx9owR26Keg+U5NIpVemHVe3wiPOyMcznDVYm+VZHTqjNjI3ZqbMjdAzHdf/CAYMXH3V/GjqBblOcufNzo9GmYPWNQSfAu47iLC7zDxi0PjQqIWvN89E5E5HWleVOG3rW3o3Z1Ks37vWs/Bzk+mqf1TkQk1YGal465mtdVsxhy4vV4X9466WreL375IuS8ePBlZ1wOjS7CIRomg3jZGW+49SbIueHD6yC2oNOtUNwZwyq7Nw
QfxPMpzZtTvftH+MsDIYQQQiLBxQMhhBBCIsHFAyGEEEIi8b7yPIQVo1CO2gs6d05X7xH51S+eh9i6G9xiJB/oxn2mKaNoypkzbgGWV175NeSMnB9wxletNQpQGeu65oWtzvjc+WHIuf56LBx1btC9pu99/4eQc+JkL8QmJt0ufslaLHh11RW4fxpTRaKWLFsBObfceivE4qpITMUoeFMp43OPxd17FTM2g4OYUTzHmGs+8IlLfQEiosv3BFZDS4/b2yAPQuyh7ejO6O5zO08ekHsh5w7D6IFlhg5jkldH0kuP1jwfvRNBzfPROxHUPB+9ExF5WWme1jsRP83Teidia961N7iap/VOROQ7z1qad9oZT2L9OalNuF2Rl61CvQvqUVta16xwxj56JyJSURXnKmV8nlrvRFDzLobe8ZcHQgghhESCiwdCCCGERIKLB0IIIYREgosHQgghhERi3homLaziQJOTbgGR3BhaqPbtx3543378MWe87gYs9vQvfvu3IXbVda55Z/kqNCKdO+OaE48fPQo5J0+chJjuGjo+iQWhFrVj97+RnNu1s//secgZz2PxkyXLXXPQH/6rP4Kc66+9AWL5YsEZtxjFXuKpWohVlMMuZhgmjUcsYejeF8sXZK2SY7Hq1s5Pevjrlqtr2FDVmd6P6O8fFnaSDBZkymTdcbgTUgxz5GUA1lWSdTqmK2BNQ2xKR/BFzIeoCVrzfPROBDXPR+9EUPO03ol4ap7RJdlH87TeiUynea5Oda1YDjmf/UNX867qRr2bUnongprno3ciqHk+eieCmhezNNC4nxcCf3kghBBCSCS4eCCEEEJIJLh4IIQQQkgkuHgghBBCSCTeV4ZJi1zOdSed7OmBnHQazSxnB4ed8X//79+FnFdewU6bN3W73SJv2oCVKa+7brUzvvm3Pg45UxPgjpJQdOc0XPudOnUaYpOTrhmyvrEFchZ0LIHYp/7wD53xhz70YchJBlglLVBGxDBmdLk0l606z6/yn0/dtIoxl29nVmCzO+wzDG+frm5mCferwAbrGvGz7FePfYOnCU90wVWjG6i+JBGRNaqp5SZscikH+2Y+/ZhkIbat2vafFxv1aO57BM3Kmw9shVinTts+K6cXEZGhHDo0teZZepc7MwyxH3zrOWd86Feod9d9GLvjXvchV/O03omI3PybM2te3HjPU5OG5h1RlSIN87eP5mm9ExHZoDTPR+9EUPP89G66mJrbCrrFgM0qu/GAFSYJIYQQcgnh4oEQQgghkeDigRBCCCGRmLeeh5hRPcPa0Wlrczdxr732WsgZPIdd2M6e6XdzsqcgZ4HR9e2tw27xk18efAVyFi6oc8Y3XIfXlB/HYijlsjsulMqQM6i8GiIiA+rzfeoPPgM5K6/GYlZLV17pjE2LQAULOYn2OJieh5mfX7WWBAt7KqMroQef3n0hVzIDYAHw832sV5YV8/NuxlDwzMxz32PEnr9bBfT4fcbDBzC2ZfeXnXHmQLdxpGE+6bPyPKid+V1oS6FpRWuepXe53n6IDfa6mmfp3dHXsNjT/l+6mqf1TkRkzfqZNS+P8ia5hKF548POWOudiJ/mab0TMTTIR++MmI/eWecLsJGpJI0YFkjE3wUs39eFwF8eCCGEEBIJLh4IIYQQEgkuHgghhBASCS4eCCGEEBKJeWuYtEwqgWEI0ZHWVjT9/O6//JcQ0ybGfb/4KeQcOXwIYvWBe0vLAXZzO93ndo8bPj8EObnhEYjlJ9xucZbdbzSHvQWnVNGUGz+wDnLaly6DWEHfvAreX6tTmy6aUrGelYd3J6gYn9BwGfnMVTHmqrpIlGFYAqr8ZgVb3PHOEJ/V1kbDvefTUnIPhp5Rj+8TIRYZel4a8UCvgjPz9+8mDx+4zxlv3X0fJumGoIYh1brnZp4P6kU39c7w82nN89E7EZH9SvNOvnoCcrTeiaDmab0TETn7LJYeG54adsb5CfzOVqQGYqM59wup9U7ET/
NA70RA83z0TgQ1z0ejRERiBfczp0ppPF/Vemc4UC+A+fvtJoQQQsglgYsHQgghhESCiwdCCCGERIKLB0IIIYREYh4bJo1OZoYBLtBmPcNRlA7QlHKFqja2bCl2nTx1Eo1AL+4/6Izzr7wKOYnANbNs6MaKc0NDaKL8/ve+74wHzqGpsmXBYoh99Dd/yxlnupZDTslw4QTKnZhI4usSN0yU2shlFWCzzJ6hOl/MOtAyTEIAj6sYB1bsVncz8qBqrNdi5LR8xx2f+T3M+f94nGubHITY1nuM+3J/debPLeq+hMEdRhaWodQdXn0rYcIdx0aURpXNC6g2WnVRvSqqQFrmSOPzhYbf1YuiunvGTYkb35lKwtU8H70TQc07+wFL796AmNY8rXciIhu60cA4NORWufz+9/ZCzmgvat7i2hZn/PGP/BbkZNoMzctrAyrez4Z6V/MSHnonIhKqLpc+eiciEguVxpr/MMBAad5s6t108JcHQgghhESCiwdCCCGERIKLB0IIIYREIgirrpQzd3R1dUlvb++lvgxCCCHkny3v9Wcxf3kghBBCSCS4eCCEEEJIJLh4IIQQQkgkuHgghBBCSCS4eCCEEEJIJLh4IIQQQkgkuHgghBBCSCS4eCCEEEJIJLh4IIQQQkgk5m1XzTMDp73y5rKApjW3z/mMpo9Vn88nZ9ZugdXk0up8OUtYXeeq/Sg+z+qKzpVec1ld9Mj7HPPF+4k73LUJMnbehUdt9ZobOTM6s+b9c9M7K29Wb4G69rnUOxHUPPuzGH9sF1SsggemjMkynZ0Rrs6FvzwQQgghJBJcPBBCCCEkElw8EEIIISQS89bzEHhupGGetYk081zW/rtUzDOq88/sQbA+ymXY7NSmutvphfWMrVi1XhBC3iGDofBxFdiEOU1qPOZ3Nr3L3Od3mJfm2Tn63ff7goLmeejdO9cw8779vNW8OdS7d6ZKuuNYEnLCKQ+9My50qvrLMuEvD4QQQgiJBBcPhBBCCIkEFw+EEEIIiQQXD4QQQgiJxLw1TM6mucaea3YKMvnMXTEKevieb6bz+x7nhWVyspxPs2UgsgqyVGmOrFTQ7VXtfTmtLmutkTM2D7xfFpk+NA9mslZmbsa5rPIzfWvc8WGvq/Kl0Yipz2OccHTrFogFBzfNyhVZQJEoT5KT7ktVsJJqZp5nLvXOzrO+jzMb0MOyry7OdH4RiXtNhegiUdXqXRlDNSUjL6Yv1HSpAvozz6beTQd/eSCEEEJIJLh4IIQQQkgkuHgghBBCSCTmreeh2mJBNj57gL4b+bNTsKjaz2Ltdel75XvvdMy3MJcP1vliMXct63sH/JrzzN61d6p9yJy1Bg/UXrux0d23GWPbxTUYWHaDbiOWOaACxoGdRi2kjBVUHDCqGHVX209HzbXL+DBZw7qgL6HT8C5sPYA+jMxu9wTdWbzpwcFqXQgXl3jgGhpqQqPsj0cloHIMvy8Fn+pHVrGnksd3z/h++hSJsiwPceO4ctn9PprF5Qz9Lqr7YBVWgrkM70LcsCXo2oDGLZcgVgsxH8272Ho3HfzlgRBCCCGR4OKBEEIIIZHg4oEQQgghkeDigRBCCCGRmDXD5KOPPip//Md/LN/5znfk937v92RgYEC+8IUvyLFjxySdTss3vvENueWWW2brdCaWkaRa4whONbOh0H/u2TFD2vPg59XHaWPidHOB0dKY2zIZ+RXKQnRe1fZXz/PNnqnoYQx1u47FsBtdh7tFuxxFsp1mRSb3OMvjqHyA3cZHw7OJZD1aOvYZBaH0Veas+kyziG5YeXgN5uw1Ymu63U+9dTcaJjsN0+aWXREu7hLh3a1SxYrWXPYZ3JHl+vMoSlX2KAhlYZm/S2YBKPcDhiEe56N5PkbLMI7nLxkFqEJlJA1KOHfC+CzV/oF8KUyUs/LLQ09PjzzyyCOycePGd2Nf+9rXZOPGjXLkyBF59NFH5Y477pBi0XptCSGEEDKfuODFQ6VSkS996U
vy13/915JOp9+Nf/vb35a77rpLREQ2bNggnZ2d8vzzz1/o6QghhBByibngxcOOHTvk5ptvlu7u//e3v8HBQSkWi7J48eJ3YytWrJCTJ09OO0dXV9e7/xsb0z9SEkIIIeRy4YI8D6+99po89dRT8tOf/vSCLuLuu++Wu++++91xV1fXBc1HCCGEkLnjghYPP/vZz6Snp0dWr14tIiL9/f2ydetWuf/++yWRSEh/f/+7vz709PTIsmXLLvyK/5ngY2AcHh6GHCvW3NzijOvq6iAnmUxGur7/QcV0bblDXwOjNkgFhslp1jp2Xgj3qesyqi3uzKxzxnu2HIScPbN5TQo82zR0umbIBtP5aDgRoT0lHjfmZaKcuTunlWb/NonXeXCN2zHz3nvwqE17jZgyTM6xH9STvDvUZQxFJG+UYAzVV7vKpprVO5gtjK92qIyVgdFVd3hoGGNK85pbWiCnas1Tn9lH70TQXBqmMKdYRmNnPHRvzEUoFFk1F7Rt8ZWvfEWy2az09PRIT0+PbNy4UXbt2iVf+cpX5DOf+Yw8/PA7LvR9+/bJ6dOn5dZbb52ViyaEEELIpWPOelt8/etfl89//vOyevVqSaVS8thjj1X9t1tCCCGEXD7M6uJh79697/7/jo4OefbZZ2dzekIIIYRcBszfrpq68onYRYw05naf2eHNYy4rxaO4lD6sYhQ1CY39TP2vUJ599geQ8/xe/OewV165yhlfe+21kLNs2XKItbYudMaNTU2Qk0wbvyapvUq9lyki5j5hALtofkWwdKEqszueVTzLuO8+BNvd8Toj56C/6+AS4+7mrzmMHSa7+7CKUlZVnNpitMdcZ5hBNuhnswVSpMFyg+giW31YTKsxi9WzNqtiXY0ZPO7BeVAQSkQkn3bf4YolQJaW6fGc6h0GrcNMzVM6MZabhJxnd/8IYj/d+3NnvOZK9L6svfYaiGnNa21FfWtsbnDGyZTxR6ZZgEp9PqOQVK2hiwEUofIszAf3fPb0bjpYnpoQQgghkeDigRBCCCGR4OKBEEIIIZHg4oEQQgghkZi3hslYBdc9FctIooyHlcAw6liWHqtAkT7OKBiCdhc8X0LFSpUC5JwZPg+xX7zwC2d87Lgu1CMyOT4Oscf+f//VGdfW1EKOVcDrhhtucMbX3XA9HrdiKcTaO9qdcWMjltiprcVr0PfKKsgSVtsl1Xg3SpVyVXNp5os10mJf6BoIu7eh8TE4YLSdtCpjeQDfNdOsaLgovbBahK53RjmjKNWYbIfY5VEUykVrno/eiaDmzaXeieD3WOvdOxeFmnfuzIgzfuGFX0HO6ePHcapx11j5/yi9E/HTPK13Iqh5tt61QayxMe2Mawy9E+PPIq15l6Pe/Q/4ywMhhBBCIsHFAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjFvDZNWczOz8lY4s2HSqsblZ1OZuWJXYFxoYdI1+Bw9cQxyfvhzrBR58KWXnHF780LIqatDY05MrRGHzqMZ8/wgxl779WvOuP6ZBsjpWo7t02+80TUZXXvtdZCzatUqiC3OLHbP14wV3+IprGipu43qscg01dU8u32+n9mgO6Ae+ISRhZUbdQfLnGGgbJTH8TBdaa/FmHrQiIHR8Q4jBw3E0qAqTJrtONcasbfU+NK3N9Svq4/eiaDmzaXeiaDmab0TETliaN7PfuRqntY7EVvzauvcq9d6J+KneVrvRETqn6l3xl3LF0GO1jsR1DwfvRMRqW92zeY+emfFLobe8ZcHQgghhESCiwdCCCGERIKLB0IIIYREYt56HsrGfp9ZVEjt/Vh7gjFjfyheKc14DdYOUqXizlWYmoKcX7/8sjP+1tNPQs4vX8LSQ2NjbgGounQacoKysd+vioPEjIIw1j6azps09i6PHsG9y+PHe5zx88//HHJWrlwBsRtvuskZX7lmNeQsWYpFWhYtcvch6+rqICeZSkHMqKfjRUNOez/MjXQ343KsOmRg3xMsrHT7Xt
df8MRe9BuEhgVBnlDjQex2KPImhtBuAzyOlyl71Oc5gCmSCfBCt6q/V92x2zhwl9KNPVYHxJnxdVOUtX/LLNo0c4feudQ7EdQ8rXcifpqn9U5EpC5maF5B7fcbxZBszcurHPwsk5NFZ3z0yCjkHMr2QKz5V67m+eidiMhqpXlXLDb0biHGtObNpt5NB395IIQQQkgkuHgghBBCSCS4eCCEEEJIJLh4IIQQQkgkgtByEF5iurq6pLe39z1z+vqzEAtDNMqE4sasIlEJ47ikMgKVSmgoGjc6WA4NDTljbR4UEXnm71z31b6DWAxlKIfur4mpPMQ05mqw7D5iH3OkiEgy6RYoSSSxYEkijcYcgfnxFSuXrYIzbl5DExaJWrUaTZRXX3O1M77iiisgZ6lhtOxoc42WH9rwEeOaDALdvXEJpOTcGkrSeQinmdlmOR33GbH7q57NZY0R24Sh3O3OMJOzHKFGN84n3Hc/e49VlOpiY31m7dB8yMhRny/Ee7D1NuMoVf9tq6cCn+93q2eFIRqYK4JaNlmrulx66J0Iap6P3omg5mm9E/HTPFPvQvT4x/T9K+tKZCJBEfU7VuceqPVOBDXPT+9EtJb56J0Iat6q1VdBztXXYFGqK1asdMZL21HvFrdhgasPblhnXNf/y3v9WcxfHgghhBASCS4eCCGEEBIJLh4IIYQQEgkuHgghhBASiXlbYbJkmE0sIyB0ojOMQZMTExAbHHaNQNk+NGhaRpJjx9yKi6+9/gbkHD1y3BmPDqM50qpMqT9M2SgZFsTQLKSP8+nKJoKGqUnjmkpYcA2oqamBWNEwMJVKbjW3wfPDkHOm/yzEXn35187Y6iy68sorIXbDNW7nO2/DJHSZfBgyGg/vcca5LXsgxyybuEffUN/6g9s982aJzWpsVZPciaGtW7c44ydyunulyFij1eWyOjLaqGr4Ogd+hrH2H6vD9mLOrWr8kPbRTodHtUwL/Y0JAvy7n2UaT08o87ehd8PDGMv29TtjH70TQc3TeiciUhgqQiym5KUmRHOipXkJrXmGKb6QNrpTqnGphNo5OeVeZ2kUr9uiJulqXtEw3JemcK7BPvcmnOnBPxte/SV+ZxYozfPRO5GZDZPvBX95IIQQQkgkuHgghBBCSCS4eCCEEEJIJOZtkajXjh+BmNUtrph3C6nkRoYh59Tbb0Ps7bfczdKenhOQMzyMc5096+7Jnzx1CnImxt19rUoR9ynLRqyoOuaV4n7FnhqU5yCRQKuL9RqUy+41jBtdNUtlPC4Wd/cO6+vrIadQKOBcJfd8Vu2VRAL3LmvVfl+xgHuJoeGRqa91O9GdOHkST2gRbJ05R+9rb0LPjOwxiihdbO+CptUKGpv5g7o/pWV68LhPlgnBB8PaY9RHmkUsQwMWBwO0N8Sa6oCfBB9VmuejdyKoebbeob5pzfPROxHUPK13In6aVwSXh0gpjp85pqpENdQ0Q46teer8ZbwmrXk+eieCmuejdyKoeYkEzl1bhzGteT56JyJyaAbNY5EoQgghhMwaXDwQQgghJBJcPBBCCCEkElw8EEIIISQS87ZI1Nlz5zBYRqNcfsLtXXj+7ADkHH+7B2Kn+93580U06oQxNO/pvEQSCxbV1LprtjCGxiBJ4PnG8spoaSz9Ekl8pOl02hnHDRNOWEGDTT6vutoZvq5YDM+nzUlWRznruJoafT/RMWkZiJLKRFlnGIPihqmpaq/wM4/MmJJTRrm9Rs6mwCgclXO7YzZiY9G5xWrceo8R26XGuX1VnrATQ2OGuVQ7JA1z20VHvT6+5bxmmGZazg4pzTP0bqqIvVoHlOad6uuBnNNDpyGWL7qFo8IY3nOdIyKSSLp3QuvdO3MZXSYTbt5YHnNszXM1VuudyHSa5+ppPo+mRgndnJhRhM/wYkq57N4Xw8cuNTXWG+PGEgnMSTbgnzuxpKt5s6p308BfHgghhBASCS4eCCGEEBIJLh4IIYQQEgkuHgghhBASiX
lrmJzKDUIsYZQkTAauyadtIba0q7saO5BdsazLGU9MjEPOyMgIxMbHr3LGo6PYdnLo/HlnfPg13fpPZPgczl0YcQ1EUyV0t5XKaL6sqNtSb3SdrBjdRpM17utR24hGxFIZ77nuvllbi+eLG8YjXYUtmURjkNWhU8esLqlWFbiGpuqqGwZW1UBIqmpqLLh42dV//Ue23+0MH3zofkjZutswhGov5DOY0rTGOF9wGRgkZ43qHurwlKt5lt7FjPu0QGle2tC7TqV3Iqh5lt6tUXongpqn9U7ET/MqI0YHZEPzgqKrNw0BduOsq0Pdr8Rd3SgH+Mdh0yJX83z0TkQkoTTPR+9EUPN89E4ENc/Wu0UQuxD4ywMhhBBCIsHFAyGEEEIiwcUDIYQQQiIxbz0PtRXcZ0oaBZICvQcYx/3GeD3uYzU16KIbuD8+MY4x3dXS2ns6fdotyHL6xHHIyfbj3l5CFT9JVIwiMQU8bvnK5c74Ax+4AY8zuvHpbnWBUekkN4pekNffeNMZL+7ogJy2tjaI9ff3O+PGRry/1n6fvudHjmDH1bExLJ7T3on7vF7A64L7qSJ4vvmB7378Fmf01/egz6TRKi6l0L05Lw1/a8S2qHGV3T8tctVNrTXPS+9EQPN89E4ENc9H70RQ87TeiUyneapAUtrojlnB4oBTMVfzMtcshxxL8yaV5mm9E0HN89E7EdQ8W++wYGFj40JnXFODfjHrnmvNM/Uug/fzQuAvD4QQQgiJBBcPhBBCCIkEFw+EEEIIiQQXD4QQQgiJxLw1TJbyaNCqTaNxraw6z4UhGopKRTQL6s6QsQQWHglDLPJRUdPX1aHJaHTELZpyOouGouEcFpeKxd1raGk2Wi7GsbDKB25yC7ncessHIWdiAg02gTZMGoWPhoeHIJYbdT/P8uVYnOT669HANDKywhlbRaJKJSyCpU2UsZh2pIn0GJ1Tr7umOsNkQ859z8YEqxo1yOGq5taMNc6t8bIh92VnnPPs4tk5utcZnzCuc1u1F5XzMKA2ZSDjvoY+iD2hxtZTWWfdYnUJO42cbjW2pvHyQnp6VEtTrubVpmbWOxHUvFJhZr0TQc3z0TsR1DytdyIip/tQ86bOusZHrXciIi2txguqOhDfdBMWrrrFQ/OCwDBMKs0bPmPo3QAWLFzetdQZ23qHRbdA8wy9qzdM47UFV/NOGHq3/soqDeLTwF8eCCGEEBIJLh4IIYQQEgkuHgghhBASCS4eCCGEEBKJeWuYtMx7FcO9U6m4hpNyBQ0oImgyDFSXybCMc9em0dBTqahqbsbZ+k+7lRTPnMGqafkifsBywTVDXbl2JeRcfy3Gbrp2lTNe2IjX3WQYO4NA3QPD2TU1hmaosSHXVNR09SrI6ViI52trUR3sDLPQ5CSavZqb3cppx1tw7pFGNBmt6MSqbz407nXNrGuyholL+/k6MSWLnj9Zo7yeh63jqix2aDUD3f3QWjfQjN0OpQW7Yzaq60SLqsiYdZ1ghvQzhOqjGkfxjNuNLqnbvWY3gMua+Tqrr0Hp55gMym6F2cqUoXchfmfKOhYaelfCWBh35/fROxHUPK13IiIjJy3Nc+cvF/AZr+28HmLXX+dqntY7EZGFDYbm1bpPTOudCGqepXelId0qVqT9atecuHwhmr/LLQtwLqV5tt5hNdCeFnf+UeNlXNFpGZGrh788EEIIISQSXDwQQgghJBJcPBBCCCEkEvPW81BTm4ZYxegyGSo/gy58JCJSY+zlxVTrxJjhsUikcB8rDN35yyW8pvNnXU+AsQUpobGsi8Xc3cTaNJ5/QVM9xJrrVTdOY2/P2u8rltwufpUy5gxmjQIwPWed8dnlmHOk7hjEpqbc89XV495ea2srxMaH3H3BU2/jHmsixPclZcR8OP0J/TJgkSgoR3QrZnx2K8Z2b5p5b/2ZnRjb/Lw7fl5XMBKRNUbxpb7Ovc748P9htMJ8HHtfrtnleiP6HsJN1gbzo+hrwFJSYw
/fgbE7dJdLpHEUjwM/gWXOaHxkxrlN/wZguUoM9rtGFt8+pi0p5dspY4fHiiEm4FUyii/FEihwWvPSIepNIsSrL4y5mjd2CgsrlY0PXdQdiAM8X22A+rYg7XaibI6jBylRwLmCsluUqmgU2KrE3Zild8eyZyHWdc7Nqz1i6F0eO0NrzWttQ70rGT6II32u5pXSxp+P8er0bjr4ywMhhBBCIsHFAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjFvDZMxY9mjzYoiInFVTSqIYdmmuNG9MRa6x1UqhhHJOF9ZFZMqF9GEMzXuGnWsa7LOV19f6+YYBp/XX3sNYotaXbdXIY/d8YaGsTOcjo0MG90qj+FxEzm30MmvfvFrvM5X0EBUVp+5sRGNTx0d7Xhcyb3nr//6bchpbESz4D88/5Iz/uN/AykmYC1rQENhTpvUDuAL+4RlwlP1mG41CkltwppNEuxVObu/jEnykBFTbMdnfDiLZsXHYSq/EkmYtQuT7jIOvMtyOrrsXoexA6rIFvbdFDmwBa/hoGFmBdRhXzZMgGOG0RrKcK33OJeg5oWG4TdmmAyDmGuwq00YelfGC60os3nK6DpZESxUFSjNK6CvU+IBmqF1sb7aevz+J8tYBOvoa6874y6j82Ysj1qp9c3SQK15lt7lz+Efo7/6yRFn/PqL2EW0bJhbG9vcazf1bhzv+eu/dt9sU+9+/CbE/tjnPZ8G/vJACCGEkEhw8UAIIYSQSHDxQAghhJBIcPFACCGEkEjMW8NkIoHrntDoYRkEKs+opBgzOsOJ7jJndew0zDslZRaaGEe30PioW3ovb5h5yoYjtKQMhfkprDQ2MTIBsb0//ZUzbmzEKm253AjExsbd7pFjOTzfpFFFMFSWwjMDWJWtP0TjkW6VmkgMQMrhoz14mBpbZtPxSTSJDp5/Ea/BhwZtu7NseIYRUDGKxRVFtDdxt5FjeAdz4j4r3w6PuRZ33Lgdc4xilXCGRr/mmHPKloN+McAwoBqFL2cRVSJ0q9/JEkqtrS63EhhdgwNXF1OW3hnfGTB/G3pXLKJOTYy73zWtdyIiU4bmVZTmhcY15aewKuOk0rznld6J+Gme1jsR1DxL7xJooZYRpXnDHnonInK219W8k4kePAxnAs0bnkC9e3HwZ8aR1cNfHgghhBASCS4eCCGEEBIJLh4IIYQQEol563mIW54HYy+vErr7dEWjaFOlYMTUnlyhgHtIk0Z3s7Exd0P6TD92XJtSx9XobnkiMlnG8xVVh06r02fTwjaITUy4+2ETk+hv0AVaREQkcK+roRGL0tTV43Oob3b3FyuWX8Sj6Bb4VcTe5w10ITBjV9A6ny4u5Utu55KZk25XY8sTgHVcRJ5Q48eNykemC8Ht4pkzO33ei6GblDdjL87diGWNRMZ0dZlOzDENG7qg1myaJayKN7rTpXXvfK5z5s6b1ePneYilXC2zPAGVJH4/tOZZxeUqExgrTLkaZOsdxs70u/v9k5PDkFOTwu/opOpyWdK+MxGJB+h5aF7ofpEmJtD3NWGZFbTmGXra0OhqoK13VpE/PcbJwxA1XmuZj96JoObZeje7vxXwlwdCCCGERIKLB0IIIYREgosHQgghhESCiwdCCCGERGLeGiZjIZpUioYRqKRMcaUSFjopF4xiT8osNDGJJpz8VB5iZWXymZxEg0/jQrfATmtbM+RMnUOjZaZ9oTP+4IduhJyWRiwPVC67phvLaGkbc9yx0fxTwsAwQ6pucZaxyyhTIzF1wlgMX0/LRAlzG91ODY+RnO7tn3EuiyZlhjQaX0pWB3yrNt3hDk/fgVWOGnNGrEEZ+g5DivQZHsolVsUZheXr3JLb4Ywft+pkWR5KDwLfe6UY7TMKc213Y+bU8LAEvJc547Nobyn2VrWn7lOvp2XXtCgm3S+gNlCLiJQK+H0sKs0z9a6C5r1S2dW8Qhn1rpLA46ZKriG7oRPvesMwat6I0ry2zELIueEW1LzF9e
78ZeP776N5eUNatOZZepcsG/o2ZRjQ4ZrwomJJpXlWsUDjT+0SmM0xp1q9mw7+8kAIIYSQSHDxQAghhJBIcPFACCGEkEhw8UAIIYSQSMxbw2RgVOwKKrgWSirTXTxpVB8M8DaUg6QzTiRSkJNPoYFIV//KGwam5r4hdZBh8Cuiqakh7V7TTR+4FnLaF6IRKVAmTsscqTvoWQSGYbISoGkrFHcuy8BYsTr76fP5dEkVqyobktQtCUWkvrbK11+9QlnTUuhTOXHm48xalj6GwrUeOSa6NKbImGyH2BPqGp6o+nyzR5Nl0Jy5uekcYzzj4PGqZtKa56N3Iqh5PnonIlJQmhcYeldjOPOalObVnhmCHB/N03onInLTupk1T+udyOxpno/eiaDm+eidCGqej969cw0utt5hheALgb88EEIIISQSXDwQQgghJBJcPBBCCCEkErPiecjn83LPPffID37wA6mpqZEbb7xRHnvsMTly5Ih88YtflHPnzklzc7N885vflGuvxT2rarC6Y1r7WhprvyhmVD8KY+6+WTKJ+28JY19Jz19Tgx0zK2o/LB7Ha2pduADnVuPaWpy7uRk3xGNGERON5UvQXUrLxt5eJcAiMeWK6v5nPBZdSMo6XxgavhaPIlEW1vNL16CPxYuc5VVwaTD9DDMzVmWBpGppkLvdQO4eIwu7cV7s65y/WN6XT6ix3354UXX/9dE7EdSkmGFe0nongt8ZH70TQc3TeicyjeYtcMutGbYIL83z0TsR1DyrK7PWPB+9e2dud+yjd+8cp3wts6p3hmntApiVxcPXvvY1CYJADh8+LEEQSH//O5Wstm3bJlu3bpU777xTnnzySbnzzjtl3759s3FKQgghhFwiLnjxMD4+Lv/5P/9n6e3tfXcVunjxYhkYGJD9+/fLs88+KyIif/AHfyBf/epX5ejRo7Jq1aoLPS0hhBBCLhEX7Hk4duyYLFy4UB544AFZv369fPSjH5XnnntOTp06JZlM5t2fuoIgkGXLlsnJkydhjh07dkhXV9e7/xsb8/lnboQQQgi5FFzw4qFUKsmJEyfkmmuukf3798tf/dVfyb/6V//KbEA1HXfffbf09va++7+Ghur2iwkhhBAy91zwtsWyZcskFovJZz/7WRER+cAHPiBXXHGFnDhxQrLZrJRKJUkkEhKGoZw8eVKWLVt2wRctMk2BjwBNKUGgi3UYpr+SEVPzx4y2bEazSCMPj4sr49GClhbIWbYMY+XypDOura3F85sGG/dCLZOTRTlUHUnLuCBMpA0Tji7aZHW5NG9edWYhPb9P8Zd/PINnnkvD4Z+oyG2Q06i7THZDiuQM02FDzh3PrjHRWJTnHnSGO43umPca13DRDZPqvniX5ar2Or1MsfoYvALrPjU0PohBD8oF9b5aemeYKCsxZfozvh+WBsaUqdFP7965in+K1jsRkQXNiyG2YkmLMy5WsLiUn+ZZfw7M/F3XeieCXZhNvTPugTaz+uidiEjg8fd5S0/L+vmZXtrq9G46LviXh7a2NvnYxz4mP/jBD0RE5Pjx43L8+HG5+eabZd26dfLYY4+JiMhTTz0lXV1d9DsQQggh85xZ+dcWDz/8sPzJn/yJ/Nmf/ZnEYjHZuXOnLFmyRHbu3Cl33nmnPPDAA9LU1CSPPvrobJyOEEIIIZeQWVk8rFy5Un7yE/1TrsjatWvlhRdemI1TEEIIIeQygRUmCSGEEBKJedtVMxbDdU9oVfFS5pKi0a0yMAwoukKXafqzOsOpMRhZBKukLViAnTA7OrBFYF/2lHtNhgnHui86y/osFnqupNF5L4gZc4UehknDDOl5WcZc2tjlV2HOqvrmw+j69e75jU6UoTwx4zxNb2FsTD/2HOaIUQRypxp/2TIKNo5CKLhfv7F4wsYGPO4Ta5rcwEE83WnjEjq1y3CNkfQQhrap8eOHMKfhLmMuxSYjpu+diEgG7Jd4D+Ru9yY/scd6WMiWzSpgfF6LuopbET
UMsdqhVXWyoDSvlJhZ796ZX5n+inic9bfP5JireQuCFsjJ1OH97FjkVpg8kTUqN04YmlfnXkUCb4vEPKpxVgxNmhD3vgQlq1yuYZjU9y40OqAaU2GW1QkTzzdRcquPxipo7ExO4jO+EPjLAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjFvPQ9WIaBCcQpi2geRTBkd5YwubHrPytpHt6poxpVPwNqD1Hv0+YLhwzCKmoyPu59vYhI/r4X2Llhz+1QEtY6rlI2NO713aJgZrPsJHg6rrZ4BeiqsbpxGN9Uqi6bAUQ24az4m21VkLeRsWo9FhTJq/JBR+civ7pGvgcTdp7dqI20yjho9rM0KhyHHvE79eQ5qA4CIbMFKVQ8qU4VZZsmjcpR97yzXw1Yz02GXO9ziW5FKHefredCaV8DGwqaPJ5lS3/+S8d0z9vKT6vtY9tA7EZFQ7dNb3728dfFaF8dR38qG5mlXQGBck5fmGRaoGvVttzqEgt6J358fVlfNSqA7/VqdN41nrH4HSBjXFJvl3wr4ywMhhBBCIsHFAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjFvDZNWMaR4DM2QgYrF41YHNAPLCKiPs7qbaVNTASuWTExMqHlw7oaGeojpj2zNbRmmQnVNlnnIJ2aZhUxDqDIZWZ33rAJbFdUlMBQ0xVpmIZ8cq6letQYiNBVaxku32lOuE02Ae+5rgpjsdYePGLWmjHJFEG00jJZmCaOZm0eKyG6IPKEqVT3UoMs4iTxjzNQJ5kTDMGkYDw+vcW/EbigbZbNF+TjXmOZILPIFV2C17PQyY84e2ggYM/TO0kAdiscmIEeMAlB5PY/1/S/jHyFal8Yn8HyGVxA0LxZDU+WYYbScVN/3hGGmj1sdeoOZjZ2V0L0LUx56JyKSVJpn/LEjlcD641dpoGchu0DlWc1O4zRMEkIIIeRSwsUDIYQQQiLBxQMhhBBCIjFvPQ9W0Y2E0dxFFxqyPA+xON6Gstpbsxo5JVO6oIdIqegeZxWzWrCgxRlnMpjT1bUEYuMTw864pRkbapWMxl9ldU0xq2CSR2EVywVi+TUqat/O8kr4FIkKQ7wvVuGoQG3waU+JiMjg4HmIjeXGcX4Pdiv3wKYtuNvdqCwOjdjnTB48vB9i93ZucsYND+Jme9MYugn2q5pNa+7A89l47NR342S3H3DHfUZRpc8aXonNm9zz3dNnnP8wxjLKl3BgC/oUDhjVlvaucS+0c2835Ox8wrgGy+NwiclX3O+D1czK8hLFlechFPRTlSuTOJf2WKRq8aIM80JJaZ7WO5HpNK/LGY9PjEBOe3M7nq/o6ne+qN0atgegNubqjaXxgegGiegpqQgWriqq51Ap4dypilGYT2ueh96JoObNpt5NB395IIQQQkgkuHgghBBCSCS4eCCEEEJIJLh4IIQQQkgk5q1hMp6qgVilgiYcbS4JDbNgsYQGG+0DssyCFcMslJtwjUd5w8A4MHDWGZ8fQmNQKFgMpVhyYwWjG2fFWg9qx5RljvTpRGcYH+MhHleG52CYfhIzd6KLVdAQZpoo1TMuVLB4Vm8/lkgaN6smzcy2Pu1+RHfdPWq8fe19mPOEEYMjLbYbsfVq/DBkhJI1jtPFq9BQKM9bx7ncZ8x9H7SPFAn26KJQm2acW0Sk8aBratx90Mrabh2pxvo+TUNGmVKzVjGr6rqyVkvQqN/zmfVORKS2oAoWlVD2rQa2+useGAa/XA6/RAWleWcHzkDO0NAQnlBpXsnQ5aJRGC8l7p8FhqfRLFQXi7n6YhbKUzeh3ioMWDE0MHT/HAiNP2krhpYFFffiQ0NfLfen1rzZ1Lvp4C8PhBBCCIkEFw+EEEIIiQQXD4QQQgiJBBcPhBBCCInEvDVMWl3ZQmstFCrDpHFcuWIZn2buKFkuoYGoWHJNMGPjWO2w93S/M07XoDFw8PwAxM6fH3TGPT0nIaelCdskxnSnNsiYprOoB6HxIPS9Ms2mRq
ysDZPG8wyNZ6XnD41udYUyuqgmS34d66rhIbnVHR9CI2KjYbg73bR95skzRg74FfGu3zvzzLI1g7FGOQCxnOqG2bjJONDsq+mSOeRxUSIiyiBpfWPNdzjjdhs9uPkRSNm7FjueHsgoU6xRsfOAOuPtfXhVD6FnVMbUs/L97qGJe2a9ExGZSLrvfjnA976mhNUjdYfecgkNfsUSapfWvKNK70RE0jX4fRxQmnfO0MBjPb0Qa2q6xhnHBCv/mlpiqtB746N378ztmjgL0KNUJG90zNSVfkOrC7Tx2CfTruaNGno3Ost6x18eCCGEEBIJLh4IIYQQEgkuHgghhBASiSDUlXkuA7q6uqS3F/e2CCGEEHJxeK8/i/nLAyGEEEIiwcUDIYQQQiLBxQMhhBBCIsHFAyGEEEIiwcUDIYQQQiLBxQMhhBBCIsHFAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjFvu2p+77u/gFhTA3aULBannHEhj93NxOhu9sCDX3fGvW8fg5wNq5ZDLKm6oqUS2N2svjbtjONx7ExXU5OGWCJwi4EmjK5wqQbsjtdQq46rjEOOVApGyO2OFysVISeowfM1tdS585SxG18Q4GcuqW54BWnF64zXQUjf4kqA91x37BQRKRXdz/OV//0v8HwGYbDVGe+RrZCT/bLbOvHAFmzL+Pimmc91u9HN8Q5phFhG3C6QjTm/dpW5TWPO+PB2zOnOGQfepsbYmFKsPptAJ35nt/0tpm3a5o4fwialcrixE2LbD7iJjbLbuIb3vEIREbnnwZlz8KnY6Pu5u9O6wchPlObVNrRATr44ibGS6uwbn1nvRER633Q1b6Ohd+kY/v0zrb6QWu9ERGKG5qVa3Ly00W80kZ9Z85pqjeM8NG8ixA7IiUlXI0IPvRNBzYt56J0Ial5YN7PeiaDmmXpXQP3+yn1+mmfBXx4IIYQQEgkuHgghhBASCS4eCCGEEBKJeet5+OHPfwmxRAn3Dq9btcgZr7xqI+Rks1mIDY+4cy1sqIecoIL7bxUJ3GuK4y2OxdxYuhb3tZIxy4Pg+jeGRvDzNk/ivlZrh7uXWFsTQE6qBvfkYk3udZWncE9QBM9Xybv76IWpMciRcglChZJ7XeUUfr5E0yqIhUn3OgP8eBIW8H6GRYz5scsZbRY84fPi7uXv2uQ7t7sB/8Tjxn74XgytXeOaDp7oxHt+yNiUN54MYm3m7/c5cGYacn5OgSe0VcHwWIhsgkhfp2tWaNz7OOTsehCfH16W4eDIuc/Gz7kgstd4Nj788OcvO+NE6SzkXKX0TkRk2Y2u5vnonYjIwlZX8yoB6l0+jvcuWefqW6kWNbDG2MuPKc3LK70TERkcNzQvdDWosc7wi9UbmpdwNa8hgdcEmlcx9K6Cz7MwrvJK6PsqxNF/V067ny9RM7PeiaDmmXpXrlbvbPjLAyGEEEIiwcUDIYQQQiLBxQMhhBBCIsHFAyGEEEIiMW8Nk+dG0aTSmjgDsZa4WxykNLEScsbH0bgymXcNfUEFi24U0T8EBVJCowDV2UHX6NQ2gQamyuR5iA0OuWaakmGKKSbxfOVB1/iUrK2BnEymGWKrrrveGSfq10BOEKKBqKBMTb3Hj0JOdmAYYukG9xrC8ijkxGrxuSdq3M9XrKAZ0yKdRmOVD00N253x7Q89DDnbwj3O+FDDAchZi49dRH7ijPpkG2Q8ssU6Tk9mTW6VbTIvogrWGTGjkpM6X6Pll8RbJRl16d2W53A7Hrjmvnud8a7NeA+yuftwLnheOHemwb2InGHUnU16R88549bkAOS0JLAYUmnS1bzxMUPvpvDvkVrzikZxonQSg6EqXqf1TsRP87TeifhpntY7ET/NW3XV9ZCTaHQ1LygbemeYTU9PuprXd+4c5Gi9ExEJK67m+eidiEgxVJpnvIvV6t108JcHQgghhESCiwdCCCGERIKLB0IIIYREgosHQgghhERi3homV63ogFhdAU2G+bEhZzzU3w
s5xSJ29hPVwTKWwAqMReP2xUJ3PXb0DJqasmdOOOOPduHcY6NoDBqecM06qRiaIycreE3xEbdKWmkEK7d1tKEJJxW450vXYhe4ZM1CiJUbm5zx+CR+lpEpdPTE0m7ltLEJND4mjEptFdV9LzBMqqkUmoWCmOEA8+COe5R5rg/NX7og4kP3Y+vG3YaBsTuz3hlnrY6PhqHwgDIi9lleRQ9zpG2zNL4fos2zloPRms01knYblSIPGId1qrxHjJx13YYZco97oHVbOq3rVA7NPuM5ZDvdnDFr8tDDkOpptNSaZ+ndeG4IYoUeV/OKeeN5ltBEqTtfFouG3hl//zw66Gqe1jsRP83TeicyjeaV3OuKD2El3NKQoXktrualjGq56ZSrecmUoXe1TRDTmjc8ZuhdCitFjk26mpeYNPTO6BoaqPsym3o3HfzlgRBCCCGR4OKBEEIIIZHg4oEQQgghkZi3nofiBO7tNdXjntX4qLv/taANq9KMDmIxonLe3bPqaGuBnPo03r5c3t07fKsPN3Wbk2rPKsQ1XBG3tSSpCrKEFTwuFseY7uzZ2tECOYuX4aZuqTDpjCvnT0FOvLULYjVqD3DRkmWQM2Ls5WmLw8TIIOTECnhjGowOnZpkAp9VxfBGeKE26jOduK+9RXXH3J3B9yBrtIbcBRELfFZZuAYs6JWRzTgV7MkbvgHjqjJy0BnbvTEPQ2STnsfwEhww3n1t87DKXXUb3gXdQNJoKClinM/HH5JRV2E8Yr9aXZavxaCYdzWvqQH1Lj+B+/2pTvfpjOYMvaugL6ljYYszrq8z9K6IXom3+t0bAXonIhIYmqdsAcm0UYDK0rykG0sk8TpNzVvp3vhSOAk5lZyreabeNaPnYdEVruaNFGfWOxGRiXFX82LGHwSm3qm0pNHNuWq9mwb+8kAIIYSQSHDxQAghhJBIcPFACCGEkEhw8UAIIYSQSMxbw2RDEot+1MfRQFSMuWaW+gVYXGribez6NjXlGoESgiaVugDNLGPKQDQxiSacwpRrnhlsxQJNrXVY5CNRcg0vYRnPn8I6TtLQ4hYjueKqqyCnub0Nr3PYLS4TjmFRmkoBDVpNi65wxvVNrZCTMC60VHbXshMVLCTTGMPueHFVzEZiWJAlncbjiiV8X3zY3+2aBS1T3B4dtOoQbTmNsZ23uYcdNqyB3dsh1KnKUoWyA4/L7oFQ0ODOv7vT6h5pTKUKR23CFBkzCkeBhdI0K1pF29RcxnGPGDPpbpyGh1O6N2Fsj57fqoGlH6pVS8t4fJbZ04eGGlfzfPRORKS+2dW8ibdGIGfK6CycaHI1r66CBsaxPB43Me5qXiFEs+BgC2peR62reZWy0SHY0LykkhKtdyJ+mqf1TgQ1z0fvRETqW1zNM/XOMI1OxFwta0waepdCXZS4q3nputnTu+ngLw+EEEIIiQQXD4QQQgiJBBcPhBBCCIkEFw+EEEIIicS8NUx2NmMsnccqabUd65zx4iXLIWfkR89DbLLgGoHi2pUjIqHh2mpX1cZWtGIXtpd73C5zJ8fxw7QtboFYXahMm2X8vIkAzUkLFy92xouXr4Sc2jo0GcZLrjmonB+HnJjRqa1Udo05DbVojkoYXd9yZ4edcZ1xXL1hJE3WuM+mEuJnyZfQ8BoahiUfDuqKgJ4VAoG+JRBak3PHuUYsW7hGtuNcygi41agwmc2sgNiu7Fed8UPZO/AyjW6RRmNPA8tBWC0ecxllLrPa6bgJc9BGKiKBOp9VQjOn5jZNlcY12eU4Z6RzgTs29W7JOogtXu5q3sheQ+8qaHyMq+9VaHSdbG9Ag+bKFlfzDp7Erponc6iLWvOScaOrrofmLexYDDmLl82sefEimiG15sWCmfVOBDUvYehW7vQwxLTm1dfPrHciqHn5oqF3sdn9rYC/PBBCCCEkElw8EEIIISQSXDwQQgghJBLz1vPQvhiLfkwMY2GM1szVzvjQG29AznM/3Quxphp3r6nW2KNHd4
FIbcy9pWs7sSTMiXNuUapDZ3KQs6JjEcSWqr3EhNEFLpjCuVoWuXNllmFRk6Y6XEeOnHU/c38/FtNqaMf9xRq1bzc0jHuJuUn0JZw9487fuXgp5DQ24d53qJuU4tSmv6EwhYXGvMjdpwIPeRyExogGjy6MjYLPM2vEtOlhl1GJKGtWLHIv4oDshZS+vm3G+dRcnVh9yfx8XiWSDPOALrJl+DAkZ1Wc8jQizHBUg9EBtWpPh358nh6I9g5X83z0TkTk0Ouu5j33k72Q05RAfdOa56N3IiKru9xnfPw86sahs1ioakWm3RkvbTIK5VmaV3BvaEsramema2bNG0nj+fqz7rU3GH4KrXciIkNDrublJgy9M/RUa15j48x6J4KaZ+rdZJV6Nw385YEQQgghkeDigRBCCCGR4OKBEEIIIZHg4oEQQgghkZi3hsnWzhsg1tJmmIUOv+mM/8s3/2/IyQ8NQOyGJW7HtZRVRCVmdDdTxpVOw1C4cbVrXDncdxJyOhYtgFgq5T6uqUksFtK+BIuv1La4n+X0GeyO+eYUunD2vfiyMw4LaLj5l7+NhY5iNa6Ra8+zP4acH790DGLLFrjOscWL8P7GjS5zYUVde8UokKJzRCQsYMEZH+7b5Lr1soZ57/GHPCpHdaIZqq9zrYpgsSeRrUbMPc70KhqlndYcdo2OazPY6bNv9z6IdWbdHpZrDuyGnMPbsZUodOg0/IxjOePqGzY5w/CwX5mqoOEtZ5yTDUYWPocGfa8MUyNYRKss/uRLa+sHnHFLo6F3J96EmNa8/Iihd8uxq24qpgrlxdEYKIYEdqqidBvzqBuHTxma1+4aHVMB/vE0NYGF6trbXM2rbcbPYmrepKsJ+371MuSERffaTb0zNGnP93/hjH/80uuQo/VORGRxq3tD4wkPvRMRUV2fZ1PvpoO/PBBCCCEkElw8EEIIISQSXDwQQgghJBJcPBBCCCEkEvPWMJlMolPn9V+/ArG/+Mv/yxlP5IYgZ93yLojVpZQZMYHd1IKEcftUbKqEHddaVWWzm1dhRbSOdjT9LO5odcanjr4NOal67HK3ZOU1zjg3gVXavvnUDyG2703X1HTLWqwOePoUmq8mw1pnPDiMFRFXZ1ogdtP1rpltQTOaRhPGc9fl1UoFrIVXCY36eFV21bxPt9WENpsiDz/i3qtsZgvk7N6yCSfPuNUc93T+LaRs7kYj4q6Ma5i0Ojfmcvj8shnXGBhsx0p4mx5B81Xnw192xn2bscrmGvRQii6v2G1Uy8wYDtQgdD/Q/oO6yqfI8xnsCLrmPjdm1YS8pxO7TG5XfsnDa6wjldvT7LyJoUZ9FZ5Gy0bV2fflXx+EnP9T6Z2IyJjSvOuXr4Ccurj1XXANkjErJ47vhta81lo87uY1huYpY7fWOxGRU8cMzWtw78uSZddAjql533I1b/+baOLUmnf6+DnImexCnRoccqtHrja6JHffiObdpgZ3rkRiZr0TESmVXH2rVKx6oOyqSQghhJBLCBcPhBBCCIkEFw+EEEIIicSseB7+/u//Xv79v//3UqlUpFQqyb/7d/9OvvjFL8rAwIB84QtfkGPHjkk6nZZvfOMbcsstt8zGKeXJp78Dse/+4HsQGxwcdMY3rV4NObFa3FeqKO9CKYb7TMkk3r5c3t1bGx08BTldTRVnvLCtA3IWpvGa2pqb3fGHcW94amwYYjW1bre4xZ14vv/lTz4DsTcOuXuABaO4zLkRrPLz9oBbmKe9uRZyll6Nz6F2gdtVLzR8CpUSFoAScfddrRoqFSOYMHwsPgTQ0nG/kbXdGWWy90LG1l3W7O7ce7rXQ0b2PvQ8BBk3ZtSfku4G9Dx073HHmx+xilthUaq+u9T59mNhp75NRoGrve5w7YHbISX3EB63657POuPsdrx5fY2fhZgu5JQxPAj3Go0+D292x/cZ1ax0marb8bEYvVRF+jLVVZN68mnXRPLdH/w95Gi9E0HNqzX0Lmn4t4rqr5
bJJP5dM1dEL8HokKt5Wu9ERFqbUYMWp93ve2czdrlcvPFaiBWmhp1xQy0e17UMz/f//ZKrea+/1Qs5YyP9zvjc5CjkvP36yxBrX+hq3tKMoXet7RDTmlcJZ9a7dxLdYWDoXSpVnd5NxwUvHsIwlM997nOyd+9eueGGG6Snp0euuuoq+dSnPiVf+9rXZOPGjfL9739f9u3bJ7//+78vx48fN82OhBBCCJkfzMq2RRAEMjw8LCIio6Oj0traKul0Wr797W/LXXfdJSIiGzZskM7OTnn+eXQ2E0IIIWT+cMG/PARBIN/61rfkU5/6lNTX18vQ0JA8/fTTksvlpFgsyuJ/Uud8xYoVcvIk/nOYHTt2yI4dO94dj40ZBe8JIYQQcllwwb88lEol+fM//3N5+umn5cSJE/Lcc8/J5z//eSmZe9M2d999t/T29r77v4YG699UE0IIIeRy4IJ/eXj55Zelr6/vXSPkhg0bpKurS1599VVJJBLS39//7q8PPT09smzZsgs9pYiI/B9/9ZcQq6nBDmQ3X3eVM+5saYGcimGGrISuAyUZR7NJTQpvX98Z1yy0uA6LRC1b6nZmq2tB48ziwWGIpQLXTLPw4/8T5MQD/Cyj512jY30TGhE3rL8eYksXuYu4V17FuU8OYNGtQ8dUx8wSdsKrazQMUw3uvYoZhlTdtVREpJh3u8VZHeXiVRaEsujLaHMgmhr7VDfMPd1obu3rXgexbKdrw+sTdOHtMYpEZUQVpTLMeweMuUQZA8UwD0rf/UbQgxwWMQIyOzG0G82lWe3ZtK7TDD7uztNgzG10G9X43IE95jUha9RzwNJWNn/xV/+nM66pwXf6w0rvRETaleaZeqcddyJSqzSvpt7Qux40hC+ucb/Hy5ZgJ8q6hah5C08PqwtAnWrz0Lzz59HYnQ5wrhs+7HYpbW/Dv7S+8parJSfPGXr3FnYIlpLbkbSuCf8cWGzEQPM89E7E0DyjoJcx1QVxwYq6dOlSyWaz8uab77SCPXr0qBw7dkzWrl0rn/nMZ+Thhx8WEZF9+/bJ6dOn5dZbb73QUxJCCCHkEnLBvzx0dHTIrl275A//8A8lFotJpVKR//gf/6MsW7ZMvv71r8vnP/95Wb16taRSKXnsscf4Ly0IIYSQec6s1Hm4/fbb5fbb8d9rd3R0yLPPPjsbpyCEEELIZQIrTBJCCCEkEvO2q2bbwmaIXbMczZhL2tzObGnDhJdKopUkVP6TkrHOyuWw2lisMOyMly3DznCNbap7XAYNRQsKaPpLKlNMXRorqbW0YyWzZJ17vv7Tb0LO0AhWivvVPtfw9tYRrMAmtQshdPSMW+VucmoKckqJ0xD72AK3Hl+ihCausvH8YjE3Vq7gv/Qpl41YhH8R9E85cJ9rsLsfG2ZKX0bVNhxDw2Qmh0a9rLj3PJuxXHhGd0zDC+lxmOiKlnL6J5gSYvVICfQ/p57ZdGhdxLos1mA8+MzjEIP2lMZx3cZl6vaifWJ8vs4n8DDVbVQyZovQmQnwwRyW6nxfC1pdQ9/aVagbHRnUm7SqHplKzKx3IiKlmKt5PnongprXaHUINjXP7Y6ZNDpD1qXrIdbSvtI9ru4M5PSfPgSxoUE39quX0OD71hFlCE3jZzl6GksLTE66hslSiN04P9aI9yCRcjWvnJ5Z70RQ82ZT76aDvzwQQgghJBJcPBBCCCEkElw8EEIIISQS89bz8BvXY3e1plosElWnOokljcJORkM5SagCKeUCFvQIy7iPtbjT7abW2IzejKamBc64ZWET5NQkr4RY6ry755iqw6ImNUbBqc62pc74/Ggecr79nSch9vZJt6PcZAn3SgfOvw6xvgF3z7G2Fj/fuQn0MwyqDp11RqXRWAWPExUrFXGvtGT4IKRszOXBti2fmDkJtrr3QMpBY4++QXkCoB6ViN2q0SoA5QEWSMKCVyKbILIvdL0SB4wqSp3GtetLz+BtkfvvMy5hz1pnuNM4DnwRIiJyjx
r73aeturjUM+h5gGJd1nMxvAQiqr+PZ/WeD69zC0CZemd0itWalzD+yqj1TgQ1LyzOrHciqHla70Sm0zzXw6X1TkSkrg7PV1vjat7SZUshZ3QA9fvvvuN2Zn7rVD/kTBbd+zJ6/g3I0XonIlKrOnuO5dBTNj6I72uswfVUxIpYEEoC1K1S2dW82dS76eAvD4QQQgiJBBcPhBBCCIkEFw+EEEIIiQQXD4QQQgiJxLw1TObHhyGWK+DHKabdXhr19WjCa6xFE4423SVjaG5ZvawRYhVlsIkbhZzq691CJ7XG+WuaFkHs6GiPM2555QjkJOuxSFTDYreQ09oPYJGa1946AbEXX3nMGcdq0fhUDvCe19S1OONiCQ2MgdHlLpZ2DWAFw+ATTGFBlri4zziZSkFOjWEkEyvmgVGfyEC/Z1gkSjrRvDemDJONfZbBz3AiNrrPtLHRr2hTY07dz77DkNO9FmP6KWS349xZwyyoP83WLHbV3LneKMiUdT/PQ7evgZQ+4xZLxj1jt3HvrK6Wu8T9bj9keDE37XWf8d61+G7utQpzdaIG+ZCfHHbGudLMeieCmmfqnfEd1Zpn6l0Jv0NxZRbUeiciUltjaF69q3la70REWl/Bd3H5R1Y544YO1KlVG38DYle+fdwZ73/lv0KO1rzxwDAiGibOnCpwVYwbemcYXmPhhDPOj05AjtY7EZFkjat5s6l308FfHgghhBASCS4eCCGEEBIJLh4IIYQQEgkuHgghhBASiXlrmDzZdwpi4eQgxFJJ11xSSaF5R+eIiCxe4FZA+51b10FOXQ2WhssNnXUDcTTvJZVZKJXGamt1Tditcslq14gU/OjnkPPTl3sg9naH272twehM2XL8LYjdHnerm/0sPwQ5iRa8zsyiDmdcl8IqaR+6HitoLs64hqmJMTQLFSbQhReE7ho4NMr6xWL4jMtW9TYfctgRFFElEA+gUW+NPIRT96k8w5yZs4yBfa6hMOdl6hQBY2cnmv6sIpfYh9LAo3Lits5tELvbOOGevSqgqzuKfZ0+3I8+QChW6TN3J5hkRRoNo2VjbpMKeEwuIidPu5UMbb3D4yqxOpVj6F0LatDv3OZqXl29oXeDZyEmSdUdM42GQh/N03on4qd5Wu9E/DTv9+tRD35WcDWv1GHoXVcHxLTmWXrX0omm+Imcq3kVD70TEQkDN8/Uu1KVejcN/OWBEEIIIZHg4oEQQgghkeDigRBCCCGRmLeehw033Aix4QEsdDQ1NuzmjOM+eqKEHc/aF7j7UR0ZrPYSGscV826sYNQUCWLumi1dh5ueyRTu961c5XZcC+vrIOel7/4EYm8fec0ZJ4yuk59ub4XYrV++yxl/qDQOOf01RhGsJrerXm0SO9pNjo7gXANT7nEpXNsmk7ivXCy585fy2DV0KsT9vkq5uj3A026DR+nbehvk7Lnd3U99qBv3i/tkK07erTt2HvK7KG+PwwwY++/ogphbdljBTXN3Piw7JN4+BHcevFMNlg+iyg6oN691Ne/8ABaJG8tPQWx40tW8hGBOezvuv3d0uZrno3ciqHla70Sm0TxVUE/rnYif5mm9E/HTPK13Iqh5Pnongprno3ciIrVp914lUzPrnQhq3lRl9vRuOvjLAyGEEEIiwcUDIYQQQiLBxQMhhBBCIsHFAyGEEEIiMW8Nk+0L0eDX1oLd1KbyrlmoaBQGyo+dh9jSTnf++nosdJII0DxTUoals4M4d6HgXkMlNKrpxLAD2tSUW3GmtgtNnJ/5X/4NxP6wwS1skq9g4ZHmRjTm1KjulCv7jkFO19g5iBWKrnlnbASL2Zw/i6bGiip0EsTxviSNykMJda/KMeO5GJ95KpidtXPnLrTcbc7ucsYHMmjGkm6s9pRVj9QqTpRbawTJZYDRLXMXhrZuNSpHeaA1r60FC95NGAbGSfV9zE8aeteBelqvitklEjPrnQhqntY7ET/N03on4qd5Wu9E/DRP650Iap6P3omg5v
nonQhqno/eiaDmlYziUlM1s/tbAX95IIQQQkgkuHgghBBCSCS4eCCEEEJIJLh4IIQQQkgk5q1hMh5H00gqgbG6OtcIFBhmk9ERrFomMbcimdWJLp1GA1FDi1v9a2gUqzLmC+7cJaNLWiVA80654BqISsNo3qltxs+3cFGXM25bgBXRjKJlMjXsVkUr5NH4VCxiCc1y3p0sLKN5J8SQlEvuXBXjvlhU1HGhYY5KGu9LkMZ77ENnp2tjDDdjTt8u1yC5uQ+7sj6y8wDEZIs7tBpoZg1j3iH1kTc9gsd1ftnomHlAzbUHUiS7HWPGlfuhG4l+0shZVe3kc8caw7m6Ro03G4Uju2+3ZquifKWIJJWZLp5ATaqpQ2N3pc5990cGDL1LYQXGVK07v6l3BRSOoXOu5mm9ExEplQzNy7vfx3IMDZM+mqf1TkSkrdbQPDWeymEVSK15Pnongprno3cihuZV8DitdyIiUnJPUFuDepcMq9O76eAvD4QQQgiJBBcPhBBCCIkEFw+EEEIIicS89TyEMdzvq8SNjSXd0c1IqavDPeTRUXeTMzeG+1rNrViwpE4VoUrV1EBOWCm/51hEpGTsa+Un3LknRrHYS3IY9wnHi+5jXpC50ut8ufP9znhqGDd+E8YblIq7e2uWzySAHUeRoFJSY6MYirF5WC67G4NG+RmzOFhg+CB82HKf2/kyK/geHDi9U0V240TbDlZ1frPPpfrQe4yGnda7L+vduRrWe52teu6Zzclmh4xh4Ni8zR0ftnwtyp9y7xrjTpn2BveEviWjJpPuQ47F8csXS6MuSuh+P+oWzKx3Iqh5PnonIpJqdjUvDFHfwpiheQn3+58fw7l9NE/rnYif5mm9E0HN89E7EdQ8U++MlstBQRW8s/QuQCNEoKRs0uigWa3eTQd/eSCEEEJIJLh4IIQQQkgkuHgghBBCSCS4eCCEEEJIJOatYVIsE47Rqa2izIgxIyeZwttQUWa9/n4001yxGtsbNje4nT1rkjh3Mumu2WriaJwpTQ5DTMpuZ7ZEHI0zpQLar0bPnnLG+RIaZ8IiHjd5/oR7emPuunosviLKtFUpoJEsYZh3Qm2sNIo9xQzTnw5Vysa7UTbucR473flwoLvTI+t+dQFWjlUCShnXLPenGNWIfDDncvE3R2rzXJXXNIugnc+OaXZj/S7p3OeOPW6dZ5KI31UZKJNhGLP0Do1yWvN89E4ENc9H70RQ87TeiXhqXhm/nz6ap/VOxE/ztN6JoOb56J0Iap6ld8mUYSRXAjdpFIkKrcJRSvNmU++mg788EEIIISQSXDwQQgghJBJcPBBCCCEkEu8rz0Mgxh6SKiBk7YeXjQZMtWm3eczQ+SHIGR0Zhli86O51lYpTkFMquntP5Tw2z6ptaIVYoeR+liljH72iq4WISEw11EkYxoGpIl7D8Dl3HzttFOFqaaqHWDLmvlal0gTkFI39tzBwjyuHxv6tURgnsDYBFZWKtXloGhFmpLNv5rZQ2hVh9FUS6TP2vrUNwjgwW+WWuR9YXSojd+M1yFXOuNuwPBwwrlOHrI9iuSf0bdktX8akg/hcujNuIS5r7vUe99P3Ov2o8kilXYHxXQ+M91xrXrk4s96JiAydczXP1DvDz6Q1T+udyDSaV+9qXsH4fKbmhW6e1jsRP83TeieCmrfY0rsKatJQwdW84iTeg3yAx2nNuxz0bjr4ywMhhBBCIsHFAyGEEEIiwcUDIYQQQiLBxQMhhBBCIjF/DZNG0Y1Ad9AUkXjCLeARltC4EjdMKZUa10A0NYHd3E6++SLEamTSGU+cweJSxbg79/joMOQ0NKJhMqlauk1NYtGm7CCaE+vb3HvQGsPCLoMnjkDsjRd+6oxXr14JOR2L2yFWmBxxxuO5UcgZH0cjaVhy70s8bhRfMYxA2jgWxDAnXWd0Ny3PbDyyOGDVdpoJwyOXsWpNeXjpMoZjEkPGRKbT8j41NsygmU9gTB/laeLUV+VrHTygrr
1T9mDSOpztopauqtaP5ltcKq3e87ihd0a34bDgap6pd4KGSa15J1839C42CTGtecUA5x4fHoFYgzJMJpOGydFD8+rzeA98NE/rnYjI6itdzetoR72bLONnGR93NW98cma9E0HN89E7EdS82dS76eAvD4QQQgiJBBcPhBBCCIkEFw+EEEIIiQQXD4QQQgiJxLw1TMYEzR9WV80gcGNBAs00Md3NUUTStW4sP4TnO/n6SxCriRWd8cQEVkks1ix0xuOjaLipLMauaKmaWmdsVdTs7RvGaxp2K8ydP3EYr+n82xCLqw59tQ3YUS6VSkMsp6pxDg8O4PkqTRCLaROs0VXT8paFId4rPNCq1FY0Ej0Ye0YF7sCcRjU2zJFZw86X6VujIkaJSSOU2aUCm/U8ItJpxGSbe00HDEOa5Trcqi9im5Hk0X00/FsjiE85m8Hr8jqfnsqaRj8rE/9+o3OF7o4ZWt8Po7pikHI1z9Q7w4CuNe/krw29q+B3aKLg6kaxbiHkjI+igbCyyL2GVKIFcgIZhFhvn/suar0TmU7z3OPiFaPyZpNrkEzVoG7ljG6cWvOKZQ+9EwHNuyz0bhr4ywMhhBBCIsHFAyGEEEIiwcUDIYQQQiIxbz0PqVQKYpUS7gWFFTemi0aJ2PtKyaQq1lHbBjn5EhY/Wtju7u+N9vZCztlzbvGVs/1nIGfZlVYxK7eoSFMLFpJqrDsLscnxYWc8OIx7dGEJN4Mb29zCPO2ZJZATGPunhSl3rqGzuE9ZShl7gEnlKzGK7sSMp1Usqb084zjrOotW5zkP9t3rVonatRn32p/YpCNWX80GiOQazf6bLmuNTfq1+vlh1aZO0V4Nkb4+dbOyeE2S+yyE1jy+0z1bp1E5qw99All1HzKdVmEnvPbD+l5143vQAL0335ltRkw7hX4Oxn2ZLbw8FyJ1oet5KhjdasMYaqDWPFPv0kZBtjpX8/LlmfVOBDXvrFG47mzW0Lxla51xPIXf2aa6RRBrTLnPanJoGHJMzSu7GtvYhvrWvqjLGQeGl6BgFK4aGnA1z0vvREC7tM9FRKRYNLwL6jirkFSRRaIIIYQQcinh4oEQQgghkeDigRBCCCGR4OKBEEIIIZGYt4ZJq4FdMmWYIUOVWcECImGAppRy2TUeJevQoHnVhk9DrHPlNc646wwaJl/95U+c8bmBU5AzcgYNPtr0FxqGqeZ6vM5U3I2V6tG8MzE6BLHRnFvoZGIIO4RWxrEA1NlTR51xsYhPK1ZjPSsdMYrgGM8qrrqNhmU8rlzEYl2VEsZ86NztFoXavhsNfluVh3LP5k2Qs2szdrDsPqgCGatdpdH5Elx3mJPpu9+Yyu2q2fkIpuS6sZBTY7drRDycWQs5YtSk2pR1TY2bdhmGxsP4mQ91up/niQfxsGzmJxiU23SWkWOhTbCersZqpvYkEPd9rTe6zoYlfPdLRVfzwqShd4bZPKlM6VfdMrPeiaDmvfoLfC7nTqOWjPS7mhdksItvWBiHWHPaLVSXCrBwXakWC9zlR1Xny2HsnDx51v0slXE0NJ81TPHFvPscYtafTfhHkWjNCwx7azyGf2xrzSvnDb2rVKd308FfHgghhBASCS4eCCGEEBIJLh4IIYQQEgkuHgghhBASiflrmDQck5b/JKb8JqFRVTBuVPFKJF1z4lQJ11nnh7CCXvD2EXdctwByWpde5Yxzh9AcqU2HIiLlinudY2NY8S2fx88SxtzqeCmj2mKsCatVFhOuiWpyCs1KYwNoQBsfcY1HQRzvgVXmToesLqmWyVFXEZW4YSgynnu1K+dMdreKoJkuk3WNVd0H0Gi1+f7NEMuC0RGNj5vXYSXF7CZVufF5o9riActoqR2aSNY67MAhZ9hnVIW0qmriVeHkVgXETVk3uu02nHv3OnR79qnXs9N4VruNEpP6ETdaZSg9ioHebfgzs9oEu2XmeUREgtA1AobGGxwzXHhJ9f0IDd9cIo13XWve+DnUu8HyEY
jFlOYt6boKcnKHTmPs1FvOOG6Y28fGhiFWzrvXGQ/qIKcuiZpXampxxlrvREQqSvPGBvChTw2fg1gs7s5tFP4UQ94kVF1RTb0rG5MpzYuXUO+C2S0wyV8eCCGEEBINLh4IIYQQEgkuHgghhBASiXnreShPYYGkomGEqKlxO9HFjG6c8bjhAYi5c9UahTn6ew5D7O1fjzjjVBN2gatNu2u21gWYs2jpKjyuwd1LjCex8MjAKBY6qRTdPbKKtddmOEbaFrkd8xZ1tENOrBW7jY6PuXuA2TO4VzplmVZCVczG2POsWOXBVLGcWGjsCRrHhebu+szs2ewWiTog90HO/Z1u0aRw13pjppk9AbtPo5cg1mn4FMI97jgwNtK34Ab8l5UXY2vWqGBk2Rmy7ufLGjkH9mOsGxoX4vn2G/elU8+f3Qs5W0z7hntHt+40On1a196nPCS5PZCz83kVMOaxSlJtFtQNH6am3EJuxRB1q6YGu3/WpZQHII7vfdLYlA9r3LwT/Xjdbx4ZgZjWvNoQ/47augB1Y4HSPK13IiKBoXkJpXkFY3O/bDSi1Fktiwx/mtK8WGsL5DSPoedBa96wWfDOKFhYcmOVCh6XELwHMVXkq2zoa8oS/guAvzwQQgghJBJcPBBCCCEkElw8EEIIISQSXDwQQgghJBLz1jBpdRtLxHAtBFmGcyZIGAYUZaKMgb1GJN2yEGL1DW7HyliA15RQ3fHaFuA8MV34SEQmR9zCKsm6FrymNBpCJydVoRHjPlUKRle9Fve+TJzHwi6FHHbjrGl0jUeN9djlrjiJ5yuV1eto+HsSRhCMjzGjg53xppcMM5IP27e7hY06+9BMt1nc2JbN6KY70IkmSigSZXZg3IUhuC1GB01d20pEHsm5Zs89xgm7d6GBsbHP/TyHuvG5HOzEklD373cvYt2B7ZDTmdkKsT5tMtyEcx82Gl+OyQ43EFqVnWbutGllbLhdBSw/mvWK6cJ1M579HSoJ98CEZcKL5yE2Ja7mJZN4o0oJNF8m1Pma0jPrnQhqntY7EZG2FjSJV9TXNjeCdz1Zh9eQqHUNoZUJQzsNc2K5OOWMm2pqIGd45KQzLkyiQbSmCY2WyVZX8+I+emeQMCpJWUbvSqBunlEQaqpQnd5NB395IIQQQkgkuHgghBBCSCS4eCCEEEJIJLh4IIQQQkgk5q9hMoUmPKNwmoTKVBSU0LhSrmC3SEm4tyaWxFuVNKqdaaOl5aFa1HWFM+7IoOFm4sxxiFXyblXNdALNn2XDYBNThtDAMFpVDMNUJdXizp3G44xGm1LbtMId1xuVIk+ewVioXD5GlbSwbFVqUzlGNdDAMK4GRndRH7KZrBobTkRtPNSdFEUkK2i0xDKFM5v5vAmtUpFuJcqc0eVy71bDtakKGY4FRtfJcBvGut17ddBo/nnQrMCoO5DeY+RYuNVA4WUR8XMsVlucbxaL+gU1yoTnoXciIkHgGhbLRTT9SYjfmZj648FH70TwI2u9ExHpWGxpnvuuV0pYRTgdMzRPXVYsgXpjap76/lfq0Uharndf9KkCmiq13omg5lVOYudk0DsR0DwfvRNBzZtNvZsO/vJACCGEkEhw8UAIIYSQSHDxQAghhJBIeHke/vRP/1SeeeYZOXHihLz00kty0003iYjIkSNH5Itf/KKcO3dOmpub5Zvf/KZce+21M/632aBieBcCw5egC5bE4tZ6yejCVnbnD4wNxsDoUqY7dIZxvKbatuXOOHPdRshJ3YhXOTXmFmQ623sEcop92MqwVHaLoSSNfcp4Au9LvuDmpVevg5wubJMojW0dzvh8/ynIOd77JJ6v6HaiixlFnKwt5EB11Qwr+DzDMsaKBSyo4wN2YbTaObqxTBY39zNWvSLlcch2G0k+++jmPr7hn9DeBbMqlVF9SZMzrtM6zKtOjXVjtD/E8IuMWZ4Odd/7sOukdGKnTZ+PbBXd8uNWd2g0QLWoqMJxPnonIhLTf0c0vh/lvK
GnSvN89E4ENU/rnYhI5npD825yx1rvRKbRvNOu5pXKRsE7D83TeieCmmfqXWsHxLTmHT+BHq98Ad+7mPI8mHqnq2mJSKg6ic6m3k2H1y8Pn/70p+XnP/+5LF/uvgTbtm2TrVu3yuHDh+XP/uzP5M477/T6b4QQQgiZv3gtHm655Rbp6upyYgMDA7J//3753Oc+JyIif/AHfyCnTp2So0ePvud/I4QQQsj8pmrPw6lTpySTyUjiH/9JYxAEsmzZMjl58uR7/jeLHTt2SFdX17v/GxszfkYkhBBCyGXBZWGYvPvuu6W3t/fd/zU0GPuShBBCCLksqLpI1NKlSyWbzUqpVJJEIiFhGMrJkydl2bJl0tTUNO1/my3KBezUJoZJJFQemHgSu04mUkYXRlVsKW5U5qgYxhxt1osblpfTR153xlMjWLSlta0VYoWiWyBl5Pw5yCmWDVOT6qIZlrGIiuVkK+TOO+MjL/0Ccs70oFmoodW99tzwecgpTWGxl6QyPpaNIirlovXc3VipYhSlKmEsNIpQ+ZA57C5uzTJOa9e6OYaXb51huOtUkx3IGrN7hLqN8x0wjst8WQXvMopZPWwUbdJNO7caC/4c/oJoWRo12FNTRFRnz0w3Gjv3WPdK3ePNe9C4uk02QSzULkajEFiQ1QW1rDfBKJ4lz+uTeVHW3XELht6hvIHmJWRmvRMRiZfcWKVo6F2A1xBPu8edfu11yDE1r8HVjUIcNWKkz9A8dZ1x4+/EYXFmzSuMoE4decHVvIE21Lv6BajVuSF3rnIOP0uj8RxKJaXVRSyUJQFqoNa80NC7RJV6Nx1V//LQ3t4u69atk8cee0xERJ566inp6uqSVatWved/I4QQQsj8xuuXh23btsl3v/td6e/vl9/+7d+WxsZGOXr0qOzcuVPuvPNOeeCBB6SpqUkeffTRd495r/9GCCGEkPmL1+Jh586dZnzt2rXywgsvRP5vhBBCCJm/XBaGSUIIIYTMH4KwWtfYHNLV1SW9vb2X+jIIIYSQf7a815/F/OWBEEIIIZHg4oEQQgghkeDigRBCCCGR4OKBEEIIIZHg4oEQQgghkeDigRBCCCGR4OKBEEIIIZHg4oEQQgghkbgsi0Sl02lZtGiRjI2NsT33RYb3/OLDe37x4T2/+PCeX3wu9J6fPXtW8vm8+d8uy8XD/4CVJi8+vOcXH97ziw/v+cWH9/ziM5f3nNsWhBBCCIkEFw+EEEIIicRlvXi4++67L/Ul/LOD9/ziw3t+8eE9v/jwnl985vKeX9aeB0IIIYRcflzWvzwQQggh5PKDiwdCCCGERIKLB0IIIYRE4rJcPBw5ckQ+8pGPyJo1a2TDhg3y+uuvX+pLet8xNTUlv/d7vydr1qyRG2+8UT7+8Y/L0aNHRURkYGBAfud3fkdWr14t1113nfz0pz+9xFf7/uLRRx+VIAjk7/7u70SE93uuyefz8tWvflVWr14t119/vXzuc58TEerMXPH3f//3sm7dOrnpppvkuuuuk7/5m78REb7ns8mf/umfyooVKyQIAnn55Zffjb/XOz3r73t4GXLbbbeFjz76aBiGYfjf/tt/C9evX39pL+h9yOTkZPjd7343rFQqYRiG4V//9V+Ht956axiGYfiv//W/Du+7774wDMPwxRdfDJcsWRIWCoVLdKXvL44fPx5++MMfDjdu3Bh+5zvfCcOQ93uu+bf/9t+GX/3qV99917PZbBiG1Jm5oFKphAsWLAhfeeWVMAzfed/T6XQ4OjrK93wWef7558NTp06Fy5cvD1966aV34+/1Ts/2+37ZLR7OnDkTNjY2hsViMQzDd17Gjo6O8MiRI5f4yt7f7Nu3L1y+fHkYhmFYX1//rsCGYRhu2LAh/OEPf3iJruz9Q7lcDj/2sY+F+/fvD2+99dZ3Fw+833PH2NhY2NjYGI6MjDhx6szcUKlUwoULF4bPP/98GIZh+Morr4SdnZ1hPp/nez4H/NPFw3u903Pxvl
922xanTp2STCYjiURCRESCIJBly5bJyZMnL/GVvb/5y7/8S/nkJz8pg4ODUiwWZfHixe/+txUrVvD+zwI7duyQm2++Wbq7u9+N8X7PLceOHZOFCxfKAw88IOvXr5ePfvSj8txzz1Fn5oggCORb3/qWfOpTn5Lly5fLb/zGb8jf/M3fSC6X43s+x7zXOz0X73tiVq6azGseeOABOXr0qDz33HMyOTl5qS/nfclrr70mTz31FPd5LzKlUklOnDgh11xzjfzFX/yFvPTSS/Lxj39cvvvd717qS3tfUiqV5M///M/l6aeflltuuUX27dsnn/jEJ5x9efL+4LL75WHp0qWSzWalVCqJiEgYhnLy5ElZtmzZJb6y9ycPPvigPP300/K9731P6urqpLW1VRKJhPT397+b09PTw/t/gfzsZz+Tnp4eWb16taxYsUJ++ctfytatW+Xb3/427/ccsmzZMonFYvLZz35WREQ+8IEPyBVXXCEnTpygzswBL7/8svT19cktt9wiIiIbNmyQrq4uefXVV/mezzHv9WfnXPy5etktHtrb22XdunXy2GOPiYjIU089JV1dXbJq1apLfGXvP3bs2CFPPPGE/PCHP5SWlpZ345/5zGfk4YcfFhGRffv2yenTp+XWW2+9RFf5/uArX/mKZLNZ6enpkZ6eHtm4caPs2rVLvvKVr/B+zyFtbW3ysY99TH7wgx+IiMjx48fl+PHjcvPNN1Nn5oD/8YfUm2++KSIiR48elWPHjsnatWv5ns8x7/Vn55z8uXpBbo054q233go3btwYrl69Ouzu7g5fffXVS31J7ztOnToViki4cuXK8MYbbwxvvPHG8IMf/GAYhmHY398ffvzjHw9XrVoVXnPNNeGPf/zjS3y17z/+qWGS93tuOXbsWLhp06bwuuuuC2+44YbwySefDMOQOjNXPP744+/e6+uuuy7827/92zAM+Z7PJlu3bg2XLFkSxuPxsL29PbzyyivDMHzvd3q233f2tiCEEEJIJC67bQtCCCGEXN5w8UAIIYSQSHDxQAghhJBIcPFACCGEkEhw8UAIIYSQSHDxQAghhJBIcPFACCGEkEhw8UAIIYSQSPz/Abkqwnqwvep0AAAAAElFTkSuQmCC\\n\",\n      \"text/plain\": [\n       \"<Figure size 640x640 with 1 Axes>\"\n      ]\n     },\n     \"metadata\": {},\n     \"output_type\": \"display_data\"\n    }\n   ],\n   \"source\": [\n    \"import random\\n\",\n    \"import matplotlib.pyplot as plt\\n\",\n    \"import matplotlib\\n\",\n    \"%matplotlib inline\\n\",\n    \"\\n\",\n    \"def imshow(img):\\n\",\n    \"    fig = plt.figure(figsize=(8, 8), dpi=80, facecolor='w', edgecolor='k')\\n\",\n    \"    npimg = img.numpy()\\n\",\n    \"    plt.imshow(np.transpose(npimg, (1, 2, 0)))\\n\",\n    \"    plt.show()\\n\",\n    \"    \\n\",\n    \"def get_pairs_of_imgs(idx):\\n\",\n    \"    clean_img = clean_train_dataset.data[idx]\\n\",\n    \"    unlearnable_img = unlearnable_train_dataset.data[idx]\\n\",\n    \"    clean_img = torchvision.transforms.functional.to_tensor(clean_img)\\n\",\n    \"  
  unlearnable_img = torchvision.transforms.functional.to_tensor(unlearnable_img)\\n\",\n    \"\\n\",\n    \"    x = noise[idx]\\n\",\n    \"    x_min = torch.min(x)\\n\",\n    \"    x_max = torch.max(x)\\n\",\n    \"    noise_norm = (x - x_min) / (x_max - x_min)\\n\",\n    \"    noise_norm = torch.clamp(noise_norm, 0, 1)\\n\",\n    \"    return [clean_img, noise_norm, unlearnable_img]\\n\",\n    \"    \\n\",\n    \"selected_idx = [random.randint(0, 50000) for _ in range(3)]\\n\",\n    \"img_grid = []\\n\",\n    \"for idx in selected_idx:\\n\",\n    \"    img_grid += get_pairs_of_imgs(idx)\\n\",\n    \"    \\n\",\n    \"\\n\",\n    \"imshow(torchvision.utils.make_grid(torch.stack(img_grid), nrow=3, pad_value=255))\\n\",\n    \"\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"<h3>Train ResNet18 on Unlearnable Dataset</h3>\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 7,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 36.99 Loss: 1.73: 100%|██████████| 391/391 [00:20<00:00, 19.17it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 35.16\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 76.95 Loss: 0.65: 100%|██████████| 391/391 [00:20<00:00, 19.51it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 22.21\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 93.06 Loss: 0.21: 100%|██████████| 391/391 [00:20<00:00, 19.51it/s]\\n\",\n      \"  0%|          | 
0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 23.69\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 95.13 Loss: 0.15: 100%|██████████| 391/391 [00:20<00:00, 19.37it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 25.79\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 96.16 Loss: 0.12: 100%|██████████| 391/391 [00:20<00:00, 19.23it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 20.87\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 96.78 Loss: 0.10: 100%|██████████| 391/391 [00:19<00:00, 19.56it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 19.92\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 96.89 Loss: 0.10: 100%|██████████| 391/391 [00:19<00:00, 19.65it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 19.44\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 97.22 Loss: 0.08: 100%|██████████| 391/391 [00:20<00:00, 19.45it/s]\\n\",\n      \"  0%|          | 0/391 
[00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 19.08\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 97.35 Loss: 0.08: 100%|██████████| 391/391 [00:20<00:00, 19.47it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 22.07\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 97.58 Loss: 0.07: 100%|██████████| 391/391 [00:20<00:00, 19.37it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 17.37\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 97.89 Loss: 0.07: 100%|██████████| 391/391 [00:20<00:00, 19.43it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 20.82\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 97.85 Loss: 0.07: 100%|██████████| 391/391 [00:19<00:00, 19.56it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 18.45\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 98.05 Loss: 0.06: 100%|██████████| 391/391 [00:19<00:00, 19.59it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, 
?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 19.74\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 98.18 Loss: 0.06: 100%|██████████| 391/391 [00:20<00:00, 19.30it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 19.36\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 98.30 Loss: 0.05: 100%|██████████| 391/391 [00:19<00:00, 19.55it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 22.84\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 98.53 Loss: 0.05: 100%|██████████| 391/391 [00:20<00:00, 19.44it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 22.93\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 98.61 Loss: 0.04: 100%|██████████| 391/391 [00:20<00:00, 19.52it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 16.04\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 99.00 Loss: 0.03: 100%|██████████| 391/391 [00:20<00:00, 19.43it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     
]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 17.80\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 99.08 Loss: 0.03: 100%|██████████| 391/391 [00:20<00:00, 19.33it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 22.51\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 99.22 Loss: 0.02: 100%|██████████| 391/391 [00:20<00:00, 19.32it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 23.77\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 99.44 Loss: 0.02: 100%|██████████| 391/391 [00:20<00:00, 19.22it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 23.28\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 99.57 Loss: 0.02: 100%|██████████| 391/391 [00:20<00:00, 19.13it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 19.66\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 99.60 Loss: 0.01: 100%|██████████| 391/391 [00:19<00:00, 19.64it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    
{\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 25.13\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 99.76 Loss: 0.01: 100%|██████████| 391/391 [00:20<00:00, 19.43it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 23.19\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 99.83 Loss: 0.01: 100%|██████████| 391/391 [00:19<00:00, 19.55it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 22.05\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 99.89 Loss: 0.01: 100%|██████████| 391/391 [00:19<00:00, 19.56it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 22.94\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 99.88 Loss: 0.00: 100%|██████████| 391/391 [00:20<00:00, 19.42it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 23.66\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 99.94 Loss: 0.00: 100%|██████████| 391/391 [00:20<00:00, 19.44it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     
\"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 23.19\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 99.95 Loss: 0.00: 100%|██████████| 391/391 [00:20<00:00, 19.53it/s]\\n\",\n      \"  0%|          | 0/391 [00:00<?, ?it/s]\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 22.83\\n\",\n      \"\\n\"\n     ]\n    },\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Acc 99.94 Loss: 0.00: 100%|██████████| 391/391 [00:20<00:00, 19.46it/s]\\n\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Clean Accuracy 23.60\\n\",\n      \"\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"from util import AverageMeter\\n\",\n    \"\\n\",\n    \"model = ResNet18()\\n\",\n    \"model = model.cuda()\\n\",\n    \"criterion = torch.nn.CrossEntropyLoss()\\n\",\n    \"optimizer = torch.optim.SGD(params=model.parameters(), lr=0.1, weight_decay=0.0005, momentum=0.9)\\n\",\n    \"scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=30, eta_min=0)\\n\",\n    \"\\n\",\n    \"unlearnable_loader = DataLoader(dataset=unlearnable_train_dataset, batch_size=128,\\n\",\n    \"                                shuffle=True, pin_memory=True,\\n\",\n    \"                                drop_last=False, num_workers=12)\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"for epoch in range(30):\\n\",\n    \"    # Train\\n\",\n    \"    model.train()\\n\",\n    \"    acc_meter = AverageMeter()\\n\",\n    \"    loss_meter = AverageMeter()\\n\",\n    \"    pbar = tqdm(unlearnable_loader, total=len(unlearnable_loader))\\n\",\n    \"    for images, labels in pbar:\\n\",\n    \"        images, labels = images.cuda(), labels.cuda()\\n\",\n    \"        
model.zero_grad()\\n\",\n    \"        optimizer.zero_grad()\\n\",\n    \"        logits = model(images)\\n\",\n    \"        loss = criterion(logits, labels)\\n\",\n    \"        loss.backward()\\n\",\n    \"        torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)\\n\",\n    \"        optimizer.step()\\n\",\n    \"        \\n\",\n    \"        _, predicted = torch.max(logits.data, 1)\\n\",\n    \"        acc = (predicted == labels).sum().item()/labels.size(0)\\n\",\n    \"        acc_meter.update(acc)\\n\",\n    \"        loss_meter.update(loss.item())\\n\",\n    \"        pbar.set_description(\\\"Acc %.2f Loss: %.2f\\\" % (acc_meter.avg*100, loss_meter.avg))\\n\",\n    \"    scheduler.step()\\n\",\n    \"    # Eval\\n\",\n    \"    model.eval()\\n\",\n    \"    correct, total = 0, 0\\n\",\n    \"    for i, (images, labels) in enumerate(clean_test_loader):\\n\",\n    \"        images, labels = images.cuda(), labels.cuda()\\n\",\n    \"        with torch.no_grad():\\n\",\n    \"            logits = model(images)\\n\",\n    \"            _, predicted = torch.max(logits.data, 1)\\n\",\n    \"            total += labels.size(0)\\n\",\n    \"            correct += (predicted == labels).sum().item()\\n\",\n    \"    acc = correct / total\\n\",\n    \"    tqdm.write('Clean Accuracy %.2f\\\\n' % (acc*100))\\n\",\n    \"            \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": []\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": []\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   
\"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.8.2\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 4\n}\n"
  },
  {
    "path": "README.md",
    "content": "# Unlearnable Examples\n\nCode for ICLR2021 Spotlight Paper [\"Unlearnable Examples: Making Personal Data Unexploitable \"](https://openreview.net/forum?id=iAmZUo0DxC0) by Hanxun Huang, Xingjun Ma, Sarah Monazam Erfani, James Bailey, Yisen Wang.\n\n## Quick Start\n##### Use the QuickStart.ipynb notebook for a quick start.\nIn the notebook, you can find the minimal implementation for generating sample-wise unlearnable examples on CIFAR-10.\nPlease remove `mlconfig` from `models/__init__.py` if you are only using the notebook and copy-paste the model to the notebook.\n\n\n\n## Experiments in the paper.\nCheck scripts folder for *.sh for each corresponding experiments.\n\n## Sample-wise noise for unlearnable example on CIFAR-10\n##### Generate noise for unlearnable examples\n```console\npython3 perturbation.py --config_path             configs/cifar10                \\\n                        --exp_name                path/to/your/experiment/folder \\\n                        --version                 resnet18                       \\\n                        --train_data_type         CIFAR10                       \\\n                        --noise_shape             50000 3 32 32                  \\\n                        --epsilon                 8                              \\\n                        --num_steps               20                             \\\n                        --step_size               0.8                            \\\n                        --attack_type             min-min                        \\\n                        --perturb_type            samplewise                      \\\n                        --universal_stop_error    0.01\n```\n##### Train on unlearnable examples and eval on clean test\n```console\npython3 -u main.py    --version                 resnet18                       \\\n                      --exp_name                path/to/your/experiment/folder \\\n                      --config_path     
        configs/cifar10                \\\n                      --train_data_type         PoisonCIFAR10                  \\\n                      --poison_rate             1.0                            \\\n                      --perturb_type            samplewise                      \\\n                      --perturb_tensor_filepath path/to/your/experiment/folder/perturbation.pt \\\n                      --train\n```\n\n\n## Class-wise noise for unlearnable example on CIFAR-10\n##### Generate noise for unlearnable examples\n```console\npython3 perturbation.py --config_path             configs/cifar10                \\\n                        --exp_name                path/to/your/experiment/folder \\\n                        --version                 resnet18                       \\\n                        --train_data_type         CIFAR10                       \\\n                        --noise_shape             10 3 32 32                     \\\n                        --epsilon                 8                              \\\n                        --num_steps               1                              \\\n                        --step_size               0.8                            \\\n                        --attack_type             min-min                        \\\n                        --perturb_type            classwise                      \\\n                        --universal_train_target  'train_subset'                 \\\n                        --universal_stop_error    0.1                            \\\n                        --use_subset\n```\n##### Train on unlearnable examples and eval on clean test\n```console\npython3 -u main.py    --version                 resnet18                       \\\n                      --exp_name                path/to/your/experiment/folder \\\n                      --config_path             configs/cifar10                \\\n                      --train_data_type         PoisonCIFAR10        
          \\\n                      --poison_rate             1.0                            \\\n                      --perturb_type            classwise                      \\\n                      --perturb_tensor_filepath path/to/your/experiment/folder/perturbation.pt \\\n                      --train\n```\n\n\n---\n## Cite Our Work\n```\n@inproceedings{huang2021unlearnable,\n    title={Unlearnable Examples: Making Personal Data Unexploitable},\n    author={Hanxun Huang and Xingjun Ma and Sarah Monazam Erfani and James Bailey and Yisen Wang},\n    booktitle={ICLR},\n    year={2021}\n}\n```\n"
  },
  {
    "path": "collect_results.py",
    "content": "import argparse\nimport collections\nimport json\nimport os\nimport numpy as np\nimport dataset\nimport mlconfig\nimport models\nimport torch\nimport util\nfrom evaluator import Evaluator\nfrom tabulate import tabulate\n\nparser = argparse.ArgumentParser(description='ClasswiseNoise')\nargs = parser.parse_args()\n\nif torch.cuda.is_available():\n    torch.backends.cudnn.enabled = True\n    torch.backends.cudnn.benchmark = True\n    device = torch.device('cuda')\n    device_list = [torch.cuda.get_device_name(i) for i in range(0, torch.cuda.device_count())]\n    print(\"GPU List: %s\" % (device_list))\nelse:\n    device = torch.device('cpu')\nprint(\"PyTorch Version: %s\" % (torch.__version__))\n\n\ndef load_results(targt_exp, model_name):\n    # print(targt_exp)\n    config_file = os.path.join(targt_exp, model_name+'.yaml')\n    checkpoint_path_file = os.path.join(targt_exp, 'checkpoints', model_name)\n    if not os.path.isfile(config_file) or not os.path.isfile(checkpoint_path_file+'.pth'):\n        # print('No such files: \\n%s\\n%s' % (config_file, checkpoint_path_file))\n        return None\n\n    config = mlconfig.load(config_file)\n    config.set_immutable()\n    model = config.model().to(device)\n    checkpoints = util.load_model(filename=checkpoint_path_file, model=model, optimizer=None, scheduler=None)\n    if config.epochs != checkpoints['epoch']:\n        return None\n    if 'cm_history' in checkpoints['ENV']:\n        new_hist = []\n        for item in checkpoints['ENV']['cm_history']:\n            if isinstance(item, np.ndarray):\n                new_hist.append(item.tolist())\n            else:\n                new_hist.append(item)\n        checkpoints['ENV']['cm_history'] = new_hist\n    return checkpoints['ENV']\n\n\nif __name__ == '__main__':\n    exp_names = [\n        'experiments/cifar10/random_samplewise/CIFAR10-eps=8',\n        'experiments/cifar10/min-max_samplewise/CIFAR10-eps=8-se=0.9-base_version=resnet18',\n        
'experiments/cifar10/min-min_samplewise/CIFAR10-eps=8-se=0.1-base_version=resnet18',\n        'experiments/cifar10/min-min_samplewise/CIFAR10-eps=8-se=0.01-base_version=resnet18',\n        'experiments/cifar100/min-min_samplewise/CIFAR100-eps=8-se=0.3-base_version=resnet18',\n        'experiments/cifar100/min-min_samplewise/CIFAR100-eps=8-se=0.01-base_version=resnet18',\n        'experiments/svhn/min-min_samplewise/SVHN-eps=8-se=0.1-base_version=resnet18',\n        'experiments/svhn/min-min_samplewise/SVHN-eps=8-se=0.01-base_version=resnet18',\n        'experiments/imagenet-mini/min-min_samplewise/ImageNetMini-eps=16-se=0.1-base_version=resnet18',\n        'experiments/cifar10/random_classwise/CIFAR10-eps=8/',\n        'experiments/cifar10/min-max_classwise/CIFAR10-eps=8-se=0.8-base_version=resnet18',\n        'experiments/cifar10/min-min_classwise/CIFAR10-eps=8-se=0.1-base_version=resnet18',\n        'experiments/cifar10/min-min_classwise/CIFAR10-eps=8-se=0.01-base_version=resnet18',\n        'experiments/cifar100/min-min_classwise/CIFAR100-eps=16-se=0.1-base_version=resnet18',\n        'experiments/cifar100/min-min_classwise/CIFAR100-eps=8-se=0.01-base_version=resnet18',\n        'experiments/svhn/min-min_classwise/SVHN-eps=8-se=0.1-base_version=resnet18',\n        'experiments/svhn/min-min_classwise/SVHN-eps=8-se=0.01-base_version=resnet18',\n        'experiments/imagenet-mini/min-min_classwise/ImageNetMini-eps=16-se=0.1-base_version=resnet18',\n        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=16-se=0.1-base_version=resnet18',\n        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=24-se=0.01-base_version=resnet18',\n        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=24-se=0.1-base_version=resnet18',\n        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=24-se=0.01-base_version=resnet18',\n        'experiments/cifar10-extension/min-min_samplewise/CIFAR10-eps=16-se=0.1-base_version=resnet18',\n    
    'experiments/cifar10-extension/min-min_samplewise/CIFAR10-eps=16-se=0.01-base_version=resnet18',\n        'experiments/cifar10-extension/min-min_samplewise/CIFAR10-eps=24-se=0.1-base_version=resnet18',\n        'experiments/cifar10-extension/min-min_samplewise/CIFAR10-eps=24-se=0.01-base_version=resnet18',\n        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=8-se=0.1-base_version=resnet18-2noise',\n        'experiments/cifar10-extension/min-min_classwise/TinyImageNet-eps=16-se=0.1-base_version=resnet18',\n        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=8-se=0.1-base_version=resnet18-random8',\n        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=8-se=0.1-base_version=resnet18-random16',\n        'experiments/cifar10-extension/min-min_classwise/CIFAR10-eps=8-se=0.1-base_version=resnet18-random24',\n        'experiments/cifar10-extension/min-min_samplewise/CIFAR10-eps=8-se=0.1-base_version=resnet18-random8',\n        'experiments/cifar10-extension/min-min_samplewise/CIFAR10-eps=8-se=0.1-base_version=resnet18-random16',\n        'experiments/cifar10-extension/min-min_samplewise/CIFAR10-eps=8-se=0.1-base_version=resnet18-random24',\n    ]\n\n    model_list = [\n        'resnet18',\n        'resnet50',\n        'dense121',\n        'resnet18_augmentation',\n        'resnet18_madrys',\n        'resnet18_classpoison',\n        'resnet18_classpoison_targeted',\n        'resnet18_add-uniform-noise',\n        'resnet18_add-uniform-noise-aug',\n        'resnet18_cutout',\n        'resnet18_cutmix',\n        'resnet18_mixup',\n    ]\n\n    poison_rate_list = [0.0, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0]\n\n    exp_results = {}\n    for exp_name in exp_names:\n        print(exp_name)\n        table_data_header = ['Model'] + poison_rate_list\n        table_data = [model_list]\n        exp_results[exp_name] = {}\n        for poison_rate in poison_rate_list:\n            target_dir = os.path.join(exp_name, 'poison_train_%.1f' % 
poison_rate)\n            temp_list = []\n            exp_results[exp_name][poison_rate] = {}\n            for model_name in model_list:\n                rs_env = load_results(os.path.join(target_dir, model_name), model_name)\n                exp_results[exp_name][poison_rate][model_name] = rs_env\n                if rs_env is not None:\n                    temp_list.append('%.2f' % rs_env['curren_acc'])\n                else:\n                    temp_list.append('..')\n            table_data.append(temp_list)\n\n        # Transpose array\n        table_data = list(map(list, zip(*table_data)))\n\n        print('=' * 40 + 'Results' + '=' * 40)\n        print(tabulate(table_data, headers=table_data_header, floatfmt=\".2f\", stralign=\"left\", numalign=\"left\"))\n        print('=' * (80 + len('Results')) + '\\n')\n\n    # Save results to\n    with open('exp_results.json', 'w') as outfile:\n        json.dump(exp_results, outfile)\n"
  },
  {
    "path": "configs/cifar10/dense121.yaml",
    "content": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: DenseNet121\n  num_classes: 10\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 96\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/cifar10/resnet18.yaml",
    "content": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 10\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/cifar10/resnet18_add-uniform-noise-aug.yaml",
    "content": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 10\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n  add_uniform_noise: True\n  fa: True\n"
  },
  {
    "path": "configs/cifar10/resnet18_add-uniform-noise.yaml",
    "content": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 10\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n  add_uniform_noise: True\n"
  },
  {
    "path": "configs/cifar10/resnet18_augement.yaml",
    "content": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 10\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n  fa: True\n"
  },
  {
    "path": "configs/cifar10/resnet18_augmentation.yaml",
    "content": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 10\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n  fa: True\n"
  },
  {
    "path": "configs/cifar10/resnet18_classpoison.yaml",
    "content": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 10\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n  poison_classwise: True\n"
  },
  {
    "path": "configs/cifar10/resnet18_classpoison_targeted.yaml",
    "content": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 10\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n  poison_classwise: True\n  poison_classwise_idx: [0, 1, 8, 9]\n"
  },
  {
    "path": "configs/cifar10/resnet18_cutmix.yaml",
    "content": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 10\n\ncriterion:\n  name: CutMixCrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n  use_cutmix: True\n"
  },
  {
    "path": "configs/cifar10/resnet18_cutout.yaml",
    "content": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 10\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n  use_cutout: True\n"
  },
  {
    "path": "configs/cifar10/resnet18_denoise.yaml",
    "content": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 10\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n  img_denoise: True\n"
  },
  {
    "path": "configs/cifar10/resnet18_madrys.yaml",
    "content": "num_classes: 10\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 10\n\ncriterion:\n  name: MadrysLoss\n  epsilon: 0.03137254901\n  perturb_steps: 10\n  step_size: 0.00784313725\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: MultiStepLR\n  milestones: [75, 90, 100]\n  gamma: 0.1\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/cifar10/resnet18_mixup.yaml",
    "content": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 10\n\ncriterion:\n  name: CutMixCrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n  use_mixup: True\n"
  },
  {
    "path": "configs/cifar10/resnet50.yaml",
    "content": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet50\n  num_classes: 10\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/cifar10/toy_cifar.yaml",
    "content": "num_classes: 10\nepochs: 80\ngrad_clip: 5.0\nlog_frequency: 50\n\nmodel:\n  name: ToyModel\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.025\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/cifar10/toy_cifar_madrys.yaml",
    "content": "num_classes: 10\nepochs: 60\ngrad_clip: 5.0\nlog_frequency: 50\n\nmodel:\n  name: ToyModel\n\ncriterion:\n  name: MadrysLoss\n  epsilon: 0.03137254901\n  perturb_steps: 10\n  step_size: 0.00784313725\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: MultiStepLR\n  milestones: [75, 90, 100]\n  gamma: 0.1\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/cifar100/dense121.yaml",
    "content": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: DenseNet121\n  num_classes: 100\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-5\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 96\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/cifar100/resnet18.yaml",
    "content": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 100\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-5\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/cifar100/resnet18_madrys.yaml",
    "content": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 100\n\ncriterion:\n  name: MadrysLoss\n  epsilon: 0.03137254901\n  perturb_steps: 10\n  step_size: 0.00784313725\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: MultiStepLR\n  milestones: [75, 90, 100]\n  gamma: 0.1\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/cifar100/resnet50.yaml",
    "content": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet50\n  num_classes: 100\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-5\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/cifar101/resnet18.yaml",
    "content": "num_classes: 101\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 101\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-5\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/face/InceptionResnet.yaml",
    "content": "num_classes: 10575\nepochs: 50\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: InceptionResnetV1\n  num_classes: $num_classes\n  # pretrained: casia-webface\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.05\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: MultiStepLR\n  milestones: [30, 40]\n  gamma: 0.1\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 96\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/imagenet-mini/dense121.yaml",
    "content": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: densenet121\n  num_classes: 100\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-5\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 96\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/imagenet-mini/resnet18.yaml",
    "content": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: resnet18\n  num_classes: 100\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-5\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/imagenet-mini/resnet50.yaml",
    "content": "num_classes: 100\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: resnet50\n  num_classes: 100\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-5\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/svhn/dense121.yaml",
    "content": "num_classes: 10\nepochs: 30\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: DenseNet121\n  num_classes: 10\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 96\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/svhn/resnet18.yaml",
    "content": "num_classes: 10\nepochs: 30\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 10\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/svhn/resnet18_madrys.yaml",
    "content": "num_classes: 10\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 10\n\ncriterion:\n  name: MadrysLoss\n  epsilon: 0.03137254901\n  perturb_steps: 10\n  step_size: 0.00784313725\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: MultiStepLR\n  milestones: [75, 90, 100]\n  gamma: 0.1\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/svhn/resnet50.yaml",
    "content": "num_classes: 10\nepochs: 30\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet50\n  num_classes: 10\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-4\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/tiny-imagenet/dense121.yaml",
    "content": "num_classes: 1000\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: DenseNet121\n  num_classes: 1000\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-5\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 96\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/tiny-imagenet/resnet18.yaml",
    "content": "num_classes: 1000\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet18\n  num_classes: 1000\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-5\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "configs/tiny-imagenet/resnet50.yaml",
    "content": "num_classes: 1000\nepochs: 100\ngrad_clip: 5.0\nlog_frequency: 100\n\nmodel:\n  name: ResNet50\n  num_classes: 1000\n\ncriterion:\n  name: CrossEntropyLoss\n\noptimizer:\n  name: SGD\n  lr: 0.1\n  weight_decay: 5.e-5\n  momentum: 0.9\n\nscheduler:\n  name: CosineAnnealingLR\n  T_max: $epochs\n  eta_min: 0.0\n\ndataset:\n  name: DatasetGenerator\n  train_batch_size: 128\n  eval_batch_size: 128\n"
  },
  {
    "path": "dataset.py",
    "content": "import copy\nimport os\nimport collections\nimport numpy as np\nimport torch\nimport util\nimport random\nimport mlconfig\nimport pandas\nfrom util import onehot, rand_bbox\nfrom torch.utils.data.dataset import Dataset\nfrom functools import partial\nfrom PIL import Image, ImageFilter\nfrom sklearn.model_selection import train_test_split\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nfrom fast_autoaugment.FastAutoAugment.archive import fa_reduced_cifar10\nfrom fast_autoaugment.FastAutoAugment.augmentations import apply_augment\nif torch.cuda.is_available():\n    device = torch.device('cuda')\nelse:\n    device = torch.device('cpu')\n\n# Datasets\ntransform_options = {\n    \"CIFAR10\": {\n        \"train_transform\": [transforms.RandomCrop(32, padding=4),\n                            transforms.RandomHorizontalFlip(),\n                            transforms.ToTensor()],\n        \"test_transform\": [transforms.ToTensor()]},\n    \"CIFAR100\": {\n         \"train_transform\": [transforms.RandomCrop(32, padding=4),\n                             transforms.RandomHorizontalFlip(),\n                             transforms.RandomRotation(20),\n                             transforms.ToTensor()],\n         \"test_transform\": [transforms.ToTensor()]},\n    \"SVHN\": {\n        \"train_transform\": [transforms.ToTensor()],\n        \"test_transform\": [transforms.ToTensor()]},\n    \"ImageNet\": {\n        \"train_transform\": [transforms.RandomResizedCrop(224),\n                            transforms.RandomHorizontalFlip(),\n                            transforms.ColorJitter(brightness=0.4,\n                                                   contrast=0.4,\n                                                   saturation=0.4,\n                                                   hue=0.2),\n                            transforms.ToTensor()],\n        \"test_transform\": [transforms.Resize(256),\n                           
transforms.CenterCrop(224),\n                           transforms.ToTensor()]},\n    \"TinyImageNet\": {\n        \"train_transform\": [transforms.CenterCrop(256),\n                            transforms.Resize((32, 32)),\n                            transforms.RandomHorizontalFlip(),\n                            transforms.ToTensor()],\n        \"test_transform\": [transforms.Resize((32, 32)),\n                           transforms.ToTensor()]},\n    'CatDog': {\n        \"train_transform\": [transforms.Resize((32, 32)),\n                            transforms.ToTensor()],\n        \"test_transform\": [transforms.Resize((32, 32)),\n                           transforms.ToTensor()]},\n    'CelebA': {\n        \"train_transform\": [transforms.CenterCrop((128, 128)),\n                            transforms.RandomHorizontalFlip(),\n                            transforms.ToTensor()],\n        \"test_transform\": [transforms.CenterCrop((128, 128)),\n                           transforms.ToTensor()]},\n    'FaceScrub': {\n        \"train_transform\": [transforms.RandomHorizontalFlip(),\n                            transforms.ToTensor()],\n        \"test_transform\": [transforms.Resize((128, 128)),\n                           transforms.ToTensor()]},\n    'WebFace': {\n        \"train_transform\": [transforms.RandomHorizontalFlip(),\n                            transforms.ToTensor()],\n        \"test_transform\": [transforms.ToTensor()]},\n}\ntransform_options['PoisonCIFAR10'] = transform_options['CIFAR10']\ntransform_options['PoisonCIFAR100'] = transform_options['CIFAR100']\ntransform_options['PoisonCIFAR101'] = transform_options['CIFAR100']\ntransform_options['PoisonSVHN'] = transform_options['SVHN']\ntransform_options['ImageNetMini'] = transform_options['ImageNet']\ntransform_options['PoisonImageNetMini'] = transform_options['ImageNet']\ntransform_options['CelebAMini'] = transform_options['CelebA']\n\n\n@mlconfig.register\nclass DatasetGenerator():\n    def 
__init__(self, train_batch_size=128, eval_batch_size=256, num_of_workers=4,\n                 train_data_path='../datasets/', train_data_type='CIFAR10', seed=0,\n                 test_data_path='../datasets/', test_data_type='CIFAR10', fa=False,\n                 no_train_augments=False, poison_rate=1.0, perturb_type='classwise',\n                 perturb_tensor_filepath=None, patch_location='center', img_denoise=False,\n                 add_uniform_noise=False, poison_classwise=False, poison_classwise_idx=None,\n                 use_cutout=None, use_cutmix=False, use_mixup=False):\n\n        np.random.seed(seed)\n        self.train_batch_size = train_batch_size\n        self.eval_batch_size = eval_batch_size\n        self.num_of_workers = num_of_workers\n        self.seed = seed\n        self.train_data_type = train_data_type\n        self.test_data_type = test_data_type\n        self.train_data_path = train_data_path\n        self.test_data_path = test_data_path\n\n        train_transform = transform_options[train_data_type]['train_transform']\n        test_transform = transform_options[test_data_type]['test_transform']\n        train_transform = transforms.Compose(train_transform)\n        test_transform = transforms.Compose(test_transform)\n        if no_train_augments:\n            train_transform = test_transform\n\n        if fa:\n            # FastAutoAugment\n            train_transform.transforms.insert(0, Augmentation(fa_reduced_cifar10()))\n        elif use_cutout is not None:\n            print('Using Cutout')\n            train_transform.transforms.append(Cutout(16))\n\n        # Training Datasets\n        if train_data_type == 'CIFAR10':\n            num_of_classes = 10\n            train_dataset = datasets.CIFAR10(root=train_data_path, train=True,\n                                             download=True, transform=train_transform)\n        elif train_data_type == 'PoisonCIFAR10':\n            num_of_classes = 10\n            train_dataset = 
PoisonCIFAR10(root=train_data_path, transform=train_transform,\n                                          poison_rate=poison_rate, perturb_type=perturb_type,\n                                          patch_location=patch_location, seed=seed, img_denoise=img_denoise,\n                                          perturb_tensor_filepath=perturb_tensor_filepath,\n                                          add_uniform_noise=add_uniform_noise,\n                                          poison_classwise=poison_classwise,\n                                          poison_classwise_idx=poison_classwise_idx)\n        elif train_data_type == 'CIFAR100':\n            num_of_classes = 100\n            train_dataset = datasets.CIFAR100(root=train_data_path, train=True,\n                                              download=True, transform=train_transform)\n        elif train_data_type == 'PoisonCIFAR100':\n            num_of_classes = 100\n            train_dataset = PoisonCIFAR100(root=train_data_path, transform=train_transform,\n                                           poison_rate=poison_rate, perturb_type=perturb_type,\n                                           patch_location=patch_location, seed=seed, img_denoise=img_denoise,\n                                           perturb_tensor_filepath=perturb_tensor_filepath,\n                                           add_uniform_noise=add_uniform_noise,\n                                           poison_classwise=poison_classwise)\n        elif train_data_type == 'PoisonCIFAR101':\n            num_of_classes = 101\n            poison_cifar10 = PoisonCIFAR10(root=train_data_path, transform=train_transform,\n                                           poison_rate=poison_rate, perturb_type=perturb_type,\n                                           patch_location=patch_location, seed=seed, img_denoise=img_denoise,\n                                           perturb_tensor_filepath=perturb_tensor_filepath,\n                              
             add_uniform_noise=add_uniform_noise,\n                                           poison_classwise=poison_classwise,\n                                           poison_classwise_idx=poison_classwise_idx)\n            train_dataset = PoisonCIFAR101(train_data_path, split='poison_train',\n                                           transform=train_transform, seed=0,\n                                           poisn_cifar10_data=poison_cifar10)\n        elif train_data_type == 'SVHN':\n            num_of_classes = 10\n            train_dataset = datasets.SVHN(root=train_data_path, split='train',\n                                          download=True, transform=train_transform)\n        elif train_data_type == 'PoisonSVHN':\n            num_of_classes = 10\n            train_dataset = PoisonSVHN(root=train_data_path, split='train', transform=train_transform,\n                                       poison_rate=poison_rate, perturb_type=perturb_type,\n                                       patch_location=patch_location, seed=seed, img_denoise=img_denoise,\n                                       perturb_tensor_filepath=perturb_tensor_filepath,\n                                       add_uniform_noise=add_uniform_noise,\n                                       poison_classwise=poison_classwise)\n        elif train_data_type == 'TinyImageNet':\n            num_of_classes = 1000\n            train_dataset = datasets.ImageNet(root=train_data_path, split='train',\n                                              transform=train_transform)\n        elif train_data_type == 'ImageNetMini':\n            num_of_classes = 100\n            train_dataset = ImageNetMini(root=train_data_path, split='train',\n                                         transform=train_transform)\n        elif train_data_type == 'PoisonImageNetMini':\n            num_of_classes = 100\n            train_dataset = PoisonImageNetMini(root=train_data_path, split='train', seed=seed,\n                     
                          transform=train_transform, poison_rate=poison_rate,\n                                               perturb_tensor_filepath=perturb_tensor_filepath)\n        elif train_data_type == 'CatDog':\n            train_dataset = CatDogDataset(root=train_data_path, split='train',\n                                          transform=train_transform)\n        elif train_data_type == 'CelebAMini':\n            train_dataset = CelebAMini(root=train_data_path, split=\"all\",\n                                       target_type=\"identity\", transform=train_transform)\n            test_dataset = CelebAMini(root=train_data_path, split=\"all\",\n                                      target_type=\"identity\", transform=test_transform)\n        elif train_data_type == 'WebFace':\n            train_dataset = datasets.ImageFolder(root=train_data_path, transform=train_transform)\n            test_dataset = datasets.ImageFolder(root=test_data_path, transform=test_transform)\n        elif train_data_type == 'CelebA':\n            train_dataset = datasets.CelebA(root=train_data_path, split=\"all\",\n                                            target_type=\"identity\", transform=train_transform)\n            test_dataset = datasets.CelebA(root=train_data_path, split=\"all\",\n                                           target_type=\"identity\", transform=test_transform)\n        else:\n            raise('Training Dataset type %s not implemented' % train_data_type)\n\n        # Test Datset\n        if test_data_type == 'CIFAR10':\n            test_dataset = datasets.CIFAR10(root=test_data_path, train=False,\n                                            download=True, transform=test_transform)\n        elif test_data_type == 'PoisonCIFAR10':\n            test_dataset = PoisonCIFAR10(root=test_data_path, train=False, transform=test_transform,\n                                         poison_rate=poison_rate, perturb_type=perturb_type,\n                                    
     patch_location=patch_location, seed=seed, img_denoise=img_denoise,\n                                         perturb_tensor_filepath=perturb_tensor_filepath,\n                                         add_uniform_noise=add_uniform_noise,\n                                         poison_classwise=poison_classwise,\n                                         poison_classwise_idx=poison_classwise_idx)\n\n        elif test_data_type == 'CIFAR100':\n            test_dataset = datasets.CIFAR100(root=test_data_path, train=False,\n                                             download=True, transform=test_transform)\n        elif test_data_type == 'PoisonCIFAR100':\n            test_dataset = PoisonCIFAR100(root=test_data_path, train=False, transform=test_transform,\n                                          poison_rate=poison_rate, perturb_type=perturb_type,\n                                          patch_location=patch_location, seed=seed, img_denoise=img_denoise,\n                                          perturb_tensor_filepath=perturb_tensor_filepath,\n                                          add_uniform_noise=add_uniform_noise,\n                                          poison_classwise=poison_classwise)\n        elif test_data_type == 'PoisonCIFAR101':\n            test_dataset = PoisonCIFAR101(test_data_path, split='test',\n                                          transform=test_transform, seed=0,\n                                          poisn_cifar10_data=poison_cifar10)\n        elif test_data_type == 'SVHN':\n            test_dataset = datasets.SVHN(root=test_data_path, split='test',\n                                         download=True, transform=test_transform)\n        elif test_data_type == 'PoisonSVHN':\n            test_dataset = PoisonSVHN(root=test_data_path, split='test', transform=test_transform,\n                                       poison_rate=poison_rate, perturb_type=perturb_type,\n                                       
patch_location=patch_location, seed=seed, img_denoise=img_denoise,\n                                       perturb_tensor_filepath=perturb_tensor_filepath,\n                                       add_uniform_noise=add_uniform_noise,\n                                       poison_classwise=poison_classwise)\n        elif test_data_type == 'ImageNetMini':\n            test_dataset = ImageNetMini(root=test_data_path, split='val',\n                                        transform=test_transform)\n        elif test_data_type == 'TinyImageNet':\n            test_dataset = datasets.ImageNet(root=test_data_path, split='val',\n                                             transform=test_transform)\n        elif test_data_type == 'PoisonImageNetMini':\n            test_dataset = PoisonImageNetMini(root=test_data_path, split='val', seed=0,\n                                              transform=test_transform, poison_rate=poison_rate,\n                                              perturb_tensor_filepath=perturb_tensor_filepath)\n        elif test_data_type == 'CatDog':\n            # Cat Dog only used for transfer exp, no test dataset\n            test_dataset = CatDogDataset(root=train_data_path, split='train',\n                                         transform=train_transform)\n        elif test_data_type == 'CelebAMini' or 'CelebA':\n            pass\n        elif test_data_type == 'FaceScrub' or test_data_type == 'WebFace':\n            pass\n        else:\n            raise('Test Dataset type %s not implemented' % test_data_type)\n\n        if use_cutmix:\n            train_dataset = CutMix(dataset=train_dataset, num_class=num_of_classes)\n        elif use_mixup:\n            train_dataset = MixUp(dataset=train_dataset, num_class=num_of_classes)\n\n        self.datasets = {\n            'train_dataset': train_dataset,\n            'test_dataset': test_dataset,\n        }\n        return\n\n    def getDataLoader(self, train_shuffle=True, train_drop_last=True):\n        
data_loaders = {}\n\n        data_loaders['train_dataset'] = DataLoader(dataset=self.datasets['train_dataset'],\n                                                   batch_size=self.train_batch_size,\n                                                   shuffle=train_shuffle, pin_memory=True,\n                                                   drop_last=train_drop_last, num_workers=self.num_of_workers)\n\n        data_loaders['test_dataset'] = DataLoader(dataset=self.datasets['test_dataset'],\n                                                  batch_size=self.eval_batch_size,\n                                                  shuffle=False, pin_memory=True,\n                                                  drop_last=False, num_workers=self.num_of_workers)\n\n        return data_loaders\n\n    def _split_validation_set(self, train_portion, train_shuffle=True, train_drop_last=True):\n        np.random.seed(self.seed)\n        train_subset = copy.deepcopy(self.datasets['train_dataset'])\n        valid_subset = copy.deepcopy(self.datasets['train_dataset'])\n\n        if self.train_data_type == 'ImageNet' or self.train_data_type == 'ImageNetMini' or self.train_data_type == 'TinyImageNet' or self.train_data_type == 'PoisonImageNetMini':\n            data, targets = list(zip(*self.datasets['train_dataset'].samples))\n            datasplit = train_test_split(data, targets, test_size=1-train_portion,\n                                         train_size=train_portion, shuffle=True, stratify=targets)\n            train_D, valid_D, train_L, valid_L = datasplit\n            print('Train Labels: ', np.array(train_L))\n            print('Valid Labels: ', np.array(valid_L))\n            train_subset.samples = list(zip(train_D, train_L))\n            valid_subset.samples = list(zip(valid_D, valid_L))\n        elif self.train_data_type == 'SVHN':\n            data, targets = self.datasets['train_dataset'].data, self.datasets['train_dataset'].labels\n            datasplit = 
train_test_split(data, targets, test_size=1-train_portion,\n                                         train_size=train_portion, shuffle=True, stratify=targets)\n            train_D, valid_D, train_L, valid_L = datasplit\n            print('Train Labels: ', np.array(train_L))\n            print('Valid Labels: ', np.array(valid_L))\n            train_subset.data = np.array(train_D)\n            valid_subset.data = np.array(valid_D)\n            train_subset.labels = train_L\n            valid_subset.labels = valid_L\n        else:\n            datasplit = train_test_split(self.datasets['train_dataset'].data,\n                                         self.datasets['train_dataset'].targets,\n                                         test_size=1-train_portion, train_size=train_portion,\n                                         shuffle=True, stratify=self.datasets['train_dataset'].targets)\n            train_D, valid_D, train_L, valid_L = datasplit\n            print('Train Labels: ', np.array(train_L))\n            print('Valid Labels: ', np.array(valid_L))\n            train_subset.data = np.array(train_D)\n            valid_subset.data = np.array(valid_D)\n            train_subset.targets = train_L\n            valid_subset.targets = valid_L\n\n        self.datasets['train_subset'] = train_subset\n        self.datasets['valid_subset'] = valid_subset\n        print(self.datasets)\n\n        data_loaders = {}\n\n        data_loaders['train_dataset'] = DataLoader(dataset=self.datasets['train_dataset'],\n                                                   batch_size=self.train_batch_size,\n                                                   shuffle=train_shuffle, pin_memory=True,\n                                                   drop_last=train_drop_last, num_workers=self.num_of_workers)\n\n        data_loaders['test_dataset'] = DataLoader(dataset=self.datasets['test_dataset'],\n                                                  batch_size=self.eval_batch_size,\n             
                                     shuffle=False, pin_memory=True,\n                                                  drop_last=False, num_workers=self.num_of_workers)\n\n        data_loaders['train_subset'] = DataLoader(dataset=self.datasets['train_subset'],\n                                                  batch_size=self.train_batch_size,\n                                                  shuffle=train_shuffle, pin_memory=True,\n                                                  drop_last=train_drop_last, num_workers=self.num_of_workers)\n\n        data_loaders['valid_subset'] = DataLoader(dataset=self.datasets['valid_subset'],\n                                                  batch_size=self.eval_batch_size,\n                                                  shuffle=False, pin_memory=True,\n                                                  drop_last=False, num_workers=self.num_of_workers)\n        return data_loaders\n\n\ndef patch_noise_extend_to_img(noise, image_size=[32, 32, 3], patch_location='center'):\n    h, w, c = image_size[0], image_size[1], image_size[2]\n    mask = np.zeros((h, w, c), np.float32)\n    x_len, y_len = noise.shape[0], noise.shape[1]\n\n    if patch_location == 'center' or (h == w == x_len == y_len):\n        x = h // 2\n        y = w // 2\n    elif patch_location == 'random':\n        x = np.random.randint(x_len // 2, w - x_len // 2)\n        y = np.random.randint(y_len // 2, h - y_len // 2)\n    else:\n        raise('Invalid patch location')\n\n    x1 = np.clip(x - x_len // 2, 0, h)\n    x2 = np.clip(x + x_len // 2, 0, h)\n    y1 = np.clip(y - y_len // 2, 0, w)\n    y2 = np.clip(y + y_len // 2, 0, w)\n    mask[x1: x2, y1: y2, :] = noise\n    return mask\n\n\nclass PoisonCIFAR10(datasets.CIFAR10):\n    def __init__(self, root, train=True, transform=None, target_transform=None,\n                 download=False, poison_rate=1.0, perturb_tensor_filepath=None,\n                 seed=0, perturb_type='classwise', patch_location='center', 
img_denoise=False,\n                 add_uniform_noise=False, poison_classwise=False, poison_classwise_idx=None):\n        super(PoisonCIFAR10, self).__init__(root=root, train=train, download=download, transform=transform, target_transform=target_transform)\n        self.perturb_tensor = torch.load(perturb_tensor_filepath, map_location=device)\n        print(self.perturb_tensor)\n        if len(self.perturb_tensor.shape) == 4:\n            self.perturb_tensor = self.perturb_tensor.mul(255).clamp_(0, 255).permute(0, 2, 3, 1).to('cpu').numpy()\n        else:\n            self.perturb_tensor = self.perturb_tensor.mul(255).clamp_(0, 255).permute(0, 1, 3, 4, 2).to('cpu').numpy()\n        self.patch_location = patch_location\n        self.img_denoise = img_denoise\n        self.data = self.data.astype(np.float32)\n        # Check Shape\n        target_dim = self.perturb_tensor.shape[0] if len(self.perturb_tensor.shape) == 4 else self.perturb_tensor.shape[1]\n        if perturb_type == 'samplewise' and target_dim != len(self):\n            raise('Poison Perturb Tensor size not match for samplewise')\n        elif perturb_type == 'classwise' and target_dim != 10:\n            raise('Poison Perturb Tensor size not match for classwise')\n\n        # Random Select Poison Targets\n        self.poison_samples = collections.defaultdict(lambda: False)\n        self.poison_class = []\n        if poison_classwise:\n            targets = list(range(0, 10))\n            if poison_classwise_idx is None:\n                self.poison_class = sorted(np.random.choice(targets, int(len(targets) * poison_rate), replace=False).tolist())\n            else:\n                self.poison_class = poison_classwise_idx\n            self.poison_samples_idx = []\n            for i, label in enumerate(self.targets):\n                if label in self.poison_class:\n                    self.poison_samples_idx.append(i)\n        else:\n            targets = list(range(0, len(self)))\n            
self.poison_samples_idx = sorted(np.random.choice(targets, int(len(targets) * poison_rate), replace=False).tolist())\n\n        for idx in self.poison_samples_idx:\n            self.poison_samples[idx] = True\n            if len(self.perturb_tensor.shape) == 5:\n                perturb_id = random.choice(range(self.perturb_tensor.shape[0]))\n                perturb_tensor = self.perturb_tensor[perturb_id]\n            else:\n                perturb_tensor = self.perturb_tensor\n            if perturb_type == 'samplewise':\n                # Sample Wise poison\n                noise = perturb_tensor[idx]\n                noise = patch_noise_extend_to_img(noise, [32, 32, 3], patch_location=self.patch_location)\n            elif perturb_type == 'classwise':\n                # Class Wise Poison\n                noise = perturb_tensor[self.targets[idx]]\n                noise = patch_noise_extend_to_img(noise, [32, 32, 3], patch_location=self.patch_location)\n            if add_uniform_noise:\n                noise += np.random.uniform(0, 8, (32, 32, 3))\n\n            self.data[idx] = self.data[idx] + noise\n            self.data[idx] = np.clip(self.data[idx], a_min=0, a_max=255)\n        self.data = self.data.astype(np.uint8)\n        print('add_uniform_noise: ', add_uniform_noise)\n        print(self.perturb_tensor.shape)\n        print('Poison samples: %d/%d' % (len(self.poison_samples), len(self)))\n\n\nclass PoisonCIFAR100(datasets.CIFAR100):\n    def __init__(self, root, train=True, transform=None, target_transform=None,\n                 download=False, poison_rate=1.0, perturb_tensor_filepath=None,\n                 seed=0, perturb_type='classwise', patch_location='center', img_denoise=False,\n                 add_uniform_noise=False, poison_classwise=False):\n        super(PoisonCIFAR100, self).__init__(root=root, train=train, download=download, transform=transform, target_transform=target_transform)\n        self.perturb_tensor = 
torch.load(perturb_tensor_filepath, map_location=device)\n        self.perturb_tensor = self.perturb_tensor.mul(255).clamp_(0, 255).permute(0, 2, 3, 1).to('cpu').numpy()\n        self.patch_location = patch_location\n        self.img_denoise = img_denoise\n        self.data = self.data.astype(np.float32)\n\n        # Check Shape\n        if perturb_type == 'samplewise' and self.perturb_tensor.shape[0] != len(self):\n            raise('Poison Perturb Tensor size not match for samplewise')\n        elif perturb_type == 'classwise' and self.perturb_tensor.shape[0] != 100:\n            raise('Poison Perturb Tensor size not match for classwise')\n\n        # Random Select Poison Targets\n        self.poison_samples = collections.defaultdict(lambda: False)\n        self.poison_class = []\n        if poison_classwise:\n            targets = list(range(0, 100))\n            self.poison_class = sorted(np.random.choice(targets, int(len(targets) * poison_rate), replace=False).tolist())\n            self.poison_samples_idx = []\n            for i, label in enumerate(self.targets):\n                if label in self.poison_class:\n                    self.poison_samples_idx.append(i)\n        else:\n            targets = list(range(0, len(self)))\n            self.poison_samples_idx = sorted(np.random.choice(targets, int(len(targets) * poison_rate), replace=False).tolist())\n\n        for idx in self.poison_samples_idx:\n            self.poison_samples[idx] = True\n            if perturb_type == 'samplewise':\n                # Sample Wise poison\n                noise = self.perturb_tensor[idx]\n                noise = patch_noise_extend_to_img(noise, [32, 32, 3], patch_location=self.patch_location)\n            elif perturb_type == 'classwise':\n                # Class Wise Poison\n                noise = self.perturb_tensor[self.targets[idx]]\n                noise = patch_noise_extend_to_img(noise, [32, 32, 3], patch_location=self.patch_location)\n\n            if 
add_uniform_noise:\n                noise = np.random.uniform(0, 8, (32, 32, 3))\n\n            self.data[idx] += noise\n            self.data[idx] = np.clip(self.data[idx], 0, 255)\n\n        self.data = self.data.astype(np.uint8)\n        print('add_uniform_noise: ', add_uniform_noise)\n        print(self.perturb_tensor.shape)\n        print('Poison samples: %d/%d' % (len(self.poison_samples), len(self)))\n\n\nclass PoisonCIFAR101(datasets.VisionDataset):\n    def __init__(self, root, split='poison_train', transform=None, target_transform=None,\n                 poisn_cifar10_data=None, seed=0):\n        np.random.seed(seed)\n        self.transform = transform\n        self.root = root\n        if split == 'poison_train':\n            self.clean_cifar100 = datasets.CIFAR100(root=root, train=True, download=True, transform=None)\n            cifar10 = poisn_cifar10_data\n            cifar10_sample_count = 500\n        elif split == 'test':\n            self.clean_cifar100 = datasets.CIFAR100(root=root, train=False, download=True, transform=None)\n            cifar10 = datasets.CIFAR10(root=root, train=False, download=True, transform=None)\n            cifar10_sample_count = 100\n\n        self.data, self.targets = self.clean_cifar100.data, self.clean_cifar100.targets\n        print(self.clean_cifar100.class_to_idx)\n        # Add Ship samples of CIFAR10\n        ship_idx = np.where(np.array(cifar10.targets) == 8)[0]\n        selected_idx = np.random.choice(ship_idx, cifar10_sample_count, replace=False)\n        extra_samples, extra_targets = [], []\n        for idx in selected_idx:\n            extra_samples.append(cifar10.data[idx])\n            extra_targets.append(100)\n        self.data = np.concatenate((self.data, np.array(extra_samples)))\n        self.targets = self.targets + extra_targets\n        self.poison_samples_idx = np.array(range(len(self.clean_cifar100), len(self)))\n        self.poison_class = [100]\n\n    def __len__(self):\n        return 
len(self.data)\n\n    def __getitem__(self, index):\n        img, target = self.data[index], self.targets[index]\n        img = Image.fromarray(img)\n        if self.transform is not None:\n            img = self.transform(img)\n        return img, target\n\n\nclass PoisonSVHN(datasets.SVHN):\n    def __init__(self, root, split='train', transform=None, target_transform=None,\n                 download=False, poison_rate=1.0, perturb_tensor_filepath=None,\n                 seed=0, perturb_type='classwise', patch_location='center', img_denoise=False,\n                 add_uniform_noise=False, poison_classwise=False):\n        super(PoisonSVHN, self).__init__(root=root, split=split, download=download, transform=transform, target_transform=target_transform)\n        self.perturb_tensor = torch.load(perturb_tensor_filepath, map_location=device)\n        self.perturb_tensor = self.perturb_tensor.mul(255).clamp_(0, 255).to('cpu').numpy()\n        self.patch_location = patch_location\n        self.img_denoise = img_denoise\n        # Check Shape\n        if perturb_type == 'samplewise' and self.perturb_tensor.shape[0] != len(self):\n            raise('Poison Perturb Tensor size not match for samplewise')\n        elif perturb_type == 'classwise' and self.perturb_tensor.shape[0] != 10:\n            raise('Poison Perturb Tensor size not match for classwise')\n\n        self.data = self.data.astype(np.float32)\n\n        # Random Select Poison Targets\n        self.poison_samples = collections.defaultdict(lambda: False)\n        self.poison_class = []\n        if poison_classwise:\n            targets = list(range(0, 10))\n            self.poison_class = sorted(np.random.choice(targets, int(len(targets) * poison_rate), replace=False).tolist())\n            self.poison_samples_idx = []\n            for i, label in enumerate(self.labels):\n                if label in self.poison_class:\n                    self.poison_samples_idx.append(i)\n        else:\n            targets = 
list(range(0, len(self)))\n            self.poison_samples_idx = sorted(np.random.choice(targets, int(len(targets) * poison_rate), replace=False).tolist())\n\n        for idx in self.poison_samples_idx:\n            self.poison_samples[idx] = True\n            if perturb_type == 'samplewise':\n                # Sample Wise poison\n                noise = self.perturb_tensor[idx]\n                # noise = patch_noise_extend_to_img(noise, [32, 32, 3], patch_location=self.patch_location)\n            elif perturb_type == 'classwise':\n                # Class Wise Poison\n                noise = self.perturb_tensor[self.labels[idx]]\n                # noise = patch_noise_extend_to_img(noise, [32, 32, 3], patch_location=self.patch_location)\n\n            if add_uniform_noise:\n                noise = np.random.uniform(0, 8, (32, 32, 3))\n\n            self.data[idx] += noise\n            self.data[idx] = np.clip(self.data[idx], 0, 255)\n\n        self.data = self.data.astype(np.uint8)\n        print('add_uniform_noise: ', add_uniform_noise)\n        print(self.perturb_tensor.shape)\n        print('Poison samples: %d/%d' % (len(self.poison_samples), len(self)))\n\n\nclass ImageNetMini(datasets.ImageNet):\n    def __init__(self, root, split='train', **kwargs):\n        super(ImageNetMini, self).__init__(root, split=split, **kwargs)\n        self.new_targets = []\n        self.new_images = []\n        for i, (file, cls_id) in enumerate(self.imgs):\n            if cls_id <= 99:\n                self.new_targets.append(cls_id)\n                self.new_images.append((file, cls_id))\n        self.imgs = self.new_images\n        self.targets = self.new_targets\n        self.samples = self.imgs\n        print(len(self.samples))\n        print(len(self.targets))\n        return\n\n\nclass PoisonImageNetMini(ImageNetMini):\n    def __init__(self, root, split, poison_rate=1.0, seed=0,\n                 perturb_tensor_filepath=None, **kwargs):\n        super(PoisonImageNetMini, 
self).__init__(root=root, split=split, **kwargs)\n        np.random.seed(seed)\n        self.poison_rate = poison_rate\n        self.perturb_tensor = torch.load(perturb_tensor_filepath)\n        self.perturb_tensor = self.perturb_tensor.mul(255).clamp_(0, 255).permute(0, 2, 3, 1).to('cpu').numpy()\n\n        # Random Select Poison Targets\n        targets = list(range(0, len(self)))\n        self.poison_samples_idx = sorted(np.random.choice(targets, int(len(targets) * poison_rate), replace=False).tolist())\n        self.poison_samples = collections.defaultdict(lambda: False)\n        self.poison_class = []\n        for idx in self.poison_samples_idx:\n            self.poison_samples[idx] = True\n\n        print(self.perturb_tensor.shape)\n        print('Poison samples: %d/%d' % (len(self.poison_samples), len(self)))\n\n    def __getitem__(self, index):\n        path, target = self.samples[index]\n        sample = self.loader(path)\n        sample = np.array(transforms.RandomResizedCrop(224)(sample)).astype(np.float32)\n\n        if self.poison_samples[index]:\n            noise = self.perturb_tensor[target]\n            sample = sample + noise\n            sample = np.clip(sample, 0, 255)\n        sample = sample.astype(np.uint8)\n        sample = Image.fromarray(sample).convert('RGB')\n\n        if self.transform is not None:\n            sample = self.transform(sample)\n        if self.target_transform is not None:\n            target = self.target_transform(target)\n\n        return sample, target\n\n\nclass Augmentation(object):\n    def __init__(self, policies):\n        self.policies = policies\n\n    def __call__(self, img):\n        for _ in range(1):\n            policy = random.choice(self.policies)\n            for name, pr, level in policy:\n                if random.random() > pr:\n                    continue\n                img = apply_augment(img, name, level)\n        return img\n\n\nclass CatDogDataset(datasets.VisionDataset):\n    def 
__init__(self, root, split='train', transform=None, target_transform=None):\n        self.root = root\n        self.split = split\n        self.transform = transform\n        self.target_transform = target_transform\n        self.img_file_names = os.listdir(os.path.join(root, split))\n\n    def __len__(self):\n        return len(self.img_file_names)\n\n    def __getitem__(self, index):\n        filename = self.img_file_names[index]\n        label = filename[:3]\n        if label == 'cat':\n            label = 0\n        elif label == 'dog':\n            label = 1\n        else:\n            print(filename)\n            raise('Unknown label')\n\n        with open(os.path.join(self.root, self.split, filename), 'rb') as f:\n            img = Image.open(f).convert('RGB')\n\n        if self.transform is not None:\n            img = self.transform(img)\n        if self.target_transform is not None:\n            label = self.target_transform(label)\n\n        return img, label\n\n\nclass CelebAMini(datasets.CelebA):\n    def __init__(self, root, split=\"train\", target_type=\"attr\", transform=None,\n                 target_transform=None, download=False, num_of_classes=1000):\n        super(CelebAMini, self).__init__(root=root, split=split, target_type=target_type,\n                                         transform=transform, target_transform=target_transform,\n                                         download=False)\n\n        split_map = {\n            \"train\": 0,\n            \"valid\": 1,\n            \"test\": 2,\n            \"all\": None,\n        }\n        split_ = split_map[datasets.utils.verify_str_arg(split.lower(), \"split\", (\"train\", \"valid\", \"test\", \"all\"))]\n\n        fn = partial(os.path.join, self.root, self.base_folder)\n        splits = pandas.read_csv(fn(\"list_eval_partition.txt\"), delim_whitespace=True, header=None, index_col=0)\n        identity = pandas.read_csv(fn(\"identity_CelebA.txt\"), delim_whitespace=True, header=None, 
index_col=0)\n\n        mask = slice(None) if split_ is None else (splits[1] == split_)\n        identity = identity[mask]\n        identity = identity[identity[1] < num_of_classes]\n        self.filename = identity.index.values\n        self.identity = identity.values\n        print(self.identity)\n\n    def __len__(self):\n        return len(self.identity)\n\n    def __getitem__(self, index):\n        filename = self.filename[index]\n        target = self.identity[index][0]\n        X = Image.open(os.path.join(self.root, self.base_folder, \"img_align_celeba\", filename))\n        if self.transform is not None:\n            X = self.transform(X)\n        return X, target\n\n\nclass Cutout(object):\n    def __init__(self, length):\n        self.length = length\n\n    def __call__(self, img):\n        h, w = img.size(1), img.size(2)\n        mask = np.ones((h, w), np.float32)\n        y = np.random.randint(h)\n        x = np.random.randint(w)\n\n        y1 = np.clip(y - self.length // 2, 0, h)\n        y2 = np.clip(y + self.length // 2, 0, h)\n        x1 = np.clip(x - self.length // 2, 0, w)\n        x2 = np.clip(x + self.length // 2, 0, w)\n\n        mask[y1: y2, x1: x2] = 0.\n        mask = torch.from_numpy(mask)\n        mask = mask.expand_as(img)\n        img *= mask\n        return img\n\n\nclass CutMix(Dataset):\n    def __init__(self, dataset, num_class, num_mix=2, beta=1.0, prob=0.5):\n        self.dataset = dataset\n        self.num_class = num_class\n        self.num_mix = num_mix\n        self.beta = beta\n        self.prob = prob\n\n    def __getitem__(self, index):\n        img, lb = self.dataset[index]\n        lb_onehot = onehot(self.num_class, lb)\n\n        for _ in range(self.num_mix):\n            r = np.random.rand(1)\n            if self.beta <= 0 or r > self.prob:\n                continue\n\n            # generate mixed sample\n            lam = np.random.beta(self.beta, self.beta)\n            rand_index = random.choice(range(len(self)))\n\n  
          img2, lb2 = self.dataset[rand_index]\n            lb2_onehot = onehot(self.num_class, lb2)\n\n            bbx1, bby1, bbx2, bby2 = rand_bbox(img.size(), lam)\n            img[:, bbx1:bbx2, bby1:bby2] = img2[:, bbx1:bbx2, bby1:bby2]\n            lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (img.size()[-1] * img.size()[-2]))\n            lb_onehot = lb_onehot * lam + lb2_onehot * (1. - lam)\n\n        return img, lb_onehot\n\n    def __len__(self):\n        return len(self.dataset)\n\n\nclass MixUp(Dataset):\n    def __init__(self, dataset, num_class, num_mix=2, beta=1.0, prob=0.5):\n        self.dataset = dataset\n        self.num_class = num_class\n        self.num_mix = num_mix\n        self.beta = beta\n        self.prob = prob\n\n    def __getitem__(self, index):\n        img, lb = self.dataset[index]\n        lb_onehot = onehot(self.num_class, lb)\n\n        for _ in range(self.num_mix):\n            r = np.random.rand(1)\n            if self.beta <= 0 or r > self.prob:\n                continue\n\n            # generate mixed sample\n            lam = np.random.beta(self.beta, self.beta)\n            rand_index = random.choice(range(len(self)))\n\n            img2, lb2 = self.dataset[rand_index]\n            lb2_onehot = onehot(self.num_class, lb2)\n\n            img = img * lam + img2 * (1-lam)\n            lb_onehot = lb_onehot * lam + lb2_onehot * (1. - lam)\n\n        return img, lb_onehot\n\n    def __len__(self):\n        return len(self.dataset)\n"
  },
  {
    "path": "evaluator.py",
    "content": "import time\n\nimport models\nimport torch\nimport torch.optim as optim\nimport util\nfrom torch.autograd import Variable\n\nif torch.cuda.is_available():\n    device = torch.device('cuda')\nelse:\n    device = torch.device('cpu')\n\n\nclass Evaluator():\n    def __init__(self, data_loader, logger, config):\n        self.loss_meters = util.AverageMeter()\n        self.acc_meters = util.AverageMeter()\n        self.acc5_meters = util.AverageMeter()\n        self.criterion = torch.nn.CrossEntropyLoss()\n        self.data_loader = data_loader\n        self.logger = logger\n        self.log_frequency = config.log_frequency if config.log_frequency is not None else 100\n        self.config = config\n        self.current_acc = 0\n        self.current_acc_top5 = 0\n        self.confusion_matrix = torch.zeros(config.num_classes, config.num_classes)\n        return\n\n    def _reset_stats(self):\n        self.loss_meters = util.AverageMeter()\n        self.acc_meters = util.AverageMeter()\n        self.acc5_meters = util.AverageMeter()\n        self.confusion_matrix = torch.zeros(self.config.num_classes, self.config.num_classes)\n        return\n\n    def eval(self, epoch, model):\n        model.eval()\n        for i, (images, labels) in enumerate(self.data_loader[\"test_dataset\"]):\n            start = time.time()\n            log_payload = self.eval_batch(images=images, labels=labels, model=model)\n            end = time.time()\n            time_used = end - start\n        display = util.log_display(epoch=epoch,\n                                   global_step=i,\n                                   time_elapse=time_used,\n                                   **log_payload)\n        if self.logger is not None:\n            self.logger.info(display)\n        return\n\n    def eval_batch(self, images, labels, model):\n        images, labels = images.to(device, non_blocking=True), labels.to(device, non_blocking=True)\n        with torch.no_grad():\n            
pred = model(images)\n            loss = self.criterion(pred, labels)\n            acc, acc5 = util.accuracy(pred, labels, topk=(1, 5))\n            _, preds = torch.max(pred, 1)\n            for t, p in zip(labels.view(-1), preds.view(-1)):\n                self.confusion_matrix[t.long(), p.long()] += 1\n\n        self.loss_meters.update(loss.item(), n=images.size(0))\n        self.acc_meters.update(acc.item(), n=images.size(0))\n        self.acc5_meters.update(acc5.item(), n=images.size(0))\n        payload = {\"acc\": acc.item(),\n                   \"acc_avg\": self.acc_meters.avg,\n                   \"acc5\": acc5.item(),\n                   \"acc5_avg\": self.acc5_meters.avg,\n                   \"loss\": loss.item(),\n                   \"loss_avg\": self.loss_meters.avg}\n        return payload\n\n    def _pgd_whitebox(self, model, X, y, random_start=True, epsilon=0.031, num_steps=20, step_size=0.003):\n        model.eval()\n        out = model(X)\n        acc = (out.data.max(1)[1] == y.data).float().sum()\n        X_pgd = Variable(X.data, requires_grad=True)\n        if random_start:\n            random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).to(device)\n            X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)\n\n        for _ in range(num_steps):\n            opt = optim.SGD([X_pgd], lr=1e-3)\n            opt.zero_grad()\n\n            with torch.enable_grad():\n                loss = torch.nn.CrossEntropyLoss()(model(X_pgd), y)\n            loss.backward()\n            eta = step_size * X_pgd.grad.data.sign()\n            X_pgd = Variable(X_pgd.data + eta, requires_grad=True)\n            eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)\n            X_pgd = Variable(X.data + eta, requires_grad=True)\n            X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)\n        acc_pgd = (model(X_pgd).data.max(1)[1] == y.data).float().sum()\n        return acc.item(), acc_pgd.item()\n"
  },
  {
    "path": "fast_autoaugment/.gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/__init__.py",
    "content": ""
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/archive.py",
    "content": "# Policy found on CIFAR-10 and CIFAR-100\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import defaultdict\n\nfrom .augmentations import get_augment, augment_list\n\n\ndef arsaug_policy():\n    exp0_0 = [\n        [('Solarize', 0.66, 0.34), ('Equalize', 0.56, 0.61)],\n        [('Equalize', 0.43, 0.06), ('AutoContrast', 0.66, 0.08)],\n        [('Color', 0.72, 0.47), ('Contrast', 0.88, 0.86)],\n        [('Brightness', 0.84, 0.71), ('Color', 0.31, 0.74)],\n        [('Rotate', 0.68, 0.26), ('TranslateX', 0.38, 0.88)]]\n    exp0_1 = [\n        [('TranslateY', 0.88, 0.96), ('TranslateY', 0.53, 0.79)],\n        [('AutoContrast', 0.44, 0.36), ('Solarize', 0.22, 0.48)],\n        [('AutoContrast', 0.93, 0.32), ('Solarize', 0.85, 0.26)],\n        [('Solarize', 0.55, 0.38), ('Equalize', 0.43, 0.48)],\n        [('TranslateY', 0.72, 0.93), ('AutoContrast', 0.83, 0.95)]]\n    exp0_2 = [\n        [('Solarize', 0.43, 0.58), ('AutoContrast', 0.82, 0.26)],\n        [('TranslateY', 0.71, 0.79), ('AutoContrast', 0.81, 0.94)],\n        [('AutoContrast', 0.92, 0.18), ('TranslateY', 0.77, 0.85)],\n        [('Equalize', 0.71, 0.69), ('Color', 0.23, 0.33)],\n        [('Sharpness', 0.36, 0.98), ('Brightness', 0.72, 0.78)]]\n    exp0_3 = [\n        [('Equalize', 0.74, 0.49), ('TranslateY', 0.86, 0.91)],\n        [('TranslateY', 0.82, 0.91), ('TranslateY', 0.96, 0.79)],\n        [('AutoContrast', 0.53, 0.37), ('Solarize', 0.39, 0.47)],\n        [('TranslateY', 0.22, 0.78), ('Color', 0.91, 0.65)],\n        [('Brightness', 0.82, 0.46), ('Color', 0.23, 0.91)]]\n    exp0_4 = [\n        [('Cutout', 0.27, 0.45), ('Equalize', 0.37, 0.21)],\n        [('Color', 0.43, 0.23), ('Brightness', 0.65, 0.71)],\n        [('ShearX', 0.49, 0.31), ('AutoContrast', 0.92, 0.28)],\n        [('Equalize', 0.62, 0.59), ('Equalize', 0.38, 0.91)],\n        [('Solarize', 0.57, 0.31), ('Equalize', 0.61, 0.51)]]\n\n    exp0_5 
= [\n        [('TranslateY', 0.29, 0.35), ('Sharpness', 0.31, 0.64)],\n        [('Color', 0.73, 0.77), ('TranslateX', 0.65, 0.76)],\n        [('ShearY', 0.29, 0.74), ('Posterize', 0.42, 0.58)],\n        [('Color', 0.92, 0.79), ('Equalize', 0.68, 0.54)],\n        [('Sharpness', 0.87, 0.91), ('Sharpness', 0.93, 0.41)]]\n    exp0_6 = [\n        [('Solarize', 0.39, 0.35), ('Color', 0.31, 0.44)],\n        [('Color', 0.33, 0.77), ('Color', 0.25, 0.46)],\n        [('ShearY', 0.29, 0.74), ('Posterize', 0.42, 0.58)],\n        [('AutoContrast', 0.32, 0.79), ('Cutout', 0.68, 0.34)],\n        [('AutoContrast', 0.67, 0.91), ('AutoContrast', 0.73, 0.83)]]\n\n    return exp0_0 + exp0_1 + exp0_2 + exp0_3 + exp0_4 + exp0_5 + exp0_6\n\n\ndef autoaug2arsaug(f):\n    def autoaug():\n        mapper = defaultdict(lambda: lambda x: x)\n        mapper.update({\n            'ShearX': lambda x: float_parameter(x, 0.3),\n            'ShearY': lambda x: float_parameter(x, 0.3),\n            'TranslateX': lambda x: int_parameter(x, 10),\n            'TranslateY': lambda x: int_parameter(x, 10),\n            'Rotate': lambda x: int_parameter(x, 30),\n            'Solarize': lambda x: 256 - int_parameter(x, 256),\n            'Posterize2': lambda x: 4 - int_parameter(x, 4),\n            'Contrast': lambda x: float_parameter(x, 1.8) + .1,\n            'Color': lambda x: float_parameter(x, 1.8) + .1,\n            'Brightness': lambda x: float_parameter(x, 1.8) + .1,\n            'Sharpness': lambda x: float_parameter(x, 1.8) + .1,\n            'CutoutAbs': lambda x: int_parameter(x, 20)\n        })\n\n        def low_high(name, prev_value):\n            _, low, high = get_augment(name)\n            return float(prev_value - low) / (high - low)\n\n        policies = f()\n        new_policies = []\n        for policy in policies:\n            new_policies.append([(name, pr, low_high(name, mapper[name](level))) for name, pr, level in policy])\n        return new_policies\n\n    return 
autoaug\n\n\n@autoaug2arsaug\ndef autoaug_paper_cifar10():\n    return [\n        [('Invert', 0.1, 7), ('Contrast', 0.2, 6)],\n        [('Rotate', 0.7, 2), ('TranslateXAbs', 0.3, 9)],\n        [('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)],\n        [('ShearY', 0.5, 8), ('TranslateYAbs', 0.7, 9)],\n        [('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)],\n        [('ShearY', 0.2, 7), ('Posterize2', 0.3, 7)],\n        [('Color', 0.4, 3), ('Brightness', 0.6, 7)],\n        [('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)],\n        [('Equalize', 0.6, 5), ('Equalize', 0.5, 1)],\n        [('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)],\n        [('Color', 0.7, 7), ('TranslateXAbs', 0.5, 8)],\n        [('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)],\n        [('TranslateYAbs', 0.4, 3), ('Sharpness', 0.2, 6)],\n        [('Brightness', 0.9, 6), ('Color', 0.2, 6)],\n        [('Solarize', 0.5, 2), ('Invert', 0.0, 3)],\n        [('Equalize', 0.2, 0), ('AutoContrast', 0.6, 0)],\n        [('Equalize', 0.2, 8), ('Equalize', 0.6, 4)],\n        [('Color', 0.9, 9), ('Equalize', 0.6, 6)],\n        [('AutoContrast', 0.8, 4), ('Solarize', 0.2, 8)],\n        [('Brightness', 0.1, 3), ('Color', 0.7, 0)],\n        [('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)],\n        [('TranslateYAbs', 0.9, 9), ('TranslateYAbs', 0.7, 9)],\n        [('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)],\n        [('Equalize', 0.8, 8), ('Invert', 0.1, 3)],\n        [('TranslateYAbs', 0.7, 9), ('AutoContrast', 0.9, 1)],\n    ]\n\n\n@autoaug2arsaug\ndef autoaug_policy():\n    \"\"\"AutoAugment policies found on Cifar.\"\"\"\n    exp0_0 = [\n        [('Invert', 0.1, 7), ('Contrast', 0.2, 6)],\n        [('Rotate', 0.7, 2), ('TranslateXAbs', 0.3, 9)],\n        [('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)],\n        [('ShearY', 0.5, 8), ('TranslateYAbs', 0.7, 9)],\n        [('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)]]\n    exp0_1 = [\n        [('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)],\n        
[('TranslateYAbs', 0.9, 9), ('TranslateYAbs', 0.7, 9)],\n        [('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)],\n        [('Equalize', 0.8, 8), ('Invert', 0.1, 3)],\n        [('TranslateYAbs', 0.7, 9), ('AutoContrast', 0.9, 1)]]\n    exp0_2 = [\n        [('Solarize', 0.4, 5), ('AutoContrast', 0.0, 2)],\n        [('TranslateYAbs', 0.7, 9), ('TranslateYAbs', 0.7, 9)],\n        [('AutoContrast', 0.9, 0), ('Solarize', 0.4, 3)],\n        [('Equalize', 0.7, 5), ('Invert', 0.1, 3)],\n        [('TranslateYAbs', 0.7, 9), ('TranslateYAbs', 0.7, 9)]]\n    exp0_3 = [\n        [('Solarize', 0.4, 5), ('AutoContrast', 0.9, 1)],\n        [('TranslateYAbs', 0.8, 9), ('TranslateYAbs', 0.9, 9)],\n        [('AutoContrast', 0.8, 0), ('TranslateYAbs', 0.7, 9)],\n        [('TranslateYAbs', 0.2, 7), ('Color', 0.9, 6)],\n        [('Equalize', 0.7, 6), ('Color', 0.4, 9)]]\n    exp1_0 = [\n        [('ShearY', 0.2, 7), ('Posterize2', 0.3, 7)],\n        [('Color', 0.4, 3), ('Brightness', 0.6, 7)],\n        [('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)],\n        [('Equalize', 0.6, 5), ('Equalize', 0.5, 1)],\n        [('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)]]\n    exp1_1 = [\n        [('Brightness', 0.3, 7), ('AutoContrast', 0.5, 8)],\n        [('AutoContrast', 0.9, 4), ('AutoContrast', 0.5, 6)],\n        [('Solarize', 0.3, 5), ('Equalize', 0.6, 5)],\n        [('TranslateYAbs', 0.2, 4), ('Sharpness', 0.3, 3)],\n        [('Brightness', 0.0, 8), ('Color', 0.8, 8)]]\n    exp1_2 = [\n        [('Solarize', 0.2, 6), ('Color', 0.8, 6)],\n        [('Solarize', 0.2, 6), ('AutoContrast', 0.8, 1)],\n        [('Solarize', 0.4, 1), ('Equalize', 0.6, 5)],\n        [('Brightness', 0.0, 0), ('Solarize', 0.5, 2)],\n        [('AutoContrast', 0.9, 5), ('Brightness', 0.5, 3)]]\n    exp1_3 = [\n        [('Contrast', 0.7, 5), ('Brightness', 0.0, 2)],\n        [('Solarize', 0.2, 8), ('Solarize', 0.1, 5)],\n        [('Contrast', 0.5, 1), ('TranslateYAbs', 0.2, 9)],\n        [('AutoContrast', 0.6, 5), 
('TranslateYAbs', 0.0, 9)],\n        [('AutoContrast', 0.9, 4), ('Equalize', 0.8, 4)]]\n    exp1_4 = [\n        [('Brightness', 0.0, 7), ('Equalize', 0.4, 7)],\n        [('Solarize', 0.2, 5), ('Equalize', 0.7, 5)],\n        [('Equalize', 0.6, 8), ('Color', 0.6, 2)],\n        [('Color', 0.3, 7), ('Color', 0.2, 4)],\n        [('AutoContrast', 0.5, 2), ('Solarize', 0.7, 2)]]\n    exp1_5 = [\n        [('AutoContrast', 0.2, 0), ('Equalize', 0.1, 0)],\n        [('ShearY', 0.6, 5), ('Equalize', 0.6, 5)],\n        [('Brightness', 0.9, 3), ('AutoContrast', 0.4, 1)],\n        [('Equalize', 0.8, 8), ('Equalize', 0.7, 7)],\n        [('Equalize', 0.7, 7), ('Solarize', 0.5, 0)]]\n    exp1_6 = [\n        [('Equalize', 0.8, 4), ('TranslateYAbs', 0.8, 9)],\n        [('TranslateYAbs', 0.8, 9), ('TranslateYAbs', 0.6, 9)],\n        [('TranslateYAbs', 0.9, 0), ('TranslateYAbs', 0.5, 9)],\n        [('AutoContrast', 0.5, 3), ('Solarize', 0.3, 4)],\n        [('Solarize', 0.5, 3), ('Equalize', 0.4, 4)]]\n    exp2_0 = [\n        [('Color', 0.7, 7), ('TranslateXAbs', 0.5, 8)],\n        [('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)],\n        [('TranslateYAbs', 0.4, 3), ('Sharpness', 0.2, 6)],\n        [('Brightness', 0.9, 6), ('Color', 0.2, 8)],\n        [('Solarize', 0.5, 2), ('Invert', 0.0, 3)]]\n    exp2_1 = [\n        [('AutoContrast', 0.1, 5), ('Brightness', 0.0, 0)],\n        [('CutoutAbs', 0.2, 4), ('Equalize', 0.1, 1)],\n        [('Equalize', 0.7, 7), ('AutoContrast', 0.6, 4)],\n        [('Color', 0.1, 8), ('ShearY', 0.2, 3)],\n        [('ShearY', 0.4, 2), ('Rotate', 0.7, 0)]]\n    exp2_2 = [\n        [('ShearY', 0.1, 3), ('AutoContrast', 0.9, 5)],\n        [('TranslateYAbs', 0.3, 6), ('CutoutAbs', 0.3, 3)],\n        [('Equalize', 0.5, 0), ('Solarize', 0.6, 6)],\n        [('AutoContrast', 0.3, 5), ('Rotate', 0.2, 7)],\n        [('Equalize', 0.8, 2), ('Invert', 0.4, 0)]]\n    exp2_3 = [\n        [('Equalize', 0.9, 5), ('Color', 0.7, 0)],\n        [('Equalize', 0.1, 1), ('ShearY', 
0.1, 3)],\n        [('AutoContrast', 0.7, 3), ('Equalize', 0.7, 0)],\n        [('Brightness', 0.5, 1), ('Contrast', 0.1, 7)],\n        [('Contrast', 0.1, 4), ('Solarize', 0.6, 5)]]\n    exp2_4 = [\n        [('Solarize', 0.2, 3), ('ShearX', 0.0, 0)],\n        [('TranslateXAbs', 0.3, 0), ('TranslateXAbs', 0.6, 0)],\n        [('Equalize', 0.5, 9), ('TranslateYAbs', 0.6, 7)],\n        [('ShearX', 0.1, 0), ('Sharpness', 0.5, 1)],\n        [('Equalize', 0.8, 6), ('Invert', 0.3, 6)]]\n    exp2_5 = [\n        [('AutoContrast', 0.3, 9), ('CutoutAbs', 0.5, 3)],\n        [('ShearX', 0.4, 4), ('AutoContrast', 0.9, 2)],\n        [('ShearX', 0.0, 3), ('Posterize2', 0.0, 3)],\n        [('Solarize', 0.4, 3), ('Color', 0.2, 4)],\n        [('Equalize', 0.1, 4), ('Equalize', 0.7, 6)]]\n    exp2_6 = [\n        [('Equalize', 0.3, 8), ('AutoContrast', 0.4, 3)],\n        [('Solarize', 0.6, 4), ('AutoContrast', 0.7, 6)],\n        [('AutoContrast', 0.2, 9), ('Brightness', 0.4, 8)],\n        [('Equalize', 0.1, 0), ('Equalize', 0.0, 6)],\n        [('Equalize', 0.8, 4), ('Equalize', 0.0, 4)]]\n    exp2_7 = [\n        [('Equalize', 0.5, 5), ('AutoContrast', 0.1, 2)],\n        [('Solarize', 0.5, 5), ('AutoContrast', 0.9, 5)],\n        [('AutoContrast', 0.6, 1), ('AutoContrast', 0.7, 8)],\n        [('Equalize', 0.2, 0), ('AutoContrast', 0.1, 2)],\n        [('Equalize', 0.6, 9), ('Equalize', 0.4, 4)]]\n    exp0s = exp0_0 + exp0_1 + exp0_2 + exp0_3\n    exp1s = exp1_0 + exp1_1 + exp1_2 + exp1_3 + exp1_4 + exp1_5 + exp1_6\n    exp2s = exp2_0 + exp2_1 + exp2_2 + exp2_3 + exp2_4 + exp2_5 + exp2_6 + exp2_7\n\n    return exp0s + exp1s + exp2s\n\n\nPARAMETER_MAX = 10\n\n\ndef float_parameter(level, maxval):\n    return float(level) * maxval / PARAMETER_MAX\n\n\ndef int_parameter(level, maxval):\n    return int(float_parameter(level, maxval))\n\n\ndef no_duplicates(f):\n    def wrap_remove_duplicates():\n        policies = f()\n        return remove_deplicates(policies)\n\n    return 
wrap_remove_duplicates\n\n\ndef remove_deplicates(policies):\n    s = set()\n    new_policies = []\n    for ops in policies:\n        key = []\n        for op in ops:\n            key.append(op[0])\n        key = '_'.join(key)\n        if key in s:\n            continue\n        else:\n            s.add(key)\n            new_policies.append(ops)\n\n    return new_policies\n\n\ndef fa_reduced_cifar10():\n    p = [[[\"Contrast\", 0.8320659688593578, 0.49884310562180767], [\"TranslateX\", 0.41849883971249136, 0.394023086494538]], [[\"Color\", 0.3500483749890918, 0.43355143929883955], [\"Color\", 0.5120716140300229, 0.7508299643325016]], [[\"Rotate\", 0.9447932604389472, 0.29723465088990375], [\"Sharpness\", 0.1564936149799504, 0.47169309978091745]], [[\"Rotate\", 0.5430015349185097, 0.6518626678905443], [\"Color\", 0.5694844928020679, 0.3494533005430269]], [[\"AutoContrast\", 0.5558922032451064, 0.783136004977799], [\"TranslateY\", 0.683914191471972, 0.7597025305860181]], [[\"TranslateX\", 0.03489224481658926, 0.021025488042663354], [\"Equalize\", 0.4788637403857401, 0.3535481281496117]], [[\"Sharpness\", 0.6428916269794158, 0.22791511918580576], [\"Contrast\", 0.016014045073950323, 0.26811312269487575]], [[\"Rotate\", 0.2972727228410451, 0.7654251516829896], [\"AutoContrast\", 0.16005809254943348, 0.5380523650108116]], [[\"Contrast\", 0.5823671057717301, 0.7521166301398389], [\"TranslateY\", 0.9949449214751978, 0.9612671341689751]], [[\"Equalize\", 0.8372126687702321, 0.6944127225621206], [\"Rotate\", 0.25393282929784755, 0.3261658365286546]], [[\"Invert\", 0.8222011603194572, 0.6597915864008403], [\"Posterize\", 0.31858707654447327, 0.9541013715579584]], [[\"Sharpness\", 0.41314621282107045, 0.9437344470879956], [\"Cutout\", 0.6610495837889337, 0.674411664255093]], [[\"Contrast\", 0.780121736705407, 0.40826152397463156], [\"Color\", 0.344019192125256, 0.1942922781355767]], [[\"Rotate\", 0.17153139555621344, 0.798745732456474], [\"Invert\", 0.6010555860501262, 
0.320742172554767]], [[\"Invert\", 0.26816063450777416, 0.27152062163148327], [\"Equalize\", 0.6786829200236982, 0.7469412443514213]], [[\"Contrast\", 0.3920564414367518, 0.7493644582838497], [\"TranslateY\", 0.8941657805606704, 0.6580846856375955]], [[\"Equalize\", 0.875509207399372, 0.9061130537645283], [\"Cutout\", 0.4940280679087308, 0.7896229623628276]], [[\"Contrast\", 0.3331423298065147, 0.7170041362529597], [\"ShearX\", 0.7425484291842793, 0.5285117152426109]], [[\"Equalize\", 0.97344237365026, 0.4745759720473106], [\"TranslateY\", 0.055863458430295276, 0.9625142022954672]], [[\"TranslateX\", 0.6810614083109192, 0.7509937355495521], [\"TranslateY\", 0.3866463019475701, 0.5185481505576112]], [[\"Sharpness\", 0.4751529944753671, 0.550464012488733], [\"Cutout\", 0.9472914750534814, 0.5584925992985023]], [[\"Contrast\", 0.054606784909375095, 0.17257080196712182], [\"Cutout\", 0.6077026782754803, 0.7996504165944938]], [[\"ShearX\", 0.328798428243695, 0.2769563264079157], [\"Cutout\", 0.9037632437023772, 0.4915809476763595]], [[\"Cutout\", 0.6891202672363478, 0.9951490996172914], [\"Posterize\", 0.06532762462628705, 0.4005246609075227]], [[\"TranslateY\", 0.6908583592523334, 0.725612120376128], [\"Rotate\", 0.39907735501746666, 0.36505798032223147]], [[\"TranslateX\", 0.10398364107399072, 0.5913918470536627], [\"Rotate\", 0.7169811539340365, 0.8283850670648724]], [[\"ShearY\", 0.9526373530768361, 0.4482347365639251], [\"Contrast\", 0.4203947336351471, 0.41526799558953864]], [[\"Contrast\", 0.24894431199700073, 0.09578870500994707], [\"Solarize\", 0.2273713345927395, 0.6214942914963707]], [[\"TranslateX\", 0.06331228870032912, 0.8961907489444944], [\"Cutout\", 0.5110007859958743, 0.23704875994050723]], [[\"Cutout\", 0.3769183548846172, 0.6560944580253987], [\"TranslateY\", 0.7201924599434143, 0.4132476526938319]], [[\"Invert\", 0.6707431156338866, 0.11622795952464149], [\"Posterize\", 0.12075972752370845, 0.18024933294172307]], [[\"Color\", 0.5010057264087142, 
0.5277767327434318], [\"Rotate\", 0.9486115946366559, 0.31485546630220784]], [[\"ShearX\", 0.31741302466630406, 0.1991215806270692], [\"Invert\", 0.3744727015523084, 0.6914113986757578]], [[\"Brightness\", 0.40348479064392617, 0.8924182735724888], [\"Brightness\", 0.1973098763857779, 0.3939288933689655]], [[\"Color\", 0.01208688664030888, 0.6055693000885217], [\"Equalize\", 0.433259451147881, 0.420711137966155]], [[\"Cutout\", 0.2620018360076487, 0.11594468278143644], [\"Rotate\", 0.1310401567856766, 0.7244318146544101]], [[\"ShearX\", 0.15249651845933576, 0.35277277071866986], [\"Contrast\", 0.28221794032094016, 0.42036586509397444]], [[\"Brightness\", 0.8492912150468908, 0.26386920887886056], [\"Solarize\", 0.8764208056263386, 0.1258195122766067]], [[\"ShearX\", 0.8537058239675831, 0.8415101816171269], [\"AutoContrast\", 0.23958568830416294, 0.9889049529564014]], [[\"Rotate\", 0.6463207930684552, 0.8750192129056532], [\"Contrast\", 0.6865032211768652, 0.8564981333033417]], [[\"Equalize\", 0.8877190311811044, 0.7370995897848609], [\"TranslateX\", 0.9979660314391368, 0.005683998913244781]], [[\"Color\", 0.6420017551677819, 0.6225337265571229], [\"Solarize\", 0.8344504978566362, 0.8332856969941151]], [[\"ShearX\", 0.7439332981992567, 0.9747608698582039], [\"Equalize\", 0.6259189804002959, 0.028017478098245174]], [[\"TranslateY\", 0.39794770293366843, 0.8482966537902709], [\"Rotate\", 0.9312935630405351, 0.5300586925826072]], [[\"Cutout\", 0.8904075572021911, 0.3522934742068766], [\"Equalize\", 0.6431186289473937, 0.9930577962126151]], [[\"Contrast\", 0.9183553386089476, 0.44974266209396685], [\"TranslateY\", 0.8193684583123862, 0.9633741156526566]], [[\"ShearY\", 0.616078299924283, 0.19219314358924766], [\"Solarize\", 0.1480945914138868, 0.05922109541654652]], [[\"Solarize\", 0.25332455064128157, 0.18853037431947994], [\"ShearY\", 0.9518390093954243, 0.14603930044061142]], [[\"Color\", 0.8094378664335412, 0.37029830225408433], [\"Contrast\", 0.29504113617467465, 
0.065096365468442]], [[\"AutoContrast\", 0.7075167558685455, 0.7084621693458267], [\"Sharpness\", 0.03555539453323875, 0.5651948313888351]], [[\"TranslateY\", 0.5969982600930229, 0.9857264201029572], [\"Rotate\", 0.9898628564873607, 0.1985685534926911]], [[\"Invert\", 0.14915939942810352, 0.6595839632446547], [\"Posterize\", 0.768535289994361, 0.5997358684618563]], [[\"Equalize\", 0.9162691815967111, 0.3331035307653627], [\"Color\", 0.8169118187605557, 0.7653910258006366]], [[\"Rotate\", 0.43489185299530897, 0.752215269135173], [\"Brightness\", 0.1569828560334806, 0.8002808712857853]], [[\"Invert\", 0.931876215328345, 0.029428644395760872], [\"Equalize\", 0.6330036052674145, 0.7235531014288485]], [[\"ShearX\", 0.5216138393704968, 0.849272958911589], [\"AutoContrast\", 0.19572688655120263, 0.9786551568639575]], [[\"ShearX\", 0.9899586208275011, 0.22580547500610293], [\"Brightness\", 0.9831311903178727, 0.5055159610855606]], [[\"Brightness\", 0.29179117009211486, 0.48003584672937294], [\"Solarize\", 0.7544252317330058, 0.05806581735063043]], [[\"AutoContrast\", 0.8919800329537786, 0.8511261613698553], [\"Contrast\", 0.49199446084551035, 0.7302297140181429]], [[\"Cutout\", 0.7079723710644835, 0.032565015538375874], [\"AutoContrast\", 0.8259782090388609, 0.7860708789468442]], [[\"Posterize\", 0.9980262659801914, 0.6725084224935673], [\"ShearY\", 0.6195568269664682, 0.5444170291816751]], [[\"Posterize\", 0.8687351834713217, 0.9978004914422602], [\"Equalize\", 0.4532646848325955, 0.6486748015710573]], [[\"Contrast\", 0.2713928776950594, 0.15255249557027806], [\"ShearY\", 0.9276834387970199, 0.5266542862333478]], [[\"AutoContrast\", 0.5240786618055582, 0.9325642258930253], [\"Cutout\", 0.38448627892037357, 0.21219415055662394]], [[\"TranslateX\", 0.4299517937295352, 0.20133751201386152], [\"TranslateX\", 0.6753468310276597, 0.6985621035400441]], [[\"Rotate\", 0.4006472499103597, 0.6704748473357586], [\"Equalize\", 0.674161668148079, 0.6528530101705237]], [[\"Equalize\", 
0.9139902833674455, 0.9015103149680278], [\"Sharpness\", 0.7289667720691948, 0.7623606352376232]], [[\"Cutout\", 0.5911267429414259, 0.5953141187177585], [\"Rotate\", 0.5219064817468504, 0.11085141355857986]], [[\"TranslateX\", 0.3620095133946267, 0.26194039409492476], [\"Rotate\", 0.3929841359545597, 0.4913406720338047]], [[\"Invert\", 0.5175298901458896, 0.001661410821811482], [\"Invert\", 0.004656581318332242, 0.8157622192213624]], [[\"AutoContrast\", 0.013609693335051465, 0.9318651749409604], [\"Invert\", 0.8980844358979592, 0.2268511862780368]], [[\"ShearY\", 0.7717126261142194, 0.09975547983707711], [\"Equalize\", 0.7808494401429572, 0.4141412091009955]], [[\"TranslateX\", 0.5878675721341552, 0.29813268038163376], [\"Posterize\", 0.21257276051591356, 0.2837285296666412]], [[\"Brightness\", 0.4268335108566488, 0.4723784991635417], [\"Cutout\", 0.9386262901570471, 0.6597686851494288]], [[\"ShearX\", 0.8259423807590159, 0.6215304795389204], [\"Invert\", 0.6663365779667443, 0.7729669184580387]], [[\"ShearY\", 0.4801338723951297, 0.5220145420100984], [\"Solarize\", 0.9165803796596582, 0.04299335502862134]], [[\"Color\", 0.17621114853558817, 0.7092601754635434], [\"ShearX\", 0.9014406936728542, 0.6028711944367818]], [[\"Rotate\", 0.13073284972300658, 0.9088831512880851], [\"ShearX\", 0.4228105332316806, 0.7985249783662675]], [[\"Brightness\", 0.9182753692730031, 0.0063635477774044436], [\"Color\", 0.4279825602663798, 0.28727149118585327]], [[\"Equalize\", 0.578218285372267, 0.9611758542158054], [\"Contrast\", 0.5471552264150691, 0.8819635504027596]], [[\"Brightness\", 0.3208589067274543, 0.45324733565167497], [\"Solarize\", 0.5218455808633233, 0.5946097503647126]], [[\"Equalize\", 0.3790381278653, 0.8796082535775276], [\"Solarize\", 0.4875526773149246, 0.5186585878052613]], [[\"ShearY\", 0.12026461479557571, 0.1336953429068397], [\"Posterize\", 0.34373988646025766, 0.8557727670803785]], [[\"Cutout\", 0.2396745247507467, 0.8123036135209865], [\"Equalize\", 
0.05022807681008945, 0.6648492261984383]], [[\"Brightness\", 0.35226676470748264, 0.5950011514888855], [\"Rotate\", 0.27555076067000894, 0.9170063321486026]], [[\"ShearX\", 0.320224630647278, 0.9683584649071976], [\"Invert\", 0.6905585196648905, 0.5929115667894518]], [[\"Color\", 0.9941395717559652, 0.7474441679798101], [\"Sharpness\", 0.7559998478658021, 0.6656052889626682]], [[\"ShearY\", 0.4004220568345669, 0.5737646992826074], [\"Equalize\", 0.9983495213746147, 0.8307907033362303]], [[\"Color\", 0.13726809242038207, 0.9378850119950549], [\"Equalize\", 0.9853362454752445, 0.42670264496554156]], [[\"Invert\", 0.13514636153298576, 0.13516363849081958], [\"Sharpness\", 0.2031189356693901, 0.6110226359872745]], [[\"TranslateX\", 0.7360305209630797, 0.41849698571655614], [\"Contrast\", 0.8972161549144564, 0.7820296625565641]], [[\"Color\", 0.02713118828682548, 0.717110684828096], [\"TranslateY\", 0.8118759006836348, 0.9120098002024992]], [[\"Sharpness\", 0.2915428949403711, 0.7630303724396518], [\"Solarize\", 0.22030536162851078, 0.38654526772661757]], [[\"Equalize\", 0.9949114839538582, 0.7193630656062793], [\"AutoContrast\", 0.00889496657931299, 0.2291400476524672]], [[\"Rotate\", 0.7120948976490488, 0.7804359309791055], [\"Cutout\", 0.10445418104923654, 0.8022999156052766]], [[\"Equalize\", 0.7941710117902707, 0.8648170634288153], [\"Invert\", 0.9235642581144047, 0.23810725859722381]], [[\"Cutout\", 0.3669397998623156, 0.42612815083245004], [\"Solarize\", 0.5896322046441561, 0.40525016166956795]], [[\"Color\", 0.8389858785714184, 0.4805764176488667], [\"Rotate\", 0.7483931487048825, 0.4731174601400677]], [[\"Sharpness\", 0.19006538611394763, 0.9480745790240234], [\"TranslateY\", 0.13904429049439282, 0.04117685330615939]], [[\"TranslateY\", 0.9958097661701637, 0.34853788612580905], [\"Cutout\", 0.2235829624082113, 0.3737887095480745]], [[\"ShearX\", 0.635453761342424, 0.6063917273421382], [\"Posterize\", 0.8738297843709666, 0.4893042590265556]], [[\"Brightness\", 
0.7907245198402727, 0.7082189713070691], [\"Color\", 0.030313003541849737, 0.6927897798493439]], [[\"Cutout\", 0.6965622481073525, 0.8103522907758203], [\"ShearY\", 0.6186794303078708, 0.28640671575703547]], [[\"ShearY\", 0.43734910588450226, 0.32549342535621517], [\"ShearX\", 0.08154980987651872, 0.3286764923112455]], [[\"AutoContrast\", 0.5262462005050853, 0.8175584582465848], [\"Contrast\", 0.8683217097363655, 0.548776281479276]], [[\"ShearY\", 0.03957878500311707, 0.5102350637943197], [\"Rotate\", 0.13794708520303778, 0.38035687712954236]], [[\"Sharpness\", 0.634288567312677, 0.6387948309075822], [\"AutoContrast\", 0.13437288694693272, 0.7150448869023095]], [[\"Contrast\", 0.5198339640088544, 0.9409429390321714], [\"Cutout\", 0.09489154903321972, 0.6228488803821982]], [[\"Equalize\", 0.8955909061806043, 0.7727336527163008], [\"AutoContrast\", 0.6459479564441762, 0.7065467781139214]], [[\"Invert\", 0.07214420843537739, 0.15334721382249505], [\"ShearX\", 0.9242027778363903, 0.5809187849982554]], [[\"Equalize\", 0.9144084379856188, 0.9457539278608998], [\"Sharpness\", 0.14337499858300173, 0.5978054365425495]], [[\"Posterize\", 0.18894269796951202, 0.14676331276539045], [\"Equalize\", 0.846204299950047, 0.0720601838168885]], [[\"Contrast\", 0.47354445405741163, 0.1793650330107468], [\"Solarize\", 0.9086106327264657, 0.7578807802091502]], [[\"AutoContrast\", 0.11805466892967886, 0.6773620948318575], [\"TranslateX\", 0.584222568299264, 0.9475693349391936]], [[\"Brightness\", 0.5833017701352768, 0.6892593824176294], [\"AutoContrast\", 0.9073141314561828, 0.5823085733964589]], [[\"TranslateY\", 0.5711231614144834, 0.6436240447620021], [\"Contrast\", 0.21466964050052473, 0.8042843954486391]], [[\"Contrast\", 0.22967904487976765, 0.2343103109298762], [\"Invert\", 0.5502897289159286, 0.386181060792375]], [[\"Invert\", 0.7008423439928628, 0.4234003051405053], [\"Rotate\", 0.77270460187611, 0.6650852696828039]], [[\"Invert\", 0.050618322309703534, 0.24277027926683614], 
[\"TranslateX\", 0.789703489736613, 0.5116446685339312]], [[\"Color\", 0.363898083076868, 0.7870323584210503], [\"ShearY\", 0.009608425513626617, 0.6188625018465327]], [[\"TranslateY\", 0.9447601615216088, 0.8605867115798349], [\"Equalize\", 0.24139180127003634, 0.9587337957930782]], [[\"Equalize\", 0.3968589440144503, 0.626206375426996], [\"Solarize\", 0.3215967960673186, 0.826785464835443]], [[\"TranslateX\", 0.06947339047121326, 0.016705969558222122], [\"Contrast\", 0.6203392406528407, 0.6433525559906872]], [[\"Solarize\", 0.2479835265518212, 0.6335009955617831], [\"Sharpness\", 0.6260191862978083, 0.18998095149428562]], [[\"Invert\", 0.9818841924943431, 0.03252098144087934], [\"TranslateY\", 0.9740718042586802, 0.32038951753031475]], [[\"Solarize\", 0.8795784664090814, 0.7014953994354041], [\"AutoContrast\", 0.8508018319577783, 0.09321935255338443]], [[\"Color\", 0.8067046326105318, 0.13732893832354054], [\"Contrast\", 0.7358549680271418, 0.7880588355974301]], [[\"Posterize\", 0.5005885536838065, 0.7152229305267599], [\"ShearX\", 0.6714249591308944, 0.7732232697859908]], [[\"TranslateY\", 0.5657943483353953, 0.04622399873706862], [\"AutoContrast\", 0.2787442688649845, 0.567024378767143]], [[\"ShearY\", 0.7589839214283295, 0.041071003934029404], [\"Equalize\", 0.3719852873722692, 0.43285778682687326]], [[\"Posterize\", 0.8841266183653291, 0.42441306955476366], [\"Cutout\", 0.06578801759412933, 0.5961125797961526]], [[\"Rotate\", 0.4057875004314082, 0.20241115848366442], [\"AutoContrast\", 0.19331542807918067, 0.7175484678480565]], [[\"Contrast\", 0.20331327116693088, 0.17135387852218742], [\"Cutout\", 0.6282459410351067, 0.6690015305529187]], [[\"ShearX\", 0.4309850328306535, 0.99321178125828], [\"AutoContrast\", 0.01809604030453338, 0.693838277506365]], [[\"Rotate\", 0.24343531125298268, 0.5326412444169899], [\"Sharpness\", 0.8663989992597494, 0.7643990609130789]], [[\"Rotate\", 0.9785019204622459, 0.8941922576710696], [\"ShearY\", 0.3823185048761075, 
0.9258854046017292]], [[\"ShearY\", 0.5502613342963388, 0.6193478797355644], [\"Sharpness\", 0.2212116534610532, 0.6648232390110979]], [[\"TranslateY\", 0.43222920981513757, 0.5657636397633089], [\"ShearY\", 0.9153733286073634, 0.4868521171273169]], [[\"Posterize\", 0.12246560519738336, 0.9132288825898972], [\"Cutout\", 0.6058471327881816, 0.6426901876150983]], [[\"Color\", 0.3693970222695844, 0.038929141432555436], [\"Equalize\", 0.6228052875653781, 0.05064436511347281]], [[\"Color\", 0.7172600331356893, 0.2824542634766688], [\"Color\", 0.425293116261649, 0.1796441283313972]], [[\"Cutout\", 0.7539608428122959, 0.9896141728228921], [\"Solarize\", 0.17811081117364758, 0.9064195503634402]], [[\"AutoContrast\", 0.6761242607012717, 0.6484842446399923], [\"AutoContrast\", 0.1978135076901828, 0.42166879492601317]], [[\"ShearY\", 0.25901666379802524, 0.4770778270322449], [\"Solarize\", 0.7640963173407052, 0.7548463227094349]], [[\"TranslateY\", 0.9222487731783499, 0.33658389819616463], [\"Equalize\", 0.9159112511468139, 0.8877136302394797]], [[\"TranslateX\", 0.8994836977137054, 0.11036053676846591], [\"Sharpness\", 0.9040333410652747, 0.007266095214664592]], [[\"Invert\", 0.627758632524958, 0.8075245097227242], [\"Color\", 0.7525387912148516, 0.05950239294733184]], [[\"TranslateX\", 0.43505193292761857, 0.38108822876120796], [\"TranslateY\", 0.7432578052364004, 0.685678116134759]], [[\"Contrast\", 0.9293507582470425, 0.052266842951356196], [\"Posterize\", 0.45187123977747456, 0.8228290399726782]], [[\"ShearX\", 0.07240786542746291, 0.8945667925365756], [\"Brightness\", 0.5305443506561034, 0.12025274552427578]], [[\"Invert\", 0.40157564448143335, 0.5364745514006678], [\"Posterize\", 0.3316124671813876, 0.43002413237035997]], [[\"ShearY\", 0.7152314630009072, 0.1938339083417453], [\"Invert\", 0.14102478508140615, 0.41047623580174253]], [[\"Equalize\", 0.19862832613849246, 0.5058521685279254], [\"Sharpness\", 0.16481208629549782, 0.29126323102770557]], [[\"Equalize\", 
0.6951591703541872, 0.7294822018800076], [\"ShearX\", 0.8726656726111219, 0.3151484225786487]], [[\"Rotate\", 0.17234370554263745, 0.9356543193000078], [\"TranslateX\", 0.4954374070084091, 0.05496727345849217]], [[\"Contrast\", 0.347405480122842, 0.831553005022885], [\"ShearX\", 0.28946367213071134, 0.11905898704394013]], [[\"Rotate\", 0.28096672507990683, 0.16181284050307398], [\"Color\", 0.6554918515385365, 0.8739728050797386]], [[\"Solarize\", 0.05408073374114053, 0.5357087283758337], [\"Posterize\", 0.42457175211495335, 0.051807130609045515]], [[\"TranslateY\", 0.6216669362331361, 0.9691341207381867], [\"Rotate\", 0.9833579358130944, 0.12227426932415297]], [[\"AutoContrast\", 0.7572619475282892, 0.8062834082727393], [\"Contrast\", 0.1447865402875591, 0.40242646573228436]], [[\"Rotate\", 0.7035658783466086, 0.9840285268256428], [\"Contrast\", 0.04613961510519471, 0.7666683217450163]], [[\"TranslateX\", 0.4580462177951252, 0.6448678609474686], [\"AutoContrast\", 0.14845695613708987, 0.1581134188537895]], [[\"Color\", 0.06795037145259564, 0.9115552821158709], [\"TranslateY\", 0.9972953449677655, 0.6791016521791214]], [[\"Cutout\", 0.3586908443690823, 0.11578558293480945], [\"Color\", 0.49083981719164294, 0.6924851425917189]], [[\"Brightness\", 0.7994717831637873, 0.7887316255321768], [\"Posterize\", 0.01280463502435425, 0.2799086732858721]], [[\"ShearY\", 0.6733451536131859, 0.8122332639516706], [\"AutoContrast\", 0.20433889615637357, 0.29023346867819966]], [[\"TranslateY\", 0.709913512385177, 0.6538196931503809], [\"Invert\", 0.06629795606579203, 0.40913219547548296]], [[\"Sharpness\", 0.4704559834362948, 0.4235993305308414], [\"Equalize\", 0.7578132044306966, 0.9388824249397175]], [[\"AutoContrast\", 0.5281702802395268, 0.8077253610116979], [\"Equalize\", 0.856446858814119, 0.0479755681647559]], [[\"Color\", 0.8244145826797791, 0.038409264586238945], [\"Equalize\", 0.4933123249234237, 0.8251940933672189]], [[\"TranslateX\", 0.23949314158035084, 
0.13576027004706692], [\"ShearX\", 0.8547563771688399, 0.8309262160483606]], [[\"Cutout\", 0.4655680937486001, 0.2819807000622825], [\"Contrast\", 0.8439552665937905, 0.4843617871587037]], [[\"TranslateX\", 0.19142454476784831, 0.7516148119169537], [\"AutoContrast\", 0.8677128351329768, 0.34967990912346336]], [[\"Contrast\", 0.2997868299880966, 0.919508054854469], [\"AutoContrast\", 0.3003418493384957, 0.812314984368542]], [[\"Invert\", 0.1070424236198183, 0.614674386498809], [\"TranslateX\", 0.5010973510899923, 0.20828478805259465]], [[\"Contrast\", 0.6775882415611454, 0.6938564815591685], [\"Cutout\", 0.4814634264207498, 0.3086844939744179]], [[\"TranslateY\", 0.939427105020265, 0.02531043619423201], [\"Contrast\", 0.793754257944812, 0.6676072472565451]], [[\"Sharpness\", 0.09833672397575444, 0.5937214638292085], [\"Rotate\", 0.32530675291753763, 0.08302275740932441]], [[\"Sharpness\", 0.3096455511562728, 0.6726732004553959], [\"TranslateY\", 0.43268997648796537, 0.8755012330217743]], [[\"ShearY\", 0.9290771880324833, 0.22114736271319912], [\"Equalize\", 0.5520199288501478, 0.34269650332060553]], [[\"AutoContrast\", 0.39763980746649374, 0.4597414582725454], [\"Contrast\", 0.941507852412761, 0.24991270562477041]], [[\"Contrast\", 0.19419400547588095, 0.9127524785329233], [\"Invert\", 0.40544905179551727, 0.770081532844878]], [[\"Invert\", 0.30473757368608334, 0.23534811781828846], [\"Cutout\", 0.26090722356706686, 0.5478390909877727]], [[\"Posterize\", 0.49434361308057373, 0.05018423270527428], [\"Color\", 0.3041910676883317, 0.2603810415446437]], [[\"Invert\", 0.5149061746764011, 0.9507449210221298], [\"TranslateY\", 0.4458076521892904, 0.8235358255774426]], [[\"Cutout\", 0.7900006753351625, 0.905578861382507], [\"Cutout\", 0.6707153655762056, 0.8236715672258502]], [[\"Solarize\", 0.8750534386579575, 0.10337670467100568], [\"Posterize\", 0.6102379615481381, 0.9264503915416868]], [[\"ShearY\", 0.08448689377082852, 0.13981233725811626], [\"TranslateX\", 
0.13979689669329498, 0.768774869872818]], [[\"TranslateY\", 0.35752572266759985, 0.22827299847812488], [\"Solarize\", 0.3906957174236011, 0.5663314388307709]], [[\"ShearY\", 0.29155240367061563, 0.8427516352971683], [\"ShearX\", 0.988825367441916, 0.9371258864857649]], [[\"Posterize\", 0.3470780859769458, 0.5467686612321239], [\"Rotate\", 0.5758606274160093, 0.8843838082656007]], [[\"Cutout\", 0.07825368363221841, 0.3230799425855425], [\"Equalize\", 0.2319163865298529, 0.42133965674727325]], [[\"Invert\", 0.41972172597448654, 0.34618622513582953], [\"ShearX\", 0.33638469398198834, 0.9098575535928108]], [[\"Invert\", 0.7322652233340448, 0.7747502957687412], [\"Cutout\", 0.9643121397298106, 0.7983335094634907]], [[\"TranslateY\", 0.30039942808098496, 0.229018798182827], [\"TranslateY\", 0.27009499739380194, 0.6435577237846236]], [[\"Color\", 0.38245274994070644, 0.7030758568461645], [\"ShearX\", 0.4429321461666281, 0.6963787864044149]], [[\"AutoContrast\", 0.8432798685515605, 0.5775214369578088], [\"Brightness\", 0.7140899735355927, 0.8545854720117658]], [[\"Rotate\", 0.14418935535613786, 0.5637968282213426], [\"Color\", 0.7115231912479835, 0.32584796564566776]], [[\"Sharpness\", 0.4023501062807533, 0.4162097130412771], [\"Brightness\", 0.5536372686153666, 0.03004023273348777]], [[\"TranslateX\", 0.7526053265574295, 0.5365938133399961], [\"Cutout\", 0.07914142706557492, 0.7544953091603148]], [[\"TranslateY\", 0.6932934644882822, 0.5302211727137424], [\"Invert\", 0.5040606028391255, 0.6074863635108957]], [[\"Sharpness\", 0.5013938602431629, 0.9572417724333157], [\"TranslateY\", 0.9160516359783026, 0.41798927975391675]], [[\"ShearY\", 0.5130018836722556, 0.30209438428424185], [\"Color\", 0.15017170588500262, 0.20653495360587826]], [[\"TranslateX\", 0.5293300090022314, 0.6407011888285266], [\"Rotate\", 0.4809817860439001, 0.3537850070371702]], [[\"Equalize\", 0.42243081336551014, 0.13472721311046565], [\"Posterize\", 0.4700309639484068, 0.5197704360874883]], 
[[\"AutoContrast\", 0.40674959899687235, 0.7312824868168921], [\"TranslateX\", 0.7397527975920833, 0.7068339877944815]], [[\"TranslateY\", 0.5880995184787206, 0.41294111378078946], [\"ShearX\", 0.3181387627799316, 0.4810010147143413]], [[\"Color\", 0.9898680233928507, 0.13241525577655167], [\"Contrast\", 0.9824932511238534, 0.5081145010853807]], [[\"Invert\", 0.1591854062582687, 0.9760371953250404], [\"Color\", 0.9913399302056851, 0.8388709501056177]], [[\"Rotate\", 0.6427451962231163, 0.9486793975292853], [\"AutoContrast\", 0.8501937877930463, 0.021326757974406196]], [[\"Contrast\", 0.13611684531087598, 0.3050858709483848], [\"Posterize\", 0.06618644756084646, 0.8776928511951034]], [[\"TranslateX\", 0.41021065663839407, 0.4965319749091702], [\"Rotate\", 0.07088831484595115, 0.4435516708223345]], [[\"Sharpness\", 0.3151707977154323, 0.28275482520179296], [\"Invert\", 0.36980384682133804, 0.20813616084536624]], [[\"Cutout\", 0.9979060206661017, 0.39712948644725854], [\"Brightness\", 0.42451052896163466, 0.942623075649937]], [[\"Equalize\", 0.5300853308425644, 0.010183500830128867], [\"AutoContrast\", 0.06930788523716991, 0.5403125318991522]], [[\"Contrast\", 0.010385458959237814, 0.2588311035539086], [\"ShearY\", 0.9347048553928764, 0.10439028366854963]], [[\"ShearY\", 0.9867649486508592, 0.8409258132716434], [\"ShearX\", 0.48031199530836444, 0.7703375364614137]], [[\"ShearY\", 0.04835889473136512, 0.2671081675890492], [\"Brightness\", 0.7856432618509617, 0.8032169570159564]], [[\"Posterize\", 0.11112884927351185, 0.7116956530752987], [\"TranslateY\", 0.7339151092128607, 0.3331241226029017]], [[\"Invert\", 0.13527036207875454, 0.8425980515358883], [\"Color\", 0.7836395778298139, 0.5517059252678862]], [[\"Sharpness\", 0.012541163521491816, 0.013197550692292892], [\"Invert\", 0.6295957932861318, 0.43276521236056054]], [[\"AutoContrast\", 0.7681480991225756, 0.3634284648496289], [\"Brightness\", 0.09708289828517969, 0.45016725043529726]], [[\"Brightness\", 
0.5839450499487329, 0.47525965678316795], [\"Posterize\", 0.43096581990183735, 0.9332382960125196]], [[\"Contrast\", 0.9725334964552795, 0.9142902966863341], [\"Contrast\", 0.12376116410622995, 0.4355916974126801]], [[\"TranslateX\", 0.8572708473690132, 0.02544522678265526], [\"Sharpness\", 0.37902120723460364, 0.9606092969833118]], [[\"TranslateY\", 0.8907359001296927, 0.8011363927236099], [\"Color\", 0.7693777154407178, 0.0936768686746503]], [[\"Equalize\", 0.0002657688243309364, 0.08190798535970034], [\"Rotate\", 0.5215478065240905, 0.5773519995038368]], [[\"TranslateY\", 0.3383007813932477, 0.5733428274739165], [\"Sharpness\", 0.2436110797174722, 0.4757790814590501]], [[\"Cutout\", 0.0957402176213592, 0.8914395928996034], [\"Cutout\", 0.4959915628586883, 0.25890349461645246]], [[\"AutoContrast\", 0.594787300189186, 0.9627455357634459], [\"ShearY\", 0.5136027621132064, 0.10419602450259002]], [[\"Solarize\", 0.4684077211553732, 0.6592850629431414], [\"Sharpness\", 0.2382385935956325, 0.6589291408243176]], [[\"Cutout\", 0.4478786947325877, 0.6893616643143388], [\"TranslateX\", 0.2761781720270474, 0.21750622627277727]], [[\"Sharpness\", 0.39476077929016484, 0.930902796668923], [\"Cutout\", 0.9073012208742808, 0.9881122386614257]], [[\"TranslateY\", 0.0933719180021565, 0.7206252503441172], [\"ShearX\", 0.5151400441789256, 0.6307540083648309]], [[\"AutoContrast\", 0.7772689258806401, 0.8159317013156503], [\"AutoContrast\", 0.5932793713915097, 0.05262217353927168]], [[\"Equalize\", 0.38017352056118914, 0.8084724050448412], [\"ShearY\", 0.7239725628380852, 0.4246314890359326]], [[\"Cutout\", 0.741157483503503, 0.13244380646497977], [\"Invert\", 0.03395378056675935, 0.7140036618098844]], [[\"Rotate\", 0.0662727247460636, 0.7099861732415447], [\"Rotate\", 0.3168532707508249, 0.3553167425022127]], [[\"AutoContrast\", 0.7429303516734129, 0.07117444599776435], [\"Posterize\", 0.5379537435918104, 0.807221330263993]], [[\"TranslateY\", 0.9788586874795164, 0.7967243851346594], 
[\"Invert\", 0.4479103376922362, 0.04260360776727545]], [[\"Cutout\", 0.28318121763188997, 0.7748680701406292], [\"AutoContrast\", 0.9109258369403016, 0.17126397858002085]], [[\"Color\", 0.30183727885272027, 0.46718354750112456], [\"TranslateX\", 0.9628952256033627, 0.10269543754135535]], [[\"AutoContrast\", 0.6316709389784041, 0.84287698792044], [\"Brightness\", 0.5544761629904337, 0.025264772745200004]], [[\"Rotate\", 0.08803313299532567, 0.306059720523696], [\"Invert\", 0.5222165872425064, 0.045935208620454304]], [[\"TranslateY\", 0.21912346831923835, 0.48529224559004436], [\"TranslateY\", 0.15466734731903942, 0.8929485418495068]], [[\"ShearX\", 0.17141022847016563, 0.8607600402165531], [\"ShearX\", 0.6890511341106859, 0.7540899265679949]], [[\"Invert\", 0.9417455522972059, 0.9021733684991224], [\"Solarize\", 0.7693107057723746, 0.7268007946568782]], [[\"Posterize\", 0.02376991543373752, 0.6768442864453844], [\"Rotate\", 0.7736875065112697, 0.6706331753139825]], [[\"Contrast\", 0.3623841610390669, 0.15023657344457686], [\"Equalize\", 0.32975472189318666, 0.05629246869510651]], [[\"Sharpness\", 0.7874882420165824, 0.49535778020457066], [\"Posterize\", 0.09485578893387558, 0.6170768580482466]], [[\"Brightness\", 0.7099280202949585, 0.021523012961427335], [\"Posterize\", 0.2076371467666719, 0.17168118578815206]], [[\"Color\", 0.8546367645761538, 0.832011891505731], [\"Equalize\", 0.6429734783051777, 0.2618995960561532]], [[\"Rotate\", 0.8780793721476224, 0.5920897827664297], [\"ShearX\", 0.5338303685064825, 0.8605424531336439]], [[\"Sharpness\", 0.7504493806631884, 0.9723552387375258], [\"Sharpness\", 0.3206385634203266, 0.45127845905824693]], [[\"ShearX\", 0.23794709526711355, 0.06257530645720066], [\"Solarize\", 0.9132374030587093, 0.6240819934824045]], [[\"Sharpness\", 0.790583587969259, 0.28551171786655405], [\"Contrast\", 0.39872982844590554, 0.09644706751019538]], [[\"Equalize\", 0.30681999237432944, 0.5645045018157916], [\"Posterize\", 0.525966242669736, 
0.7360106111256014]], [[\"TranslateX\", 0.4881014179825114, 0.6317220208872226], [\"ShearX\", 0.2935158995550958, 0.23104608987381758]], [[\"Rotate\", 0.49977116738568395, 0.6610761068306319], [\"TranslateY\", 0.7396566602715687, 0.09386747830045217]], [[\"ShearY\", 0.5909773790018789, 0.16229529902832718], [\"Equalize\", 0.06461394468918358, 0.6661349001143908]], [[\"TranslateX\", 0.7218443721851834, 0.04435720302810153], [\"Cutout\", 0.986686540951642, 0.734771197038724]], [[\"ShearX\", 0.5353800096911666, 0.8120139502148365], [\"Equalize\", 0.4613239578449774, 0.5159528929124512]], [[\"Color\", 0.0871713897628631, 0.7708895183198486], [\"Solarize\", 0.5811386808912219, 0.35260648120785887]], [[\"Posterize\", 0.3910857927477053, 0.4329219555775561], [\"Color\", 0.9115983668789468, 0.6043069944145293]], [[\"Posterize\", 0.07493067637060635, 0.4258000066006725], [\"AutoContrast\", 0.4740957581389772, 0.49069587151651295]], [[\"Rotate\", 0.34086200894268937, 0.9812149332288828], [\"Solarize\", 0.6801012471371733, 0.17271491146753837]], [[\"Color\", 0.20542270872895207, 0.5532087457727624], [\"Contrast\", 0.2718692536563381, 0.20313287569510108]], [[\"Equalize\", 0.05199827210980934, 0.0832859890912212], [\"AutoContrast\", 0.8092395764794107, 0.7778945136511004]], [[\"Sharpness\", 0.1907689513066838, 0.7705754572256907], [\"Color\", 0.3911178658498049, 0.41791326933095485]], [[\"Solarize\", 0.19611855804748257, 0.2407807485604081], [\"AutoContrast\", 0.5343964972940493, 0.9034209455548394]], [[\"Color\", 0.43586520148538865, 0.4711164626521439], [\"ShearY\", 0.28635408186820555, 0.8417816793020271]], [[\"Cutout\", 0.09818482420382535, 0.1649767430954796], [\"Cutout\", 0.34582392911178494, 0.3927982995799828]], [[\"ShearX\", 0.001253882705272269, 0.48661629027584596], [\"Solarize\", 0.9229221435457137, 0.44374894836659073]], [[\"Contrast\", 0.6829734655718668, 0.8201750485099037], [\"Cutout\", 0.7886756837648936, 0.8423285219631946]], [[\"TranslateY\", 
0.857017093561528, 0.3038537151773969], [\"Invert\", 0.12809228606383538, 0.23637166191748027]], [[\"Solarize\", 0.9829027723424164, 0.9723093910674763], [\"Color\", 0.6346495302126811, 0.5405494753107188]], [[\"AutoContrast\", 0.06868643520377715, 0.23758659417688077], [\"AutoContrast\", 0.6648225411500879, 0.5618315648260103]], [[\"Invert\", 0.44202305603311676, 0.9945938909685547], [\"Equalize\", 0.7991650497684454, 0.16014142656347097]], [[\"AutoContrast\", 0.8778631604769588, 0.03951977631894088], [\"ShearY\", 0.8495160088963707, 0.35771447321250416]], [[\"Color\", 0.5365078341001592, 0.21102444169782308], [\"ShearX\", 0.7168869678248874, 0.3904298719872734]], [[\"TranslateX\", 0.6517203786101899, 0.6467598990650437], [\"Invert\", 0.26552491504364517, 0.1210812827294625]], [[\"Posterize\", 0.35196021684368994, 0.8420648319941891], [\"Invert\", 0.7796829363930631, 0.9520895999240896]], [[\"Sharpness\", 0.7391572148971984, 0.4853940393452846], [\"TranslateX\", 0.7641915295592839, 0.6351349057666782]], [[\"Posterize\", 0.18485880221115913, 0.6117603277356728], [\"Rotate\", 0.6541660490605724, 0.5704041108375348]], [[\"TranslateY\", 0.27517423188070533, 0.6610080904072458], [\"Contrast\", 0.6091250547289317, 0.7702443247557892]], [[\"Equalize\", 0.3611798581067118, 0.6623615672642768], [\"TranslateX\", 0.9537265090885917, 0.06352772509358584]], [[\"ShearX\", 0.09720029389103535, 0.7800423126320308], [\"Invert\", 0.30314352455858884, 0.8519925470889914]], [[\"Brightness\", 0.06931529763458055, 0.57760829499712], [\"Cutout\", 0.637251974467394, 0.7184346129191052]], [[\"AutoContrast\", 0.5026722100286064, 0.32025257156541886], [\"Contrast\", 0.9667478703047919, 0.14178519432669368]], [[\"Equalize\", 0.5924463845816984, 0.7187610262181517], [\"TranslateY\", 0.7059479079159405, 0.06551471830655187]], [[\"Sharpness\", 0.18161164512332928, 0.7576138481173385], [\"Brightness\", 0.19191138767695282, 0.7865880269424701]], [[\"Brightness\", 0.36780861866078696, 
0.0677855546737901], [\"AutoContrast\", 0.8491446654142264, 0.09217782099938121]], [[\"TranslateY\", 0.06011399855120858, 0.8374487034710264], [\"TranslateY\", 0.8373922962070498, 0.1991295720254297]], [[\"Posterize\", 0.702559916122481, 0.30257509683007755], [\"Rotate\", 0.249899495398891, 0.9370437251176267]], [[\"ShearX\", 0.9237874098232075, 0.26241907483351146], [\"Brightness\", 0.7221766836146657, 0.6880749752986671]], [[\"Cutout\", 0.37994098189193104, 0.7836874473657957], [\"ShearX\", 0.9212861960976824, 0.8140948561570449]], [[\"Posterize\", 0.2584098274786417, 0.7990847652004848], [\"Invert\", 0.6357731737590063, 0.1066304859116326]], [[\"Sharpness\", 0.4412790857539922, 0.9692465283229825], [\"Color\", 0.9857401617339051, 0.26755393929808713]], [[\"Equalize\", 0.22348671644912665, 0.7370019910830038], [\"Posterize\", 0.5396106339575417, 0.5559536849843303]], [[\"Equalize\", 0.8742967663495852, 0.2797122599926307], [\"Rotate\", 0.4697322053105951, 0.8769872942579476]], [[\"Sharpness\", 0.44279911640509206, 0.07729581896071613], [\"Cutout\", 0.3589177366154631, 0.2704031551235969]], [[\"TranslateX\", 0.614216412574085, 0.47929659784170453], [\"Brightness\", 0.6686234118438007, 0.05700784068205689]], [[\"ShearY\", 0.17920614630857634, 0.4699685075827862], [\"Color\", 0.38251870810870003, 0.7262706923005887]], [[\"Solarize\", 0.4951799001144561, 0.212775278026479], [\"TranslateX\", 0.8666105646463097, 0.6750496637519537]], [[\"Color\", 0.8110864170849051, 0.5154263861958484], [\"Sharpness\", 0.2489044083898776, 0.3763372541462343]], [[\"Cutout\", 0.04888193613483871, 0.06041664638981603], [\"Color\", 0.06438587718683708, 0.5797881428892969]], [[\"Rotate\", 0.032427448352152166, 0.4445797818376559], [\"Posterize\", 0.4459357828482998, 0.5879865187630777]], [[\"ShearX\", 0.1617179557693058, 0.050796802246318884], [\"Cutout\", 0.8142465452060423, 0.3836391305618707]], [[\"TranslateY\", 0.1806857249209416, 0.36697730355422675], [\"Rotate\", 0.9897576550818276, 
0.7483432452225264]], [[\"Brightness\", 0.18278016458098223, 0.952352527690299], [\"Cutout\", 0.3269735224453044, 0.3924869905012752]], [[\"ShearX\", 0.870832707718742, 0.3214743207190739], [\"Cutout\", 0.6805560681792573, 0.6984188155282459]], [[\"TranslateX\", 0.4157118388833776, 0.3964216288135384], [\"TranslateX\", 0.3253012682285006, 0.624835513104391]], [[\"Contrast\", 0.7678168037628158, 0.31033802162621793], [\"ShearX\", 0.27022424855977134, 0.3773245605126201]], [[\"TranslateX\", 0.37812621869017593, 0.7657993810740699], [\"Rotate\", 0.18081890120092914, 0.8893511219618171]], [[\"Posterize\", 0.8735859716088367, 0.18243793043074286], [\"TranslateX\", 0.90435994250313, 0.24116383818819453]], [[\"Invert\", 0.06666709253664793, 0.3881076083593933], [\"TranslateX\", 0.3783333964963522, 0.14411014979589543]], [[\"Equalize\", 0.8741147867162096, 0.14203839235846816], [\"TranslateX\", 0.7801536758037405, 0.6952401607812743]], [[\"Cutout\", 0.6095335117944475, 0.5679026063718094], [\"Posterize\", 0.06433868172233115, 0.07139559616012303]], [[\"TranslateY\", 0.3020364047315408, 0.21459810361176246], [\"Cutout\", 0.7097677414888889, 0.2942144632587549]], [[\"Brightness\", 0.8223662419048653, 0.195700694016108], [\"Invert\", 0.09345407040803999, 0.779843655582099]], [[\"TranslateY\", 0.7353462929356228, 0.0468520680237382], [\"Cutout\", 0.36530918247940425, 0.3897292909049672]], [[\"Invert\", 0.9676896451721213, 0.24473302189463453], [\"Invert\", 0.7369271521408992, 0.8193267003356975]], [[\"Sharpness\", 0.8691871972054326, 0.4441713912682772], [\"ShearY\", 0.47385584832119887, 0.23521684584675429]], [[\"ShearY\", 0.9266946026184021, 0.7611986713358834], [\"TranslateX\", 0.6195820760253926, 0.14661428669483678]], [[\"Sharpness\", 0.08470870576026868, 0.3380219099907229], [\"TranslateX\", 0.3062343307496658, 0.7135777338095889]], [[\"Sharpness\", 0.5246448204194909, 0.3193061215236702], [\"ShearX\", 0.8160637208508432, 0.9720697396582731]], [[\"Posterize\", 
0.5249259956549405, 0.3492042382504774], [\"Invert\", 0.8183138799547441, 0.11107271762524618]], [[\"TranslateY\", 0.210869733350744, 0.7138905840721885], [\"Sharpness\", 0.7773226404450125, 0.8005353621959782]], [[\"Posterize\", 0.33067522385556025, 0.32046239220630124], [\"AutoContrast\", 0.18918147708798405, 0.4646281070474484]], [[\"TranslateX\", 0.929502026131094, 0.8029128121556285], [\"Invert\", 0.7319794306118105, 0.5421878712623392]], [[\"ShearX\", 0.25645940834182723, 0.42754710760160963], [\"ShearX\", 0.44640695310173306, 0.8132185532296811]], [[\"Color\", 0.018436846416536312, 0.8439313862001113], [\"Sharpness\", 0.3722867661453415, 0.5103570873163251]], [[\"TranslateX\", 0.7285989086776543, 0.4809027697099264], [\"TranslateY\", 0.9740807004893643, 0.8241085438636939]], [[\"Posterize\", 0.8721868989693397, 0.5700907310383815], [\"Posterize\", 0.4219074410577852, 0.8032643572845402]], [[\"Contrast\", 0.9811380092558266, 0.8498397471632105], [\"Sharpness\", 0.8380884329421594, 0.18351306571903125]], [[\"TranslateY\", 0.3878939366762001, 0.4699103438753077], [\"Invert\", 0.6055556353233807, 0.8774727658400134]], [[\"TranslateY\", 0.052317005261018346, 0.39471450378745787], [\"ShearX\", 0.8612486845942395, 0.28834103278807466]], [[\"Color\", 0.511993351208063, 0.07251427040525904], [\"Solarize\", 0.9898097047354855, 0.299761565689576]], [[\"Equalize\", 0.2721248231619904, 0.6870975927455507], [\"Cutout\", 0.8787327242363994, 0.06228061428917098]], [[\"Invert\", 0.8931880335225408, 0.49720931867378193], [\"Posterize\", 0.9619698792159256, 0.17859639696940088]], [[\"Posterize\", 0.0061688075074411985, 0.08082938731035938], [\"Brightness\", 0.27745128028826993, 0.8638528796903816]], [[\"ShearY\", 0.9140200609222026, 0.8240421430867707], [\"Invert\", 0.651734417415332, 0.08871906369930926]], [[\"Color\", 0.45585010413511196, 0.44705070078574316], [\"Color\", 0.26394624901633146, 0.11242877788650807]], [[\"ShearY\", 0.9200278466372522, 0.2995901331149652], 
[\"Cutout\", 0.8445407215116278, 0.7410524214287446]], [[\"ShearY\", 0.9950483746990132, 0.112964468262847], [\"ShearY\", 0.4118332303218585, 0.44839613407553636]], [[\"Contrast\", 0.7905821952255192, 0.23360046159385106], [\"Posterize\", 0.8611787233956044, 0.8984260048943528]], [[\"TranslateY\", 0.21448061359312853, 0.8228112806838331], [\"Contrast\", 0.8992297266152983, 0.9179231590570998]], [[\"Invert\", 0.3924194798946006, 0.31830516468371495], [\"Rotate\", 0.8399556845248508, 0.3764892022932781]], [[\"Cutout\", 0.7037916990046816, 0.9214620769502728], [\"AutoContrast\", 0.02913794613018239, 0.07808607528954048]], [[\"ShearY\", 0.6041490474263381, 0.6094184590800105], [\"Equalize\", 0.2932954517354919, 0.5840888946081727]], [[\"ShearX\", 0.6056801676269449, 0.6948580442549543], [\"Cutout\", 0.3028001021044615, 0.15117101733894078]], [[\"Brightness\", 0.8011486803860253, 0.18864079729374195], [\"Solarize\", 0.014965327213230961, 0.8842620292527029]], [[\"Invert\", 0.902244007904273, 0.5634673798052033], [\"Equalize\", 0.13422913507398349, 0.4110956745883727]], [[\"TranslateY\", 0.9981773319103838, 0.09568550987216096], [\"Color\", 0.7627662124105109, 0.8494409737419493]], [[\"Cutout\", 0.3013527640416782, 0.03377226729898486], [\"ShearX\", 0.5727964831614619, 0.8784196638222834]], [[\"TranslateX\", 0.6050722426803684, 0.3650103962378708], [\"TranslateX\", 0.8392084589130886, 0.6479816470292911]], [[\"Rotate\", 0.5032806606500023, 0.09276980118866307], [\"TranslateY\", 0.7800234515261191, 0.18896454379343308]], [[\"Invert\", 0.9266027256244017, 0.8246111062199752], [\"Contrast\", 0.12112023357797697, 0.33870762271759436]], [[\"Brightness\", 0.8688784756993134, 0.17263759696106606], [\"ShearX\", 0.5133700431071326, 0.6686811994542494]], [[\"Invert\", 0.8347840440941976, 0.03774897445901726], [\"Brightness\", 0.24925057499276548, 0.04293631677355758]], [[\"Color\", 0.5998145279485104, 0.4820093200092529], [\"TranslateY\", 0.6709586184077769, 0.07377334081382858]], 
[[\"AutoContrast\", 0.7898846202957984, 0.325293526672498], [\"Contrast\", 0.5156435596826767, 0.2889223168660645]], [[\"ShearX\", 0.08147389674998307, 0.7978924681113669], [\"Contrast\", 0.7270003309106291, 0.009571215234092656]], [[\"Sharpness\", 0.417607614440786, 0.9532566433338661], [\"Posterize\", 0.7186586546796782, 0.6936509907073302]], [[\"ShearX\", 0.9555300215926675, 0.1399385550263872], [\"Color\", 0.9981041061848231, 0.5037462398323248]], [[\"Equalize\", 0.8003487831375474, 0.5413759363796945], [\"ShearY\", 0.0026607045117773565, 0.019262273030984933]], [[\"TranslateY\", 0.04845391502469176, 0.10063445212118283], [\"Cutout\", 0.8273170186786745, 0.5045257728554577]], [[\"TranslateX\", 0.9690985344978033, 0.505202991815533], [\"TranslateY\", 0.7255326592928096, 0.02103609500701631]], [[\"Solarize\", 0.4030771176836736, 0.8424237871457034], [\"Cutout\", 0.28705805963928965, 0.9601617893682582]], [[\"Sharpness\", 0.16865290353070606, 0.6899673563468826], [\"Posterize\", 0.3985430034869616, 0.6540651997730774]], [[\"ShearY\", 0.21395578485362032, 0.09519358818949009], [\"Solarize\", 0.6692821708524135, 0.6462523623552485]], [[\"AutoContrast\", 0.912360598054091, 0.029800239085051583], [\"Invert\", 0.04319256403746308, 0.7712501517098587]], [[\"ShearY\", 0.9081969961839055, 0.4581560239984739], [\"AutoContrast\", 0.5313894814729159, 0.5508393335751848]], [[\"ShearY\", 0.860528568424097, 0.8196987216301588], [\"Posterize\", 0.41134650331494205, 0.3686632018978778]], [[\"AutoContrast\", 0.8753670810078598, 0.3679438326304749], [\"Invert\", 0.010444228965415858, 0.9581244779208277]], [[\"Equalize\", 0.07071836206680682, 0.7173594756186462], [\"Brightness\", 0.06111434312497388, 0.16175064669049277]], [[\"AutoContrast\", 0.10522219073562122, 0.9768776621069855], [\"TranslateY\", 0.2744795945215529, 0.8577967957127298]], [[\"AutoContrast\", 0.7628146493166175, 0.996157376418147], [\"Contrast\", 0.9255565598518469, 0.6826126662976868]], [[\"TranslateX\", 
0.017225816199011312, 0.2470332491402908], [\"Solarize\", 0.44048494909493807, 0.4492422515972162]], [[\"ShearY\", 0.38885252627795064, 0.10272256704901939], [\"Equalize\", 0.686154959829183, 0.8973517148655337]], [[\"Rotate\", 0.29628991573592967, 0.16639926575004715], [\"ShearX\", 0.9013782324726413, 0.0838318162771563]], [[\"Color\", 0.04968391374688563, 0.6138600739645352], [\"Invert\", 0.11177127838716283, 0.10650198522261578]], [[\"Invert\", 0.49655016367624016, 0.8603374164829688], [\"ShearY\", 0.40625439617553727, 0.4516437918820778]], [[\"TranslateX\", 0.15015718916062992, 0.13867777502116208], [\"Brightness\", 0.3374464418810188, 0.7613355669536931]], [[\"Invert\", 0.644644393321966, 0.19005804481199562], [\"AutoContrast\", 0.2293259789431853, 0.30335723256340186]], [[\"Solarize\", 0.004968793254801596, 0.5370892072646645], [\"Contrast\", 0.9136902637865596, 0.9510587477779084]], [[\"Rotate\", 0.38991518440867123, 0.24796987467455756], [\"Sharpness\", 0.9911180315669776, 0.5265657122981591]], [[\"Solarize\", 0.3919646484436238, 0.6814994037194909], [\"Sharpness\", 0.4920838987787103, 0.023425724294012018]], [[\"TranslateX\", 0.25107587874378867, 0.5414936560189212], [\"Cutout\", 0.7932919623814599, 0.9891303444820169]], [[\"Brightness\", 0.07863012174272999, 0.045175652208389594], [\"Solarize\", 0.889609658064552, 0.8228793315963948]], [[\"Cutout\", 0.20477096178169596, 0.6535063675027364], [\"ShearX\", 0.9216318577173639, 0.2908690977359947]], [[\"Contrast\", 0.7035118947423187, 0.45982709058312454], [\"Contrast\", 0.7130268070749464, 0.8635123354235471]], [[\"Sharpness\", 0.26319477541228997, 0.7451278726847078], [\"Rotate\", 0.8170499362173754, 0.13998593411788207]], [[\"Rotate\", 0.8699365715164192, 0.8878057721750832], [\"Equalize\", 0.06682350555715044, 0.7164702080630689]], [[\"ShearY\", 0.3137466057521987, 0.6747433496011368], [\"Rotate\", 0.42118828936218133, 0.980121180104441]], [[\"Solarize\", 0.8470375049950615, 0.15287589264139223], 
[\"Cutout\", 0.14438435054693055, 0.24296463267973512]], [[\"TranslateY\", 0.08822241792224905, 0.36163911974799356], [\"TranslateY\", 0.11729726813270003, 0.6230889726445291]], [[\"ShearX\", 0.7720112337718541, 0.2773292905760122], [\"Sharpness\", 0.756290929398613, 0.27830353710507705]], [[\"Color\", 0.33825031007968287, 0.4657590047522816], [\"ShearY\", 0.3566628994713067, 0.859750504071925]], [[\"TranslateY\", 0.06830147433378053, 0.9348778582086664], [\"TranslateX\", 0.15509346516378553, 0.26320778885339435]], [[\"Posterize\", 0.20266751150740858, 0.008351463842578233], [\"Sharpness\", 0.06506971109417259, 0.7294471760284555]], [[\"TranslateY\", 0.6278911394418829, 0.8702181892620695], [\"Invert\", 0.9367073860264247, 0.9219230428944211]], [[\"Sharpness\", 0.1553425337673321, 0.17601557714491345], [\"Solarize\", 0.7040449681338888, 0.08764313147327729]], [[\"Equalize\", 0.6082233904624664, 0.4177428549911376], [\"AutoContrast\", 0.04987405274618151, 0.34516208204700916]], [[\"Brightness\", 0.9616085936167699, 0.14561237331885468], [\"Solarize\", 0.8927707736296572, 0.31176907850205704]], [[\"Brightness\", 0.6707778304730988, 0.9046457117525516], [\"Brightness\", 0.6801448953060988, 0.20015313057149042]], [[\"Color\", 0.8292680845499386, 0.5181603879593888], [\"Brightness\", 0.08549161770369762, 0.6567870536463203]], [[\"ShearY\", 0.267802208078051, 0.8388133819588173], [\"Sharpness\", 0.13453409120796123, 0.10028351311149486]], [[\"Posterize\", 0.775796593610272, 0.05359034561289766], [\"Cutout\", 0.5067360625733027, 0.054451986840317934]], [[\"TranslateX\", 0.5845238647690084, 0.7507147553486293], [\"Brightness\", 0.2642051786121197, 0.2578358927056452]], [[\"Cutout\", 0.10787517610922692, 0.8147986902794228], [\"Contrast\", 0.2190149206329539, 0.902210615462459]], [[\"TranslateX\", 0.5663614214181296, 0.05309965916414028], [\"ShearX\", 0.9682797885154938, 0.41791929533938466]], [[\"ShearX\", 0.2345325577621098, 0.383780128037189], [\"TranslateX\", 
0.7298083748149163, 0.644325797667087]], [[\"Posterize\", 0.5138725709682734, 0.7901809917259563], [\"AutoContrast\", 0.7966018627776853, 0.14529337543427345]], [[\"Invert\", 0.5973031989249785, 0.417399314592829], [\"Solarize\", 0.9147539948653116, 0.8221272315548086]], [[\"Posterize\", 0.601596043336383, 0.18969646160963938], [\"Color\", 0.7527275484079655, 0.431793831326888]], [[\"Equalize\", 0.6731483454430538, 0.7866786558207602], [\"TranslateX\", 0.97574396899191, 0.5970255778044692]], [[\"Cutout\", 0.15919495850169718, 0.8916094305850562], [\"Invert\", 0.8351348834751027, 0.4029937360314928]], [[\"Invert\", 0.5894085405226027, 0.7283806854157764], [\"Brightness\", 0.3973976860470554, 0.949681121498567]], [[\"AutoContrast\", 0.3707914135327408, 0.21192068592079616], [\"ShearX\", 0.28040127351140676, 0.6754553511344856]], [[\"Solarize\", 0.07955132378694896, 0.15073572961927306], [\"ShearY\", 0.5735850168851625, 0.27147326850217746]], [[\"Equalize\", 0.678653949549764, 0.8097796067861455], [\"Contrast\", 0.2283048527510083, 0.15507804874474185]], [[\"Equalize\", 0.286013868374536, 0.186785848694501], [\"Posterize\", 0.16319021740810458, 0.1201304443285659]], [[\"Sharpness\", 0.9601590830563757, 0.06267915026513238], [\"AutoContrast\", 0.3813920685124327, 0.294224403296912]], [[\"Brightness\", 0.2703246632402241, 0.9168405377492277], [\"ShearX\", 0.6156009855831097, 0.4955986055846403]], [[\"Color\", 0.9065504424987322, 0.03393612216080133], [\"ShearY\", 0.6768595880405884, 0.9981068127818191]], [[\"Equalize\", 0.28812842368483904, 0.300387487349145], [\"ShearY\", 0.28812248704858345, 0.27105076231533964]], [[\"Brightness\", 0.6864882730513477, 0.8205553299102412], [\"Cutout\", 0.45995236371265424, 0.5422030370297759]], [[\"Color\", 0.34941404877084326, 0.25857961830158516], [\"AutoContrast\", 0.3451390878441899, 0.5000938249040454]], [[\"Invert\", 0.8268247541815854, 0.6691380821226468], [\"Cutout\", 0.46489193601530476, 0.22620873109485895]], [[\"Rotate\", 
0.17879730528062376, 0.22670425330593935], [\"Sharpness\", 0.8692795688221834, 0.36586055020855723]], [[\"Brightness\", 0.31203975139659634, 0.6934046293010939], [\"Cutout\", 0.31649437872271236, 0.08078625004157935]], [[\"Cutout\", 0.3119482836150119, 0.6397160035509996], [\"Contrast\", 0.8311248624784223, 0.22897510169718616]], [[\"TranslateX\", 0.7631157841429582, 0.6482890521284557], [\"Brightness\", 0.12681196272427664, 0.3669813784257344]], [[\"TranslateX\", 0.06027722649179801, 0.3101104512201861], [\"Sharpness\", 0.5652076706249394, 0.05210008400968136]], [[\"AutoContrast\", 0.39213552101583127, 0.5047021194355596], [\"ShearY\", 0.7164003055682187, 0.8063370761002899]], [[\"Solarize\", 0.9574307011238342, 0.21472064809226854], [\"AutoContrast\", 0.8102612285047174, 0.716870148067014]], [[\"Rotate\", 0.3592634277567387, 0.6452602893051465], [\"AutoContrast\", 0.27188430331411506, 0.06003099168464854]], [[\"Cutout\", 0.9529536554825503, 0.5285505311027461], [\"Solarize\", 0.08478231903311029, 0.15986449762728216]], [[\"TranslateY\", 0.31176130458018936, 0.5642853506158253], [\"Equalize\", 0.008890883901317648, 0.5146121040955942]], [[\"Color\", 0.40773645085566157, 0.7110398926612682], [\"Color\", 0.18233100156439364, 0.7830036002758337]], [[\"Posterize\", 0.5793809197821732, 0.043748553135581236], [\"Invert\", 0.4479962016131668, 0.7349663010359488]], [[\"TranslateX\", 0.1994882312299382, 0.05216859488899439], [\"Rotate\", 0.48288726352035416, 0.44713829026777585]], [[\"Posterize\", 0.22122838185154603, 0.5034546841241283], [\"TranslateX\", 0.2538745835410222, 0.6129055170893385]], [[\"Color\", 0.6786559960640814, 0.4529749369803212], [\"Equalize\", 0.30215879674415336, 0.8733394611096772]], [[\"Contrast\", 0.47316062430673456, 0.46669538897311447], [\"Invert\", 0.6514906551984854, 0.3053339444067804]], [[\"Equalize\", 0.6443202625334524, 0.8689731394616441], [\"Color\", 0.7549183794057628, 0.8889001426329578]], [[\"Solarize\", 0.616709740662654, 
0.7792180816399313], [\"ShearX\", 0.9659155537406062, 0.39436937531179495]], [[\"Equalize\", 0.23694011299406226, 0.027711152164392128], [\"TranslateY\", 0.1677339686527083, 0.3482126536808231]], [[\"Solarize\", 0.15234175951790285, 0.7893840414281341], [\"TranslateX\", 0.2396395768284183, 0.27727219214979715]], [[\"Contrast\", 0.3792017455380605, 0.32323660409845334], [\"Contrast\", 0.1356037413846466, 0.9127772969992305]], [[\"ShearX\", 0.02642732222284716, 0.9184662576502115], [\"Equalize\", 0.11504884472142995, 0.8957638893097964]], [[\"TranslateY\", 0.3193812913345325, 0.8828100030493128], [\"ShearY\", 0.9374975727563528, 0.09909415611083694]], [[\"AutoContrast\", 0.025840721736048122, 0.7941037581373024], [\"TranslateY\", 0.498518003323313, 0.5777122846572548]], [[\"ShearY\", 0.6042199307830248, 0.44809668754508836], [\"Cutout\", 0.3243978207701482, 0.9379740926294765]], [[\"ShearY\", 0.6858549297583574, 0.9993252035788924], [\"Sharpness\", 0.04682428732773203, 0.21698099707915652]], [[\"ShearY\", 0.7737469436637263, 0.8810127181224531], [\"ShearY\", 0.8995655445246451, 0.4312416220354539]], [[\"TranslateY\", 0.4953094136709374, 0.8144161580138571], [\"Solarize\", 0.26301211718928097, 0.518345311180405]], [[\"Brightness\", 0.8820246486031275, 0.571075863786249], [\"ShearX\", 0.8586669146703955, 0.0060476383595142735]], [[\"Sharpness\", 0.20519233710982254, 0.6144574759149729], [\"Posterize\", 0.07976625267460813, 0.7480145046726968]], [[\"ShearY\", 0.374075419680195, 0.3386105402023202], [\"ShearX\", 0.8228083637082115, 0.5885174783155361]], [[\"Brightness\", 0.3528780713814561, 0.6999884884306623], [\"Sharpness\", 0.3680348120526238, 0.16953358258959617]], [[\"Brightness\", 0.24891223104442084, 0.7973853494920095], [\"TranslateX\", 0.004256803835524736, 0.0470216343108546]], [[\"Posterize\", 0.1947344282646012, 0.7694802711054367], [\"Cutout\", 0.9594385534844785, 0.5469744140592429]], [[\"Invert\", 0.19012504762806026, 0.7816140211434693], [\"TranslateY\", 
0.17479746932338402, 0.024249345245078602]], [[\"Rotate\", 0.9669262055946796, 0.510166180775991], [\"TranslateX\", 0.8990602034610352, 0.6657802719304693]], [[\"ShearY\", 0.5453049050407278, 0.8476872739603525], [\"Cutout\", 0.14226529093962592, 0.15756960661106634]], [[\"Equalize\", 0.5895291156113004, 0.6797218994447763], [\"TranslateY\", 0.3541442192192753, 0.05166001155849864]], [[\"Equalize\", 0.39530681662726097, 0.8448335365081087], [\"Brightness\", 0.6785483272734143, 0.8805568647038574]], [[\"Cutout\", 0.28633258271917905, 0.7750870268336066], [\"Equalize\", 0.7221097824537182, 0.5865506280531162]], [[\"Posterize\", 0.9044429629421187, 0.4620266401793388], [\"Invert\", 0.1803008045494473, 0.8073190766288534]], [[\"Sharpness\", 0.7054649148075851, 0.3877207948962055], [\"TranslateX\", 0.49260224225927285, 0.8987462620731029]], [[\"Sharpness\", 0.11196934729294483, 0.5953704422694938], [\"Contrast\", 0.13969334315069737, 0.19310569898434204]], [[\"Posterize\", 0.5484346101051778, 0.7914140118600685], [\"Brightness\", 0.6428044691630473, 0.18811316670808076]], [[\"Invert\", 0.22294834094984717, 0.05173157689962704], [\"Cutout\", 0.6091129168510456, 0.6280845506243643]], [[\"AutoContrast\", 0.5726444076195267, 0.2799840903601295], [\"Cutout\", 0.3055752727786235, 0.591639807512993]], [[\"Brightness\", 0.3707116723204462, 0.4049175910826627], [\"Rotate\", 0.4811601625588309, 0.2710760253723644]], [[\"ShearY\", 0.627791719653608, 0.6877498291550205], [\"TranslateX\", 0.8751753308366824, 0.011164650018719358]], [[\"Posterize\", 0.33832547954522263, 0.7087039872581657], [\"Posterize\", 0.6247474435007484, 0.7707784192114796]], [[\"Contrast\", 0.17620186308493468, 0.9946224854942095], [\"Solarize\", 0.5431896088395964, 0.5867904203742308]], [[\"ShearX\", 0.4667959516719652, 0.8938082224109446], [\"TranslateY\", 0.7311343008292865, 0.6829842246020277]], [[\"ShearX\", 0.6130281467237769, 0.9924010909612302], [\"Brightness\", 0.41039241699696916, 
0.9753218875311392]], [[\"TranslateY\", 0.0747250386427123, 0.34602725521067534], [\"Rotate\", 0.5902597465515901, 0.361094672021087]], [[\"Invert\", 0.05234890878959486, 0.36914978664919407], [\"Sharpness\", 0.42140532878231374, 0.19204058551048275]], [[\"ShearY\", 0.11590485361909497, 0.6518540857972316], [\"Invert\", 0.6482444740361704, 0.48256237896163945]], [[\"Rotate\", 0.4931329446923608, 0.037076242417301675], [\"Contrast\", 0.9097939772412852, 0.5619594905306389]], [[\"Posterize\", 0.7311032479626216, 0.4796364593912915], [\"Color\", 0.13912123993932402, 0.03997286439663705]], [[\"AutoContrast\", 0.6196602944085344, 0.2531430457527588], [\"Rotate\", 0.5583937060431972, 0.9893379795224023]], [[\"AutoContrast\", 0.8847753125072959, 0.19123028952580057], [\"TranslateY\", 0.494361716097206, 0.14232297727461696]], [[\"Invert\", 0.6212360716340707, 0.033898871473033165], [\"AutoContrast\", 0.30839896957008295, 0.23603569542166247]], [[\"Equalize\", 0.8255583546605049, 0.613736933157845], [\"AutoContrast\", 0.6357166629525485, 0.7894617347709095]], [[\"Brightness\", 0.33840706322846814, 0.07917167871493658], [\"ShearY\", 0.15693175752528676, 0.6282773652129153]], [[\"Cutout\", 0.7550520024859294, 0.08982367300605598], [\"ShearX\", 0.5844942417320858, 0.36051195083380105]]]\n    return p\n\n\ndef fa_resnet50_rimagenet():\n    p = [[[\"ShearY\", 0.14143816458479197, 0.513124791615952], [\"Sharpness\", 0.9290316227291179, 0.9788406212603302]], [[\"Color\", 0.21502874228385338, 0.3698477943880306], [\"TranslateY\", 0.49865058747734736, 0.4352676987103321]], [[\"Brightness\", 0.6603452126485386, 0.6990174510500261], [\"Cutout\", 0.7742953773992511, 0.8362550883640804]], [[\"Posterize\", 0.5188375788270497, 0.9863648925446865], [\"TranslateY\", 0.8365230108655313, 0.6000972236440252]], [[\"ShearY\", 0.9714994964711299, 0.2563663552809896], [\"Equalize\", 0.8987567223581153, 0.1181761775609772]], [[\"Sharpness\", 0.14346409304565366, 0.5342189791746006], [\"Sharpness\", 
0.1219714162835897, 0.44746801278319975]], [[\"TranslateX\", 0.08089260772173967, 0.028011721602479833], [\"TranslateX\", 0.34767877352421406, 0.45131294688688794]], [[\"Brightness\", 0.9191164585327378, 0.5143232242627864], [\"Color\", 0.9235247849934283, 0.30604586249462173]], [[\"Contrast\", 0.4584173187505879, 0.40314219914942756], [\"Rotate\", 0.550289356406774, 0.38419022293237126]], [[\"Posterize\", 0.37046156420799325, 0.052693291117634544], [\"Cutout\", 0.7597581409366909, 0.7535799791937421]], [[\"Color\", 0.42583964114658746, 0.6776641859552079], [\"ShearY\", 0.2864805671096011, 0.07580175477739545]], [[\"Brightness\", 0.5065952125552232, 0.5508640233704984], [\"Brightness\", 0.4760021616081475, 0.3544313318097987]], [[\"Posterize\", 0.5169630851995185, 0.9466018906715961], [\"Posterize\", 0.5390336503396841, 0.1171015788193209]], [[\"Posterize\", 0.41153170909576176, 0.7213063942615204], [\"Rotate\", 0.6232230424824348, 0.7291984098675746]], [[\"Color\", 0.06704687234714028, 0.5278429246040438], [\"Sharpness\", 0.9146652195810183, 0.4581415618941407]], [[\"ShearX\", 0.22404644446773492, 0.6508620171913467], [\"Brightness\", 0.06421961538672451, 0.06859528721039095]], [[\"Rotate\", 0.29864103693134797, 0.5244313199644495], [\"Sharpness\", 0.4006161706584276, 0.5203708477368657]], [[\"AutoContrast\", 0.5748186910788027, 0.8185482599354216], [\"Posterize\", 0.9571441684265188, 0.1921474117448481]], [[\"ShearY\", 0.5214786760436251, 0.8375629059785009], [\"Invert\", 0.6872393349333636, 0.9307694335024579]], [[\"Contrast\", 0.47219838080793364, 0.8228524484275648], [\"TranslateY\", 0.7435518856840543, 0.5888865560614439]], [[\"Posterize\", 0.10773482839638836, 0.6597021018893648], [\"Contrast\", 0.5218466423129691, 0.562985661685268]], [[\"Rotate\", 0.4401753067886466, 0.055198255925702475], [\"Rotate\", 0.3702153509335602, 0.5821574425474759]], [[\"TranslateY\", 0.6714729117832363, 0.7145542887432927], [\"Equalize\", 0.0023263758097700205, 
0.25837341854887885]], [[\"Cutout\", 0.3159707561240235, 0.19539664199170742], [\"TranslateY\", 0.8702824829864558, 0.5832348977243467]], [[\"AutoContrast\", 0.24800812729140026, 0.08017301277245716], [\"Brightness\", 0.5775505849482201, 0.4905904775616114]], [[\"Color\", 0.4143517886294533, 0.8445937742921498], [\"ShearY\", 0.28688910858536587, 0.17539366839474402]], [[\"Brightness\", 0.6341134194059947, 0.43683815933640435], [\"Brightness\", 0.3362277685899835, 0.4612826163288225]], [[\"Sharpness\", 0.4504035748829761, 0.6698294470467474], [\"Posterize\", 0.9610055612671645, 0.21070714173174876]], [[\"Posterize\", 0.19490421920029832, 0.7235798208354267], [\"Rotate\", 0.8675551331308305, 0.46335565746433094]], [[\"Color\", 0.35097958351003306, 0.42199181561523186], [\"Invert\", 0.914112788087429, 0.44775583211984815]], [[\"Cutout\", 0.223575616055454, 0.6328591417299063], [\"TranslateY\", 0.09269465212259387, 0.5101073959070608]], [[\"Rotate\", 0.3315734525975911, 0.9983593458299167], [\"Sharpness\", 0.12245416662856974, 0.6258689139914664]], [[\"ShearY\", 0.696116760180471, 0.6317805202283014], [\"Color\", 0.847501151593963, 0.4440116609830195]], [[\"Solarize\", 0.24945891607225948, 0.7651150206105561], [\"Cutout\", 0.7229677092930331, 0.12674657348602494]], [[\"TranslateX\", 0.43461945065713675, 0.06476571036747841], [\"Color\", 0.6139316940180952, 0.7376264330632316]], [[\"Invert\", 0.1933003530637138, 0.4497819016184308], [\"Invert\", 0.18391634069983653, 0.3199769100951113]], [[\"Color\", 0.20418296626476137, 0.36785101882029814], [\"Posterize\", 0.624658293920083, 0.8390081535735991]], [[\"Sharpness\", 0.5864963540530814, 0.586672446690273], [\"Posterize\", 0.1980280647652339, 0.222114611452575]], [[\"Invert\", 0.3543654961628104, 0.5146369635250309], [\"Equalize\", 0.40751271919434434, 0.4325310837291978]], [[\"ShearY\", 0.22602859359451877, 0.13137880879778158], [\"Posterize\", 0.7475029061591305, 0.803900538461099]], [[\"Sharpness\", 0.12426276165599924, 
0.5965912716602046], [\"Invert\", 0.22603903038966913, 0.4346802001255868]], [[\"TranslateY\", 0.010307035630661765, 0.16577665156754046], [\"Posterize\", 0.4114319141395257, 0.829872913683949]], [[\"TranslateY\", 0.9353069865746215, 0.5327821671247214], [\"Color\", 0.16990443486261103, 0.38794866007484197]], [[\"Cutout\", 0.1028174322829021, 0.3955952903458266], [\"ShearY\", 0.4311995281335693, 0.48024695395374734]], [[\"Posterize\", 0.1800334334284686, 0.0548749478418862], [\"Brightness\", 0.7545808536793187, 0.7699080551646432]], [[\"Color\", 0.48695305373084197, 0.6674269768464615], [\"ShearY\", 0.4306032279086781, 0.06057690550239343]], [[\"Brightness\", 0.4919399683825053, 0.677338905806407], [\"Brightness\", 0.24112708387760828, 0.42761103121157656]], [[\"Posterize\", 0.4434818644882532, 0.9489450593207714], [\"Posterize\", 0.40957675116385955, 0.015664946759584186]], [[\"Posterize\", 0.41307949855153797, 0.6843276552020272], [\"Rotate\", 0.8003545094091291, 0.7002300783416026]], [[\"Color\", 0.7038570031770905, 0.4697612983649519], [\"Sharpness\", 0.9700016496081002, 0.25185103545948884]], [[\"AutoContrast\", 0.714641656154856, 0.7962423001719023], [\"Sharpness\", 0.2410097684093468, 0.5919171048019731]], [[\"TranslateX\", 0.8101567644494714, 0.7156447005337443], [\"Solarize\", 0.5634727831229329, 0.8875158446846]], [[\"Sharpness\", 0.5335258857303261, 0.364743126378182], [\"Color\", 0.453280875871377, 0.5621962714743068]], [[\"Cutout\", 0.7423678127672542, 0.7726370777867049], [\"Invert\", 0.2806161382641934, 0.6021111986900146]], [[\"TranslateY\", 0.15190341320343761, 0.3860373175487939], [\"Cutout\", 0.9980805818665679, 0.05332384819400854]], [[\"Posterize\", 0.36518675678786605, 0.2935819027397963], [\"TranslateX\", 0.26586180351840005, 0.303641300745208]], [[\"Brightness\", 0.19994509744377761, 0.90813953707639], [\"Equalize\", 0.8447217761297836, 0.3449396603478335]], [[\"Sharpness\", 0.9294773669936768, 0.999713346583839], [\"Brightness\", 
0.1359744825665662, 0.1658489221872924]], [[\"TranslateX\", 0.11456529257659381, 0.9063795878367734], [\"Equalize\", 0.017438134319894553, 0.15776887259743755]], [[\"ShearX\", 0.9833726383270114, 0.5688194948373335], [\"Equalize\", 0.04975615490994345, 0.8078130016227757]], [[\"Brightness\", 0.2654654830488695, 0.8989789725280538], [\"TranslateX\", 0.3681535065952329, 0.36433345713161036]], [[\"Rotate\", 0.04956524209892327, 0.5371942433238247], [\"ShearY\", 0.0005527499145153714, 0.56082571605602]], [[\"Rotate\", 0.7918337108932019, 0.5906896260060501], [\"Posterize\", 0.8223967034091191, 0.450216998388943]], [[\"Color\", 0.43595106766978337, 0.5253013785221605], [\"Sharpness\", 0.9169421073531799, 0.8439997639348893]], [[\"TranslateY\", 0.20052300197155504, 0.8202662448307549], [\"Sharpness\", 0.2875792108435686, 0.6997181624527842]], [[\"Color\", 0.10568089980973616, 0.3349467065132249], [\"Brightness\", 0.13070947282207768, 0.5757725013960775]], [[\"AutoContrast\", 0.3749999712869779, 0.6665578760607657], [\"Brightness\", 0.8101178402610292, 0.23271946112218125]], [[\"Color\", 0.6473605933679651, 0.7903409763232029], [\"ShearX\", 0.588080941572581, 0.27223524148254086]], [[\"Cutout\", 0.46293361616697304, 0.7107761001833921], [\"AutoContrast\", 0.3063766931658412, 0.8026114219854579]], [[\"Brightness\", 0.7884854981520251, 0.5503669863113797], [\"Brightness\", 0.5832456158675261, 0.5840349298921661]], [[\"Solarize\", 0.4157539625058916, 0.9161905834309929], [\"Sharpness\", 0.30628197221802017, 0.5386291658995193]], [[\"Sharpness\", 0.03329610069672856, 0.17066672983670506], [\"Invert\", 0.9900547302690527, 0.6276238841220477]], [[\"Solarize\", 0.551015648982762, 0.6937104775938737], [\"Color\", 0.8838491591064375, 0.31596634380795385]], [[\"AutoContrast\", 0.16224182418148447, 0.6068227969351896], [\"Sharpness\", 0.9599468096118623, 0.4885289719905087]], [[\"TranslateY\", 0.06576432526133724, 0.6899544605400214], [\"Posterize\", 0.2177096480169678, 
0.9949164789616582]], [[\"Solarize\", 0.529820544480292, 0.7576047224165541], [\"Sharpness\", 0.027047878909321643, 0.45425231553970685]], [[\"Sharpness\", 0.9102526010473146, 0.8311987141993857], [\"Invert\", 0.5191838751826638, 0.6906136644742229]], [[\"Solarize\", 0.4762773516008588, 0.7703654263842423], [\"Color\", 0.8048437792602289, 0.4741523094238038]], [[\"Sharpness\", 0.7095055508594206, 0.7047344238075169], [\"Sharpness\", 0.5059623654132546, 0.6127255499234886]], [[\"TranslateY\", 0.02150725921966186, 0.3515764519224378], [\"Posterize\", 0.12482170119714735, 0.7829851754051393]], [[\"Color\", 0.7983830079184816, 0.6964694521670339], [\"Brightness\", 0.3666527856286296, 0.16093151636495978]], [[\"AutoContrast\", 0.6724982375829505, 0.536777706678488], [\"Sharpness\", 0.43091754837597646, 0.7363240924241439]], [[\"Brightness\", 0.2889770401966227, 0.4556557902380539], [\"Sharpness\", 0.8805303296690755, 0.6262218017754902]], [[\"Sharpness\", 0.5341939854581068, 0.6697109101429343], [\"Rotate\", 0.6806606655137529, 0.4896914517968317]], [[\"Sharpness\", 0.5690509737059344, 0.32790632371915096], [\"Posterize\", 0.7951894258661069, 0.08377850335209162]], [[\"Color\", 0.6124132978216081, 0.5756485920709012], [\"Brightness\", 0.33053544654445344, 0.23321841707002083]], [[\"TranslateX\", 0.0654795026615917, 0.5227246924310244], [\"ShearX\", 0.2932320531132063, 0.6732066478183716]], [[\"Cutout\", 0.6226071187083615, 0.01009274433736012], [\"ShearX\", 0.7176799968189801, 0.3758780240463811]], [[\"Rotate\", 0.18172339508029314, 0.18099184896819184], [\"ShearY\", 0.7862658331645667, 0.295658135767252]], [[\"Contrast\", 0.4156099177015862, 0.7015784500878446], [\"Sharpness\", 0.6454135310009, 0.32335858947955287]], [[\"Color\", 0.6215885089922037, 0.6882673235388836], [\"Brightness\", 0.3539881732605379, 0.39486736455795496]], [[\"Invert\", 0.8164816716866418, 0.7238192000817796], [\"Sharpness\", 0.3876355847343607, 0.9870077619731956]], [[\"Brightness\", 
0.1875628712629315, 0.5068115936257], [\"Sharpness\", 0.8732419122060423, 0.5028019258530066]], [[\"Sharpness\", 0.6140734993408259, 0.6458239834366959], [\"Rotate\", 0.5250107862824867, 0.533419456933602]], [[\"Sharpness\", 0.5710893143725344, 0.15551651073007305], [\"ShearY\", 0.6548487860151722, 0.021365083044319146]], [[\"Color\", 0.7610250354649954, 0.9084452893074055], [\"Brightness\", 0.6934611792619156, 0.4108071412071374]], [[\"ShearY\", 0.07512550098923898, 0.32923768385754293], [\"ShearY\", 0.2559588911696498, 0.7082337365398496]], [[\"Cutout\", 0.5401319018926146, 0.004750568603408445], [\"ShearX\", 0.7473354415031975, 0.34472481968368773]], [[\"Rotate\", 0.02284154583679092, 0.1353450082435801], [\"ShearY\", 0.8192458031684238, 0.2811653613473772]], [[\"Contrast\", 0.21142896718139154, 0.7230739568811746], [\"Sharpness\", 0.6902690582665707, 0.13488436112901683]], [[\"Posterize\", 0.21701219600958138, 0.5900695769640687], [\"Rotate\", 0.7541095031505971, 0.5341162375286219]], [[\"Posterize\", 0.5772853064792737, 0.45808311743269936], [\"Brightness\", 0.14366050177823675, 0.4644871239446629]], [[\"Cutout\", 0.8951718842805059, 0.4970074074310499], [\"Equalize\", 0.3863835903119882, 0.9986531042150006]], [[\"Equalize\", 0.039411354473938925, 0.7475477254908457], [\"Sharpness\", 0.8741966378291861, 0.7304822679596362]], [[\"Solarize\", 0.4908704265218634, 0.5160677350249471], [\"Color\", 0.24961813832742435, 0.09362352627360726]], [[\"Rotate\", 7.870457075154214e-05, 0.8086950025500952], [\"Solarize\", 0.10200484521793163, 0.12312889222989265]], [[\"Contrast\", 0.8052564975559727, 0.3403813036543645], [\"Solarize\", 0.7690158533600184, 0.8234626822018851]], [[\"AutoContrast\", 0.680362728854513, 0.9415320040873628], [\"TranslateY\", 0.5305871824686941, 0.8030609611614028]], [[\"Cutout\", 0.1748050257378294, 0.06565343731910589], [\"TranslateX\", 0.1812738872339903, 0.6254461448344308]], [[\"Brightness\", 0.4230502644722749, 0.3346463682905031], 
[\"ShearX\", 0.19107198973659312, 0.6715789128604919]], [[\"ShearX\", 0.1706528684548394, 0.7816570201200446], [\"TranslateX\", 0.494545185948171, 0.4710810058360291]], [[\"TranslateX\", 0.42356251508933324, 0.23865307292867322], [\"TranslateX\", 0.24407503619326745, 0.6013778508137331]], [[\"AutoContrast\", 0.7719512185744232, 0.3107905373009763], [\"ShearY\", 0.49448082925617176, 0.5777951230577671]], [[\"Cutout\", 0.13026983827940525, 0.30120438757485657], [\"Brightness\", 0.8857896834516185, 0.7731541459513939]], [[\"AutoContrast\", 0.6422800349197934, 0.38637401090264556], [\"TranslateX\", 0.25085431400995084, 0.3170642592664873]], [[\"Sharpness\", 0.22336654455367122, 0.4137774852324138], [\"ShearY\", 0.22446851054920894, 0.518341735882535]], [[\"Color\", 0.2597579403253848, 0.7289643913060193], [\"Sharpness\", 0.5227416670468619, 0.9239943674030637]], [[\"Cutout\", 0.6835337711563527, 0.24777620448593812], [\"AutoContrast\", 0.37260245353051846, 0.4840361183247263]], [[\"Posterize\", 0.32756602788628375, 0.21185124493743707], [\"ShearX\", 0.25431504951763967, 0.19585996561416225]], [[\"AutoContrast\", 0.07930627591849979, 0.5719381348340309], [\"AutoContrast\", 0.335512380071304, 0.4208050118308541]], [[\"Rotate\", 0.2924360268257798, 0.5317629242879337], [\"Sharpness\", 0.4531050021499891, 0.4102650087199528]], [[\"Equalize\", 0.5908862210984079, 0.468742362277498], [\"Brightness\", 0.08571766548550425, 0.5629320703375056]], [[\"Cutout\", 0.52751122383816, 0.7287774744737556], [\"Equalize\", 0.28721628275296274, 0.8075179887475786]], [[\"AutoContrast\", 0.24208377391366226, 0.34616549409607644], [\"TranslateX\", 0.17454707403766834, 0.5278055700078459]], [[\"Brightness\", 0.5511881924749478, 0.999638675514418], [\"Equalize\", 0.14076197797220913, 0.2573030693317552]], [[\"ShearX\", 0.668731433926434, 0.7564253049646743], [\"Color\", 0.63235486543845, 0.43954436063340785]], [[\"ShearX\", 0.40511960873276237, 0.5710419512142979], [\"Contrast\", 
0.9256769948746423, 0.7461350716211649]], [[\"Cutout\", 0.9995917204023061, 0.22908419326246265], [\"TranslateX\", 0.5440902956629469, 0.9965570051216295]], [[\"Color\", 0.22552987172228894, 0.4514558960849747], [\"Sharpness\", 0.638058150559443, 0.9987829481002615]], [[\"Contrast\", 0.5362775837534763, 0.7052133185951871], [\"ShearY\", 0.220369845547023, 0.7593922994775721]], [[\"ShearX\", 0.0317785822935219, 0.775536785253455], [\"TranslateX\", 0.7939510227015061, 0.5355620618496535]], [[\"Cutout\", 0.46027969917602196, 0.31561199122527517], [\"Color\", 0.06154066467629451, 0.5384660000729091]], [[\"Sharpness\", 0.7205483743301113, 0.552222392539886], [\"Posterize\", 0.5146496404711752, 0.9224333144307473]], [[\"ShearX\", 0.00014547730356910538, 0.3553954298642108], [\"TranslateY\", 0.9625736029090676, 0.57403418640424]], [[\"Posterize\", 0.9199917903297341, 0.6690259107633706], [\"Posterize\", 0.0932558110217602, 0.22279303372106138]], [[\"Invert\", 0.25401453476874863, 0.3354329544078385], [\"Posterize\", 0.1832673201325652, 0.4304718799821412]], [[\"TranslateY\", 0.02084122674367607, 0.12826181437197323], [\"ShearY\", 0.655862534043703, 0.3838330909470975]], [[\"Contrast\", 0.35231797644104523, 0.3379356652070079], [\"Cutout\", 0.19685599014304822, 0.1254328595280942]], [[\"Sharpness\", 0.18795594984191433, 0.09488678946484895], [\"ShearX\", 0.33332876790679306, 0.633523782574133]], [[\"Cutout\", 0.28267175940290246, 0.7901991550267817], [\"Contrast\", 0.021200195312951198, 0.4733128702798515]], [[\"ShearX\", 0.966231043411256, 0.7700673327786812], [\"TranslateX\", 0.7102390777763321, 0.12161245817120675]], [[\"Cutout\", 0.5183324259533826, 0.30766086003013055], [\"Color\", 0.48399078150128927, 0.4967477809069189]], [[\"Sharpness\", 0.8160855187385873, 0.47937658961644], [\"Posterize\", 0.46360395447862535, 0.7685454058155061]], [[\"ShearX\", 0.10173571421694395, 0.3987290690178754], [\"TranslateY\", 0.8939980277379345, 0.5669994143735713]], [[\"Posterize\", 
0.6768089584801844, 0.7113149244621721], [\"Posterize\", 0.054896856043358935, 0.3660837250743921]], [[\"AutoContrast\", 0.5915576211896306, 0.33607718177676493], [\"Contrast\", 0.3809408206617828, 0.5712201773913784]], [[\"AutoContrast\", 0.012321347472748323, 0.06379072432796573], [\"Rotate\", 0.0017964439160045656, 0.7598026295973337]], [[\"Contrast\", 0.6007100085192627, 0.36171972473370206], [\"Invert\", 0.09553573684975913, 0.12218510774295901]], [[\"AutoContrast\", 0.32848604643836266, 0.2619457656206414], [\"Invert\", 0.27082113532501784, 0.9967965642293485]], [[\"AutoContrast\", 0.6156282120903395, 0.9422706516080884], [\"Sharpness\", 0.4215509247379262, 0.4063347716503587]], [[\"Solarize\", 0.25059210436331264, 0.7215305521159305], [\"Invert\", 0.1654465185253614, 0.9605851884186778]], [[\"AutoContrast\", 0.4464438610980994, 0.685334175815482], [\"Cutout\", 0.24358625461158645, 0.4699066834058694]], [[\"Rotate\", 0.5931657741857909, 0.6813978655574067], [\"AutoContrast\", 0.9259100547738681, 0.4903201223870492]], [[\"Color\", 0.8203976071280751, 0.9777824466585101], [\"Posterize\", 0.4620669369254169, 0.2738895968716055]], [[\"Contrast\", 0.13754352055786848, 0.3369433962088463], [\"Posterize\", 0.48371187792441916, 0.025718004361451302]], [[\"Rotate\", 0.5208233630704999, 0.1760188899913535], [\"TranslateX\", 0.49753461392937226, 0.4142935276250922]], [[\"Cutout\", 0.5967418240931212, 0.8028675552639539], [\"Cutout\", 0.20021854152659121, 0.19426330549590076]], [[\"ShearY\", 0.549583567386676, 0.6601326640171705], [\"Cutout\", 0.6111813470383047, 0.4141935587984994]], [[\"Brightness\", 0.6354891977535064, 0.31591459747846745], [\"AutoContrast\", 0.7853952208711621, 0.6555861906702081]], [[\"AutoContrast\", 0.7333725370546154, 0.9919410576081586], [\"Cutout\", 0.9984177877923588, 0.2938253683694291]], [[\"Color\", 0.33219296307742263, 0.6378995578424113], [\"AutoContrast\", 0.15432820754183288, 0.7897899838932103]], [[\"Contrast\", 0.5905289460222578, 
0.8158577207653422], [\"Cutout\", 0.3980284381203051, 0.43030531250317217]], [[\"TranslateX\", 0.452093693346745, 0.5251475931559115], [\"Rotate\", 0.991422504871258, 0.4556503729269001]], [[\"Color\", 0.04560406292983776, 0.061574671308480766], [\"Brightness\", 0.05161079440128734, 0.6718398142425688]], [[\"Contrast\", 0.02913302416506853, 0.14402056093217708], [\"Rotate\", 0.7306930378774588, 0.47088249057922094]], [[\"Solarize\", 0.3283072384190169, 0.82680847744367], [\"Invert\", 0.21632614168418854, 0.8792241691482687]], [[\"Equalize\", 0.4860808352478527, 0.9440534949023064], [\"Cutout\", 0.31395897639184694, 0.41805859306017523]], [[\"Rotate\", 0.2816043232522335, 0.5451282807926706], [\"Color\", 0.7388520447173302, 0.7706503658143311]], [[\"Color\", 0.9342776719536201, 0.9039981381514299], [\"Rotate\", 0.6646389177840164, 0.5147917008383647]], [[\"Cutout\", 0.08929430082050335, 0.22416445996932374], [\"Posterize\", 0.454485751267457, 0.500958345348237]], [[\"TranslateX\", 0.14674201106374488, 0.7018633472428202], [\"Sharpness\", 0.6128796723832848, 0.743535235614809]], [[\"TranslateX\", 0.5189900164469432, 0.6491132403587601], [\"Contrast\", 0.26309555778227806, 0.5976857969656114]], [[\"Solarize\", 0.23569808291972655, 0.3315781686591778], [\"ShearY\", 0.07292078937544964, 0.7460326987587573]], [[\"ShearY\", 0.7090542757477153, 0.5246437008439621], [\"Sharpness\", 0.9666919148538443, 0.4841687888767071]], [[\"Solarize\", 0.3486952615189488, 0.7012877201721799], [\"Invert\", 0.1933387967311534, 0.9535472742828175]], [[\"AutoContrast\", 0.5393460721514914, 0.6924005011697713], [\"Cutout\", 0.16988156769247176, 0.3667207571712882]], [[\"Rotate\", 0.5815329514554719, 0.5390406879316949], [\"AutoContrast\", 0.7370538341589625, 0.7708822194197815]], [[\"Color\", 0.8463701017918459, 0.9893491045831084], [\"Invert\", 0.06537367901579016, 0.5238468509941635]], [[\"Contrast\", 0.8099771812443645, 0.39371603893945184], [\"Posterize\", 0.38273629875646487, 
0.46493786058573966]], [[\"Color\", 0.11164686537114032, 0.6771450570033168], [\"Posterize\", 0.27921361289661406, 0.7214300893597819]], [[\"Contrast\", 0.5958265906571906, 0.5963959447666958], [\"Sharpness\", 0.2640889223630885, 0.3365870842641453]], [[\"Color\", 0.255634146724125, 0.5610029792926452], [\"ShearY\", 0.7476893976084721, 0.36613194760395557]], [[\"ShearX\", 0.2167581882130063, 0.022978065071245002], [\"TranslateX\", 0.1686864409720319, 0.4919575435512007]], [[\"Solarize\", 0.10702753776284957, 0.3954707963684698], [\"Contrast\", 0.7256100635368403, 0.48845259655719686]], [[\"Sharpness\", 0.6165615058519549, 0.2624079463213861], [\"ShearX\", 0.3804820351860919, 0.4738994677544202]], [[\"TranslateX\", 0.18066394808448177, 0.8174509422318228], [\"Solarize\", 0.07964569396290502, 0.45495935736800974]], [[\"Sharpness\", 0.2741884021129658, 0.9311045302358317], [\"Cutout\", 0.0009101326429323388, 0.5932102256756948]], [[\"Rotate\", 0.8501796375826188, 0.5092564038282137], [\"Brightness\", 0.6520146983999912, 0.724091283316938]], [[\"Brightness\", 0.10079744898900078, 0.7644088017429471], [\"AutoContrast\", 0.33540215138213575, 0.1487538541758792]], [[\"ShearY\", 0.10632545944757177, 0.9565164562996977], [\"Rotate\", 0.275833816849538, 0.6200731548023757]], [[\"Color\", 0.6749819274397422, 0.41042188598168844], [\"AutoContrast\", 0.22396590966461932, 0.5048018491863738]], [[\"Equalize\", 0.5044277111650255, 0.2649182381110667], [\"Brightness\", 0.35715133289571355, 0.8653260893016869]], [[\"Cutout\", 0.49083594426355326, 0.5602781291093129], [\"Posterize\", 0.721795488514384, 0.5525847430754974]], [[\"Sharpness\", 0.5081835448947317, 0.7453323423804428], [\"TranslateX\", 0.11511932212234266, 0.4337766796030984]], [[\"Solarize\", 0.3817050641766593, 0.6879004573473403], [\"Invert\", 0.0015041436267447528, 0.9793134066888262]], [[\"AutoContrast\", 0.5107410439697935, 0.8276720355454423], [\"Cutout\", 0.2786270701864015, 0.43993387208414564]], [[\"Rotate\", 
0.6711202569428987, 0.6342930903972932], [\"Posterize\", 0.802820231163559, 0.42770002619222053]], [[\"Color\", 0.9426854321337312, 0.9055431782458764], [\"AutoContrast\", 0.3556422423506799, 0.2773922428787449]], [[\"Contrast\", 0.10318991257659992, 0.30841372533347416], [\"Posterize\", 0.4202264962677853, 0.05060395018085634]], [[\"Invert\", 0.549305630337048, 0.886056156681853], [\"Cutout\", 0.9314157033373055, 0.3485836940307909]], [[\"ShearX\", 0.5642891775895684, 0.16427372934801418], [\"Invert\", 0.228741164726475, 0.5066345406806475]], [[\"ShearY\", 0.5813123201003086, 0.33474363490586106], [\"Equalize\", 0.11803439432255824, 0.8583936440614798]], [[\"Sharpness\", 0.1642809706111211, 0.6958675237301609], [\"ShearY\", 0.5989560762277414, 0.6194018060415276]], [[\"Rotate\", 0.05092104774529638, 0.9358045394527796], [\"Cutout\", 0.6443254331615441, 0.28548414658857657]], [[\"Brightness\", 0.6986036769232594, 0.9618046340942727], [\"Sharpness\", 0.5564490243465492, 0.6295231286085622]], [[\"Brightness\", 0.42725649792574105, 0.17628028916784244], [\"Equalize\", 0.4425109360966546, 0.6392872650036018]], [[\"ShearY\", 0.5758622795525444, 0.8773349286588288], [\"ShearX\", 0.038525646435423666, 0.8755366512394268]], [[\"Sharpness\", 0.3704459924265827, 0.9236361456197351], [\"Color\", 0.6379842432311235, 0.4548767717224531]], [[\"Contrast\", 0.1619523824549347, 0.4506528800882731], [\"AutoContrast\", 0.34513874426188385, 0.3580290330996726]], [[\"Contrast\", 0.728699731513527, 0.6932238009822878], [\"Brightness\", 0.8602917375630352, 0.5341445123280423]], [[\"Equalize\", 0.3574552353044203, 0.16814745124536548], [\"Rotate\", 0.24191717169379262, 0.3279497108179034]], [[\"ShearY\", 0.8567478695576244, 0.37746117240238164], [\"ShearX\", 0.9654125389830487, 0.9283047610798827]], [[\"ShearY\", 0.4339052480582405, 0.5394548246617406], [\"Cutout\", 0.5070570647967001, 0.7846286976687882]], [[\"AutoContrast\", 0.021620100406875065, 0.44425839772845227], [\"AutoContrast\", 
0.33978157614075183, 0.47716564815092244]], [[\"Contrast\", 0.9727600659025666, 0.6651758819229426], [\"Brightness\", 0.9893133904996626, 0.39176397622636105]], [[\"Equalize\", 0.283428620586305, 0.18727922861893637], [\"Rotate\", 0.3556063466797136, 0.3722839913107821]], [[\"ShearY\", 0.7276172841941864, 0.4834188516302227], [\"ShearX\", 0.010783217950465884, 0.9756458772142235]], [[\"ShearY\", 0.2901753295101581, 0.5684700238749064], [\"Cutout\", 0.655585564610337, 0.9490071307790201]], [[\"AutoContrast\", 0.008507193981450278, 0.4881150103902877], [\"AutoContrast\", 0.6561989723231185, 0.3715071329838596]], [[\"Contrast\", 0.7702505530948414, 0.6961371266519999], [\"Brightness\", 0.9953051630261895, 0.3861962467326121]], [[\"Equalize\", 0.2805270012472756, 0.17715406116880994], [\"Rotate\", 0.3111256593947474, 0.15824352183820073]], [[\"Brightness\", 0.9888680802094193, 0.4856236485253163], [\"ShearX\", 0.022370252047332284, 0.9284975906226682]], [[\"ShearY\", 0.4065719044318099, 0.7468528006921563], [\"AutoContrast\", 0.19494427109708126, 0.8613186475174786]], [[\"AutoContrast\", 0.023296727279367765, 0.9170949567425306], [\"AutoContrast\", 0.11663051100921168, 0.7908646792175343]], [[\"AutoContrast\", 0.7335191671571732, 0.4958357308292425], [\"Color\", 0.7964964008349845, 0.4977687544324929]], [[\"ShearX\", 0.19905221600021472, 0.3033081933150046], [\"Equalize\", 0.9383410219319321, 0.3224669877230161]], [[\"ShearX\", 0.8265450331466404, 0.6509091423603757], [\"Sharpness\", 0.7134181178748723, 0.6472835976443643]], [[\"ShearY\", 0.46962439525486044, 0.223433110541722], [\"Rotate\", 0.7749806946212373, 0.5337060376916906]], [[\"Posterize\", 0.1652499695106796, 0.04860659068586126], [\"Brightness\", 0.6644577712782511, 0.4144528269429337]], [[\"TranslateY\", 0.6220449565731829, 0.4917495676722932], [\"Posterize\", 0.6255000355409635, 0.8374266890984867]], [[\"AutoContrast\", 0.4887160797052227, 0.7106426020530529], [\"Sharpness\", 0.7684218571497236, 
0.43678474722954763]], [[\"Invert\", 0.13178101535845366, 0.8301141976359813], [\"Color\", 0.002820877424219378, 0.49444413062487075]], [[\"TranslateX\", 0.9920683666478188, 0.5862245842588877], [\"Posterize\", 0.5536357075855376, 0.5454300367281468]], [[\"Brightness\", 0.8150181219663427, 0.1411060258870707], [\"Sharpness\", 0.8548823004164599, 0.77008691072314]], [[\"Brightness\", 0.9580478020413399, 0.7198667636628974], [\"ShearY\", 0.8431585033377366, 0.38750016565010803]], [[\"Solarize\", 0.2331505347152334, 0.25754361489084787], [\"TranslateY\", 0.447431373734262, 0.5782399531772253]], [[\"TranslateY\", 0.8904927998691309, 0.25872872455072315], [\"AutoContrast\", 0.7129888139716263, 0.7161603231650524]], [[\"ShearY\", 0.6336216800247362, 0.5247508616674911], [\"Cutout\", 0.9167315119726633, 0.2060557387978919]], [[\"ShearX\", 0.001661782345968199, 0.3682225725445044], [\"Solarize\", 0.12303352043754572, 0.5014989548584458]], [[\"Brightness\", 0.9723625105116246, 0.6555444729681099], [\"Contrast\", 0.5539208721135375, 0.7819973409318487]], [[\"Equalize\", 0.3262607499912611, 0.0006745572802121513], [\"Contrast\", 0.35341551623767103, 0.36814689398886347]], [[\"ShearY\", 0.7478539900243613, 0.37322078030129185], [\"TranslateX\", 0.41558847793529247, 0.7394615158544118]], [[\"Invert\", 0.13735541232529067, 0.5536403864332143], [\"Cutout\", 0.5109718190377135, 0.0447509485253679]], [[\"AutoContrast\", 0.09403602327274725, 0.5909250807862687], [\"ShearY\", 0.53234060616395, 0.5316981359469398]], [[\"ShearX\", 0.5651922367876323, 0.6794110241313183], [\"Posterize\", 0.7431624856363638, 0.7896861463783287]], [[\"Brightness\", 0.30949179379286806, 0.7650569096019195], [\"Sharpness\", 0.5461629122105034, 0.6814369444005866]], [[\"Sharpness\", 0.28459340191768434, 0.7802208350806028], [\"Rotate\", 0.15097973114238117, 0.5259683294104645]], [[\"ShearX\", 0.6430803693700531, 0.9333735880102375], [\"Contrast\", 0.7522209520030653, 0.18831747966185058]], [[\"Contrast\", 
0.4219455937915647, 0.29949769435499646], [\"Color\", 0.6925322933509542, 0.8095523885795443]], [[\"ShearX\", 0.23553236193043048, 0.17966207900468323], [\"AutoContrast\", 0.9039700567886262, 0.21983629944639108]], [[\"ShearX\", 0.19256223146671514, 0.31200739880443584], [\"Sharpness\", 0.31962196883294713, 0.6828107668550425]], [[\"Cutout\", 0.5947690279080912, 0.21728220253899178], [\"Rotate\", 0.6757188879871141, 0.489460599679474]], [[\"ShearY\", 0.18365897125470526, 0.3988571115918058], [\"Brightness\", 0.7727489489504, 0.4790369956329955]], [[\"Contrast\", 0.7090301084131432, 0.5178303607560537], [\"ShearX\", 0.16749258277688506, 0.33061773301592356]], [[\"ShearX\", 0.3706690885419934, 0.38510677124319415], [\"AutoContrast\", 0.8288356276501032, 0.16556487668770264]], [[\"TranslateY\", 0.16758043046445614, 0.30127092823893986], [\"Brightness\", 0.5194636577132354, 0.6225165310621702]], [[\"Cutout\", 0.6087289363049726, 0.10439287037803044], [\"Rotate\", 0.7503452083033819, 0.7425316019981433]], [[\"ShearY\", 0.24347189588329932, 0.5554979486672325], [\"Brightness\", 0.9468115239174161, 0.6132449358023568]], [[\"Brightness\", 0.7144508395807994, 0.4610594769966929], [\"ShearX\", 0.16466683833092968, 0.3382903812375781]], [[\"Sharpness\", 0.27743648684265465, 0.17200038071656915], [\"Color\", 0.47404262107546236, 0.7868991675614725]], [[\"Sharpness\", 0.8603993513633618, 0.324604728411791], [\"TranslateX\", 0.3331597130403763, 0.9369586812977804]], [[\"Color\", 0.1535813630595832, 0.4700116846558207], [\"Color\", 0.5435647971896318, 0.7639291483525243]], [[\"Brightness\", 0.21486188101947656, 0.039347277341450576], [\"Cutout\", 0.7069526940684954, 0.39273934115015696]], [[\"ShearY\", 0.7267130888840517, 0.6310800726389485], [\"AutoContrast\", 0.662163190824139, 0.31948540372237766]], [[\"ShearX\", 0.5123132117185981, 0.1981015909438834], [\"AutoContrast\", 0.9009347363863067, 0.26790399126924036]], [[\"Brightness\", 0.24245061453231648, 0.2673478678291436], 
[\"ShearX\", 0.31707976089283946, 0.6800582845544948]], [[\"Cutout\", 0.9257780138367764, 0.03972673526848819], [\"Rotate\", 0.6807858944518548, 0.46974332280612097]], [[\"ShearY\", 0.1543443071262312, 0.6051682587030671], [\"Brightness\", 0.9758203119828304, 0.4941406868162414]], [[\"Contrast\", 0.07578049236491124, 0.38953819133407647], [\"ShearX\", 0.20194918288164293, 0.4141510791947318]], [[\"Color\", 0.27826402243792286, 0.43517491081531157], [\"AutoContrast\", 0.6159269026143263, 0.2021846783488046]], [[\"AutoContrast\", 0.5039377966534692, 0.19241507605941105], [\"Invert\", 0.5563931144385394, 0.7069728937319112]], [[\"Sharpness\", 0.19031632433810566, 0.26310171056096743], [\"Color\", 0.4724537593175573, 0.6715201448387876]], [[\"ShearY\", 0.2280910467786642, 0.33340559088059313], [\"ShearY\", 0.8858560034869303, 0.2598627441471076]], [[\"ShearY\", 0.07291814128021593, 0.5819462692986321], [\"Cutout\", 0.27605696060512147, 0.9693427371868695]], [[\"Posterize\", 0.4249871586563321, 0.8256952014328607], [\"Posterize\", 0.005907466926447169, 0.8081353382152597]], [[\"Brightness\", 0.9071305290601128, 0.4781196213717954], [\"Posterize\", 0.8996214311439275, 0.5540717376630279]], [[\"Brightness\", 0.06560728936236392, 0.9920627849065685], [\"TranslateX\", 0.04530789794044952, 0.5318568944702607]], [[\"TranslateX\", 0.6800263601084814, 0.4611536772507228], [\"Rotate\", 0.7245888375283157, 0.0914772551375381]], [[\"Sharpness\", 0.879556061897963, 0.42272481462067535], [\"TranslateX\", 0.4600350422524085, 0.5742175429334919]], [[\"AutoContrast\", 0.5005776243176145, 0.22597121331684505], [\"Invert\", 0.10763286370369299, 0.6841782704962373]], [[\"Sharpness\", 0.7422908472000116, 0.6850324203882405], [\"TranslateX\", 0.3832914614128403, 0.34798646673324896]], [[\"ShearY\", 0.31939465302679326, 0.8792088167639516], [\"Brightness\", 0.4093604352811235, 0.21055483197261338]], [[\"AutoContrast\", 0.7447595860998638, 0.19280222555998586], [\"TranslateY\", 
0.317754779431227, 0.9983454520593591]], [[\"Equalize\", 0.27706973689750847, 0.6447455020660622], [\"Contrast\", 0.5626579126863761, 0.7920049962776781]], [[\"Rotate\", 0.13064369451773816, 0.1495367590684905], [\"Sharpness\", 0.24893941981801215, 0.6295943894521504]], [[\"ShearX\", 0.6856269993063254, 0.5167938584189854], [\"Sharpness\", 0.24835352574609537, 0.9990550493102627]], [[\"AutoContrast\", 0.461654115871693, 0.43097388896245004], [\"Cutout\", 0.366359682416437, 0.08011826474215511]], [[\"AutoContrast\", 0.993892672935951, 0.2403608711236933], [\"ShearX\", 0.6620817870694181, 0.1744814077869482]], [[\"ShearY\", 0.6396747719986443, 0.15031017143644265], [\"Brightness\", 0.9451954879495629, 0.26490678840264714]], [[\"Color\", 0.19311480787397262, 0.15712300697448575], [\"Posterize\", 0.05391448762015258, 0.6943963643155474]], [[\"Sharpness\", 0.6199669674684085, 0.5412492335319072], [\"Invert\", 0.14086213450149815, 0.2611850277919339]], [[\"Posterize\", 0.5533129268803405, 0.5332478159319912], [\"ShearX\", 0.48956244029096635, 0.09223930853562916]], [[\"ShearY\", 0.05871590849449765, 0.19549715278943228], [\"TranslateY\", 0.7208521362741379, 0.36414003004659434]], [[\"ShearY\", 0.7316263417917531, 0.0629747985768501], [\"Contrast\", 0.036359793501448245, 0.48658745414898386]], [[\"Rotate\", 0.3301497610942963, 0.5686622043085637], [\"ShearX\", 0.40581487555676843, 0.5866127743850192]], [[\"ShearX\", 0.6679039628249283, 0.5292270693200821], [\"Sharpness\", 0.25901391739310703, 0.9778360586541461]], [[\"AutoContrast\", 0.27373222012596854, 0.14456771405730712], [\"Contrast\", 0.3877220783523938, 0.7965158941894336]], [[\"Solarize\", 0.29440905483979096, 0.06071633809388455], [\"Equalize\", 0.5246736285116214, 0.37575084834661976]], [[\"TranslateY\", 0.2191269464520395, 0.7444942293988484], [\"Posterize\", 0.3840878524812771, 0.31812671711741247]], [[\"Solarize\", 0.25159267140731356, 0.5833264622559661], [\"Brightness\", 0.07552262572348738, 
0.33210648549288435]], [[\"AutoContrast\", 0.9770099298399954, 0.46421915310428197], [\"AutoContrast\", 0.04707358934642503, 0.24922048012183493]], [[\"Cutout\", 0.5379685806621965, 0.02038212605928355], [\"Brightness\", 0.5900728303717965, 0.28807872931416956]], [[\"Sharpness\", 0.11596624872886108, 0.6086947716949325], [\"AutoContrast\", 0.34876470059667525, 0.22707897759730578]], [[\"Contrast\", 0.276545513135698, 0.8822580384226156], [\"Rotate\", 0.04874027684061846, 0.6722214281612163]], [[\"ShearY\", 0.595839851757025, 0.4389866852785822], [\"Equalize\", 0.5225492356128832, 0.2735290854063459]], [[\"Sharpness\", 0.9918029636732927, 0.9919926583216121], [\"Sharpness\", 0.03672376137997366, 0.5563865980047012]], [[\"AutoContrast\", 0.34169589759999847, 0.16419911552645738], [\"Invert\", 0.32995953043129234, 0.15073174739720568]], [[\"Posterize\", 0.04600255098477292, 0.2632612790075844], [\"TranslateY\", 0.7852153329831825, 0.6990722310191976]], [[\"AutoContrast\", 0.4414653815356372, 0.2657468780017082], [\"Posterize\", 0.30647061536763337, 0.3688222724948656]], [[\"Contrast\", 0.4239361091421837, 0.6076562806342001], [\"Cutout\", 0.5780707784165284, 0.05361325256745192]], [[\"Sharpness\", 0.7657895907855394, 0.9842407321667671], [\"Sharpness\", 0.5416352696151596, 0.6773681575200902]], [[\"AutoContrast\", 0.13967381098331305, 0.10787258006315015], [\"Posterize\", 0.5019536507897069, 0.9881978222469807]], [[\"Brightness\", 0.030528346448984903, 0.31562058762552847], [\"TranslateY\", 0.0843808140595676, 0.21019213305350526]], [[\"AutoContrast\", 0.6934579165006736, 0.2530484168209199], [\"Rotate\", 0.0005751408130693636, 0.43790043943210005]], [[\"TranslateX\", 0.611258547664328, 0.25465240215894935], [\"Sharpness\", 0.5001446909868196, 0.36102204109889413]], [[\"Contrast\", 0.8995127327150193, 0.5493190695343996], [\"Brightness\", 0.242708780669213, 0.5461116653329015]], [[\"AutoContrast\", 0.3751825351022747, 0.16845985803896962], [\"Cutout\", 
0.25201103287363663, 0.0005893331783358435]], [[\"ShearX\", 0.1518985779435941, 0.14768180777304504], [\"Color\", 0.85133530274324, 0.4006641163378305]], [[\"TranslateX\", 0.5489668255504668, 0.4694591826554948], [\"Rotate\", 0.1917354490155893, 0.39993269385802177]], [[\"ShearY\", 0.6689267479532809, 0.34304285013663577], [\"Equalize\", 0.24133154048883143, 0.279324043138247]], [[\"Contrast\", 0.3412544002099494, 0.20217358823930232], [\"Color\", 0.8606984790510235, 0.14305503544676373]], [[\"Cutout\", 0.21656155695311988, 0.5240101349572595], [\"Brightness\", 0.14109877717636352, 0.2016827341210295]], [[\"Sharpness\", 0.24764371218833872, 0.19655480259925423], [\"Posterize\", 0.19460398862039913, 0.4975414350200679]], [[\"Brightness\", 0.6071850094982323, 0.7270716448607151], [\"Solarize\", 0.111786402398499, 0.6325641684614275]], [[\"Contrast\", 0.44772949532200856, 0.44267502710695955], [\"AutoContrast\", 0.360117506402693, 0.2623958228760273]], [[\"Sharpness\", 0.8888131688583053, 0.936897400764746], [\"Sharpness\", 0.16080674198274894, 0.5681119841445879]], [[\"AutoContrast\", 0.8004456226590612, 0.1788600469525269], [\"Brightness\", 0.24832285390647374, 0.02755350284841604]], [[\"ShearY\", 0.06910320102646594, 0.26076407321544054], [\"Contrast\", 0.8633703022354964, 0.38968514704043056]], [[\"AutoContrast\", 0.42306251382780613, 0.6883260271268138], [\"Rotate\", 0.3938724346852023, 0.16740881249086037]], [[\"Contrast\", 0.2725343884286728, 0.6468194318074759], [\"Sharpness\", 0.32238942646494745, 0.6721149242783824]], [[\"AutoContrast\", 0.942093919956842, 0.14675331481712853], [\"Posterize\", 0.5406276708262192, 0.683901182218153]], [[\"Cutout\", 0.5386811894643584, 0.04498833938429728], [\"Posterize\", 0.17007257321724775, 0.45761177118620633]], [[\"Contrast\", 0.13599408935104654, 0.53282738083886], [\"Solarize\", 0.26941667995081114, 0.20958261079465895]], [[\"Color\", 0.6600788518606634, 0.9522228302165842], [\"Invert\", 0.0542722262516899, 
0.5152431169321683]], [[\"Contrast\", 0.5328934819727553, 0.2376220512388278], [\"Posterize\", 0.04890422575781711, 0.3182233123739474]], [[\"AutoContrast\", 0.9289628064340965, 0.2976678437448435], [\"Color\", 0.20936893798507963, 0.9649612821434217]], [[\"Cutout\", 0.9019423698575457, 0.24002036989728096], [\"Brightness\", 0.48734445615892974, 0.047660899809176316]], [[\"Sharpness\", 0.09347824275711591, 0.01358686275590612], [\"Posterize\", 0.9248539660538934, 0.4064232632650468]], [[\"Brightness\", 0.46575675383704634, 0.6280194775484345], [\"Invert\", 0.17276207634499413, 0.21263495428839635]], [[\"Brightness\", 0.7238014711679732, 0.6178946027258592], [\"Equalize\", 0.3815496086340364, 0.07301281068847276]], [[\"Contrast\", 0.754557393588416, 0.895332753570098], [\"Color\", 0.32709957750707447, 0.8425486003491515]], [[\"Rotate\", 0.43406698081696576, 0.28628263254953723], [\"TranslateY\", 0.43949548709125374, 0.15927082198238685]], [[\"Brightness\", 0.0015838339831640708, 0.09341692553352654], [\"AutoContrast\", 0.9113966907329718, 0.8345900469751112]], [[\"ShearY\", 0.46698796308585017, 0.6150701348176804], [\"Invert\", 0.14894062704815722, 0.2778388046184728]], [[\"Color\", 0.30360499169455957, 0.995713092016834], [\"Contrast\", 0.2597016288524961, 0.8654420870658932]], [[\"Brightness\", 0.9661642031891435, 0.7322006407169436], [\"TranslateY\", 0.4393502786333408, 0.33934762664274265]], [[\"Color\", 0.9323638351992302, 0.912776309755293], [\"Brightness\", 0.1618274755371618, 0.23485741708056307]], [[\"Color\", 0.2216470771158821, 0.3359240197334976], [\"Sharpness\", 0.6328691811471494, 0.6298393874452548]], [[\"Solarize\", 0.4772769142265505, 0.7073470698713035], [\"ShearY\", 0.2656114148206966, 0.31343097010487253]], [[\"Solarize\", 0.3839017339304234, 0.5985505779429036], [\"Equalize\", 0.002412059429196589, 0.06637506181196245]], [[\"Contrast\", 0.12751196553017863, 0.46980311434237976], [\"Sharpness\", 0.3467487455865491, 0.4054907610444406]], 
[[\"AutoContrast\", 0.9321813669127206, 0.31328471589533274], [\"Rotate\", 0.05801738717432747, 0.36035756254444273]], [[\"TranslateX\", 0.52092390458353, 0.5261722561643886], [\"Contrast\", 0.17836804476171306, 0.39354333443158535]], [[\"Posterize\", 0.5458100909925713, 0.49447244994482603], [\"Brightness\", 0.7372536822363605, 0.5303409097463796]], [[\"Solarize\", 0.1913974941725724, 0.5582966653986761], [\"Equalize\", 0.020733669175727026, 0.9377467166472878]], [[\"Equalize\", 0.16265732137763889, 0.5206282340874929], [\"Sharpness\", 0.2421533133595281, 0.506389065871883]], [[\"AutoContrast\", 0.9787324801448523, 0.24815051941486466], [\"Rotate\", 0.2423487151245957, 0.6456493129745148]], [[\"TranslateX\", 0.6809867726670327, 0.6949687002397612], [\"Contrast\", 0.16125673359747458, 0.7582679978218987]], [[\"Posterize\", 0.8212000950994955, 0.5225012157831872], [\"Brightness\", 0.8824891856626245, 0.4499216779709508]], [[\"Solarize\", 0.12061313332505218, 0.5319371283368052], [\"Equalize\", 0.04120865969945108, 0.8179402157299602]], [[\"Rotate\", 0.11278256686005855, 0.4022686554165438], [\"ShearX\", 0.2983451019112792, 0.42782525461812604]], [[\"ShearY\", 0.8847385513289983, 0.5429227024179573], [\"Rotate\", 0.21316428726607445, 0.6712120087528564]], [[\"TranslateX\", 0.46448081241068717, 0.4746090648963252], [\"Brightness\", 0.19973580961271142, 0.49252862676553605]], [[\"Posterize\", 0.49664100539481526, 0.4460713166484651], [\"Brightness\", 0.6629559985581529, 0.35192346529003693]], [[\"Color\", 0.22710733249173676, 0.37943185764616194], [\"ShearX\", 0.015809774971472595, 0.8472080190835669]], [[\"Contrast\", 0.4187366322381491, 0.21621979869256666], [\"AutoContrast\", 0.7631045030367304, 0.44965231251615134]], [[\"Sharpness\", 0.47240637876720515, 0.8080091811749525], [\"Cutout\", 0.2853425420104144, 0.6669811510150936]], [[\"Posterize\", 0.7830320527127324, 0.2727062685529881], [\"Solarize\", 0.527834000867504, 0.20098218845222998]], [[\"Contrast\", 
0.366380535288225, 0.39766001659663075], [\"Cutout\", 0.8708808878088891, 0.20669525734273086]], [[\"ShearX\", 0.6815427281122932, 0.6146858582671569], [\"AutoContrast\", 0.28330622372053493, 0.931352024154997]], [[\"AutoContrast\", 0.8668174463154519, 0.39961453880632863], [\"AutoContrast\", 0.5718557712359253, 0.6337062930797239]], [[\"ShearY\", 0.8923152519411871, 0.02480062504737446], [\"Cutout\", 0.14954159341231515, 0.1422219808492364]], [[\"Rotate\", 0.3733718175355636, 0.3861928572224287], [\"Sharpness\", 0.5651126520194574, 0.6091103847442831]], [[\"Posterize\", 0.8891714191922857, 0.29600154265251016], [\"TranslateY\", 0.7865351723963945, 0.5664998548985523]], [[\"TranslateX\", 0.9298214806998273, 0.729856565052017], [\"AutoContrast\", 0.26349082482341846, 0.9638882609038888]], [[\"Sharpness\", 0.8387378377527128, 0.42146721129032494], [\"AutoContrast\", 0.9860522000876452, 0.4200699464169384]], [[\"ShearY\", 0.019609159303115145, 0.37197835936879514], [\"Cutout\", 0.22199340461754258, 0.015932573201085848]], [[\"Rotate\", 0.43871085583928443, 0.3283504258860078], [\"Sharpness\", 0.6077702068037776, 0.6830305349618742]], [[\"Contrast\", 0.6160211756538094, 0.32029451083389626], [\"Cutout\", 0.8037631428427006, 0.4025688837399259]], [[\"TranslateY\", 0.051637820936985435, 0.6908417834391846], [\"Sharpness\", 0.7602756948473368, 0.4927111506643095]], [[\"Rotate\", 0.4973618638052235, 0.45931479729281227], [\"TranslateY\", 0.04701789716427618, 0.9408779705948676]], [[\"Rotate\", 0.5214194592768602, 0.8371249272013652], [\"Solarize\", 0.17734812472813338, 0.045020798970228315]], [[\"ShearX\", 0.7457999920079351, 0.19025612553075893], [\"Sharpness\", 0.5994846101703786, 0.5665094068864229]], [[\"Contrast\", 0.6172655452900769, 0.7811432139704904], [\"Cutout\", 0.09915620454670282, 0.3963692287596121]], [[\"TranslateX\", 0.2650112299235817, 0.7377261946165307], [\"AutoContrast\", 0.5019539734059677, 0.26905046992024506]], [[\"Contrast\", 0.6646299821370135, 
0.41667784809592945], [\"Cutout\", 0.9698457154992128, 0.15429001887703997]], [[\"Sharpness\", 0.9467079029475773, 0.44906457469098204], [\"Cutout\", 0.30036908747917396, 0.4766149689663106]], [[\"Equalize\", 0.6667517691051055, 0.5014839828447363], [\"Solarize\", 0.4127890336820831, 0.9578274770236529]], [[\"Cutout\", 0.6447384874120834, 0.2868806107728985], [\"Cutout\", 0.4800990488106021, 0.4757538246206956]], [[\"Solarize\", 0.12560195032363236, 0.5557473475801568], [\"Equalize\", 0.019957161871490228, 0.5556797187823773]], [[\"Contrast\", 0.12607637375759484, 0.4300633627435161], [\"Sharpness\", 0.3437273670109087, 0.40493203127714417]], [[\"AutoContrast\", 0.884353334807183, 0.5880138314357569], [\"Rotate\", 0.9846032404597116, 0.3591877296622974]], [[\"TranslateX\", 0.6862295865975581, 0.5307482119690076], [\"Contrast\", 0.19439251187251982, 0.3999195825722808]], [[\"Posterize\", 0.4187641835025246, 0.5008988942651585], [\"Brightness\", 0.6665805605402482, 0.3853288204214253]], [[\"Posterize\", 0.4507470690013903, 0.4232437206624681], [\"TranslateX\", 0.6054107416317659, 0.38123828040922203]], [[\"AutoContrast\", 0.29562338573283276, 0.35608605102687474], [\"TranslateX\", 0.909954785390274, 0.20098894888066549]], [[\"Contrast\", 0.6015278411777212, 0.6049140992035096], [\"Cutout\", 0.47178713636517855, 0.5333747244651914]], [[\"TranslateX\", 0.490851976691112, 0.3829593925141144], [\"Sharpness\", 0.2716675173824095, 0.5131696240367152]], [[\"Posterize\", 0.4190558294646337, 0.39316689077269873], [\"Rotate\", 0.5018526072725914, 0.295712490156129]], [[\"AutoContrast\", 0.29624715560691617, 0.10937329832409388], [\"Posterize\", 0.8770505275992637, 0.43117765012206943]], [[\"Rotate\", 0.6649970092751698, 0.47767131373391974], [\"ShearX\", 0.6257923540490786, 0.6643337040198358]], [[\"Sharpness\", 0.5553620705849509, 0.8467799429696928], [\"Cutout\", 0.9006185811918932, 0.3537270716262]], [[\"ShearY\", 0.0007619678283789788, 0.9494591850536303], [\"Invert\", 
0.24267733654007673, 0.7851608409575828]], [[\"Contrast\", 0.9730916198112872, 0.404670123321921], [\"Sharpness\", 0.5923587793251186, 0.7405792404430281]], [[\"Cutout\", 0.07393909593373034, 0.44569630026328344], [\"TranslateX\", 0.2460593252211425, 0.4817527814541055]], [[\"Brightness\", 0.31058654119340867, 0.7043749950260936], [\"ShearX\", 0.7632161538947713, 0.8043681264908555]], [[\"AutoContrast\", 0.4352334371415373, 0.6377550087204297], [\"Rotate\", 0.2892714673415678, 0.49521052050510556]], [[\"Equalize\", 0.509071051375276, 0.7352913414974414], [\"ShearX\", 0.5099959429711828, 0.7071566714593619]], [[\"Posterize\", 0.9540506532512889, 0.8498853304461906], [\"ShearY\", 0.28199061357155397, 0.3161715627214629]], [[\"Posterize\", 0.6740855359097433, 0.684004694936616], [\"Posterize\", 0.6816720350737863, 0.9654766942980918]], [[\"Solarize\", 0.7149344531717328, 0.42212789795181643], [\"Brightness\", 0.686601460864528, 0.4263050070610551]], [[\"Cutout\", 0.49577164991501, 0.08394890892056037], [\"Rotate\", 0.5810369852730606, 0.3320732965776973]], [[\"TranslateY\", 0.1793755480490623, 0.6006520265468684], [\"Brightness\", 0.3769016576438939, 0.7190746300828186]], [[\"TranslateX\", 0.7226363597757153, 0.3847027238123509], [\"Brightness\", 0.7641713191794035, 0.36234003077512544]], [[\"TranslateY\", 0.1211227055347106, 0.6693523474608023], [\"Brightness\", 0.13011180247738063, 0.5126647617294864]], [[\"Equalize\", 0.1501070550869129, 0.0038548909451806557], [\"Posterize\", 0.8266535939653881, 0.5502199643499207]], [[\"Sharpness\", 0.550624117428359, 0.2023044586648523], [\"Brightness\", 0.06291556314780017, 0.7832635398703937]], [[\"Color\", 0.3701578205508141, 0.9051537973590863], [\"Contrast\", 0.5763972727739397, 0.4905511239739898]], [[\"Rotate\", 0.7678527224046323, 0.6723066265307555], [\"Solarize\", 0.31458533097383207, 0.38329324335154524]], [[\"Brightness\", 0.292050127929522, 0.7047582807953063], [\"ShearX\", 0.040541891910333805, 
0.06639328601282746]], [[\"TranslateY\", 0.4293891393238555, 0.6608516902234284], [\"Sharpness\", 0.7794685477624004, 0.5168044063408147]], [[\"Color\", 0.3682450402286552, 0.17274523597220048], [\"ShearY\", 0.3936056470397763, 0.5702597289866161]], [[\"Equalize\", 0.43436990310624657, 0.9207072627823626], [\"Contrast\", 0.7608688260846083, 0.4759023148841439]], [[\"Brightness\", 0.7926088966143935, 0.8270093925674497], [\"ShearY\", 0.4924174064969461, 0.47424347505831244]], [[\"Contrast\", 0.043917555279430476, 0.15861903591675125], [\"ShearX\", 0.30439480405505853, 0.1682659341098064]], [[\"TranslateY\", 0.5598255583454538, 0.721352536005039], [\"Posterize\", 0.9700921973303752, 0.6882015184440126]], [[\"AutoContrast\", 0.3620887415037668, 0.5958176322317132], [\"TranslateX\", 0.14213781552733287, 0.6230799786459947]], [[\"Color\", 0.490366889723972, 0.9863152892045195], [\"Color\", 0.817792262022319, 0.6755656429452775]], [[\"Brightness\", 0.7030707021937771, 0.254633187122679], [\"Color\", 0.13977318232688843, 0.16378180123959793]], [[\"AutoContrast\", 0.2933247831326118, 0.6283663376211102], [\"Sharpness\", 0.85430478154147, 0.9753613184208796]], [[\"Rotate\", 0.6674299955457268, 0.48571208708018976], [\"Contrast\", 0.47491370175907016, 0.6401079552479657]], [[\"Sharpness\", 0.37589579644127863, 0.8475131989077025], [\"TranslateY\", 0.9985149867598191, 0.057815729375099975]], [[\"Equalize\", 0.0017194373841596389, 0.7888361311461602], [\"Contrast\", 0.6779293670669408, 0.796851411454113]], [[\"TranslateY\", 0.3296782119072306, 0.39765117357271834], [\"Sharpness\", 0.5890554357001884, 0.6318339473765834]], [[\"Posterize\", 0.25423810893163856, 0.5400430289894207], [\"Sharpness\", 0.9273643918988342, 0.6480913470982622]], [[\"Cutout\", 0.850219975768305, 0.4169812455601289], [\"Solarize\", 0.5418755745870089, 0.5679666650495466]], [[\"Brightness\", 0.008881361977310959, 0.9282562314720516], [\"TranslateY\", 0.7736066471553994, 0.20041167606029642]], 
[[\"Brightness\", 0.05382537581401925, 0.6405265501035952], [\"Contrast\", 0.30484329473639593, 0.5449338155734242]], [[\"Color\", 0.613257119787967, 0.4541503912724138], [\"Brightness\", 0.9061572524724674, 0.4030159294447347]], [[\"Brightness\", 0.02739111568942537, 0.006028056532326534], [\"ShearX\", 0.17276751958646486, 0.05967365780621859]], [[\"TranslateY\", 0.4376298213047888, 0.7691816164456199], [\"Sharpness\", 0.8162292718857824, 0.6054926462265117]], [[\"Color\", 0.37963069679121214, 0.5946919433483344], [\"Posterize\", 0.08485417284005387, 0.5663580913231766]], [[\"Equalize\", 0.49785780226818316, 0.9999137109183761], [\"Sharpness\", 0.7685879484682496, 0.6260846154212211]], [[\"AutoContrast\", 0.4190931409670763, 0.2374852525139795], [\"Posterize\", 0.8797422264608563, 0.3184738541692057]], [[\"Rotate\", 0.7307269024632872, 0.41523609600701106], [\"ShearX\", 0.6166685870692289, 0.647133807748274]], [[\"Sharpness\", 0.5633713231039904, 0.8276694754755876], [\"Cutout\", 0.8329340776895764, 0.42656043027424073]], [[\"ShearY\", 0.14934828370884312, 0.8622510773680372], [\"Invert\", 0.25925989086863277, 0.8813283584888576]], [[\"Contrast\", 0.9457071292265932, 0.43228655518614034], [\"Sharpness\", 0.8485316947644338, 0.7590298998732413]], [[\"AutoContrast\", 0.8386103589399184, 0.5859583131318076], [\"Solarize\", 0.466758711343543, 0.9956215363818983]], [[\"Rotate\", 0.9387133710926467, 0.19180564509396503], [\"Rotate\", 0.5558247609706255, 0.04321698692007105]], [[\"ShearX\", 0.3608716600695567, 0.15206159451532864], [\"TranslateX\", 0.47295292905710146, 0.5290760596129888]], [[\"TranslateX\", 0.8357685981547495, 0.5991305115727084], [\"Posterize\", 0.5362929404188211, 0.34398525441943373]], [[\"ShearY\", 0.6751984031632811, 0.6066293622133011], [\"Contrast\", 0.4122723990263818, 0.4062467515095566]], [[\"Color\", 0.7515349936021702, 0.5122124665429213], [\"Contrast\", 0.03190514292904123, 0.22903520154660545]], [[\"Contrast\", 0.5448962625054385, 
0.38655673938910545], [\"AutoContrast\", 0.4867400684894492, 0.3433111101096984]], [[\"Rotate\", 0.0008372434310827959, 0.28599951781141714], [\"Equalize\", 0.37113686925530087, 0.5243929348114981]], [[\"Color\", 0.720054993488857, 0.2010177651701808], [\"TranslateX\", 0.23036196506059398, 0.11152764304368781]], [[\"Cutout\", 0.859134208332423, 0.6727345740185254], [\"ShearY\", 0.02159833505865088, 0.46390076266538544]], [[\"Sharpness\", 0.3428232157391428, 0.4067874527486514], [\"Brightness\", 0.5409415136577347, 0.3698432231874003]], [[\"Solarize\", 0.27303978936454776, 0.9832186173589548], [\"ShearY\", 0.08831127213044043, 0.4681870331149774]], [[\"TranslateY\", 0.2909309268736869, 0.4059460811623174], [\"Sharpness\", 0.6425125139803729, 0.20275737203293587]], [[\"Contrast\", 0.32167626214661627, 0.28636162794046977], [\"Invert\", 0.4712405253509603, 0.7934644799163176]], [[\"Color\", 0.867993060896951, 0.96574321666213], [\"Color\", 0.02233897320328512, 0.44478933557303063]], [[\"AutoContrast\", 0.1841254751814967, 0.2779992148017741], [\"Color\", 0.3586283093530607, 0.3696246850445087]], [[\"Posterize\", 0.2052935984046965, 0.16796913860308244], [\"ShearX\", 0.4807226832843722, 0.11296747254563266]], [[\"Cutout\", 0.2016411266364791, 0.2765295444084803], [\"Brightness\", 0.3054112810424313, 0.695924264931216]], [[\"Rotate\", 0.8405872184910479, 0.5434142541450815], [\"Cutout\", 0.4493615138203356, 0.893453735250007]], [[\"Contrast\", 0.8433310507685494, 0.4915423577963278], [\"ShearX\", 0.22567799557913246, 0.20129892537008834]], [[\"Contrast\", 0.045954277103674224, 0.5043900167190442], [\"Cutout\", 0.5552992473054611, 0.14436447810888237]], [[\"AutoContrast\", 0.7719296115130478, 0.4440417544621306], [\"Sharpness\", 0.13992809206158283, 0.7988278670709781]], [[\"Color\", 0.7838574233513952, 0.5971351401625151], [\"TranslateY\", 0.13562290583925385, 0.2253039635819158]], [[\"Cutout\", 0.24870301109385806, 0.6937886690381568], [\"TranslateY\", 
0.4033400068952813, 0.06253378991880915]], [[\"TranslateX\", 0.0036059390486775644, 0.5234723884081843], [\"Solarize\", 0.42724862530733526, 0.8697702564187633]], [[\"Equalize\", 0.5446026737834311, 0.9367992979112202], [\"ShearY\", 0.5943478903735789, 0.42345889214100046]], [[\"ShearX\", 0.18611885697957506, 0.7320849092947314], [\"ShearX\", 0.3796416430900566, 0.03817761920009881]], [[\"Posterize\", 0.37636778506979124, 0.26807924785236537], [\"Brightness\", 0.4317372554383255, 0.5473346211870932]], [[\"Brightness\", 0.8100436240916665, 0.3817612088285007], [\"Brightness\", 0.4193974619003253, 0.9685902764026623]], [[\"Contrast\", 0.701776402197012, 0.6612786008858009], [\"Color\", 0.19882787177960912, 0.17275597188875483]], [[\"Color\", 0.9538303302832989, 0.48362384535228686], [\"ShearY\", 0.2179980837345602, 0.37027290936457313]], [[\"TranslateY\", 0.6068028691503798, 0.3919346523454841], [\"Cutout\", 0.8228303342563138, 0.18372280287814613]], [[\"Equalize\", 0.016416758802906828, 0.642838949194916], [\"Cutout\", 0.5761717838655257, 0.7600661153497648]], [[\"Color\", 0.9417761826818639, 0.9916074035986558], [\"Equalize\", 0.2524209308597042, 0.6373703468715077]], [[\"Brightness\", 0.75512589439513, 0.6155072321007569], [\"Contrast\", 0.32413476940254515, 0.4194739830159837]], [[\"Sharpness\", 0.3339450765586968, 0.9973297539194967], [\"AutoContrast\", 0.6523930242124429, 0.1053482471037186]], [[\"ShearX\", 0.2961391955838801, 0.9870036064904368], [\"ShearY\", 0.18705025965909403, 0.4550895821154484]], [[\"TranslateY\", 0.36956447983807883, 0.36371471767143543], [\"Sharpness\", 0.6860051967688487, 0.2850190720087796]], [[\"Cutout\", 0.13017742151902967, 0.47316674150067195], [\"Invert\", 0.28923829959551883, 0.9295585654924601]], [[\"Contrast\", 0.7302368472279086, 0.7178974949876642], [\"TranslateY\", 0.12589674152030433, 0.7485392909494947]], [[\"Color\", 0.6474693117772619, 0.5518269515590674], [\"Contrast\", 0.24643004970708016, 0.3435581358079418]], 
[[\"Contrast\", 0.5650327855750835, 0.4843031798040887], [\"Brightness\", 0.3526684005761239, 0.3005305004600969]], [[\"Rotate\", 0.09822284968122225, 0.13172798244520356], [\"Equalize\", 0.38135066977857157, 0.5135129123554154]], [[\"Contrast\", 0.5902590645585712, 0.2196062383730596], [\"ShearY\", 0.14188379126120954, 0.1582612142182743]], [[\"Cutout\", 0.8529913814417812, 0.89734031211874], [\"Color\", 0.07293767043078672, 0.32577659205278897]], [[\"Equalize\", 0.21401668971453247, 0.040015259500028266], [\"ShearY\", 0.5126400895338797, 0.4726484828276388]], [[\"Brightness\", 0.8269430025954498, 0.9678362841865166], [\"ShearY\", 0.17142069814830432, 0.4726727848289514]], [[\"Brightness\", 0.699707089334018, 0.2795501395789335], [\"ShearX\", 0.5308818178242845, 0.10581814221896294]], [[\"Equalize\", 0.32519644258946145, 0.15763390340309183], [\"TranslateX\", 0.6149090364414208, 0.7454832565718259]], [[\"AutoContrast\", 0.5404508567155423, 0.7472387762067986], [\"Equalize\", 0.05649876539221024, 0.5628180219887216]]]\n    return p\n\n\ndef fa_reduced_svhn():\n    p = [[[\"TranslateX\", 0.001576965129744562, 0.43180488809874773], [\"Invert\", 0.7395307279252639, 0.7538444307982558]], [[\"Contrast\", 0.5762062225409211, 0.7532431872873473], [\"TranslateX\", 0.45212523461624615, 0.02451684483019846]], [[\"Contrast\", 0.18962433143225088, 0.29481185671147325], [\"Contrast\", 0.9998112218299271, 0.813015355163255]], [[\"Posterize\", 0.9633391295905683, 0.4136786222304747], [\"TranslateY\", 0.8011655496664203, 0.44102126789970797]], [[\"Color\", 0.8231185187716968, 0.4171602946893402], [\"TranslateX\", 0.8684965619113907, 0.36514568324909674]], [[\"Color\", 0.904075230324581, 0.46319140331093767], [\"Contrast\", 0.4115196534764559, 0.7773329158740563]], [[\"Sharpness\", 0.6600262774093967, 0.8045637700026345], [\"TranslateY\", 0.5917663766021198, 0.6844241908520602]], [[\"AutoContrast\", 0.16223989311434306, 0.48169653554195924], [\"ShearX\", 0.5433173232860344, 
0.7460278151912152]], [[\"ShearX\", 0.4913604762760715, 0.83391837859561], [\"Color\", 0.5580367056511908, 0.2961512691312932]], [[\"Color\", 0.18567091721211237, 0.9296983204905286], [\"Cutout\", 0.6074026199060156, 0.03303273406448193]], [[\"Invert\", 0.8049054771963224, 0.1340792344927909], [\"Color\", 0.4208839940504979, 0.7096454840962345]], [[\"ShearX\", 0.7997786664546294, 0.6492629575700173], [\"AutoContrast\", 0.3142777134084793, 0.6526010594925064]], [[\"TranslateX\", 0.2581027144644976, 0.6997433332894101], [\"Rotate\", 0.45490480973606834, 0.238620570022944]], [[\"Solarize\", 0.837397161027719, 0.9311141273136286], [\"Contrast\", 0.640364826293148, 0.6299761518677469]], [[\"Brightness\", 0.3782457347141744, 0.7085036717054278], [\"Brightness\", 0.5346150083208507, 0.5858930737867671]], [[\"Invert\", 0.48780391510474086, 0.610086407879722], [\"Color\", 0.5601999247616932, 0.5393836220423195]], [[\"Brightness\", 0.00250086643283564, 0.5003355864896979], [\"Brightness\", 0.003922153283353616, 0.41107110154584925]], [[\"TranslateX\", 0.4073069009685957, 0.9843435292693372], [\"Invert\", 0.38837085318721926, 0.9298542033875989]], [[\"ShearY\", 0.05479740443795811, 0.9113983424872698], [\"AutoContrast\", 0.2181108114232728, 0.713996037012164]], [[\"Brightness\", 0.27747508429413903, 0.3217467607288693], [\"ShearX\", 0.02715239061946995, 0.5430731635396449]], [[\"Sharpness\", 0.08994432959374538, 0.004706443546453831], [\"Posterize\", 0.10768206853226996, 0.39020299239900236]], [[\"Cutout\", 0.37498679037853905, 0.20784809761469553], [\"Color\", 0.9825516352194511, 0.7654155662756019]], [[\"Color\", 0.8899349124453552, 0.7797700766409008], [\"Rotate\", 0.1370222187174981, 0.2622119295138398]], [[\"Cutout\", 0.7088223332663685, 0.7884456023190028], [\"Solarize\", 0.5362257505160836, 0.6426837537811545]], [[\"Invert\", 0.15686225694987552, 0.5500563899117913], [\"Rotate\", 0.16315224193260078, 0.4246854030170752]], [[\"Rotate\", 0.005266247922433631, 
0.06612026206223394], [\"Contrast\", 0.06494357829209037, 0.2738420319474947]], [[\"Cutout\", 0.30200619566806275, 0.06558008068236942], [\"Rotate\", 0.2168576483823022, 0.878645566986328]], [[\"Color\", 0.6358930679444622, 0.613404714161498], [\"Rotate\", 0.08733206733004326, 0.4348276574435751]], [[\"Cutout\", 0.8834634887239585, 0.0006853845293474659], [\"Solarize\", 0.38132051231951847, 0.42558752668491195]], [[\"ShearY\", 0.08830136548479937, 0.5522438878371283], [\"Brightness\", 0.23816560427834074, 0.3033709051157141]], [[\"Solarize\", 0.9015331490756151, 0.9108788708847556], [\"Contrast\", 0.2057898014670072, 0.03260096030427456]], [[\"Equalize\", 0.9455978685121174, 0.14850077333434056], [\"TranslateY\", 0.6888705996522545, 0.5300565492007543]], [[\"Cutout\", 0.16942673959343585, 0.7294197201361826], [\"TranslateX\", 0.41184830642301534, 0.7060207449376135]], [[\"Color\", 0.30133344118702166, 0.24384417956342314], [\"Sharpness\", 0.4640904544421743, 0.32431840288061864]], [[\"Sharpness\", 0.5195055033472676, 0.9386677467005835], [\"Color\", 0.9536519432978372, 0.9624043444556467]], [[\"Rotate\", 0.8689597230556101, 0.23955490826730633], [\"Contrast\", 0.050071600927462656, 0.1309891556004179]], [[\"Cutout\", 0.5349421090878962, 0.08239510727779054], [\"Rotate\", 0.46064964710717216, 0.9037689320897339]], [[\"AutoContrast\", 0.5625256909986802, 0.5358003783186498], [\"Equalize\", 0.09204330691163354, 0.4386906784850649]], [[\"ShearX\", 0.0011061172864470226, 0.07150284682189278], [\"AutoContrast\", 0.6015956946553209, 0.4375362295530898]], [[\"ShearY\", 0.25294276499800983, 0.7937560397859562], [\"Brightness\", 0.30834103299704474, 0.21960258701547009]], [[\"Posterize\", 0.7423948904688074, 0.4598609935109695], [\"Rotate\", 0.5510348811675979, 0.26763724868985933]], [[\"TranslateY\", 0.3208729319318745, 0.945513054853888], [\"ShearX\", 0.4916473963030882, 0.8743840560039451]], [[\"ShearY\", 0.7557718687011286, 0.3125397104722828], [\"Cutout\", 
0.5565359791865849, 0.5151359251135629]], [[\"AutoContrast\", 0.16652786355571275, 0.1101575800958632], [\"Rotate\", 0.05108851703032641, 0.2612966401802814]], [[\"Brightness\", 0.380296489835016, 0.0428162454174662], [\"ShearX\", 0.3911934083168285, 0.18933607362790178]], [[\"Color\", 0.002476250465397678, 0.07795275305347571], [\"Posterize\", 0.08131841266654188, 0.14843363184306413]], [[\"Cutout\", 0.36664558716104434, 0.20904484995063996], [\"Cutout\", 0.07986452057223141, 0.9287747671053432]], [[\"Color\", 0.9296812469919231, 0.6634239915141935], [\"Rotate\", 0.07632463573240006, 0.408624029443747]], [[\"Cutout\", 0.7594470171961278, 0.9834672124229463], [\"Solarize\", 0.4471371303745053, 0.5751101102286562]], [[\"Posterize\", 0.051186719734032285, 0.5110941294710823], [\"Sharpness\", 0.040432522797391596, 0.42652298706992164]], [[\"Sharpness\", 0.2645335264327221, 0.8844553189835457], [\"Brightness\", 0.7229600357932696, 0.16660749270785696]], [[\"Sharpness\", 0.6296376086802589, 0.15564989758083458], [\"Sharpness\", 0.7913410481400365, 0.7022615408082826]], [[\"Cutout\", 0.5517247347343883, 0.43794888517764674], [\"ShearX\", 0.6951051782530201, 0.6230992857867065]], [[\"ShearX\", 0.9015708556331022, 0.6322135168527783], [\"Contrast\", 0.4285629283441831, 0.18158321019502988]], [[\"Brightness\", 0.9014292329524769, 0.3660463325457713], [\"Invert\", 0.6700729097206592, 0.16502732071917703]], [[\"AutoContrast\", 0.6432764477303431, 0.9998909112400834], [\"Invert\", 0.8124063975545761, 0.8149683327882365]], [[\"Cutout\", 0.6023944009428617, 0.9630976951918225], [\"ShearX\", 0.2734723568803071, 0.3080911542121765]], [[\"Sharpness\", 0.048949115014412806, 0.44497866256845164], [\"Brightness\", 0.5611832867244329, 0.12994217480426257]], [[\"TranslateY\", 0.4619112333002525, 0.47317728091588396], [\"Solarize\", 0.618638784910472, 0.9508297099190338]], [[\"Sharpness\", 0.9656274391147018, 0.3402622993963962], [\"Cutout\", 0.8452511174508919, 0.3094717093312621]], 
[[\"ShearX\", 0.04942201651478659, 0.6910568465705691], [\"AutoContrast\", 0.7155342517619936, 0.8565418847743523]], [[\"Brightness\", 0.5222290590721783, 0.6462675303633422], [\"Sharpness\", 0.7756317511341633, 0.05010730683866704]], [[\"Contrast\", 0.17098396012942796, 0.9128908626236187], [\"TranslateY\", 0.1523815376677518, 0.4269909829886339]], [[\"Cutout\", 0.7679024720089866, 0.22229116396644455], [\"Sharpness\", 0.47714827844878843, 0.8242815864830401]], [[\"Brightness\", 0.9321772357292445, 0.11339758604001371], [\"Invert\", 0.7021078495093375, 0.27507749184928154]], [[\"ShearY\", 0.7069449324510433, 0.07262757954730437], [\"Cutout\", 0.6298690227159313, 0.8866813664859028]], [[\"ShearX\", 0.8153137620199989, 0.8478194179953927], [\"ShearX\", 0.7519451353411938, 0.3914579556959725]], [[\"Cutout\", 0.07152574469472753, 0.2629935229222503], [\"TranslateX\", 0.43728405510089485, 0.2610201002449789]], [[\"AutoContrast\", 0.5824529633013098, 0.5619551536261955], [\"Rotate\", 0.45434137552116965, 0.7567169855140041]], [[\"TranslateY\", 0.9338431187142137, 0.14230481341042783], [\"Cutout\", 0.744797723251028, 0.4346601666787713]], [[\"ShearX\", 0.3197252560289169, 0.8770408070016171], [\"Color\", 0.7657013088540465, 0.2685586719812284]], [[\"ShearY\", 0.6542181749801549, 0.8148188744344297], [\"Sharpness\", 0.5108985661436543, 0.9926016115463769]], [[\"ShearY\", 0.39218730620135694, 0.857769946478945], [\"Color\", 0.39588355914920886, 0.9910530523789284]], [[\"Invert\", 0.4993610396803735, 0.08449723470758526], [\"TranslateX\", 0.46267456928508305, 0.46691125646493964]], [[\"Equalize\", 0.8640576819821256, 0.3973808869887604], [\"ShearY\", 0.5491163877063172, 0.422429328786161]], [[\"Contrast\", 0.6146206387722841, 0.8453559854684094], [\"TranslateX\", 0.7974333014574718, 0.47395476786951773]], [[\"Contrast\", 0.6828704722015236, 0.6952755697785722], [\"Brightness\", 0.7903069452567497, 0.8350915035109574]], [[\"Rotate\", 0.1211091761531299, 0.9667702562228727], 
[\"Color\", 0.47888534537103344, 0.8298620028065332]], [[\"Equalize\", 0.20009722872711086, 0.21851235854853018], [\"Invert\", 0.4433641154198673, 0.41902203581091935]], [[\"AutoContrast\", 0.6333190204577053, 0.23965630032835372], [\"Color\", 0.38651217030044804, 0.06447323778198723]], [[\"Brightness\", 0.378274337541471, 0.5482593116308322], [\"Cutout\", 0.4856574442608347, 0.8889688535495244]], [[\"Rotate\", 0.8201259323479384, 0.7404525573938633], [\"Color\", 0.28371236449364595, 0.7866003515933161]], [[\"Brightness\", 0.10053196350009105, 0.18814037089411267], [\"Sharpness\", 0.5572102497672569, 0.04458217557977126]], [[\"AutoContrast\", 0.6445330112376135, 0.48082049184921843], [\"TranslateY\", 0.378898917914949, 0.9338102625289362]], [[\"AutoContrast\", 0.08482623401924708, 0.25199930695784384], [\"Solarize\", 0.5981823550521426, 0.19626357596662092]], [[\"Solarize\", 0.4373030803918095, 0.22907881245285625], [\"AutoContrast\", 0.6383084635487905, 0.29517603235993883]], [[\"AutoContrast\", 0.922112624726991, 0.29398098144910145], [\"AutoContrast\", 0.8550184811514672, 0.8030331582292343]], [[\"ShearX\", 0.38761582800913896, 0.06304125015084923], [\"Contrast\", 0.3225758804984975, 0.7089696696094797]], [[\"TranslateY\", 0.27499498563849206, 0.1917583097241206], [\"Color\", 0.5845853711746438, 0.5353520071667661]], [[\"ShearY\", 0.530881951424285, 0.47961248148116453], [\"ShearX\", 0.04666387744533289, 0.275772822690165]], [[\"Solarize\", 0.5727309318844802, 0.02889734544563341], [\"AutoContrast\", 0.638852434854615, 0.9819440776921611]], [[\"AutoContrast\", 0.9766868312173507, 0.9651796447738792], [\"AutoContrast\", 0.3489760216898085, 0.3082182741354106]], [[\"Sharpness\", 0.13693510871346704, 0.08297205456926067], [\"Contrast\", 0.3155812019005854, 0.031402991638917896]], [[\"TranslateY\", 0.2664707540547008, 0.4838091910041236], [\"ShearX\", 0.5935665395229432, 0.7813088248538167]], [[\"ShearY\", 0.7578577752251343, 0.5116014090216161], [\"ShearX\", 
0.8332831240873545, 0.26781876290841017]], [[\"TranslateY\", 0.473254381651761, 0.4203181582821155], [\"ShearY\", 0.732848696900726, 0.47895514793728433]], [[\"Solarize\", 0.6922689176672292, 0.36403255869823725], [\"AutoContrast\", 0.910654040826914, 0.888651414068326]], [[\"ShearX\", 0.37326536936166244, 0.47830923320699525], [\"Equalize\", 0.4724702976076929, 0.8176108279939023]], [[\"Contrast\", 0.3839906424759326, 0.09109695563933692], [\"Invert\", 0.36305435543972325, 0.5701589223795499]], [[\"Invert\", 0.5175591137387999, 0.38815675919253867], [\"TranslateY\", 0.1354848160153554, 0.41734106283245065]], [[\"Color\", 0.829616006981199, 0.18631472346156963], [\"Color\", 0.2465115448326214, 0.9439365672808333]], [[\"Contrast\", 0.18207939197942158, 0.39841173152850873], [\"ShearX\", 0.16723588254695632, 0.2868649619006758]], [[\"Posterize\", 0.1941909136988733, 0.6322499882557473], [\"Contrast\", 0.6109060391509794, 0.27329598688783296]], [[\"AutoContrast\", 0.9148775146158022, 0.09129288311923844], [\"Sharpness\", 0.4222442287436423, 0.847961820057229]], [[\"Color\", 0.21084007475489852, 0.008218056412554131], [\"Contrast\", 0.43996934555301637, 0.500680146508504]], [[\"ShearY\", 0.6745287915240038, 0.6120305524405164], [\"Equalize\", 0.467403794543269, 0.2207148995882467]], [[\"Color\", 0.7712823974371379, 0.2839161885566902], [\"Color\", 0.8725368489709752, 0.3349470222415115]], [[\"Solarize\", 0.5563976601161562, 0.540446614847802], [\"Invert\", 0.14228071175107454, 0.2242332811481905]], [[\"Contrast\", 0.34596757983998383, 0.9158971503395041], [\"Cutout\", 0.6823724203724072, 0.5221518922863516]], [[\"Posterize\", 0.3275475232882672, 0.6520033254468702], [\"Color\", 0.7434224109271398, 0.0824308188060544]], [[\"Cutout\", 0.7295122229650082, 0.277887573018184], [\"Brightness\", 0.5303655506515258, 0.28628046739964497]], [[\"Color\", 0.8533293996815943, 0.24909788223027743], [\"Color\", 0.6915962825167857, 0.33592561040195834]], [[\"TranslateX\", 
0.0761441550001345, 0.7043906245420134], [\"Equalize\", 0.670845297717783, 0.30986063097084215]], [[\"Contrast\", 0.30592723366237995, 0.7365013059287382], [\"Color\", 0.6173835128817455, 0.6417028717640598]], [[\"Rotate\", 0.05558240682703821, 0.7284722849011761], [\"Color\", 0.7814801133853666, 0.13335113981884217]], [[\"ShearY\", 0.6521743070190724, 0.6272195913574455], [\"Rotate\", 0.36278432239870423, 0.2335623679787695]], [[\"Color\", 0.6799351102482663, 0.3850250771244986], [\"Brightness\", 0.613901077818094, 0.2374900558949702]], [[\"Color\", 0.551451255148252, 0.7284757153447965], [\"Solarize\", 0.4863815212982878, 0.3857941567681324]], [[\"Contrast\", 0.32516343965159267, 0.689921852601276], [\"Cutout\", 0.5922142001124506, 0.7709605594115009]], [[\"Brightness\", 0.23760063764495856, 0.6392077018854179], [\"Brightness\", 0.7288124083714078, 0.4487520490201095]], [[\"Sharpness\", 0.5631112298553713, 0.6803534985114782], [\"ShearX\", 0.6743791169050775, 0.34039227245151127]], [[\"AutoContrast\", 0.8260911840078349, 0.7705607269534767], [\"Rotate\", 0.8880749478363638, 0.8182460047684648]], [[\"ShearY\", 0.7037620764408412, 0.5219573160970589], [\"Posterize\", 0.7186150466761102, 0.6187857686944253]], [[\"TranslateY\", 0.2140494926702246, 0.9104233882669488], [\"TranslateX\", 0.4096039512896902, 0.9692703030784571]], [[\"Equalize\", 0.5404313549028165, 0.04094078980738014], [\"AutoContrast\", 0.07870278300673744, 0.841020779977939]], [[\"ShearY\", 0.2684638876128488, 0.5599793678740521], [\"Cutout\", 0.19537995362704022, 0.2400995206366768]], [[\"AutoContrast\", 0.19366394417090382, 0.4130755503251951], [\"Sharpness\", 0.11735660606190662, 0.39276612830651914]], [[\"Cutout\", 0.8313266945081518, 0.37171822186374703], [\"Contrast\", 0.5088549187459019, 0.2956405118511817]], [[\"Cutout\", 0.28375485371479847, 0.37020183949342683], [\"Posterize\", 0.718761436947423, 0.2278804627251678]], [[\"ShearY\", 0.6625840735667625, 0.5045065697748213], [\"Rotate\", 
0.5175257698523389, 0.39496923901188824]], [[\"Color\", 0.6498154010188212, 0.38674158604408604], [\"Brightness\", 0.8157804892728057, 0.05660118670560971]], [[\"Color\", 0.5512855420254102, 0.7812054820692542], [\"Solarize\", 0.8851292984174468, 0.2808951606943277]], [[\"Contrast\", 0.35258433539074363, 0.8085377169629859], [\"Cutout\", 0.5197965849563265, 0.8657111726930974]], [[\"Cutout\", 0.23650925054419358, 0.746860862983295], [\"Brightness\", 0.8842190203336139, 0.4389347348156118]], [[\"Rotate\", 0.8651460526861932, 0.0031372441327392753], [\"Equalize\", 0.3909498933963822, 0.6221687914603954]], [[\"TranslateX\", 0.5793690303540427, 0.37939687327382987], [\"Invert\", 0.846172545690258, 0.36950442052945853]], [[\"Invert\", 0.5151721602607067, 0.5860134277259832], [\"Contrast\", 0.6868708526377458, 0.2188104093363727]], [[\"Contrast\", 0.28019632529718025, 0.8403553410328943], [\"Cutout\", 0.5238340355491738, 0.6948434115725599]], [[\"Rotate\", 0.1592592617684533, 0.5212044951482974], [\"Color\", 0.42404215473874546, 0.45894052919059103]], [[\"AutoContrast\", 0.21780978427851283, 0.11813011387113281], [\"Contrast\", 0.14557770349869537, 0.5468616480449002]], [[\"Cutout\", 0.03573873600256905, 0.8747186430368771], [\"AutoContrast\", 0.4804465018567564, 0.3968185812087325]], [[\"ShearY\", 0.027192162947493492, 0.35923750027515866], [\"Sharpness\", 0.03207302705814674, 0.25868625346023777]], [[\"AutoContrast\", 0.9111793886013045, 0.33534571661592005], [\"ShearY\", 0.31365410004768934, 0.37055495208177025]], [[\"Color\", 0.5119732811716222, 0.10635303813092001], [\"Solarize\", 0.9828759703639677, 0.33302532900783466]], [[\"Contrast\", 0.9652840964645487, 0.9550826002089741], [\"ShearY\", 0.16934262075572262, 0.35893022906919625]], [[\"Invert\", 0.21526903298837538, 0.5491812432380025], [\"TranslateX\", 0.27691575128765095, 0.9916365493500338]], [[\"AutoContrast\", 0.7223428288831728, 0.3001506080569529], [\"Posterize\", 0.28280773693692957, 0.5630226986948541]], 
[[\"TranslateY\", 0.5334698670580152, 0.4329627064903895], [\"Solarize\", 0.11621274404555687, 0.38564564358937725]], [[\"Brightness\", 0.9001900081991266, 0.15453762529292236], [\"Equalize\", 0.6749827304986464, 0.2174408558291521]], [[\"TranslateY\", 0.703293071780793, 0.20371204513522137], [\"Invert\", 0.7921926919880306, 0.2647654009616249]], [[\"AutoContrast\", 0.32650519442680254, 0.5567514700913352], [\"ShearY\", 0.7627653627354407, 0.5363510886152073]], [[\"Rotate\", 0.364293676091047, 0.4262321334071656], [\"Posterize\", 0.7284189361001443, 0.6052618047275847]], [[\"Contrast\", 0.004679138490284229, 0.6985327823420937], [\"Posterize\", 0.25412559986607497, 0.969098825421215]], [[\"ShearY\", 0.6831738973100172, 0.6916463366962687], [\"TranslateY\", 0.8744153159733203, 0.3667879549647143]], [[\"Posterize\", 0.39138456188265913, 0.8617909225610128], [\"TranslateX\", 0.5198303654364824, 0.5518823068009463]], [[\"Invert\", 0.6471155996761706, 0.4793957129423701], [\"ShearX\", 0.8046274258703997, 0.9711394307595065]], [[\"Solarize\", 0.2442520851809611, 0.5518114414771629], [\"Sharpness\", 0.02324109511463257, 0.18216585433541427]], [[\"Cutout\", 0.7004457278387007, 0.4904439660213413], [\"Contrast\", 0.6516622044646659, 0.7324290164242575]], [[\"Brightness\", 0.594212018801632, 0.5624822682300464], [\"ShearX\", 0.47929863548325596, 0.5610640338380719]], [[\"TranslateX\", 0.20863492063218445, 0.23761872077836552], [\"Color\", 0.9374148559524687, 0.06390809573246009]], [[\"AutoContrast\", 0.5548946725094693, 0.40547561665765874], [\"Equalize\", 0.26341425401933344, 0.2763692089379619]], [[\"Invert\", 0.8224614398122034, 0.15547159819315676], [\"Rotate\", 0.4915912924663281, 0.6995695827608112]], [[\"Equalize\", 0.05752620481520809, 0.80230125774557], [\"Rotate\", 0.16338857010673558, 0.8066738989167762]], [[\"ShearY\", 0.5437502855505825, 0.252101665309144], [\"Contrast\", 0.9268450172095902, 0.13437399256747992]], [[\"TranslateY\", 0.6946438457089812, 
0.35376889837139813], [\"Sharpness\", 0.15438234648960253, 0.2668696344562673]], [[\"Invert\", 0.24506516252953542, 0.1939315433476327], [\"Sharpness\", 0.8921986990130818, 0.21478051316241717]], [[\"TranslateY\", 0.5292829065905086, 0.6896826369723732], [\"Invert\", 0.4461047865540309, 0.9854416526561315]], [[\"Posterize\", 0.8085062334285464, 0.4538963572040656], [\"Brightness\", 0.2623572045603854, 0.16723779221170698]], [[\"Solarize\", 0.1618752496191097, 0.6007634864056693], [\"TranslateY\", 0.07808851801433346, 0.3951252736249746]], [[\"TranslateX\", 0.35426056783145843, 0.8875451782909476], [\"Brightness\", 0.5537927990151869, 0.3042790536918476]], [[\"Cutout\", 0.9051584028783342, 0.6050507821593669], [\"ShearX\", 0.31185875057627255, 0.39145181108334876]], [[\"Brightness\", 0.43157388465566776, 0.45511767545129933], [\"ShearY\", 0.626464342187273, 0.5251031991594401]], [[\"Contrast\", 0.7978520212540166, 0.45088491126800995], [\"ShearY\", 0.20415027867560143, 0.24369493783350643]], [[\"ShearX\", 0.48152242363853065, 0.001652619381325604], [\"Sharpness\", 0.6154899720956758, 0.22465778944283568]], [[\"Posterize\", 0.0008092255557418104, 0.8624848793450179], [\"Solarize\", 0.7580784903978838, 0.4141187863855049]], [[\"TranslateY\", 0.4829597846471378, 0.6077028815706373], [\"ShearX\", 0.43316420981872894, 0.007119694447608018]], [[\"Equalize\", 0.2914045973615852, 0.6298874433109889], [\"Cutout\", 0.18663096101056076, 0.20634383363149222]], [[\"TranslateX\", 0.6909947340830737, 0.40843889682671003], [\"ShearX\", 0.3693105697811625, 0.070573833710386]], [[\"Rotate\", 0.6184027722396339, 0.6483359499288176], [\"AutoContrast\", 0.8658233903089285, 0.31462524418660626]], [[\"Brightness\", 0.8165837262133947, 0.38138221738335765], [\"Contrast\", 0.01566790570443702, 0.1250581265407818]], [[\"Equalize\", 0.16745169701901802, 0.9239433721204139], [\"ShearY\", 0.5535908803004554, 0.35879199699526654]], [[\"Color\", 0.9675880875486578, 0.19745998576077994], 
[\"Posterize\", 0.641736196661405, 0.5702363593336868]], [[\"ShearY\", 0.27730895136251943, 0.4730273890919014], [\"Posterize\", 0.35829530316120517, 0.9040968539551122]], [[\"Cutout\", 0.9989158254302966, 0.3210048366589035], [\"Equalize\", 0.9226385492886618, 0.21132010337062]], [[\"Posterize\", 0.32861829410989934, 0.7608163668499222], [\"TranslateY\", 0.528381246453454, 0.6837459631017135]], [[\"ShearY\", 0.6786278797045173, 0.49006792710382946], [\"ShearX\", 0.7860409944610941, 0.7960317025665418]], [[\"Solarize\", 0.4420731874598513, 0.7163961196254427], [\"Sharpness\", 0.11927615232343353, 0.3649599343067734]], [[\"Cutout\", 0.4606157449857542, 0.4682141505042986], [\"Contrast\", 0.8955528913735222, 0.8468556570983498]], [[\"Brightness\", 0.5742349576881501, 0.5633914487991978], [\"ShearX\", 0.8288987143597276, 0.5937556836469728]], [[\"Posterize\", 0.05362153577922808, 0.40072961361335696], [\"Rotate\", 0.6681795049585278, 0.5348470042353504]], [[\"TranslateY\", 0.6190833866612555, 0.7338431624993972], [\"Color\", 0.5352400737236565, 0.1598194251940268]], [[\"Brightness\", 0.9942846465176832, 0.11918348505217388], [\"Brightness\", 0.0659098729688602, 0.6558077481794591]], [[\"Equalize\", 0.34089122700685126, 0.048940774058585546], [\"ShearX\", 0.5472987107071652, 0.2965222509150173]], [[\"Sharpness\", 0.3660728361470086, 0.37607120931207433], [\"Sharpness\", 0.9974987257291261, 0.2483317486035219]], [[\"Posterize\", 0.931283270966942, 0.7525022430475327], [\"Cutout\", 0.6299208568533524, 0.3313382622423058]], [[\"Invert\", 0.5074998650080915, 0.9722820836624784], [\"Solarize\", 0.13997049847474802, 0.19340041815763026]], [[\"AutoContrast\", 0.6804950477263457, 0.31675149536227815], [\"Solarize\", 0.800632422196852, 0.09054278636377117]], [[\"TranslateY\", 0.6886579465517867, 0.549118383513461], [\"Brightness\", 0.7298771973550124, 0.59421647759784]], [[\"Equalize\", 0.8117050130827859, 0.22494316766261946], [\"AutoContrast\", 0.5217061631918504, 
0.6106946809838144]], [[\"Equalize\", 0.4734718117645248, 0.7746036952254298], [\"Posterize\", 0.032049205574512685, 0.9681402692267316]], [[\"Brightness\", 0.4724177066851541, 0.7969700024018729], [\"Solarize\", 0.6930049134926459, 0.3880086567038069]], [[\"TranslateX\", 0.2833979092130342, 0.6873833799104118], [\"Rotate\", 0.37167767436617366, 0.03249352593350204]], [[\"Posterize\", 0.7080588381354884, 0.03014586990329654], [\"Posterize\", 0.20883930954891392, 0.1328596635826556]], [[\"Cutout\", 0.1992050307454733, 0.8079881690617468], [\"ShearY\", 0.3057279570820446, 0.34868823290010564]], [[\"TranslateY\", 0.6204358851346782, 0.24978856155434062], [\"ShearX\", 0.2403059671388028, 0.6706906799258086]], [[\"Contrast\", 0.5527380063918701, 0.27504242043334765], [\"Rotate\", 0.37361791978638376, 0.17818567121454373]], [[\"Cutout\", 0.3368229687890997, 0.013512329226772313], [\"Contrast\", 0.18480406673028238, 0.21653280083721013]], [[\"AutoContrast\", 0.13634047961070397, 0.5322441057075571], [\"Posterize\", 0.3409948654529233, 0.2562132228604077]], [[\"Invert\", 0.3375636037272626, 0.5417577242453775], [\"Sharpness\", 0.10271458969925179, 0.5125859420868099]], [[\"Invert\", 0.26465503753231256, 0.7386494688407392], [\"AutoContrast\", 0.5310106090963371, 0.14699248759273964]], [[\"Sharpness\", 0.8494538270706318, 0.9524607358113082], [\"Solarize\", 0.21142978953773187, 0.10711867917080763]], [[\"Equalize\", 0.5185117903942263, 0.06342404369282638], [\"ShearY\", 0.26812877371366156, 0.32386585917978056]], [[\"TranslateY\", 0.42724471339053904, 0.5218262942425845], [\"Brightness\", 0.7618037699290332, 0.5773256674209075]], [[\"Solarize\", 0.5683461491921462, 0.7988018975591509], [\"AutoContrast\", 0.21826664523938988, 0.4395073407383595]], [[\"Posterize\", 0.2564295537162734, 0.6778150727248975], [\"Equalize\", 0.7571361164411801, 0.4281744623444925]], [[\"Invert\", 0.5171620125994946, 0.8719074953677988], [\"ShearX\", 0.10216776728552601, 0.20888013515457593]], 
[[\"Equalize\", 0.934033636879294, 0.7724470445507672], [\"ShearX\", 0.14671590364536757, 0.06500753170863127]], [[\"Cutout\", 0.48433709681747783, 0.8989915985203363], [\"ShearY\", 0.5161346572684965, 0.3154078452465332]], [[\"AutoContrast\", 0.4337913490682531, 0.8651407398083308], [\"AutoContrast\", 0.31402168607643444, 0.5001710653814162]], [[\"Brightness\", 0.4805460794016203, 0.8182812769485313], [\"Equalize\", 0.6811585495672738, 0.25172380097389147]], [[\"TranslateX\", 0.05384872718386273, 0.7854623644701991], [\"Color\", 0.12583336502656287, 0.08656304042059215]], [[\"TranslateX\", 0.3949348949001942, 0.0668909826131569], [\"ShearX\", 0.2895255694762277, 0.23998090792480392]], [[\"TranslateY\", 0.3183346601371876, 0.5869865305603826], [\"Cutout\", 0.38601500458347904, 0.37785641359408184]], [[\"Sharpness\", 0.3676509660134142, 0.6370727445512337], [\"Rotate\", 0.17589815946040205, 0.912442427082365]], [[\"Equalize\", 0.46427003979798154, 0.7771177715171392], [\"Cutout\", 0.6622980582423883, 0.47780927252115374]], [[\"TranslateX\", 0.4535588156726688, 0.9548833090146791], [\"ShearY\", 0.18609208838268262, 0.034329918652624025]], [[\"Rotate\", 0.4896172340987028, 0.4842683413051553], [\"Brightness\", 0.08416972178617699, 0.2946109607041465]], [[\"TranslateY\", 0.1443363248914217, 0.7352253161146544], [\"ShearX\", 0.025210952382823004, 0.6249971039957651]], [[\"Brightness\", 0.08771030702840285, 0.5926338109828604], [\"Contrast\", 0.629121304110493, 0.36114268164347396]], [[\"Cutout\", 0.003318169533990778, 0.984234627407162], [\"Color\", 0.5656264894233379, 0.9913705503959709]], [[\"Cutout\", 0.17582168928005226, 0.5163176285036686], [\"Sharpness\", 0.42976684239235224, 0.9936723374147685]], [[\"Rotate\", 0.13343297511611085, 0.730719022391835], [\"Cutout\", 0.43419793455016154, 0.9802436121876401]], [[\"ShearX\", 0.8761482122895571, 0.11688364945899332], [\"Solarize\", 0.6071032746712549, 0.9972373138154098]], [[\"Contrast\", 0.2721995133325574, 
0.9467839388553563], [\"AutoContrast\", 0.357368427575824, 0.6530359095247653]], [[\"Equalize\", 0.5334298945812708, 0.7157629957411794], [\"Brightness\", 0.8885107405370157, 0.2909013041171791]], [[\"Equalize\", 0.4907081744271751, 0.9999203497290372], [\"ShearX\", 0.0055186544890628575, 0.20501406304441697]], [[\"Color\", 0.4865852751351166, 0.14717278223914915], [\"TranslateX\", 0.0492335566831905, 0.01654291587484527]], [[\"Contrast\", 0.3753662301521211, 0.866484274102244], [\"Color\", 0.21148416029328898, 0.37861792266657684]], [[\"TranslateY\", 0.03960047686663052, 0.9948086048192006], [\"TranslateX\", 0.5802633545422445, 0.7696464344779717]], [[\"Contrast\", 0.6456791961464718, 0.6304663998505495], [\"Sharpness\", 0.594774521429873, 0.8024138008893688]], [[\"Equalize\", 0.5326123709954759, 0.7361990154971826], [\"Invert\", 0.5337609996065145, 0.06826577456972233]], [[\"ShearY\", 0.7177596430755101, 0.16672206074906565], [\"Equalize\", 0.1847132768987843, 0.16186121936769876]], [[\"ShearY\", 0.037342495065949534, 0.7762322168034441], [\"Rotate\", 0.28731231550023495, 0.4605573565280328]], [[\"Contrast\", 0.6815742688289678, 0.04073638022156048], [\"Cutout\", 0.20201133153964437, 0.048429819360450654]], [[\"Color\", 0.5295323372448824, 0.8591352159356821], [\"Posterize\", 0.7743900815037675, 0.8308865010050488]], [[\"Solarize\", 0.9325362059095493, 0.4070769736318192], [\"Contrast\", 0.09359008071252661, 0.2808191171337515]], [[\"Sharpness\", 0.6413241263332543, 0.5493867784897841], [\"Solarize\", 0.021951790397463734, 0.1045868634597023]], [[\"Color\", 0.006027943433085061, 0.698043169126901], [\"TranslateX\", 0.06672167045857719, 0.6096719632236709]], [[\"TranslateX\", 0.42167004878865333, 0.8844171486107537], [\"Color\", 0.12383835252312375, 0.9559595374068695]], [[\"Posterize\", 0.5382560989047361, 0.6014252438301297], [\"Color\", 0.26197040526014054, 0.3423981550778665]], [[\"Cutout\", 0.33150268513579584, 0.40828564490879615], [\"AutoContrast\", 
0.6907753092981255, 0.05779246756831708]], [[\"Equalize\", 0.31608006376116865, 0.9958870759781376], [\"TranslateY\", 0.15842255624921547, 0.5764254535539765]], [[\"Contrast\", 0.19859706438565994, 0.12680764238281503], [\"TranslateY\", 0.4694115475285127, 0.45831161348904836]], [[\"TranslateX\", 0.18768081492494126, 0.7718605539481094], [\"Cutout\", 0.2340834739291012, 0.3290460999084155]], [[\"Posterize\", 0.17300123510877463, 0.5276823821218432], [\"AutoContrast\", 0.5861008799330297, 0.31557924295308126]], [[\"TranslateX\", 0.36140745478517367, 0.4172762477431993], [\"Sharpness\", 0.6518477061748665, 0.9033991248207786]], [[\"AutoContrast\", 0.1757278990984992, 0.9562490311064124], [\"Invert\", 0.43712652497757065, 0.26925880337078234]], [[\"TranslateX\", 0.38113274849599377, 0.35742156735271613], [\"TranslateY\", 0.47708889990018216, 0.7975974044609476]], [[\"Brightness\", 0.39538470887490523, 0.09692156164771923], [\"Equalize\", 0.876825166573471, 0.0979346217138612]], [[\"Solarize\", 0.07679586061933875, 0.45996163577975313], [\"Invert\", 0.039726680682847904, 0.23574574397443826]], [[\"ShearX\", 0.9739648414905278, 0.5217986621319772], [\"TranslateY\", 0.21653455086845896, 0.30415852174016683]], [[\"TranslateY\", 0.26965366633030263, 0.4355259497820251], [\"Sharpness\", 0.6343493801543757, 0.9337027079656623]], [[\"Rotate\", 0.42301232492240126, 0.07813015342326983], [\"AutoContrast\", 0.28524730310382906, 0.24127293503900557]], [[\"Color\", 0.826300213905907, 0.008451115447607682], [\"Equalize\", 0.6770124607838715, 0.2889698349030014]], [[\"Cutout\", 0.3461911530045792, 0.7481322146924341], [\"Brightness\", 0.1831459184570124, 0.5487074846857195]], [[\"Brightness\", 0.8455429603962046, 0.4838335496721761], [\"Cutout\", 0.5778222397066808, 0.7789798279724414]], [[\"Brightness\", 0.7859388330361665, 0.5907006126719181], [\"Brightness\", 0.5299842953874527, 0.008670514958094622]], [[\"Rotate\", 0.9584331504536162, 0.7242692977964363], [\"TranslateY\", 
0.46941406313257866, 0.748911298847083]], [[\"AutoContrast\", 0.5878130357161462, 0.25218818797390996], [\"Solarize\", 0.815466142337258, 0.20231731395730107]], [[\"ShearX\", 0.15594838773787617, 0.9764784874102524], [\"TranslateY\", 0.5805369037495945, 0.1412009058745196]], [[\"Sharpness\", 0.7936370935749524, 0.5142489498674206], [\"Sharpness\", 0.1544307510097193, 0.3678451501088748]], [[\"TranslateY\", 0.29391437860633873, 0.3520843012638746], [\"Brightness\", 0.5885278199370352, 0.04915265122854349]], [[\"AutoContrast\", 0.3329771519033218, 0.2459852352278583], [\"Equalize\", 0.8674782697650298, 0.2900192232303214]], [[\"Cutout\", 0.58997726901359, 0.9910393463442352], [\"Contrast\", 0.09792234559792412, 0.23341828880112486]], [[\"Cutout\", 0.4643317809492098, 0.3224299097542076], [\"TranslateY\", 0.7998033586490294, 0.27086436352896565]], [[\"AutoContrast\", 0.13138317155414905, 0.3419742927322439], [\"TranslateY\", 0.05413070060788905, 0.5504283113763994]], [[\"Posterize\", 0.3645493423712921, 0.10684861674653627], [\"Color\", 0.6343589365592908, 0.9712261380583729]], [[\"Color\", 0.06539862123316142, 0.34370535435837324], [\"Equalize\", 0.8098077629435421, 0.1272416658849032]], [[\"Invert\", 0.3600258964493429, 0.7455698641930473], [\"Color\", 0.4118102215241555, 0.4489347750419333]], [[\"Sharpness\", 0.2230673636976691, 0.2240713255305713], [\"AutoContrast\", 0.5039292091174429, 0.033700713206763835]], [[\"ShearX\", 0.10611028325684749, 0.4235430688519599], [\"Brightness\", 0.354597328722803, 0.6835155193055997]], [[\"ShearX\", 0.101313662029975, 0.3048854771395032], [\"ShearX\", 0.39832929626318425, 0.5569152062399838]], [[\"ShearX\", 0.46033087857932264, 0.5976525683159943], [\"Color\", 0.8117411866929898, 0.22950658046373415]], [[\"Cutout\", 0.04125062306390376, 0.5021647863925347], [\"TranslateY\", 0.4949139091550513, 0.40234738545601595]], [[\"TranslateX\", 0.9982425877241792, 0.3912268450702254], [\"Cutout\", 0.8094853705295444, 0.4628037417520003]], 
[[\"Contrast\", 0.47154787535001147, 0.5116549800625204], [\"Invert\", 0.4929108509901112, 0.713690694626014]], [[\"ShearX\", 0.3073913369156325, 0.5912409524756753], [\"Equalize\", 0.5603975982699875, 0.12046838435247365]], [[\"TranslateY\", 0.8622939212850868, 0.057802109037417344], [\"Invert\", 0.7577173459800602, 0.33727019024447835]], [[\"Cutout\", 0.3646694663986778, 0.6285264075514656], [\"Color\", 0.5589259087346165, 0.6650676195317845]], [[\"Invert\", 0.8563008117600374, 0.6216056385231019], [\"AutoContrast\", 0.7575002303510038, 0.6906934785154547]], [[\"ShearX\", 0.4415411885102101, 0.301535484182858], [\"TranslateY\", 0.779716145113622, 0.5792057745092073]], [[\"Invert\", 0.10736083594024397, 0.10640910911300788], [\"Posterize\", 0.5923391813408784, 0.5437447559328059]], [[\"Color\", 0.4745215286268124, 0.08046291318852558], [\"Rotate\", 0.1642897827127771, 0.20754337935267492]], [[\"Invert\", 0.3141086213412405, 0.5865422721808763], [\"AutoContrast\", 0.7551954144793225, 0.5588044000850431]], [[\"Equalize\", 0.979500405577596, 0.6846916489547885], [\"Rotate\", 0.11257616752512875, 0.8137724117751907]], [[\"Equalize\", 0.6315666801659133, 0.71548254701219], [\"Cutout\", 0.38805635642306224, 0.29282906744304604]], [[\"Posterize\", 0.022485702859896456, 0.2794994040845844], [\"Color\", 0.4554990465860552, 0.5842888808848151]], [[\"Invert\", 0.15787502346886398, 0.5137397924063724], [\"TranslateY\", 0.487638703473969, 0.6428121360825987]], [[\"Rotate\", 0.20473927977443407, 0.6090899892067203], [\"Contrast\", 0.3794752343740154, 0.8056548374185936]], [[\"AutoContrast\", 0.35889225269685354, 0.7311496777471619], [\"Sharpness\", 0.10152796686794396, 0.34768639850633193]], [[\"Rotate\", 0.6298704242033275, 0.09649334401126405], [\"Solarize\", 0.24713244934163017, 0.4292117526982358]], [[\"Contrast\", 0.9851015107131748, 0.30895068679118054], [\"Sharpness\", 0.7167845732283787, 0.36269175386392893]], [[\"Equalize\", 0.49699932368219435, 0.21262924430159158], 
[\"Contrast\", 0.8497731498354579, 0.672321242252727]], [[\"ShearX\", 0.18955591368056923, 0.47178691165954034], [\"Sharpness\", 0.17732805705271348, 0.5486957094984023]], [[\"ShearY\", 0.5087926728214892, 0.8236809302978783], [\"AutoContrast\", 0.9661195881001936, 0.1309360428195535]], [[\"Rotate\", 0.7825835251082691, 0.8292427086033229], [\"TranslateX\", 0.2034110174253454, 0.4073091408820304]], [[\"Cutout\", 0.33457316681888716, 0.480098511703719], [\"Sharpness\", 0.8686004956803908, 0.21719357589897192]], [[\"ShearX\", 0.30750577846813, 0.6349236735519613], [\"Color\", 0.5096781256213182, 0.5367289796478476]], [[\"Rotate\", 0.7881847986981432, 0.846966895144323], [\"Posterize\", 0.33955649631388407, 0.9484449471562024]], [[\"Posterize\", 0.5154127791998345, 0.8765287012129974], [\"Posterize\", 0.09621562708431097, 0.42108077474553995]], [[\"ShearX\", 0.5513772653411826, 0.27285892893658015], [\"AutoContrast\", 0.027608088485522986, 0.1738173285576814]], [[\"Equalize\", 0.7950881609822011, 0.05938388811616446], [\"ShearX\", 0.7864733097562856, 0.5928584864954718]], [[\"Equalize\", 0.03401947599579436, 0.4936643525799874], [\"Solarize\", 0.8445332527647407, 0.4695434980914176]], [[\"AutoContrast\", 0.9656295942383031, 0.6330670076537706], [\"Brightness\", 0.303859679517296, 0.8882002295195086]], [[\"ShearY\", 0.5242765280639856, 0.7977406809732712], [\"Rotate\", 0.24810823616083127, 0.41392557985700773]], [[\"Posterize\", 0.6824268148168342, 0.21831492475831715], [\"ShearY\", 0.0008811906288737209, 0.1939566265644924]], [[\"ShearY\", 0.8413370823124643, 0.7075999817793881], [\"Brightness\", 0.7942266192900009, 0.0384845738170444]], [[\"ShearY\", 0.9003919463843213, 0.5068340457708402], [\"AutoContrast\", 0.9990937631537938, 0.35323621376481695]], [[\"Contrast\", 0.3266913024108897, 0.5470774782762176], [\"Contrast\", 0.31235464476196995, 0.5723334696204473]], [[\"AutoContrast\", 0.40137522654585955, 0.4274859892417776], [\"Sharpness\", 0.6173858127038773, 
0.9629236289042568]], [[\"Sharpness\", 0.3728210261025356, 0.7873518787942092], [\"Solarize\", 0.4319848902062112, 0.799524274852396]], [[\"Sharpness\", 0.009379857090624758, 0.3143858944787348], [\"ShearY\", 0.20273037650420184, 0.3501104740582885]], [[\"Color\", 0.1837135820716444, 0.5709648984713641], [\"Solarize\", 0.36312838060628455, 0.3753448575775562]], [[\"Cutout\", 0.3400431457353702, 0.6871688775988243], [\"ShearX\", 0.42524570507364123, 0.7108865889616602]], [[\"Sharpness\", 0.30703348499729893, 0.885278643437672], [\"Cutout\", 0.04407034125935705, 0.6821013415071144]], [[\"Brightness\", 0.7164362367177879, 0.3383891625406651], [\"Posterize\", 0.002136409392137939, 0.5744439712876557]], [[\"Rotate\", 0.757566991428807, 0.41351586654059386], [\"TranslateY\", 0.6716670812367449, 0.45381701497377025]], [[\"Color\", 0.29554345831738604, 0.5747484938203239], [\"Brightness\", 0.6495565535422139, 0.38353714282675055]], [[\"Color\", 0.6552239827844064, 0.6396684879350223], [\"Rotate\", 0.4078437959841622, 0.8229364582618871]], [[\"ShearX\", 0.3325165311431108, 0.99875651917317], [\"Cutout\", 0.060614087173980605, 0.8655206968462149]], [[\"ShearY\", 0.8591223614020521, 0.47375809606391645], [\"ShearY\", 0.09964216351993155, 0.7076762087109618]], [[\"Color\", 0.9353968383925787, 0.5171703648813921], [\"Cutout\", 0.7542267059402566, 0.4591488152776885]], [[\"ShearX\", 0.6832456179177027, 0.6798505733549863], [\"Color\", 0.7408439718746301, 0.5061967673457707]], [[\"Equalize\", 0.4451729339243929, 0.9242958562575693], [\"Posterize\", 0.2426742903818478, 0.7914731845374992]], [[\"Posterize\", 0.6241497285503436, 0.6800650930438693], [\"Rotate\", 0.8212761169895445, 0.42470879405266637]], [[\"Sharpness\", 0.35467334577635123, 0.4150922293649909], [\"Color\", 0.38988011871489925, 0.08762395748275534]], [[\"Invert\", 0.20231176261188386, 0.34300045056881756], [\"Color\", 0.6311643386438919, 0.4311911861691113]], [[\"Contrast\", 0.2892223327756343, 0.533349670629816], 
[\"ShearY\", 0.6483243327679983, 0.37584367848303185]], [[\"Contrast\", 0.6516401043089397, 0.3801387361685983], [\"Contrast\", 0.38470661862567795, 0.994720698440467]], [[\"Contrast\", 0.44558087160644655, 0.4234506152228727], [\"AutoContrast\", 0.30132391715441104, 0.7758068064149011]], [[\"ShearY\", 0.8336612877669443, 0.6961881064757953], [\"TranslateX\", 0.111182606133131, 0.7138593872015647]], [[\"Brightness\", 0.7252053408816349, 0.6883715819669095], [\"Cutout\", 0.6664014893052573, 0.5118622737562747]], [[\"TranslateX\", 0.04294623433241698, 0.4737274091618545], [\"Solarize\", 0.15848056715239178, 0.436678451116009]], [[\"ShearX\", 0.41843604414439584, 0.5571669083243844], [\"Solarize\", 0.31754187268874345, 0.643294796216908]], [[\"Cutout\", 0.308644829376876, 0.9455913104658791], [\"Cutout\", 0.04221174396591258, 0.8004389485099825]], [[\"Invert\", 0.7644819805649288, 0.393641460630097], [\"Posterize\", 0.20832144467525543, 0.6449709932505365]], [[\"ShearY\", 0.60954354330238, 0.45193814135157406], [\"Rotate\", 0.07564178568434804, 0.5700158941616946]], [[\"Color\", 0.47993653910354905, 0.18770437256254732], [\"Equalize\", 0.16540989366253533, 0.3295832145751728]], [[\"Sharpness\", 0.773656112445468, 0.899183686347773], [\"AutoContrast\", 0.6225833171499476, 0.8375805811436356]], [[\"Brightness\", 0.3119630413126101, 0.21694186245727698], [\"Cutout\", 0.08263220622864997, 0.9910421137289533]], [[\"TranslateY\", 0.5200200210314198, 0.44467464167817444], [\"Cutout\", 0.3466375681433383, 0.22385957813397142]], [[\"ShearY\", 0.4445374219718209, 0.23917745675733915], [\"Equalize\", 0.32094329607540717, 0.6286388268054685]], [[\"Invert\", 0.6194633221674505, 0.6219326801360905], [\"Color\", 0.43219405413154555, 0.5463431710956901]], [[\"ShearX\", 0.5491808798436206, 0.4485147269153593], [\"ShearX\", 0.9624243432991532, 0.581319457926692]], [[\"Cutout\", 0.8486066390061917, 0.48538785811340557], [\"Cutout\", 0.15945182827781573, 0.4114259503742423]], 
[[\"TranslateX\", 0.9845485123667319, 0.7590166645874611], [\"Solarize\", 0.9920857955871512, 0.33259831689209834]], [[\"Brightness\", 0.3985764491687188, 0.3516086190155328], [\"Cutout\", 0.13907765098725244, 0.42430309616193995]], [[\"Color\", 0.35877942890428727, 0.363294622757879], [\"Equalize\", 0.4997709941984466, 0.34475754120666147]], [[\"Sharpness\", 0.5234916035905941, 0.8988480410886609], [\"AutoContrast\", 0.793554237802939, 0.2575758806963965]], [[\"Brightness\", 0.36998588693418133, 0.24144652775222428], [\"Cutout\", 0.06610767765334377, 0.9979246311006975]], [[\"TranslateY\", 0.6132425595571164, 0.43952345951359123], [\"Cutout\", 0.361849532200793, 0.8462247954545264]], [[\"Posterize\", 0.36953849915949677, 0.3144747463577223], [\"Equalize\", 0.3258985378881982, 0.6314053736452068]], [[\"TranslateY\", 0.35835648104981205, 0.08075066564380576], [\"TranslateX\", 0.5242389109555177, 0.11959330395816647]], [[\"ShearX\", 0.32773751079554303, 0.9307864751586945], [\"Sharpness\", 0.006921805496030664, 0.8736511230672348]], [[\"TranslateY\", 0.48202000226401526, 0.7058919195136056], [\"ShearY\", 0.6998308555145181, 0.21074360071080764]], [[\"AutoContrast\", 0.7615852152325713, 0.24914859158079972], [\"Cutout\", 0.8270894478252626, 0.5804285538051077]], [[\"AutoContrast\", 0.5391662421077847, 0.5233969710179517], [\"Brightness\", 0.04205906143049083, 0.382677139318253]], [[\"Brightness\", 0.6904817357054526, 0.9116378156160974], [\"Invert\", 0.24305250280628815, 0.2384731852843838]], [[\"TranslateX\", 0.2661235046256291, 0.9705982948874188], [\"Sharpness\", 0.35821873293899625, 0.0030835471296858444]], [[\"Posterize\", 0.39029991982997647, 0.4286238191447004], [\"TranslateX\", 0.08954883207184736, 0.7263973533121859]], [[\"Cutout\", 0.040284118298638344, 0.0388330236482832], [\"Posterize\", 0.7807814946471116, 0.5238352731112299]], [[\"ShearY\", 0.43556653451802413, 0.6924037743225071], [\"Contrast\", 0.001081515338562919, 0.7340363920548519]], 
[[\"Sharpness\", 0.6966467544442373, 0.10202517317137291], [\"Color\", 0.18836344735972566, 0.31736252662501935]], [[\"Contrast\", 0.6460000689193517, 0.16242196500430484], [\"AutoContrast\", 0.6003831047484897, 0.8612141912778188]], [[\"Brightness\", 0.9172874494072921, 0.292364504408795], [\"Solarize\", 0.344602582555059, 0.7054248176903991]], [[\"Brightness\", 0.020940469451794064, 0.5051042440134866], [\"Cutout\", 0.569500058123745, 0.9091247933460598]], [[\"Invert\", 0.7367715506799225, 0.636137024500329], [\"TranslateY\", 0.6186960283294023, 0.37626001619073624]], [[\"TranslateX\", 0.2863246154089121, 0.7454318730628517], [\"ShearY\", 0.6649909124084395, 0.37639265910774133]], [[\"Equalize\", 0.34603376919062656, 0.9324026002997775], [\"Sharpness\", 0.8481669261233902, 0.14545759197862507]], [[\"Contrast\", 0.6184370038862784, 0.8074198580702933], [\"TranslateX\", 0.07036135693949985, 0.46222686847401306]], [[\"Invert\", 0.9304884364616345, 0.26298808050002387], [\"Color\", 0.8027813156985396, 0.7748486756116594]], [[\"Posterize\", 0.2887993806199106, 0.9576118517235523], [\"Contrast\", 0.07498577510121784, 0.09131727137211232]], [[\"Contrast\", 0.8110536569461197, 0.051038215841138386], [\"Solarize\", 0.8799018446258887, 0.25028365826721977]], [[\"Cutout\", 0.006954733791187662, 0.030507696587206496], [\"Brightness\", 0.45329597160103124, 0.9623148451520953]], [[\"TranslateX\", 0.7436227980344521, 0.45996857241163086], [\"Solarize\", 0.9682234479355196, 0.70777684485634]], [[\"Brightness\", 0.2080557865889058, 0.025557286020371328], [\"AutoContrast\", 0.4786039197123853, 0.9271157120589375]], [[\"Solarize\", 0.1822930503108656, 0.8448222682426465], [\"ShearX\", 0.6221001240196488, 0.207994745014715]], [[\"Color\", 0.27879201870553094, 0.9112278219836276], [\"Color\", 0.7508664408516654, 0.14885798940641318]], [[\"ShearX\", 0.5496326925552889, 0.7643918760952656], [\"AutoContrast\", 0.7887459433195374, 0.5993900500657054]], [[\"ShearY\", 0.7182376017241904, 
0.7470412126724141], [\"Rotate\", 0.7644845975844854, 0.38510752407409893]], [[\"Contrast\", 0.7984591239416293, 0.054767400038152704], [\"Posterize\", 0.7324315466290486, 0.41749946919991243]], [[\"Contrast\", 0.596887781894766, 0.14832691232456097], [\"Contrast\", 0.05140651977459313, 0.14459348285712803]], [[\"TranslateX\", 0.32766681876233766, 0.5291103977440215], [\"Color\", 0.6039423443931029, 0.6280077043167083]], [[\"Invert\", 0.5267106136816635, 0.9429838545064784], [\"Sharpness\", 0.9999053422304087, 0.24764251340211074]], [[\"Contrast\", 0.495767451313242, 0.6744720418896594], [\"Brightness\", 0.2220993631062378, 0.023842431692152832]], [[\"Invert\", 0.7609399278201697, 0.38010826932678554], [\"Color\", 0.8454251931688355, 0.5876680099851194]], [[\"Posterize\", 0.24967505238473384, 0.3801835337368412], [\"Contrast\", 0.15106121477353399, 0.6785384814310887]], [[\"Invert\", 0.49594153211743874, 0.32307787492774986], [\"Contrast\", 0.46822075688054793, 0.7106858486805577]], [[\"Sharpness\", 0.7204076261101202, 0.5928585438185809], [\"Rotate\", 0.2922878012111486, 0.2742491027179961]], [[\"Solarize\", 0.2866813728691532, 0.2856363754608978], [\"TranslateY\", 0.7817609208793659, 0.17156048740523572]], [[\"Cutout\", 0.03345540659323987, 0.30068271036485605], [\"ShearY\", 0.2556603044234358, 0.32397855468866993]], [[\"TranslateY\", 0.20032231858163152, 0.4577561841994639], [\"Cutout\", 0.8063563515601337, 0.9224365467344459]], [[\"TranslateY\", 0.27130034613023113, 0.7446375583249849], [\"ShearX\", 0.8254766023480402, 0.4187078898038131]], [[\"ShearX\", 0.2937536068210411, 0.3864492533047109], [\"Contrast\", 0.7069611463424469, 0.686695922492015]], [[\"TranslateX\", 0.5869084659063555, 0.7866008068031776], [\"Invert\", 0.289041613918004, 0.5774431720429087]], [[\"Posterize\", 0.6199250263408456, 0.36010044446077893], [\"Color\", 0.7216853388297056, 0.18586684958836489]], [[\"Posterize\", 0.16831615585406814, 0.08052519983493259], [\"Cutout\", 
0.7325882891023244, 0.77416439921321]], [[\"Posterize\", 0.3000961100422498, 0.5181759282337892], [\"Contrast\", 0.40376073196794304, 0.613724714153924]], [[\"ShearX\", 0.32203193464136226, 0.037459860897434916], [\"Solarize\", 0.961542785512965, 0.5176575408248285]], [[\"Posterize\", 0.8986732529036036, 0.7773257927223327], [\"AutoContrast\", 0.9765986969928243, 0.2092264330225745]], [[\"Posterize\", 0.7463386563644007, 0.7086671048242543], [\"Posterize\", 0.6433819807034994, 0.00541136425219968]], [[\"Contrast\", 0.8810746688690078, 0.4821029611474963], [\"Invert\", 0.5121169325265204, 0.6360694878582249]], [[\"AutoContrast\", 0.457606735372388, 0.6104794570624505], [\"Color\", 0.0020511991982608124, 0.6488142202778011]], [[\"Invert\", 0.01744463899367027, 0.9799156424364703], [\"ShearY\", 0.3448213456605478, 0.04437356383800711]], [[\"Solarize\", 0.28511589596283315, 0.283465265528744], [\"Rotate\", 0.6831807199089897, 0.0617176467316177]], [[\"Sharpness\", 0.329148970281285, 0.398397318402924], [\"Color\", 0.9125837011914073, 0.4724426676489746]], [[\"Posterize\", 0.05701522811381192, 0.17109014518445975], [\"Cutout\", 0.785885656821686, 0.39072624694455804]], [[\"TranslateY\", 0.36644251447248277, 0.5818480868136134], [\"Equalize\", 0.06162286852923926, 0.710929848709861]], [[\"ShearY\", 0.8667124241442813, 0.7556246528256454], [\"ShearY\", 0.505190335528531, 0.2935701441277698]], [[\"Brightness\", 0.6369570015916268, 0.5131486964430919], [\"Color\", 0.4887119711633827, 0.9364572089679907]], [[\"Equalize\", 0.06596702627228657, 0.42632445412423303], [\"Equalize\", 0.583434672187985, 0.045592788478947655]], [[\"ShearY\", 0.12701084021549092, 0.501622939075192], [\"Cutout\", 0.7948319202684251, 0.5662618207034569]], [[\"Posterize\", 0.24586808377061664, 0.5178008194277262], [\"Contrast\", 0.1647040530405073, 0.7459410952796975]], [[\"Solarize\", 0.346601298126444, 0.02933266448415553], [\"ShearY\", 0.9571781647031095, 0.4992610484566735]], [[\"Brightness\", 
0.5174960605130408, 0.4387498174634591], [\"AutoContrast\", 0.6327403754086753, 0.8279630556620247]], [[\"Posterize\", 0.7591448754183128, 0.6265369743070788], [\"Posterize\", 0.5030300462943854, 0.00401699185532868]], [[\"Contrast\", 0.02643254602183477, 0.44677741300429646], [\"Invert\", 0.2921779546234399, 0.732876182854368]], [[\"TranslateY\", 0.3516821152310867, 0.7142224211142528], [\"Brightness\", 0.07382104862245475, 0.45368581543623165]], [[\"Invert\", 0.21382474908836685, 0.8413922690356168], [\"Invert\", 0.4082563426777157, 0.17018243778787834]], [[\"Brightness\", 0.9533955059573749, 0.8279651051553477], [\"Cutout\", 0.6730769221406385, 0.07780554260470988]], [[\"Brightness\", 0.6022173063382547, 0.6008500678386571], [\"Sharpness\", 0.5051909719558138, 0.002298383273851839]], [[\"Contrast\", 0.03373395758348563, 0.3343918835437655], [\"Sharpness\", 0.8933651164916847, 0.21738300404986516]], [[\"TranslateX\", 0.7095755408419822, 0.26445508146225394], [\"Equalize\", 0.18255527363432034, 0.38857557766574147]], [[\"Solarize\", 0.4045911117686074, 0.009106925727519921], [\"Posterize\", 0.9380296936271705, 0.5485821516085955]], [[\"Posterize\", 0.20361995432403968, 0.45378735898242406], [\"AutoContrast\", 0.9020357653982511, 0.7880592087609304]], [[\"AutoContrast\", 0.9921550787672145, 0.7396130723399785], [\"Cutout\", 0.4203609896071977, 0.13000504717682415]], [[\"Equalize\", 0.1917806394805356, 0.5549114911941102], [\"Posterize\", 0.27636900597148506, 0.02953514963949344]], [[\"AutoContrast\", 0.5427071893197213, 0.6650127340685553], [\"Color\", 0.011762461060904839, 0.3793508738225649]], [[\"Invert\", 0.18495006059896424, 0.8561476625981166], [\"ShearY\", 0.6417068692813954, 0.9908751019535517]], [[\"Solarize\", 0.2992385431633619, 0.33622162977907644], [\"Rotate\", 0.6070550252540432, 0.010205544695142064]], [[\"Sharpness\", 0.33292787606841845, 0.549446566149951], [\"Color\", 0.9097665730481233, 0.9947658451503181]], [[\"Posterize\", 0.11207465085954937, 
0.23296263754645155], [\"Cutout\", 0.6159972426858633, 0.38289684517298556]], [[\"TranslateX\", 0.7343689718523805, 0.16303049089087485], [\"Equalize\", 0.3138385390145809, 0.6096356352129273]], [[\"Solarize\", 0.4807269891506887, 0.28116279654856363], [\"Posterize\", 0.9753467973380021, 0.6327025372916857]], [[\"Posterize\", 0.837244997106023, 0.5586046483574153], [\"AutoContrast\", 0.9005775602024721, 0.7983389828641411]], [[\"AutoContrast\", 0.8347112949943837, 0.7321850307727004], [\"Cutout\", 0.3322676575657192, 0.14409873524237032]], [[\"Equalize\", 0.12285967262649124, 0.5368519477089722], [\"Posterize\", 0.2693593445898034, 0.15098267759162076]], [[\"Invert\", 0.331021587020619, 0.3140868578915853], [\"Cutout\", 0.48268387543799884, 0.7642598986625201]], [[\"Equalize\", 0.47573794714622175, 0.8628185952549363], [\"Solarize\", 0.14860046214144496, 0.3739284346347912]], [[\"AutoContrast\", 0.6747373196190459, 0.2912917979635714], [\"Posterize\", 0.27259573208358623, 0.9643671211873469]], [[\"Sharpness\", 0.15019788105901233, 0.7289238028242861], [\"ShearY\", 0.7998448015985137, 0.5924798900807636]], [[\"Brightness\", 0.7874052186079156, 0.9446398428550358], [\"Equalize\", 0.5105557539139616, 0.6719808885741001]], [[\"ShearX\", 0.783252331899515, 0.74960184771181], [\"ShearX\", 0.4327935527932927, 0.29980994764698565]], [[\"Rotate\", 0.03892023906368644, 0.24868635699639904], [\"Cutout\", 0.6408903979315637, 0.32135851733523907]], [[\"Invert\", 0.9972802027590713, 0.9374194642823106], [\"ShearX\", 0.20016463162924894, 0.0052278586143255645]], [[\"AutoContrast\", 0.9328687102578992, 0.44280614999256235], [\"Color\", 0.05637751621265141, 0.26921974769786455]], [[\"AutoContrast\", 0.2798532308065416, 0.5283914274806746], [\"Cutout\", 0.12930089032151, 0.25624459046884057]], [[\"Invert\", 0.2397428994839993, 0.31011715409282065], [\"Cutout\", 0.5875151915473042, 0.7454458580264322]], [[\"Equalize\", 0.374815667651982, 0.9502053862625081], [\"Solarize\", 
0.10100323698574426, 0.5124939317648691]], [[\"AutoContrast\", 0.6009889057852652, 0.3080148907275367], [\"Posterize\", 0.6543352447742621, 0.17498668744492413]], [[\"Sharpness\", 0.14402909409016001, 0.9239239955843186], [\"ShearY\", 0.8959818090635513, 0.7258262803413784]], [[\"Brightness\", 0.8672271320432974, 0.8241439816189235], [\"Equalize\", 0.4954433852960082, 0.6687050430971254]], [[\"Solarize\", 0.47813402689782114, 0.9447222576804901], [\"TranslateY\", 0.32546974113401694, 0.8367777573080345]], [[\"Sharpness\", 0.48098022972519927, 0.2731904819197933], [\"Rotate\", 0.14601550238940067, 0.3955290089346866]], [[\"AutoContrast\", 0.3777442613874327, 0.9991495158709968], [\"TranslateY\", 0.2951496731751222, 0.6276755696126608]], [[\"Cutout\", 0.487150344941835, 0.7976642551725155], [\"Solarize\", 0.643407733524025, 0.6313641977306543]], [[\"Rotate\", 0.35017053741686033, 0.23960877779589906], [\"Sharpness\", 0.8741761196478873, 0.12362019972427862]], [[\"Invert\", 0.8849459784626776, 0.48532144354199647], [\"Invert\", 0.702430443380318, 0.924655906426149]], [[\"Equalize\", 0.6324140359298986, 0.9780539325897597], [\"AutoContrast\", 0.39105074227907843, 0.3636856607173081]], [[\"AutoContrast\", 0.8049993541952016, 0.3231157206314408], [\"ShearY\", 0.6675686366141409, 0.7345332792455934]], [[\"Sharpness\", 0.12332351413693327, 0.9345179453120547], [\"Solarize\", 0.1594280186083361, 0.422049311332906]], [[\"Rotate\", 0.38227253679386375, 0.7664364038099101], [\"AutoContrast\", 0.5725492572719726, 0.21049701651094446]], [[\"Brightness\", 0.6432891832524184, 0.8243948738979008], [\"Equalize\", 0.20355899618080098, 0.7983877568044979]], [[\"ShearY\", 0.694393675204811, 0.3686964692262895], [\"TranslateX\", 0.5593122846101599, 0.3378904046390629]], [[\"Invert\", 0.9139730140623171, 0.7183505086140822], [\"Posterize\", 0.2675839177893596, 0.21399738931234905]], [[\"TranslateX\", 0.05309461965184896, 0.032983777975422554], [\"Sharpness\", 0.412621944330688, 
0.4752089612268503]], [[\"Equalize\", 0.06901149860261116, 0.27405796188385945], [\"AutoContrast\", 0.7710451977604326, 0.20474249114426807]], [[\"ShearX\", 0.47416427531072325, 0.2738614239087857], [\"Cutout\", 0.2820106413231565, 0.6295219975308107]], [[\"Cutout\", 0.19984489885141582, 0.7019895950299546], [\"ShearX\", 0.4264722378410729, 0.8483962467724536]], [[\"ShearY\", 0.42111446850243256, 0.1837626718066795], [\"Brightness\", 0.9187856196205942, 0.07478292286531767]], [[\"Solarize\", 0.2832036589192868, 0.8253473638854684], [\"Cutout\", 0.7279303826662196, 0.615420010694839]], [[\"ShearX\", 0.963251873356884, 0.5625577053738846], [\"Color\", 0.9637046840298858, 0.9992644813427337]], [[\"Invert\", 0.7976502716811696, 0.43330238739921956], [\"ShearY\", 0.9113181667853614, 0.9066729024232627]], [[\"Posterize\", 0.5750620807485399, 0.7729691927432935], [\"Contrast\", 0.4527879467651071, 0.9647739595774402]], [[\"Posterize\", 0.5918751472569104, 0.26467375535556653], [\"Posterize\", 0.6347402742279589, 0.7476940787143674]], [[\"Invert\", 0.16552404612306285, 0.9829939598708993], [\"Solarize\", 0.29886553921638087, 0.22487098773064948]], [[\"Cutout\", 0.24209211313246753, 0.5522928952260516], [\"AutoContrast\", 0.6212831649673523, 0.4191071063984261]], [[\"ShearX\", 0.4726406722647257, 0.26783614257572447], [\"TranslateY\", 0.251078162624763, 0.26103450676044304]], [[\"Cutout\", 0.8721775527314426, 0.6284108541347894], [\"ShearX\", 0.7063325779145683, 0.8467168866724094]], [[\"ShearY\", 0.42226987564279606, 0.18012694533480308], [\"Brightness\", 0.858499853702629, 0.4738929353785444]], [[\"Solarize\", 0.30039851082582764, 0.8151511479162529], [\"Cutout\", 0.7228873804059033, 0.6174351379837011]], [[\"ShearX\", 0.4921198221896609, 0.5678998037958154], [\"Color\", 0.7865298825314806, 0.9309020966406338]], [[\"Invert\", 0.8077821007916464, 0.7375015762124386], [\"Cutout\", 0.032464574567796195, 0.25405044477004846]], [[\"Color\", 0.6061325441870133, 
0.2813794250571565], [\"TranslateY\", 0.5882949270385848, 0.33262043078220227]], [[\"ShearX\", 0.7877331864215293, 0.8001131937448647], [\"Cutout\", 0.19828215489868783, 0.5949317580743655]], [[\"Contrast\", 0.529508728421701, 0.36477855845285007], [\"Color\", 0.7145481740509138, 0.2950794787786947]], [[\"Contrast\", 0.9932891064746089, 0.46930062926732646], [\"Posterize\", 0.9033014136780437, 0.5745902253320527]]]\n    return p\n\n\ndef policy_decoder(augment, num_policy, num_op):\n    op_list = augment_list(False)\n    policies = []\n    for i in range(num_policy):\n        ops = []\n        for j in range(num_op):\n            op_idx = augment['policy_%d_%d' % (i, j)]\n            op_prob = augment['prob_%d_%d' % (i, j)]\n            op_level = augment['level_%d_%d' % (i, j)]\n            ops.append((op_list[op_idx][0].__name__, op_prob, op_level))\n        policies.append(ops)\n    return policies\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/aug_mixup.py",
    "content": "\"\"\"\nReference :\n- https://github.com/hysts/pytorch_image_classification/blob/master/augmentations/mixup.py\n- https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/imagenet_input.py#L120\n\"\"\"\n\nimport numpy as np\nimport torch\n\nfrom FastAutoAugment.metrics import CrossEntropyLabelSmooth\n\n\ndef mixup(data, targets, alpha):\n    indices = torch.randperm(data.size(0))\n    shuffled_data = data[indices]\n    shuffled_targets = targets[indices]\n\n    lam = np.random.beta(alpha, alpha)\n    lam = max(lam, 1. - lam)\n    assert 0.0 <= lam <= 1.0, lam\n    data = data * lam + shuffled_data * (1 - lam)\n\n    return data, targets, shuffled_targets, lam\n\n\nclass CrossEntropyMixUpLabelSmooth(torch.nn.Module):\n    def __init__(self, num_classes, epsilon, reduction='mean'):\n        super(CrossEntropyMixUpLabelSmooth, self).__init__()\n        self.ce = CrossEntropyLabelSmooth(num_classes, epsilon, reduction=reduction)\n\n    def forward(self, input, target1, target2, lam):  # pylint: disable=redefined-builtin\n        return lam * self.ce(input, target1) + (1 - lam) * self.ce(input, target2)\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/augmentations.py",
    "content": "# code in this file is adpated from rpmcruz/autoaugment\n# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py\nimport random\n\nimport PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw\nimport numpy as np\nimport torch\nfrom torchvision.transforms.transforms import Compose\n\nrandom_mirror = True\n\n\ndef ShearX(img, v):  # [-0.3, 0.3]\n    assert -0.3 <= v <= 0.3\n    if random_mirror and random.random() > 0.5:\n        v = -v\n    return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))\n\n\ndef ShearY(img, v):  # [-0.3, 0.3]\n    assert -0.3 <= v <= 0.3\n    if random_mirror and random.random() > 0.5:\n        v = -v\n    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))\n\n\ndef TranslateX(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]\n    assert -0.45 <= v <= 0.45\n    if random_mirror and random.random() > 0.5:\n        v = -v\n    v = v * img.size[0]\n    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))\n\n\ndef TranslateY(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]\n    assert -0.45 <= v <= 0.45\n    if random_mirror and random.random() > 0.5:\n        v = -v\n    v = v * img.size[1]\n    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))\n\n\ndef TranslateXAbs(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]\n    assert 0 <= v <= 10\n    if random.random() > 0.5:\n        v = -v\n    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))\n\n\ndef TranslateYAbs(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]\n    assert 0 <= v <= 10\n    if random.random() > 0.5:\n        v = -v\n    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))\n\n\ndef Rotate(img, v):  # [-30, 30]\n    assert -30 <= v <= 30\n    if random_mirror and random.random() > 0.5:\n        v = -v\n    return img.rotate(v)\n\n\ndef AutoContrast(img, _):\n    return PIL.ImageOps.autocontrast(img)\n\n\ndef Invert(img, _):\n    return 
PIL.ImageOps.invert(img)\n\n\ndef Equalize(img, _):\n    return PIL.ImageOps.equalize(img)\n\n\ndef Flip(img, _):  # not from the paper\n    return PIL.ImageOps.mirror(img)\n\n\ndef Solarize(img, v):  # [0, 256]\n    assert 0 <= v <= 256\n    return PIL.ImageOps.solarize(img, v)\n\n\ndef Posterize(img, v):  # [4, 8]\n    assert 4 <= v <= 8\n    v = int(v)\n    return PIL.ImageOps.posterize(img, v)\n\n\ndef Posterize2(img, v):  # [0, 4]\n    assert 0 <= v <= 4\n    v = int(v)\n    return PIL.ImageOps.posterize(img, v)\n\n\ndef Contrast(img, v):  # [0.1,1.9]\n    assert 0.1 <= v <= 1.9\n    return PIL.ImageEnhance.Contrast(img).enhance(v)\n\n\ndef Color(img, v):  # [0.1,1.9]\n    assert 0.1 <= v <= 1.9\n    return PIL.ImageEnhance.Color(img).enhance(v)\n\n\ndef Brightness(img, v):  # [0.1,1.9]\n    assert 0.1 <= v <= 1.9\n    return PIL.ImageEnhance.Brightness(img).enhance(v)\n\n\ndef Sharpness(img, v):  # [0.1,1.9]\n    assert 0.1 <= v <= 1.9\n    return PIL.ImageEnhance.Sharpness(img).enhance(v)\n\n\ndef Cutout(img, v):  # [0, 60] => percentage: [0, 0.2]\n    assert 0.0 <= v <= 0.2\n    if v <= 0.:\n        return img\n\n    v = v * img.size[0]\n    return CutoutAbs(img, v)\n\n\ndef CutoutAbs(img, v):  # [0, 60] => percentage: [0, 0.2]\n    # assert 0 <= v <= 20\n    if v < 0:\n        return img\n    w, h = img.size\n    x0 = np.random.uniform(w)\n    y0 = np.random.uniform(h)\n\n    x0 = int(max(0, x0 - v / 2.))\n    y0 = int(max(0, y0 - v / 2.))\n    x1 = min(w, x0 + v)\n    y1 = min(h, y0 + v)\n\n    xy = (x0, y0, x1, y1)\n    color = (125, 123, 114)\n    # color = (0, 0, 0)\n    img = img.copy()\n    PIL.ImageDraw.Draw(img).rectangle(xy, color)\n    return img\n\n\ndef SamplePairing(imgs):  # [0, 0.4]\n    def f(img1, v):\n        i = np.random.choice(len(imgs))\n        img2 = PIL.Image.fromarray(imgs[i])\n        return PIL.Image.blend(img1, img2, v)\n\n    return f\n\n\ndef augment_list(for_autoaug=True):  # 16 oeprations and their ranges\n    l = [\n       
 (ShearX, -0.3, 0.3),  # 0\n        (ShearY, -0.3, 0.3),  # 1\n        (TranslateX, -0.45, 0.45),  # 2\n        (TranslateY, -0.45, 0.45),  # 3\n        (Rotate, -30, 30),  # 4\n        (AutoContrast, 0, 1),  # 5\n        (Invert, 0, 1),  # 6\n        (Equalize, 0, 1),  # 7\n        (Solarize, 0, 256),  # 8\n        (Posterize, 4, 8),  # 9\n        (Contrast, 0.1, 1.9),  # 10\n        (Color, 0.1, 1.9),  # 11\n        (Brightness, 0.1, 1.9),  # 12\n        (Sharpness, 0.1, 1.9),  # 13\n        (Cutout, 0, 0.2),  # 14\n        # (SamplePairing(imgs), 0, 0.4),  # 15\n    ]\n    if for_autoaug:\n        l += [\n            (CutoutAbs, 0, 20),  # compatible with auto-augment\n            (Posterize2, 0, 4),  # 9\n            (TranslateXAbs, 0, 10),  # 9\n            (TranslateYAbs, 0, 10),  # 9\n        ]\n    return l\n\n\naugment_dict = {fn.__name__: (fn, v1, v2) for fn, v1, v2 in augment_list()}\n\n\ndef get_augment(name):\n    return augment_dict[name]\n\n\ndef apply_augment(img, name, level):\n    augment_fn, low, high = get_augment(name)\n    return augment_fn(img.copy(), level * (high - low) + low)\n\n\nclass Lighting(object):\n    \"\"\"Lighting noise(AlexNet - style PCA - based noise)\"\"\"\n\n    def __init__(self, alphastd, eigval, eigvec):\n        self.alphastd = alphastd\n        self.eigval = torch.Tensor(eigval)\n        self.eigvec = torch.Tensor(eigvec)\n\n    def __call__(self, img):\n        if self.alphastd == 0:\n            return img\n\n        alpha = img.new().resize_(3).normal_(0, self.alphastd)\n        rgb = self.eigvec.type_as(img).clone() \\\n            .mul(alpha.view(1, 3).expand(3, 3)) \\\n            .mul(self.eigval.view(1, 3).expand(3, 3)) \\\n            .sum(1).squeeze()\n\n        return img.add(rgb.view(3, 1, 1).expand_as(img))\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/common.py",
    "content": "import copy\nimport logging\nimport warnings\n\nformatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\nwarnings.filterwarnings(\"ignore\", \"(Possibly )?corrupt EXIF data\", UserWarning)\nwarnings.filterwarnings(\"ignore\", \"DeprecationWarning: 'saved_variables' is deprecated\", UserWarning)\n\n\ndef get_logger(name, level=logging.DEBUG):\n    logger = logging.getLogger(name)\n    logger.handlers.clear()\n    logger.setLevel(level)\n    ch = logging.StreamHandler()\n    ch.setLevel(level)\n    ch.setFormatter(formatter)\n    logger.addHandler(ch)\n    return logger\n\n\ndef add_filehandler(logger, filepath, level=logging.DEBUG):\n    fh = logging.FileHandler(filepath)\n    fh.setLevel(level)\n    fh.setFormatter(formatter)\n    logger.addHandler(fh)\n\n\nclass EMA:\n    def __init__(self, mu):\n        self.mu = mu\n        self.shadow = {}\n\n    def state_dict(self):\n        return copy.deepcopy(self.shadow)\n\n    def __len__(self):\n        return len(self.shadow)\n\n    def __call__(self, module, step=None):\n        if step is None:\n            mu = self.mu\n        else:\n            # see : https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/train/ExponentialMovingAverage?hl=PL\n            mu = min(self.mu, (1. + step) / (10 + step))\n\n        for name, x in module.state_dict().items():\n            if name in self.shadow:\n                new_average = (1.0 - mu) * x + mu * self.shadow[name]\n                self.shadow[name] = new_average.clone()\n            else:\n                self.shadow[name] = x.clone()\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/data.py",
    "content": "import logging\n\nimport numpy as np\nimport os\n\nimport math\nimport random\nimport torch\nimport torchvision\nfrom PIL import Image\n\nfrom torch.utils.data import SubsetRandomSampler, Sampler, Subset, ConcatDataset\nimport torch.distributed as dist\nfrom torchvision.transforms import transforms\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom theconf import Config as C\n\nfrom FastAutoAugment.archive import arsaug_policy, autoaug_policy, autoaug_paper_cifar10, fa_reduced_cifar10, fa_reduced_svhn, fa_resnet50_rimagenet\nfrom FastAutoAugment.augmentations import *\nfrom FastAutoAugment.common import get_logger\nfrom FastAutoAugment.imagenet import ImageNet\nfrom FastAutoAugment.networks.efficientnet_pytorch.model import EfficientNet\n\nlogger = get_logger('Fast AutoAugment')\nlogger.setLevel(logging.INFO)\n_IMAGENET_PCA = {\n    'eigval': [0.2175, 0.0188, 0.0045],\n    'eigvec': [\n        [-0.5675,  0.7192,  0.4009],\n        [-0.5808, -0.0045, -0.8140],\n        [-0.5836, -0.6948,  0.4203],\n    ]\n}\n_CIFAR_MEAN, _CIFAR_STD = (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)\n\n\ndef get_dataloaders(dataset, batch, dataroot, split=0.15, split_idx=0, multinode=False, target_lb=-1):\n    if 'cifar' in dataset or 'svhn' in dataset:\n        transform_train = transforms.Compose([\n            transforms.RandomCrop(32, padding=4),\n            transforms.RandomHorizontalFlip(),\n            transforms.ToTensor(),\n            transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),\n        ])\n        transform_test = transforms.Compose([\n            transforms.ToTensor(),\n            transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),\n        ])\n    elif 'imagenet' in dataset:\n        input_size = 224\n        sized_size = 256\n\n        if 'efficientnet' in C.get()['model']['type']:\n            input_size = EfficientNet.get_image_size(C.get()['model']['type'])\n            sized_size = input_size + 32    # TODO\n            # sized_size 
= int(round(input_size / 224. * 256))\n            # sized_size = input_size\n            logger.info('size changed to %d/%d.' % (input_size, sized_size))\n\n        transform_train = transforms.Compose([\n            EfficientNetRandomCrop(input_size),\n            transforms.Resize((input_size, input_size), interpolation=Image.BICUBIC),\n            # transforms.RandomResizedCrop(input_size, scale=(0.1, 1.0), interpolation=Image.BICUBIC),\n            transforms.RandomHorizontalFlip(),\n            transforms.ColorJitter(\n                brightness=0.4,\n                contrast=0.4,\n                saturation=0.4,\n            ),\n            transforms.ToTensor(),\n            Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']),\n            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n        ])\n\n        transform_test = transforms.Compose([\n            EfficientNetCenterCrop(input_size),\n            transforms.Resize((input_size, input_size), interpolation=Image.BICUBIC),\n            transforms.ToTensor(),\n            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n        ])\n\n    else:\n        raise ValueError('dataset=%s' % dataset)\n\n    total_aug = augs = None\n    if isinstance(C.get()['aug'], list):\n        logger.debug('augmentation provided.')\n        transform_train.transforms.insert(0, Augmentation(C.get()['aug']))\n    else:\n        logger.debug('augmentation: %s' % C.get()['aug'])\n        if C.get()['aug'] == 'fa_reduced_cifar10':\n            transform_train.transforms.insert(0, Augmentation(fa_reduced_cifar10()))\n\n        elif C.get()['aug'] == 'fa_reduced_imagenet':\n            transform_train.transforms.insert(0, Augmentation(fa_resnet50_rimagenet()))\n\n        elif C.get()['aug'] == 'fa_reduced_svhn':\n            transform_train.transforms.insert(0, Augmentation(fa_reduced_svhn()))\n\n        elif C.get()['aug'] == 'arsaug':\n            
transform_train.transforms.insert(0, Augmentation(arsaug_policy()))\n        elif C.get()['aug'] == 'autoaug_cifar10':\n            transform_train.transforms.insert(0, Augmentation(autoaug_paper_cifar10()))\n        elif C.get()['aug'] == 'autoaug_extend':\n            transform_train.transforms.insert(0, Augmentation(autoaug_policy()))\n        elif C.get()['aug'] in ['default']:\n            pass\n        else:\n            raise ValueError('not found augmentations. %s' % C.get()['aug'])\n\n    if C.get()['cutout'] > 0:\n        transform_train.transforms.append(CutoutDefault(C.get()['cutout']))\n\n    if dataset == 'cifar10':\n        total_trainset = torchvision.datasets.CIFAR10(root=dataroot, train=True, download=True, transform=transform_train)\n        testset = torchvision.datasets.CIFAR10(root=dataroot, train=False, download=True, transform=transform_test)\n    elif dataset == 'reduced_cifar10':\n        total_trainset = torchvision.datasets.CIFAR10(root=dataroot, train=True, download=True, transform=transform_train)\n        sss = StratifiedShuffleSplit(n_splits=1, test_size=46000, random_state=0)   # 4000 trainset\n        sss = sss.split(list(range(len(total_trainset))), total_trainset.targets)\n        train_idx, valid_idx = next(sss)\n        targets = [total_trainset.targets[idx] for idx in train_idx]\n        total_trainset = Subset(total_trainset, train_idx)\n        total_trainset.targets = targets\n\n        testset = torchvision.datasets.CIFAR10(root=dataroot, train=False, download=True, transform=transform_test)\n    elif dataset == 'cifar100':\n        total_trainset = torchvision.datasets.CIFAR100(root=dataroot, train=True, download=True, transform=transform_train)\n        testset = torchvision.datasets.CIFAR100(root=dataroot, train=False, download=True, transform=transform_test)\n    elif dataset == 'svhn':\n        trainset = torchvision.datasets.SVHN(root=dataroot, split='train', download=True, transform=transform_train)\n        
extraset = torchvision.datasets.SVHN(root=dataroot, split='extra', download=True, transform=transform_train)\n        total_trainset = ConcatDataset([trainset, extraset])\n        testset = torchvision.datasets.SVHN(root=dataroot, split='test', download=True, transform=transform_test)\n    elif dataset == 'reduced_svhn':\n        total_trainset = torchvision.datasets.SVHN(root=dataroot, split='train', download=True, transform=transform_train)\n        sss = StratifiedShuffleSplit(n_splits=1, test_size=73257-1000, random_state=0)  # 1000 trainset\n        sss = sss.split(list(range(len(total_trainset))), total_trainset.targets)\n        train_idx, valid_idx = next(sss)\n        targets = [total_trainset.targets[idx] for idx in train_idx]\n        total_trainset = Subset(total_trainset, train_idx)\n        total_trainset.targets = targets\n\n        testset = torchvision.datasets.SVHN(root=dataroot, split='test', download=True, transform=transform_test)\n    elif dataset == 'imagenet':\n        total_trainset = ImageNet(root=os.path.join(dataroot, 'imagenet-pytorch'), transform=transform_train)\n        testset = ImageNet(root=os.path.join(dataroot, 'imagenet-pytorch'), split='val', transform=transform_test)\n\n        # compatibility\n        total_trainset.targets = [lb for _, lb in total_trainset.samples]\n    elif dataset == 'reduced_imagenet':\n        # randomly chosen indices\n#         idx120 = sorted(random.sample(list(range(1000)), k=120))\n        idx120 = [16, 23, 52, 57, 76, 93, 95, 96, 99, 121, 122, 128, 148, 172, 181, 189, 202, 210, 232, 238, 257, 258, 259, 277, 283, 289, 295, 304, 307, 318, 322, 331, 337, 338, 345, 350, 361, 375, 376, 381, 388, 399, 401, 408, 424, 431, 432, 440, 447, 462, 464, 472, 483, 497, 506, 512, 530, 541, 553, 554, 557, 564, 570, 584, 612, 614, 619, 626, 631, 632, 650, 657, 658, 660, 674, 675, 680, 682, 691, 695, 699, 711, 734, 736, 741, 754, 757, 764, 769, 770, 780, 781, 787, 797, 799, 811, 822, 829, 830, 835, 837, 842, 843, 
845, 873, 883, 897, 900, 902, 905, 913, 920, 925, 937, 938, 940, 941, 944, 949, 959]\n        total_trainset = ImageNet(root=os.path.join(dataroot, 'imagenet-pytorch'), transform=transform_train)\n        testset = ImageNet(root=os.path.join(dataroot, 'imagenet-pytorch'), split='val', transform=transform_test)\n\n        # compatibility\n        total_trainset.targets = [lb for _, lb in total_trainset.samples]\n\n        sss = StratifiedShuffleSplit(n_splits=1, test_size=len(total_trainset) - 50000, random_state=0)  # 4000 trainset\n        sss = sss.split(list(range(len(total_trainset))), total_trainset.targets)\n        train_idx, valid_idx = next(sss)\n\n        # filter out\n        train_idx = list(filter(lambda x: total_trainset.labels[x] in idx120, train_idx))\n        valid_idx = list(filter(lambda x: total_trainset.labels[x] in idx120, valid_idx))\n        test_idx = list(filter(lambda x: testset.samples[x][1] in idx120, range(len(testset))))\n\n        targets = [idx120.index(total_trainset.targets[idx]) for idx in train_idx]\n        for idx in range(len(total_trainset.samples)):\n            if total_trainset.samples[idx][1] not in idx120:\n                continue\n            total_trainset.samples[idx] = (total_trainset.samples[idx][0], idx120.index(total_trainset.samples[idx][1]))\n        total_trainset = Subset(total_trainset, train_idx)\n        total_trainset.targets = targets\n\n        for idx in range(len(testset.samples)):\n            if testset.samples[idx][1] not in idx120:\n                continue\n            testset.samples[idx] = (testset.samples[idx][0], idx120.index(testset.samples[idx][1]))\n        testset = Subset(testset, test_idx)\n        print('reduced_imagenet train=', len(total_trainset))\n    else:\n        raise ValueError('invalid dataset name=%s' % dataset)\n\n    if total_aug is not None and augs is not None:\n        total_trainset.set_preaug(augs, total_aug)\n        print('set_preaug-')\n\n    train_sampler = 
None\n    if split > 0.0:\n        sss = StratifiedShuffleSplit(n_splits=5, test_size=split, random_state=0)\n        sss = sss.split(list(range(len(total_trainset))), total_trainset.targets)\n        for _ in range(split_idx + 1):\n            train_idx, valid_idx = next(sss)\n\n        if target_lb >= 0:\n            train_idx = [i for i in train_idx if total_trainset.targets[i] == target_lb]\n            valid_idx = [i for i in valid_idx if total_trainset.targets[i] == target_lb]\n\n        train_sampler = SubsetRandomSampler(train_idx)\n        valid_sampler = SubsetSampler(valid_idx)\n\n        if multinode:\n            train_sampler = torch.utils.data.distributed.DistributedSampler(Subset(total_trainset, train_idx), num_replicas=dist.get_world_size(), rank=dist.get_rank())\n    else:\n        valid_sampler = SubsetSampler([])\n\n        if multinode:\n            train_sampler = torch.utils.data.distributed.DistributedSampler(total_trainset, num_replicas=dist.get_world_size(), rank=dist.get_rank())\n            logger.info(f'----- dataset with DistributedSampler  {dist.get_rank()}/{dist.get_world_size()}')\n\n    trainloader = torch.utils.data.DataLoader(\n        total_trainset, batch_size=batch, shuffle=True if train_sampler is None else False, num_workers=8, pin_memory=True,\n        sampler=train_sampler, drop_last=True)\n    validloader = torch.utils.data.DataLoader(\n        total_trainset, batch_size=batch, shuffle=False, num_workers=4, pin_memory=True,\n        sampler=valid_sampler, drop_last=False)\n\n    testloader = torch.utils.data.DataLoader(\n        testset, batch_size=batch, shuffle=False, num_workers=8, pin_memory=True,\n        drop_last=False\n    )\n    return train_sampler, trainloader, validloader, testloader\n\n\nclass CutoutDefault(object):\n    \"\"\"\n    Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py\n    \"\"\"\n    def __init__(self, length):\n        self.length = length\n\n    def __call__(self, img):\n   
     h, w = img.size(1), img.size(2)\n        mask = np.ones((h, w), np.float32)\n        y = np.random.randint(h)\n        x = np.random.randint(w)\n\n        y1 = np.clip(y - self.length // 2, 0, h)\n        y2 = np.clip(y + self.length // 2, 0, h)\n        x1 = np.clip(x - self.length // 2, 0, w)\n        x2 = np.clip(x + self.length // 2, 0, w)\n\n        mask[y1: y2, x1: x2] = 0.\n        mask = torch.from_numpy(mask)\n        mask = mask.expand_as(img)\n        img *= mask\n        return img\n\n\nclass Augmentation(object):\n    def __init__(self, policies):\n        self.policies = policies\n\n    def __call__(self, img):\n        for _ in range(1):\n            policy = random.choice(self.policies)\n            for name, pr, level in policy:\n                if random.random() > pr:\n                    continue\n                img = apply_augment(img, name, level)\n        return img\n\n\nclass EfficientNetRandomCrop:\n    def __init__(self, imgsize, min_covered=0.1, aspect_ratio_range=(3./4, 4./3), area_range=(0.08, 1.0), max_attempts=10):\n        assert 0.0 < min_covered\n        assert 0 < aspect_ratio_range[0] <= aspect_ratio_range[1]\n        assert 0 < area_range[0] <= area_range[1]\n        assert 1 <= max_attempts\n\n        self.min_covered = min_covered\n        self.aspect_ratio_range = aspect_ratio_range\n        self.area_range = area_range\n        self.max_attempts = max_attempts\n        self._fallback = EfficientNetCenterCrop(imgsize)\n\n    def __call__(self, img):\n        # https://github.com/tensorflow/tensorflow/blob/9274bcebb31322370139467039034f8ff852b004/tensorflow/core/kernels/sample_distorted_bounding_box_op.cc#L111\n        original_width, original_height = img.size\n        min_area = self.area_range[0] * (original_width * original_height)\n        max_area = self.area_range[1] * (original_width * original_height)\n\n        for _ in range(self.max_attempts):\n            aspect_ratio = 
random.uniform(*self.aspect_ratio_range)\n            height = int(round(math.sqrt(min_area / aspect_ratio)))\n            max_height = int(round(math.sqrt(max_area / aspect_ratio)))\n\n            if max_height * aspect_ratio > original_width:\n                max_height = (original_width + 0.5 - 1e-7) / aspect_ratio\n                max_height = int(max_height)\n                if max_height * aspect_ratio > original_width:\n                    max_height -= 1\n\n            if max_height > original_height:\n                max_height = original_height\n\n            if height >= max_height:\n                height = max_height\n\n            height = int(round(random.uniform(height, max_height)))\n            width = int(round(height * aspect_ratio))\n            area = width * height\n\n            if area < min_area or area > max_area:\n                continue\n            if width > original_width or height > original_height:\n                continue\n            if area < self.min_covered * (original_width * original_height):\n                continue\n            if width == original_width and height == original_height:\n                return self._fallback(img)      # https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/preprocessing.py#L102\n\n            x = random.randint(0, original_width - width)\n            y = random.randint(0, original_height - height)\n            return img.crop((x, y, x + width, y + height))\n\n        return self._fallback(img)\n\n\nclass EfficientNetCenterCrop:\n    def __init__(self, imgsize):\n        self.imgsize = imgsize\n\n    def __call__(self, img):\n        \"\"\"Crop the given PIL Image and resize it to desired size.\n\n        Args:\n            img (PIL Image): Image to be cropped. (0,0) denotes the top left corner of the image.\n            output_size (sequence or int): (height, width) of the crop box. 
If int,\n                it is used for both directions\n        Returns:\n            PIL Image: Cropped image.\n        \"\"\"\n        image_width, image_height = img.size\n        image_short = min(image_width, image_height)\n\n        crop_size = float(self.imgsize) / (self.imgsize + 32) * image_short\n\n        crop_height, crop_width = crop_size, crop_size\n        crop_top = int(round((image_height - crop_height) / 2.))\n        crop_left = int(round((image_width - crop_width) / 2.))\n        return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))\n\n\nclass SubsetSampler(Sampler):\n    r\"\"\"Samples elements from a given list of indices, without replacement.\n\n    Arguments:\n        indices (sequence): a sequence of indices\n    \"\"\"\n\n    def __init__(self, indices):\n        self.indices = indices\n\n    def __iter__(self):\n        return (i for i in self.indices)\n\n    def __len__(self):\n        return len(self.indices)\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/imagenet.py",
    "content": "from __future__ import print_function\nimport os\nimport shutil\nimport torch\n\nARCHIVE_DICT = {\n    'train': {\n        'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar',\n        'md5': '1d675b47d978889d74fa0da5fadfb00e',\n    },\n    'val': {\n        'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar',\n        'md5': '29b22e2961454d5413ddabcf34fc5622',\n    },\n    'devkit': {\n        'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz',\n        'md5': 'fa75699e90414af021442c21a62c3abf',\n    }\n}\n\n\nimport torchvision\nfrom torchvision.datasets.utils import check_integrity, download_url\n\n\n# copy ILSVRC/ImageSets/CLS-LOC/train_cls.txt to ./root/\n# to skip os walk (it's too slow) using ILSVRC/ImageSets/CLS-LOC/train_cls.txt file\nclass ImageNet(torchvision.datasets.ImageFolder):\n    \"\"\"`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.\n\n    Args:\n        root (string): Root directory of the ImageNet Dataset.\n        split (string, optional): The dataset split, supports ``train``, or ``val``.\n        download (bool, optional): If true, downloads the dataset from the internet and\n            puts it in root directory. If dataset is already downloaded, it is not\n            downloaded again.\n        transform (callable, optional): A function/transform that  takes in an PIL image\n            and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n        target_transform (callable, optional): A function/transform that takes in the\n            target and transforms it.\n        loader (callable, optional): A function to load an image given its path.\n\n     Attributes:\n        classes (list): List of the class names.\n        class_to_idx (dict): Dict with items (class_name, class_index).\n        wnids (list): List of the WordNet IDs.\n        wnid_to_idx (dict): Dict with items (wordnet_id, class_index).\n        imgs (list): List of (image path, class_index) tuples\n        targets (list): The class_index value for each image in the dataset\n    \"\"\"\n\n    def __init__(self, root, split='train', download=False, **kwargs):\n        root = self.root = os.path.expanduser(root)\n        self.split = self._verify_split(split)\n\n        if download:\n            self.download()\n        wnid_to_classes = self._load_meta_file()[0]\n\n        # to skip os walk (it's too slow) using ILSVRC/ImageSets/CLS-LOC/train_cls.txt file\n        listfile = os.path.join(root, 'train_cls.txt')\n        if split == 'train' and os.path.exists(listfile):\n            torchvision.datasets.VisionDataset.__init__(self, root, **kwargs)\n            with open(listfile, 'r') as f:\n                datalist = [\n                    line.strip().split(' ')[0]\n                    for line in f.readlines()\n                    if line.strip()\n                ]\n\n            classes = list(set([line.split('/')[0] for line in datalist]))\n            classes.sort()\n            class_to_idx = {classes[i]: i for i in range(len(classes))}\n\n            samples = [\n                (os.path.join(self.split_folder, line + '.JPEG'), class_to_idx[line.split('/')[0]])\n                for line in datalist\n            ]\n\n            self.loader = torchvision.datasets.folder.default_loader\n            self.extensions = torchvision.datasets.folder.IMG_EXTENSIONS\n\n            self.classes = classes\n      
      self.class_to_idx = class_to_idx\n            self.samples = samples\n            self.targets = [s[1] for s in samples]\n\n            self.imgs = self.samples\n        else:\n            super(ImageNet, self).__init__(self.split_folder, **kwargs)\n\n        self.root = root\n\n        idcs = [idx for _, idx in self.imgs]\n        self.wnids = self.classes\n        self.wnid_to_idx = {wnid: idx for idx, wnid in zip(idcs, self.wnids)}\n        self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]\n        self.class_to_idx = {cls: idx\n                             for clss, idx in zip(self.classes, idcs)\n                             for cls in clss}\n\n    def download(self):\n        if not check_integrity(self.meta_file):\n            tmpdir = os.path.join(self.root, 'tmp')\n\n            archive_dict = ARCHIVE_DICT['devkit']\n            download_and_extract_tar(archive_dict['url'], self.root,\n                                     extract_root=tmpdir,\n                                     md5=archive_dict['md5'])\n            devkit_folder = _splitexts(os.path.basename(archive_dict['url']))[0]\n            meta = parse_devkit(os.path.join(tmpdir, devkit_folder))\n            self._save_meta_file(*meta)\n\n            shutil.rmtree(tmpdir)\n\n        if not os.path.isdir(self.split_folder):\n            archive_dict = ARCHIVE_DICT[self.split]\n            download_and_extract_tar(archive_dict['url'], self.root,\n                                     extract_root=self.split_folder,\n                                     md5=archive_dict['md5'])\n\n            if self.split == 'train':\n                prepare_train_folder(self.split_folder)\n            elif self.split == 'val':\n                val_wnids = self._load_meta_file()[1]\n                prepare_val_folder(self.split_folder, val_wnids)\n        else:\n            msg = (\"You set download=True, but a folder '{}' already exist in \"\n                   \"the root directory. 
If you want to re-download or re-extract the \"\n                   \"archive, delete the folder.\")\n            print(msg.format(self.split))\n\n    @property\n    def meta_file(self):\n        return os.path.join(self.root, 'meta.bin')\n\n    def _load_meta_file(self):\n        if check_integrity(self.meta_file):\n            return torch.load(self.meta_file)\n        raise RuntimeError(\"Meta file not found or corrupted.\",\n                           \"You can use download=True to create it.\")\n\n    def _save_meta_file(self, wnid_to_class, val_wnids):\n        torch.save((wnid_to_class, val_wnids), self.meta_file)\n\n    def _verify_split(self, split):\n        if split not in self.valid_splits:\n            msg = \"Unknown split {} .\".format(split)\n            msg += \"Valid splits are {{}}.\".format(\", \".join(self.valid_splits))\n            raise ValueError(msg)\n        return split\n\n    @property\n    def valid_splits(self):\n        return 'train', 'val'\n\n    @property\n    def split_folder(self):\n        return os.path.join(self.root, self.split)\n\n    def extra_repr(self):\n        return \"Split: {split}\".format(**self.__dict__)\n\n\ndef extract_tar(src, dest=None, gzip=None, delete=False):\n    import tarfile\n\n    if dest is None:\n        dest = os.path.dirname(src)\n    if gzip is None:\n        gzip = src.lower().endswith('.gz')\n\n    mode = 'r:gz' if gzip else 'r'\n    with tarfile.open(src, mode) as tarfh:\n        tarfh.extractall(path=dest)\n\n    if delete:\n        os.remove(src)\n\n\ndef download_and_extract_tar(url, download_root, extract_root=None, filename=None,\n                             md5=None, **kwargs):\n    download_root = os.path.expanduser(download_root)\n    if extract_root is None:\n        extract_root = download_root\n    if filename is None:\n        filename = os.path.basename(url)\n\n    if not check_integrity(os.path.join(download_root, filename), md5):\n        download_url(url, download_root, 
filename=filename, md5=md5)\n\n    extract_tar(os.path.join(download_root, filename), extract_root, **kwargs)\n\n\ndef parse_devkit(root):\n    idx_to_wnid, wnid_to_classes = parse_meta(root)\n    val_idcs = parse_val_groundtruth(root)\n    val_wnids = [idx_to_wnid[idx] for idx in val_idcs]\n    return wnid_to_classes, val_wnids\n\n\ndef parse_meta(devkit_root, path='data', filename='meta.mat'):\n    import scipy.io as sio\n\n    metafile = os.path.join(devkit_root, path, filename)\n    meta = sio.loadmat(metafile, squeeze_me=True)['synsets']\n    nums_children = list(zip(*meta))[4]\n    meta = [meta[idx] for idx, num_children in enumerate(nums_children)\n            if num_children == 0]\n    idcs, wnids, classes = list(zip(*meta))[:3]\n    classes = [tuple(clss.split(', ')) for clss in classes]\n    idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}\n    wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}\n    return idx_to_wnid, wnid_to_classes\n\n\ndef parse_val_groundtruth(devkit_root, path='data',\n                          filename='ILSVRC2012_validation_ground_truth.txt'):\n    with open(os.path.join(devkit_root, path, filename), 'r') as txtfh:\n        val_idcs = txtfh.readlines()\n    return [int(val_idx) for val_idx in val_idcs]\n\n\ndef prepare_train_folder(folder):\n    for archive in [os.path.join(folder, archive) for archive in os.listdir(folder)]:\n        extract_tar(archive, os.path.splitext(archive)[0], delete=True)\n\n\ndef prepare_val_folder(folder, wnids):\n    img_files = sorted([os.path.join(folder, file) for file in os.listdir(folder)])\n\n    for wnid in set(wnids):\n        os.mkdir(os.path.join(folder, wnid))\n\n    for wnid, img_file in zip(wnids, img_files):\n        shutil.move(img_file, os.path.join(folder, wnid, os.path.basename(img_file)))\n\n\ndef _splitexts(root):\n    exts = []\n    ext = '.'\n    while ext:\n        root, ext = os.path.splitext(root)\n        exts.append(ext)\n    return root, 
''.join(reversed(exts))\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/lr_scheduler.py",
    "content": "import torch\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom theconf import Config as C\n\n\ndef adjust_learning_rate_resnet(optimizer):\n    \"\"\"\n    Sets the learning rate to the initial LR decayed by 10 on every predefined epochs\n    Ref: AutoAugment\n    \"\"\"\n\n    if C.get()['epoch'] == 90:\n        return MultiStepLR_HotFix(optimizer, [30, 60, 80])\n    elif C.get()['epoch'] == 270:   # autoaugment\n        return MultiStepLR_HotFix(optimizer, [90, 180, 240])\n    else:\n        raise ValueError('invalid epoch=%d for resnet scheduler' % C.get()['epoch'])\n\n        \nclass MultiStepLR_HotFix(MultiStepLR):\n    def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1):\n        super(MultiStepLR_HotFix, self).__init__(optimizer, milestones, gamma, last_epoch)\n        self.milestones = list(milestones)\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/metrics.py",
    "content": "import copy\n\nimport torch\nimport numpy as np\nfrom collections import defaultdict\n\nfrom torch import nn\n\n\ndef accuracy(output, target, topk=(1,)):\n    \"\"\"Computes the precision@k for the specified values of k\"\"\"\n    maxk = max(topk)\n    batch_size = target.size(0)\n\n    _, pred = output.topk(maxk, 1, True, True)\n    pred = pred.t()\n    correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n    res = []\n    for k in topk:\n        correct_k = correct[:k].view(-1).float().sum(0)\n        res.append(correct_k.mul_(1. / batch_size))\n    return res\n\n\nclass CrossEntropyLabelSmooth(torch.nn.Module):\n    def __init__(self, num_classes, epsilon, reduction='mean'):\n        super(CrossEntropyLabelSmooth, self).__init__()\n        self.num_classes = num_classes\n        self.epsilon = epsilon\n        self.reduction = reduction\n        self.logsoftmax = torch.nn.LogSoftmax(dim=1)\n\n    def forward(self, input, target):  # pylint: disable=redefined-builtin\n        log_probs = self.logsoftmax(input)\n        targets = torch.zeros_like(log_probs).scatter_(1, target.unsqueeze(1), 1)\n        if self.epsilon > 0.0:\n            targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n        targets = targets.detach()\n        loss = (-targets * log_probs)\n\n        if self.reduction in ['avg', 'mean']:\n            loss = torch.mean(torch.sum(loss, dim=1))\n        elif self.reduction == 'sum':\n            loss = loss.sum()\n        return loss\n\n\nclass Accumulator:\n    def __init__(self):\n        self.metrics = defaultdict(lambda: 0.)\n\n    def add(self, key, value):\n        self.metrics[key] += value\n\n    def add_dict(self, dict):\n        for key, value in dict.items():\n            self.add(key, value)\n\n    def __getitem__(self, item):\n        return self.metrics[item]\n\n    def __setitem__(self, key, value):\n        self.metrics[key] = value\n\n    def get_dict(self):\n        return 
copy.deepcopy(dict(self.metrics))\n\n    def items(self):\n        return self.metrics.items()\n\n    def __str__(self):\n        return str(dict(self.metrics))\n\n    def __truediv__(self, other):\n        newone = Accumulator()\n        for key, value in self.items():\n            if isinstance(other, str):\n                if other != key:\n                    newone[key] = value / self[other]\n                else:\n                    newone[key] = value\n            else:\n                newone[key] = value / other\n        return newone\n\n\nclass SummaryWriterDummy:\n    def __init__(self, log_dir):\n        pass\n\n    def add_scalar(self, *args, **kwargs):\n        pass\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/networks/__init__.py",
    "content": "import torch\n\nfrom torch import nn\nfrom torch.nn import DataParallel\nfrom torch.nn.parallel import DistributedDataParallel\nimport torch.backends.cudnn as cudnn\n# from torchvision import models\nimport numpy as np\n\nfrom FastAutoAugment.networks.resnet import ResNet\nfrom FastAutoAugment.networks.pyramidnet import PyramidNet\nfrom FastAutoAugment.networks.shakeshake.shake_resnet import ShakeResNet\nfrom FastAutoAugment.networks.wideresnet import WideResNet\nfrom FastAutoAugment.networks.shakeshake.shake_resnext import ShakeResNeXt\nfrom FastAutoAugment.networks.efficientnet_pytorch import EfficientNet, RoutingFn\nfrom FastAutoAugment.tf_port.tpu_bn import TpuBatchNormalization\n\n\ndef get_model(conf, num_class=10, local_rank=-1):\n    name = conf['type']\n\n    if name == 'resnet50':\n        model = ResNet(dataset='imagenet', depth=50, num_classes=num_class, bottleneck=True)\n    elif name == 'resnet200':\n        model = ResNet(dataset='imagenet', depth=200, num_classes=num_class, bottleneck=True)\n    elif name == 'wresnet40_2':\n        model = WideResNet(40, 2, dropout_rate=0.0, num_classes=num_class)\n    elif name == 'wresnet28_10':\n        model = WideResNet(28, 10, dropout_rate=0.0, num_classes=num_class)\n\n    elif name == 'shakeshake26_2x32d':\n        model = ShakeResNet(26, 32, num_class)\n    elif name == 'shakeshake26_2x64d':\n        model = ShakeResNet(26, 64, num_class)\n    elif name == 'shakeshake26_2x96d':\n        model = ShakeResNet(26, 96, num_class)\n    elif name == 'shakeshake26_2x112d':\n        model = ShakeResNet(26, 112, num_class)\n\n    elif name == 'shakeshake26_2x96d_next':\n        model = ShakeResNeXt(26, 96, 4, num_class)\n\n    elif name == 'pyramid':\n        model = PyramidNet('cifar10', depth=conf['depth'], alpha=conf['alpha'], num_classes=num_class, bottleneck=conf['bottleneck'])\n\n    elif 'efficientnet' in name:\n        model = EfficientNet.from_name(name, 
condconv_num_expert=conf['condconv_num_expert'], norm_layer=None)  # TpuBatchNormalization\n        if local_rank >= 0:\n            model = nn.SyncBatchNorm.convert_sync_batchnorm(model)\n        def kernel_initializer(module):\n            def get_fan_in_out(module):\n                num_input_fmaps = module.weight.size(1)\n                num_output_fmaps = module.weight.size(0)\n                receptive_field_size = 1\n                if module.weight.dim() > 2:\n                    receptive_field_size = module.weight[0][0].numel()\n                fan_in = num_input_fmaps * receptive_field_size\n                fan_out = num_output_fmaps * receptive_field_size\n                return fan_in, fan_out\n\n            if isinstance(module, torch.nn.Conv2d):\n                # https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py#L58\n                fan_in, fan_out = get_fan_in_out(module)\n                torch.nn.init.normal_(module.weight, mean=0.0, std=np.sqrt(2.0 / fan_out))\n                if module.bias is not None:\n                    torch.nn.init.constant_(module.bias, val=0.)\n            elif isinstance(module, RoutingFn):\n                torch.nn.init.xavier_uniform_(module.weight)\n                torch.nn.init.constant_(module.bias, val=0.)\n            elif isinstance(module, torch.nn.Linear):\n                # https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py#L82\n                fan_in, fan_out = get_fan_in_out(module)\n                delta = 1.0 / np.sqrt(fan_out)\n                torch.nn.init.uniform_(module.weight, a=-delta, b=delta)\n                if module.bias is not None:\n                    torch.nn.init.constant_(module.bias, val=0.)\n        model.apply(kernel_initializer)\n    else:\n        raise NameError('no model named, %s' % name)\n\n    if local_rank >= 0:\n        device = torch.device('cuda', local_rank)\n        model = 
model.to(device)\n        model = DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)\n    else:\n        model = model.cuda()\n#         model = DataParallel(model)\n\n    cudnn.benchmark = True\n    return model\n\n\ndef num_class(dataset):\n    return {\n        'cifar10': 10,\n        'reduced_cifar10': 10,\n        'cifar10.1': 10,\n        'cifar100': 100,\n        'svhn': 10,\n        'reduced_svhn': 10,\n        'imagenet': 1000,\n        'reduced_imagenet': 120,\n    }[dataset]\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/networks/efficientnet_pytorch/__init__.py",
    "content": "__version__ = \"0.5.1\"\nfrom .model import EfficientNet, RoutingFn\nfrom .utils import (\n    GlobalParams,\n    BlockArgs,\n    BlockDecoder,\n    efficientnet,\n    get_model_params,\n)"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/networks/efficientnet_pytorch/condconv.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch._six import container_abcs\n\nfrom itertools import repeat\nfrom functools import partial\nfrom typing import Union, List, Tuple, Optional, Callable\nimport numpy as np\nimport math\n\n\ndef _ntuple(n):\n    def parse(x):\n        if isinstance(x, container_abcs.Iterable):\n            return x\n        return tuple(repeat(x, n))\n    return parse\n\n\n_single = _ntuple(1)\n_pair = _ntuple(2)\n_triple = _ntuple(3)\n_quadruple = _ntuple(4)\n\n\ndef _is_static_pad(kernel_size, stride=1, dilation=1, **_):\n    return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0\n\n\ndef _get_padding(kernel_size, stride=1, dilation=1, **_):\n    padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2\n    return padding\n\n\ndef _calc_same_pad(i: int, k: int, s: int, d: int):\n    return max((math.ceil(i / s) - 1) * s + (k - 1) * d + 1 - i, 0)\n\n\ndef conv2d_same(\n        x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1),\n        padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1):\n    ih, iw = x.size()[-2:]\n    kh, kw = weight.size()[-2:]\n    pad_h = _calc_same_pad(ih, kh, stride[0], dilation[0])\n    pad_w = _calc_same_pad(iw, kw, stride[1], dilation[1])\n    if pad_h > 0 or pad_w > 0:\n        x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])\n    return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)\n\n\ndef get_padding_value(padding, kernel_size, **kwargs):\n    dynamic = False\n    if isinstance(padding, str):\n        # for any string padding, the padding will be calculated for you, one of three ways\n        padding = padding.lower()\n        if padding == 'same':\n            # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact\n            if _is_static_pad(kernel_size, **kwargs):\n                # static 
case, no extra overhead\n                padding = _get_padding(kernel_size, **kwargs)\n            else:\n                # dynamic padding\n                padding = 0\n                dynamic = True\n        elif padding == 'valid':\n            # 'VALID' padding, same as padding=0\n            padding = 0\n        else:\n            # Default to PyTorch style 'same'-ish symmetric padding\n            padding = _get_padding(kernel_size, **kwargs)\n    return padding, dynamic\n\n\ndef get_condconv_initializer(initializer, num_experts, expert_shape):\n    def condconv_initializer(weight):\n        \"\"\"CondConv initializer function.\"\"\"\n        num_params = np.prod(expert_shape)\n        if (len(weight.shape) != 2 or weight.shape[0] != num_experts or weight.shape[1] != num_params):\n            raise (ValueError('CondConv variables must have shape [num_experts, num_params]'))\n        for i in range(num_experts):\n            initializer(weight[i].view(expert_shape))\n    return condconv_initializer\n\n\nclass CondConv2d(nn.Module):\n    \"\"\" Conditional Convolution\n    Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py\n    Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion:\n    https://github.com/pytorch/pytorch/issues/17983\n    \"\"\"\n    __constants__ = ['bias', 'in_channels', 'out_channels', 'dynamic_padding']\n\n    def __init__(self, in_channels, out_channels, kernel_size=3,\n                 stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4):\n        super(CondConv2d, self).__init__()\n        assert num_experts > 1\n\n        if isinstance(stride, container_abcs.Iterable) and len(stride) == 1:\n            stride = stride[0]\n        # print('CondConv', num_experts)\n\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.kernel_size = _pair(kernel_size)\n      
  self.stride = _pair(stride)\n        padding_val, is_padding_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation)\n        self.dynamic_padding = is_padding_dynamic  # if in forward to work with torchscript\n        self.padding = _pair(padding_val)\n        self.dilation = _pair(dilation)\n        self.groups = groups\n        self.num_experts = num_experts\n\n        self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size\n        weight_num_param = 1\n        for wd in self.weight_shape:\n            weight_num_param *= wd\n        self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param))\n\n        if bias:\n            self.bias_shape = (self.out_channels,)\n            self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels))\n        else:\n            self.register_parameter('bias', None)\n\n        self.reset_parameters()\n\n    def reset_parameters(self):\n        num_input_fmaps = self.weight.size(1)\n        num_output_fmaps = self.weight.size(0)\n        receptive_field_size = 1\n        if self.weight.dim() > 2:\n            receptive_field_size = self.weight[0][0].numel()\n        fan_in = num_input_fmaps * receptive_field_size\n        fan_out = num_output_fmaps * receptive_field_size\n\n        init_weight = get_condconv_initializer(partial(nn.init.normal_, mean=0.0, std=np.sqrt(2.0 / fan_out)), self.num_experts, self.weight_shape)\n        init_weight(self.weight)\n        if self.bias is not None:\n            # fan_in = np.prod(self.weight_shape[1:])\n            # bound = 1 / math.sqrt(fan_in)\n            init_bias = get_condconv_initializer(partial(nn.init.constant_, val=0), self.num_experts, self.bias_shape)\n            init_bias(self.bias)\n\n    def forward(self, x, routing_weights):\n        x_orig = x\n        B, C, H, W = x.shape\n        weight = torch.matmul(routing_weights, self.weight)     # (Expert x out x in x 
3x3) --> (B x out x in x 3x3)\n        new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size\n        weight = weight.view(new_weight_shape)                  # (B*out x in x 3 x 3)\n        bias = None\n        if self.bias is not None:\n            bias = torch.matmul(routing_weights, self.bias)\n            bias = bias.view(B * self.out_channels)\n        # move batch elements with channels so each batch element can be efficiently convolved with separate kernel\n        x = x.view(1, B * C, H, W)\n        if self.dynamic_padding:\n            out = conv2d_same(\n                x, weight, bias, stride=self.stride, padding=self.padding,\n                dilation=self.dilation, groups=self.groups * B)\n        else:\n            out = F.conv2d(\n                x, weight, bias, stride=self.stride, padding=self.padding,\n                dilation=self.dilation, groups=self.groups * B)\n\n        # out : (1 x B*out x ...)\n        out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1])\n\n        # out2 = self.forward_legacy(x_orig, routing_weights)\n        # lt = torch.lt(torch.abs(torch.add(out, -out2)), 1e-8)\n        # assert torch.all(lt), torch.abs(torch.add(out, -out2))[lt]\n        # print('checked')\n        return out\n\n    def forward_legacy(self, x, routing_weights):\n        # Literal port (from TF definition)\n        B, C, H, W = x.shape\n        weight = torch.matmul(routing_weights, self.weight)  # (Expert x out x in x 3x3) --> (B x out x in x 3x3)\n        x = torch.split(x, 1, 0)\n        weight = torch.split(weight, 1, 0)\n        if self.bias is not None:\n            bias = torch.matmul(routing_weights, self.bias)\n            bias = torch.split(bias, 1, 0)\n        else:\n            bias = [None] * B\n        out = []\n        if self.dynamic_padding:\n            conv_fn = conv2d_same\n        else:\n            conv_fn = F.conv2d\n        for xi, wi, bi in zip(x, 
weight, bias):\n            wi = wi.view(*self.weight_shape)\n            if bi is not None:\n                bi = bi.view(*self.bias_shape)\n            out.append(conv_fn(\n                xi, wi, bi, stride=self.stride, padding=self.padding,\n                dilation=self.dilation, groups=self.groups))\n        out = torch.cat(out, 0)\n        return out\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/networks/efficientnet_pytorch/model.py",
    "content": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom functools import partial\nfrom .utils import (\n    round_filters,\n    round_repeats,\n    drop_connect,\n    get_same_padding_conv2d,\n    get_model_params,\n    efficientnet_params,\n    load_pretrained_weights,\n    MemoryEfficientSwish,\n)\n\n\nclass RoutingFn(nn.Linear):\n    pass\n\n\nclass MBConvBlock(nn.Module):\n    \"\"\"\n    Mobile Inverted Residual Bottleneck Block\n\n    Args:\n        block_args (namedtuple): BlockArgs, see above\n        global_params (namedtuple): GlobalParam, see above\n\n    Attributes:\n        has_se (bool): Whether the block contains a Squeeze and Excitation layer.\n    \"\"\"\n\n    def __init__(self, block_args, global_params, norm_layer=None):\n        super().__init__()\n        self._block_args = block_args\n        self._bn_mom = 1 - global_params.batch_norm_momentum\n        self._bn_eps = global_params.batch_norm_epsilon\n        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)\n        self.id_skip = block_args.id_skip  # skip connection and drop connect\n        if norm_layer is None:\n            norm_layer = nn.BatchNorm2d\n\n        self.condconv_num_expert = block_args.condconv_num_expert\n        if self._is_condconv():\n            self.routing_fn = RoutingFn(self._block_args.input_filters, self.condconv_num_expert)\n\n        # Get static or dynamic convolution depending on image size\n        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size, condconv_num_expert=block_args.condconv_num_expert)\n        Conv2dse = get_same_padding_conv2d(image_size=global_params.image_size)\n\n        # Expansion phase\n        inp = self._block_args.input_filters  # number of input channels\n        oup = self._block_args.input_filters * self._block_args.expand_ratio  # number of output channels\n        if self._block_args.expand_ratio != 1:\n            
self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)\n            self._bn0 = norm_layer(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)\n\n        # Depthwise convolution phase\n        k = self._block_args.kernel_size\n        s = self._block_args.stride\n        self._depthwise_conv = Conv2d(\n            in_channels=oup, out_channels=oup, groups=oup,  # groups makes it depthwise\n            kernel_size=k, stride=s, bias=False)\n        self._bn1 = norm_layer(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)\n\n        # Squeeze and Excitation layer, if desired\n        if self.has_se:\n            num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))\n            self._se_reduce = Conv2dse(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)\n            self._se_expand = Conv2dse(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)\n\n        # Output phase\n        final_oup = self._block_args.output_filters\n        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)\n        self._bn2 = norm_layer(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)\n        self._swish = MemoryEfficientSwish()\n\n    def _is_condconv(self):\n        return self.condconv_num_expert > 1\n\n    def forward(self, inputs, drop_connect_rate=None):\n        \"\"\"\n        :param inputs: input tensor\n        :param drop_connect_rate: drop connect rate (float, between 0 and 1)\n        :return: output of block\n        \"\"\"\n\n        if self._is_condconv():\n            feat = F.adaptive_avg_pool2d(inputs, 1).flatten(1)\n            routing_w = torch.sigmoid(self.routing_fn(feat))\n\n            if self._block_args.expand_ratio != 1:\n                _expand_conv = partial(self._expand_conv, routing_weights=routing_w)\n            _depthwise_conv = partial(self._depthwise_conv, 
routing_weights=routing_w)\n            _project_conv = partial(self._project_conv, routing_weights=routing_w)\n        else:\n            if self._block_args.expand_ratio != 1:\n                _expand_conv = self._expand_conv\n            _depthwise_conv, _project_conv = self._depthwise_conv, self._project_conv\n\n        # Expansion and Depthwise Convolution\n        x = inputs\n        if self._block_args.expand_ratio != 1:\n            x = self._swish(self._bn0(_expand_conv(inputs)))\n        x = self._swish(self._bn1(_depthwise_conv(x)))\n\n        # Squeeze and Excitation\n        if self.has_se:\n            x_squeezed = F.adaptive_avg_pool2d(x, 1)\n            x_squeezed = self._se_expand(self._swish(self._se_reduce(x_squeezed)))\n            x = torch.sigmoid(x_squeezed) * x\n\n        x = self._bn2(_project_conv(x))\n\n        # Skip connection and drop connect\n        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters\n        if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:\n            if drop_connect_rate:\n                x = drop_connect(x, drop_p=drop_connect_rate, training=self.training)\n            x = x + inputs  # skip connection\n        return x\n\n    def set_swish(self):\n        \"\"\"Sets swish function as memory efficient (for training) or standard (for export)\"\"\"\n        self._swish = MemoryEfficientSwish()\n\n\nclass EfficientNet(nn.Module):\n    \"\"\"\n    An EfficientNet model. 
Most easily loaded with the .from_name or .from_pretrained methods\n\n    Args:\n        blocks_args (list): A list of BlockArgs to construct blocks\n        global_params (namedtuple): A set of GlobalParams shared between blocks\n\n    Example:\n        model = EfficientNet.from_pretrained('efficientnet-b0')\n\n    \"\"\"\n\n    def __init__(self, blocks_args=None, global_params=None, norm_layer=None):\n        super().__init__()\n        assert isinstance(blocks_args, list), 'blocks_args should be a list'\n        assert len(blocks_args) > 0, 'block args must be greater than 0'\n        self._global_params = global_params\n        self._blocks_args = blocks_args\n        if norm_layer is None:\n            norm_layer = nn.BatchNorm2d\n\n        # Get static or dynamic convolution depending on image size\n        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)\n\n        # Batch norm parameters\n        bn_mom = 1 - self._global_params.batch_norm_momentum\n        bn_eps = self._global_params.batch_norm_epsilon\n\n        # Stem\n        in_channels = 3  # rgb\n        out_channels = round_filters(32, self._global_params)  # number of output channels\n        self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)\n        self._bn0 = norm_layer(num_features=out_channels, momentum=bn_mom, eps=bn_eps)\n\n        # Build blocks\n        self._blocks = nn.ModuleList([])\n        for idx, block_args in enumerate(self._blocks_args):\n            # Update block input and output filters based on depth multiplier.\n            block_args = block_args._replace(\n                input_filters=round_filters(block_args.input_filters, self._global_params),\n                output_filters=round_filters(block_args.output_filters, self._global_params),\n                num_repeat=round_repeats(block_args.num_repeat, self._global_params)\n            )\n\n            # The first block needs to take care of stride and filter 
size increase.\n            self._blocks.append(MBConvBlock(block_args, self._global_params, norm_layer=norm_layer))\n            if block_args.num_repeat > 1:\n                block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)\n            for _ in range(block_args.num_repeat - 1):\n                self._blocks.append(MBConvBlock(block_args, self._global_params, norm_layer=norm_layer))\n\n        # Head\n        in_channels = block_args.output_filters  # output of final block\n        out_channels = round_filters(1280, self._global_params)\n        self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)\n        self._bn1 = norm_layer(num_features=out_channels, momentum=bn_mom, eps=bn_eps)\n\n        # Final linear layer\n        self._avg_pooling = nn.AdaptiveAvgPool2d(1)\n        self._dropout = nn.Dropout(self._global_params.dropout_rate)\n        self._fc = nn.Linear(out_channels, self._global_params.num_classes)\n        self._swish = MemoryEfficientSwish()\n\n    def set_swish(self):\n        \"\"\"Sets swish function as memory efficient (for training) or standard (for export)\"\"\"\n        self._swish = MemoryEfficientSwish()\n        for block in self._blocks:\n            block.set_swish()\n\n    def extract_features(self, inputs):\n        \"\"\" Returns output of the final convolution layer \"\"\"\n\n        # Stem\n        x = self._swish(self._bn0(self._conv_stem(inputs)))\n\n        # Blocks\n        for idx, block in enumerate(self._blocks):\n            drop_connect_rate = self._global_params.drop_connect_rate\n            if drop_connect_rate:\n                drop_connect_rate *= float(idx) / len(self._blocks)\n            x = block(x, drop_connect_rate=drop_connect_rate)\n\n        # Head\n        x = self._swish(self._bn1(self._conv_head(x)))\n\n        return x\n\n    def forward(self, inputs):\n        \"\"\" Calls extract_features to extract features, applies final linear layer, and 
returns logits. \"\"\"\n        bs = inputs.size(0)\n        # Convolution layers\n        x = self.extract_features(inputs)\n\n        # Pooling and final linear layer\n        x = self._avg_pooling(x)\n        x = x.view(bs, -1)\n        x = self._dropout(x)\n        x = self._fc(x)\n        return x\n\n    @classmethod\n    def from_name(cls, model_name, override_params=None, norm_layer=None, condconv_num_expert=1):\n        cls._check_model_name_is_valid(model_name)\n        blocks_args, global_params = get_model_params(model_name, override_params, condconv_num_expert=condconv_num_expert)\n        return cls(blocks_args, global_params, norm_layer=norm_layer)\n\n    @classmethod\n    def from_pretrained(cls, model_name, num_classes=1000):\n        model = cls.from_name(model_name, override_params={'num_classes': num_classes})\n        load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000))\n\n        return model\n\n    @classmethod\n    def get_image_size(cls, model_name):\n        cls._check_model_name_is_valid(model_name)\n        _, _, res, _ = efficientnet_params(model_name)\n        return res\n\n    @classmethod\n    def _check_model_name_is_valid(cls, model_name, also_need_pretrained_weights=False):\n        \"\"\" Validates model name. Note that pretrained weights are only available for\n        the first four models (efficientnet-b{i} for i in 0,1,2,3) at the moment. \"\"\"\n        num_models = 4 if also_need_pretrained_weights else 8\n        valid_models = ['efficientnet-b'+str(i) for i in range(num_models)]\n        if model_name not in valid_models:\n            raise ValueError(f'model_name={model_name} should be one of: ' + ', '.join(valid_models))\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/networks/efficientnet_pytorch/utils.py",
    "content": "\"\"\"\nThis file contains helper functions for building the model and for loading model parameters.\nThese helper functions are built to mirror those in the official TensorFlow implementation.\n\"\"\"\n\nimport re\nimport math\nimport collections\nfrom functools import partial\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.utils import model_zoo\n\n########################################################################\n############### HELPERS FUNCTIONS FOR MODEL ARCHITECTURE ###############\n########################################################################\n\n\n# Parameters for the entire model (stem, all blocks, and head)\nfrom FastAutoAugment.networks.efficientnet_pytorch.condconv import CondConv2d\n\nGlobalParams = collections.namedtuple('GlobalParams', [\n    'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate',\n    'num_classes', 'width_coefficient', 'depth_coefficient',\n    'depth_divisor', 'min_depth', 'drop_connect_rate', 'image_size'])\n\n# Parameters for an individual model block\nBlockArgs = collections.namedtuple('BlockArgs', [\n    'kernel_size', 'num_repeat', 'input_filters', 'output_filters',\n    'expand_ratio', 'id_skip', 'stride', 'se_ratio', 'condconv_num_expert'])\n\n# Change namedtuple defaults\nGlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)\nBlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)\n\n\nclass SwishImplementation(torch.autograd.Function):\n    @staticmethod\n    def forward(ctx, i):\n        result = i * torch.sigmoid(i)\n        ctx.save_for_backward(i)\n        return result\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        i = ctx.saved_tensors[0]\n        sigmoid_i = torch.sigmoid(i)\n        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))\n\n\nclass MemoryEfficientSwish(nn.Module):\n    def forward(self, x):\n        return SwishImplementation.apply(x)\n\n\ndef round_filters(filters, 
global_params):\n    \"\"\" Calculate and round number of filters based on depth multiplier. \"\"\"\n    multiplier = global_params.width_coefficient\n    if not multiplier:\n        return filters\n    divisor = global_params.depth_divisor\n    min_depth = global_params.min_depth\n    filters *= multiplier\n    min_depth = min_depth or divisor\n    new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)\n    if new_filters < 0.9 * filters:  # prevent rounding by more than 10%\n        new_filters += divisor\n    return int(new_filters)\n\n\ndef round_repeats(repeats, global_params):\n    \"\"\" Round number of repeats based on depth multiplier. \"\"\"\n    multiplier = global_params.depth_coefficient\n    if not multiplier:\n        return repeats\n    return int(math.ceil(multiplier * repeats))\n\n\ndef drop_connect(inputs, drop_p, training):\n    \"\"\" Drop connect. \"\"\"\n    if not training:\n        return inputs * (1. - drop_p)\n    batch_size = inputs.shape[0]\n    random_tensor = torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)\n    binary_tensor = random_tensor > drop_p\n    output = inputs * binary_tensor.float()\n    # output = inputs / (1. - drop_p) * binary_tensor.float()\n    return output\n\n    # if not training: return inputs\n    # batch_size = inputs.shape[0]\n    # keep_prob = 1 - drop_p\n    # random_tensor = keep_prob\n    # random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)\n    # binary_tensor = torch.floor(random_tensor)\n    # output = inputs / keep_prob * binary_tensor\n    # return output\n\n\ndef get_same_padding_conv2d(image_size=None, condconv_num_expert=1):\n    \"\"\" Chooses static padding if you have specified an image size, and dynamic padding otherwise.\n        Static padding is necessary for ONNX exporting of models. 
\"\"\"\n    if condconv_num_expert > 1:\n        return partial(CondConv2d, num_experts=condconv_num_expert)\n    elif image_size is None:\n        return Conv2dDynamicSamePadding\n    else:\n        return partial(Conv2dStaticSamePadding, image_size=image_size)\n\n\nclass Conv2dDynamicSamePadding(nn.Conv2d):\n    \"\"\" 2D Convolutions like TensorFlow, for a dynamic image size \"\"\"\n\n    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):\n        super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)\n        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2\n\n    def forward(self, x):\n        ih, iw = x.size()[-2:]\n        kh, kw = self.weight.size()[-2:]\n        sh, sw = self.stride\n        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)\n        pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)\n        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)\n        if pad_h > 0 or pad_w > 0:\n            x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])\n        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n\n\nclass Conv2dStaticSamePadding(nn.Conv2d):\n    \"\"\" 2D Convolutions like TensorFlow, for a fixed image size\"\"\"\n\n    def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs):\n        super().__init__(in_channels, out_channels, kernel_size, **kwargs)\n        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2\n\n        # Calculate padding based on image size and save it\n        assert image_size is not None\n        ih, iw = image_size if type(image_size) == list else [image_size, image_size]\n        kh, kw = self.weight.size()[-2:]\n        sh, sw = self.stride\n        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)\n        pad_h 
= max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)\n        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)\n        if pad_h > 0 or pad_w > 0:\n            self.static_padding = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))\n        else:\n            self.static_padding = Identity()\n\n    def forward(self, x):\n        x = self.static_padding(x)\n        x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n        return x\n\n\nclass Identity(nn.Module):\n    def __init__(self, ):\n        super(Identity, self).__init__()\n\n    def forward(self, input):\n        return input\n\n\n########################################################################\n############## HELPERS FUNCTIONS FOR LOADING MODEL PARAMS ##############\n########################################################################\n\n\ndef efficientnet_params(model_name):\n    \"\"\" Map EfficientNet model name to parameter coefficients. \"\"\"\n    params_dict = {\n        # Coefficients:   width,depth,res,dropout\n        'efficientnet-b0': (1.0, 1.0, 224, 0.2),\n        'efficientnet-b1': (1.0, 1.1, 240, 0.2),\n        'efficientnet-b2': (1.1, 1.2, 260, 0.3),\n        'efficientnet-b3': (1.2, 1.4, 300, 0.3),\n        'efficientnet-b4': (1.4, 1.8, 380, 0.4),\n        'efficientnet-b5': (1.6, 2.2, 456, 0.4),\n        'efficientnet-b6': (1.8, 2.6, 528, 0.5),\n        'efficientnet-b7': (2.0, 3.1, 600, 0.5),\n    }\n    return params_dict[model_name]\n\n\nclass BlockDecoder(object):\n    \"\"\" Block Decoder for readability, straight from the official TensorFlow repository \"\"\"\n\n    @staticmethod\n    def _decode_block_string(block_string):\n        \"\"\" Gets a block through a string notation of arguments. 
\"\"\"\n        assert isinstance(block_string, str)\n\n        ops = block_string.split('_')\n        options = {}\n        for op in ops:\n            splits = re.split(r'(\\d.*)', op)\n            if len(splits) >= 2:\n                key, value = splits[:2]\n                options[key] = value\n\n        # Check stride\n        assert (('s' in options and len(options['s']) == 1) or\n                (len(options['s']) == 2 and options['s'][0] == options['s'][1]))\n\n        return BlockArgs(\n            kernel_size=int(options['k']),\n            num_repeat=int(options['r']),\n            input_filters=int(options['i']),\n            output_filters=int(options['o']),\n            expand_ratio=int(options['e']),\n            id_skip=('noskip' not in block_string),\n            se_ratio=float(options['se']) if 'se' in options else None,\n            stride=[int(options['s'][0])],\n            condconv_num_expert=0\n        )\n\n    @staticmethod\n    def _encode_block_string(block):\n        \"\"\"Encodes a block to a string.\"\"\"\n        args = [\n            'r%d' % block.num_repeat,\n            'k%d' % block.kernel_size,\n            's%d%d' % (block.strides[0], block.strides[1]),\n            'e%s' % block.expand_ratio,\n            'i%d' % block.input_filters,\n            'o%d' % block.output_filters\n        ]\n        if 0 < block.se_ratio <= 1:\n            args.append('se%s' % block.se_ratio)\n        if block.id_skip is False:\n            args.append('noskip')\n        return '_'.join(args)\n\n    @staticmethod\n    def decode(string_list):\n        \"\"\"\n        Decodes a list of string notations to specify blocks inside the network.\n\n        :param string_list: a list of strings, each string is a notation of block\n        :return: a list of BlockArgs namedtuples of block args\n        \"\"\"\n        assert isinstance(string_list, list)\n        blocks_args = []\n        for block_string in string_list:\n            
blocks_args.append(BlockDecoder._decode_block_string(block_string))\n        return blocks_args\n\n    @staticmethod\n    def encode(blocks_args):\n        \"\"\"\n        Encodes a list of BlockArgs to a list of strings.\n\n        :param blocks_args: a list of BlockArgs namedtuples of block args\n        :return: a list of strings, each string is a notation of block\n        \"\"\"\n        block_strings = []\n        for block in blocks_args:\n            block_strings.append(BlockDecoder._encode_block_string(block))\n        return block_strings\n\n\ndef efficientnet(width_coefficient=None, depth_coefficient=None, dropout_rate=0.2,\n                 drop_connect_rate=0.2, image_size=None, num_classes=1000, condconv_num_expert=1):\n    \"\"\" Creates an efficientnet model. \"\"\"\n\n    blocks_args = [\n        'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25',\n        'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25',\n        'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25',\n        'r1_k3_s11_e6_i192_o320_se0.25',\n    ]\n    blocks_args = BlockDecoder.decode(blocks_args)\n\n    blocks_args_new = blocks_args[:-3]\n    for blocks_arg in blocks_args[-3:]:\n        blocks_arg = blocks_arg._replace(condconv_num_expert=condconv_num_expert)\n        blocks_args_new.append(blocks_arg)\n    blocks_args = blocks_args_new\n\n    global_params = GlobalParams(\n        batch_norm_momentum=0.99,\n        batch_norm_epsilon=1e-3,\n        dropout_rate=dropout_rate,\n        drop_connect_rate=drop_connect_rate,\n        # data_format='channels_last',  # removed, this is always true in PyTorch\n        num_classes=num_classes,\n        width_coefficient=width_coefficient,\n        depth_coefficient=depth_coefficient,\n        depth_divisor=8,\n        min_depth=None,\n        image_size=image_size,\n    )\n\n    return blocks_args, global_params\n\n\ndef get_model_params(model_name, override_params, condconv_num_expert=1):\n    \"\"\" 
Get the block args and global params for a given model \"\"\"\n    if model_name.startswith('efficientnet'):\n        w, d, s, p = efficientnet_params(model_name)\n        # note: all models have drop connect rate = 0.2\n        blocks_args, global_params = efficientnet(\n            width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s, condconv_num_expert=condconv_num_expert)\n    else:\n        raise NotImplementedError('model name is not pre-defined: %s' % model_name)\n    if override_params:\n        # ValueError will be raised here if override_params has fields not included in global_params.\n        global_params = global_params._replace(**override_params)\n    return blocks_args, global_params\n\n\nurl_map = {\n    'efficientnet-b0': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b0-355c32eb.pth',\n    'efficientnet-b1': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b1-f1951068.pth',\n    'efficientnet-b2': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b2-8bb594d6.pth',\n    'efficientnet-b3': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b3-5fb5a3c3.pth',\n    'efficientnet-b4': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b4-6ed6700e.pth',\n    'efficientnet-b5': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b5-b6417697.pth',\n    'efficientnet-b6': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b6-c76e70fd.pth',\n    'efficientnet-b7': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b7-dcc49843.pth',\n}\n\n\ndef load_pretrained_weights(model, model_name, load_fc=True):\n    \"\"\" Loads pretrained weights, and downloads if loading for the first time. 
\"\"\"\n    state_dict = model_zoo.load_url(url_map[model_name])\n    if load_fc:\n        model.load_state_dict(state_dict)\n    else:\n        state_dict.pop('_fc.weight')\n        state_dict.pop('_fc.bias')\n        res = model.load_state_dict(state_dict, strict=False)\n        assert set(res.missing_keys) == set(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'\n    print('Loaded pretrained weights for {}'.format(model_name))\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/networks/pyramidnet.py",
    "content": "import torch\nimport torch.nn as nn\nimport math\n\nfrom FastAutoAugment.networks.shakedrop import ShakeDrop\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n    \"\"\"\n    3x3 convolution with padding\n    \"\"\"\n    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n    outchannel_ratio = 1\n\n    def __init__(self, inplanes, planes, stride=1, downsample=None, p_shakedrop=1.0):\n        super(BasicBlock, self).__init__()\n        self.bn1 = nn.BatchNorm2d(inplanes)\n        self.conv1 = conv3x3(inplanes, planes, stride)\n        self.bn2 = nn.BatchNorm2d(planes)\n        self.conv2 = conv3x3(planes, planes)\n        self.bn3 = nn.BatchNorm2d(planes)\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n        self.stride = stride\n        self.shake_drop = ShakeDrop(p_shakedrop)\n\n    def forward(self, x):\n\n        out = self.bn1(x)\n        out = self.conv1(out)\n        out = self.bn2(out)\n        out = self.relu(out)\n        out = self.conv2(out)\n        out = self.bn3(out)\n\n        out = self.shake_drop(out)\n\n        if self.downsample is not None:\n            shortcut = self.downsample(x)\n            featuremap_size = shortcut.size()[2:4]\n        else:\n            shortcut = x\n            featuremap_size = out.size()[2:4]\n\n        batch_size = out.size()[0]\n        residual_channel = out.size()[1]\n        shortcut_channel = shortcut.size()[1]\n\n        if residual_channel != shortcut_channel:\n            padding = torch.autograd.Variable(\n                torch.cuda.FloatTensor(batch_size, residual_channel - shortcut_channel, featuremap_size[0],\n                                       featuremap_size[1]).fill_(0))\n            out += torch.cat((shortcut, padding), 1)\n        else:\n            out += shortcut\n\n        return out\n\n\nclass Bottleneck(nn.Module):\n    outchannel_ratio = 4\n\n    def 
__init__(self, inplanes, planes, stride=1, downsample=None, p_shakedrop=1.0):\n        super(Bottleneck, self).__init__()\n        self.bn1 = nn.BatchNorm2d(inplanes)\n        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n        self.bn2 = nn.BatchNorm2d(planes)\n        self.conv2 = nn.Conv2d(planes, (planes * 1), kernel_size=3, stride=stride,\n                               padding=1, bias=False)\n        self.bn3 = nn.BatchNorm2d((planes * 1))\n        self.conv3 = nn.Conv2d((planes * 1), planes * Bottleneck.outchannel_ratio, kernel_size=1, bias=False)\n        self.bn4 = nn.BatchNorm2d(planes * Bottleneck.outchannel_ratio)\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n        self.stride = stride\n        self.shake_drop = ShakeDrop(p_shakedrop)\n\n    def forward(self, x):\n\n        out = self.bn1(x)\n        out = self.conv1(out)\n\n        out = self.bn2(out)\n        out = self.relu(out)\n        out = self.conv2(out)\n\n        out = self.bn3(out)\n        out = self.relu(out)\n        out = self.conv3(out)\n\n        out = self.bn4(out)\n\n        out = self.shake_drop(out)\n\n        if self.downsample is not None:\n            shortcut = self.downsample(x)\n            featuremap_size = shortcut.size()[2:4]\n        else:\n            shortcut = x\n            featuremap_size = out.size()[2:4]\n\n        batch_size = out.size()[0]\n        residual_channel = out.size()[1]\n        shortcut_channel = shortcut.size()[1]\n\n        if residual_channel != shortcut_channel:\n            padding = torch.autograd.Variable(\n                torch.cuda.FloatTensor(batch_size, residual_channel - shortcut_channel, featuremap_size[0],\n                                       featuremap_size[1]).fill_(0))\n            out += torch.cat((shortcut, padding), 1)\n        else:\n            out += shortcut\n\n        return out\n\n\nclass PyramidNet(nn.Module):\n\n    def __init__(self, dataset, depth, alpha, 
num_classes, bottleneck=True):\n        super(PyramidNet, self).__init__()\n        self.dataset = dataset\n        if self.dataset.startswith('cifar'):\n            self.inplanes = 16\n            if bottleneck:\n                n = int((depth - 2) / 9)\n                block = Bottleneck\n            else:\n                n = int((depth - 2) / 6)\n                block = BasicBlock\n\n            self.addrate = alpha / (3 * n * 1.0)\n            self.ps_shakedrop = [1. - (1.0 - (0.5 / (3 * n)) * (i + 1)) for i in range(3 * n)]\n\n            self.input_featuremap_dim = self.inplanes\n            self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=3, stride=1, padding=1, bias=False)\n            self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim)\n\n            self.featuremap_dim = self.input_featuremap_dim\n            self.layer1 = self.pyramidal_make_layer(block, n)\n            self.layer2 = self.pyramidal_make_layer(block, n, stride=2)\n            self.layer3 = self.pyramidal_make_layer(block, n, stride=2)\n\n            self.final_featuremap_dim = self.input_featuremap_dim\n            self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim)\n            self.relu_final = nn.ReLU(inplace=True)\n            self.avgpool = nn.AvgPool2d(8)\n            self.fc = nn.Linear(self.final_featuremap_dim, num_classes)\n\n        elif dataset == 'imagenet':\n            blocks = {18: BasicBlock, 34: BasicBlock, 50: Bottleneck, 101: Bottleneck, 152: Bottleneck, 200: Bottleneck}\n            layers = {18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3],\n                      200: [3, 24, 36, 3]}\n\n            if layers.get(depth) is None:\n                if bottleneck == True:\n                    blocks[depth] = Bottleneck\n                    temp_cfg = int((depth - 2) / 12)\n                else:\n                    blocks[depth] = BasicBlock\n                    temp_cfg = int((depth - 2) / 8)\n\n         
       layers[depth] = [temp_cfg, temp_cfg, temp_cfg, temp_cfg]\n                print('=> the layer configuration for each stage is set to', layers[depth])\n\n            self.inplanes = 64\n            self.addrate = alpha / (sum(layers[depth]) * 1.0)\n\n            self.input_featuremap_dim = self.inplanes\n            self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=7, stride=2, padding=3, bias=False)\n            self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim)\n            self.relu = nn.ReLU(inplace=True)\n            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n            self.featuremap_dim = self.input_featuremap_dim\n            self.layer1 = self.pyramidal_make_layer(blocks[depth], layers[depth][0])\n            self.layer2 = self.pyramidal_make_layer(blocks[depth], layers[depth][1], stride=2)\n            self.layer3 = self.pyramidal_make_layer(blocks[depth], layers[depth][2], stride=2)\n            self.layer4 = self.pyramidal_make_layer(blocks[depth], layers[depth][3], stride=2)\n\n            self.final_featuremap_dim = self.input_featuremap_dim\n            self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim)\n            self.relu_final = nn.ReLU(inplace=True)\n            self.avgpool = nn.AvgPool2d(7)\n            self.fc = nn.Linear(self.final_featuremap_dim, num_classes)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n        assert len(self.ps_shakedrop) == 0, self.ps_shakedrop\n\n    def pyramidal_make_layer(self, block, block_depth, stride=1):\n        downsample = None\n        if stride != 1:  # or self.inplanes != int(round(featuremap_dim_1st)) * block.outchannel_ratio:\n            downsample = nn.AvgPool2d((2, 2), stride=(2, 2), ceil_mode=True)\n\n        layers = []\n        self.featuremap_dim = self.featuremap_dim + self.addrate\n        layers.append(block(self.input_featuremap_dim, int(round(self.featuremap_dim)), stride, downsample, p_shakedrop=self.ps_shakedrop.pop(0)))\n        for i in range(1, block_depth):\n            temp_featuremap_dim = self.featuremap_dim + self.addrate\n            layers.append(\n                block(int(round(self.featuremap_dim)) * block.outchannel_ratio, int(round(temp_featuremap_dim)), 1, p_shakedrop=self.ps_shakedrop.pop(0)))\n            self.featuremap_dim = temp_featuremap_dim\n        self.input_featuremap_dim = int(round(self.featuremap_dim)) * block.outchannel_ratio\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        if self.dataset == 'cifar10' or self.dataset == 'cifar100':\n            x = self.conv1(x)\n            x = self.bn1(x)\n\n            x = self.layer1(x)\n            x = self.layer2(x)\n            x = self.layer3(x)\n\n            x = self.bn_final(x)\n            x = self.relu_final(x)\n            x = self.avgpool(x)\n            x = x.view(x.size(0), -1)\n            x = self.fc(x)\n\n        elif self.dataset == 'imagenet':\n            x = self.conv1(x)\n            x = self.bn1(x)\n            x = self.relu(x)\n            x = self.maxpool(x)\n\n            x = self.layer1(x)\n            x = self.layer2(x)\n            x = self.layer3(x)\n            x = self.layer4(x)\n\n            x = self.bn_final(x)\n            x = self.relu_final(x)\n    
        x = self.avgpool(x)\n            x = x.view(x.size(0), -1)\n            x = self.fc(x)\n\n        return x\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/networks/resnet.py",
    "content": "# Original code: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n\nimport torch.nn as nn\nimport math\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n    \"3x3 convolution with padding\"\n    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n                     padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n    expansion = 1\n\n    def __init__(self, inplanes, planes, stride=1, downsample=None):\n        super(BasicBlock, self).__init__()\n        self.conv1 = conv3x3(inplanes, planes, stride)\n        self.bn1 = nn.BatchNorm2d(planes)\n        self.conv2 = conv3x3(planes, planes)\n        self.bn2 = nn.BatchNorm2d(planes)\n        self.relu = nn.ReLU(inplace=True)\n\n        self.downsample = downsample\n        self.stride = stride\n\n    def forward(self, x):\n        residual = x\n\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        out = self.bn2(out)\n\n        if self.downsample is not None:\n            residual = self.downsample(x)\n\n        out += residual\n        out = self.relu(out)\n\n        return out\n\n\nclass Bottleneck(nn.Module):\n    expansion = 4\n\n    def __init__(self, inplanes, planes, stride=1, downsample=None):\n        super(Bottleneck, self).__init__()\n\n        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n        self.bn1 = nn.BatchNorm2d(planes)\n        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n        self.bn2 = nn.BatchNorm2d(planes)\n        self.conv3 = nn.Conv2d(planes, planes * Bottleneck.expansion, kernel_size=1, bias=False)\n        self.bn3 = nn.BatchNorm2d(planes * Bottleneck.expansion)\n        self.relu = nn.ReLU(inplace=True)\n\n        self.downsample = downsample\n        self.stride = stride\n\n    def forward(self, x):\n        residual = x\n\n        out = self.conv1(x)\n        out 
= self.bn1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        out = self.bn2(out)\n        out = self.relu(out)\n\n        out = self.conv3(out)\n        out = self.bn3(out)\n        if self.downsample is not None:\n            residual = self.downsample(x)\n\n        out += residual\n        out = self.relu(out)\n\n        return out\n\nclass ResNet(nn.Module):\n    def __init__(self, dataset, depth, num_classes, bottleneck=False):\n        super(ResNet, self).__init__()        \n        self.dataset = dataset\n        if self.dataset.startswith('cifar'):\n            self.inplanes = 16\n            print(bottleneck)\n            if bottleneck == True:\n                n = int((depth - 2) / 9)\n                block = Bottleneck\n            else:\n                n = int((depth - 2) / 6)\n                block = BasicBlock\n\n            self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)\n            self.bn1 = nn.BatchNorm2d(self.inplanes)\n            self.relu = nn.ReLU(inplace=True)\n            self.layer1 = self._make_layer(block, 16, n)\n            self.layer2 = self._make_layer(block, 32, n, stride=2)\n            self.layer3 = self._make_layer(block, 64, n, stride=2) \n            # self.avgpool = nn.AvgPool2d(8)\n            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n            self.fc = nn.Linear(64 * block.expansion, num_classes)\n\n        elif dataset == 'imagenet':\n            blocks ={18: BasicBlock, 34: BasicBlock, 50: Bottleneck, 101: Bottleneck, 152: Bottleneck, 200: Bottleneck}\n            layers ={18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], 200: [3, 24, 36, 3]}\n            assert layers[depth], 'invalid detph for ResNet (depth should be one of 18, 34, 50, 101, 152, and 200)'\n\n            self.inplanes = 64\n            self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)\n            
self.bn1 = nn.BatchNorm2d(64)\n            self.relu = nn.ReLU(inplace=True)\n            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n            self.layer1 = self._make_layer(blocks[depth], 64, layers[depth][0])\n            self.layer2 = self._make_layer(blocks[depth], 128, layers[depth][1], stride=2)\n            self.layer3 = self._make_layer(blocks[depth], 256, layers[depth][2], stride=2)\n            self.layer4 = self._make_layer(blocks[depth], 512, layers[depth][3], stride=2)\n            # self.avgpool = nn.AvgPool2d(7)\n            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n            self.fc = nn.Linear(512 * blocks[depth].expansion, num_classes)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. / n))\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n    def _make_layer(self, block, planes, blocks, stride=1):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                nn.Conv2d(self.inplanes, planes * block.expansion,\n                          kernel_size=1, stride=stride, bias=False),\n                nn.BatchNorm2d(planes * block.expansion),\n            )\n\n        layers = []\n        layers.append(block(self.inplanes, planes, stride, downsample))\n        self.inplanes = planes * block.expansion\n        for i in range(1, blocks):\n            layers.append(block(self.inplanes, planes))\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        if self.dataset == 'cifar10' or self.dataset == 'cifar100':\n            x = self.conv1(x)\n            x = self.bn1(x)\n            x = self.relu(x)\n            \n            x = self.layer1(x)\n            x = self.layer2(x)\n          
  x = self.layer3(x)\n\n            x = self.avgpool(x)\n            x = x.view(x.size(0), -1)\n            x = self.fc(x)\n\n        elif self.dataset == 'imagenet':\n            x = self.conv1(x)\n            x = self.bn1(x)\n            x = self.relu(x)\n            x = self.maxpool(x)\n\n            x = self.layer1(x)\n            x = self.layer2(x)\n            x = self.layer3(x)\n            x = self.layer4(x)\n\n            x = self.avgpool(x)\n            x = x.view(x.size(0), -1)\n            x = self.fc(x)\n    \n        return x\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/networks/shakedrop.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass ShakeDropFunction(torch.autograd.Function):\n\n    @staticmethod\n    def forward(ctx, x, training=True, p_drop=0.5, alpha_range=[-1, 1]):\n        if training:\n            gate = torch.cuda.FloatTensor([0]).bernoulli_(1 - p_drop)\n            ctx.save_for_backward(gate)\n            if gate.item() == 0:\n                alpha = torch.cuda.FloatTensor(x.size(0)).uniform_(*alpha_range)\n                alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x)\n                return alpha * x\n            else:\n                return x\n        else:\n            return (1 - p_drop) * x\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        gate = ctx.saved_tensors[0]\n        if gate.item() == 0:\n            beta = torch.cuda.FloatTensor(grad_output.size(0)).uniform_(0, 1)\n            beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)\n            beta = Variable(beta)\n            return beta * grad_output, None, None, None\n        else:\n            return grad_output, None, None, None\n\n\nclass ShakeDrop(nn.Module):\n\n    def __init__(self, p_drop=0.5, alpha_range=[-1, 1]):\n        super(ShakeDrop, self).__init__()\n        self.p_drop = p_drop\n        self.alpha_range = alpha_range\n\n    def forward(self, x):\n        return ShakeDropFunction.apply(x, self.training, self.p_drop, self.alpha_range)\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/networks/shakeshake/__init__.py",
    "content": ""
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/networks/shakeshake/shake_resnet.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport math\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom FastAutoAugment.networks.shakeshake.shakeshake import ShakeShake\nfrom FastAutoAugment.networks.shakeshake.shakeshake import Shortcut\n\n\nclass ShakeBlock(nn.Module):\n\n    def __init__(self, in_ch, out_ch, stride=1):\n        super(ShakeBlock, self).__init__()\n        self.equal_io = in_ch == out_ch\n        self.shortcut = self.equal_io and None or Shortcut(in_ch, out_ch, stride=stride)\n\n        self.branch1 = self._make_branch(in_ch, out_ch, stride)\n        self.branch2 = self._make_branch(in_ch, out_ch, stride)\n\n    def forward(self, x):\n        h1 = self.branch1(x)\n        h2 = self.branch2(x)\n        h = ShakeShake.apply(h1, h2, self.training)\n        h0 = x if self.equal_io else self.shortcut(x)\n        return h + h0\n\n    def _make_branch(self, in_ch, out_ch, stride=1):\n        return nn.Sequential(\n            nn.ReLU(inplace=False),\n            nn.Conv2d(in_ch, out_ch, 3, padding=1, stride=stride, bias=False),\n            nn.BatchNorm2d(out_ch),\n            nn.ReLU(inplace=False),\n            nn.Conv2d(out_ch, out_ch, 3, padding=1, stride=1, bias=False),\n            nn.BatchNorm2d(out_ch))\n\n\nclass ShakeResNet(nn.Module):\n\n    def __init__(self, depth, w_base, label):\n        super(ShakeResNet, self).__init__()\n        n_units = (depth - 2) / 6\n\n        in_chs = [16, w_base, w_base * 2, w_base * 4]\n        self.in_chs = in_chs\n\n        self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)\n        self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])\n        self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)\n        self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)\n        self.fc_out = nn.Linear(in_chs[3], label)\n\n        # Initialize paramters\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * 
m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. / n))\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                m.bias.data.zero_()\n\n    def forward(self, x):\n        h = self.c_in(x)\n        h = self.layer1(h)\n        h = self.layer2(h)\n        h = self.layer3(h)\n        h = F.relu(h)\n        h = F.avg_pool2d(h, 8)\n        h = h.view(-1, self.in_chs[3])\n        h = self.fc_out(h)\n        return h\n\n    def _make_layer(self, n_units, in_ch, out_ch, stride=1):\n        layers = []\n        for i in range(int(n_units)):\n            layers.append(ShakeBlock(in_ch, out_ch, stride=stride))\n            in_ch, stride = out_ch, 1\n        return nn.Sequential(*layers)\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/networks/shakeshake/shake_resnext.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport math\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom FastAutoAugment.networks.shakeshake.shakeshake import ShakeShake\nfrom FastAutoAugment.networks.shakeshake.shakeshake import Shortcut\n\n\nclass ShakeBottleNeck(nn.Module):\n\n    def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):\n        super(ShakeBottleNeck, self).__init__()\n        self.equal_io = in_ch == out_ch\n        self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch, stride=stride)\n\n        self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary, stride)\n        self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary, stride)\n\n    def forward(self, x):\n        h1 = self.branch1(x)\n        h2 = self.branch2(x)\n        h = ShakeShake.apply(h1, h2, self.training)\n        h0 = x if self.equal_io else self.shortcut(x)\n        return h + h0\n\n    def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):\n        return nn.Sequential(\n            nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=False),\n            nn.BatchNorm2d(mid_ch),\n            nn.ReLU(inplace=False),\n            nn.Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=cardinary, bias=False),\n            nn.BatchNorm2d(mid_ch),\n            nn.ReLU(inplace=False),\n            nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),\n            nn.BatchNorm2d(out_ch))\n\n\nclass ShakeResNeXt(nn.Module):\n\n    def __init__(self, depth, w_base, cardinary, label):\n        super(ShakeResNeXt, self).__init__()\n        n_units = (depth - 2) // 9\n        n_chs = [64, 128, 256, 1024]\n        self.n_chs = n_chs\n        self.in_ch = n_chs[0]\n\n        self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)\n        self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)\n        self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)\n        self.layer3 = self._make_layer(n_units, 
n_chs[2], w_base, cardinary, 2)\n        self.fc_out = nn.Linear(n_chs[3], label)\n\n        # Initialize paramters\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. / n))\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                m.bias.data.zero_()\n\n    def forward(self, x):\n        h = self.c_in(x)\n        h = self.layer1(h)\n        h = self.layer2(h)\n        h = self.layer3(h)\n        h = F.relu(h)\n        h = F.avg_pool2d(h, 8)\n        h = h.view(-1, self.n_chs[3])\n        h = self.fc_out(h)\n        return h\n\n    def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):\n        layers = []\n        mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4\n        for i in range(n_units):\n            layers.append(ShakeBottleNeck(self.in_ch, mid_ch, out_ch, cardinary, stride=stride))\n            self.in_ch, stride = out_ch, 1\n        return nn.Sequential(*layers)\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/networks/shakeshake/shakeshake.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass ShakeShake(torch.autograd.Function):\n\n    @staticmethod\n    def forward(ctx, x1, x2, training=True):\n        if training:\n            alpha = torch.cuda.FloatTensor(x1.size(0)).uniform_()\n            alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)\n        else:\n            alpha = 0.5\n        return alpha * x1 + (1 - alpha) * x2\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        beta = torch.cuda.FloatTensor(grad_output.size(0)).uniform_()\n        beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)\n        beta = Variable(beta)\n\n        return beta * grad_output, (1 - beta) * grad_output, None\n\n\nclass Shortcut(nn.Module):\n\n    def __init__(self, in_ch, out_ch, stride):\n        super(Shortcut, self).__init__()\n        self.stride = stride\n        self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0, bias=False)\n        self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0, bias=False)\n        self.bn = nn.BatchNorm2d(out_ch)\n\n    def forward(self, x):\n        h = F.relu(x)\n\n        h1 = F.avg_pool2d(h, 1, self.stride)\n        h1 = self.conv1(h1)\n\n        h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)\n        h2 = self.conv2(h2)\n\n        h = torch.cat((h1, h2), 1)\n        return self.bn(h)\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/networks/wideresnet.py",
    "content": "import torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nimport numpy as np\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)\n\n\ndef conv_init(m):\n    classname = m.__class__.__name__\n    if classname.find('Conv') != -1:\n        init.xavier_uniform_(m.weight, gain=np.sqrt(2))\n        init.constant_(m.bias, 0)\n    elif classname.find('BatchNorm') != -1:\n        init.constant_(m.weight, 1)\n        init.constant_(m.bias, 0)\n\n\nclass WideBasic(nn.Module):\n    def __init__(self, in_planes, planes, dropout_rate, stride=1):\n        super(WideBasic, self).__init__()\n        self.bn1 = nn.BatchNorm2d(in_planes, momentum=0.9)\n        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)\n        self.dropout = nn.Dropout(p=dropout_rate)\n        self.bn2 = nn.BatchNorm2d(planes, momentum=0.9)\n        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)\n\n        self.shortcut = nn.Sequential()\n        if stride != 1 or in_planes != planes:\n            self.shortcut = nn.Sequential(\n                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),\n            )\n\n    def forward(self, x):\n        out = self.dropout(self.conv1(F.relu(self.bn1(x))))\n        out = self.conv2(F.relu(self.bn2(out)))\n        out += self.shortcut(x)\n\n        return out\n\n\nclass WideResNet(nn.Module):\n    def __init__(self, depth, widen_factor, dropout_rate, num_classes):\n        super(WideResNet, self).__init__()\n        self.in_planes = 16\n\n        assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'\n        n = int((depth - 4) / 6)\n        k = widen_factor\n\n        nStages = [16, 16*k, 32*k, 64*k]\n\n        self.conv1 = conv3x3(3, nStages[0])\n        self.layer1 = self._wide_layer(WideBasic, nStages[1], n, dropout_rate, 
stride=1)\n        self.layer2 = self._wide_layer(WideBasic, nStages[2], n, dropout_rate, stride=2)\n        self.layer3 = self._wide_layer(WideBasic, nStages[3], n, dropout_rate, stride=2)\n        self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)\n        self.linear = nn.Linear(nStages[3], num_classes)\n\n        # self.apply(conv_init)\n\n    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):\n        strides = [stride] + [1]*(num_blocks-1)\n        layers = []\n\n        for stride in strides:\n            layers.append(block(self.in_planes, planes, dropout_rate, stride))\n            self.in_planes = planes\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        out = self.conv1(x)\n        out = self.layer1(out)\n        out = self.layer2(out)\n        out = self.layer3(out)\n        out = F.relu(self.bn1(out))\n        # out = F.avg_pool2d(out, 8)\n        out = F.adaptive_avg_pool2d(out, (1, 1))\n        out = out.view(out.size(0), -1)\n        out = self.linear(out)\n\n        return out\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/safe_shell_exec.py",
    "content": "# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport os\nimport psutil\nimport re\nimport signal\nimport subprocess\nimport sys\nimport threading\nimport time\n\n\nGRACEFUL_TERMINATION_TIME_S = 5\n\n\ndef terminate_executor_shell_and_children(pid):\n    print('terminate_executor_shell_and_children+', pid)\n    # If the shell already ends, no need to terminate its child.\n    try:\n        p = psutil.Process(pid)\n    except psutil.NoSuchProcess:\n        print('nosuchprocess')\n        return\n\n    # Terminate children gracefully.\n    for child in p.children():\n        try:\n            child.terminate()\n        except psutil.NoSuchProcess:\n            pass\n\n    # Wait for graceful termination.\n    time.sleep(GRACEFUL_TERMINATION_TIME_S)\n\n    # Send STOP to executor shell to stop progress.\n    p.send_signal(signal.SIGSTOP)\n\n    # Kill children recursively.\n    for child in p.children(recursive=True):\n        try:\n            child.kill()\n        except psutil.NoSuchProcess:\n            pass\n\n    # Kill shell itself.\n    p.kill()\n    print('terminate_executor_shell_and_children-', pid)\n\n\ndef forward_stream(src_fd, dst_stream, prefix, index):\n    with os.fdopen(src_fd, 'r') as src:\n        line_buffer = ''\n        while True:\n            text = os.read(src.fileno(), 
1000)\n            if not isinstance(text, str):\n                text = text.decode('utf-8')\n            if not text:\n                break\n\n            for line in re.split('([\\r\\n])', text):\n                line_buffer += line\n                if line == '\\r' or line == '\\n':\n                    if index is not None:\n                        localtime = time.asctime(time.localtime(time.time()))\n                        line_buffer = '{time}[{rank}]<{prefix}>:{line}'.format(\n                            time=localtime,\n                            rank=str(index),\n                            prefix=prefix,\n                            line=line_buffer\n                        )\n\n                    dst_stream.write(line_buffer)\n                    dst_stream.flush()\n                    line_buffer = ''\n\n\ndef execute(command, env=None, stdout=None, stderr=None, index=None, event=None):\n    # Make a pipe for the subprocess stdout/stderr.\n    (stdout_r, stdout_w) = os.pipe()\n    (stderr_r, stderr_w) = os.pipe()\n\n    # Make a pipe for notifying the child that parent has died.\n    (r, w) = os.pipe()\n\n    middleman_pid = os.fork()\n    if middleman_pid == 0:\n        # Close unused file descriptors to enforce PIPE behavior.\n        os.close(w)\n        os.setsid()\n\n        executor_shell = subprocess.Popen(command, shell=True, env=env,\n                                          stdout=stdout_w, stderr=stderr_w)\n\n        sigterm_received = threading.Event()\n\n        def set_sigterm_received(signum, frame):\n            sigterm_received.set()\n\n        signal.signal(signal.SIGINT, set_sigterm_received)\n        signal.signal(signal.SIGTERM, set_sigterm_received)\n\n        def kill_executor_children_if_parent_dies():\n            # This read blocks until the pipe is closed on the other side\n            # due to the process termination.\n            os.read(r, 1)\n            terminate_executor_shell_and_children(executor_shell.pid)\n\n  
      bg = threading.Thread(target=kill_executor_children_if_parent_dies)\n        bg.daemon = True\n        bg.start()\n\n        def kill_executor_children_if_sigterm_received():\n            sigterm_received.wait()\n            terminate_executor_shell_and_children(executor_shell.pid)\n\n        bg = threading.Thread(target=kill_executor_children_if_sigterm_received)\n        bg.daemon = True\n        bg.start()\n\n        exit_code = executor_shell.wait()\n        os._exit(exit_code)\n\n    # Close unused file descriptors to enforce PIPE behavior.\n    os.close(r)\n    os.close(stdout_w)\n    os.close(stderr_w)\n\n    # Redirect command stdout & stderr to provided streams or sys.stdout/sys.stderr.\n    # This is useful for Jupyter Notebook that uses custom sys.stdout/sys.stderr or\n    # for redirecting to a file on disk.\n    if stdout is None:\n        stdout = sys.stdout\n    if stderr is None:\n        stderr = sys.stderr\n    stdout_fwd = threading.Thread(target=forward_stream, args=(stdout_r, stdout, 'stdout', index))\n    stderr_fwd = threading.Thread(target=forward_stream, args=(stderr_r, stderr, 'stderr', index))\n    stdout_fwd.start()\n    stderr_fwd.start()\n\n    def kill_middleman_if_master_thread_terminate():\n        event.wait()\n        try:\n            os.kill(middleman_pid, signal.SIGTERM)\n        except:\n            # The process has already been killed elsewhere\n            pass\n\n    # TODO: Currently this requires explicitly declaration of the event and signal handler to set\n    #  the event (gloo_run.py:_launch_jobs()). 
Need to figure out a generalized way to hide this behind\n    #  interfaces.\n    if event is not None:\n        bg_thread = threading.Thread(target=kill_middleman_if_master_thread_terminate)\n        bg_thread.daemon = True\n        bg_thread.start()\n\n    try:\n        res, status = os.waitpid(middleman_pid, 0)\n    except:\n        # interrupted, send middleman TERM signal which will terminate children\n        os.kill(middleman_pid, signal.SIGTERM)\n        while True:\n            try:\n                _, status = os.waitpid(middleman_pid, 0)\n                break\n            except:\n                # interrupted, wait for middleman to finish\n                pass\n\n    stdout_fwd.join()\n    stderr_fwd.join()\n    exit_code = status >> 8\n    return exit_code\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/search.py",
    "content": "import copy\nimport os\nimport sys\nimport time\nfrom collections import OrderedDict, defaultdict\n\nimport torch\n\nimport numpy as np\nfrom hyperopt import hp\nimport ray\nimport gorilla\nfrom ray.tune.trial import Trial\nfrom ray.tune.trial_runner import TrialRunner\nfrom ray.tune.suggest import HyperOptSearch\nfrom ray.tune import register_trainable, run_experiments\nfrom tqdm import tqdm\n\nfrom FastAutoAugment.archive import remove_deplicates, policy_decoder\nfrom FastAutoAugment.augmentations import augment_list\nfrom FastAutoAugment.common import get_logger, add_filehandler\nfrom FastAutoAugment.data import get_dataloaders\nfrom FastAutoAugment.metrics import Accumulator\nfrom FastAutoAugment.networks import get_model, num_class\nfrom FastAutoAugment.train import train_and_eval\nfrom theconf import Config as C, ConfigArgumentParser\n\n\ntop1_valid_by_cv = defaultdict(lambda: list)\n\n\ndef step_w_log(self):\n    original = gorilla.get_original_attribute(ray.tune.trial_runner.TrialRunner, 'step')\n\n    # log\n    cnts = OrderedDict()\n    for status in [Trial.RUNNING, Trial.TERMINATED, Trial.PENDING, Trial.PAUSED, Trial.ERROR]:\n        cnt = len(list(filter(lambda x: x.status == status, self._trials)))\n        cnts[status] = cnt\n    best_top1_acc = 0.\n    for trial in filter(lambda x: x.status == Trial.TERMINATED, self._trials):\n        if not trial.last_result:\n            continue\n        best_top1_acc = max(best_top1_acc, trial.last_result['top1_valid'])\n    print('iter', self._iteration, 'top1_acc=%.3f' % best_top1_acc, cnts, end='\\r')\n    return original(self)\n\n\npatch = gorilla.Patch(ray.tune.trial_runner.TrialRunner, 'step', step_w_log, settings=gorilla.Settings(allow_hit=True))\ngorilla.apply(patch)\n\n\nlogger = get_logger('Fast AutoAugment')\n\n\ndef _get_path(dataset, model, tag):\n    return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'models/%s_%s_%s.model' % (dataset, model, tag))     # 
TODO\n\n\n@ray.remote(num_gpus=4, max_calls=1)\ndef train_model(config, dataroot, augment, cv_ratio_test, cv_fold, save_path=None, skip_exist=False):\n    C.get()\n    C.get().conf = config\n    C.get()['aug'] = augment\n\n    result = train_and_eval(None, dataroot, cv_ratio_test, cv_fold, save_path=save_path, only_eval=skip_exist)\n    return C.get()['model']['type'], cv_fold, result\n\n\ndef eval_tta(config, augment, reporter):\n    C.get()\n    C.get().conf = config\n    cv_ratio_test, cv_fold, save_path = augment['cv_ratio_test'], augment['cv_fold'], augment['save_path']\n\n    # setup - provided augmentation rules\n    C.get()['aug'] = policy_decoder(augment, augment['num_policy'], augment['num_op'])\n\n    # eval\n    model = get_model(C.get()['model'], num_class(C.get()['dataset']))\n    ckpt = torch.load(save_path)\n    if 'model' in ckpt:\n        model.load_state_dict(ckpt['model'])\n    else:\n        model.load_state_dict(ckpt)\n    model.eval()\n\n    loaders = []\n    for _ in range(augment['num_policy']):  # TODO\n        _, tl, validloader, tl2 = get_dataloaders(C.get()['dataset'], C.get()['batch'], augment['dataroot'], cv_ratio_test, split_idx=cv_fold)\n        loaders.append(iter(validloader))\n        del tl, tl2\n\n    start_t = time.time()\n    metrics = Accumulator()\n    loss_fn = torch.nn.CrossEntropyLoss(reduction='none')\n    try:\n        while True:\n            losses = []\n            corrects = []\n            for loader in loaders:\n                data, label = next(loader)\n                data = data.cuda()\n                label = label.cuda()\n\n                pred = model(data)\n\n                loss = loss_fn(pred, label)\n                losses.append(loss.detach().cpu().numpy())\n\n                _, pred = pred.topk(1, 1, True, True)\n                pred = pred.t()\n                correct = pred.eq(label.view(1, -1).expand_as(pred)).detach().cpu().numpy()\n                corrects.append(correct)\n                del 
loss, correct, pred, data, label\n\n            losses = np.concatenate(losses)\n            losses_min = np.min(losses, axis=0).squeeze()\n\n            corrects = np.concatenate(corrects)\n            corrects_max = np.max(corrects, axis=0).squeeze()\n            metrics.add_dict({\n                'minus_loss': -1 * np.sum(losses_min),\n                'correct': np.sum(corrects_max),\n                'cnt': len(corrects_max)\n            })\n            del corrects, corrects_max\n    except StopIteration:\n        pass\n\n    del model\n    metrics = metrics / 'cnt'\n    gpu_secs = (time.time() - start_t) * torch.cuda.device_count()\n    reporter(minus_loss=metrics['minus_loss'], top1_valid=metrics['correct'], elapsed_time=gpu_secs, done=True)\n    return metrics['correct']\n\n\nif __name__ == '__main__':\n    import json\n    from pystopwatch2 import PyStopwatch\n    w = PyStopwatch()\n\n    parser = ConfigArgumentParser(conflict_handler='resolve')\n    parser.add_argument('--dataroot', type=str, default='/data/private/pretrainedmodels', help='torchvision data folder')\n    parser.add_argument('--until', type=int, default=5)\n    parser.add_argument('--num-op', type=int, default=2)\n    parser.add_argument('--num-policy', type=int, default=5)\n    parser.add_argument('--num-search', type=int, default=200)\n    parser.add_argument('--cv-ratio', type=float, default=0.4)\n    parser.add_argument('--decay', type=float, default=-1)\n    parser.add_argument('--redis', type=str, default='gpu-cloud-vnode30.dakao.io:23655')\n    parser.add_argument('--per-class', action='store_true')\n    parser.add_argument('--resume', action='store_true')\n    parser.add_argument('--smoke-test', action='store_true')\n    args = parser.parse_args()\n\n    if args.decay > 0:\n        logger.info('decay=%.4f' % args.decay)\n        C.get()['optimizer']['decay'] = args.decay\n\n    add_filehandler(logger, os.path.join('models', '%s_%s_cv%.1f.log' % (C.get()['dataset'], 
C.get()['model']['type'], args.cv_ratio)))\n    logger.info('configuration...')\n    logger.info(json.dumps(C.get().conf, sort_keys=True, indent=4))\n    logger.info('initialize ray...')\n    ray.init(redis_address=args.redis)\n\n    num_result_per_cv = 10\n    cv_num = 5\n    copied_c = copy.deepcopy(C.get().conf)\n\n    logger.info('search augmentation policies, dataset=%s model=%s' % (C.get()['dataset'], C.get()['model']['type']))\n    logger.info('----- Train without Augmentations cv=%d ratio(test)=%.1f -----' % (cv_num, args.cv_ratio))\n    w.start(tag='train_no_aug')\n    paths = [_get_path(C.get()['dataset'], C.get()['model']['type'], 'ratio%.1f_fold%d' % (args.cv_ratio, i)) for i in range(cv_num)]\n    print(paths)\n    reqs = [\n        train_model.remote(copy.deepcopy(copied_c), args.dataroot, C.get()['aug'], args.cv_ratio, i, save_path=paths[i], skip_exist=True)\n        for i in range(cv_num)]\n\n    tqdm_epoch = tqdm(range(C.get()['epoch']))\n    is_done = False\n    for epoch in tqdm_epoch:\n        while True:\n            epochs_per_cv = OrderedDict()\n            for cv_idx in range(cv_num):\n                try:\n                    latest_ckpt = torch.load(paths[cv_idx])\n                    if 'epoch' not in latest_ckpt:\n                        epochs_per_cv['cv%d' % (cv_idx + 1)] = C.get()['epoch']\n                        continue\n                    epochs_per_cv['cv%d' % (cv_idx+1)] = latest_ckpt['epoch']\n                except Exception as e:\n                    continue\n            tqdm_epoch.set_postfix(epochs_per_cv)\n            if len(epochs_per_cv) == cv_num and min(epochs_per_cv.values()) >= C.get()['epoch']:\n                is_done = True\n            if len(epochs_per_cv) == cv_num and min(epochs_per_cv.values()) >= epoch:\n                break\n            time.sleep(10)\n        if is_done:\n            break\n\n    logger.info('getting results...')\n    pretrain_results = ray.get(reqs)\n    for r_model, r_cv, r_dict in 
pretrain_results:\n        logger.info('model=%s cv=%d top1_train=%.4f top1_valid=%.4f' % (r_model, r_cv+1, r_dict['top1_train'], r_dict['top1_valid']))\n    logger.info('processed in %.4f secs' % w.pause('train_no_aug'))\n\n    if args.until == 1:\n        sys.exit(0)\n\n    logger.info('----- Search Test-Time Augmentation Policies -----')\n    w.start(tag='search')\n\n    ops = augment_list(False)\n    space = {}\n    for i in range(args.num_policy):\n        for j in range(args.num_op):\n            space['policy_%d_%d' % (i, j)] = hp.choice('policy_%d_%d' % (i, j), list(range(0, len(ops))))\n            space['prob_%d_%d' % (i, j)] = hp.uniform('prob_%d_ %d' % (i, j), 0.0, 1.0)\n            space['level_%d_%d' % (i, j)] = hp.uniform('level_%d_ %d' % (i, j), 0.0, 1.0)\n\n    final_policy_set = []\n    total_computation = 0\n    reward_attr = 'top1_valid'      # top1_valid or minus_loss\n    for _ in range(1):  # run multiple times.\n        for cv_fold in range(cv_num):\n            name = \"search_%s_%s_fold%d_ratio%.1f\" % (C.get()['dataset'], C.get()['model']['type'], cv_fold, args.cv_ratio)\n            print(name)\n            register_trainable(name, lambda augs, rpt: eval_tta(copy.deepcopy(copied_c), augs, rpt))\n            algo = HyperOptSearch(space, max_concurrent=4*20, reward_attr=reward_attr)\n\n            exp_config = {\n                name: {\n                    'run': name,\n                    'num_samples': 4 if args.smoke_test else args.num_search,\n                    'resources_per_trial': {'gpu': 1},\n                    'stop': {'training_iteration': args.num_policy},\n                    'config': {\n                        'dataroot': args.dataroot, 'save_path': paths[cv_fold],\n                        'cv_ratio_test': args.cv_ratio, 'cv_fold': cv_fold,\n                        'num_op': args.num_op, 'num_policy': args.num_policy\n                    },\n                }\n            }\n            results = 
run_experiments(exp_config, search_alg=algo, scheduler=None, verbose=0, queue_trials=True, resume=args.resume, raise_on_failed_trial=False)\n            print()\n            results = [x for x in results if x.last_result is not None]\n            results = sorted(results, key=lambda x: x.last_result[reward_attr], reverse=True)\n\n            # calculate computation usage\n            for result in results:\n                total_computation += result.last_result['elapsed_time']\n\n            for result in results[:num_result_per_cv]:\n                final_policy = policy_decoder(result.config, args.num_policy, args.num_op)\n                logger.info('loss=%.12f top1_valid=%.4f %s' % (result.last_result['minus_loss'], result.last_result['top1_valid'], final_policy))\n\n                final_policy = remove_deplicates(final_policy)\n                final_policy_set.extend(final_policy)\n\n    logger.info(json.dumps(final_policy_set))\n    logger.info('final_policy=%d' % len(final_policy_set))\n    logger.info('processed in %.4f secs, gpu hours=%.4f' % (w.pause('search'), total_computation / 3600.))\n    logger.info('----- Train with Augmentations model=%s dataset=%s aug=%s ratio(test)=%.1f -----' % (C.get()['model']['type'], C.get()['dataset'], C.get()['aug'], args.cv_ratio))\n    w.start(tag='train_aug')\n\n    num_experiments = 5\n    default_path = [_get_path(C.get()['dataset'], C.get()['model']['type'], 'ratio%.1f_default%d' % (args.cv_ratio, _)) for _ in range(num_experiments)]\n    augment_path = [_get_path(C.get()['dataset'], C.get()['model']['type'], 'ratio%.1f_augment%d' % (args.cv_ratio, _)) for _ in range(num_experiments)]\n    reqs = [train_model.remote(copy.deepcopy(copied_c), args.dataroot, C.get()['aug'], 0.0, 0, save_path=default_path[_], skip_exist=True) for _ in range(num_experiments)] + \\\n        [train_model.remote(copy.deepcopy(copied_c), args.dataroot, final_policy_set, 0.0, 0, save_path=augment_path[_]) for _ in 
range(num_experiments)]\n\n    tqdm_epoch = tqdm(range(C.get()['epoch']))\n    is_done = False\n    for epoch in tqdm_epoch:\n        while True:\n            epochs = OrderedDict()\n            for exp_idx in range(num_experiments):\n                try:\n                    if os.path.exists(default_path[exp_idx]):\n                        latest_ckpt = torch.load(default_path[exp_idx])\n                        epochs['default_exp%d' % (exp_idx + 1)] = latest_ckpt['epoch']\n                except:\n                    pass\n                try:\n                    if os.path.exists(augment_path[exp_idx]):\n                        latest_ckpt = torch.load(augment_path[exp_idx])\n                        epochs['augment_exp%d' % (exp_idx + 1)] = latest_ckpt['epoch']\n                except:\n                    pass\n\n            tqdm_epoch.set_postfix(epochs)\n            if len(epochs) == num_experiments*2 and min(epochs.values()) >= C.get()['epoch']:\n                is_done = True\n            if len(epochs) == num_experiments*2 and min(epochs.values()) >= epoch:\n                break\n            time.sleep(10)\n        if is_done:\n            break\n\n    logger.info('getting results...')\n    final_results = ray.get(reqs)\n\n    for train_mode in ['default', 'augment']:\n        avg = 0.\n        for _ in range(num_experiments):\n            r_model, r_cv, r_dict = final_results.pop(0)\n            logger.info('[%s] top1_train=%.4f top1_test=%.4f' % (train_mode, r_dict['top1_train'], r_dict['top1_test']))\n            avg += r_dict['top1_test']\n        avg /= num_experiments\n        logger.info('[%s] top1_test average=%.4f (#experiments=%d)' % (train_mode, avg, num_experiments))\n    logger.info('processed in %.4f secs' % w.pause('train_aug'))\n\n    logger.info(w)\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/tf_port/__init__.py",
    "content": ""
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/tf_port/rmsprop.py",
    "content": "import torch\nfrom torch.optim.optimizer import Optimizer\n\n\nclass RMSpropTF(Optimizer):\n    r\"\"\"Implements RMSprop algorithm.\n    Reimplement original formulation to match TF rmsprop\n    Proposed by G. Hinton in his\n    `course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.\n    The centered version first appears in `Generating Sequences\n    With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.\n    The implementation here takes the square root of the gradient average before\n    adding epsilon (note that TensorFlow interchanges these two operations). The effective\n    learning rate is thus :math:`\\alpha/(\\sqrt{v + \\epsilon})` where :math:`\\alpha` from :math:`\\alpha/(\\sqrt{v} + \\epsilon)` where :math:`\\alpha`\n    is the scheduled learning rate and :math:`v` is the weighted moving average\n    of the squared gradient.\n    Arguments:\n        params (iterable): iterable of parameters to optimize or dicts defining\n            parameter groups\n        lr (float, optional): learning rate (default: 1e-2)\n        momentum (float, optional): momentum factor (default: 0)\n        alpha (float, optional): smoothing constant (default: 0.99)\n        eps (float, optional): term added to the denominator to improve\n            numerical stability (default: 1e-8)\n        centered (bool, optional) : if ``True``, compute the centered RMSProp,\n            the gradient is normalized by an estimation of its variance\n        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n    \"\"\"\n\n    def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, momentum=0, weight_decay=0.0):\n        if not 0.0 <= lr:\n            raise ValueError(\"Invalid learning rate: {}\".format(lr))\n        if not 0.0 <= eps:\n            raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n        if not 0.0 < momentum:\n            raise ValueError(\"Invalid momentum value: 
{}\".format(momentum))\n        if not 0.0 <= alpha:\n            raise ValueError(\"Invalid alpha value: {}\".format(alpha))\n        assert momentum > 0.0\n\n        defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, weight_decay=weight_decay)\n        super(RMSpropTF, self).__init__(params, defaults)\n        self.initialized = False\n\n    def __setstate__(self, state):\n        super(RMSpropTF, self).__setstate__(state)\n        for group in self.param_groups:\n            group.setdefault('momentum', 0)\n\n    def load_state_dict(self, state_dict):\n        super(RMSpropTF, self).load_state_dict(state_dict)\n        self.initialized = True\n\n    def step(self, closure=None):\n        \"\"\"Performs a single optimization step.\n        We modified pytorch's RMSProp to be same as Tensorflow's\n        See : https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/training_ops.cc#L485\n\n        Arguments:\n            closure (callable, optional): A closure that reevaluates the model\n                and returns the loss.\n        \"\"\"\n        loss = None\n        if closure is not None:\n            loss = closure()\n\n        for group in self.param_groups:\n            for p in group['params']:\n                if p.grad is None:\n                    continue\n                grad = p.grad.data\n                if grad.is_sparse:\n                    raise RuntimeError('RMSprop does not support sparse gradients')\n                state = self.state[p]\n\n                # State initialization\n                if len(state) == 0:\n                    assert not self.initialized\n                    state['step'] = 0\n                    state['ms'] = torch.ones_like(p.data)  #, memory_format=torch.preserve_format)\n                    state['mom'] = torch.zeros_like(p.data)  #, memory_format=torch.preserve_format)\n\n                # weight decay -----\n                if group['weight_decay'] > 0:\n                    
grad = grad.add(group['weight_decay'], p.data)\n\n                rho = group['alpha']\n                ms = state['ms']\n                mom = state['mom']\n                state['step'] += 1\n\n                # ms.mul_(rho).addcmul_(1 - rho, grad, grad)\n                ms.add_(torch.mul(grad, grad).add_(-ms) * (1. - rho))\n                assert group['momentum'] > 0\n\n                # new rmsprop\n                mom.mul_(group['momentum']).addcdiv_(group['lr'], grad, (ms + group['eps']).sqrt())\n\n                p.data.add_(-1.0, mom)\n\n        return loss\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/tf_port/tpu_bn.py",
    "content": "import torch\nfrom torch.nn import BatchNorm2d\nfrom torch.nn.parameter import Parameter\nimport torch.distributed as dist\nfrom torch import nn\n\n\nclass TpuBatchNormalization(nn.Module):\n    # Ref : https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/utils.py#L113\n    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,\n                 track_running_stats=True):\n        super(TpuBatchNormalization, self).__init__()   # num_features, eps, momentum, affine, track_running_stats)\n\n        self.weight = Parameter(torch.ones(num_features))\n        self.bias = Parameter(torch.zeros(num_features))\n\n        self.register_buffer('running_mean', torch.zeros(num_features))\n        self.register_buffer('running_var', torch.ones(num_features))\n        self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))\n\n        self.eps = eps\n        self.momentum = momentum\n\n    def _reduce_avg(self, t):\n        dist.all_reduce(t, dist.ReduceOp.SUM)\n        t.mul_(1. / dist.get_world_size())\n\n    def forward(self, input):\n        if not self.training or not dist.is_initialized():\n            bn = (input - self.running_mean.view(1, self.running_mean.shape[0], 1, 1)) / \\\n                 (torch.sqrt(self.running_var.view(1, self.running_var.shape[0], 1, 1) + self.eps))\n            # print(self.weight.shape, self.bias.shape)\n            return bn.mul(self.weight.view(1, self.weight.shape[0], 1, 1)).add(self.bias.view(1, self.bias.shape[0], 1, 1))\n\n        shard_mean, shard_invstd = torch.batch_norm_stats(input, self.eps)\n        shard_vars = (1. 
/ shard_invstd) ** 2 - self.eps\n\n        shard_square_of_mean = torch.mul(shard_mean, shard_mean)\n        shard_mean_of_square = shard_vars + shard_square_of_mean\n\n        group_mean = shard_mean.clone().detach()\n        self._reduce_avg(group_mean)\n        group_mean_of_square = shard_mean_of_square.clone().detach()\n        self._reduce_avg(group_mean_of_square)\n        group_vars = group_mean_of_square - torch.mul(group_mean, group_mean)\n\n        group_mean = group_mean.detach()\n        group_vars = group_vars.detach()\n\n        # print(self.running_mean.shape, self.running_var.shape)\n        self.running_mean.mul_(1. - self.momentum).add_(group_mean.mul(self.momentum))\n        self.running_var.mul_(1. - self.momentum).add_(group_vars.mul(self.momentum))\n        self.num_batches_tracked.add_(1)\n\n        # print(input.shape, group_mean.view(1, group_mean.shape[0], 1, 1).shape, group_vars.view(1, group_vars.shape[0], 1, 1).shape, self.eps)\n        bn = (input - group_mean.view(1, group_mean.shape[0], 1, 1)) / (torch.sqrt(group_vars.view(1, group_vars.shape[0], 1, 1) + self.eps))\n        # print(self.weight.shape, self.bias.shape)\n        return bn.mul(self.weight.view(1, self.weight.shape[0], 1, 1)).add(self.bias.view(1, self.bias.shape[0], 1, 1))\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/train.py",
    "content": "import pathlib\nimport sys\n\nsys.path.append(str(pathlib.Path(__file__).parent.parent.absolute()))\n\nimport itertools\nimport json\nimport logging\nimport math\nimport os\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn, optim\nfrom torch.nn.parallel.data_parallel import DataParallel\nfrom torch.nn.parallel import DistributedDataParallel\nimport torch.distributed as dist\n\nfrom tqdm import tqdm\nfrom theconf import Config as C, ConfigArgumentParser\n\nfrom FastAutoAugment.common import get_logger, EMA, add_filehandler\nfrom FastAutoAugment.data import get_dataloaders\nfrom FastAutoAugment.lr_scheduler import adjust_learning_rate_resnet\nfrom FastAutoAugment.metrics import accuracy, Accumulator, CrossEntropyLabelSmooth\nfrom FastAutoAugment.networks import get_model, num_class\nfrom FastAutoAugment.tf_port.rmsprop import RMSpropTF\nfrom FastAutoAugment.aug_mixup import CrossEntropyMixUpLabelSmooth, mixup\nfrom warmup_scheduler import GradualWarmupScheduler\n\nlogger = get_logger('Fast AutoAugment')\nlogger.setLevel(logging.INFO)\n\n\ndef run_epoch(model, loader, loss_fn, optimizer, desc_default='', epoch=0, writer=None, verbose=1, scheduler=None, is_master=True, ema=None, wd=0.0, tqdm_disabled=False):\n    if verbose:\n        loader = tqdm(loader, disable=tqdm_disabled)\n        loader.set_description('[%s %04d/%04d]' % (desc_default, epoch, C.get()['epoch']))\n\n    params_without_bn = [params for name, params in model.named_parameters() if not ('_bn' in name or '.bn' in name)]\n\n    loss_ema = None\n    metrics = Accumulator()\n    cnt = 0\n    total_steps = len(loader)\n    steps = 0\n    for data, label in loader:\n        steps += 1\n        data, label = data.cuda(), label.cuda()\n\n        if C.get().conf.get('mixup', 0.0) <= 0.0 or optimizer is None:\n            preds = model(data)\n            loss = loss_fn(preds, label)\n        else:   # mixup\n            data, targets, shuffled_targets, lam = mixup(data, 
label, C.get()['mixup'])\n            preds = model(data)\n            loss = loss_fn(preds, targets, shuffled_targets, lam)\n            del shuffled_targets, lam\n\n        if optimizer:\n            loss += wd * (1. / 2.) * sum([torch.sum(p ** 2) for p in params_without_bn])\n            loss.backward()\n            grad_clip = C.get()['optimizer'].get('clip', 5.0)\n            if grad_clip > 0:\n                nn.utils.clip_grad_norm_(model.parameters(), grad_clip)\n            optimizer.step()\n            optimizer.zero_grad()\n\n            if ema is not None:\n                ema(model, (epoch - 1) * total_steps + steps)\n\n        top1, top5 = accuracy(preds, label, (1, 5))\n        metrics.add_dict({\n            'loss': loss.item() * len(data),\n            'top1': top1.item() * len(data),\n            'top5': top5.item() * len(data),\n        })\n        cnt += len(data)\n        if loss_ema:\n            loss_ema = loss_ema * 0.9 + loss.item() * 0.1\n        else:\n            loss_ema = loss.item()\n        if verbose:\n            postfix = metrics / cnt\n            if optimizer:\n                postfix['lr'] = optimizer.param_groups[0]['lr']\n            postfix['loss_ema'] = loss_ema\n            loader.set_postfix(postfix)\n\n        if scheduler is not None:\n            scheduler.step(epoch - 1 + float(steps) / total_steps)\n\n        del preds, loss, top1, top5, data, label\n\n    if tqdm_disabled and verbose:\n        if optimizer:\n            logger.info('[%s %03d/%03d] %s lr=%.6f', desc_default, epoch, C.get()['epoch'], metrics / cnt, optimizer.param_groups[0]['lr'])\n        else:\n            logger.info('[%s %03d/%03d] %s', desc_default, epoch, C.get()['epoch'], metrics / cnt)\n\n    metrics /= cnt\n    if optimizer:\n        metrics.metrics['lr'] = optimizer.param_groups[0]['lr']\n    if verbose:\n        for key, value in metrics.items():\n            writer.add_scalar(key, value, epoch)\n    return metrics\n\n\ndef 
train_and_eval(tag, dataroot, test_ratio=0.0, cv_fold=0, reporter=None, metric='last', save_path=None, only_eval=False, local_rank=-1, evaluation_interval=5):\n    total_batch = C.get()[\"batch\"]\n    if local_rank >= 0:\n        dist.init_process_group(backend='nccl', init_method='env://', world_size=int(os.environ['WORLD_SIZE']))\n        device = torch.device('cuda', local_rank)\n        torch.cuda.set_device(device)\n\n        C.get()['lr'] *= dist.get_world_size()\n        logger.info(f'local batch={C.get()[\"batch\"]} world_size={dist.get_world_size()} ----> total batch={C.get()[\"batch\"] * dist.get_world_size()}')\n        total_batch = C.get()[\"batch\"] * dist.get_world_size()\n\n    is_master = local_rank < 0 or dist.get_rank() == 0\n    if is_master:\n        add_filehandler(logger, args.save + '.log')\n\n    if not reporter:\n        reporter = lambda **kwargs: 0\n\n    max_epoch = C.get()['epoch']\n    trainsampler, trainloader, validloader, testloader_ = get_dataloaders(C.get()['dataset'], C.get()['batch'], dataroot, test_ratio, split_idx=cv_fold, multinode=(local_rank >= 0))\n\n    # create a model & an optimizer\n    model = get_model(C.get()['model'], num_class(C.get()['dataset']), local_rank=local_rank)\n    model_ema = get_model(C.get()['model'], num_class(C.get()['dataset']), local_rank=-1)\n    model_ema.eval()\n\n    criterion_ce = criterion = CrossEntropyLabelSmooth(num_class(C.get()['dataset']), C.get().conf.get('lb_smooth', 0))\n    if C.get().conf.get('mixup', 0.0) > 0.0:\n        criterion = CrossEntropyMixUpLabelSmooth(num_class(C.get()['dataset']), C.get().conf.get('lb_smooth', 0))\n    if C.get()['optimizer']['type'] == 'sgd':\n        optimizer = optim.SGD(\n            model.parameters(),\n            lr=C.get()['lr'],\n            momentum=C.get()['optimizer'].get('momentum', 0.9),\n            weight_decay=0.0,\n            nesterov=C.get()['optimizer'].get('nesterov', True)\n        )\n    elif C.get()['optimizer']['type'] == 
'rmsprop':\n        optimizer = RMSpropTF(\n            model.parameters(),\n            lr=C.get()['lr'],\n            weight_decay=0.0,\n            alpha=0.9, momentum=0.9,\n            eps=0.001\n        )\n    else:\n        raise ValueError('invalid optimizer type=%s' % C.get()['optimizer']['type'])\n\n    lr_scheduler_type = C.get()['lr_schedule'].get('type', 'cosine')\n    if lr_scheduler_type == 'cosine':\n        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=C.get()['epoch'], eta_min=0.)\n    elif lr_scheduler_type == 'resnet':\n        scheduler = adjust_learning_rate_resnet(optimizer)\n    elif lr_scheduler_type == 'efficientnet':\n        scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 0.97 ** int((x + C.get()['lr_schedule']['warmup']['epoch']) / 2.4))\n    else:\n        raise ValueError('invalid lr_schduler=%s' % lr_scheduler_type)\n\n    if C.get()['lr_schedule'].get('warmup', None) and C.get()['lr_schedule']['warmup']['epoch'] > 0:\n        scheduler = GradualWarmupScheduler(\n            optimizer,\n            multiplier=C.get()['lr_schedule']['warmup']['multiplier'],\n            total_epoch=C.get()['lr_schedule']['warmup']['epoch'],\n            after_scheduler=scheduler\n        )\n\n    if not tag or not is_master:\n        from FastAutoAugment.metrics import SummaryWriterDummy as SummaryWriter\n        logger.warning('tag not provided, no tensorboard log.')\n    else:\n        from tensorboardX import SummaryWriter\n    writers = [SummaryWriter(log_dir='./logs/%s/%s' % (tag, x)) for x in ['train', 'valid', 'test']]\n\n    if C.get()['optimizer']['ema'] > 0.0 and is_master:\n        # https://discuss.pytorch.org/t/how-to-apply-exponential-moving-average-decay-for-variables/10856/4?u=ildoonet\n        ema = EMA(C.get()['optimizer']['ema'])\n    else:\n        ema = None\n\n    result = OrderedDict()\n    epoch_start = 1\n    if save_path != 'test.pth':     # and is_master: --> should load 
all data(not able to be broadcasted)\n        if save_path and os.path.exists(save_path):\n            logger.info('%s file found. loading...' % save_path)\n            data = torch.load(save_path)\n            key = 'model' if 'model' in data else 'state_dict'\n\n            if 'epoch' not in data:\n                model.load_state_dict(data)\n            else:\n                logger.info('checkpoint epoch@%d' % data['epoch'])\n                if not isinstance(model, (DataParallel, DistributedDataParallel)):\n                    model.load_state_dict({k.replace('module.', ''): v for k, v in data[key].items()})\n                else:\n                    model.load_state_dict({k if 'module.' in k else 'module.'+k: v for k, v in data[key].items()})\n                logger.info('optimizer.load_state_dict+')\n                optimizer.load_state_dict(data['optimizer'])\n                if data['epoch'] < C.get()['epoch']:\n                    epoch_start = data['epoch']\n                else:\n                    only_eval = True\n                if ema is not None:\n                    ema.shadow = data.get('ema', {}) if isinstance(data.get('ema', {}), dict) else data['ema'].state_dict()\n            del data\n        else:\n            logger.info('\"%s\" file not found. skip to pretrain weights...' % save_path)\n            if only_eval:\n                logger.warning('model checkpoint not found. only-evaluation mode is off.')\n            only_eval = False\n\n    if local_rank >= 0:\n        for name, x in model.state_dict().items():\n            dist.broadcast(x, 0)\n        logger.info(f'multinode init. 
local_rank={dist.get_rank()} is_master={is_master}')\n        torch.cuda.synchronize()\n\n    tqdm_disabled = bool(os.environ.get('TASK_NAME', '')) and local_rank != 0  # KakaoBrain Environment\n\n    if only_eval:\n        logger.info('evaluation only+')\n        model.eval()\n        rs = dict()\n        rs['train'] = run_epoch(model, trainloader, criterion, None, desc_default='train', epoch=0, writer=writers[0], is_master=is_master)\n\n        with torch.no_grad():\n            rs['valid'] = run_epoch(model, validloader, criterion, None, desc_default='valid', epoch=0, writer=writers[1], is_master=is_master)\n            rs['test'] = run_epoch(model, testloader_, criterion, None, desc_default='*test', epoch=0, writer=writers[2], is_master=is_master)\n            if ema is not None and len(ema) > 0:\n                model_ema.load_state_dict({k.replace('module.', ''): v for k, v in ema.state_dict().items()})\n                rs['valid'] = run_epoch(model_ema, validloader, criterion_ce, None, desc_default='valid(EMA)', epoch=0, writer=writers[1], verbose=is_master, tqdm_disabled=tqdm_disabled)\n                rs['test'] = run_epoch(model_ema, testloader_, criterion_ce, None, desc_default='*test(EMA)', epoch=0, writer=writers[2], verbose=is_master, tqdm_disabled=tqdm_disabled)\n        for key, setname in itertools.product(['loss', 'top1', 'top5'], ['train', 'valid', 'test']):\n            if setname not in rs:\n                continue\n            result['%s_%s' % (key, setname)] = rs[setname][key]\n        result['epoch'] = 0\n        return result\n\n    # train loop\n    best_top1 = 0\n    for epoch in range(epoch_start, max_epoch + 1):\n        if local_rank >= 0:\n            trainsampler.set_epoch(epoch)\n\n        model.train()\n        rs = dict()\n        rs['train'] = run_epoch(model, trainloader, criterion, optimizer, desc_default='train', epoch=epoch, writer=writers[0], verbose=(is_master and local_rank <= 0), scheduler=scheduler, ema=ema, 
wd=C.get()['optimizer']['decay'], tqdm_disabled=tqdm_disabled)\n        model.eval()\n\n        if math.isnan(rs['train']['loss']):\n            raise Exception('train loss is NaN.')\n\n        if ema is not None and C.get()['optimizer']['ema_interval'] > 0 and epoch % C.get()['optimizer']['ema_interval'] == 0:\n            logger.info(f'ema synced+ rank={dist.get_rank()}')\n            if ema is not None:\n                model.load_state_dict(ema.state_dict())\n            for name, x in model.state_dict().items():\n                # print(name)\n                dist.broadcast(x, 0)\n            torch.cuda.synchronize()\n            logger.info(f'ema synced- rank={dist.get_rank()}')\n\n        if is_master and (epoch % evaluation_interval == 0 or epoch == max_epoch):\n            with torch.no_grad():\n                rs['valid'] = run_epoch(model, validloader, criterion_ce, None, desc_default='valid', epoch=epoch, writer=writers[1], verbose=is_master, tqdm_disabled=tqdm_disabled)\n                rs['test'] = run_epoch(model, testloader_, criterion_ce, None, desc_default='*test', epoch=epoch, writer=writers[2], verbose=is_master, tqdm_disabled=tqdm_disabled)\n\n                if ema is not None:\n                    model_ema.load_state_dict({k.replace('module.', ''): v for k, v in ema.state_dict().items()})\n                    rs['valid'] = run_epoch(model_ema, validloader, criterion_ce, None, desc_default='valid(EMA)', epoch=epoch, writer=writers[1], verbose=is_master, tqdm_disabled=tqdm_disabled)\n                    rs['test'] = run_epoch(model_ema, testloader_, criterion_ce, None, desc_default='*test(EMA)', epoch=epoch, writer=writers[2], verbose=is_master, tqdm_disabled=tqdm_disabled)\n\n            logger.info(\n                f'epoch={epoch} '\n                f'[train] loss={rs[\"train\"][\"loss\"]:.4f} top1={rs[\"train\"][\"top1\"]:.4f} '\n                f'[valid] loss={rs[\"valid\"][\"loss\"]:.4f} top1={rs[\"valid\"][\"top1\"]:.4f} '\n             
   f'[test] loss={rs[\"test\"][\"loss\"]:.4f} top1={rs[\"test\"][\"top1\"]:.4f} '\n            )\n\n            if metric == 'last' or rs[metric]['top1'] > best_top1:\n                if metric != 'last':\n                    best_top1 = rs[metric]['top1']\n                for key, setname in itertools.product(['loss', 'top1', 'top5'], ['train', 'valid', 'test']):\n                    result['%s_%s' % (key, setname)] = rs[setname][key]\n                result['epoch'] = epoch\n\n                writers[1].add_scalar('valid_top1/best', rs['valid']['top1'], epoch)\n                writers[2].add_scalar('test_top1/best', rs['test']['top1'], epoch)\n\n                reporter(\n                    loss_valid=rs['valid']['loss'], top1_valid=rs['valid']['top1'],\n                    loss_test=rs['test']['loss'], top1_test=rs['test']['top1']\n                )\n\n                # save checkpoint\n                if is_master and save_path:\n                    logger.info('save model@%d to %s, err=%.4f' % (epoch, save_path, 1 - best_top1))\n                    torch.save({\n                        'epoch': epoch,\n                        'log': {\n                            'train': rs['train'].get_dict(),\n                            'valid': rs['valid'].get_dict(),\n                            'test': rs['test'].get_dict(),\n                        },\n                        'optimizer': optimizer.state_dict(),\n                        'model': model.state_dict(),\n                        'ema': ema.state_dict() if ema is not None else None,\n                    }, save_path)\n\n    del model\n\n    result['top1_test'] = best_top1\n    return result\n\n\nif __name__ == '__main__':\n    parser = ConfigArgumentParser(conflict_handler='resolve')\n    parser.add_argument('--tag', type=str, default='')\n    parser.add_argument('--dataroot', type=str, default='/data/private/pretrainedmodels', help='torchvision data folder')\n    parser.add_argument('--save', type=str, 
default='test.pth')\n    parser.add_argument('--cv-ratio', type=float, default=0.0)\n    parser.add_argument('--cv', type=int, default=0)\n    parser.add_argument('--local_rank', type=int, default=-1)\n    parser.add_argument('--evaluation-interval', type=int, default=5)\n    parser.add_argument('--only-eval', action='store_true')\n    args = parser.parse_args()\n\n    assert (args.only_eval and args.save) or not args.only_eval, 'checkpoint path not provided in evaluation mode.'\n\n    if not args.only_eval:\n        if args.save:\n            logger.info('checkpoint will be saved at %s' % args.save)\n        else:\n            logger.warning('Provide --save argument to save the checkpoint. Without it, training result will not be saved!')\n\n    import time\n    t = time.time()\n    result = train_and_eval(args.tag, args.dataroot, test_ratio=args.cv_ratio, cv_fold=args.cv, save_path=args.save, only_eval=args.only_eval, local_rank=args.local_rank, metric='test', evaluation_interval=args.evaluation_interval)\n    elapsed = time.time() - t\n\n    logger.info('done.')\n    logger.info('model: %s' % C.get()['model'])\n    logger.info('augmentation: %s' % C.get()['aug'])\n    logger.info('\\n' + json.dumps(result, indent=4))\n    logger.info('elapsed time: %.3f Hours' % (elapsed / 3600.))\n    logger.info('top1 error in testset: %.4f' % (1. - result['top1_test']))\n    logger.info(args.save)\n"
  },
  {
    "path": "fast_autoaugment/FastAutoAugment/train_dist.py",
    "content": "import pathlib\nimport sys\n\nsys.path.append(str(pathlib.Path(__file__).parent.parent.absolute()))\n\nimport time\nimport os\nimport threading\nimport six\nfrom six.moves import queue\n\nfrom FastAutoAugment import safe_shell_exec\n\n\ndef _exec_command(command):\n    host_output = six.StringIO()\n    try:\n        exit_code = safe_shell_exec.execute(command,\n                                            stdout=host_output,\n                                            stderr=host_output)\n        if exit_code != 0:\n            print('Launching task function was not successful:\\n{host_output}'.format(host_output=host_output.getvalue()))\n            os._exit(exit_code)\n    finally:\n        host_output.close()\n    return exit_code\n\n\ndef execute_function_multithreaded(fn,\n                                   args_list,\n                                   block_until_all_done=True,\n                                   max_concurrent_executions=1000):\n    \"\"\"\n    Executes fn in multiple threads each with one set of the args in the\n    args_list.\n    :param fn: function to be executed\n    :type fn:\n    :param args_list:\n    :type args_list: list(list)\n    :param block_until_all_done: if is True, function will block until all the\n    threads are done and will return the results of each thread's execution.\n    :type block_until_all_done: bool\n    :param max_concurrent_executions:\n    :type max_concurrent_executions: int\n    :return:\n    If block_until_all_done is False, returns None. 
If block_until_all_done is\n    True, function returns the dict of results.\n        {\n            index: execution result of fn with args_list[index]\n        }\n    :rtype: dict\n    \"\"\"\n    result_queue = queue.Queue()\n    worker_queue = queue.Queue()\n\n    for i, arg in enumerate(args_list):\n        arg.append(i)\n        worker_queue.put(arg)\n\n    def fn_execute():\n        while True:\n            try:\n                arg = worker_queue.get(block=False)\n            except queue.Empty:\n                return\n            exec_index = arg[-1]\n            res = fn(*arg[:-1])\n            result_queue.put((exec_index, res))\n\n    threads = []\n    number_of_threads = min(max_concurrent_executions, len(args_list))\n\n    for _ in range(number_of_threads):\n        thread = threading.Thread(target=fn_execute)\n        if not block_until_all_done:\n            thread.daemon = True\n        thread.start()\n        threads.append(thread)\n\n    # Returns the results only if block_until_all_done is set.\n    results = None\n    if block_until_all_done:\n        # Because join() cannot be interrupted by signal, a single join()\n        # needs to be separated into join()s with timeout in a while loop.\n        have_alive_child = True\n        while have_alive_child:\n            have_alive_child = False\n            for t in threads:\n                t.join(0.1)\n                if t.is_alive():\n                    have_alive_child = True\n\n        results = {}\n        while not result_queue.empty():\n            item = result_queue.get()\n            results[item[0]] = item[1]\n\n        if len(results) != len(args_list):\n            raise RuntimeError(\n                'Some threads for func {func} did not complete '\n                'successfully.'.format(func=fn.__name__))\n    return results\n\n\nif __name__ == '__main__':\n    import argparse\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--host', type=str)\n    
parser.add_argument('--num-gpus', type=int, default=4)\n    parser.add_argument('--master', type=str, default='task1')\n    parser.add_argument('--port', type=int, default=1958)\n    parser.add_argument('-c', '--conf', type=str)\n    parser.add_argument('--args', type=str, default='')\n\n    args = parser.parse_args()\n\n    try:\n        hosts = ['task%d' % (x + 1) for x in range(int(args.host))]\n    except:\n        hosts = args.host.split(',')\n\n    cwd = os.getcwd()\n    command_list = []\n    for node_rank, host in enumerate(hosts):\n        ssh_cmd = f'ssh -t -t -o StrictHostKeyChecking=no {host} -p 22 ' \\\n                  f'\\'bash -O huponexit -c \"cd {cwd} && ' \\\n                  f'python -m torch.distributed.launch --nproc_per_node={args.num_gpus} --nnodes={len(hosts)} ' \\\n                  f'--master_addr={args.master} --master_port={args.port} --node_rank={node_rank} ' \\\n                  f'FastAutoAugment/train.py -c {args.conf} {args.args}\"' \\\n                  '\\''\n        print(ssh_cmd)\n\n        command_list.append([ssh_cmd])\n\n    execute_function_multithreaded(_exec_command,\n                                   command_list[1:],\n                                   block_until_all_done=False)\n\n    print(command_list[0])\n\n    while True:\n        time.sleep(1)\n\n    # thread = threading.Thread(target=safe_shell_exec.execute, args=(command_list[0][0],))\n    # thread.start()\n    # thread.join()\n\n    # while True:\n    #     time.sleep(1)\n"
  },
  {
    "path": "fast_autoaugment/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2019 Ildoo Kim\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "fast_autoaugment/README.md",
    "content": "# Fast AutoAugment **(Accepted at NeurIPS 2019)**\n\nOfficial [Fast AutoAugment](https://arxiv.org/abs/1905.00397) implementation in PyTorch.\n\n- Fast AutoAugment learns augmentation policies using a more efficient search strategy based on density matching.\n- Fast AutoAugment speeds up the search time by orders of magnitude while maintaining the comparable performances.\n\n<p align=\"center\">\n<img src=\"etc/search.jpg\" height=350>\n</p>\n\n## Results\n\n### CIFAR-10 / 100\n\nSearch : **3.5 GPU Hours (1428x faster than AutoAugment)**, WResNet-40x2 on Reduced CIFAR-10\n\n| Model(CIFAR-10)         | Baseline   | Cutout     | AutoAugment | Fast AutoAugment<br/>(transfer/direct) |   |\n|-------------------------|------------|------------|-------------|------------------|----|\n| Wide-ResNet-40-2        | 5.3        | 4.1        | 3.7         | 3.6 / 3.7        | [Download](https://arena.kakaocdn.net/brainrepo/fast-autoaugment/cifar10_wresnet40x2_top1_3.52.pth) |\n| Wide-ResNet-28-10       | 3.9        | 3.1        | 2.6         | 2.7 / 2.7        | [Download](https://arena.kakaocdn.net/brainrepo/fast-autoaugment/cifar10_wresnet28x10_top1.pth) |\n| Shake-Shake(26 2x32d)   | 3.6        | 3.0        | 2.5         | 2.7 / 2.5        | [Download](https://arena.kakaocdn.net/brainrepo/fast-autoaugment/cifar10_shake26_2x32d_top1_2.68.pth) |\n| Shake-Shake(26 2x96d)   | 2.9        | 2.6        | 2.0         | 2.0 / 2.0        | [Download](https://arena.kakaocdn.net/brainrepo/fast-autoaugment/cifar10_shake26_2x96d_top1_1.97.pth) |\n| Shake-Shake(26 2x112d)  | 2.8        | 2.6        | 1.9         | 2.0 / 1.9        | [Download](https://arena.kakaocdn.net/brainrepo/fast-autoaugment/cifar10_shake26_2x112d_top1_2.04.pth) |\n| PyramidNet+ShakeDrop    | 2.7        | 2.3        | 1.5         | 1.8 / 1.7        | [Download](https://arena.kakaocdn.net/brainrepo/fast-autoaugment/cifar10_pyramid272_top1_1.44.pth) |\n\n| Model(CIFAR-100)      | Baseline   | Cutout     | 
AutoAugment | Fast AutoAugment<br/>(transfer/direct) |    |\n|-----------------------|------------|------------|-------------|------------------|----|\n| Wide-ResNet-40-2      | 26.0       | 25.2       | 20.7        | 20.7 / 20.6      | [Download](https://arena.kakaocdn.net/brainrepo/fast-autoaugment/cifar100_wresnet40x2_top1_20.43.pth) |\n| Wide-ResNet-28-10     | 18.8       | 18.4       | 17.1        | 17.3 / 17.3      | [Download](https://arena.kakaocdn.net/brainrepo/fast-autoaugment/cifar100_wresnet28x10_top1_17.17.pth) |\n| Shake-Shake(26 2x96d) | 17.1       | 16.0       | 14.3        | 14.9 / 14.6      | [Download](https://arena.kakaocdn.net/brainrepo/fast-autoaugment/cifar100_shake26_2x96d_top1_15.15.pth) |\n| PyramidNet+ShakeDrop  | 14.0       | 12.2       | 10.7        | 11.9 / 11.7      | [Download](https://arena.kakaocdn.net/brainrepo/fast-autoaugment/cifar100_pyramid272_top1_11.74.pth) |\n\n### ImageNet\n\nSearch : **450 GPU Hours (33x faster than AutoAugment)**, ResNet-50 on Reduced ImageNet\n\n| Model      | Baseline   | AutoAugment | Fast AutoAugment<br/>(Top1/Top5) |    |\n|------------|------------|-------------|------------------|----|\n| ResNet-50  | 23.7 / 6.9 | 22.4 / 6.2  | **22.4 / 6.3**   | [Download](https://arena.kakaocdn.net/brainrepo/fast-autoaugment/imagenet_resnet50_top1_22.2.pth) |\n| ResNet-200 | 21.5 / 5.8 | 20.0 / 5.0  | **19.4 / 4.7**   | [Download](https://arena.kakaocdn.net/brainrepo/fast-autoaugment/imagenet_resnet200_top1_19.4.pth) |\n\nNotes\n* We evaluated resnet-50 and resnet-200 with resolution of 224 and 320, respectively. According to the original resnet paper, resnet 200 was tested with the resolution of 320. Also our resnet-200 baseline's performance was similar when we use the resolution.\n* But with recent our code clean-up and bugfixes, we've found that the baseline performs similar to the baseline even using 224x224.\n* When we use 224x224, resnet-200 performs **20.0 / 5.2**. 
Download link for the trained model is [here](https://arena.kakaocdn.net/brainrepo/fast-autoaugment/imagenet_resnet200_res224.pth).\n\nWe have conducted additional experiments with EfficientNet.\n\n| Model | Baseline   | AutoAugment |   | Our Baseline(Batch) | +Fast AA |\n|-------|------------|-------------|---|---------------------|----------|\n| B0    | 23.2       | 22.7        |   | 22.96               | 22.68    |\n\n### SVHN Test\n\nSearch : **1.5 GPU Hours**\n\n|                                  | Baseline | AutoAug / Our | Fast AutoAugment  |\n|----------------------------------|---------:|--------------:|--------:|\n| Wide-Resnet28x10                 | 1.5      | 1.1           | 1.1     |\n\n## Run\n\nWe conducted experiments under\n\n- python 3.6.9\n- pytorch 1.2.0, torchvision 0.4.0, cuda10\n\n### Search a augmentation policy\n\nPlease read ray's document to construct a proper ray cluster : https://github.com/ray-project/ray, and run search.py with the master's redis address.\n\n```\n$ python search.py -c confs/wresnet40x2_cifar10_b512.yaml --dataroot ... 
--redis ...\n```\n\n### Train a model with found policies\n\nYou can train network architectures on CIFAR-10 / 100 and ImageNet with our searched policies.\n\n- fa_reduced_cifar10 : reduced CIFAR-10(4k images), WResNet-40x2\n- fa_reduced_imagenet : reduced ImageNet(50k images, 120 classes), ResNet-50\n\n```\n$ export PYTHONPATH=$PYTHONPATH:$PWD\n$ python FastAutoAugment/train.py -c confs/wresnet40x2_cifar10_b512.yaml --aug fa_reduced_cifar10 --dataset cifar10\n$ python FastAutoAugment/train.py -c confs/wresnet40x2_cifar10_b512.yaml --aug fa_reduced_cifar10 --dataset cifar100\n$ python FastAutoAugment/train.py -c confs/wresnet28x10_cifar10_b512.yaml --aug fa_reduced_cifar10 --dataset cifar10\n$ python FastAutoAugment/train.py -c confs/wresnet28x10_cifar10_b512.yaml --aug fa_reduced_cifar10 --dataset cifar100\n...\n$ python FastAutoAugment/train.py -c confs/resnet50_b512.yaml --aug fa_reduced_imagenet\n$ python FastAutoAugment/train.py -c confs/resnet200_b512.yaml --aug fa_reduced_imagenet\n```\n\nBy adding --only-eval and --save arguments, you can test trained models without training.\n\nIf you want to train with multi-gpu/node, use `torch.distributed.launch` such as\n\n```bash\n$ python -m torch.distributed.launch --nproc_per_node={num_gpu_per_node} --nnodes={num_node} --master_addr={master} --master_port={master_port} --node_rank={0,1,2,...,num_node} FastAutoAugment/train.py -c confs/efficientnet_b4.yaml --aug fa_reduced_imagenet\n```\n\n## Citation\n\nIf you use this code in your research, please cite our [paper](https://arxiv.org/abs/1905.00397).\n\n```\n@inproceedings{lim2019fast,\n  title={Fast AutoAugment},\n  author={Lim, Sungbin and Kim, Ildoo and Kim, Taesup and Kim, Chiheon and Kim, Sungwoong},\n  booktitle={Advances in Neural Information Processing Systems (NeurIPS)},\n  year={2019}\n}\n```\n\n## Contact for Issues\n- Ildoo Kim, ildoo.kim@kakaobrain.com\n\n## References & Opensources\n\nWe increase the batch size and adapt the learning rate accordingly 
to boost the training. Otherwise, we set other hyperparameters equal to AutoAugment if possible. For the unknown hyperparameters, we follow values from the original references or we tune them to match baseline performances.\n\n- **ResNet** : [paper1](https://arxiv.org/abs/1512.03385), [paper2](https://arxiv.org/abs/1603.05027), [code](https://github.com/osmr/imgclsmob/tree/master/pytorch/pytorchcv/models)\n- **PyramidNet** : [paper](https://arxiv.org/abs/1610.02915), [code](https://github.com/dyhan0920/PyramidNet-PyTorch)\n- **Wide-ResNet** : [code](https://github.com/meliketoy/wide-resnet.pytorch)\n- **Shake-Shake** : [code](https://github.com/owruby/shake-shake_pytorch)\n- **ShakeDrop Regularization** : [paper](https://arxiv.org/abs/1802.02375), [code](https://github.com/owruby/shake-drop_pytorch)\n- **AutoAugment** : [code](https://github.com/tensorflow/models/tree/master/research/autoaugment)\n- **Ray** : [code](https://github.com/ray-project/ray)\n- **HyperOpt** : [code](https://github.com/hyperopt/hyperopt)\n"
  },
  {
    "path": "fast_autoaugment/__init__.py",
    "content": ""
  },
  {
    "path": "fast_autoaugment/archive.py",
    "content": "# Policy found on CIFAR-10 and CIFAR-100\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import defaultdict\n\nfrom FastAutoAugment.augmentations import get_augment, augment_list\n\n\ndef arsaug_policy():\n    exp0_0 = [\n        [('Solarize', 0.66, 0.34), ('Equalize', 0.56, 0.61)],\n        [('Equalize', 0.43, 0.06), ('AutoContrast', 0.66, 0.08)],\n        [('Color', 0.72, 0.47), ('Contrast', 0.88, 0.86)],\n        [('Brightness', 0.84, 0.71), ('Color', 0.31, 0.74)],\n        [('Rotate', 0.68, 0.26), ('TranslateX', 0.38, 0.88)]]\n    exp0_1 = [\n        [('TranslateY', 0.88, 0.96), ('TranslateY', 0.53, 0.79)],\n        [('AutoContrast', 0.44, 0.36), ('Solarize', 0.22, 0.48)],\n        [('AutoContrast', 0.93, 0.32), ('Solarize', 0.85, 0.26)],\n        [('Solarize', 0.55, 0.38), ('Equalize', 0.43, 0.48)],\n        [('TranslateY', 0.72, 0.93), ('AutoContrast', 0.83, 0.95)]]\n    exp0_2 = [\n        [('Solarize', 0.43, 0.58), ('AutoContrast', 0.82, 0.26)],\n        [('TranslateY', 0.71, 0.79), ('AutoContrast', 0.81, 0.94)],\n        [('AutoContrast', 0.92, 0.18), ('TranslateY', 0.77, 0.85)],\n        [('Equalize', 0.71, 0.69), ('Color', 0.23, 0.33)],\n        [('Sharpness', 0.36, 0.98), ('Brightness', 0.72, 0.78)]]\n    exp0_3 = [\n        [('Equalize', 0.74, 0.49), ('TranslateY', 0.86, 0.91)],\n        [('TranslateY', 0.82, 0.91), ('TranslateY', 0.96, 0.79)],\n        [('AutoContrast', 0.53, 0.37), ('Solarize', 0.39, 0.47)],\n        [('TranslateY', 0.22, 0.78), ('Color', 0.91, 0.65)],\n        [('Brightness', 0.82, 0.46), ('Color', 0.23, 0.91)]]\n    exp0_4 = [\n        [('Cutout', 0.27, 0.45), ('Equalize', 0.37, 0.21)],\n        [('Color', 0.43, 0.23), ('Brightness', 0.65, 0.71)],\n        [('ShearX', 0.49, 0.31), ('AutoContrast', 0.92, 0.28)],\n        [('Equalize', 0.62, 0.59), ('Equalize', 0.38, 0.91)],\n        [('Solarize', 0.57, 0.31), ('Equalize', 0.61, 
0.51)]]\n\n    exp0_5 = [\n        [('TranslateY', 0.29, 0.35), ('Sharpness', 0.31, 0.64)],\n        [('Color', 0.73, 0.77), ('TranslateX', 0.65, 0.76)],\n        [('ShearY', 0.29, 0.74), ('Posterize', 0.42, 0.58)],\n        [('Color', 0.92, 0.79), ('Equalize', 0.68, 0.54)],\n        [('Sharpness', 0.87, 0.91), ('Sharpness', 0.93, 0.41)]]\n    exp0_6 = [\n        [('Solarize', 0.39, 0.35), ('Color', 0.31, 0.44)],\n        [('Color', 0.33, 0.77), ('Color', 0.25, 0.46)],\n        [('ShearY', 0.29, 0.74), ('Posterize', 0.42, 0.58)],\n        [('AutoContrast', 0.32, 0.79), ('Cutout', 0.68, 0.34)],\n        [('AutoContrast', 0.67, 0.91), ('AutoContrast', 0.73, 0.83)]]\n\n    return exp0_0 + exp0_1 + exp0_2 + exp0_3 + exp0_4 + exp0_5 + exp0_6\n\n\ndef autoaug2arsaug(f):\n    def autoaug():\n        mapper = defaultdict(lambda: lambda x: x)\n        mapper.update({\n            'ShearX': lambda x: float_parameter(x, 0.3),\n            'ShearY': lambda x: float_parameter(x, 0.3),\n            'TranslateX': lambda x: int_parameter(x, 10),\n            'TranslateY': lambda x: int_parameter(x, 10),\n            'Rotate': lambda x: int_parameter(x, 30),\n            'Solarize': lambda x: 256 - int_parameter(x, 256),\n            'Posterize2': lambda x: 4 - int_parameter(x, 4),\n            'Contrast': lambda x: float_parameter(x, 1.8) + .1,\n            'Color': lambda x: float_parameter(x, 1.8) + .1,\n            'Brightness': lambda x: float_parameter(x, 1.8) + .1,\n            'Sharpness': lambda x: float_parameter(x, 1.8) + .1,\n            'CutoutAbs': lambda x: int_parameter(x, 20)\n        })\n\n        def low_high(name, prev_value):\n            _, low, high = get_augment(name)\n            return float(prev_value - low) / (high - low)\n\n        policies = f()\n        new_policies = []\n        for policy in policies:\n            new_policies.append([(name, pr, low_high(name, mapper[name](level))) for name, pr, level in policy])\n        return new_policies\n\n    
return autoaug\n\n\n@autoaug2arsaug\ndef autoaug_paper_cifar10():\n    return [\n        [('Invert', 0.1, 7), ('Contrast', 0.2, 6)],\n        [('Rotate', 0.7, 2), ('TranslateXAbs', 0.3, 9)],\n        [('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)],\n        [('ShearY', 0.5, 8), ('TranslateYAbs', 0.7, 9)],\n        [('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)],\n        [('ShearY', 0.2, 7), ('Posterize2', 0.3, 7)],\n        [('Color', 0.4, 3), ('Brightness', 0.6, 7)],\n        [('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)],\n        [('Equalize', 0.6, 5), ('Equalize', 0.5, 1)],\n        [('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)],\n        [('Color', 0.7, 7), ('TranslateXAbs', 0.5, 8)],\n        [('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)],\n        [('TranslateYAbs', 0.4, 3), ('Sharpness', 0.2, 6)],\n        [('Brightness', 0.9, 6), ('Color', 0.2, 8)],\n        [('Solarize', 0.5, 2), ('Invert', 0.0, 3)],\n        [('Equalize', 0.2, 0), ('AutoContrast', 0.6, 0)],\n        [('Equalize', 0.2, 8), ('Equalize', 0.6, 4)],\n        [('Color', 0.9, 9), ('Equalize', 0.6, 6)],\n        [('AutoContrast', 0.8, 4), ('Solarize', 0.2, 8)],\n        [('Brightness', 0.1, 3), ('Color', 0.7, 0)],\n        [('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)],\n        [('TranslateYAbs', 0.9, 9), ('TranslateYAbs', 0.7, 9)],\n        [('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)],\n        [('Equalize', 0.8, 8), ('Invert', 0.1, 3)],\n        [('TranslateYAbs', 0.7, 9), ('AutoContrast', 0.9, 1)],\n    ]\n\n\n@autoaug2arsaug\ndef autoaug_policy():\n    \"\"\"AutoAugment policies found on Cifar.\"\"\"\n    exp0_0 = [\n        [('Invert', 0.1, 7), ('Contrast', 0.2, 6)],\n        [('Rotate', 0.7, 2), ('TranslateXAbs', 0.3, 9)],\n        [('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)],\n        [('ShearY', 0.5, 8), ('TranslateYAbs', 0.7, 9)],\n        [('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)]]\n    exp0_1 = [\n        [('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)],\n        
[('TranslateYAbs', 0.9, 9), ('TranslateYAbs', 0.7, 9)],\n        [('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)],\n        [('Equalize', 0.8, 8), ('Invert', 0.1, 3)],\n        [('TranslateYAbs', 0.7, 9), ('AutoContrast', 0.9, 1)]]\n    exp0_2 = [\n        [('Solarize', 0.4, 5), ('AutoContrast', 0.0, 2)],\n        [('TranslateYAbs', 0.7, 9), ('TranslateYAbs', 0.7, 9)],\n        [('AutoContrast', 0.9, 0), ('Solarize', 0.4, 3)],\n        [('Equalize', 0.7, 5), ('Invert', 0.1, 3)],\n        [('TranslateYAbs', 0.7, 9), ('TranslateYAbs', 0.7, 9)]]\n    exp0_3 = [\n        [('Solarize', 0.4, 5), ('AutoContrast', 0.9, 1)],\n        [('TranslateYAbs', 0.8, 9), ('TranslateYAbs', 0.9, 9)],\n        [('AutoContrast', 0.8, 0), ('TranslateYAbs', 0.7, 9)],\n        [('TranslateYAbs', 0.2, 7), ('Color', 0.9, 6)],\n        [('Equalize', 0.7, 6), ('Color', 0.4, 9)]]\n    exp1_0 = [\n        [('ShearY', 0.2, 7), ('Posterize2', 0.3, 7)],\n        [('Color', 0.4, 3), ('Brightness', 0.6, 7)],\n        [('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)],\n        [('Equalize', 0.6, 5), ('Equalize', 0.5, 1)],\n        [('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)]]\n    exp1_1 = [\n        [('Brightness', 0.3, 7), ('AutoContrast', 0.5, 8)],\n        [('AutoContrast', 0.9, 4), ('AutoContrast', 0.5, 6)],\n        [('Solarize', 0.3, 5), ('Equalize', 0.6, 5)],\n        [('TranslateYAbs', 0.2, 4), ('Sharpness', 0.3, 3)],\n        [('Brightness', 0.0, 8), ('Color', 0.8, 8)]]\n    exp1_2 = [\n        [('Solarize', 0.2, 6), ('Color', 0.8, 6)],\n        [('Solarize', 0.2, 6), ('AutoContrast', 0.8, 1)],\n        [('Solarize', 0.4, 1), ('Equalize', 0.6, 5)],\n        [('Brightness', 0.0, 0), ('Solarize', 0.5, 2)],\n        [('AutoContrast', 0.9, 5), ('Brightness', 0.5, 3)]]\n    exp1_3 = [\n        [('Contrast', 0.7, 5), ('Brightness', 0.0, 2)],\n        [('Solarize', 0.2, 8), ('Solarize', 0.1, 5)],\n        [('Contrast', 0.5, 1), ('TranslateYAbs', 0.2, 9)],\n        [('AutoContrast', 0.6, 5), 
('TranslateYAbs', 0.0, 9)],\n        [('AutoContrast', 0.9, 4), ('Equalize', 0.8, 4)]]\n    exp1_4 = [\n        [('Brightness', 0.0, 7), ('Equalize', 0.4, 7)],\n        [('Solarize', 0.2, 5), ('Equalize', 0.7, 5)],\n        [('Equalize', 0.6, 8), ('Color', 0.6, 2)],\n        [('Color', 0.3, 7), ('Color', 0.2, 4)],\n        [('AutoContrast', 0.5, 2), ('Solarize', 0.7, 2)]]\n    exp1_5 = [\n        [('AutoContrast', 0.2, 0), ('Equalize', 0.1, 0)],\n        [('ShearY', 0.6, 5), ('Equalize', 0.6, 5)],\n        [('Brightness', 0.9, 3), ('AutoContrast', 0.4, 1)],\n        [('Equalize', 0.8, 8), ('Equalize', 0.7, 7)],\n        [('Equalize', 0.7, 7), ('Solarize', 0.5, 0)]]\n    exp1_6 = [\n        [('Equalize', 0.8, 4), ('TranslateYAbs', 0.8, 9)],\n        [('TranslateYAbs', 0.8, 9), ('TranslateYAbs', 0.6, 9)],\n        [('TranslateYAbs', 0.9, 0), ('TranslateYAbs', 0.5, 9)],\n        [('AutoContrast', 0.5, 3), ('Solarize', 0.3, 4)],\n        [('Solarize', 0.5, 3), ('Equalize', 0.4, 4)]]\n    exp2_0 = [\n        [('Color', 0.7, 7), ('TranslateXAbs', 0.5, 8)],\n        [('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)],\n        [('TranslateYAbs', 0.4, 3), ('Sharpness', 0.2, 6)],\n        [('Brightness', 0.9, 6), ('Color', 0.2, 8)],\n        [('Solarize', 0.5, 2), ('Invert', 0.0, 3)]]\n    exp2_1 = [\n        [('AutoContrast', 0.1, 5), ('Brightness', 0.0, 0)],\n        [('CutoutAbs', 0.2, 4), ('Equalize', 0.1, 1)],\n        [('Equalize', 0.7, 7), ('AutoContrast', 0.6, 4)],\n        [('Color', 0.1, 8), ('ShearY', 0.2, 3)],\n        [('ShearY', 0.4, 2), ('Rotate', 0.7, 0)]]\n    exp2_2 = [\n        [('ShearY', 0.1, 3), ('AutoContrast', 0.9, 5)],\n        [('TranslateYAbs', 0.3, 6), ('CutoutAbs', 0.3, 3)],\n        [('Equalize', 0.5, 0), ('Solarize', 0.6, 6)],\n        [('AutoContrast', 0.3, 5), ('Rotate', 0.2, 7)],\n        [('Equalize', 0.8, 2), ('Invert', 0.4, 0)]]\n    exp2_3 = [\n        [('Equalize', 0.9, 5), ('Color', 0.7, 0)],\n        [('Equalize', 0.1, 1), ('ShearY', 
PARAMETER_MAX = 10  # augmentation levels are expressed on a 0..10 scale


def float_parameter(level, maxval):
    """Scale `level` (0..PARAMETER_MAX) linearly into the float range [0.0, maxval]."""
    return float(level) * maxval / PARAMETER_MAX


def int_parameter(level, maxval):
    """Scale `level` (0..PARAMETER_MAX) into an integer in [0, maxval] (truncated, not rounded)."""
    return int(float_parameter(level, maxval))


def no_duplicates(f):
    """Decorator for zero-argument policy factories: call `f()` and strip
    policies whose op-name sequence duplicates an earlier policy's."""
    import functools  # local import keeps this block self-contained

    @functools.wraps(f)  # preserve the wrapped factory's name/docstring
    def wrap_remove_duplicates():
        policies = f()
        return remove_duplicates(policies)

    return wrap_remove_duplicates


def remove_duplicates(policies):
    """Return `policies` with duplicates removed, keeping first occurrences in order.

    Each policy is a sequence of ops; the dedup key is the '_'-joined sequence
    of op NAMES only (op[0]). Two policies with identical op names but different
    probabilities/magnitudes therefore count as duplicates — this matches the
    original behaviour and is intentional.
    """
    seen = set()
    unique = []
    for ops in policies:
        key = '_'.join(op[0] for op in ops)
        if key not in seen:
            seen.add(key)
            unique.append(ops)
    return unique


# Backward-compatible alias for the original (misspelled) public name.
remove_deplicates = remove_duplicates
0.320742172554767]], [[\"Invert\", 0.26816063450777416, 0.27152062163148327], [\"Equalize\", 0.6786829200236982, 0.7469412443514213]], [[\"Contrast\", 0.3920564414367518, 0.7493644582838497], [\"TranslateY\", 0.8941657805606704, 0.6580846856375955]], [[\"Equalize\", 0.875509207399372, 0.9061130537645283], [\"Cutout\", 0.4940280679087308, 0.7896229623628276]], [[\"Contrast\", 0.3331423298065147, 0.7170041362529597], [\"ShearX\", 0.7425484291842793, 0.5285117152426109]], [[\"Equalize\", 0.97344237365026, 0.4745759720473106], [\"TranslateY\", 0.055863458430295276, 0.9625142022954672]], [[\"TranslateX\", 0.6810614083109192, 0.7509937355495521], [\"TranslateY\", 0.3866463019475701, 0.5185481505576112]], [[\"Sharpness\", 0.4751529944753671, 0.550464012488733], [\"Cutout\", 0.9472914750534814, 0.5584925992985023]], [[\"Contrast\", 0.054606784909375095, 0.17257080196712182], [\"Cutout\", 0.6077026782754803, 0.7996504165944938]], [[\"ShearX\", 0.328798428243695, 0.2769563264079157], [\"Cutout\", 0.9037632437023772, 0.4915809476763595]], [[\"Cutout\", 0.6891202672363478, 0.9951490996172914], [\"Posterize\", 0.06532762462628705, 0.4005246609075227]], [[\"TranslateY\", 0.6908583592523334, 0.725612120376128], [\"Rotate\", 0.39907735501746666, 0.36505798032223147]], [[\"TranslateX\", 0.10398364107399072, 0.5913918470536627], [\"Rotate\", 0.7169811539340365, 0.8283850670648724]], [[\"ShearY\", 0.9526373530768361, 0.4482347365639251], [\"Contrast\", 0.4203947336351471, 0.41526799558953864]], [[\"Contrast\", 0.24894431199700073, 0.09578870500994707], [\"Solarize\", 0.2273713345927395, 0.6214942914963707]], [[\"TranslateX\", 0.06331228870032912, 0.8961907489444944], [\"Cutout\", 0.5110007859958743, 0.23704875994050723]], [[\"Cutout\", 0.3769183548846172, 0.6560944580253987], [\"TranslateY\", 0.7201924599434143, 0.4132476526938319]], [[\"Invert\", 0.6707431156338866, 0.11622795952464149], [\"Posterize\", 0.12075972752370845, 0.18024933294172307]], [[\"Color\", 0.5010057264087142, 
0.5277767327434318], [\"Rotate\", 0.9486115946366559, 0.31485546630220784]], [[\"ShearX\", 0.31741302466630406, 0.1991215806270692], [\"Invert\", 0.3744727015523084, 0.6914113986757578]], [[\"Brightness\", 0.40348479064392617, 0.8924182735724888], [\"Brightness\", 0.1973098763857779, 0.3939288933689655]], [[\"Color\", 0.01208688664030888, 0.6055693000885217], [\"Equalize\", 0.433259451147881, 0.420711137966155]], [[\"Cutout\", 0.2620018360076487, 0.11594468278143644], [\"Rotate\", 0.1310401567856766, 0.7244318146544101]], [[\"ShearX\", 0.15249651845933576, 0.35277277071866986], [\"Contrast\", 0.28221794032094016, 0.42036586509397444]], [[\"Brightness\", 0.8492912150468908, 0.26386920887886056], [\"Solarize\", 0.8764208056263386, 0.1258195122766067]], [[\"ShearX\", 0.8537058239675831, 0.8415101816171269], [\"AutoContrast\", 0.23958568830416294, 0.9889049529564014]], [[\"Rotate\", 0.6463207930684552, 0.8750192129056532], [\"Contrast\", 0.6865032211768652, 0.8564981333033417]], [[\"Equalize\", 0.8877190311811044, 0.7370995897848609], [\"TranslateX\", 0.9979660314391368, 0.005683998913244781]], [[\"Color\", 0.6420017551677819, 0.6225337265571229], [\"Solarize\", 0.8344504978566362, 0.8332856969941151]], [[\"ShearX\", 0.7439332981992567, 0.9747608698582039], [\"Equalize\", 0.6259189804002959, 0.028017478098245174]], [[\"TranslateY\", 0.39794770293366843, 0.8482966537902709], [\"Rotate\", 0.9312935630405351, 0.5300586925826072]], [[\"Cutout\", 0.8904075572021911, 0.3522934742068766], [\"Equalize\", 0.6431186289473937, 0.9930577962126151]], [[\"Contrast\", 0.9183553386089476, 0.44974266209396685], [\"TranslateY\", 0.8193684583123862, 0.9633741156526566]], [[\"ShearY\", 0.616078299924283, 0.19219314358924766], [\"Solarize\", 0.1480945914138868, 0.05922109541654652]], [[\"Solarize\", 0.25332455064128157, 0.18853037431947994], [\"ShearY\", 0.9518390093954243, 0.14603930044061142]], [[\"Color\", 0.8094378664335412, 0.37029830225408433], [\"Contrast\", 0.29504113617467465, 
0.065096365468442]], [[\"AutoContrast\", 0.7075167558685455, 0.7084621693458267], [\"Sharpness\", 0.03555539453323875, 0.5651948313888351]], [[\"TranslateY\", 0.5969982600930229, 0.9857264201029572], [\"Rotate\", 0.9898628564873607, 0.1985685534926911]], [[\"Invert\", 0.14915939942810352, 0.6595839632446547], [\"Posterize\", 0.768535289994361, 0.5997358684618563]], [[\"Equalize\", 0.9162691815967111, 0.3331035307653627], [\"Color\", 0.8169118187605557, 0.7653910258006366]], [[\"Rotate\", 0.43489185299530897, 0.752215269135173], [\"Brightness\", 0.1569828560334806, 0.8002808712857853]], [[\"Invert\", 0.931876215328345, 0.029428644395760872], [\"Equalize\", 0.6330036052674145, 0.7235531014288485]], [[\"ShearX\", 0.5216138393704968, 0.849272958911589], [\"AutoContrast\", 0.19572688655120263, 0.9786551568639575]], [[\"ShearX\", 0.9899586208275011, 0.22580547500610293], [\"Brightness\", 0.9831311903178727, 0.5055159610855606]], [[\"Brightness\", 0.29179117009211486, 0.48003584672937294], [\"Solarize\", 0.7544252317330058, 0.05806581735063043]], [[\"AutoContrast\", 0.8919800329537786, 0.8511261613698553], [\"Contrast\", 0.49199446084551035, 0.7302297140181429]], [[\"Cutout\", 0.7079723710644835, 0.032565015538375874], [\"AutoContrast\", 0.8259782090388609, 0.7860708789468442]], [[\"Posterize\", 0.9980262659801914, 0.6725084224935673], [\"ShearY\", 0.6195568269664682, 0.5444170291816751]], [[\"Posterize\", 0.8687351834713217, 0.9978004914422602], [\"Equalize\", 0.4532646848325955, 0.6486748015710573]], [[\"Contrast\", 0.2713928776950594, 0.15255249557027806], [\"ShearY\", 0.9276834387970199, 0.5266542862333478]], [[\"AutoContrast\", 0.5240786618055582, 0.9325642258930253], [\"Cutout\", 0.38448627892037357, 0.21219415055662394]], [[\"TranslateX\", 0.4299517937295352, 0.20133751201386152], [\"TranslateX\", 0.6753468310276597, 0.6985621035400441]], [[\"Rotate\", 0.4006472499103597, 0.6704748473357586], [\"Equalize\", 0.674161668148079, 0.6528530101705237]], [[\"Equalize\", 
0.9139902833674455, 0.9015103149680278], [\"Sharpness\", 0.7289667720691948, 0.7623606352376232]], [[\"Cutout\", 0.5911267429414259, 0.5953141187177585], [\"Rotate\", 0.5219064817468504, 0.11085141355857986]], [[\"TranslateX\", 0.3620095133946267, 0.26194039409492476], [\"Rotate\", 0.3929841359545597, 0.4913406720338047]], [[\"Invert\", 0.5175298901458896, 0.001661410821811482], [\"Invert\", 0.004656581318332242, 0.8157622192213624]], [[\"AutoContrast\", 0.013609693335051465, 0.9318651749409604], [\"Invert\", 0.8980844358979592, 0.2268511862780368]], [[\"ShearY\", 0.7717126261142194, 0.09975547983707711], [\"Equalize\", 0.7808494401429572, 0.4141412091009955]], [[\"TranslateX\", 0.5878675721341552, 0.29813268038163376], [\"Posterize\", 0.21257276051591356, 0.2837285296666412]], [[\"Brightness\", 0.4268335108566488, 0.4723784991635417], [\"Cutout\", 0.9386262901570471, 0.6597686851494288]], [[\"ShearX\", 0.8259423807590159, 0.6215304795389204], [\"Invert\", 0.6663365779667443, 0.7729669184580387]], [[\"ShearY\", 0.4801338723951297, 0.5220145420100984], [\"Solarize\", 0.9165803796596582, 0.04299335502862134]], [[\"Color\", 0.17621114853558817, 0.7092601754635434], [\"ShearX\", 0.9014406936728542, 0.6028711944367818]], [[\"Rotate\", 0.13073284972300658, 0.9088831512880851], [\"ShearX\", 0.4228105332316806, 0.7985249783662675]], [[\"Brightness\", 0.9182753692730031, 0.0063635477774044436], [\"Color\", 0.4279825602663798, 0.28727149118585327]], [[\"Equalize\", 0.578218285372267, 0.9611758542158054], [\"Contrast\", 0.5471552264150691, 0.8819635504027596]], [[\"Brightness\", 0.3208589067274543, 0.45324733565167497], [\"Solarize\", 0.5218455808633233, 0.5946097503647126]], [[\"Equalize\", 0.3790381278653, 0.8796082535775276], [\"Solarize\", 0.4875526773149246, 0.5186585878052613]], [[\"ShearY\", 0.12026461479557571, 0.1336953429068397], [\"Posterize\", 0.34373988646025766, 0.8557727670803785]], [[\"Cutout\", 0.2396745247507467, 0.8123036135209865], [\"Equalize\", 
0.05022807681008945, 0.6648492261984383]], [[\"Brightness\", 0.35226676470748264, 0.5950011514888855], [\"Rotate\", 0.27555076067000894, 0.9170063321486026]], [[\"ShearX\", 0.320224630647278, 0.9683584649071976], [\"Invert\", 0.6905585196648905, 0.5929115667894518]], [[\"Color\", 0.9941395717559652, 0.7474441679798101], [\"Sharpness\", 0.7559998478658021, 0.6656052889626682]], [[\"ShearY\", 0.4004220568345669, 0.5737646992826074], [\"Equalize\", 0.9983495213746147, 0.8307907033362303]], [[\"Color\", 0.13726809242038207, 0.9378850119950549], [\"Equalize\", 0.9853362454752445, 0.42670264496554156]], [[\"Invert\", 0.13514636153298576, 0.13516363849081958], [\"Sharpness\", 0.2031189356693901, 0.6110226359872745]], [[\"TranslateX\", 0.7360305209630797, 0.41849698571655614], [\"Contrast\", 0.8972161549144564, 0.7820296625565641]], [[\"Color\", 0.02713118828682548, 0.717110684828096], [\"TranslateY\", 0.8118759006836348, 0.9120098002024992]], [[\"Sharpness\", 0.2915428949403711, 0.7630303724396518], [\"Solarize\", 0.22030536162851078, 0.38654526772661757]], [[\"Equalize\", 0.9949114839538582, 0.7193630656062793], [\"AutoContrast\", 0.00889496657931299, 0.2291400476524672]], [[\"Rotate\", 0.7120948976490488, 0.7804359309791055], [\"Cutout\", 0.10445418104923654, 0.8022999156052766]], [[\"Equalize\", 0.7941710117902707, 0.8648170634288153], [\"Invert\", 0.9235642581144047, 0.23810725859722381]], [[\"Cutout\", 0.3669397998623156, 0.42612815083245004], [\"Solarize\", 0.5896322046441561, 0.40525016166956795]], [[\"Color\", 0.8389858785714184, 0.4805764176488667], [\"Rotate\", 0.7483931487048825, 0.4731174601400677]], [[\"Sharpness\", 0.19006538611394763, 0.9480745790240234], [\"TranslateY\", 0.13904429049439282, 0.04117685330615939]], [[\"TranslateY\", 0.9958097661701637, 0.34853788612580905], [\"Cutout\", 0.2235829624082113, 0.3737887095480745]], [[\"ShearX\", 0.635453761342424, 0.6063917273421382], [\"Posterize\", 0.8738297843709666, 0.4893042590265556]], [[\"Brightness\", 
0.7907245198402727, 0.7082189713070691], [\"Color\", 0.030313003541849737, 0.6927897798493439]], [[\"Cutout\", 0.6965622481073525, 0.8103522907758203], [\"ShearY\", 0.6186794303078708, 0.28640671575703547]], [[\"ShearY\", 0.43734910588450226, 0.32549342535621517], [\"ShearX\", 0.08154980987651872, 0.3286764923112455]], [[\"AutoContrast\", 0.5262462005050853, 0.8175584582465848], [\"Contrast\", 0.8683217097363655, 0.548776281479276]], [[\"ShearY\", 0.03957878500311707, 0.5102350637943197], [\"Rotate\", 0.13794708520303778, 0.38035687712954236]], [[\"Sharpness\", 0.634288567312677, 0.6387948309075822], [\"AutoContrast\", 0.13437288694693272, 0.7150448869023095]], [[\"Contrast\", 0.5198339640088544, 0.9409429390321714], [\"Cutout\", 0.09489154903321972, 0.6228488803821982]], [[\"Equalize\", 0.8955909061806043, 0.7727336527163008], [\"AutoContrast\", 0.6459479564441762, 0.7065467781139214]], [[\"Invert\", 0.07214420843537739, 0.15334721382249505], [\"ShearX\", 0.9242027778363903, 0.5809187849982554]], [[\"Equalize\", 0.9144084379856188, 0.9457539278608998], [\"Sharpness\", 0.14337499858300173, 0.5978054365425495]], [[\"Posterize\", 0.18894269796951202, 0.14676331276539045], [\"Equalize\", 0.846204299950047, 0.0720601838168885]], [[\"Contrast\", 0.47354445405741163, 0.1793650330107468], [\"Solarize\", 0.9086106327264657, 0.7578807802091502]], [[\"AutoContrast\", 0.11805466892967886, 0.6773620948318575], [\"TranslateX\", 0.584222568299264, 0.9475693349391936]], [[\"Brightness\", 0.5833017701352768, 0.6892593824176294], [\"AutoContrast\", 0.9073141314561828, 0.5823085733964589]], [[\"TranslateY\", 0.5711231614144834, 0.6436240447620021], [\"Contrast\", 0.21466964050052473, 0.8042843954486391]], [[\"Contrast\", 0.22967904487976765, 0.2343103109298762], [\"Invert\", 0.5502897289159286, 0.386181060792375]], [[\"Invert\", 0.7008423439928628, 0.4234003051405053], [\"Rotate\", 0.77270460187611, 0.6650852696828039]], [[\"Invert\", 0.050618322309703534, 0.24277027926683614], 
[\"TranslateX\", 0.789703489736613, 0.5116446685339312]], [[\"Color\", 0.363898083076868, 0.7870323584210503], [\"ShearY\", 0.009608425513626617, 0.6188625018465327]], [[\"TranslateY\", 0.9447601615216088, 0.8605867115798349], [\"Equalize\", 0.24139180127003634, 0.9587337957930782]], [[\"Equalize\", 0.3968589440144503, 0.626206375426996], [\"Solarize\", 0.3215967960673186, 0.826785464835443]], [[\"TranslateX\", 0.06947339047121326, 0.016705969558222122], [\"Contrast\", 0.6203392406528407, 0.6433525559906872]], [[\"Solarize\", 0.2479835265518212, 0.6335009955617831], [\"Sharpness\", 0.6260191862978083, 0.18998095149428562]], [[\"Invert\", 0.9818841924943431, 0.03252098144087934], [\"TranslateY\", 0.9740718042586802, 0.32038951753031475]], [[\"Solarize\", 0.8795784664090814, 0.7014953994354041], [\"AutoContrast\", 0.8508018319577783, 0.09321935255338443]], [[\"Color\", 0.8067046326105318, 0.13732893832354054], [\"Contrast\", 0.7358549680271418, 0.7880588355974301]], [[\"Posterize\", 0.5005885536838065, 0.7152229305267599], [\"ShearX\", 0.6714249591308944, 0.7732232697859908]], [[\"TranslateY\", 0.5657943483353953, 0.04622399873706862], [\"AutoContrast\", 0.2787442688649845, 0.567024378767143]], [[\"ShearY\", 0.7589839214283295, 0.041071003934029404], [\"Equalize\", 0.3719852873722692, 0.43285778682687326]], [[\"Posterize\", 0.8841266183653291, 0.42441306955476366], [\"Cutout\", 0.06578801759412933, 0.5961125797961526]], [[\"Rotate\", 0.4057875004314082, 0.20241115848366442], [\"AutoContrast\", 0.19331542807918067, 0.7175484678480565]], [[\"Contrast\", 0.20331327116693088, 0.17135387852218742], [\"Cutout\", 0.6282459410351067, 0.6690015305529187]], [[\"ShearX\", 0.4309850328306535, 0.99321178125828], [\"AutoContrast\", 0.01809604030453338, 0.693838277506365]], [[\"Rotate\", 0.24343531125298268, 0.5326412444169899], [\"Sharpness\", 0.8663989992597494, 0.7643990609130789]], [[\"Rotate\", 0.9785019204622459, 0.8941922576710696], [\"ShearY\", 0.3823185048761075, 
0.9258854046017292]], [[\"ShearY\", 0.5502613342963388, 0.6193478797355644], [\"Sharpness\", 0.2212116534610532, 0.6648232390110979]], [[\"TranslateY\", 0.43222920981513757, 0.5657636397633089], [\"ShearY\", 0.9153733286073634, 0.4868521171273169]], [[\"Posterize\", 0.12246560519738336, 0.9132288825898972], [\"Cutout\", 0.6058471327881816, 0.6426901876150983]], [[\"Color\", 0.3693970222695844, 0.038929141432555436], [\"Equalize\", 0.6228052875653781, 0.05064436511347281]], [[\"Color\", 0.7172600331356893, 0.2824542634766688], [\"Color\", 0.425293116261649, 0.1796441283313972]], [[\"Cutout\", 0.7539608428122959, 0.9896141728228921], [\"Solarize\", 0.17811081117364758, 0.9064195503634402]], [[\"AutoContrast\", 0.6761242607012717, 0.6484842446399923], [\"AutoContrast\", 0.1978135076901828, 0.42166879492601317]], [[\"ShearY\", 0.25901666379802524, 0.4770778270322449], [\"Solarize\", 0.7640963173407052, 0.7548463227094349]], [[\"TranslateY\", 0.9222487731783499, 0.33658389819616463], [\"Equalize\", 0.9159112511468139, 0.8877136302394797]], [[\"TranslateX\", 0.8994836977137054, 0.11036053676846591], [\"Sharpness\", 0.9040333410652747, 0.007266095214664592]], [[\"Invert\", 0.627758632524958, 0.8075245097227242], [\"Color\", 0.7525387912148516, 0.05950239294733184]], [[\"TranslateX\", 0.43505193292761857, 0.38108822876120796], [\"TranslateY\", 0.7432578052364004, 0.685678116134759]], [[\"Contrast\", 0.9293507582470425, 0.052266842951356196], [\"Posterize\", 0.45187123977747456, 0.8228290399726782]], [[\"ShearX\", 0.07240786542746291, 0.8945667925365756], [\"Brightness\", 0.5305443506561034, 0.12025274552427578]], [[\"Invert\", 0.40157564448143335, 0.5364745514006678], [\"Posterize\", 0.3316124671813876, 0.43002413237035997]], [[\"ShearY\", 0.7152314630009072, 0.1938339083417453], [\"Invert\", 0.14102478508140615, 0.41047623580174253]], [[\"Equalize\", 0.19862832613849246, 0.5058521685279254], [\"Sharpness\", 0.16481208629549782, 0.29126323102770557]], [[\"Equalize\", 
0.6951591703541872, 0.7294822018800076], [\"ShearX\", 0.8726656726111219, 0.3151484225786487]], [[\"Rotate\", 0.17234370554263745, 0.9356543193000078], [\"TranslateX\", 0.4954374070084091, 0.05496727345849217]], [[\"Contrast\", 0.347405480122842, 0.831553005022885], [\"ShearX\", 0.28946367213071134, 0.11905898704394013]], [[\"Rotate\", 0.28096672507990683, 0.16181284050307398], [\"Color\", 0.6554918515385365, 0.8739728050797386]], [[\"Solarize\", 0.05408073374114053, 0.5357087283758337], [\"Posterize\", 0.42457175211495335, 0.051807130609045515]], [[\"TranslateY\", 0.6216669362331361, 0.9691341207381867], [\"Rotate\", 0.9833579358130944, 0.12227426932415297]], [[\"AutoContrast\", 0.7572619475282892, 0.8062834082727393], [\"Contrast\", 0.1447865402875591, 0.40242646573228436]], [[\"Rotate\", 0.7035658783466086, 0.9840285268256428], [\"Contrast\", 0.04613961510519471, 0.7666683217450163]], [[\"TranslateX\", 0.4580462177951252, 0.6448678609474686], [\"AutoContrast\", 0.14845695613708987, 0.1581134188537895]], [[\"Color\", 0.06795037145259564, 0.9115552821158709], [\"TranslateY\", 0.9972953449677655, 0.6791016521791214]], [[\"Cutout\", 0.3586908443690823, 0.11578558293480945], [\"Color\", 0.49083981719164294, 0.6924851425917189]], [[\"Brightness\", 0.7994717831637873, 0.7887316255321768], [\"Posterize\", 0.01280463502435425, 0.2799086732858721]], [[\"ShearY\", 0.6733451536131859, 0.8122332639516706], [\"AutoContrast\", 0.20433889615637357, 0.29023346867819966]], [[\"TranslateY\", 0.709913512385177, 0.6538196931503809], [\"Invert\", 0.06629795606579203, 0.40913219547548296]], [[\"Sharpness\", 0.4704559834362948, 0.4235993305308414], [\"Equalize\", 0.7578132044306966, 0.9388824249397175]], [[\"AutoContrast\", 0.5281702802395268, 0.8077253610116979], [\"Equalize\", 0.856446858814119, 0.0479755681647559]], [[\"Color\", 0.8244145826797791, 0.038409264586238945], [\"Equalize\", 0.4933123249234237, 0.8251940933672189]], [[\"TranslateX\", 0.23949314158035084, 
0.13576027004706692], [\"ShearX\", 0.8547563771688399, 0.8309262160483606]], [[\"Cutout\", 0.4655680937486001, 0.2819807000622825], [\"Contrast\", 0.8439552665937905, 0.4843617871587037]], [[\"TranslateX\", 0.19142454476784831, 0.7516148119169537], [\"AutoContrast\", 0.8677128351329768, 0.34967990912346336]], [[\"Contrast\", 0.2997868299880966, 0.919508054854469], [\"AutoContrast\", 0.3003418493384957, 0.812314984368542]], [[\"Invert\", 0.1070424236198183, 0.614674386498809], [\"TranslateX\", 0.5010973510899923, 0.20828478805259465]], [[\"Contrast\", 0.6775882415611454, 0.6938564815591685], [\"Cutout\", 0.4814634264207498, 0.3086844939744179]], [[\"TranslateY\", 0.939427105020265, 0.02531043619423201], [\"Contrast\", 0.793754257944812, 0.6676072472565451]], [[\"Sharpness\", 0.09833672397575444, 0.5937214638292085], [\"Rotate\", 0.32530675291753763, 0.08302275740932441]], [[\"Sharpness\", 0.3096455511562728, 0.6726732004553959], [\"TranslateY\", 0.43268997648796537, 0.8755012330217743]], [[\"ShearY\", 0.9290771880324833, 0.22114736271319912], [\"Equalize\", 0.5520199288501478, 0.34269650332060553]], [[\"AutoContrast\", 0.39763980746649374, 0.4597414582725454], [\"Contrast\", 0.941507852412761, 0.24991270562477041]], [[\"Contrast\", 0.19419400547588095, 0.9127524785329233], [\"Invert\", 0.40544905179551727, 0.770081532844878]], [[\"Invert\", 0.30473757368608334, 0.23534811781828846], [\"Cutout\", 0.26090722356706686, 0.5478390909877727]], [[\"Posterize\", 0.49434361308057373, 0.05018423270527428], [\"Color\", 0.3041910676883317, 0.2603810415446437]], [[\"Invert\", 0.5149061746764011, 0.9507449210221298], [\"TranslateY\", 0.4458076521892904, 0.8235358255774426]], [[\"Cutout\", 0.7900006753351625, 0.905578861382507], [\"Cutout\", 0.6707153655762056, 0.8236715672258502]], [[\"Solarize\", 0.8750534386579575, 0.10337670467100568], [\"Posterize\", 0.6102379615481381, 0.9264503915416868]], [[\"ShearY\", 0.08448689377082852, 0.13981233725811626], [\"TranslateX\", 
0.13979689669329498, 0.768774869872818]], [[\"TranslateY\", 0.35752572266759985, 0.22827299847812488], [\"Solarize\", 0.3906957174236011, 0.5663314388307709]], [[\"ShearY\", 0.29155240367061563, 0.8427516352971683], [\"ShearX\", 0.988825367441916, 0.9371258864857649]], [[\"Posterize\", 0.3470780859769458, 0.5467686612321239], [\"Rotate\", 0.5758606274160093, 0.8843838082656007]], [[\"Cutout\", 0.07825368363221841, 0.3230799425855425], [\"Equalize\", 0.2319163865298529, 0.42133965674727325]], [[\"Invert\", 0.41972172597448654, 0.34618622513582953], [\"ShearX\", 0.33638469398198834, 0.9098575535928108]], [[\"Invert\", 0.7322652233340448, 0.7747502957687412], [\"Cutout\", 0.9643121397298106, 0.7983335094634907]], [[\"TranslateY\", 0.30039942808098496, 0.229018798182827], [\"TranslateY\", 0.27009499739380194, 0.6435577237846236]], [[\"Color\", 0.38245274994070644, 0.7030758568461645], [\"ShearX\", 0.4429321461666281, 0.6963787864044149]], [[\"AutoContrast\", 0.8432798685515605, 0.5775214369578088], [\"Brightness\", 0.7140899735355927, 0.8545854720117658]], [[\"Rotate\", 0.14418935535613786, 0.5637968282213426], [\"Color\", 0.7115231912479835, 0.32584796564566776]], [[\"Sharpness\", 0.4023501062807533, 0.4162097130412771], [\"Brightness\", 0.5536372686153666, 0.03004023273348777]], [[\"TranslateX\", 0.7526053265574295, 0.5365938133399961], [\"Cutout\", 0.07914142706557492, 0.7544953091603148]], [[\"TranslateY\", 0.6932934644882822, 0.5302211727137424], [\"Invert\", 0.5040606028391255, 0.6074863635108957]], [[\"Sharpness\", 0.5013938602431629, 0.9572417724333157], [\"TranslateY\", 0.9160516359783026, 0.41798927975391675]], [[\"ShearY\", 0.5130018836722556, 0.30209438428424185], [\"Color\", 0.15017170588500262, 0.20653495360587826]], [[\"TranslateX\", 0.5293300090022314, 0.6407011888285266], [\"Rotate\", 0.4809817860439001, 0.3537850070371702]], [[\"Equalize\", 0.42243081336551014, 0.13472721311046565], [\"Posterize\", 0.4700309639484068, 0.5197704360874883]], 
[[\"AutoContrast\", 0.40674959899687235, 0.7312824868168921], [\"TranslateX\", 0.7397527975920833, 0.7068339877944815]], [[\"TranslateY\", 0.5880995184787206, 0.41294111378078946], [\"ShearX\", 0.3181387627799316, 0.4810010147143413]], [[\"Color\", 0.9898680233928507, 0.13241525577655167], [\"Contrast\", 0.9824932511238534, 0.5081145010853807]], [[\"Invert\", 0.1591854062582687, 0.9760371953250404], [\"Color\", 0.9913399302056851, 0.8388709501056177]], [[\"Rotate\", 0.6427451962231163, 0.9486793975292853], [\"AutoContrast\", 0.8501937877930463, 0.021326757974406196]], [[\"Contrast\", 0.13611684531087598, 0.3050858709483848], [\"Posterize\", 0.06618644756084646, 0.8776928511951034]], [[\"TranslateX\", 0.41021065663839407, 0.4965319749091702], [\"Rotate\", 0.07088831484595115, 0.4435516708223345]], [[\"Sharpness\", 0.3151707977154323, 0.28275482520179296], [\"Invert\", 0.36980384682133804, 0.20813616084536624]], [[\"Cutout\", 0.9979060206661017, 0.39712948644725854], [\"Brightness\", 0.42451052896163466, 0.942623075649937]], [[\"Equalize\", 0.5300853308425644, 0.010183500830128867], [\"AutoContrast\", 0.06930788523716991, 0.5403125318991522]], [[\"Contrast\", 0.010385458959237814, 0.2588311035539086], [\"ShearY\", 0.9347048553928764, 0.10439028366854963]], [[\"ShearY\", 0.9867649486508592, 0.8409258132716434], [\"ShearX\", 0.48031199530836444, 0.7703375364614137]], [[\"ShearY\", 0.04835889473136512, 0.2671081675890492], [\"Brightness\", 0.7856432618509617, 0.8032169570159564]], [[\"Posterize\", 0.11112884927351185, 0.7116956530752987], [\"TranslateY\", 0.7339151092128607, 0.3331241226029017]], [[\"Invert\", 0.13527036207875454, 0.8425980515358883], [\"Color\", 0.7836395778298139, 0.5517059252678862]], [[\"Sharpness\", 0.012541163521491816, 0.013197550692292892], [\"Invert\", 0.6295957932861318, 0.43276521236056054]], [[\"AutoContrast\", 0.7681480991225756, 0.3634284648496289], [\"Brightness\", 0.09708289828517969, 0.45016725043529726]], [[\"Brightness\", 
0.5839450499487329, 0.47525965678316795], [\"Posterize\", 0.43096581990183735, 0.9332382960125196]], [[\"Contrast\", 0.9725334964552795, 0.9142902966863341], [\"Contrast\", 0.12376116410622995, 0.4355916974126801]], [[\"TranslateX\", 0.8572708473690132, 0.02544522678265526], [\"Sharpness\", 0.37902120723460364, 0.9606092969833118]], [[\"TranslateY\", 0.8907359001296927, 0.8011363927236099], [\"Color\", 0.7693777154407178, 0.0936768686746503]], [[\"Equalize\", 0.0002657688243309364, 0.08190798535970034], [\"Rotate\", 0.5215478065240905, 0.5773519995038368]], [[\"TranslateY\", 0.3383007813932477, 0.5733428274739165], [\"Sharpness\", 0.2436110797174722, 0.4757790814590501]], [[\"Cutout\", 0.0957402176213592, 0.8914395928996034], [\"Cutout\", 0.4959915628586883, 0.25890349461645246]], [[\"AutoContrast\", 0.594787300189186, 0.9627455357634459], [\"ShearY\", 0.5136027621132064, 0.10419602450259002]], [[\"Solarize\", 0.4684077211553732, 0.6592850629431414], [\"Sharpness\", 0.2382385935956325, 0.6589291408243176]], [[\"Cutout\", 0.4478786947325877, 0.6893616643143388], [\"TranslateX\", 0.2761781720270474, 0.21750622627277727]], [[\"Sharpness\", 0.39476077929016484, 0.930902796668923], [\"Cutout\", 0.9073012208742808, 0.9881122386614257]], [[\"TranslateY\", 0.0933719180021565, 0.7206252503441172], [\"ShearX\", 0.5151400441789256, 0.6307540083648309]], [[\"AutoContrast\", 0.7772689258806401, 0.8159317013156503], [\"AutoContrast\", 0.5932793713915097, 0.05262217353927168]], [[\"Equalize\", 0.38017352056118914, 0.8084724050448412], [\"ShearY\", 0.7239725628380852, 0.4246314890359326]], [[\"Cutout\", 0.741157483503503, 0.13244380646497977], [\"Invert\", 0.03395378056675935, 0.7140036618098844]], [[\"Rotate\", 0.0662727247460636, 0.7099861732415447], [\"Rotate\", 0.3168532707508249, 0.3553167425022127]], [[\"AutoContrast\", 0.7429303516734129, 0.07117444599776435], [\"Posterize\", 0.5379537435918104, 0.807221330263993]], [[\"TranslateY\", 0.9788586874795164, 0.7967243851346594], 
[\"Invert\", 0.4479103376922362, 0.04260360776727545]], [[\"Cutout\", 0.28318121763188997, 0.7748680701406292], [\"AutoContrast\", 0.9109258369403016, 0.17126397858002085]], [[\"Color\", 0.30183727885272027, 0.46718354750112456], [\"TranslateX\", 0.9628952256033627, 0.10269543754135535]], [[\"AutoContrast\", 0.6316709389784041, 0.84287698792044], [\"Brightness\", 0.5544761629904337, 0.025264772745200004]], [[\"Rotate\", 0.08803313299532567, 0.306059720523696], [\"Invert\", 0.5222165872425064, 0.045935208620454304]], [[\"TranslateY\", 0.21912346831923835, 0.48529224559004436], [\"TranslateY\", 0.15466734731903942, 0.8929485418495068]], [[\"ShearX\", 0.17141022847016563, 0.8607600402165531], [\"ShearX\", 0.6890511341106859, 0.7540899265679949]], [[\"Invert\", 0.9417455522972059, 0.9021733684991224], [\"Solarize\", 0.7693107057723746, 0.7268007946568782]], [[\"Posterize\", 0.02376991543373752, 0.6768442864453844], [\"Rotate\", 0.7736875065112697, 0.6706331753139825]], [[\"Contrast\", 0.3623841610390669, 0.15023657344457686], [\"Equalize\", 0.32975472189318666, 0.05629246869510651]], [[\"Sharpness\", 0.7874882420165824, 0.49535778020457066], [\"Posterize\", 0.09485578893387558, 0.6170768580482466]], [[\"Brightness\", 0.7099280202949585, 0.021523012961427335], [\"Posterize\", 0.2076371467666719, 0.17168118578815206]], [[\"Color\", 0.8546367645761538, 0.832011891505731], [\"Equalize\", 0.6429734783051777, 0.2618995960561532]], [[\"Rotate\", 0.8780793721476224, 0.5920897827664297], [\"ShearX\", 0.5338303685064825, 0.8605424531336439]], [[\"Sharpness\", 0.7504493806631884, 0.9723552387375258], [\"Sharpness\", 0.3206385634203266, 0.45127845905824693]], [[\"ShearX\", 0.23794709526711355, 0.06257530645720066], [\"Solarize\", 0.9132374030587093, 0.6240819934824045]], [[\"Sharpness\", 0.790583587969259, 0.28551171786655405], [\"Contrast\", 0.39872982844590554, 0.09644706751019538]], [[\"Equalize\", 0.30681999237432944, 0.5645045018157916], [\"Posterize\", 0.525966242669736, 
0.7360106111256014]], [[\"TranslateX\", 0.4881014179825114, 0.6317220208872226], [\"ShearX\", 0.2935158995550958, 0.23104608987381758]], [[\"Rotate\", 0.49977116738568395, 0.6610761068306319], [\"TranslateY\", 0.7396566602715687, 0.09386747830045217]], [[\"ShearY\", 0.5909773790018789, 0.16229529902832718], [\"Equalize\", 0.06461394468918358, 0.6661349001143908]], [[\"TranslateX\", 0.7218443721851834, 0.04435720302810153], [\"Cutout\", 0.986686540951642, 0.734771197038724]], [[\"ShearX\", 0.5353800096911666, 0.8120139502148365], [\"Equalize\", 0.4613239578449774, 0.5159528929124512]], [[\"Color\", 0.0871713897628631, 0.7708895183198486], [\"Solarize\", 0.5811386808912219, 0.35260648120785887]], [[\"Posterize\", 0.3910857927477053, 0.4329219555775561], [\"Color\", 0.9115983668789468, 0.6043069944145293]], [[\"Posterize\", 0.07493067637060635, 0.4258000066006725], [\"AutoContrast\", 0.4740957581389772, 0.49069587151651295]], [[\"Rotate\", 0.34086200894268937, 0.9812149332288828], [\"Solarize\", 0.6801012471371733, 0.17271491146753837]], [[\"Color\", 0.20542270872895207, 0.5532087457727624], [\"Contrast\", 0.2718692536563381, 0.20313287569510108]], [[\"Equalize\", 0.05199827210980934, 0.0832859890912212], [\"AutoContrast\", 0.8092395764794107, 0.7778945136511004]], [[\"Sharpness\", 0.1907689513066838, 0.7705754572256907], [\"Color\", 0.3911178658498049, 0.41791326933095485]], [[\"Solarize\", 0.19611855804748257, 0.2407807485604081], [\"AutoContrast\", 0.5343964972940493, 0.9034209455548394]], [[\"Color\", 0.43586520148538865, 0.4711164626521439], [\"ShearY\", 0.28635408186820555, 0.8417816793020271]], [[\"Cutout\", 0.09818482420382535, 0.1649767430954796], [\"Cutout\", 0.34582392911178494, 0.3927982995799828]], [[\"ShearX\", 0.001253882705272269, 0.48661629027584596], [\"Solarize\", 0.9229221435457137, 0.44374894836659073]], [[\"Contrast\", 0.6829734655718668, 0.8201750485099037], [\"Cutout\", 0.7886756837648936, 0.8423285219631946]], [[\"TranslateY\", 
0.857017093561528, 0.3038537151773969], [\"Invert\", 0.12809228606383538, 0.23637166191748027]], [[\"Solarize\", 0.9829027723424164, 0.9723093910674763], [\"Color\", 0.6346495302126811, 0.5405494753107188]], [[\"AutoContrast\", 0.06868643520377715, 0.23758659417688077], [\"AutoContrast\", 0.6648225411500879, 0.5618315648260103]], [[\"Invert\", 0.44202305603311676, 0.9945938909685547], [\"Equalize\", 0.7991650497684454, 0.16014142656347097]], [[\"AutoContrast\", 0.8778631604769588, 0.03951977631894088], [\"ShearY\", 0.8495160088963707, 0.35771447321250416]], [[\"Color\", 0.5365078341001592, 0.21102444169782308], [\"ShearX\", 0.7168869678248874, 0.3904298719872734]], [[\"TranslateX\", 0.6517203786101899, 0.6467598990650437], [\"Invert\", 0.26552491504364517, 0.1210812827294625]], [[\"Posterize\", 0.35196021684368994, 0.8420648319941891], [\"Invert\", 0.7796829363930631, 0.9520895999240896]], [[\"Sharpness\", 0.7391572148971984, 0.4853940393452846], [\"TranslateX\", 0.7641915295592839, 0.6351349057666782]], [[\"Posterize\", 0.18485880221115913, 0.6117603277356728], [\"Rotate\", 0.6541660490605724, 0.5704041108375348]], [[\"TranslateY\", 0.27517423188070533, 0.6610080904072458], [\"Contrast\", 0.6091250547289317, 0.7702443247557892]], [[\"Equalize\", 0.3611798581067118, 0.6623615672642768], [\"TranslateX\", 0.9537265090885917, 0.06352772509358584]], [[\"ShearX\", 0.09720029389103535, 0.7800423126320308], [\"Invert\", 0.30314352455858884, 0.8519925470889914]], [[\"Brightness\", 0.06931529763458055, 0.57760829499712], [\"Cutout\", 0.637251974467394, 0.7184346129191052]], [[\"AutoContrast\", 0.5026722100286064, 0.32025257156541886], [\"Contrast\", 0.9667478703047919, 0.14178519432669368]], [[\"Equalize\", 0.5924463845816984, 0.7187610262181517], [\"TranslateY\", 0.7059479079159405, 0.06551471830655187]], [[\"Sharpness\", 0.18161164512332928, 0.7576138481173385], [\"Brightness\", 0.19191138767695282, 0.7865880269424701]], [[\"Brightness\", 0.36780861866078696, 
0.0677855546737901], [\"AutoContrast\", 0.8491446654142264, 0.09217782099938121]], [[\"TranslateY\", 0.06011399855120858, 0.8374487034710264], [\"TranslateY\", 0.8373922962070498, 0.1991295720254297]], [[\"Posterize\", 0.702559916122481, 0.30257509683007755], [\"Rotate\", 0.249899495398891, 0.9370437251176267]], [[\"ShearX\", 0.9237874098232075, 0.26241907483351146], [\"Brightness\", 0.7221766836146657, 0.6880749752986671]], [[\"Cutout\", 0.37994098189193104, 0.7836874473657957], [\"ShearX\", 0.9212861960976824, 0.8140948561570449]], [[\"Posterize\", 0.2584098274786417, 0.7990847652004848], [\"Invert\", 0.6357731737590063, 0.1066304859116326]], [[\"Sharpness\", 0.4412790857539922, 0.9692465283229825], [\"Color\", 0.9857401617339051, 0.26755393929808713]], [[\"Equalize\", 0.22348671644912665, 0.7370019910830038], [\"Posterize\", 0.5396106339575417, 0.5559536849843303]], [[\"Equalize\", 0.8742967663495852, 0.2797122599926307], [\"Rotate\", 0.4697322053105951, 0.8769872942579476]], [[\"Sharpness\", 0.44279911640509206, 0.07729581896071613], [\"Cutout\", 0.3589177366154631, 0.2704031551235969]], [[\"TranslateX\", 0.614216412574085, 0.47929659784170453], [\"Brightness\", 0.6686234118438007, 0.05700784068205689]], [[\"ShearY\", 0.17920614630857634, 0.4699685075827862], [\"Color\", 0.38251870810870003, 0.7262706923005887]], [[\"Solarize\", 0.4951799001144561, 0.212775278026479], [\"TranslateX\", 0.8666105646463097, 0.6750496637519537]], [[\"Color\", 0.8110864170849051, 0.5154263861958484], [\"Sharpness\", 0.2489044083898776, 0.3763372541462343]], [[\"Cutout\", 0.04888193613483871, 0.06041664638981603], [\"Color\", 0.06438587718683708, 0.5797881428892969]], [[\"Rotate\", 0.032427448352152166, 0.4445797818376559], [\"Posterize\", 0.4459357828482998, 0.5879865187630777]], [[\"ShearX\", 0.1617179557693058, 0.050796802246318884], [\"Cutout\", 0.8142465452060423, 0.3836391305618707]], [[\"TranslateY\", 0.1806857249209416, 0.36697730355422675], [\"Rotate\", 0.9897576550818276, 
0.7483432452225264]], [[\"Brightness\", 0.18278016458098223, 0.952352527690299], [\"Cutout\", 0.3269735224453044, 0.3924869905012752]], [[\"ShearX\", 0.870832707718742, 0.3214743207190739], [\"Cutout\", 0.6805560681792573, 0.6984188155282459]], [[\"TranslateX\", 0.4157118388833776, 0.3964216288135384], [\"TranslateX\", 0.3253012682285006, 0.624835513104391]], [[\"Contrast\", 0.7678168037628158, 0.31033802162621793], [\"ShearX\", 0.27022424855977134, 0.3773245605126201]], [[\"TranslateX\", 0.37812621869017593, 0.7657993810740699], [\"Rotate\", 0.18081890120092914, 0.8893511219618171]], [[\"Posterize\", 0.8735859716088367, 0.18243793043074286], [\"TranslateX\", 0.90435994250313, 0.24116383818819453]], [[\"Invert\", 0.06666709253664793, 0.3881076083593933], [\"TranslateX\", 0.3783333964963522, 0.14411014979589543]], [[\"Equalize\", 0.8741147867162096, 0.14203839235846816], [\"TranslateX\", 0.7801536758037405, 0.6952401607812743]], [[\"Cutout\", 0.6095335117944475, 0.5679026063718094], [\"Posterize\", 0.06433868172233115, 0.07139559616012303]], [[\"TranslateY\", 0.3020364047315408, 0.21459810361176246], [\"Cutout\", 0.7097677414888889, 0.2942144632587549]], [[\"Brightness\", 0.8223662419048653, 0.195700694016108], [\"Invert\", 0.09345407040803999, 0.779843655582099]], [[\"TranslateY\", 0.7353462929356228, 0.0468520680237382], [\"Cutout\", 0.36530918247940425, 0.3897292909049672]], [[\"Invert\", 0.9676896451721213, 0.24473302189463453], [\"Invert\", 0.7369271521408992, 0.8193267003356975]], [[\"Sharpness\", 0.8691871972054326, 0.4441713912682772], [\"ShearY\", 0.47385584832119887, 0.23521684584675429]], [[\"ShearY\", 0.9266946026184021, 0.7611986713358834], [\"TranslateX\", 0.6195820760253926, 0.14661428669483678]], [[\"Sharpness\", 0.08470870576026868, 0.3380219099907229], [\"TranslateX\", 0.3062343307496658, 0.7135777338095889]], [[\"Sharpness\", 0.5246448204194909, 0.3193061215236702], [\"ShearX\", 0.8160637208508432, 0.9720697396582731]], [[\"Posterize\", 
0.5249259956549405, 0.3492042382504774], [\"Invert\", 0.8183138799547441, 0.11107271762524618]], [[\"TranslateY\", 0.210869733350744, 0.7138905840721885], [\"Sharpness\", 0.7773226404450125, 0.8005353621959782]], [[\"Posterize\", 0.33067522385556025, 0.32046239220630124], [\"AutoContrast\", 0.18918147708798405, 0.4646281070474484]], [[\"TranslateX\", 0.929502026131094, 0.8029128121556285], [\"Invert\", 0.7319794306118105, 0.5421878712623392]], [[\"ShearX\", 0.25645940834182723, 0.42754710760160963], [\"ShearX\", 0.44640695310173306, 0.8132185532296811]], [[\"Color\", 0.018436846416536312, 0.8439313862001113], [\"Sharpness\", 0.3722867661453415, 0.5103570873163251]], [[\"TranslateX\", 0.7285989086776543, 0.4809027697099264], [\"TranslateY\", 0.9740807004893643, 0.8241085438636939]], [[\"Posterize\", 0.8721868989693397, 0.5700907310383815], [\"Posterize\", 0.4219074410577852, 0.8032643572845402]], [[\"Contrast\", 0.9811380092558266, 0.8498397471632105], [\"Sharpness\", 0.8380884329421594, 0.18351306571903125]], [[\"TranslateY\", 0.3878939366762001, 0.4699103438753077], [\"Invert\", 0.6055556353233807, 0.8774727658400134]], [[\"TranslateY\", 0.052317005261018346, 0.39471450378745787], [\"ShearX\", 0.8612486845942395, 0.28834103278807466]], [[\"Color\", 0.511993351208063, 0.07251427040525904], [\"Solarize\", 0.9898097047354855, 0.299761565689576]], [[\"Equalize\", 0.2721248231619904, 0.6870975927455507], [\"Cutout\", 0.8787327242363994, 0.06228061428917098]], [[\"Invert\", 0.8931880335225408, 0.49720931867378193], [\"Posterize\", 0.9619698792159256, 0.17859639696940088]], [[\"Posterize\", 0.0061688075074411985, 0.08082938731035938], [\"Brightness\", 0.27745128028826993, 0.8638528796903816]], [[\"ShearY\", 0.9140200609222026, 0.8240421430867707], [\"Invert\", 0.651734417415332, 0.08871906369930926]], [[\"Color\", 0.45585010413511196, 0.44705070078574316], [\"Color\", 0.26394624901633146, 0.11242877788650807]], [[\"ShearY\", 0.9200278466372522, 0.2995901331149652], 
[\"Cutout\", 0.8445407215116278, 0.7410524214287446]], [[\"ShearY\", 0.9950483746990132, 0.112964468262847], [\"ShearY\", 0.4118332303218585, 0.44839613407553636]], [[\"Contrast\", 0.7905821952255192, 0.23360046159385106], [\"Posterize\", 0.8611787233956044, 0.8984260048943528]], [[\"TranslateY\", 0.21448061359312853, 0.8228112806838331], [\"Contrast\", 0.8992297266152983, 0.9179231590570998]], [[\"Invert\", 0.3924194798946006, 0.31830516468371495], [\"Rotate\", 0.8399556845248508, 0.3764892022932781]], [[\"Cutout\", 0.7037916990046816, 0.9214620769502728], [\"AutoContrast\", 0.02913794613018239, 0.07808607528954048]], [[\"ShearY\", 0.6041490474263381, 0.6094184590800105], [\"Equalize\", 0.2932954517354919, 0.5840888946081727]], [[\"ShearX\", 0.6056801676269449, 0.6948580442549543], [\"Cutout\", 0.3028001021044615, 0.15117101733894078]], [[\"Brightness\", 0.8011486803860253, 0.18864079729374195], [\"Solarize\", 0.014965327213230961, 0.8842620292527029]], [[\"Invert\", 0.902244007904273, 0.5634673798052033], [\"Equalize\", 0.13422913507398349, 0.4110956745883727]], [[\"TranslateY\", 0.9981773319103838, 0.09568550987216096], [\"Color\", 0.7627662124105109, 0.8494409737419493]], [[\"Cutout\", 0.3013527640416782, 0.03377226729898486], [\"ShearX\", 0.5727964831614619, 0.8784196638222834]], [[\"TranslateX\", 0.6050722426803684, 0.3650103962378708], [\"TranslateX\", 0.8392084589130886, 0.6479816470292911]], [[\"Rotate\", 0.5032806606500023, 0.09276980118866307], [\"TranslateY\", 0.7800234515261191, 0.18896454379343308]], [[\"Invert\", 0.9266027256244017, 0.8246111062199752], [\"Contrast\", 0.12112023357797697, 0.33870762271759436]], [[\"Brightness\", 0.8688784756993134, 0.17263759696106606], [\"ShearX\", 0.5133700431071326, 0.6686811994542494]], [[\"Invert\", 0.8347840440941976, 0.03774897445901726], [\"Brightness\", 0.24925057499276548, 0.04293631677355758]], [[\"Color\", 0.5998145279485104, 0.4820093200092529], [\"TranslateY\", 0.6709586184077769, 0.07377334081382858]], 
[[\"AutoContrast\", 0.7898846202957984, 0.325293526672498], [\"Contrast\", 0.5156435596826767, 0.2889223168660645]], [[\"ShearX\", 0.08147389674998307, 0.7978924681113669], [\"Contrast\", 0.7270003309106291, 0.009571215234092656]], [[\"Sharpness\", 0.417607614440786, 0.9532566433338661], [\"Posterize\", 0.7186586546796782, 0.6936509907073302]], [[\"ShearX\", 0.9555300215926675, 0.1399385550263872], [\"Color\", 0.9981041061848231, 0.5037462398323248]], [[\"Equalize\", 0.8003487831375474, 0.5413759363796945], [\"ShearY\", 0.0026607045117773565, 0.019262273030984933]], [[\"TranslateY\", 0.04845391502469176, 0.10063445212118283], [\"Cutout\", 0.8273170186786745, 0.5045257728554577]], [[\"TranslateX\", 0.9690985344978033, 0.505202991815533], [\"TranslateY\", 0.7255326592928096, 0.02103609500701631]], [[\"Solarize\", 0.4030771176836736, 0.8424237871457034], [\"Cutout\", 0.28705805963928965, 0.9601617893682582]], [[\"Sharpness\", 0.16865290353070606, 0.6899673563468826], [\"Posterize\", 0.3985430034869616, 0.6540651997730774]], [[\"ShearY\", 0.21395578485362032, 0.09519358818949009], [\"Solarize\", 0.6692821708524135, 0.6462523623552485]], [[\"AutoContrast\", 0.912360598054091, 0.029800239085051583], [\"Invert\", 0.04319256403746308, 0.7712501517098587]], [[\"ShearY\", 0.9081969961839055, 0.4581560239984739], [\"AutoContrast\", 0.5313894814729159, 0.5508393335751848]], [[\"ShearY\", 0.860528568424097, 0.8196987216301588], [\"Posterize\", 0.41134650331494205, 0.3686632018978778]], [[\"AutoContrast\", 0.8753670810078598, 0.3679438326304749], [\"Invert\", 0.010444228965415858, 0.9581244779208277]], [[\"Equalize\", 0.07071836206680682, 0.7173594756186462], [\"Brightness\", 0.06111434312497388, 0.16175064669049277]], [[\"AutoContrast\", 0.10522219073562122, 0.9768776621069855], [\"TranslateY\", 0.2744795945215529, 0.8577967957127298]], [[\"AutoContrast\", 0.7628146493166175, 0.996157376418147], [\"Contrast\", 0.9255565598518469, 0.6826126662976868]], [[\"TranslateX\", 
0.017225816199011312, 0.2470332491402908], [\"Solarize\", 0.44048494909493807, 0.4492422515972162]], [[\"ShearY\", 0.38885252627795064, 0.10272256704901939], [\"Equalize\", 0.686154959829183, 0.8973517148655337]], [[\"Rotate\", 0.29628991573592967, 0.16639926575004715], [\"ShearX\", 0.9013782324726413, 0.0838318162771563]], [[\"Color\", 0.04968391374688563, 0.6138600739645352], [\"Invert\", 0.11177127838716283, 0.10650198522261578]], [[\"Invert\", 0.49655016367624016, 0.8603374164829688], [\"ShearY\", 0.40625439617553727, 0.4516437918820778]], [[\"TranslateX\", 0.15015718916062992, 0.13867777502116208], [\"Brightness\", 0.3374464418810188, 0.7613355669536931]], [[\"Invert\", 0.644644393321966, 0.19005804481199562], [\"AutoContrast\", 0.2293259789431853, 0.30335723256340186]], [[\"Solarize\", 0.004968793254801596, 0.5370892072646645], [\"Contrast\", 0.9136902637865596, 0.9510587477779084]], [[\"Rotate\", 0.38991518440867123, 0.24796987467455756], [\"Sharpness\", 0.9911180315669776, 0.5265657122981591]], [[\"Solarize\", 0.3919646484436238, 0.6814994037194909], [\"Sharpness\", 0.4920838987787103, 0.023425724294012018]], [[\"TranslateX\", 0.25107587874378867, 0.5414936560189212], [\"Cutout\", 0.7932919623814599, 0.9891303444820169]], [[\"Brightness\", 0.07863012174272999, 0.045175652208389594], [\"Solarize\", 0.889609658064552, 0.8228793315963948]], [[\"Cutout\", 0.20477096178169596, 0.6535063675027364], [\"ShearX\", 0.9216318577173639, 0.2908690977359947]], [[\"Contrast\", 0.7035118947423187, 0.45982709058312454], [\"Contrast\", 0.7130268070749464, 0.8635123354235471]], [[\"Sharpness\", 0.26319477541228997, 0.7451278726847078], [\"Rotate\", 0.8170499362173754, 0.13998593411788207]], [[\"Rotate\", 0.8699365715164192, 0.8878057721750832], [\"Equalize\", 0.06682350555715044, 0.7164702080630689]], [[\"ShearY\", 0.3137466057521987, 0.6747433496011368], [\"Rotate\", 0.42118828936218133, 0.980121180104441]], [[\"Solarize\", 0.8470375049950615, 0.15287589264139223], 
[\"Cutout\", 0.14438435054693055, 0.24296463267973512]], [[\"TranslateY\", 0.08822241792224905, 0.36163911974799356], [\"TranslateY\", 0.11729726813270003, 0.6230889726445291]], [[\"ShearX\", 0.7720112337718541, 0.2773292905760122], [\"Sharpness\", 0.756290929398613, 0.27830353710507705]], [[\"Color\", 0.33825031007968287, 0.4657590047522816], [\"ShearY\", 0.3566628994713067, 0.859750504071925]], [[\"TranslateY\", 0.06830147433378053, 0.9348778582086664], [\"TranslateX\", 0.15509346516378553, 0.26320778885339435]], [[\"Posterize\", 0.20266751150740858, 0.008351463842578233], [\"Sharpness\", 0.06506971109417259, 0.7294471760284555]], [[\"TranslateY\", 0.6278911394418829, 0.8702181892620695], [\"Invert\", 0.9367073860264247, 0.9219230428944211]], [[\"Sharpness\", 0.1553425337673321, 0.17601557714491345], [\"Solarize\", 0.7040449681338888, 0.08764313147327729]], [[\"Equalize\", 0.6082233904624664, 0.4177428549911376], [\"AutoContrast\", 0.04987405274618151, 0.34516208204700916]], [[\"Brightness\", 0.9616085936167699, 0.14561237331885468], [\"Solarize\", 0.8927707736296572, 0.31176907850205704]], [[\"Brightness\", 0.6707778304730988, 0.9046457117525516], [\"Brightness\", 0.6801448953060988, 0.20015313057149042]], [[\"Color\", 0.8292680845499386, 0.5181603879593888], [\"Brightness\", 0.08549161770369762, 0.6567870536463203]], [[\"ShearY\", 0.267802208078051, 0.8388133819588173], [\"Sharpness\", 0.13453409120796123, 0.10028351311149486]], [[\"Posterize\", 0.775796593610272, 0.05359034561289766], [\"Cutout\", 0.5067360625733027, 0.054451986840317934]], [[\"TranslateX\", 0.5845238647690084, 0.7507147553486293], [\"Brightness\", 0.2642051786121197, 0.2578358927056452]], [[\"Cutout\", 0.10787517610922692, 0.8147986902794228], [\"Contrast\", 0.2190149206329539, 0.902210615462459]], [[\"TranslateX\", 0.5663614214181296, 0.05309965916414028], [\"ShearX\", 0.9682797885154938, 0.41791929533938466]], [[\"ShearX\", 0.2345325577621098, 0.383780128037189], [\"TranslateX\", 
0.7298083748149163, 0.644325797667087]], [[\"Posterize\", 0.5138725709682734, 0.7901809917259563], [\"AutoContrast\", 0.7966018627776853, 0.14529337543427345]], [[\"Invert\", 0.5973031989249785, 0.417399314592829], [\"Solarize\", 0.9147539948653116, 0.8221272315548086]], [[\"Posterize\", 0.601596043336383, 0.18969646160963938], [\"Color\", 0.7527275484079655, 0.431793831326888]], [[\"Equalize\", 0.6731483454430538, 0.7866786558207602], [\"TranslateX\", 0.97574396899191, 0.5970255778044692]], [[\"Cutout\", 0.15919495850169718, 0.8916094305850562], [\"Invert\", 0.8351348834751027, 0.4029937360314928]], [[\"Invert\", 0.5894085405226027, 0.7283806854157764], [\"Brightness\", 0.3973976860470554, 0.949681121498567]], [[\"AutoContrast\", 0.3707914135327408, 0.21192068592079616], [\"ShearX\", 0.28040127351140676, 0.6754553511344856]], [[\"Solarize\", 0.07955132378694896, 0.15073572961927306], [\"ShearY\", 0.5735850168851625, 0.27147326850217746]], [[\"Equalize\", 0.678653949549764, 0.8097796067861455], [\"Contrast\", 0.2283048527510083, 0.15507804874474185]], [[\"Equalize\", 0.286013868374536, 0.186785848694501], [\"Posterize\", 0.16319021740810458, 0.1201304443285659]], [[\"Sharpness\", 0.9601590830563757, 0.06267915026513238], [\"AutoContrast\", 0.3813920685124327, 0.294224403296912]], [[\"Brightness\", 0.2703246632402241, 0.9168405377492277], [\"ShearX\", 0.6156009855831097, 0.4955986055846403]], [[\"Color\", 0.9065504424987322, 0.03393612216080133], [\"ShearY\", 0.6768595880405884, 0.9981068127818191]], [[\"Equalize\", 0.28812842368483904, 0.300387487349145], [\"ShearY\", 0.28812248704858345, 0.27105076231533964]], [[\"Brightness\", 0.6864882730513477, 0.8205553299102412], [\"Cutout\", 0.45995236371265424, 0.5422030370297759]], [[\"Color\", 0.34941404877084326, 0.25857961830158516], [\"AutoContrast\", 0.3451390878441899, 0.5000938249040454]], [[\"Invert\", 0.8268247541815854, 0.6691380821226468], [\"Cutout\", 0.46489193601530476, 0.22620873109485895]], [[\"Rotate\", 
0.17879730528062376, 0.22670425330593935], [\"Sharpness\", 0.8692795688221834, 0.36586055020855723]], [[\"Brightness\", 0.31203975139659634, 0.6934046293010939], [\"Cutout\", 0.31649437872271236, 0.08078625004157935]], [[\"Cutout\", 0.3119482836150119, 0.6397160035509996], [\"Contrast\", 0.8311248624784223, 0.22897510169718616]], [[\"TranslateX\", 0.7631157841429582, 0.6482890521284557], [\"Brightness\", 0.12681196272427664, 0.3669813784257344]], [[\"TranslateX\", 0.06027722649179801, 0.3101104512201861], [\"Sharpness\", 0.5652076706249394, 0.05210008400968136]], [[\"AutoContrast\", 0.39213552101583127, 0.5047021194355596], [\"ShearY\", 0.7164003055682187, 0.8063370761002899]], [[\"Solarize\", 0.9574307011238342, 0.21472064809226854], [\"AutoContrast\", 0.8102612285047174, 0.716870148067014]], [[\"Rotate\", 0.3592634277567387, 0.6452602893051465], [\"AutoContrast\", 0.27188430331411506, 0.06003099168464854]], [[\"Cutout\", 0.9529536554825503, 0.5285505311027461], [\"Solarize\", 0.08478231903311029, 0.15986449762728216]], [[\"TranslateY\", 0.31176130458018936, 0.5642853506158253], [\"Equalize\", 0.008890883901317648, 0.5146121040955942]], [[\"Color\", 0.40773645085566157, 0.7110398926612682], [\"Color\", 0.18233100156439364, 0.7830036002758337]], [[\"Posterize\", 0.5793809197821732, 0.043748553135581236], [\"Invert\", 0.4479962016131668, 0.7349663010359488]], [[\"TranslateX\", 0.1994882312299382, 0.05216859488899439], [\"Rotate\", 0.48288726352035416, 0.44713829026777585]], [[\"Posterize\", 0.22122838185154603, 0.5034546841241283], [\"TranslateX\", 0.2538745835410222, 0.6129055170893385]], [[\"Color\", 0.6786559960640814, 0.4529749369803212], [\"Equalize\", 0.30215879674415336, 0.8733394611096772]], [[\"Contrast\", 0.47316062430673456, 0.46669538897311447], [\"Invert\", 0.6514906551984854, 0.3053339444067804]], [[\"Equalize\", 0.6443202625334524, 0.8689731394616441], [\"Color\", 0.7549183794057628, 0.8889001426329578]], [[\"Solarize\", 0.616709740662654, 
0.7792180816399313], [\"ShearX\", 0.9659155537406062, 0.39436937531179495]], [[\"Equalize\", 0.23694011299406226, 0.027711152164392128], [\"TranslateY\", 0.1677339686527083, 0.3482126536808231]], [[\"Solarize\", 0.15234175951790285, 0.7893840414281341], [\"TranslateX\", 0.2396395768284183, 0.27727219214979715]], [[\"Contrast\", 0.3792017455380605, 0.32323660409845334], [\"Contrast\", 0.1356037413846466, 0.9127772969992305]], [[\"ShearX\", 0.02642732222284716, 0.9184662576502115], [\"Equalize\", 0.11504884472142995, 0.8957638893097964]], [[\"TranslateY\", 0.3193812913345325, 0.8828100030493128], [\"ShearY\", 0.9374975727563528, 0.09909415611083694]], [[\"AutoContrast\", 0.025840721736048122, 0.7941037581373024], [\"TranslateY\", 0.498518003323313, 0.5777122846572548]], [[\"ShearY\", 0.6042199307830248, 0.44809668754508836], [\"Cutout\", 0.3243978207701482, 0.9379740926294765]], [[\"ShearY\", 0.6858549297583574, 0.9993252035788924], [\"Sharpness\", 0.04682428732773203, 0.21698099707915652]], [[\"ShearY\", 0.7737469436637263, 0.8810127181224531], [\"ShearY\", 0.8995655445246451, 0.4312416220354539]], [[\"TranslateY\", 0.4953094136709374, 0.8144161580138571], [\"Solarize\", 0.26301211718928097, 0.518345311180405]], [[\"Brightness\", 0.8820246486031275, 0.571075863786249], [\"ShearX\", 0.8586669146703955, 0.0060476383595142735]], [[\"Sharpness\", 0.20519233710982254, 0.6144574759149729], [\"Posterize\", 0.07976625267460813, 0.7480145046726968]], [[\"ShearY\", 0.374075419680195, 0.3386105402023202], [\"ShearX\", 0.8228083637082115, 0.5885174783155361]], [[\"Brightness\", 0.3528780713814561, 0.6999884884306623], [\"Sharpness\", 0.3680348120526238, 0.16953358258959617]], [[\"Brightness\", 0.24891223104442084, 0.7973853494920095], [\"TranslateX\", 0.004256803835524736, 0.0470216343108546]], [[\"Posterize\", 0.1947344282646012, 0.7694802711054367], [\"Cutout\", 0.9594385534844785, 0.5469744140592429]], [[\"Invert\", 0.19012504762806026, 0.7816140211434693], [\"TranslateY\", 
0.17479746932338402, 0.024249345245078602]], [[\"Rotate\", 0.9669262055946796, 0.510166180775991], [\"TranslateX\", 0.8990602034610352, 0.6657802719304693]], [[\"ShearY\", 0.5453049050407278, 0.8476872739603525], [\"Cutout\", 0.14226529093962592, 0.15756960661106634]], [[\"Equalize\", 0.5895291156113004, 0.6797218994447763], [\"TranslateY\", 0.3541442192192753, 0.05166001155849864]], [[\"Equalize\", 0.39530681662726097, 0.8448335365081087], [\"Brightness\", 0.6785483272734143, 0.8805568647038574]], [[\"Cutout\", 0.28633258271917905, 0.7750870268336066], [\"Equalize\", 0.7221097824537182, 0.5865506280531162]], [[\"Posterize\", 0.9044429629421187, 0.4620266401793388], [\"Invert\", 0.1803008045494473, 0.8073190766288534]], [[\"Sharpness\", 0.7054649148075851, 0.3877207948962055], [\"TranslateX\", 0.49260224225927285, 0.8987462620731029]], [[\"Sharpness\", 0.11196934729294483, 0.5953704422694938], [\"Contrast\", 0.13969334315069737, 0.19310569898434204]], [[\"Posterize\", 0.5484346101051778, 0.7914140118600685], [\"Brightness\", 0.6428044691630473, 0.18811316670808076]], [[\"Invert\", 0.22294834094984717, 0.05173157689962704], [\"Cutout\", 0.6091129168510456, 0.6280845506243643]], [[\"AutoContrast\", 0.5726444076195267, 0.2799840903601295], [\"Cutout\", 0.3055752727786235, 0.591639807512993]], [[\"Brightness\", 0.3707116723204462, 0.4049175910826627], [\"Rotate\", 0.4811601625588309, 0.2710760253723644]], [[\"ShearY\", 0.627791719653608, 0.6877498291550205], [\"TranslateX\", 0.8751753308366824, 0.011164650018719358]], [[\"Posterize\", 0.33832547954522263, 0.7087039872581657], [\"Posterize\", 0.6247474435007484, 0.7707784192114796]], [[\"Contrast\", 0.17620186308493468, 0.9946224854942095], [\"Solarize\", 0.5431896088395964, 0.5867904203742308]], [[\"ShearX\", 0.4667959516719652, 0.8938082224109446], [\"TranslateY\", 0.7311343008292865, 0.6829842246020277]], [[\"ShearX\", 0.6130281467237769, 0.9924010909612302], [\"Brightness\", 0.41039241699696916, 
0.9753218875311392]], [[\"TranslateY\", 0.0747250386427123, 0.34602725521067534], [\"Rotate\", 0.5902597465515901, 0.361094672021087]], [[\"Invert\", 0.05234890878959486, 0.36914978664919407], [\"Sharpness\", 0.42140532878231374, 0.19204058551048275]], [[\"ShearY\", 0.11590485361909497, 0.6518540857972316], [\"Invert\", 0.6482444740361704, 0.48256237896163945]], [[\"Rotate\", 0.4931329446923608, 0.037076242417301675], [\"Contrast\", 0.9097939772412852, 0.5619594905306389]], [[\"Posterize\", 0.7311032479626216, 0.4796364593912915], [\"Color\", 0.13912123993932402, 0.03997286439663705]], [[\"AutoContrast\", 0.6196602944085344, 0.2531430457527588], [\"Rotate\", 0.5583937060431972, 0.9893379795224023]], [[\"AutoContrast\", 0.8847753125072959, 0.19123028952580057], [\"TranslateY\", 0.494361716097206, 0.14232297727461696]], [[\"Invert\", 0.6212360716340707, 0.033898871473033165], [\"AutoContrast\", 0.30839896957008295, 0.23603569542166247]], [[\"Equalize\", 0.8255583546605049, 0.613736933157845], [\"AutoContrast\", 0.6357166629525485, 0.7894617347709095]], [[\"Brightness\", 0.33840706322846814, 0.07917167871493658], [\"ShearY\", 0.15693175752528676, 0.6282773652129153]], [[\"Cutout\", 0.7550520024859294, 0.08982367300605598], [\"ShearX\", 0.5844942417320858, 0.36051195083380105]]]\n    return p\n\n\ndef fa_resnet50_rimagenet():\n    p = [[[\"ShearY\", 0.14143816458479197, 0.513124791615952], [\"Sharpness\", 0.9290316227291179, 0.9788406212603302]], [[\"Color\", 0.21502874228385338, 0.3698477943880306], [\"TranslateY\", 0.49865058747734736, 0.4352676987103321]], [[\"Brightness\", 0.6603452126485386, 0.6990174510500261], [\"Cutout\", 0.7742953773992511, 0.8362550883640804]], [[\"Posterize\", 0.5188375788270497, 0.9863648925446865], [\"TranslateY\", 0.8365230108655313, 0.6000972236440252]], [[\"ShearY\", 0.9714994964711299, 0.2563663552809896], [\"Equalize\", 0.8987567223581153, 0.1181761775609772]], [[\"Sharpness\", 0.14346409304565366, 0.5342189791746006], [\"Sharpness\", 
0.1219714162835897, 0.44746801278319975]], [[\"TranslateX\", 0.08089260772173967, 0.028011721602479833], [\"TranslateX\", 0.34767877352421406, 0.45131294688688794]], [[\"Brightness\", 0.9191164585327378, 0.5143232242627864], [\"Color\", 0.9235247849934283, 0.30604586249462173]], [[\"Contrast\", 0.4584173187505879, 0.40314219914942756], [\"Rotate\", 0.550289356406774, 0.38419022293237126]], [[\"Posterize\", 0.37046156420799325, 0.052693291117634544], [\"Cutout\", 0.7597581409366909, 0.7535799791937421]], [[\"Color\", 0.42583964114658746, 0.6776641859552079], [\"ShearY\", 0.2864805671096011, 0.07580175477739545]], [[\"Brightness\", 0.5065952125552232, 0.5508640233704984], [\"Brightness\", 0.4760021616081475, 0.3544313318097987]], [[\"Posterize\", 0.5169630851995185, 0.9466018906715961], [\"Posterize\", 0.5390336503396841, 0.1171015788193209]], [[\"Posterize\", 0.41153170909576176, 0.7213063942615204], [\"Rotate\", 0.6232230424824348, 0.7291984098675746]], [[\"Color\", 0.06704687234714028, 0.5278429246040438], [\"Sharpness\", 0.9146652195810183, 0.4581415618941407]], [[\"ShearX\", 0.22404644446773492, 0.6508620171913467], [\"Brightness\", 0.06421961538672451, 0.06859528721039095]], [[\"Rotate\", 0.29864103693134797, 0.5244313199644495], [\"Sharpness\", 0.4006161706584276, 0.5203708477368657]], [[\"AutoContrast\", 0.5748186910788027, 0.8185482599354216], [\"Posterize\", 0.9571441684265188, 0.1921474117448481]], [[\"ShearY\", 0.5214786760436251, 0.8375629059785009], [\"Invert\", 0.6872393349333636, 0.9307694335024579]], [[\"Contrast\", 0.47219838080793364, 0.8228524484275648], [\"TranslateY\", 0.7435518856840543, 0.5888865560614439]], [[\"Posterize\", 0.10773482839638836, 0.6597021018893648], [\"Contrast\", 0.5218466423129691, 0.562985661685268]], [[\"Rotate\", 0.4401753067886466, 0.055198255925702475], [\"Rotate\", 0.3702153509335602, 0.5821574425474759]], [[\"TranslateY\", 0.6714729117832363, 0.7145542887432927], [\"Equalize\", 0.0023263758097700205, 
0.25837341854887885]], [[\"Cutout\", 0.3159707561240235, 0.19539664199170742], [\"TranslateY\", 0.8702824829864558, 0.5832348977243467]], [[\"AutoContrast\", 0.24800812729140026, 0.08017301277245716], [\"Brightness\", 0.5775505849482201, 0.4905904775616114]], [[\"Color\", 0.4143517886294533, 0.8445937742921498], [\"ShearY\", 0.28688910858536587, 0.17539366839474402]], [[\"Brightness\", 0.6341134194059947, 0.43683815933640435], [\"Brightness\", 0.3362277685899835, 0.4612826163288225]], [[\"Sharpness\", 0.4504035748829761, 0.6698294470467474], [\"Posterize\", 0.9610055612671645, 0.21070714173174876]], [[\"Posterize\", 0.19490421920029832, 0.7235798208354267], [\"Rotate\", 0.8675551331308305, 0.46335565746433094]], [[\"Color\", 0.35097958351003306, 0.42199181561523186], [\"Invert\", 0.914112788087429, 0.44775583211984815]], [[\"Cutout\", 0.223575616055454, 0.6328591417299063], [\"TranslateY\", 0.09269465212259387, 0.5101073959070608]], [[\"Rotate\", 0.3315734525975911, 0.9983593458299167], [\"Sharpness\", 0.12245416662856974, 0.6258689139914664]], [[\"ShearY\", 0.696116760180471, 0.6317805202283014], [\"Color\", 0.847501151593963, 0.4440116609830195]], [[\"Solarize\", 0.24945891607225948, 0.7651150206105561], [\"Cutout\", 0.7229677092930331, 0.12674657348602494]], [[\"TranslateX\", 0.43461945065713675, 0.06476571036747841], [\"Color\", 0.6139316940180952, 0.7376264330632316]], [[\"Invert\", 0.1933003530637138, 0.4497819016184308], [\"Invert\", 0.18391634069983653, 0.3199769100951113]], [[\"Color\", 0.20418296626476137, 0.36785101882029814], [\"Posterize\", 0.624658293920083, 0.8390081535735991]], [[\"Sharpness\", 0.5864963540530814, 0.586672446690273], [\"Posterize\", 0.1980280647652339, 0.222114611452575]], [[\"Invert\", 0.3543654961628104, 0.5146369635250309], [\"Equalize\", 0.40751271919434434, 0.4325310837291978]], [[\"ShearY\", 0.22602859359451877, 0.13137880879778158], [\"Posterize\", 0.7475029061591305, 0.803900538461099]], [[\"Sharpness\", 0.12426276165599924, 
0.5965912716602046], [\"Invert\", 0.22603903038966913, 0.4346802001255868]], [[\"TranslateY\", 0.010307035630661765, 0.16577665156754046], [\"Posterize\", 0.4114319141395257, 0.829872913683949]], [[\"TranslateY\", 0.9353069865746215, 0.5327821671247214], [\"Color\", 0.16990443486261103, 0.38794866007484197]], [[\"Cutout\", 0.1028174322829021, 0.3955952903458266], [\"ShearY\", 0.4311995281335693, 0.48024695395374734]], [[\"Posterize\", 0.1800334334284686, 0.0548749478418862], [\"Brightness\", 0.7545808536793187, 0.7699080551646432]], [[\"Color\", 0.48695305373084197, 0.6674269768464615], [\"ShearY\", 0.4306032279086781, 0.06057690550239343]], [[\"Brightness\", 0.4919399683825053, 0.677338905806407], [\"Brightness\", 0.24112708387760828, 0.42761103121157656]], [[\"Posterize\", 0.4434818644882532, 0.9489450593207714], [\"Posterize\", 0.40957675116385955, 0.015664946759584186]], [[\"Posterize\", 0.41307949855153797, 0.6843276552020272], [\"Rotate\", 0.8003545094091291, 0.7002300783416026]], [[\"Color\", 0.7038570031770905, 0.4697612983649519], [\"Sharpness\", 0.9700016496081002, 0.25185103545948884]], [[\"AutoContrast\", 0.714641656154856, 0.7962423001719023], [\"Sharpness\", 0.2410097684093468, 0.5919171048019731]], [[\"TranslateX\", 0.8101567644494714, 0.7156447005337443], [\"Solarize\", 0.5634727831229329, 0.8875158446846]], [[\"Sharpness\", 0.5335258857303261, 0.364743126378182], [\"Color\", 0.453280875871377, 0.5621962714743068]], [[\"Cutout\", 0.7423678127672542, 0.7726370777867049], [\"Invert\", 0.2806161382641934, 0.6021111986900146]], [[\"TranslateY\", 0.15190341320343761, 0.3860373175487939], [\"Cutout\", 0.9980805818665679, 0.05332384819400854]], [[\"Posterize\", 0.36518675678786605, 0.2935819027397963], [\"TranslateX\", 0.26586180351840005, 0.303641300745208]], [[\"Brightness\", 0.19994509744377761, 0.90813953707639], [\"Equalize\", 0.8447217761297836, 0.3449396603478335]], [[\"Sharpness\", 0.9294773669936768, 0.999713346583839], [\"Brightness\", 
0.1359744825665662, 0.1658489221872924]], [[\"TranslateX\", 0.11456529257659381, 0.9063795878367734], [\"Equalize\", 0.017438134319894553, 0.15776887259743755]], [[\"ShearX\", 0.9833726383270114, 0.5688194948373335], [\"Equalize\", 0.04975615490994345, 0.8078130016227757]], [[\"Brightness\", 0.2654654830488695, 0.8989789725280538], [\"TranslateX\", 0.3681535065952329, 0.36433345713161036]], [[\"Rotate\", 0.04956524209892327, 0.5371942433238247], [\"ShearY\", 0.0005527499145153714, 0.56082571605602]], [[\"Rotate\", 0.7918337108932019, 0.5906896260060501], [\"Posterize\", 0.8223967034091191, 0.450216998388943]], [[\"Color\", 0.43595106766978337, 0.5253013785221605], [\"Sharpness\", 0.9169421073531799, 0.8439997639348893]], [[\"TranslateY\", 0.20052300197155504, 0.8202662448307549], [\"Sharpness\", 0.2875792108435686, 0.6997181624527842]], [[\"Color\", 0.10568089980973616, 0.3349467065132249], [\"Brightness\", 0.13070947282207768, 0.5757725013960775]], [[\"AutoContrast\", 0.3749999712869779, 0.6665578760607657], [\"Brightness\", 0.8101178402610292, 0.23271946112218125]], [[\"Color\", 0.6473605933679651, 0.7903409763232029], [\"ShearX\", 0.588080941572581, 0.27223524148254086]], [[\"Cutout\", 0.46293361616697304, 0.7107761001833921], [\"AutoContrast\", 0.3063766931658412, 0.8026114219854579]], [[\"Brightness\", 0.7884854981520251, 0.5503669863113797], [\"Brightness\", 0.5832456158675261, 0.5840349298921661]], [[\"Solarize\", 0.4157539625058916, 0.9161905834309929], [\"Sharpness\", 0.30628197221802017, 0.5386291658995193]], [[\"Sharpness\", 0.03329610069672856, 0.17066672983670506], [\"Invert\", 0.9900547302690527, 0.6276238841220477]], [[\"Solarize\", 0.551015648982762, 0.6937104775938737], [\"Color\", 0.8838491591064375, 0.31596634380795385]], [[\"AutoContrast\", 0.16224182418148447, 0.6068227969351896], [\"Sharpness\", 0.9599468096118623, 0.4885289719905087]], [[\"TranslateY\", 0.06576432526133724, 0.6899544605400214], [\"Posterize\", 0.2177096480169678, 
0.9949164789616582]], [[\"Solarize\", 0.529820544480292, 0.7576047224165541], [\"Sharpness\", 0.027047878909321643, 0.45425231553970685]], [[\"Sharpness\", 0.9102526010473146, 0.8311987141993857], [\"Invert\", 0.5191838751826638, 0.6906136644742229]], [[\"Solarize\", 0.4762773516008588, 0.7703654263842423], [\"Color\", 0.8048437792602289, 0.4741523094238038]], [[\"Sharpness\", 0.7095055508594206, 0.7047344238075169], [\"Sharpness\", 0.5059623654132546, 0.6127255499234886]], [[\"TranslateY\", 0.02150725921966186, 0.3515764519224378], [\"Posterize\", 0.12482170119714735, 0.7829851754051393]], [[\"Color\", 0.7983830079184816, 0.6964694521670339], [\"Brightness\", 0.3666527856286296, 0.16093151636495978]], [[\"AutoContrast\", 0.6724982375829505, 0.536777706678488], [\"Sharpness\", 0.43091754837597646, 0.7363240924241439]], [[\"Brightness\", 0.2889770401966227, 0.4556557902380539], [\"Sharpness\", 0.8805303296690755, 0.6262218017754902]], [[\"Sharpness\", 0.5341939854581068, 0.6697109101429343], [\"Rotate\", 0.6806606655137529, 0.4896914517968317]], [[\"Sharpness\", 0.5690509737059344, 0.32790632371915096], [\"Posterize\", 0.7951894258661069, 0.08377850335209162]], [[\"Color\", 0.6124132978216081, 0.5756485920709012], [\"Brightness\", 0.33053544654445344, 0.23321841707002083]], [[\"TranslateX\", 0.0654795026615917, 0.5227246924310244], [\"ShearX\", 0.2932320531132063, 0.6732066478183716]], [[\"Cutout\", 0.6226071187083615, 0.01009274433736012], [\"ShearX\", 0.7176799968189801, 0.3758780240463811]], [[\"Rotate\", 0.18172339508029314, 0.18099184896819184], [\"ShearY\", 0.7862658331645667, 0.295658135767252]], [[\"Contrast\", 0.4156099177015862, 0.7015784500878446], [\"Sharpness\", 0.6454135310009, 0.32335858947955287]], [[\"Color\", 0.6215885089922037, 0.6882673235388836], [\"Brightness\", 0.3539881732605379, 0.39486736455795496]], [[\"Invert\", 0.8164816716866418, 0.7238192000817796], [\"Sharpness\", 0.3876355847343607, 0.9870077619731956]], [[\"Brightness\", 
0.1875628712629315, 0.5068115936257], [\"Sharpness\", 0.8732419122060423, 0.5028019258530066]], [[\"Sharpness\", 0.6140734993408259, 0.6458239834366959], [\"Rotate\", 0.5250107862824867, 0.533419456933602]], [[\"Sharpness\", 0.5710893143725344, 0.15551651073007305], [\"ShearY\", 0.6548487860151722, 0.021365083044319146]], [[\"Color\", 0.7610250354649954, 0.9084452893074055], [\"Brightness\", 0.6934611792619156, 0.4108071412071374]], [[\"ShearY\", 0.07512550098923898, 0.32923768385754293], [\"ShearY\", 0.2559588911696498, 0.7082337365398496]], [[\"Cutout\", 0.5401319018926146, 0.004750568603408445], [\"ShearX\", 0.7473354415031975, 0.34472481968368773]], [[\"Rotate\", 0.02284154583679092, 0.1353450082435801], [\"ShearY\", 0.8192458031684238, 0.2811653613473772]], [[\"Contrast\", 0.21142896718139154, 0.7230739568811746], [\"Sharpness\", 0.6902690582665707, 0.13488436112901683]], [[\"Posterize\", 0.21701219600958138, 0.5900695769640687], [\"Rotate\", 0.7541095031505971, 0.5341162375286219]], [[\"Posterize\", 0.5772853064792737, 0.45808311743269936], [\"Brightness\", 0.14366050177823675, 0.4644871239446629]], [[\"Cutout\", 0.8951718842805059, 0.4970074074310499], [\"Equalize\", 0.3863835903119882, 0.9986531042150006]], [[\"Equalize\", 0.039411354473938925, 0.7475477254908457], [\"Sharpness\", 0.8741966378291861, 0.7304822679596362]], [[\"Solarize\", 0.4908704265218634, 0.5160677350249471], [\"Color\", 0.24961813832742435, 0.09362352627360726]], [[\"Rotate\", 7.870457075154214e-05, 0.8086950025500952], [\"Solarize\", 0.10200484521793163, 0.12312889222989265]], [[\"Contrast\", 0.8052564975559727, 0.3403813036543645], [\"Solarize\", 0.7690158533600184, 0.8234626822018851]], [[\"AutoContrast\", 0.680362728854513, 0.9415320040873628], [\"TranslateY\", 0.5305871824686941, 0.8030609611614028]], [[\"Cutout\", 0.1748050257378294, 0.06565343731910589], [\"TranslateX\", 0.1812738872339903, 0.6254461448344308]], [[\"Brightness\", 0.4230502644722749, 0.3346463682905031], 
[\"ShearX\", 0.19107198973659312, 0.6715789128604919]], [[\"ShearX\", 0.1706528684548394, 0.7816570201200446], [\"TranslateX\", 0.494545185948171, 0.4710810058360291]], [[\"TranslateX\", 0.42356251508933324, 0.23865307292867322], [\"TranslateX\", 0.24407503619326745, 0.6013778508137331]], [[\"AutoContrast\", 0.7719512185744232, 0.3107905373009763], [\"ShearY\", 0.49448082925617176, 0.5777951230577671]], [[\"Cutout\", 0.13026983827940525, 0.30120438757485657], [\"Brightness\", 0.8857896834516185, 0.7731541459513939]], [[\"AutoContrast\", 0.6422800349197934, 0.38637401090264556], [\"TranslateX\", 0.25085431400995084, 0.3170642592664873]], [[\"Sharpness\", 0.22336654455367122, 0.4137774852324138], [\"ShearY\", 0.22446851054920894, 0.518341735882535]], [[\"Color\", 0.2597579403253848, 0.7289643913060193], [\"Sharpness\", 0.5227416670468619, 0.9239943674030637]], [[\"Cutout\", 0.6835337711563527, 0.24777620448593812], [\"AutoContrast\", 0.37260245353051846, 0.4840361183247263]], [[\"Posterize\", 0.32756602788628375, 0.21185124493743707], [\"ShearX\", 0.25431504951763967, 0.19585996561416225]], [[\"AutoContrast\", 0.07930627591849979, 0.5719381348340309], [\"AutoContrast\", 0.335512380071304, 0.4208050118308541]], [[\"Rotate\", 0.2924360268257798, 0.5317629242879337], [\"Sharpness\", 0.4531050021499891, 0.4102650087199528]], [[\"Equalize\", 0.5908862210984079, 0.468742362277498], [\"Brightness\", 0.08571766548550425, 0.5629320703375056]], [[\"Cutout\", 0.52751122383816, 0.7287774744737556], [\"Equalize\", 0.28721628275296274, 0.8075179887475786]], [[\"AutoContrast\", 0.24208377391366226, 0.34616549409607644], [\"TranslateX\", 0.17454707403766834, 0.5278055700078459]], [[\"Brightness\", 0.5511881924749478, 0.999638675514418], [\"Equalize\", 0.14076197797220913, 0.2573030693317552]], [[\"ShearX\", 0.668731433926434, 0.7564253049646743], [\"Color\", 0.63235486543845, 0.43954436063340785]], [[\"ShearX\", 0.40511960873276237, 0.5710419512142979], [\"Contrast\", 
0.9256769948746423, 0.7461350716211649]], [[\"Cutout\", 0.9995917204023061, 0.22908419326246265], [\"TranslateX\", 0.5440902956629469, 0.9965570051216295]], [[\"Color\", 0.22552987172228894, 0.4514558960849747], [\"Sharpness\", 0.638058150559443, 0.9987829481002615]], [[\"Contrast\", 0.5362775837534763, 0.7052133185951871], [\"ShearY\", 0.220369845547023, 0.7593922994775721]], [[\"ShearX\", 0.0317785822935219, 0.775536785253455], [\"TranslateX\", 0.7939510227015061, 0.5355620618496535]], [[\"Cutout\", 0.46027969917602196, 0.31561199122527517], [\"Color\", 0.06154066467629451, 0.5384660000729091]], [[\"Sharpness\", 0.7205483743301113, 0.552222392539886], [\"Posterize\", 0.5146496404711752, 0.9224333144307473]], [[\"ShearX\", 0.00014547730356910538, 0.3553954298642108], [\"TranslateY\", 0.9625736029090676, 0.57403418640424]], [[\"Posterize\", 0.9199917903297341, 0.6690259107633706], [\"Posterize\", 0.0932558110217602, 0.22279303372106138]], [[\"Invert\", 0.25401453476874863, 0.3354329544078385], [\"Posterize\", 0.1832673201325652, 0.4304718799821412]], [[\"TranslateY\", 0.02084122674367607, 0.12826181437197323], [\"ShearY\", 0.655862534043703, 0.3838330909470975]], [[\"Contrast\", 0.35231797644104523, 0.3379356652070079], [\"Cutout\", 0.19685599014304822, 0.1254328595280942]], [[\"Sharpness\", 0.18795594984191433, 0.09488678946484895], [\"ShearX\", 0.33332876790679306, 0.633523782574133]], [[\"Cutout\", 0.28267175940290246, 0.7901991550267817], [\"Contrast\", 0.021200195312951198, 0.4733128702798515]], [[\"ShearX\", 0.966231043411256, 0.7700673327786812], [\"TranslateX\", 0.7102390777763321, 0.12161245817120675]], [[\"Cutout\", 0.5183324259533826, 0.30766086003013055], [\"Color\", 0.48399078150128927, 0.4967477809069189]], [[\"Sharpness\", 0.8160855187385873, 0.47937658961644], [\"Posterize\", 0.46360395447862535, 0.7685454058155061]], [[\"ShearX\", 0.10173571421694395, 0.3987290690178754], [\"TranslateY\", 0.8939980277379345, 0.5669994143735713]], [[\"Posterize\", 
0.6768089584801844, 0.7113149244621721], [\"Posterize\", 0.054896856043358935, 0.3660837250743921]], [[\"AutoContrast\", 0.5915576211896306, 0.33607718177676493], [\"Contrast\", 0.3809408206617828, 0.5712201773913784]], [[\"AutoContrast\", 0.012321347472748323, 0.06379072432796573], [\"Rotate\", 0.0017964439160045656, 0.7598026295973337]], [[\"Contrast\", 0.6007100085192627, 0.36171972473370206], [\"Invert\", 0.09553573684975913, 0.12218510774295901]], [[\"AutoContrast\", 0.32848604643836266, 0.2619457656206414], [\"Invert\", 0.27082113532501784, 0.9967965642293485]], [[\"AutoContrast\", 0.6156282120903395, 0.9422706516080884], [\"Sharpness\", 0.4215509247379262, 0.4063347716503587]], [[\"Solarize\", 0.25059210436331264, 0.7215305521159305], [\"Invert\", 0.1654465185253614, 0.9605851884186778]], [[\"AutoContrast\", 0.4464438610980994, 0.685334175815482], [\"Cutout\", 0.24358625461158645, 0.4699066834058694]], [[\"Rotate\", 0.5931657741857909, 0.6813978655574067], [\"AutoContrast\", 0.9259100547738681, 0.4903201223870492]], [[\"Color\", 0.8203976071280751, 0.9777824466585101], [\"Posterize\", 0.4620669369254169, 0.2738895968716055]], [[\"Contrast\", 0.13754352055786848, 0.3369433962088463], [\"Posterize\", 0.48371187792441916, 0.025718004361451302]], [[\"Rotate\", 0.5208233630704999, 0.1760188899913535], [\"TranslateX\", 0.49753461392937226, 0.4142935276250922]], [[\"Cutout\", 0.5967418240931212, 0.8028675552639539], [\"Cutout\", 0.20021854152659121, 0.19426330549590076]], [[\"ShearY\", 0.549583567386676, 0.6601326640171705], [\"Cutout\", 0.6111813470383047, 0.4141935587984994]], [[\"Brightness\", 0.6354891977535064, 0.31591459747846745], [\"AutoContrast\", 0.7853952208711621, 0.6555861906702081]], [[\"AutoContrast\", 0.7333725370546154, 0.9919410576081586], [\"Cutout\", 0.9984177877923588, 0.2938253683694291]], [[\"Color\", 0.33219296307742263, 0.6378995578424113], [\"AutoContrast\", 0.15432820754183288, 0.7897899838932103]], [[\"Contrast\", 0.5905289460222578, 
0.8158577207653422], [\"Cutout\", 0.3980284381203051, 0.43030531250317217]], [[\"TranslateX\", 0.452093693346745, 0.5251475931559115], [\"Rotate\", 0.991422504871258, 0.4556503729269001]], [[\"Color\", 0.04560406292983776, 0.061574671308480766], [\"Brightness\", 0.05161079440128734, 0.6718398142425688]], [[\"Contrast\", 0.02913302416506853, 0.14402056093217708], [\"Rotate\", 0.7306930378774588, 0.47088249057922094]], [[\"Solarize\", 0.3283072384190169, 0.82680847744367], [\"Invert\", 0.21632614168418854, 0.8792241691482687]], [[\"Equalize\", 0.4860808352478527, 0.9440534949023064], [\"Cutout\", 0.31395897639184694, 0.41805859306017523]], [[\"Rotate\", 0.2816043232522335, 0.5451282807926706], [\"Color\", 0.7388520447173302, 0.7706503658143311]], [[\"Color\", 0.9342776719536201, 0.9039981381514299], [\"Rotate\", 0.6646389177840164, 0.5147917008383647]], [[\"Cutout\", 0.08929430082050335, 0.22416445996932374], [\"Posterize\", 0.454485751267457, 0.500958345348237]], [[\"TranslateX\", 0.14674201106374488, 0.7018633472428202], [\"Sharpness\", 0.6128796723832848, 0.743535235614809]], [[\"TranslateX\", 0.5189900164469432, 0.6491132403587601], [\"Contrast\", 0.26309555778227806, 0.5976857969656114]], [[\"Solarize\", 0.23569808291972655, 0.3315781686591778], [\"ShearY\", 0.07292078937544964, 0.7460326987587573]], [[\"ShearY\", 0.7090542757477153, 0.5246437008439621], [\"Sharpness\", 0.9666919148538443, 0.4841687888767071]], [[\"Solarize\", 0.3486952615189488, 0.7012877201721799], [\"Invert\", 0.1933387967311534, 0.9535472742828175]], [[\"AutoContrast\", 0.5393460721514914, 0.6924005011697713], [\"Cutout\", 0.16988156769247176, 0.3667207571712882]], [[\"Rotate\", 0.5815329514554719, 0.5390406879316949], [\"AutoContrast\", 0.7370538341589625, 0.7708822194197815]], [[\"Color\", 0.8463701017918459, 0.9893491045831084], [\"Invert\", 0.06537367901579016, 0.5238468509941635]], [[\"Contrast\", 0.8099771812443645, 0.39371603893945184], [\"Posterize\", 0.38273629875646487, 
0.46493786058573966]], [[\"Color\", 0.11164686537114032, 0.6771450570033168], [\"Posterize\", 0.27921361289661406, 0.7214300893597819]], [[\"Contrast\", 0.5958265906571906, 0.5963959447666958], [\"Sharpness\", 0.2640889223630885, 0.3365870842641453]], [[\"Color\", 0.255634146724125, 0.5610029792926452], [\"ShearY\", 0.7476893976084721, 0.36613194760395557]], [[\"ShearX\", 0.2167581882130063, 0.022978065071245002], [\"TranslateX\", 0.1686864409720319, 0.4919575435512007]], [[\"Solarize\", 0.10702753776284957, 0.3954707963684698], [\"Contrast\", 0.7256100635368403, 0.48845259655719686]], [[\"Sharpness\", 0.6165615058519549, 0.2624079463213861], [\"ShearX\", 0.3804820351860919, 0.4738994677544202]], [[\"TranslateX\", 0.18066394808448177, 0.8174509422318228], [\"Solarize\", 0.07964569396290502, 0.45495935736800974]], [[\"Sharpness\", 0.2741884021129658, 0.9311045302358317], [\"Cutout\", 0.0009101326429323388, 0.5932102256756948]], [[\"Rotate\", 0.8501796375826188, 0.5092564038282137], [\"Brightness\", 0.6520146983999912, 0.724091283316938]], [[\"Brightness\", 0.10079744898900078, 0.7644088017429471], [\"AutoContrast\", 0.33540215138213575, 0.1487538541758792]], [[\"ShearY\", 0.10632545944757177, 0.9565164562996977], [\"Rotate\", 0.275833816849538, 0.6200731548023757]], [[\"Color\", 0.6749819274397422, 0.41042188598168844], [\"AutoContrast\", 0.22396590966461932, 0.5048018491863738]], [[\"Equalize\", 0.5044277111650255, 0.2649182381110667], [\"Brightness\", 0.35715133289571355, 0.8653260893016869]], [[\"Cutout\", 0.49083594426355326, 0.5602781291093129], [\"Posterize\", 0.721795488514384, 0.5525847430754974]], [[\"Sharpness\", 0.5081835448947317, 0.7453323423804428], [\"TranslateX\", 0.11511932212234266, 0.4337766796030984]], [[\"Solarize\", 0.3817050641766593, 0.6879004573473403], [\"Invert\", 0.0015041436267447528, 0.9793134066888262]], [[\"AutoContrast\", 0.5107410439697935, 0.8276720355454423], [\"Cutout\", 0.2786270701864015, 0.43993387208414564]], [[\"Rotate\", 
0.6711202569428987, 0.6342930903972932], [\"Posterize\", 0.802820231163559, 0.42770002619222053]], [[\"Color\", 0.9426854321337312, 0.9055431782458764], [\"AutoContrast\", 0.3556422423506799, 0.2773922428787449]], [[\"Contrast\", 0.10318991257659992, 0.30841372533347416], [\"Posterize\", 0.4202264962677853, 0.05060395018085634]], [[\"Invert\", 0.549305630337048, 0.886056156681853], [\"Cutout\", 0.9314157033373055, 0.3485836940307909]], [[\"ShearX\", 0.5642891775895684, 0.16427372934801418], [\"Invert\", 0.228741164726475, 0.5066345406806475]], [[\"ShearY\", 0.5813123201003086, 0.33474363490586106], [\"Equalize\", 0.11803439432255824, 0.8583936440614798]], [[\"Sharpness\", 0.1642809706111211, 0.6958675237301609], [\"ShearY\", 0.5989560762277414, 0.6194018060415276]], [[\"Rotate\", 0.05092104774529638, 0.9358045394527796], [\"Cutout\", 0.6443254331615441, 0.28548414658857657]], [[\"Brightness\", 0.6986036769232594, 0.9618046340942727], [\"Sharpness\", 0.5564490243465492, 0.6295231286085622]], [[\"Brightness\", 0.42725649792574105, 0.17628028916784244], [\"Equalize\", 0.4425109360966546, 0.6392872650036018]], [[\"ShearY\", 0.5758622795525444, 0.8773349286588288], [\"ShearX\", 0.038525646435423666, 0.8755366512394268]], [[\"Sharpness\", 0.3704459924265827, 0.9236361456197351], [\"Color\", 0.6379842432311235, 0.4548767717224531]], [[\"Contrast\", 0.1619523824549347, 0.4506528800882731], [\"AutoContrast\", 0.34513874426188385, 0.3580290330996726]], [[\"Contrast\", 0.728699731513527, 0.6932238009822878], [\"Brightness\", 0.8602917375630352, 0.5341445123280423]], [[\"Equalize\", 0.3574552353044203, 0.16814745124536548], [\"Rotate\", 0.24191717169379262, 0.3279497108179034]], [[\"ShearY\", 0.8567478695576244, 0.37746117240238164], [\"ShearX\", 0.9654125389830487, 0.9283047610798827]], [[\"ShearY\", 0.4339052480582405, 0.5394548246617406], [\"Cutout\", 0.5070570647967001, 0.7846286976687882]], [[\"AutoContrast\", 0.021620100406875065, 0.44425839772845227], [\"AutoContrast\", 
0.33978157614075183, 0.47716564815092244]], [[\"Contrast\", 0.9727600659025666, 0.6651758819229426], [\"Brightness\", 0.9893133904996626, 0.39176397622636105]], [[\"Equalize\", 0.283428620586305, 0.18727922861893637], [\"Rotate\", 0.3556063466797136, 0.3722839913107821]], [[\"ShearY\", 0.7276172841941864, 0.4834188516302227], [\"ShearX\", 0.010783217950465884, 0.9756458772142235]], [[\"ShearY\", 0.2901753295101581, 0.5684700238749064], [\"Cutout\", 0.655585564610337, 0.9490071307790201]], [[\"AutoContrast\", 0.008507193981450278, 0.4881150103902877], [\"AutoContrast\", 0.6561989723231185, 0.3715071329838596]], [[\"Contrast\", 0.7702505530948414, 0.6961371266519999], [\"Brightness\", 0.9953051630261895, 0.3861962467326121]], [[\"Equalize\", 0.2805270012472756, 0.17715406116880994], [\"Rotate\", 0.3111256593947474, 0.15824352183820073]], [[\"Brightness\", 0.9888680802094193, 0.4856236485253163], [\"ShearX\", 0.022370252047332284, 0.9284975906226682]], [[\"ShearY\", 0.4065719044318099, 0.7468528006921563], [\"AutoContrast\", 0.19494427109708126, 0.8613186475174786]], [[\"AutoContrast\", 0.023296727279367765, 0.9170949567425306], [\"AutoContrast\", 0.11663051100921168, 0.7908646792175343]], [[\"AutoContrast\", 0.7335191671571732, 0.4958357308292425], [\"Color\", 0.7964964008349845, 0.4977687544324929]], [[\"ShearX\", 0.19905221600021472, 0.3033081933150046], [\"Equalize\", 0.9383410219319321, 0.3224669877230161]], [[\"ShearX\", 0.8265450331466404, 0.6509091423603757], [\"Sharpness\", 0.7134181178748723, 0.6472835976443643]], [[\"ShearY\", 0.46962439525486044, 0.223433110541722], [\"Rotate\", 0.7749806946212373, 0.5337060376916906]], [[\"Posterize\", 0.1652499695106796, 0.04860659068586126], [\"Brightness\", 0.6644577712782511, 0.4144528269429337]], [[\"TranslateY\", 0.6220449565731829, 0.4917495676722932], [\"Posterize\", 0.6255000355409635, 0.8374266890984867]], [[\"AutoContrast\", 0.4887160797052227, 0.7106426020530529], [\"Sharpness\", 0.7684218571497236, 
0.43678474722954763]], [[\"Invert\", 0.13178101535845366, 0.8301141976359813], [\"Color\", 0.002820877424219378, 0.49444413062487075]], [[\"TranslateX\", 0.9920683666478188, 0.5862245842588877], [\"Posterize\", 0.5536357075855376, 0.5454300367281468]], [[\"Brightness\", 0.8150181219663427, 0.1411060258870707], [\"Sharpness\", 0.8548823004164599, 0.77008691072314]], [[\"Brightness\", 0.9580478020413399, 0.7198667636628974], [\"ShearY\", 0.8431585033377366, 0.38750016565010803]], [[\"Solarize\", 0.2331505347152334, 0.25754361489084787], [\"TranslateY\", 0.447431373734262, 0.5782399531772253]], [[\"TranslateY\", 0.8904927998691309, 0.25872872455072315], [\"AutoContrast\", 0.7129888139716263, 0.7161603231650524]], [[\"ShearY\", 0.6336216800247362, 0.5247508616674911], [\"Cutout\", 0.9167315119726633, 0.2060557387978919]], [[\"ShearX\", 0.001661782345968199, 0.3682225725445044], [\"Solarize\", 0.12303352043754572, 0.5014989548584458]], [[\"Brightness\", 0.9723625105116246, 0.6555444729681099], [\"Contrast\", 0.5539208721135375, 0.7819973409318487]], [[\"Equalize\", 0.3262607499912611, 0.0006745572802121513], [\"Contrast\", 0.35341551623767103, 0.36814689398886347]], [[\"ShearY\", 0.7478539900243613, 0.37322078030129185], [\"TranslateX\", 0.41558847793529247, 0.7394615158544118]], [[\"Invert\", 0.13735541232529067, 0.5536403864332143], [\"Cutout\", 0.5109718190377135, 0.0447509485253679]], [[\"AutoContrast\", 0.09403602327274725, 0.5909250807862687], [\"ShearY\", 0.53234060616395, 0.5316981359469398]], [[\"ShearX\", 0.5651922367876323, 0.6794110241313183], [\"Posterize\", 0.7431624856363638, 0.7896861463783287]], [[\"Brightness\", 0.30949179379286806, 0.7650569096019195], [\"Sharpness\", 0.5461629122105034, 0.6814369444005866]], [[\"Sharpness\", 0.28459340191768434, 0.7802208350806028], [\"Rotate\", 0.15097973114238117, 0.5259683294104645]], [[\"ShearX\", 0.6430803693700531, 0.9333735880102375], [\"Contrast\", 0.7522209520030653, 0.18831747966185058]], [[\"Contrast\", 
0.4219455937915647, 0.29949769435499646], [\"Color\", 0.6925322933509542, 0.8095523885795443]], [[\"ShearX\", 0.23553236193043048, 0.17966207900468323], [\"AutoContrast\", 0.9039700567886262, 0.21983629944639108]], [[\"ShearX\", 0.19256223146671514, 0.31200739880443584], [\"Sharpness\", 0.31962196883294713, 0.6828107668550425]], [[\"Cutout\", 0.5947690279080912, 0.21728220253899178], [\"Rotate\", 0.6757188879871141, 0.489460599679474]], [[\"ShearY\", 0.18365897125470526, 0.3988571115918058], [\"Brightness\", 0.7727489489504, 0.4790369956329955]], [[\"Contrast\", 0.7090301084131432, 0.5178303607560537], [\"ShearX\", 0.16749258277688506, 0.33061773301592356]], [[\"ShearX\", 0.3706690885419934, 0.38510677124319415], [\"AutoContrast\", 0.8288356276501032, 0.16556487668770264]], [[\"TranslateY\", 0.16758043046445614, 0.30127092823893986], [\"Brightness\", 0.5194636577132354, 0.6225165310621702]], [[\"Cutout\", 0.6087289363049726, 0.10439287037803044], [\"Rotate\", 0.7503452083033819, 0.7425316019981433]], [[\"ShearY\", 0.24347189588329932, 0.5554979486672325], [\"Brightness\", 0.9468115239174161, 0.6132449358023568]], [[\"Brightness\", 0.7144508395807994, 0.4610594769966929], [\"ShearX\", 0.16466683833092968, 0.3382903812375781]], [[\"Sharpness\", 0.27743648684265465, 0.17200038071656915], [\"Color\", 0.47404262107546236, 0.7868991675614725]], [[\"Sharpness\", 0.8603993513633618, 0.324604728411791], [\"TranslateX\", 0.3331597130403763, 0.9369586812977804]], [[\"Color\", 0.1535813630595832, 0.4700116846558207], [\"Color\", 0.5435647971896318, 0.7639291483525243]], [[\"Brightness\", 0.21486188101947656, 0.039347277341450576], [\"Cutout\", 0.7069526940684954, 0.39273934115015696]], [[\"ShearY\", 0.7267130888840517, 0.6310800726389485], [\"AutoContrast\", 0.662163190824139, 0.31948540372237766]], [[\"ShearX\", 0.5123132117185981, 0.1981015909438834], [\"AutoContrast\", 0.9009347363863067, 0.26790399126924036]], [[\"Brightness\", 0.24245061453231648, 0.2673478678291436], 
[\"ShearX\", 0.31707976089283946, 0.6800582845544948]], [[\"Cutout\", 0.9257780138367764, 0.03972673526848819], [\"Rotate\", 0.6807858944518548, 0.46974332280612097]], [[\"ShearY\", 0.1543443071262312, 0.6051682587030671], [\"Brightness\", 0.9758203119828304, 0.4941406868162414]], [[\"Contrast\", 0.07578049236491124, 0.38953819133407647], [\"ShearX\", 0.20194918288164293, 0.4141510791947318]], [[\"Color\", 0.27826402243792286, 0.43517491081531157], [\"AutoContrast\", 0.6159269026143263, 0.2021846783488046]], [[\"AutoContrast\", 0.5039377966534692, 0.19241507605941105], [\"Invert\", 0.5563931144385394, 0.7069728937319112]], [[\"Sharpness\", 0.19031632433810566, 0.26310171056096743], [\"Color\", 0.4724537593175573, 0.6715201448387876]], [[\"ShearY\", 0.2280910467786642, 0.33340559088059313], [\"ShearY\", 0.8858560034869303, 0.2598627441471076]], [[\"ShearY\", 0.07291814128021593, 0.5819462692986321], [\"Cutout\", 0.27605696060512147, 0.9693427371868695]], [[\"Posterize\", 0.4249871586563321, 0.8256952014328607], [\"Posterize\", 0.005907466926447169, 0.8081353382152597]], [[\"Brightness\", 0.9071305290601128, 0.4781196213717954], [\"Posterize\", 0.8996214311439275, 0.5540717376630279]], [[\"Brightness\", 0.06560728936236392, 0.9920627849065685], [\"TranslateX\", 0.04530789794044952, 0.5318568944702607]], [[\"TranslateX\", 0.6800263601084814, 0.4611536772507228], [\"Rotate\", 0.7245888375283157, 0.0914772551375381]], [[\"Sharpness\", 0.879556061897963, 0.42272481462067535], [\"TranslateX\", 0.4600350422524085, 0.5742175429334919]], [[\"AutoContrast\", 0.5005776243176145, 0.22597121331684505], [\"Invert\", 0.10763286370369299, 0.6841782704962373]], [[\"Sharpness\", 0.7422908472000116, 0.6850324203882405], [\"TranslateX\", 0.3832914614128403, 0.34798646673324896]], [[\"ShearY\", 0.31939465302679326, 0.8792088167639516], [\"Brightness\", 0.4093604352811235, 0.21055483197261338]], [[\"AutoContrast\", 0.7447595860998638, 0.19280222555998586], [\"TranslateY\", 
0.317754779431227, 0.9983454520593591]], [[\"Equalize\", 0.27706973689750847, 0.6447455020660622], [\"Contrast\", 0.5626579126863761, 0.7920049962776781]], [[\"Rotate\", 0.13064369451773816, 0.1495367590684905], [\"Sharpness\", 0.24893941981801215, 0.6295943894521504]], [[\"ShearX\", 0.6856269993063254, 0.5167938584189854], [\"Sharpness\", 0.24835352574609537, 0.9990550493102627]], [[\"AutoContrast\", 0.461654115871693, 0.43097388896245004], [\"Cutout\", 0.366359682416437, 0.08011826474215511]], [[\"AutoContrast\", 0.993892672935951, 0.2403608711236933], [\"ShearX\", 0.6620817870694181, 0.1744814077869482]], [[\"ShearY\", 0.6396747719986443, 0.15031017143644265], [\"Brightness\", 0.9451954879495629, 0.26490678840264714]], [[\"Color\", 0.19311480787397262, 0.15712300697448575], [\"Posterize\", 0.05391448762015258, 0.6943963643155474]], [[\"Sharpness\", 0.6199669674684085, 0.5412492335319072], [\"Invert\", 0.14086213450149815, 0.2611850277919339]], [[\"Posterize\", 0.5533129268803405, 0.5332478159319912], [\"ShearX\", 0.48956244029096635, 0.09223930853562916]], [[\"ShearY\", 0.05871590849449765, 0.19549715278943228], [\"TranslateY\", 0.7208521362741379, 0.36414003004659434]], [[\"ShearY\", 0.7316263417917531, 0.0629747985768501], [\"Contrast\", 0.036359793501448245, 0.48658745414898386]], [[\"Rotate\", 0.3301497610942963, 0.5686622043085637], [\"ShearX\", 0.40581487555676843, 0.5866127743850192]], [[\"ShearX\", 0.6679039628249283, 0.5292270693200821], [\"Sharpness\", 0.25901391739310703, 0.9778360586541461]], [[\"AutoContrast\", 0.27373222012596854, 0.14456771405730712], [\"Contrast\", 0.3877220783523938, 0.7965158941894336]], [[\"Solarize\", 0.29440905483979096, 0.06071633809388455], [\"Equalize\", 0.5246736285116214, 0.37575084834661976]], [[\"TranslateY\", 0.2191269464520395, 0.7444942293988484], [\"Posterize\", 0.3840878524812771, 0.31812671711741247]], [[\"Solarize\", 0.25159267140731356, 0.5833264622559661], [\"Brightness\", 0.07552262572348738, 
0.33210648549288435]], [[\"AutoContrast\", 0.9770099298399954, 0.46421915310428197], [\"AutoContrast\", 0.04707358934642503, 0.24922048012183493]], [[\"Cutout\", 0.5379685806621965, 0.02038212605928355], [\"Brightness\", 0.5900728303717965, 0.28807872931416956]], [[\"Sharpness\", 0.11596624872886108, 0.6086947716949325], [\"AutoContrast\", 0.34876470059667525, 0.22707897759730578]], [[\"Contrast\", 0.276545513135698, 0.8822580384226156], [\"Rotate\", 0.04874027684061846, 0.6722214281612163]], [[\"ShearY\", 0.595839851757025, 0.4389866852785822], [\"Equalize\", 0.5225492356128832, 0.2735290854063459]], [[\"Sharpness\", 0.9918029636732927, 0.9919926583216121], [\"Sharpness\", 0.03672376137997366, 0.5563865980047012]], [[\"AutoContrast\", 0.34169589759999847, 0.16419911552645738], [\"Invert\", 0.32995953043129234, 0.15073174739720568]], [[\"Posterize\", 0.04600255098477292, 0.2632612790075844], [\"TranslateY\", 0.7852153329831825, 0.6990722310191976]], [[\"AutoContrast\", 0.4414653815356372, 0.2657468780017082], [\"Posterize\", 0.30647061536763337, 0.3688222724948656]], [[\"Contrast\", 0.4239361091421837, 0.6076562806342001], [\"Cutout\", 0.5780707784165284, 0.05361325256745192]], [[\"Sharpness\", 0.7657895907855394, 0.9842407321667671], [\"Sharpness\", 0.5416352696151596, 0.6773681575200902]], [[\"AutoContrast\", 0.13967381098331305, 0.10787258006315015], [\"Posterize\", 0.5019536507897069, 0.9881978222469807]], [[\"Brightness\", 0.030528346448984903, 0.31562058762552847], [\"TranslateY\", 0.0843808140595676, 0.21019213305350526]], [[\"AutoContrast\", 0.6934579165006736, 0.2530484168209199], [\"Rotate\", 0.0005751408130693636, 0.43790043943210005]], [[\"TranslateX\", 0.611258547664328, 0.25465240215894935], [\"Sharpness\", 0.5001446909868196, 0.36102204109889413]], [[\"Contrast\", 0.8995127327150193, 0.5493190695343996], [\"Brightness\", 0.242708780669213, 0.5461116653329015]], [[\"AutoContrast\", 0.3751825351022747, 0.16845985803896962], [\"Cutout\", 
0.25201103287363663, 0.0005893331783358435]], [[\"ShearX\", 0.1518985779435941, 0.14768180777304504], [\"Color\", 0.85133530274324, 0.4006641163378305]], [[\"TranslateX\", 0.5489668255504668, 0.4694591826554948], [\"Rotate\", 0.1917354490155893, 0.39993269385802177]], [[\"ShearY\", 0.6689267479532809, 0.34304285013663577], [\"Equalize\", 0.24133154048883143, 0.279324043138247]], [[\"Contrast\", 0.3412544002099494, 0.20217358823930232], [\"Color\", 0.8606984790510235, 0.14305503544676373]], [[\"Cutout\", 0.21656155695311988, 0.5240101349572595], [\"Brightness\", 0.14109877717636352, 0.2016827341210295]], [[\"Sharpness\", 0.24764371218833872, 0.19655480259925423], [\"Posterize\", 0.19460398862039913, 0.4975414350200679]], [[\"Brightness\", 0.6071850094982323, 0.7270716448607151], [\"Solarize\", 0.111786402398499, 0.6325641684614275]], [[\"Contrast\", 0.44772949532200856, 0.44267502710695955], [\"AutoContrast\", 0.360117506402693, 0.2623958228760273]], [[\"Sharpness\", 0.8888131688583053, 0.936897400764746], [\"Sharpness\", 0.16080674198274894, 0.5681119841445879]], [[\"AutoContrast\", 0.8004456226590612, 0.1788600469525269], [\"Brightness\", 0.24832285390647374, 0.02755350284841604]], [[\"ShearY\", 0.06910320102646594, 0.26076407321544054], [\"Contrast\", 0.8633703022354964, 0.38968514704043056]], [[\"AutoContrast\", 0.42306251382780613, 0.6883260271268138], [\"Rotate\", 0.3938724346852023, 0.16740881249086037]], [[\"Contrast\", 0.2725343884286728, 0.6468194318074759], [\"Sharpness\", 0.32238942646494745, 0.6721149242783824]], [[\"AutoContrast\", 0.942093919956842, 0.14675331481712853], [\"Posterize\", 0.5406276708262192, 0.683901182218153]], [[\"Cutout\", 0.5386811894643584, 0.04498833938429728], [\"Posterize\", 0.17007257321724775, 0.45761177118620633]], [[\"Contrast\", 0.13599408935104654, 0.53282738083886], [\"Solarize\", 0.26941667995081114, 0.20958261079465895]], [[\"Color\", 0.6600788518606634, 0.9522228302165842], [\"Invert\", 0.0542722262516899, 
0.5152431169321683]], [[\"Contrast\", 0.5328934819727553, 0.2376220512388278], [\"Posterize\", 0.04890422575781711, 0.3182233123739474]], [[\"AutoContrast\", 0.9289628064340965, 0.2976678437448435], [\"Color\", 0.20936893798507963, 0.9649612821434217]], [[\"Cutout\", 0.9019423698575457, 0.24002036989728096], [\"Brightness\", 0.48734445615892974, 0.047660899809176316]], [[\"Sharpness\", 0.09347824275711591, 0.01358686275590612], [\"Posterize\", 0.9248539660538934, 0.4064232632650468]], [[\"Brightness\", 0.46575675383704634, 0.6280194775484345], [\"Invert\", 0.17276207634499413, 0.21263495428839635]], [[\"Brightness\", 0.7238014711679732, 0.6178946027258592], [\"Equalize\", 0.3815496086340364, 0.07301281068847276]], [[\"Contrast\", 0.754557393588416, 0.895332753570098], [\"Color\", 0.32709957750707447, 0.8425486003491515]], [[\"Rotate\", 0.43406698081696576, 0.28628263254953723], [\"TranslateY\", 0.43949548709125374, 0.15927082198238685]], [[\"Brightness\", 0.0015838339831640708, 0.09341692553352654], [\"AutoContrast\", 0.9113966907329718, 0.8345900469751112]], [[\"ShearY\", 0.46698796308585017, 0.6150701348176804], [\"Invert\", 0.14894062704815722, 0.2778388046184728]], [[\"Color\", 0.30360499169455957, 0.995713092016834], [\"Contrast\", 0.2597016288524961, 0.8654420870658932]], [[\"Brightness\", 0.9661642031891435, 0.7322006407169436], [\"TranslateY\", 0.4393502786333408, 0.33934762664274265]], [[\"Color\", 0.9323638351992302, 0.912776309755293], [\"Brightness\", 0.1618274755371618, 0.23485741708056307]], [[\"Color\", 0.2216470771158821, 0.3359240197334976], [\"Sharpness\", 0.6328691811471494, 0.6298393874452548]], [[\"Solarize\", 0.4772769142265505, 0.7073470698713035], [\"ShearY\", 0.2656114148206966, 0.31343097010487253]], [[\"Solarize\", 0.3839017339304234, 0.5985505779429036], [\"Equalize\", 0.002412059429196589, 0.06637506181196245]], [[\"Contrast\", 0.12751196553017863, 0.46980311434237976], [\"Sharpness\", 0.3467487455865491, 0.4054907610444406]], 
[[\"AutoContrast\", 0.9321813669127206, 0.31328471589533274], [\"Rotate\", 0.05801738717432747, 0.36035756254444273]], [[\"TranslateX\", 0.52092390458353, 0.5261722561643886], [\"Contrast\", 0.17836804476171306, 0.39354333443158535]], [[\"Posterize\", 0.5458100909925713, 0.49447244994482603], [\"Brightness\", 0.7372536822363605, 0.5303409097463796]], [[\"Solarize\", 0.1913974941725724, 0.5582966653986761], [\"Equalize\", 0.020733669175727026, 0.9377467166472878]], [[\"Equalize\", 0.16265732137763889, 0.5206282340874929], [\"Sharpness\", 0.2421533133595281, 0.506389065871883]], [[\"AutoContrast\", 0.9787324801448523, 0.24815051941486466], [\"Rotate\", 0.2423487151245957, 0.6456493129745148]], [[\"TranslateX\", 0.6809867726670327, 0.6949687002397612], [\"Contrast\", 0.16125673359747458, 0.7582679978218987]], [[\"Posterize\", 0.8212000950994955, 0.5225012157831872], [\"Brightness\", 0.8824891856626245, 0.4499216779709508]], [[\"Solarize\", 0.12061313332505218, 0.5319371283368052], [\"Equalize\", 0.04120865969945108, 0.8179402157299602]], [[\"Rotate\", 0.11278256686005855, 0.4022686554165438], [\"ShearX\", 0.2983451019112792, 0.42782525461812604]], [[\"ShearY\", 0.8847385513289983, 0.5429227024179573], [\"Rotate\", 0.21316428726607445, 0.6712120087528564]], [[\"TranslateX\", 0.46448081241068717, 0.4746090648963252], [\"Brightness\", 0.19973580961271142, 0.49252862676553605]], [[\"Posterize\", 0.49664100539481526, 0.4460713166484651], [\"Brightness\", 0.6629559985581529, 0.35192346529003693]], [[\"Color\", 0.22710733249173676, 0.37943185764616194], [\"ShearX\", 0.015809774971472595, 0.8472080190835669]], [[\"Contrast\", 0.4187366322381491, 0.21621979869256666], [\"AutoContrast\", 0.7631045030367304, 0.44965231251615134]], [[\"Sharpness\", 0.47240637876720515, 0.8080091811749525], [\"Cutout\", 0.2853425420104144, 0.6669811510150936]], [[\"Posterize\", 0.7830320527127324, 0.2727062685529881], [\"Solarize\", 0.527834000867504, 0.20098218845222998]], [[\"Contrast\", 
0.366380535288225, 0.39766001659663075], [\"Cutout\", 0.8708808878088891, 0.20669525734273086]], [[\"ShearX\", 0.6815427281122932, 0.6146858582671569], [\"AutoContrast\", 0.28330622372053493, 0.931352024154997]], [[\"AutoContrast\", 0.8668174463154519, 0.39961453880632863], [\"AutoContrast\", 0.5718557712359253, 0.6337062930797239]], [[\"ShearY\", 0.8923152519411871, 0.02480062504737446], [\"Cutout\", 0.14954159341231515, 0.1422219808492364]], [[\"Rotate\", 0.3733718175355636, 0.3861928572224287], [\"Sharpness\", 0.5651126520194574, 0.6091103847442831]], [[\"Posterize\", 0.8891714191922857, 0.29600154265251016], [\"TranslateY\", 0.7865351723963945, 0.5664998548985523]], [[\"TranslateX\", 0.9298214806998273, 0.729856565052017], [\"AutoContrast\", 0.26349082482341846, 0.9638882609038888]], [[\"Sharpness\", 0.8387378377527128, 0.42146721129032494], [\"AutoContrast\", 0.9860522000876452, 0.4200699464169384]], [[\"ShearY\", 0.019609159303115145, 0.37197835936879514], [\"Cutout\", 0.22199340461754258, 0.015932573201085848]], [[\"Rotate\", 0.43871085583928443, 0.3283504258860078], [\"Sharpness\", 0.6077702068037776, 0.6830305349618742]], [[\"Contrast\", 0.6160211756538094, 0.32029451083389626], [\"Cutout\", 0.8037631428427006, 0.4025688837399259]], [[\"TranslateY\", 0.051637820936985435, 0.6908417834391846], [\"Sharpness\", 0.7602756948473368, 0.4927111506643095]], [[\"Rotate\", 0.4973618638052235, 0.45931479729281227], [\"TranslateY\", 0.04701789716427618, 0.9408779705948676]], [[\"Rotate\", 0.5214194592768602, 0.8371249272013652], [\"Solarize\", 0.17734812472813338, 0.045020798970228315]], [[\"ShearX\", 0.7457999920079351, 0.19025612553075893], [\"Sharpness\", 0.5994846101703786, 0.5665094068864229]], [[\"Contrast\", 0.6172655452900769, 0.7811432139704904], [\"Cutout\", 0.09915620454670282, 0.3963692287596121]], [[\"TranslateX\", 0.2650112299235817, 0.7377261946165307], [\"AutoContrast\", 0.5019539734059677, 0.26905046992024506]], [[\"Contrast\", 0.6646299821370135, 
0.41667784809592945], [\"Cutout\", 0.9698457154992128, 0.15429001887703997]], [[\"Sharpness\", 0.9467079029475773, 0.44906457469098204], [\"Cutout\", 0.30036908747917396, 0.4766149689663106]], [[\"Equalize\", 0.6667517691051055, 0.5014839828447363], [\"Solarize\", 0.4127890336820831, 0.9578274770236529]], [[\"Cutout\", 0.6447384874120834, 0.2868806107728985], [\"Cutout\", 0.4800990488106021, 0.4757538246206956]], [[\"Solarize\", 0.12560195032363236, 0.5557473475801568], [\"Equalize\", 0.019957161871490228, 0.5556797187823773]], [[\"Contrast\", 0.12607637375759484, 0.4300633627435161], [\"Sharpness\", 0.3437273670109087, 0.40493203127714417]], [[\"AutoContrast\", 0.884353334807183, 0.5880138314357569], [\"Rotate\", 0.9846032404597116, 0.3591877296622974]], [[\"TranslateX\", 0.6862295865975581, 0.5307482119690076], [\"Contrast\", 0.19439251187251982, 0.3999195825722808]], [[\"Posterize\", 0.4187641835025246, 0.5008988942651585], [\"Brightness\", 0.6665805605402482, 0.3853288204214253]], [[\"Posterize\", 0.4507470690013903, 0.4232437206624681], [\"TranslateX\", 0.6054107416317659, 0.38123828040922203]], [[\"AutoContrast\", 0.29562338573283276, 0.35608605102687474], [\"TranslateX\", 0.909954785390274, 0.20098894888066549]], [[\"Contrast\", 0.6015278411777212, 0.6049140992035096], [\"Cutout\", 0.47178713636517855, 0.5333747244651914]], [[\"TranslateX\", 0.490851976691112, 0.3829593925141144], [\"Sharpness\", 0.2716675173824095, 0.5131696240367152]], [[\"Posterize\", 0.4190558294646337, 0.39316689077269873], [\"Rotate\", 0.5018526072725914, 0.295712490156129]], [[\"AutoContrast\", 0.29624715560691617, 0.10937329832409388], [\"Posterize\", 0.8770505275992637, 0.43117765012206943]], [[\"Rotate\", 0.6649970092751698, 0.47767131373391974], [\"ShearX\", 0.6257923540490786, 0.6643337040198358]], [[\"Sharpness\", 0.5553620705849509, 0.8467799429696928], [\"Cutout\", 0.9006185811918932, 0.3537270716262]], [[\"ShearY\", 0.0007619678283789788, 0.9494591850536303], [\"Invert\", 
0.24267733654007673, 0.7851608409575828]], [[\"Contrast\", 0.9730916198112872, 0.404670123321921], [\"Sharpness\", 0.5923587793251186, 0.7405792404430281]], [[\"Cutout\", 0.07393909593373034, 0.44569630026328344], [\"TranslateX\", 0.2460593252211425, 0.4817527814541055]], [[\"Brightness\", 0.31058654119340867, 0.7043749950260936], [\"ShearX\", 0.7632161538947713, 0.8043681264908555]], [[\"AutoContrast\", 0.4352334371415373, 0.6377550087204297], [\"Rotate\", 0.2892714673415678, 0.49521052050510556]], [[\"Equalize\", 0.509071051375276, 0.7352913414974414], [\"ShearX\", 0.5099959429711828, 0.7071566714593619]], [[\"Posterize\", 0.9540506532512889, 0.8498853304461906], [\"ShearY\", 0.28199061357155397, 0.3161715627214629]], [[\"Posterize\", 0.6740855359097433, 0.684004694936616], [\"Posterize\", 0.6816720350737863, 0.9654766942980918]], [[\"Solarize\", 0.7149344531717328, 0.42212789795181643], [\"Brightness\", 0.686601460864528, 0.4263050070610551]], [[\"Cutout\", 0.49577164991501, 0.08394890892056037], [\"Rotate\", 0.5810369852730606, 0.3320732965776973]], [[\"TranslateY\", 0.1793755480490623, 0.6006520265468684], [\"Brightness\", 0.3769016576438939, 0.7190746300828186]], [[\"TranslateX\", 0.7226363597757153, 0.3847027238123509], [\"Brightness\", 0.7641713191794035, 0.36234003077512544]], [[\"TranslateY\", 0.1211227055347106, 0.6693523474608023], [\"Brightness\", 0.13011180247738063, 0.5126647617294864]], [[\"Equalize\", 0.1501070550869129, 0.0038548909451806557], [\"Posterize\", 0.8266535939653881, 0.5502199643499207]], [[\"Sharpness\", 0.550624117428359, 0.2023044586648523], [\"Brightness\", 0.06291556314780017, 0.7832635398703937]], [[\"Color\", 0.3701578205508141, 0.9051537973590863], [\"Contrast\", 0.5763972727739397, 0.4905511239739898]], [[\"Rotate\", 0.7678527224046323, 0.6723066265307555], [\"Solarize\", 0.31458533097383207, 0.38329324335154524]], [[\"Brightness\", 0.292050127929522, 0.7047582807953063], [\"ShearX\", 0.040541891910333805, 
0.06639328601282746]], [[\"TranslateY\", 0.4293891393238555, 0.6608516902234284], [\"Sharpness\", 0.7794685477624004, 0.5168044063408147]], [[\"Color\", 0.3682450402286552, 0.17274523597220048], [\"ShearY\", 0.3936056470397763, 0.5702597289866161]], [[\"Equalize\", 0.43436990310624657, 0.9207072627823626], [\"Contrast\", 0.7608688260846083, 0.4759023148841439]], [[\"Brightness\", 0.7926088966143935, 0.8270093925674497], [\"ShearY\", 0.4924174064969461, 0.47424347505831244]], [[\"Contrast\", 0.043917555279430476, 0.15861903591675125], [\"ShearX\", 0.30439480405505853, 0.1682659341098064]], [[\"TranslateY\", 0.5598255583454538, 0.721352536005039], [\"Posterize\", 0.9700921973303752, 0.6882015184440126]], [[\"AutoContrast\", 0.3620887415037668, 0.5958176322317132], [\"TranslateX\", 0.14213781552733287, 0.6230799786459947]], [[\"Color\", 0.490366889723972, 0.9863152892045195], [\"Color\", 0.817792262022319, 0.6755656429452775]], [[\"Brightness\", 0.7030707021937771, 0.254633187122679], [\"Color\", 0.13977318232688843, 0.16378180123959793]], [[\"AutoContrast\", 0.2933247831326118, 0.6283663376211102], [\"Sharpness\", 0.85430478154147, 0.9753613184208796]], [[\"Rotate\", 0.6674299955457268, 0.48571208708018976], [\"Contrast\", 0.47491370175907016, 0.6401079552479657]], [[\"Sharpness\", 0.37589579644127863, 0.8475131989077025], [\"TranslateY\", 0.9985149867598191, 0.057815729375099975]], [[\"Equalize\", 0.0017194373841596389, 0.7888361311461602], [\"Contrast\", 0.6779293670669408, 0.796851411454113]], [[\"TranslateY\", 0.3296782119072306, 0.39765117357271834], [\"Sharpness\", 0.5890554357001884, 0.6318339473765834]], [[\"Posterize\", 0.25423810893163856, 0.5400430289894207], [\"Sharpness\", 0.9273643918988342, 0.6480913470982622]], [[\"Cutout\", 0.850219975768305, 0.4169812455601289], [\"Solarize\", 0.5418755745870089, 0.5679666650495466]], [[\"Brightness\", 0.008881361977310959, 0.9282562314720516], [\"TranslateY\", 0.7736066471553994, 0.20041167606029642]], 
[[\"Brightness\", 0.05382537581401925, 0.6405265501035952], [\"Contrast\", 0.30484329473639593, 0.5449338155734242]], [[\"Color\", 0.613257119787967, 0.4541503912724138], [\"Brightness\", 0.9061572524724674, 0.4030159294447347]], [[\"Brightness\", 0.02739111568942537, 0.006028056532326534], [\"ShearX\", 0.17276751958646486, 0.05967365780621859]], [[\"TranslateY\", 0.4376298213047888, 0.7691816164456199], [\"Sharpness\", 0.8162292718857824, 0.6054926462265117]], [[\"Color\", 0.37963069679121214, 0.5946919433483344], [\"Posterize\", 0.08485417284005387, 0.5663580913231766]], [[\"Equalize\", 0.49785780226818316, 0.9999137109183761], [\"Sharpness\", 0.7685879484682496, 0.6260846154212211]], [[\"AutoContrast\", 0.4190931409670763, 0.2374852525139795], [\"Posterize\", 0.8797422264608563, 0.3184738541692057]], [[\"Rotate\", 0.7307269024632872, 0.41523609600701106], [\"ShearX\", 0.6166685870692289, 0.647133807748274]], [[\"Sharpness\", 0.5633713231039904, 0.8276694754755876], [\"Cutout\", 0.8329340776895764, 0.42656043027424073]], [[\"ShearY\", 0.14934828370884312, 0.8622510773680372], [\"Invert\", 0.25925989086863277, 0.8813283584888576]], [[\"Contrast\", 0.9457071292265932, 0.43228655518614034], [\"Sharpness\", 0.8485316947644338, 0.7590298998732413]], [[\"AutoContrast\", 0.8386103589399184, 0.5859583131318076], [\"Solarize\", 0.466758711343543, 0.9956215363818983]], [[\"Rotate\", 0.9387133710926467, 0.19180564509396503], [\"Rotate\", 0.5558247609706255, 0.04321698692007105]], [[\"ShearX\", 0.3608716600695567, 0.15206159451532864], [\"TranslateX\", 0.47295292905710146, 0.5290760596129888]], [[\"TranslateX\", 0.8357685981547495, 0.5991305115727084], [\"Posterize\", 0.5362929404188211, 0.34398525441943373]], [[\"ShearY\", 0.6751984031632811, 0.6066293622133011], [\"Contrast\", 0.4122723990263818, 0.4062467515095566]], [[\"Color\", 0.7515349936021702, 0.5122124665429213], [\"Contrast\", 0.03190514292904123, 0.22903520154660545]], [[\"Contrast\", 0.5448962625054385, 
0.38655673938910545], [\"AutoContrast\", 0.4867400684894492, 0.3433111101096984]], [[\"Rotate\", 0.0008372434310827959, 0.28599951781141714], [\"Equalize\", 0.37113686925530087, 0.5243929348114981]], [[\"Color\", 0.720054993488857, 0.2010177651701808], [\"TranslateX\", 0.23036196506059398, 0.11152764304368781]], [[\"Cutout\", 0.859134208332423, 0.6727345740185254], [\"ShearY\", 0.02159833505865088, 0.46390076266538544]], [[\"Sharpness\", 0.3428232157391428, 0.4067874527486514], [\"Brightness\", 0.5409415136577347, 0.3698432231874003]], [[\"Solarize\", 0.27303978936454776, 0.9832186173589548], [\"ShearY\", 0.08831127213044043, 0.4681870331149774]], [[\"TranslateY\", 0.2909309268736869, 0.4059460811623174], [\"Sharpness\", 0.6425125139803729, 0.20275737203293587]], [[\"Contrast\", 0.32167626214661627, 0.28636162794046977], [\"Invert\", 0.4712405253509603, 0.7934644799163176]], [[\"Color\", 0.867993060896951, 0.96574321666213], [\"Color\", 0.02233897320328512, 0.44478933557303063]], [[\"AutoContrast\", 0.1841254751814967, 0.2779992148017741], [\"Color\", 0.3586283093530607, 0.3696246850445087]], [[\"Posterize\", 0.2052935984046965, 0.16796913860308244], [\"ShearX\", 0.4807226832843722, 0.11296747254563266]], [[\"Cutout\", 0.2016411266364791, 0.2765295444084803], [\"Brightness\", 0.3054112810424313, 0.695924264931216]], [[\"Rotate\", 0.8405872184910479, 0.5434142541450815], [\"Cutout\", 0.4493615138203356, 0.893453735250007]], [[\"Contrast\", 0.8433310507685494, 0.4915423577963278], [\"ShearX\", 0.22567799557913246, 0.20129892537008834]], [[\"Contrast\", 0.045954277103674224, 0.5043900167190442], [\"Cutout\", 0.5552992473054611, 0.14436447810888237]], [[\"AutoContrast\", 0.7719296115130478, 0.4440417544621306], [\"Sharpness\", 0.13992809206158283, 0.7988278670709781]], [[\"Color\", 0.7838574233513952, 0.5971351401625151], [\"TranslateY\", 0.13562290583925385, 0.2253039635819158]], [[\"Cutout\", 0.24870301109385806, 0.6937886690381568], [\"TranslateY\", 
0.4033400068952813, 0.06253378991880915]], [[\"TranslateX\", 0.0036059390486775644, 0.5234723884081843], [\"Solarize\", 0.42724862530733526, 0.8697702564187633]], [[\"Equalize\", 0.5446026737834311, 0.9367992979112202], [\"ShearY\", 0.5943478903735789, 0.42345889214100046]], [[\"ShearX\", 0.18611885697957506, 0.7320849092947314], [\"ShearX\", 0.3796416430900566, 0.03817761920009881]], [[\"Posterize\", 0.37636778506979124, 0.26807924785236537], [\"Brightness\", 0.4317372554383255, 0.5473346211870932]], [[\"Brightness\", 0.8100436240916665, 0.3817612088285007], [\"Brightness\", 0.4193974619003253, 0.9685902764026623]], [[\"Contrast\", 0.701776402197012, 0.6612786008858009], [\"Color\", 0.19882787177960912, 0.17275597188875483]], [[\"Color\", 0.9538303302832989, 0.48362384535228686], [\"ShearY\", 0.2179980837345602, 0.37027290936457313]], [[\"TranslateY\", 0.6068028691503798, 0.3919346523454841], [\"Cutout\", 0.8228303342563138, 0.18372280287814613]], [[\"Equalize\", 0.016416758802906828, 0.642838949194916], [\"Cutout\", 0.5761717838655257, 0.7600661153497648]], [[\"Color\", 0.9417761826818639, 0.9916074035986558], [\"Equalize\", 0.2524209308597042, 0.6373703468715077]], [[\"Brightness\", 0.75512589439513, 0.6155072321007569], [\"Contrast\", 0.32413476940254515, 0.4194739830159837]], [[\"Sharpness\", 0.3339450765586968, 0.9973297539194967], [\"AutoContrast\", 0.6523930242124429, 0.1053482471037186]], [[\"ShearX\", 0.2961391955838801, 0.9870036064904368], [\"ShearY\", 0.18705025965909403, 0.4550895821154484]], [[\"TranslateY\", 0.36956447983807883, 0.36371471767143543], [\"Sharpness\", 0.6860051967688487, 0.2850190720087796]], [[\"Cutout\", 0.13017742151902967, 0.47316674150067195], [\"Invert\", 0.28923829959551883, 0.9295585654924601]], [[\"Contrast\", 0.7302368472279086, 0.7178974949876642], [\"TranslateY\", 0.12589674152030433, 0.7485392909494947]], [[\"Color\", 0.6474693117772619, 0.5518269515590674], [\"Contrast\", 0.24643004970708016, 0.3435581358079418]], 
[[\"Contrast\", 0.5650327855750835, 0.4843031798040887], [\"Brightness\", 0.3526684005761239, 0.3005305004600969]], [[\"Rotate\", 0.09822284968122225, 0.13172798244520356], [\"Equalize\", 0.38135066977857157, 0.5135129123554154]], [[\"Contrast\", 0.5902590645585712, 0.2196062383730596], [\"ShearY\", 0.14188379126120954, 0.1582612142182743]], [[\"Cutout\", 0.8529913814417812, 0.89734031211874], [\"Color\", 0.07293767043078672, 0.32577659205278897]], [[\"Equalize\", 0.21401668971453247, 0.040015259500028266], [\"ShearY\", 0.5126400895338797, 0.4726484828276388]], [[\"Brightness\", 0.8269430025954498, 0.9678362841865166], [\"ShearY\", 0.17142069814830432, 0.4726727848289514]], [[\"Brightness\", 0.699707089334018, 0.2795501395789335], [\"ShearX\", 0.5308818178242845, 0.10581814221896294]], [[\"Equalize\", 0.32519644258946145, 0.15763390340309183], [\"TranslateX\", 0.6149090364414208, 0.7454832565718259]], [[\"AutoContrast\", 0.5404508567155423, 0.7472387762067986], [\"Equalize\", 0.05649876539221024, 0.5628180219887216]]]\n    return p\n\n\ndef fa_reduced_svhn():\n    p = [[[\"TranslateX\", 0.001576965129744562, 0.43180488809874773], [\"Invert\", 0.7395307279252639, 0.7538444307982558]], [[\"Contrast\", 0.5762062225409211, 0.7532431872873473], [\"TranslateX\", 0.45212523461624615, 0.02451684483019846]], [[\"Contrast\", 0.18962433143225088, 0.29481185671147325], [\"Contrast\", 0.9998112218299271, 0.813015355163255]], [[\"Posterize\", 0.9633391295905683, 0.4136786222304747], [\"TranslateY\", 0.8011655496664203, 0.44102126789970797]], [[\"Color\", 0.8231185187716968, 0.4171602946893402], [\"TranslateX\", 0.8684965619113907, 0.36514568324909674]], [[\"Color\", 0.904075230324581, 0.46319140331093767], [\"Contrast\", 0.4115196534764559, 0.7773329158740563]], [[\"Sharpness\", 0.6600262774093967, 0.8045637700026345], [\"TranslateY\", 0.5917663766021198, 0.6844241908520602]], [[\"AutoContrast\", 0.16223989311434306, 0.48169653554195924], [\"ShearX\", 0.5433173232860344, 
0.7460278151912152]], [[\"ShearX\", 0.4913604762760715, 0.83391837859561], [\"Color\", 0.5580367056511908, 0.2961512691312932]], [[\"Color\", 0.18567091721211237, 0.9296983204905286], [\"Cutout\", 0.6074026199060156, 0.03303273406448193]], [[\"Invert\", 0.8049054771963224, 0.1340792344927909], [\"Color\", 0.4208839940504979, 0.7096454840962345]], [[\"ShearX\", 0.7997786664546294, 0.6492629575700173], [\"AutoContrast\", 0.3142777134084793, 0.6526010594925064]], [[\"TranslateX\", 0.2581027144644976, 0.6997433332894101], [\"Rotate\", 0.45490480973606834, 0.238620570022944]], [[\"Solarize\", 0.837397161027719, 0.9311141273136286], [\"Contrast\", 0.640364826293148, 0.6299761518677469]], [[\"Brightness\", 0.3782457347141744, 0.7085036717054278], [\"Brightness\", 0.5346150083208507, 0.5858930737867671]], [[\"Invert\", 0.48780391510474086, 0.610086407879722], [\"Color\", 0.5601999247616932, 0.5393836220423195]], [[\"Brightness\", 0.00250086643283564, 0.5003355864896979], [\"Brightness\", 0.003922153283353616, 0.41107110154584925]], [[\"TranslateX\", 0.4073069009685957, 0.9843435292693372], [\"Invert\", 0.38837085318721926, 0.9298542033875989]], [[\"ShearY\", 0.05479740443795811, 0.9113983424872698], [\"AutoContrast\", 0.2181108114232728, 0.713996037012164]], [[\"Brightness\", 0.27747508429413903, 0.3217467607288693], [\"ShearX\", 0.02715239061946995, 0.5430731635396449]], [[\"Sharpness\", 0.08994432959374538, 0.004706443546453831], [\"Posterize\", 0.10768206853226996, 0.39020299239900236]], [[\"Cutout\", 0.37498679037853905, 0.20784809761469553], [\"Color\", 0.9825516352194511, 0.7654155662756019]], [[\"Color\", 0.8899349124453552, 0.7797700766409008], [\"Rotate\", 0.1370222187174981, 0.2622119295138398]], [[\"Cutout\", 0.7088223332663685, 0.7884456023190028], [\"Solarize\", 0.5362257505160836, 0.6426837537811545]], [[\"Invert\", 0.15686225694987552, 0.5500563899117913], [\"Rotate\", 0.16315224193260078, 0.4246854030170752]], [[\"Rotate\", 0.005266247922433631, 
0.06612026206223394], [\"Contrast\", 0.06494357829209037, 0.2738420319474947]], [[\"Cutout\", 0.30200619566806275, 0.06558008068236942], [\"Rotate\", 0.2168576483823022, 0.878645566986328]], [[\"Color\", 0.6358930679444622, 0.613404714161498], [\"Rotate\", 0.08733206733004326, 0.4348276574435751]], [[\"Cutout\", 0.8834634887239585, 0.0006853845293474659], [\"Solarize\", 0.38132051231951847, 0.42558752668491195]], [[\"ShearY\", 0.08830136548479937, 0.5522438878371283], [\"Brightness\", 0.23816560427834074, 0.3033709051157141]], [[\"Solarize\", 0.9015331490756151, 0.9108788708847556], [\"Contrast\", 0.2057898014670072, 0.03260096030427456]], [[\"Equalize\", 0.9455978685121174, 0.14850077333434056], [\"TranslateY\", 0.6888705996522545, 0.5300565492007543]], [[\"Cutout\", 0.16942673959343585, 0.7294197201361826], [\"TranslateX\", 0.41184830642301534, 0.7060207449376135]], [[\"Color\", 0.30133344118702166, 0.24384417956342314], [\"Sharpness\", 0.4640904544421743, 0.32431840288061864]], [[\"Sharpness\", 0.5195055033472676, 0.9386677467005835], [\"Color\", 0.9536519432978372, 0.9624043444556467]], [[\"Rotate\", 0.8689597230556101, 0.23955490826730633], [\"Contrast\", 0.050071600927462656, 0.1309891556004179]], [[\"Cutout\", 0.5349421090878962, 0.08239510727779054], [\"Rotate\", 0.46064964710717216, 0.9037689320897339]], [[\"AutoContrast\", 0.5625256909986802, 0.5358003783186498], [\"Equalize\", 0.09204330691163354, 0.4386906784850649]], [[\"ShearX\", 0.0011061172864470226, 0.07150284682189278], [\"AutoContrast\", 0.6015956946553209, 0.4375362295530898]], [[\"ShearY\", 0.25294276499800983, 0.7937560397859562], [\"Brightness\", 0.30834103299704474, 0.21960258701547009]], [[\"Posterize\", 0.7423948904688074, 0.4598609935109695], [\"Rotate\", 0.5510348811675979, 0.26763724868985933]], [[\"TranslateY\", 0.3208729319318745, 0.945513054853888], [\"ShearX\", 0.4916473963030882, 0.8743840560039451]], [[\"ShearY\", 0.7557718687011286, 0.3125397104722828], [\"Cutout\", 
0.5565359791865849, 0.5151359251135629]], [[\"AutoContrast\", 0.16652786355571275, 0.1101575800958632], [\"Rotate\", 0.05108851703032641, 0.2612966401802814]], [[\"Brightness\", 0.380296489835016, 0.0428162454174662], [\"ShearX\", 0.3911934083168285, 0.18933607362790178]], [[\"Color\", 0.002476250465397678, 0.07795275305347571], [\"Posterize\", 0.08131841266654188, 0.14843363184306413]], [[\"Cutout\", 0.36664558716104434, 0.20904484995063996], [\"Cutout\", 0.07986452057223141, 0.9287747671053432]], [[\"Color\", 0.9296812469919231, 0.6634239915141935], [\"Rotate\", 0.07632463573240006, 0.408624029443747]], [[\"Cutout\", 0.7594470171961278, 0.9834672124229463], [\"Solarize\", 0.4471371303745053, 0.5751101102286562]], [[\"Posterize\", 0.051186719734032285, 0.5110941294710823], [\"Sharpness\", 0.040432522797391596, 0.42652298706992164]], [[\"Sharpness\", 0.2645335264327221, 0.8844553189835457], [\"Brightness\", 0.7229600357932696, 0.16660749270785696]], [[\"Sharpness\", 0.6296376086802589, 0.15564989758083458], [\"Sharpness\", 0.7913410481400365, 0.7022615408082826]], [[\"Cutout\", 0.5517247347343883, 0.43794888517764674], [\"ShearX\", 0.6951051782530201, 0.6230992857867065]], [[\"ShearX\", 0.9015708556331022, 0.6322135168527783], [\"Contrast\", 0.4285629283441831, 0.18158321019502988]], [[\"Brightness\", 0.9014292329524769, 0.3660463325457713], [\"Invert\", 0.6700729097206592, 0.16502732071917703]], [[\"AutoContrast\", 0.6432764477303431, 0.9998909112400834], [\"Invert\", 0.8124063975545761, 0.8149683327882365]], [[\"Cutout\", 0.6023944009428617, 0.9630976951918225], [\"ShearX\", 0.2734723568803071, 0.3080911542121765]], [[\"Sharpness\", 0.048949115014412806, 0.44497866256845164], [\"Brightness\", 0.5611832867244329, 0.12994217480426257]], [[\"TranslateY\", 0.4619112333002525, 0.47317728091588396], [\"Solarize\", 0.618638784910472, 0.9508297099190338]], [[\"Sharpness\", 0.9656274391147018, 0.3402622993963962], [\"Cutout\", 0.8452511174508919, 0.3094717093312621]], 
[[\"ShearX\", 0.04942201651478659, 0.6910568465705691], [\"AutoContrast\", 0.7155342517619936, 0.8565418847743523]], [[\"Brightness\", 0.5222290590721783, 0.6462675303633422], [\"Sharpness\", 0.7756317511341633, 0.05010730683866704]], [[\"Contrast\", 0.17098396012942796, 0.9128908626236187], [\"TranslateY\", 0.1523815376677518, 0.4269909829886339]], [[\"Cutout\", 0.7679024720089866, 0.22229116396644455], [\"Sharpness\", 0.47714827844878843, 0.8242815864830401]], [[\"Brightness\", 0.9321772357292445, 0.11339758604001371], [\"Invert\", 0.7021078495093375, 0.27507749184928154]], [[\"ShearY\", 0.7069449324510433, 0.07262757954730437], [\"Cutout\", 0.6298690227159313, 0.8866813664859028]], [[\"ShearX\", 0.8153137620199989, 0.8478194179953927], [\"ShearX\", 0.7519451353411938, 0.3914579556959725]], [[\"Cutout\", 0.07152574469472753, 0.2629935229222503], [\"TranslateX\", 0.43728405510089485, 0.2610201002449789]], [[\"AutoContrast\", 0.5824529633013098, 0.5619551536261955], [\"Rotate\", 0.45434137552116965, 0.7567169855140041]], [[\"TranslateY\", 0.9338431187142137, 0.14230481341042783], [\"Cutout\", 0.744797723251028, 0.4346601666787713]], [[\"ShearX\", 0.3197252560289169, 0.8770408070016171], [\"Color\", 0.7657013088540465, 0.2685586719812284]], [[\"ShearY\", 0.6542181749801549, 0.8148188744344297], [\"Sharpness\", 0.5108985661436543, 0.9926016115463769]], [[\"ShearY\", 0.39218730620135694, 0.857769946478945], [\"Color\", 0.39588355914920886, 0.9910530523789284]], [[\"Invert\", 0.4993610396803735, 0.08449723470758526], [\"TranslateX\", 0.46267456928508305, 0.46691125646493964]], [[\"Equalize\", 0.8640576819821256, 0.3973808869887604], [\"ShearY\", 0.5491163877063172, 0.422429328786161]], [[\"Contrast\", 0.6146206387722841, 0.8453559854684094], [\"TranslateX\", 0.7974333014574718, 0.47395476786951773]], [[\"Contrast\", 0.6828704722015236, 0.6952755697785722], [\"Brightness\", 0.7903069452567497, 0.8350915035109574]], [[\"Rotate\", 0.1211091761531299, 0.9667702562228727], 
[\"Color\", 0.47888534537103344, 0.8298620028065332]], [[\"Equalize\", 0.20009722872711086, 0.21851235854853018], [\"Invert\", 0.4433641154198673, 0.41902203581091935]], [[\"AutoContrast\", 0.6333190204577053, 0.23965630032835372], [\"Color\", 0.38651217030044804, 0.06447323778198723]], [[\"Brightness\", 0.378274337541471, 0.5482593116308322], [\"Cutout\", 0.4856574442608347, 0.8889688535495244]], [[\"Rotate\", 0.8201259323479384, 0.7404525573938633], [\"Color\", 0.28371236449364595, 0.7866003515933161]], [[\"Brightness\", 0.10053196350009105, 0.18814037089411267], [\"Sharpness\", 0.5572102497672569, 0.04458217557977126]], [[\"AutoContrast\", 0.6445330112376135, 0.48082049184921843], [\"TranslateY\", 0.378898917914949, 0.9338102625289362]], [[\"AutoContrast\", 0.08482623401924708, 0.25199930695784384], [\"Solarize\", 0.5981823550521426, 0.19626357596662092]], [[\"Solarize\", 0.4373030803918095, 0.22907881245285625], [\"AutoContrast\", 0.6383084635487905, 0.29517603235993883]], [[\"AutoContrast\", 0.922112624726991, 0.29398098144910145], [\"AutoContrast\", 0.8550184811514672, 0.8030331582292343]], [[\"ShearX\", 0.38761582800913896, 0.06304125015084923], [\"Contrast\", 0.3225758804984975, 0.7089696696094797]], [[\"TranslateY\", 0.27499498563849206, 0.1917583097241206], [\"Color\", 0.5845853711746438, 0.5353520071667661]], [[\"ShearY\", 0.530881951424285, 0.47961248148116453], [\"ShearX\", 0.04666387744533289, 0.275772822690165]], [[\"Solarize\", 0.5727309318844802, 0.02889734544563341], [\"AutoContrast\", 0.638852434854615, 0.9819440776921611]], [[\"AutoContrast\", 0.9766868312173507, 0.9651796447738792], [\"AutoContrast\", 0.3489760216898085, 0.3082182741354106]], [[\"Sharpness\", 0.13693510871346704, 0.08297205456926067], [\"Contrast\", 0.3155812019005854, 0.031402991638917896]], [[\"TranslateY\", 0.2664707540547008, 0.4838091910041236], [\"ShearX\", 0.5935665395229432, 0.7813088248538167]], [[\"ShearY\", 0.7578577752251343, 0.5116014090216161], [\"ShearX\", 
0.8332831240873545, 0.26781876290841017]], [[\"TranslateY\", 0.473254381651761, 0.4203181582821155], [\"ShearY\", 0.732848696900726, 0.47895514793728433]], [[\"Solarize\", 0.6922689176672292, 0.36403255869823725], [\"AutoContrast\", 0.910654040826914, 0.888651414068326]], [[\"ShearX\", 0.37326536936166244, 0.47830923320699525], [\"Equalize\", 0.4724702976076929, 0.8176108279939023]], [[\"Contrast\", 0.3839906424759326, 0.09109695563933692], [\"Invert\", 0.36305435543972325, 0.5701589223795499]], [[\"Invert\", 0.5175591137387999, 0.38815675919253867], [\"TranslateY\", 0.1354848160153554, 0.41734106283245065]], [[\"Color\", 0.829616006981199, 0.18631472346156963], [\"Color\", 0.2465115448326214, 0.9439365672808333]], [[\"Contrast\", 0.18207939197942158, 0.39841173152850873], [\"ShearX\", 0.16723588254695632, 0.2868649619006758]], [[\"Posterize\", 0.1941909136988733, 0.6322499882557473], [\"Contrast\", 0.6109060391509794, 0.27329598688783296]], [[\"AutoContrast\", 0.9148775146158022, 0.09129288311923844], [\"Sharpness\", 0.4222442287436423, 0.847961820057229]], [[\"Color\", 0.21084007475489852, 0.008218056412554131], [\"Contrast\", 0.43996934555301637, 0.500680146508504]], [[\"ShearY\", 0.6745287915240038, 0.6120305524405164], [\"Equalize\", 0.467403794543269, 0.2207148995882467]], [[\"Color\", 0.7712823974371379, 0.2839161885566902], [\"Color\", 0.8725368489709752, 0.3349470222415115]], [[\"Solarize\", 0.5563976601161562, 0.540446614847802], [\"Invert\", 0.14228071175107454, 0.2242332811481905]], [[\"Contrast\", 0.34596757983998383, 0.9158971503395041], [\"Cutout\", 0.6823724203724072, 0.5221518922863516]], [[\"Posterize\", 0.3275475232882672, 0.6520033254468702], [\"Color\", 0.7434224109271398, 0.0824308188060544]], [[\"Cutout\", 0.7295122229650082, 0.277887573018184], [\"Brightness\", 0.5303655506515258, 0.28628046739964497]], [[\"Color\", 0.8533293996815943, 0.24909788223027743], [\"Color\", 0.6915962825167857, 0.33592561040195834]], [[\"TranslateX\", 
0.0761441550001345, 0.7043906245420134], [\"Equalize\", 0.670845297717783, 0.30986063097084215]], [[\"Contrast\", 0.30592723366237995, 0.7365013059287382], [\"Color\", 0.6173835128817455, 0.6417028717640598]], [[\"Rotate\", 0.05558240682703821, 0.7284722849011761], [\"Color\", 0.7814801133853666, 0.13335113981884217]], [[\"ShearY\", 0.6521743070190724, 0.6272195913574455], [\"Rotate\", 0.36278432239870423, 0.2335623679787695]], [[\"Color\", 0.6799351102482663, 0.3850250771244986], [\"Brightness\", 0.613901077818094, 0.2374900558949702]], [[\"Color\", 0.551451255148252, 0.7284757153447965], [\"Solarize\", 0.4863815212982878, 0.3857941567681324]], [[\"Contrast\", 0.32516343965159267, 0.689921852601276], [\"Cutout\", 0.5922142001124506, 0.7709605594115009]], [[\"Brightness\", 0.23760063764495856, 0.6392077018854179], [\"Brightness\", 0.7288124083714078, 0.4487520490201095]], [[\"Sharpness\", 0.5631112298553713, 0.6803534985114782], [\"ShearX\", 0.6743791169050775, 0.34039227245151127]], [[\"AutoContrast\", 0.8260911840078349, 0.7705607269534767], [\"Rotate\", 0.8880749478363638, 0.8182460047684648]], [[\"ShearY\", 0.7037620764408412, 0.5219573160970589], [\"Posterize\", 0.7186150466761102, 0.6187857686944253]], [[\"TranslateY\", 0.2140494926702246, 0.9104233882669488], [\"TranslateX\", 0.4096039512896902, 0.9692703030784571]], [[\"Equalize\", 0.5404313549028165, 0.04094078980738014], [\"AutoContrast\", 0.07870278300673744, 0.841020779977939]], [[\"ShearY\", 0.2684638876128488, 0.5599793678740521], [\"Cutout\", 0.19537995362704022, 0.2400995206366768]], [[\"AutoContrast\", 0.19366394417090382, 0.4130755503251951], [\"Sharpness\", 0.11735660606190662, 0.39276612830651914]], [[\"Cutout\", 0.8313266945081518, 0.37171822186374703], [\"Contrast\", 0.5088549187459019, 0.2956405118511817]], [[\"Cutout\", 0.28375485371479847, 0.37020183949342683], [\"Posterize\", 0.718761436947423, 0.2278804627251678]], [[\"ShearY\", 0.6625840735667625, 0.5045065697748213], [\"Rotate\", 
0.5175257698523389, 0.39496923901188824]], [[\"Color\", 0.6498154010188212, 0.38674158604408604], [\"Brightness\", 0.8157804892728057, 0.05660118670560971]], [[\"Color\", 0.5512855420254102, 0.7812054820692542], [\"Solarize\", 0.8851292984174468, 0.2808951606943277]], [[\"Contrast\", 0.35258433539074363, 0.8085377169629859], [\"Cutout\", 0.5197965849563265, 0.8657111726930974]], [[\"Cutout\", 0.23650925054419358, 0.746860862983295], [\"Brightness\", 0.8842190203336139, 0.4389347348156118]], [[\"Rotate\", 0.8651460526861932, 0.0031372441327392753], [\"Equalize\", 0.3909498933963822, 0.6221687914603954]], [[\"TranslateX\", 0.5793690303540427, 0.37939687327382987], [\"Invert\", 0.846172545690258, 0.36950442052945853]], [[\"Invert\", 0.5151721602607067, 0.5860134277259832], [\"Contrast\", 0.6868708526377458, 0.2188104093363727]], [[\"Contrast\", 0.28019632529718025, 0.8403553410328943], [\"Cutout\", 0.5238340355491738, 0.6948434115725599]], [[\"Rotate\", 0.1592592617684533, 0.5212044951482974], [\"Color\", 0.42404215473874546, 0.45894052919059103]], [[\"AutoContrast\", 0.21780978427851283, 0.11813011387113281], [\"Contrast\", 0.14557770349869537, 0.5468616480449002]], [[\"Cutout\", 0.03573873600256905, 0.8747186430368771], [\"AutoContrast\", 0.4804465018567564, 0.3968185812087325]], [[\"ShearY\", 0.027192162947493492, 0.35923750027515866], [\"Sharpness\", 0.03207302705814674, 0.25868625346023777]], [[\"AutoContrast\", 0.9111793886013045, 0.33534571661592005], [\"ShearY\", 0.31365410004768934, 0.37055495208177025]], [[\"Color\", 0.5119732811716222, 0.10635303813092001], [\"Solarize\", 0.9828759703639677, 0.33302532900783466]], [[\"Contrast\", 0.9652840964645487, 0.9550826002089741], [\"ShearY\", 0.16934262075572262, 0.35893022906919625]], [[\"Invert\", 0.21526903298837538, 0.5491812432380025], [\"TranslateX\", 0.27691575128765095, 0.9916365493500338]], [[\"AutoContrast\", 0.7223428288831728, 0.3001506080569529], [\"Posterize\", 0.28280773693692957, 0.5630226986948541]], 
[[\"TranslateY\", 0.5334698670580152, 0.4329627064903895], [\"Solarize\", 0.11621274404555687, 0.38564564358937725]], [[\"Brightness\", 0.9001900081991266, 0.15453762529292236], [\"Equalize\", 0.6749827304986464, 0.2174408558291521]], [[\"TranslateY\", 0.703293071780793, 0.20371204513522137], [\"Invert\", 0.7921926919880306, 0.2647654009616249]], [[\"AutoContrast\", 0.32650519442680254, 0.5567514700913352], [\"ShearY\", 0.7627653627354407, 0.5363510886152073]], [[\"Rotate\", 0.364293676091047, 0.4262321334071656], [\"Posterize\", 0.7284189361001443, 0.6052618047275847]], [[\"Contrast\", 0.004679138490284229, 0.6985327823420937], [\"Posterize\", 0.25412559986607497, 0.969098825421215]], [[\"ShearY\", 0.6831738973100172, 0.6916463366962687], [\"TranslateY\", 0.8744153159733203, 0.3667879549647143]], [[\"Posterize\", 0.39138456188265913, 0.8617909225610128], [\"TranslateX\", 0.5198303654364824, 0.5518823068009463]], [[\"Invert\", 0.6471155996761706, 0.4793957129423701], [\"ShearX\", 0.8046274258703997, 0.9711394307595065]], [[\"Solarize\", 0.2442520851809611, 0.5518114414771629], [\"Sharpness\", 0.02324109511463257, 0.18216585433541427]], [[\"Cutout\", 0.7004457278387007, 0.4904439660213413], [\"Contrast\", 0.6516622044646659, 0.7324290164242575]], [[\"Brightness\", 0.594212018801632, 0.5624822682300464], [\"ShearX\", 0.47929863548325596, 0.5610640338380719]], [[\"TranslateX\", 0.20863492063218445, 0.23761872077836552], [\"Color\", 0.9374148559524687, 0.06390809573246009]], [[\"AutoContrast\", 0.5548946725094693, 0.40547561665765874], [\"Equalize\", 0.26341425401933344, 0.2763692089379619]], [[\"Invert\", 0.8224614398122034, 0.15547159819315676], [\"Rotate\", 0.4915912924663281, 0.6995695827608112]], [[\"Equalize\", 0.05752620481520809, 0.80230125774557], [\"Rotate\", 0.16338857010673558, 0.8066738989167762]], [[\"ShearY\", 0.5437502855505825, 0.252101665309144], [\"Contrast\", 0.9268450172095902, 0.13437399256747992]], [[\"TranslateY\", 0.6946438457089812, 
0.35376889837139813], [\"Sharpness\", 0.15438234648960253, 0.2668696344562673]], [[\"Invert\", 0.24506516252953542, 0.1939315433476327], [\"Sharpness\", 0.8921986990130818, 0.21478051316241717]], [[\"TranslateY\", 0.5292829065905086, 0.6896826369723732], [\"Invert\", 0.4461047865540309, 0.9854416526561315]], [[\"Posterize\", 0.8085062334285464, 0.4538963572040656], [\"Brightness\", 0.2623572045603854, 0.16723779221170698]], [[\"Solarize\", 0.1618752496191097, 0.6007634864056693], [\"TranslateY\", 0.07808851801433346, 0.3951252736249746]], [[\"TranslateX\", 0.35426056783145843, 0.8875451782909476], [\"Brightness\", 0.5537927990151869, 0.3042790536918476]], [[\"Cutout\", 0.9051584028783342, 0.6050507821593669], [\"ShearX\", 0.31185875057627255, 0.39145181108334876]], [[\"Brightness\", 0.43157388465566776, 0.45511767545129933], [\"ShearY\", 0.626464342187273, 0.5251031991594401]], [[\"Contrast\", 0.7978520212540166, 0.45088491126800995], [\"ShearY\", 0.20415027867560143, 0.24369493783350643]], [[\"ShearX\", 0.48152242363853065, 0.001652619381325604], [\"Sharpness\", 0.6154899720956758, 0.22465778944283568]], [[\"Posterize\", 0.0008092255557418104, 0.8624848793450179], [\"Solarize\", 0.7580784903978838, 0.4141187863855049]], [[\"TranslateY\", 0.4829597846471378, 0.6077028815706373], [\"ShearX\", 0.43316420981872894, 0.007119694447608018]], [[\"Equalize\", 0.2914045973615852, 0.6298874433109889], [\"Cutout\", 0.18663096101056076, 0.20634383363149222]], [[\"TranslateX\", 0.6909947340830737, 0.40843889682671003], [\"ShearX\", 0.3693105697811625, 0.070573833710386]], [[\"Rotate\", 0.6184027722396339, 0.6483359499288176], [\"AutoContrast\", 0.8658233903089285, 0.31462524418660626]], [[\"Brightness\", 0.8165837262133947, 0.38138221738335765], [\"Contrast\", 0.01566790570443702, 0.1250581265407818]], [[\"Equalize\", 0.16745169701901802, 0.9239433721204139], [\"ShearY\", 0.5535908803004554, 0.35879199699526654]], [[\"Color\", 0.9675880875486578, 0.19745998576077994], 
[\"Posterize\", 0.641736196661405, 0.5702363593336868]], [[\"ShearY\", 0.27730895136251943, 0.4730273890919014], [\"Posterize\", 0.35829530316120517, 0.9040968539551122]], [[\"Cutout\", 0.9989158254302966, 0.3210048366589035], [\"Equalize\", 0.9226385492886618, 0.21132010337062]], [[\"Posterize\", 0.32861829410989934, 0.7608163668499222], [\"TranslateY\", 0.528381246453454, 0.6837459631017135]], [[\"ShearY\", 0.6786278797045173, 0.49006792710382946], [\"ShearX\", 0.7860409944610941, 0.7960317025665418]], [[\"Solarize\", 0.4420731874598513, 0.7163961196254427], [\"Sharpness\", 0.11927615232343353, 0.3649599343067734]], [[\"Cutout\", 0.4606157449857542, 0.4682141505042986], [\"Contrast\", 0.8955528913735222, 0.8468556570983498]], [[\"Brightness\", 0.5742349576881501, 0.5633914487991978], [\"ShearX\", 0.8288987143597276, 0.5937556836469728]], [[\"Posterize\", 0.05362153577922808, 0.40072961361335696], [\"Rotate\", 0.6681795049585278, 0.5348470042353504]], [[\"TranslateY\", 0.6190833866612555, 0.7338431624993972], [\"Color\", 0.5352400737236565, 0.1598194251940268]], [[\"Brightness\", 0.9942846465176832, 0.11918348505217388], [\"Brightness\", 0.0659098729688602, 0.6558077481794591]], [[\"Equalize\", 0.34089122700685126, 0.048940774058585546], [\"ShearX\", 0.5472987107071652, 0.2965222509150173]], [[\"Sharpness\", 0.3660728361470086, 0.37607120931207433], [\"Sharpness\", 0.9974987257291261, 0.2483317486035219]], [[\"Posterize\", 0.931283270966942, 0.7525022430475327], [\"Cutout\", 0.6299208568533524, 0.3313382622423058]], [[\"Invert\", 0.5074998650080915, 0.9722820836624784], [\"Solarize\", 0.13997049847474802, 0.19340041815763026]], [[\"AutoContrast\", 0.6804950477263457, 0.31675149536227815], [\"Solarize\", 0.800632422196852, 0.09054278636377117]], [[\"TranslateY\", 0.6886579465517867, 0.549118383513461], [\"Brightness\", 0.7298771973550124, 0.59421647759784]], [[\"Equalize\", 0.8117050130827859, 0.22494316766261946], [\"AutoContrast\", 0.5217061631918504, 
0.6106946809838144]], [[\"Equalize\", 0.4734718117645248, 0.7746036952254298], [\"Posterize\", 0.032049205574512685, 0.9681402692267316]], [[\"Brightness\", 0.4724177066851541, 0.7969700024018729], [\"Solarize\", 0.6930049134926459, 0.3880086567038069]], [[\"TranslateX\", 0.2833979092130342, 0.6873833799104118], [\"Rotate\", 0.37167767436617366, 0.03249352593350204]], [[\"Posterize\", 0.7080588381354884, 0.03014586990329654], [\"Posterize\", 0.20883930954891392, 0.1328596635826556]], [[\"Cutout\", 0.1992050307454733, 0.8079881690617468], [\"ShearY\", 0.3057279570820446, 0.34868823290010564]], [[\"TranslateY\", 0.6204358851346782, 0.24978856155434062], [\"ShearX\", 0.2403059671388028, 0.6706906799258086]], [[\"Contrast\", 0.5527380063918701, 0.27504242043334765], [\"Rotate\", 0.37361791978638376, 0.17818567121454373]], [[\"Cutout\", 0.3368229687890997, 0.013512329226772313], [\"Contrast\", 0.18480406673028238, 0.21653280083721013]], [[\"AutoContrast\", 0.13634047961070397, 0.5322441057075571], [\"Posterize\", 0.3409948654529233, 0.2562132228604077]], [[\"Invert\", 0.3375636037272626, 0.5417577242453775], [\"Sharpness\", 0.10271458969925179, 0.5125859420868099]], [[\"Invert\", 0.26465503753231256, 0.7386494688407392], [\"AutoContrast\", 0.5310106090963371, 0.14699248759273964]], [[\"Sharpness\", 0.8494538270706318, 0.9524607358113082], [\"Solarize\", 0.21142978953773187, 0.10711867917080763]], [[\"Equalize\", 0.5185117903942263, 0.06342404369282638], [\"ShearY\", 0.26812877371366156, 0.32386585917978056]], [[\"TranslateY\", 0.42724471339053904, 0.5218262942425845], [\"Brightness\", 0.7618037699290332, 0.5773256674209075]], [[\"Solarize\", 0.5683461491921462, 0.7988018975591509], [\"AutoContrast\", 0.21826664523938988, 0.4395073407383595]], [[\"Posterize\", 0.2564295537162734, 0.6778150727248975], [\"Equalize\", 0.7571361164411801, 0.4281744623444925]], [[\"Invert\", 0.5171620125994946, 0.8719074953677988], [\"ShearX\", 0.10216776728552601, 0.20888013515457593]], 
[[\"Equalize\", 0.934033636879294, 0.7724470445507672], [\"ShearX\", 0.14671590364536757, 0.06500753170863127]], [[\"Cutout\", 0.48433709681747783, 0.8989915985203363], [\"ShearY\", 0.5161346572684965, 0.3154078452465332]], [[\"AutoContrast\", 0.4337913490682531, 0.8651407398083308], [\"AutoContrast\", 0.31402168607643444, 0.5001710653814162]], [[\"Brightness\", 0.4805460794016203, 0.8182812769485313], [\"Equalize\", 0.6811585495672738, 0.25172380097389147]], [[\"TranslateX\", 0.05384872718386273, 0.7854623644701991], [\"Color\", 0.12583336502656287, 0.08656304042059215]], [[\"TranslateX\", 0.3949348949001942, 0.0668909826131569], [\"ShearX\", 0.2895255694762277, 0.23998090792480392]], [[\"TranslateY\", 0.3183346601371876, 0.5869865305603826], [\"Cutout\", 0.38601500458347904, 0.37785641359408184]], [[\"Sharpness\", 0.3676509660134142, 0.6370727445512337], [\"Rotate\", 0.17589815946040205, 0.912442427082365]], [[\"Equalize\", 0.46427003979798154, 0.7771177715171392], [\"Cutout\", 0.6622980582423883, 0.47780927252115374]], [[\"TranslateX\", 0.4535588156726688, 0.9548833090146791], [\"ShearY\", 0.18609208838268262, 0.034329918652624025]], [[\"Rotate\", 0.4896172340987028, 0.4842683413051553], [\"Brightness\", 0.08416972178617699, 0.2946109607041465]], [[\"TranslateY\", 0.1443363248914217, 0.7352253161146544], [\"ShearX\", 0.025210952382823004, 0.6249971039957651]], [[\"Brightness\", 0.08771030702840285, 0.5926338109828604], [\"Contrast\", 0.629121304110493, 0.36114268164347396]], [[\"Cutout\", 0.003318169533990778, 0.984234627407162], [\"Color\", 0.5656264894233379, 0.9913705503959709]], [[\"Cutout\", 0.17582168928005226, 0.5163176285036686], [\"Sharpness\", 0.42976684239235224, 0.9936723374147685]], [[\"Rotate\", 0.13343297511611085, 0.730719022391835], [\"Cutout\", 0.43419793455016154, 0.9802436121876401]], [[\"ShearX\", 0.8761482122895571, 0.11688364945899332], [\"Solarize\", 0.6071032746712549, 0.9972373138154098]], [[\"Contrast\", 0.2721995133325574, 
0.9467839388553563], [\"AutoContrast\", 0.357368427575824, 0.6530359095247653]], [[\"Equalize\", 0.5334298945812708, 0.7157629957411794], [\"Brightness\", 0.8885107405370157, 0.2909013041171791]], [[\"Equalize\", 0.4907081744271751, 0.9999203497290372], [\"ShearX\", 0.0055186544890628575, 0.20501406304441697]], [[\"Color\", 0.4865852751351166, 0.14717278223914915], [\"TranslateX\", 0.0492335566831905, 0.01654291587484527]], [[\"Contrast\", 0.3753662301521211, 0.866484274102244], [\"Color\", 0.21148416029328898, 0.37861792266657684]], [[\"TranslateY\", 0.03960047686663052, 0.9948086048192006], [\"TranslateX\", 0.5802633545422445, 0.7696464344779717]], [[\"Contrast\", 0.6456791961464718, 0.6304663998505495], [\"Sharpness\", 0.594774521429873, 0.8024138008893688]], [[\"Equalize\", 0.5326123709954759, 0.7361990154971826], [\"Invert\", 0.5337609996065145, 0.06826577456972233]], [[\"ShearY\", 0.7177596430755101, 0.16672206074906565], [\"Equalize\", 0.1847132768987843, 0.16186121936769876]], [[\"ShearY\", 0.037342495065949534, 0.7762322168034441], [\"Rotate\", 0.28731231550023495, 0.4605573565280328]], [[\"Contrast\", 0.6815742688289678, 0.04073638022156048], [\"Cutout\", 0.20201133153964437, 0.048429819360450654]], [[\"Color\", 0.5295323372448824, 0.8591352159356821], [\"Posterize\", 0.7743900815037675, 0.8308865010050488]], [[\"Solarize\", 0.9325362059095493, 0.4070769736318192], [\"Contrast\", 0.09359008071252661, 0.2808191171337515]], [[\"Sharpness\", 0.6413241263332543, 0.5493867784897841], [\"Solarize\", 0.021951790397463734, 0.1045868634597023]], [[\"Color\", 0.006027943433085061, 0.698043169126901], [\"TranslateX\", 0.06672167045857719, 0.6096719632236709]], [[\"TranslateX\", 0.42167004878865333, 0.8844171486107537], [\"Color\", 0.12383835252312375, 0.9559595374068695]], [[\"Posterize\", 0.5382560989047361, 0.6014252438301297], [\"Color\", 0.26197040526014054, 0.3423981550778665]], [[\"Cutout\", 0.33150268513579584, 0.40828564490879615], [\"AutoContrast\", 
0.6907753092981255, 0.05779246756831708]], [[\"Equalize\", 0.31608006376116865, 0.9958870759781376], [\"TranslateY\", 0.15842255624921547, 0.5764254535539765]], [[\"Contrast\", 0.19859706438565994, 0.12680764238281503], [\"TranslateY\", 0.4694115475285127, 0.45831161348904836]], [[\"TranslateX\", 0.18768081492494126, 0.7718605539481094], [\"Cutout\", 0.2340834739291012, 0.3290460999084155]], [[\"Posterize\", 0.17300123510877463, 0.5276823821218432], [\"AutoContrast\", 0.5861008799330297, 0.31557924295308126]], [[\"TranslateX\", 0.36140745478517367, 0.4172762477431993], [\"Sharpness\", 0.6518477061748665, 0.9033991248207786]], [[\"AutoContrast\", 0.1757278990984992, 0.9562490311064124], [\"Invert\", 0.43712652497757065, 0.26925880337078234]], [[\"TranslateX\", 0.38113274849599377, 0.35742156735271613], [\"TranslateY\", 0.47708889990018216, 0.7975974044609476]], [[\"Brightness\", 0.39538470887490523, 0.09692156164771923], [\"Equalize\", 0.876825166573471, 0.0979346217138612]], [[\"Solarize\", 0.07679586061933875, 0.45996163577975313], [\"Invert\", 0.039726680682847904, 0.23574574397443826]], [[\"ShearX\", 0.9739648414905278, 0.5217986621319772], [\"TranslateY\", 0.21653455086845896, 0.30415852174016683]], [[\"TranslateY\", 0.26965366633030263, 0.4355259497820251], [\"Sharpness\", 0.6343493801543757, 0.9337027079656623]], [[\"Rotate\", 0.42301232492240126, 0.07813015342326983], [\"AutoContrast\", 0.28524730310382906, 0.24127293503900557]], [[\"Color\", 0.826300213905907, 0.008451115447607682], [\"Equalize\", 0.6770124607838715, 0.2889698349030014]], [[\"Cutout\", 0.3461911530045792, 0.7481322146924341], [\"Brightness\", 0.1831459184570124, 0.5487074846857195]], [[\"Brightness\", 0.8455429603962046, 0.4838335496721761], [\"Cutout\", 0.5778222397066808, 0.7789798279724414]], [[\"Brightness\", 0.7859388330361665, 0.5907006126719181], [\"Brightness\", 0.5299842953874527, 0.008670514958094622]], [[\"Rotate\", 0.9584331504536162, 0.7242692977964363], [\"TranslateY\", 
0.46941406313257866, 0.748911298847083]], [[\"AutoContrast\", 0.5878130357161462, 0.25218818797390996], [\"Solarize\", 0.815466142337258, 0.20231731395730107]], [[\"ShearX\", 0.15594838773787617, 0.9764784874102524], [\"TranslateY\", 0.5805369037495945, 0.1412009058745196]], [[\"Sharpness\", 0.7936370935749524, 0.5142489498674206], [\"Sharpness\", 0.1544307510097193, 0.3678451501088748]], [[\"TranslateY\", 0.29391437860633873, 0.3520843012638746], [\"Brightness\", 0.5885278199370352, 0.04915265122854349]], [[\"AutoContrast\", 0.3329771519033218, 0.2459852352278583], [\"Equalize\", 0.8674782697650298, 0.2900192232303214]], [[\"Cutout\", 0.58997726901359, 0.9910393463442352], [\"Contrast\", 0.09792234559792412, 0.23341828880112486]], [[\"Cutout\", 0.4643317809492098, 0.3224299097542076], [\"TranslateY\", 0.7998033586490294, 0.27086436352896565]], [[\"AutoContrast\", 0.13138317155414905, 0.3419742927322439], [\"TranslateY\", 0.05413070060788905, 0.5504283113763994]], [[\"Posterize\", 0.3645493423712921, 0.10684861674653627], [\"Color\", 0.6343589365592908, 0.9712261380583729]], [[\"Color\", 0.06539862123316142, 0.34370535435837324], [\"Equalize\", 0.8098077629435421, 0.1272416658849032]], [[\"Invert\", 0.3600258964493429, 0.7455698641930473], [\"Color\", 0.4118102215241555, 0.4489347750419333]], [[\"Sharpness\", 0.2230673636976691, 0.2240713255305713], [\"AutoContrast\", 0.5039292091174429, 0.033700713206763835]], [[\"ShearX\", 0.10611028325684749, 0.4235430688519599], [\"Brightness\", 0.354597328722803, 0.6835155193055997]], [[\"ShearX\", 0.101313662029975, 0.3048854771395032], [\"ShearX\", 0.39832929626318425, 0.5569152062399838]], [[\"ShearX\", 0.46033087857932264, 0.5976525683159943], [\"Color\", 0.8117411866929898, 0.22950658046373415]], [[\"Cutout\", 0.04125062306390376, 0.5021647863925347], [\"TranslateY\", 0.4949139091550513, 0.40234738545601595]], [[\"TranslateX\", 0.9982425877241792, 0.3912268450702254], [\"Cutout\", 0.8094853705295444, 0.4628037417520003]], 
[[\"Contrast\", 0.47154787535001147, 0.5116549800625204], [\"Invert\", 0.4929108509901112, 0.713690694626014]], [[\"ShearX\", 0.3073913369156325, 0.5912409524756753], [\"Equalize\", 0.5603975982699875, 0.12046838435247365]], [[\"TranslateY\", 0.8622939212850868, 0.057802109037417344], [\"Invert\", 0.7577173459800602, 0.33727019024447835]], [[\"Cutout\", 0.3646694663986778, 0.6285264075514656], [\"Color\", 0.5589259087346165, 0.6650676195317845]], [[\"Invert\", 0.8563008117600374, 0.6216056385231019], [\"AutoContrast\", 0.7575002303510038, 0.6906934785154547]], [[\"ShearX\", 0.4415411885102101, 0.301535484182858], [\"TranslateY\", 0.779716145113622, 0.5792057745092073]], [[\"Invert\", 0.10736083594024397, 0.10640910911300788], [\"Posterize\", 0.5923391813408784, 0.5437447559328059]], [[\"Color\", 0.4745215286268124, 0.08046291318852558], [\"Rotate\", 0.1642897827127771, 0.20754337935267492]], [[\"Invert\", 0.3141086213412405, 0.5865422721808763], [\"AutoContrast\", 0.7551954144793225, 0.5588044000850431]], [[\"Equalize\", 0.979500405577596, 0.6846916489547885], [\"Rotate\", 0.11257616752512875, 0.8137724117751907]], [[\"Equalize\", 0.6315666801659133, 0.71548254701219], [\"Cutout\", 0.38805635642306224, 0.29282906744304604]], [[\"Posterize\", 0.022485702859896456, 0.2794994040845844], [\"Color\", 0.4554990465860552, 0.5842888808848151]], [[\"Invert\", 0.15787502346886398, 0.5137397924063724], [\"TranslateY\", 0.487638703473969, 0.6428121360825987]], [[\"Rotate\", 0.20473927977443407, 0.6090899892067203], [\"Contrast\", 0.3794752343740154, 0.8056548374185936]], [[\"AutoContrast\", 0.35889225269685354, 0.7311496777471619], [\"Sharpness\", 0.10152796686794396, 0.34768639850633193]], [[\"Rotate\", 0.6298704242033275, 0.09649334401126405], [\"Solarize\", 0.24713244934163017, 0.4292117526982358]], [[\"Contrast\", 0.9851015107131748, 0.30895068679118054], [\"Sharpness\", 0.7167845732283787, 0.36269175386392893]], [[\"Equalize\", 0.49699932368219435, 0.21262924430159158], 
[\"Contrast\", 0.8497731498354579, 0.672321242252727]], [[\"ShearX\", 0.18955591368056923, 0.47178691165954034], [\"Sharpness\", 0.17732805705271348, 0.5486957094984023]], [[\"ShearY\", 0.5087926728214892, 0.8236809302978783], [\"AutoContrast\", 0.9661195881001936, 0.1309360428195535]], [[\"Rotate\", 0.7825835251082691, 0.8292427086033229], [\"TranslateX\", 0.2034110174253454, 0.4073091408820304]], [[\"Cutout\", 0.33457316681888716, 0.480098511703719], [\"Sharpness\", 0.8686004956803908, 0.21719357589897192]], [[\"ShearX\", 0.30750577846813, 0.6349236735519613], [\"Color\", 0.5096781256213182, 0.5367289796478476]], [[\"Rotate\", 0.7881847986981432, 0.846966895144323], [\"Posterize\", 0.33955649631388407, 0.9484449471562024]], [[\"Posterize\", 0.5154127791998345, 0.8765287012129974], [\"Posterize\", 0.09621562708431097, 0.42108077474553995]], [[\"ShearX\", 0.5513772653411826, 0.27285892893658015], [\"AutoContrast\", 0.027608088485522986, 0.1738173285576814]], [[\"Equalize\", 0.7950881609822011, 0.05938388811616446], [\"ShearX\", 0.7864733097562856, 0.5928584864954718]], [[\"Equalize\", 0.03401947599579436, 0.4936643525799874], [\"Solarize\", 0.8445332527647407, 0.4695434980914176]], [[\"AutoContrast\", 0.9656295942383031, 0.6330670076537706], [\"Brightness\", 0.303859679517296, 0.8882002295195086]], [[\"ShearY\", 0.5242765280639856, 0.7977406809732712], [\"Rotate\", 0.24810823616083127, 0.41392557985700773]], [[\"Posterize\", 0.6824268148168342, 0.21831492475831715], [\"ShearY\", 0.0008811906288737209, 0.1939566265644924]], [[\"ShearY\", 0.8413370823124643, 0.7075999817793881], [\"Brightness\", 0.7942266192900009, 0.0384845738170444]], [[\"ShearY\", 0.9003919463843213, 0.5068340457708402], [\"AutoContrast\", 0.9990937631537938, 0.35323621376481695]], [[\"Contrast\", 0.3266913024108897, 0.5470774782762176], [\"Contrast\", 0.31235464476196995, 0.5723334696204473]], [[\"AutoContrast\", 0.40137522654585955, 0.4274859892417776], [\"Sharpness\", 0.6173858127038773, 
0.9629236289042568]], [[\"Sharpness\", 0.3728210261025356, 0.7873518787942092], [\"Solarize\", 0.4319848902062112, 0.799524274852396]], [[\"Sharpness\", 0.009379857090624758, 0.3143858944787348], [\"ShearY\", 0.20273037650420184, 0.3501104740582885]], [[\"Color\", 0.1837135820716444, 0.5709648984713641], [\"Solarize\", 0.36312838060628455, 0.3753448575775562]], [[\"Cutout\", 0.3400431457353702, 0.6871688775988243], [\"ShearX\", 0.42524570507364123, 0.7108865889616602]], [[\"Sharpness\", 0.30703348499729893, 0.885278643437672], [\"Cutout\", 0.04407034125935705, 0.6821013415071144]], [[\"Brightness\", 0.7164362367177879, 0.3383891625406651], [\"Posterize\", 0.002136409392137939, 0.5744439712876557]], [[\"Rotate\", 0.757566991428807, 0.41351586654059386], [\"TranslateY\", 0.6716670812367449, 0.45381701497377025]], [[\"Color\", 0.29554345831738604, 0.5747484938203239], [\"Brightness\", 0.6495565535422139, 0.38353714282675055]], [[\"Color\", 0.6552239827844064, 0.6396684879350223], [\"Rotate\", 0.4078437959841622, 0.8229364582618871]], [[\"ShearX\", 0.3325165311431108, 0.99875651917317], [\"Cutout\", 0.060614087173980605, 0.8655206968462149]], [[\"ShearY\", 0.8591223614020521, 0.47375809606391645], [\"ShearY\", 0.09964216351993155, 0.7076762087109618]], [[\"Color\", 0.9353968383925787, 0.5171703648813921], [\"Cutout\", 0.7542267059402566, 0.4591488152776885]], [[\"ShearX\", 0.6832456179177027, 0.6798505733549863], [\"Color\", 0.7408439718746301, 0.5061967673457707]], [[\"Equalize\", 0.4451729339243929, 0.9242958562575693], [\"Posterize\", 0.2426742903818478, 0.7914731845374992]], [[\"Posterize\", 0.6241497285503436, 0.6800650930438693], [\"Rotate\", 0.8212761169895445, 0.42470879405266637]], [[\"Sharpness\", 0.35467334577635123, 0.4150922293649909], [\"Color\", 0.38988011871489925, 0.08762395748275534]], [[\"Invert\", 0.20231176261188386, 0.34300045056881756], [\"Color\", 0.6311643386438919, 0.4311911861691113]], [[\"Contrast\", 0.2892223327756343, 0.533349670629816], 
[\"ShearY\", 0.6483243327679983, 0.37584367848303185]], [[\"Contrast\", 0.6516401043089397, 0.3801387361685983], [\"Contrast\", 0.38470661862567795, 0.994720698440467]], [[\"Contrast\", 0.44558087160644655, 0.4234506152228727], [\"AutoContrast\", 0.30132391715441104, 0.7758068064149011]], [[\"ShearY\", 0.8336612877669443, 0.6961881064757953], [\"TranslateX\", 0.111182606133131, 0.7138593872015647]], [[\"Brightness\", 0.7252053408816349, 0.6883715819669095], [\"Cutout\", 0.6664014893052573, 0.5118622737562747]], [[\"TranslateX\", 0.04294623433241698, 0.4737274091618545], [\"Solarize\", 0.15848056715239178, 0.436678451116009]], [[\"ShearX\", 0.41843604414439584, 0.5571669083243844], [\"Solarize\", 0.31754187268874345, 0.643294796216908]], [[\"Cutout\", 0.308644829376876, 0.9455913104658791], [\"Cutout\", 0.04221174396591258, 0.8004389485099825]], [[\"Invert\", 0.7644819805649288, 0.393641460630097], [\"Posterize\", 0.20832144467525543, 0.6449709932505365]], [[\"ShearY\", 0.60954354330238, 0.45193814135157406], [\"Rotate\", 0.07564178568434804, 0.5700158941616946]], [[\"Color\", 0.47993653910354905, 0.18770437256254732], [\"Equalize\", 0.16540989366253533, 0.3295832145751728]], [[\"Sharpness\", 0.773656112445468, 0.899183686347773], [\"AutoContrast\", 0.6225833171499476, 0.8375805811436356]], [[\"Brightness\", 0.3119630413126101, 0.21694186245727698], [\"Cutout\", 0.08263220622864997, 0.9910421137289533]], [[\"TranslateY\", 0.5200200210314198, 0.44467464167817444], [\"Cutout\", 0.3466375681433383, 0.22385957813397142]], [[\"ShearY\", 0.4445374219718209, 0.23917745675733915], [\"Equalize\", 0.32094329607540717, 0.6286388268054685]], [[\"Invert\", 0.6194633221674505, 0.6219326801360905], [\"Color\", 0.43219405413154555, 0.5463431710956901]], [[\"ShearX\", 0.5491808798436206, 0.4485147269153593], [\"ShearX\", 0.9624243432991532, 0.581319457926692]], [[\"Cutout\", 0.8486066390061917, 0.48538785811340557], [\"Cutout\", 0.15945182827781573, 0.4114259503742423]], 
[[\"TranslateX\", 0.9845485123667319, 0.7590166645874611], [\"Solarize\", 0.9920857955871512, 0.33259831689209834]], [[\"Brightness\", 0.3985764491687188, 0.3516086190155328], [\"Cutout\", 0.13907765098725244, 0.42430309616193995]], [[\"Color\", 0.35877942890428727, 0.363294622757879], [\"Equalize\", 0.4997709941984466, 0.34475754120666147]], [[\"Sharpness\", 0.5234916035905941, 0.8988480410886609], [\"AutoContrast\", 0.793554237802939, 0.2575758806963965]], [[\"Brightness\", 0.36998588693418133, 0.24144652775222428], [\"Cutout\", 0.06610767765334377, 0.9979246311006975]], [[\"TranslateY\", 0.6132425595571164, 0.43952345951359123], [\"Cutout\", 0.361849532200793, 0.8462247954545264]], [[\"Posterize\", 0.36953849915949677, 0.3144747463577223], [\"Equalize\", 0.3258985378881982, 0.6314053736452068]], [[\"TranslateY\", 0.35835648104981205, 0.08075066564380576], [\"TranslateX\", 0.5242389109555177, 0.11959330395816647]], [[\"ShearX\", 0.32773751079554303, 0.9307864751586945], [\"Sharpness\", 0.006921805496030664, 0.8736511230672348]], [[\"TranslateY\", 0.48202000226401526, 0.7058919195136056], [\"ShearY\", 0.6998308555145181, 0.21074360071080764]], [[\"AutoContrast\", 0.7615852152325713, 0.24914859158079972], [\"Cutout\", 0.8270894478252626, 0.5804285538051077]], [[\"AutoContrast\", 0.5391662421077847, 0.5233969710179517], [\"Brightness\", 0.04205906143049083, 0.382677139318253]], [[\"Brightness\", 0.6904817357054526, 0.9116378156160974], [\"Invert\", 0.24305250280628815, 0.2384731852843838]], [[\"TranslateX\", 0.2661235046256291, 0.9705982948874188], [\"Sharpness\", 0.35821873293899625, 0.0030835471296858444]], [[\"Posterize\", 0.39029991982997647, 0.4286238191447004], [\"TranslateX\", 0.08954883207184736, 0.7263973533121859]], [[\"Cutout\", 0.040284118298638344, 0.0388330236482832], [\"Posterize\", 0.7807814946471116, 0.5238352731112299]], [[\"ShearY\", 0.43556653451802413, 0.6924037743225071], [\"Contrast\", 0.001081515338562919, 0.7340363920548519]], 
[[\"Sharpness\", 0.6966467544442373, 0.10202517317137291], [\"Color\", 0.18836344735972566, 0.31736252662501935]], [[\"Contrast\", 0.6460000689193517, 0.16242196500430484], [\"AutoContrast\", 0.6003831047484897, 0.8612141912778188]], [[\"Brightness\", 0.9172874494072921, 0.292364504408795], [\"Solarize\", 0.344602582555059, 0.7054248176903991]], [[\"Brightness\", 0.020940469451794064, 0.5051042440134866], [\"Cutout\", 0.569500058123745, 0.9091247933460598]], [[\"Invert\", 0.7367715506799225, 0.636137024500329], [\"TranslateY\", 0.6186960283294023, 0.37626001619073624]], [[\"TranslateX\", 0.2863246154089121, 0.7454318730628517], [\"ShearY\", 0.6649909124084395, 0.37639265910774133]], [[\"Equalize\", 0.34603376919062656, 0.9324026002997775], [\"Sharpness\", 0.8481669261233902, 0.14545759197862507]], [[\"Contrast\", 0.6184370038862784, 0.8074198580702933], [\"TranslateX\", 0.07036135693949985, 0.46222686847401306]], [[\"Invert\", 0.9304884364616345, 0.26298808050002387], [\"Color\", 0.8027813156985396, 0.7748486756116594]], [[\"Posterize\", 0.2887993806199106, 0.9576118517235523], [\"Contrast\", 0.07498577510121784, 0.09131727137211232]], [[\"Contrast\", 0.8110536569461197, 0.051038215841138386], [\"Solarize\", 0.8799018446258887, 0.25028365826721977]], [[\"Cutout\", 0.006954733791187662, 0.030507696587206496], [\"Brightness\", 0.45329597160103124, 0.9623148451520953]], [[\"TranslateX\", 0.7436227980344521, 0.45996857241163086], [\"Solarize\", 0.9682234479355196, 0.70777684485634]], [[\"Brightness\", 0.2080557865889058, 0.025557286020371328], [\"AutoContrast\", 0.4786039197123853, 0.9271157120589375]], [[\"Solarize\", 0.1822930503108656, 0.8448222682426465], [\"ShearX\", 0.6221001240196488, 0.207994745014715]], [[\"Color\", 0.27879201870553094, 0.9112278219836276], [\"Color\", 0.7508664408516654, 0.14885798940641318]], [[\"ShearX\", 0.5496326925552889, 0.7643918760952656], [\"AutoContrast\", 0.7887459433195374, 0.5993900500657054]], [[\"ShearY\", 0.7182376017241904, 
0.7470412126724141], [\"Rotate\", 0.7644845975844854, 0.38510752407409893]], [[\"Contrast\", 0.7984591239416293, 0.054767400038152704], [\"Posterize\", 0.7324315466290486, 0.41749946919991243]], [[\"Contrast\", 0.596887781894766, 0.14832691232456097], [\"Contrast\", 0.05140651977459313, 0.14459348285712803]], [[\"TranslateX\", 0.32766681876233766, 0.5291103977440215], [\"Color\", 0.6039423443931029, 0.6280077043167083]], [[\"Invert\", 0.5267106136816635, 0.9429838545064784], [\"Sharpness\", 0.9999053422304087, 0.24764251340211074]], [[\"Contrast\", 0.495767451313242, 0.6744720418896594], [\"Brightness\", 0.2220993631062378, 0.023842431692152832]], [[\"Invert\", 0.7609399278201697, 0.38010826932678554], [\"Color\", 0.8454251931688355, 0.5876680099851194]], [[\"Posterize\", 0.24967505238473384, 0.3801835337368412], [\"Contrast\", 0.15106121477353399, 0.6785384814310887]], [[\"Invert\", 0.49594153211743874, 0.32307787492774986], [\"Contrast\", 0.46822075688054793, 0.7106858486805577]], [[\"Sharpness\", 0.7204076261101202, 0.5928585438185809], [\"Rotate\", 0.2922878012111486, 0.2742491027179961]], [[\"Solarize\", 0.2866813728691532, 0.2856363754608978], [\"TranslateY\", 0.7817609208793659, 0.17156048740523572]], [[\"Cutout\", 0.03345540659323987, 0.30068271036485605], [\"ShearY\", 0.2556603044234358, 0.32397855468866993]], [[\"TranslateY\", 0.20032231858163152, 0.4577561841994639], [\"Cutout\", 0.8063563515601337, 0.9224365467344459]], [[\"TranslateY\", 0.27130034613023113, 0.7446375583249849], [\"ShearX\", 0.8254766023480402, 0.4187078898038131]], [[\"ShearX\", 0.2937536068210411, 0.3864492533047109], [\"Contrast\", 0.7069611463424469, 0.686695922492015]], [[\"TranslateX\", 0.5869084659063555, 0.7866008068031776], [\"Invert\", 0.289041613918004, 0.5774431720429087]], [[\"Posterize\", 0.6199250263408456, 0.36010044446077893], [\"Color\", 0.7216853388297056, 0.18586684958836489]], [[\"Posterize\", 0.16831615585406814, 0.08052519983493259], [\"Cutout\", 
0.7325882891023244, 0.77416439921321]], [[\"Posterize\", 0.3000961100422498, 0.5181759282337892], [\"Contrast\", 0.40376073196794304, 0.613724714153924]], [[\"ShearX\", 0.32203193464136226, 0.037459860897434916], [\"Solarize\", 0.961542785512965, 0.5176575408248285]], [[\"Posterize\", 0.8986732529036036, 0.7773257927223327], [\"AutoContrast\", 0.9765986969928243, 0.2092264330225745]], [[\"Posterize\", 0.7463386563644007, 0.7086671048242543], [\"Posterize\", 0.6433819807034994, 0.00541136425219968]], [[\"Contrast\", 0.8810746688690078, 0.4821029611474963], [\"Invert\", 0.5121169325265204, 0.6360694878582249]], [[\"AutoContrast\", 0.457606735372388, 0.6104794570624505], [\"Color\", 0.0020511991982608124, 0.6488142202778011]], [[\"Invert\", 0.01744463899367027, 0.9799156424364703], [\"ShearY\", 0.3448213456605478, 0.04437356383800711]], [[\"Solarize\", 0.28511589596283315, 0.283465265528744], [\"Rotate\", 0.6831807199089897, 0.0617176467316177]], [[\"Sharpness\", 0.329148970281285, 0.398397318402924], [\"Color\", 0.9125837011914073, 0.4724426676489746]], [[\"Posterize\", 0.05701522811381192, 0.17109014518445975], [\"Cutout\", 0.785885656821686, 0.39072624694455804]], [[\"TranslateY\", 0.36644251447248277, 0.5818480868136134], [\"Equalize\", 0.06162286852923926, 0.710929848709861]], [[\"ShearY\", 0.8667124241442813, 0.7556246528256454], [\"ShearY\", 0.505190335528531, 0.2935701441277698]], [[\"Brightness\", 0.6369570015916268, 0.5131486964430919], [\"Color\", 0.4887119711633827, 0.9364572089679907]], [[\"Equalize\", 0.06596702627228657, 0.42632445412423303], [\"Equalize\", 0.583434672187985, 0.045592788478947655]], [[\"ShearY\", 0.12701084021549092, 0.501622939075192], [\"Cutout\", 0.7948319202684251, 0.5662618207034569]], [[\"Posterize\", 0.24586808377061664, 0.5178008194277262], [\"Contrast\", 0.1647040530405073, 0.7459410952796975]], [[\"Solarize\", 0.346601298126444, 0.02933266448415553], [\"ShearY\", 0.9571781647031095, 0.4992610484566735]], [[\"Brightness\", 
0.5174960605130408, 0.4387498174634591], [\"AutoContrast\", 0.6327403754086753, 0.8279630556620247]], [[\"Posterize\", 0.7591448754183128, 0.6265369743070788], [\"Posterize\", 0.5030300462943854, 0.00401699185532868]], [[\"Contrast\", 0.02643254602183477, 0.44677741300429646], [\"Invert\", 0.2921779546234399, 0.732876182854368]], [[\"TranslateY\", 0.3516821152310867, 0.7142224211142528], [\"Brightness\", 0.07382104862245475, 0.45368581543623165]], [[\"Invert\", 0.21382474908836685, 0.8413922690356168], [\"Invert\", 0.4082563426777157, 0.17018243778787834]], [[\"Brightness\", 0.9533955059573749, 0.8279651051553477], [\"Cutout\", 0.6730769221406385, 0.07780554260470988]], [[\"Brightness\", 0.6022173063382547, 0.6008500678386571], [\"Sharpness\", 0.5051909719558138, 0.002298383273851839]], [[\"Contrast\", 0.03373395758348563, 0.3343918835437655], [\"Sharpness\", 0.8933651164916847, 0.21738300404986516]], [[\"TranslateX\", 0.7095755408419822, 0.26445508146225394], [\"Equalize\", 0.18255527363432034, 0.38857557766574147]], [[\"Solarize\", 0.4045911117686074, 0.009106925727519921], [\"Posterize\", 0.9380296936271705, 0.5485821516085955]], [[\"Posterize\", 0.20361995432403968, 0.45378735898242406], [\"AutoContrast\", 0.9020357653982511, 0.7880592087609304]], [[\"AutoContrast\", 0.9921550787672145, 0.7396130723399785], [\"Cutout\", 0.4203609896071977, 0.13000504717682415]], [[\"Equalize\", 0.1917806394805356, 0.5549114911941102], [\"Posterize\", 0.27636900597148506, 0.02953514963949344]], [[\"AutoContrast\", 0.5427071893197213, 0.6650127340685553], [\"Color\", 0.011762461060904839, 0.3793508738225649]], [[\"Invert\", 0.18495006059896424, 0.8561476625981166], [\"ShearY\", 0.6417068692813954, 0.9908751019535517]], [[\"Solarize\", 0.2992385431633619, 0.33622162977907644], [\"Rotate\", 0.6070550252540432, 0.010205544695142064]], [[\"Sharpness\", 0.33292787606841845, 0.549446566149951], [\"Color\", 0.9097665730481233, 0.9947658451503181]], [[\"Posterize\", 0.11207465085954937, 
0.23296263754645155], [\"Cutout\", 0.6159972426858633, 0.38289684517298556]], [[\"TranslateX\", 0.7343689718523805, 0.16303049089087485], [\"Equalize\", 0.3138385390145809, 0.6096356352129273]], [[\"Solarize\", 0.4807269891506887, 0.28116279654856363], [\"Posterize\", 0.9753467973380021, 0.6327025372916857]], [[\"Posterize\", 0.837244997106023, 0.5586046483574153], [\"AutoContrast\", 0.9005775602024721, 0.7983389828641411]], [[\"AutoContrast\", 0.8347112949943837, 0.7321850307727004], [\"Cutout\", 0.3322676575657192, 0.14409873524237032]], [[\"Equalize\", 0.12285967262649124, 0.5368519477089722], [\"Posterize\", 0.2693593445898034, 0.15098267759162076]], [[\"Invert\", 0.331021587020619, 0.3140868578915853], [\"Cutout\", 0.48268387543799884, 0.7642598986625201]], [[\"Equalize\", 0.47573794714622175, 0.8628185952549363], [\"Solarize\", 0.14860046214144496, 0.3739284346347912]], [[\"AutoContrast\", 0.6747373196190459, 0.2912917979635714], [\"Posterize\", 0.27259573208358623, 0.9643671211873469]], [[\"Sharpness\", 0.15019788105901233, 0.7289238028242861], [\"ShearY\", 0.7998448015985137, 0.5924798900807636]], [[\"Brightness\", 0.7874052186079156, 0.9446398428550358], [\"Equalize\", 0.5105557539139616, 0.6719808885741001]], [[\"ShearX\", 0.783252331899515, 0.74960184771181], [\"ShearX\", 0.4327935527932927, 0.29980994764698565]], [[\"Rotate\", 0.03892023906368644, 0.24868635699639904], [\"Cutout\", 0.6408903979315637, 0.32135851733523907]], [[\"Invert\", 0.9972802027590713, 0.9374194642823106], [\"ShearX\", 0.20016463162924894, 0.0052278586143255645]], [[\"AutoContrast\", 0.9328687102578992, 0.44280614999256235], [\"Color\", 0.05637751621265141, 0.26921974769786455]], [[\"AutoContrast\", 0.2798532308065416, 0.5283914274806746], [\"Cutout\", 0.12930089032151, 0.25624459046884057]], [[\"Invert\", 0.2397428994839993, 0.31011715409282065], [\"Cutout\", 0.5875151915473042, 0.7454458580264322]], [[\"Equalize\", 0.374815667651982, 0.9502053862625081], [\"Solarize\", 
0.10100323698574426, 0.5124939317648691]], [[\"AutoContrast\", 0.6009889057852652, 0.3080148907275367], [\"Posterize\", 0.6543352447742621, 0.17498668744492413]], [[\"Sharpness\", 0.14402909409016001, 0.9239239955843186], [\"ShearY\", 0.8959818090635513, 0.7258262803413784]], [[\"Brightness\", 0.8672271320432974, 0.8241439816189235], [\"Equalize\", 0.4954433852960082, 0.6687050430971254]], [[\"Solarize\", 0.47813402689782114, 0.9447222576804901], [\"TranslateY\", 0.32546974113401694, 0.8367777573080345]], [[\"Sharpness\", 0.48098022972519927, 0.2731904819197933], [\"Rotate\", 0.14601550238940067, 0.3955290089346866]], [[\"AutoContrast\", 0.3777442613874327, 0.9991495158709968], [\"TranslateY\", 0.2951496731751222, 0.6276755696126608]], [[\"Cutout\", 0.487150344941835, 0.7976642551725155], [\"Solarize\", 0.643407733524025, 0.6313641977306543]], [[\"Rotate\", 0.35017053741686033, 0.23960877779589906], [\"Sharpness\", 0.8741761196478873, 0.12362019972427862]], [[\"Invert\", 0.8849459784626776, 0.48532144354199647], [\"Invert\", 0.702430443380318, 0.924655906426149]], [[\"Equalize\", 0.6324140359298986, 0.9780539325897597], [\"AutoContrast\", 0.39105074227907843, 0.3636856607173081]], [[\"AutoContrast\", 0.8049993541952016, 0.3231157206314408], [\"ShearY\", 0.6675686366141409, 0.7345332792455934]], [[\"Sharpness\", 0.12332351413693327, 0.9345179453120547], [\"Solarize\", 0.1594280186083361, 0.422049311332906]], [[\"Rotate\", 0.38227253679386375, 0.7664364038099101], [\"AutoContrast\", 0.5725492572719726, 0.21049701651094446]], [[\"Brightness\", 0.6432891832524184, 0.8243948738979008], [\"Equalize\", 0.20355899618080098, 0.7983877568044979]], [[\"ShearY\", 0.694393675204811, 0.3686964692262895], [\"TranslateX\", 0.5593122846101599, 0.3378904046390629]], [[\"Invert\", 0.9139730140623171, 0.7183505086140822], [\"Posterize\", 0.2675839177893596, 0.21399738931234905]], [[\"TranslateX\", 0.05309461965184896, 0.032983777975422554], [\"Sharpness\", 0.412621944330688, 
0.4752089612268503]], [[\"Equalize\", 0.06901149860261116, 0.27405796188385945], [\"AutoContrast\", 0.7710451977604326, 0.20474249114426807]], [[\"ShearX\", 0.47416427531072325, 0.2738614239087857], [\"Cutout\", 0.2820106413231565, 0.6295219975308107]], [[\"Cutout\", 0.19984489885141582, 0.7019895950299546], [\"ShearX\", 0.4264722378410729, 0.8483962467724536]], [[\"ShearY\", 0.42111446850243256, 0.1837626718066795], [\"Brightness\", 0.9187856196205942, 0.07478292286531767]], [[\"Solarize\", 0.2832036589192868, 0.8253473638854684], [\"Cutout\", 0.7279303826662196, 0.615420010694839]], [[\"ShearX\", 0.963251873356884, 0.5625577053738846], [\"Color\", 0.9637046840298858, 0.9992644813427337]], [[\"Invert\", 0.7976502716811696, 0.43330238739921956], [\"ShearY\", 0.9113181667853614, 0.9066729024232627]], [[\"Posterize\", 0.5750620807485399, 0.7729691927432935], [\"Contrast\", 0.4527879467651071, 0.9647739595774402]], [[\"Posterize\", 0.5918751472569104, 0.26467375535556653], [\"Posterize\", 0.6347402742279589, 0.7476940787143674]], [[\"Invert\", 0.16552404612306285, 0.9829939598708993], [\"Solarize\", 0.29886553921638087, 0.22487098773064948]], [[\"Cutout\", 0.24209211313246753, 0.5522928952260516], [\"AutoContrast\", 0.6212831649673523, 0.4191071063984261]], [[\"ShearX\", 0.4726406722647257, 0.26783614257572447], [\"TranslateY\", 0.251078162624763, 0.26103450676044304]], [[\"Cutout\", 0.8721775527314426, 0.6284108541347894], [\"ShearX\", 0.7063325779145683, 0.8467168866724094]], [[\"ShearY\", 0.42226987564279606, 0.18012694533480308], [\"Brightness\", 0.858499853702629, 0.4738929353785444]], [[\"Solarize\", 0.30039851082582764, 0.8151511479162529], [\"Cutout\", 0.7228873804059033, 0.6174351379837011]], [[\"ShearX\", 0.4921198221896609, 0.5678998037958154], [\"Color\", 0.7865298825314806, 0.9309020966406338]], [[\"Invert\", 0.8077821007916464, 0.7375015762124386], [\"Cutout\", 0.032464574567796195, 0.25405044477004846]], [[\"Color\", 0.6061325441870133, 
0.2813794250571565], [\"TranslateY\", 0.5882949270385848, 0.33262043078220227]], [[\"ShearX\", 0.7877331864215293, 0.8001131937448647], [\"Cutout\", 0.19828215489868783, 0.5949317580743655]], [[\"Contrast\", 0.529508728421701, 0.36477855845285007], [\"Color\", 0.7145481740509138, 0.2950794787786947]], [[\"Contrast\", 0.9932891064746089, 0.46930062926732646], [\"Posterize\", 0.9033014136780437, 0.5745902253320527]]]\n    return p\n\n\ndef policy_decoder(augment, num_policy, num_op):\n    op_list = augment_list(False)\n    policies = []\n    for i in range(num_policy):\n        ops = []\n        for j in range(num_op):\n            op_idx = augment['policy_%d_%d' % (i, j)]\n            op_prob = augment['prob_%d_%d' % (i, j)]\n            op_level = augment['level_%d_%d' % (i, j)]\n            ops.append((op_list[op_idx][0].__name__, op_prob, op_level))\n        policies.append(ops)\n    return policies\n"
  },
  {
    "path": "fast_autoaugment/confs/efficientnet_b0.yaml",
    "content": "model:\n  type: efficientnet-b0\n  condconv_num_expert: 1  # if this is greater than 1(eg. 4), it activates condconv.\ndataset: imagenet\naug: fa_reduced_imagenet\ncutout: 0\nbatch: 128    # per gpu\nepoch: 350\nlr: 0.008     # 0.256 for 4096 batch\nlr_schedule:\n  type: 'efficientnet'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: rmsprop\n  decay: 0.00001\n  clip: 0\n  ema: 0.9999\n  ema_interval: -1\nlb_smooth: 0.1"
  },
  {
    "path": "fast_autoaugment/confs/efficientnet_b0_condconv.yaml",
    "content": "model:\n  type: efficientnet-b0\n  condconv_num_expert: 8  # if this is greater than 1(eg. 4), it activates condconv.\ndataset: imagenet\naug: fa_reduced_imagenet\ncutout: 0\nbatch: 128    # per gpu\nepoch: 350\nlr: 0.008     # 0.256 for 4096 batch\nlr_schedule:\n  type: 'efficientnet'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: rmsprop\n  decay: 0.00001\n  clip: 0\n  ema: 0.9999\n  ema_interval: -1\nlb_smooth: 0.1\nmixup: 0.2\n"
  },
  {
    "path": "fast_autoaugment/confs/efficientnet_b1.yaml",
    "content": "model:\n  type: efficientnet-b1\n  condconv_num_expert: 1  # if this is greater than 1(eg. 4), it activates condconv.\ndataset: imagenet\naug: fa_reduced_imagenet\ncutout: 0\nbatch: 128    # per gpu\nepoch: 350\nlr: 0.008     # 0.256 for 4096 batch\nlr_schedule:\n  type: 'efficientnet'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: rmsprop\n  decay: 0.00001\n  clip: 0\n  ema: 0.9999\n  ema_interval: -1\nlb_smooth: 0.1\n"
  },
  {
    "path": "fast_autoaugment/confs/efficientnet_b2.yaml",
    "content": "model:\n  type: efficientnet-b2\n  condconv_num_expert: 1  # if this is greater than 1(eg. 4), it activates condconv.\ndataset: imagenet\naug: fa_reduced_imagenet\ncutout: 0\nbatch: 128    # per gpu\nepoch: 350\nlr: 0.008     # 0.256 for 4096 batch\nlr_schedule:\n  type: 'efficientnet'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: rmsprop\n  decay: 0.00001\n  clip: 0\n  ema: 0.9999\n  ema_interval: -1\nlb_smooth: 0.1\n"
  },
  {
    "path": "fast_autoaugment/confs/efficientnet_b3.yaml",
    "content": "model:\n  type: efficientnet-b3\n  condconv_num_expert: 1  # if this is greater than 1(eg. 4), it activates condconv.\ndataset: imagenet\naug: fa_reduced_imagenet\ncutout: 0\nbatch: 64    # per gpu\nepoch: 350\nlr: 0.004     # 0.256 for 4096 batch\nlr_schedule:\n  type: 'efficientnet'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: rmsprop\n  decay: 0.00001\n  clip: 0\n  ema: 0.9999\n  ema_interval: -1\nlb_smooth: 0.1\n"
  },
  {
    "path": "fast_autoaugment/confs/efficientnet_b4.yaml",
    "content": "model:\n  type: efficientnet-b4\n  condconv_num_expert: 1  # if this is greater than 1(eg. 4), it activates condconv.\ndataset: imagenet\naug: fa_reduced_imagenet\ncutout: 0\nbatch: 32    # per gpu\nepoch: 350\nlr: 0.002     # 0.256 for 4096 batch\nlr_schedule:\n  type: 'efficientnet'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: rmsprop\n  decay: 0.00001\n  clip: 0\n  ema: 0.9999\n  ema_interval: -1\nlb_smooth: 0.1\n"
  },
  {
    "path": "fast_autoaugment/confs/pyramid272_cifar.yaml",
    "content": "model:\n  type: pyramid\n  depth: 272\n  alpha: 200\n  bottleneck: True\ndataset: cifar10\naug: fa_reduced_cifar10\ncutout: 16\nbatch: 64\nepoch: 1800\nlr: 0.05\nlr_schedule:\n  type: 'cosine'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: sgd\n  nesterov: True\n  decay: 0.00005\n  ema: 0\n"
  },
  {
    "path": "fast_autoaugment/confs/resnet200.yaml",
    "content": "model:\n  type: resnet200\ndataset: imagenet\naug: fa_reduced_imagenet\ncutout: 0\nbatch: 64\nepoch: 270\nlr: 0.025\nlr_schedule:\n  type: 'resnet'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: sgd\n  nesterov: True\n  decay: 0.0001\n  clip: 0\n  ema: 0\n"
  },
  {
    "path": "fast_autoaugment/confs/resnet50.yaml",
    "content": "model:\n  type: resnet50\ndataset: imagenet\naug: fa_reduced_imagenet\ncutout: 0\nbatch: 128\nepoch: 270\nlr: 0.05\nlr_schedule:\n  type: 'resnet'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: sgd\n  nesterov: True\n  decay: 0.0001\n  clip: 0\n  ema: 0\n"
  },
  {
    "path": "fast_autoaugment/confs/resnet50_mixup.yaml",
    "content": "model:\n  type: resnet50\ndataset: imagenet\naug: fa_reduced_imagenet\ncutout: 0\nbatch: 128\nepoch: 270\nlr: 0.05\nlr_schedule:\n  type: 'resnet'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: sgd\n  nesterov: True\n  decay: 0.0001\n  clip: 0\n  ema: 0\n#lb_smooth: 0.1\nmixup: 0.2\n"
  },
  {
    "path": "fast_autoaugment/confs/shake26_2x112d_cifar.yaml",
    "content": "model:\n  type: shakeshake26_2x112d\ndataset: cifar10\naug: fa_reduced_cifar10\ncutout: 16\nbatch: 128\nepoch: 1800\nlr: 0.01\nlr_schedule:\n  type: 'cosine'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: sgd\n  nesterov: True\n  decay: 0.002\n  ema: 0\n"
  },
  {
    "path": "fast_autoaugment/confs/shake26_2x32d_cifar.yaml",
    "content": "model:\n  type: shakeshake26_2x32d\ndataset: cifar10\naug: fa_reduced_cifar10\ncutout: 16\nbatch: 128\nepoch: 1800\nlr: 0.01\nlr_schedule:\n  type: 'cosine'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: sgd\n  nesterov: True\n  decay: 0.001\n  ema: 0\n"
  },
  {
    "path": "fast_autoaugment/confs/shake26_2x96d_cifar.yaml",
    "content": "model:\n  type: shakeshake26_2x96d\ndataset: cifar10\naug: fa_reduced_cifar10\ncutout: 16\nbatch: 128\nepoch: 1800\nlr: 0.01\nlr_schedule:\n  type: 'cosine'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: sgd\n  nesterov: True\n  decay: 0.001\n  ema: 0\n"
  },
  {
    "path": "fast_autoaugment/confs/wresnet28x10_cifar.yaml",
    "content": "model:\n  type: wresnet28_10\ndataset: cifar10\naug: fa_reduced_cifar10\ncutout: 16\nbatch: 128\nepoch: 200\nlr: 0.1\nlr_schedule:\n  type: 'cosine'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: sgd\n  nesterov: True\n  decay: 0.0005\n  ema: 0"
  },
  {
    "path": "fast_autoaugment/confs/wresnet28x10_svhn.yaml",
    "content": "model:\n  type: wresnet28_10\ndataset: svhn\naug: fa_reduced_svhn\ncutout: 20\nbatch: 128\nepoch: 200\nlr: 0.01\nlr_schedule:\n  type: 'cosine'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: sgd\n  nesterov: True\n  decay: 0.0005\n  ema: 0"
  },
  {
    "path": "fast_autoaugment/confs/wresnet40x2_cifar.yaml",
    "content": "model:\n  type: wresnet40_2\ndataset: cifar10\naug: fa_reduced_cifar10\ncutout: 16\nbatch: 128\nepoch: 200\nlr: 0.1\nlr_schedule:\n  type: 'cosine'\n  warmup:\n    multiplier: 1\n    epoch: 5\noptimizer:\n  type: sgd\n  nesterov: True\n  decay: 0.0002\n  ema: 0"
  },
  {
    "path": "fast_autoaugment/requirements.txt",
    "content": "git+https://github.com/wbaek/theconf@de32022f8c0651a043dc812d17194cdfd62066e8\ngit+https://github.com/ildoonet/pytorch-gradual-warmup-lr.git@08f7d5e\ngit+https://github.com/ildoonet/pystopwatch2.git\ngit+https://github.com/hyperopt/hyperopt.git\n\npretrainedmodels\ntqdm\ntensorboardx\nsklearn\nray\nmatplotlib\npsutil\nrequests"
  },
  {
    "path": "madrys.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport models\nfrom torch.autograd import Variable\nif torch.cuda.is_available():\n    device = torch.device('cuda')\nelse:\n    device = torch.device('cpu')\n\n\nclass MadrysLoss(nn.Module):\n    def __init__(self, step_size=0.007, epsilon=0.031, perturb_steps=10, distance='l_inf', cutmix=False):\n        super(MadrysLoss, self).__init__()\n        self.step_size = step_size\n        self.epsilon = epsilon\n        self.perturb_steps = perturb_steps\n        self.distance = distance\n        self.cross_entropy = models.CutMixCrossEntropyLoss() if cutmix else torch.nn.CrossEntropyLoss()\n\n    def forward(self, model, x_natural, y, optimizer):\n        model.eval()\n        for param in model.parameters():\n            param.requires_grad = False\n\n        # generate adversarial example\n        x_adv = x_natural.clone() + self.step_size * torch.randn(x_natural.shape).to(device)\n        if self.distance == 'l_inf':\n            for _ in range(self.perturb_steps):\n                x_adv.requires_grad_()\n                loss_ce = self.cross_entropy(model(x_adv), y)\n                grad = torch.autograd.grad(loss_ce, [x_adv])[0]\n                x_adv = x_adv.detach() + self.step_size * torch.sign(grad.detach())\n                x_adv = torch.min(torch.max(x_adv, x_natural - self.epsilon), x_natural + self.epsilon)\n                x_adv = torch.clamp(x_adv, 0.0, 1.0)\n        else:\n            x_adv = torch.clamp(x_adv, 0.0, 1.0)\n\n        for param in model.parameters():\n            param.requires_grad = True\n\n        model.train()\n        # x_adv = Variable(x_adv, requires_grad=False)\n        optimizer.zero_grad()\n        logits = model(x_adv)\n        loss = self.cross_entropy(logits, y)\n\n        return logits, loss\n"
  },
  {
    "path": "main.py",
    "content": "import argparse\nimport datetime\nimport os\nimport shutil\nimport time\nimport numpy as np\nimport dataset\nimport mlconfig\nimport torch\nimport util\nimport madrys\nimport models\nfrom evaluator import Evaluator\nfrom trainer import Trainer\nmlconfig.register(madrys.MadrysLoss)\n\n# General Options\nparser = argparse.ArgumentParser(description='ClasswiseNoise')\nparser.add_argument('--seed', type=int, default=0, help='seed')\nparser.add_argument('--version', type=str, default=\"resnet18\")\nparser.add_argument('--exp_name', type=str, default=\"test_exp\")\nparser.add_argument('--config_path', type=str, default='configs/cifar10')\nparser.add_argument('--load_model', action='store_true', default=False)\nparser.add_argument('--data_parallel', action='store_true', default=False)\nparser.add_argument('--train', action='store_true', default=False)\nparser.add_argument('--save_frequency', default=-1, type=int)\n# Datasets Options\nparser.add_argument('--train_face', action='store_true', default=False)\nparser.add_argument('--train_portion', default=1.0, type=float)\nparser.add_argument('--train_batch_size', default=128, type=int, help='perturb step size')\nparser.add_argument('--eval_batch_size', default=256, type=int, help='perturb step size')\nparser.add_argument('--num_of_workers', default=8, type=int, help='workers for loader')\nparser.add_argument('--train_data_type', type=str, default='CIFAR10')\nparser.add_argument('--test_data_type', type=str, default='CIFAR10')\nparser.add_argument('--train_data_path', type=str, default='../datasets')\nparser.add_argument('--test_data_path', type=str, default='../datasets')\nparser.add_argument('--perturb_type', default='classwise', type=str, choices=['classwise', 'samplewise'], help='Perturb type')\nparser.add_argument('--patch_location', default='center', type=str, choices=['center', 'random'], help='Location of the noise')\nparser.add_argument('--poison_rate', default=1.0, 
type=float)\nparser.add_argument('--perturb_tensor_filepath', default=None, type=str)\nargs = parser.parse_args()\n\n\n# Set up Experiments\nif args.exp_name == '':\n    args.exp_name = 'exp_' + datetime.datetime.now()\n\nexp_path = os.path.join(args.exp_name, args.version)\nlog_file_path = os.path.join(exp_path, args.version)\ncheckpoint_path = os.path.join(exp_path, 'checkpoints')\ncheckpoint_path_file = os.path.join(checkpoint_path, args.version)\nutil.build_dirs(exp_path)\nutil.build_dirs(checkpoint_path)\nlogger = util.setup_logger(name=args.version, log_file=log_file_path + \".log\")\n\n# CUDA Options\nlogger.info(\"PyTorch Version: %s\" % (torch.__version__))\nif torch.cuda.is_available():\n    torch.cuda.manual_seed(args.seed)\n    torch.backends.cudnn.enabled = True\n    torch.backends.cudnn.benchmark = True\n    device = torch.device('cuda')\n    device_list = [torch.cuda.get_device_name(i) for i in range(0, torch.cuda.device_count())]\n    logger.info(\"GPU List: %s\" % (device_list))\nelse:\n    device = torch.device('cpu')\n\n# Load Exp Configs\nconfig_file = os.path.join(args.config_path, args.version)+'.yaml'\nconfig = mlconfig.load(config_file)\nconfig.set_immutable()\nfor key in config:\n    logger.info(\"%s: %s\" % (key, config[key]))\nshutil.copyfile(config_file, os.path.join(exp_path, args.version+'.yaml'))\n\n\ndef train(starting_epoch, model, optimizer, scheduler, criterion, trainer, evaluator, ENV, data_loader):\n    for epoch in range(starting_epoch, config.epochs):\n        logger.info(\"\")\n        logger.info(\"=\"*20 + \"Training Epoch %d\" % (epoch) + \"=\"*20)\n\n        # Train\n        ENV['global_step'] = trainer.train(epoch, model, criterion, optimizer)\n        ENV['train_history'].append(trainer.acc_meters.avg*100)\n        scheduler.step()\n\n        # Eval\n        logger.info(\"=\"*20 + \"Eval Epoch %d\" % (epoch) + \"=\"*20)\n        is_best = False\n        if not args.train_face:\n            evaluator.eval(epoch, model)\n 
           payload = ('Eval Loss:%.4f\\tEval acc: %.2f' % (evaluator.loss_meters.avg, evaluator.acc_meters.avg*100))\n            logger.info(payload)\n            ENV['eval_history'].append(evaluator.acc_meters.avg*100)\n            ENV['curren_acc'] = evaluator.acc_meters.avg*100\n            ENV['cm_history'].append(evaluator.confusion_matrix.cpu().numpy().tolist())\n            # Reset Stats\n            trainer._reset_stats()\n            evaluator._reset_stats()\n        else:\n            pass\n            # model.eval()\n            # model.module.classify = True\n            # evaluator.eval(epoch, model)\n            # payload = ('Eval Loss:%.4f\\tEval acc: %.2f' % (evaluator.loss_meters.avg, evaluator.acc_meters.avg*100))\n            # logger.info(payload)\n            # model.classify = False\n            # identity_list = lfw_test.get_lfw_list('lfw_test_pair.txt')\n            # img_paths = [os.path.join('../datasets/lfw-112x112', each) for each in identity_list]\n            # eval_acc = lfw_test.lfw_test(model, img_paths, identity_list, 'lfw_test_pair.txt', args.eval_batch_size, logger=logger)\n            # ENV['curren_acc'] = eval_acc\n            # ENV['best_acc'] = max(ENV['best_acc'], eval_acc)\n            # ENV['eval_history'].append(eval_acc)\n            # # Reset Stats\n            # trainer._reset_stats()\n            # evaluator._reset_stats()\n\n        # Save Model\n        target_model = model.module if args.data_parallel else model\n        util.save_model(ENV=ENV,\n                        epoch=epoch,\n                        model=target_model,\n                        optimizer=optimizer,\n                        scheduler=scheduler,\n                        is_best=is_best,\n                        filename=checkpoint_path_file)\n        logger.info('Model Saved at %s', checkpoint_path_file)\n\n        if args.save_frequency > 0 and epoch % args.save_frequency == 0:\n            filename = checkpoint_path_file + '_epoch%d' % 
(epoch)\n            util.save_model(ENV=ENV,\n                            epoch=epoch,\n                            model=target_model,\n                            optimizer=optimizer,\n                            scheduler=scheduler,\n                            filename=filename)\n            logger.info('Model Saved at %s', filename)\n\n    return\n\n\ndef main():\n    model = config.model().to(device)\n    datasets_generator = config.dataset(train_data_type=args.train_data_type,\n                                        train_data_path=args.train_data_path,\n                                        test_data_type=args.test_data_type,\n                                        test_data_path=args.test_data_path,\n                                        train_batch_size=args.train_batch_size,\n                                        eval_batch_size=args.eval_batch_size,\n                                        num_of_workers=args.num_of_workers,\n                                        poison_rate=args.poison_rate,\n                                        perturb_type=args.perturb_type,\n                                        patch_location=args.patch_location,\n                                        perturb_tensor_filepath=args.perturb_tensor_filepath,\n                                        seed=args.seed)\n    logger.info('Training Dataset: %s' % str(datasets_generator.datasets['train_dataset']))\n    logger.info('Test Dataset: %s' % str(datasets_generator.datasets['test_dataset']))\n    if 'Poison' in args.train_data_type:\n        with open(os.path.join(exp_path, 'poison_targets.npy'), 'wb') as f:\n            if not (isinstance(datasets_generator.datasets['train_dataset'], dataset.MixUp) or isinstance(datasets_generator.datasets['train_dataset'], dataset.CutMix)):\n                poison_targets = np.array(datasets_generator.datasets['train_dataset'].poison_samples_idx)\n                np.save(f, poison_targets)\n                
logger.info(poison_targets)\n                logger.info('Poisoned: %d/%d' % (len(poison_targets), len(datasets_generator.datasets['train_dataset'])))\n                logger.info('Poisoned samples idx saved at %s' % (os.path.join(exp_path, 'poison_targets')))\n                logger.info('Poisoned Class %s' % (str(datasets_generator.datasets['train_dataset'].poison_class)))\n\n    if args.train_portion == 1.0:\n        data_loader = datasets_generator.getDataLoader()\n        train_target = 'train_dataset'\n    else:\n        train_target = 'train_subset'\n        data_loader = datasets_generator._split_validation_set(args.train_portion,\n                                                               train_shuffle=True,\n                                                               train_drop_last=True)\n\n    logger.info(\"param size = %fMB\", util.count_parameters_in_MB(model))\n    optimizer = config.optimizer(model.parameters())\n    scheduler = config.scheduler(optimizer)\n    criterion = config.criterion()\n    trainer = Trainer(criterion, data_loader, logger, config, target=train_target)\n    evaluator = Evaluator(data_loader, logger, config)\n\n    starting_epoch = 0\n    ENV = {'global_step': 0,\n           'best_acc': 0.0,\n           'curren_acc': 0.0,\n           'best_pgd_acc': 0.0,\n           'train_history': [],\n           'eval_history': [],\n           'pgd_eval_history': [],\n           'genotype_list': [],\n           'cm_history': []}\n\n    if args.load_model:\n        checkpoint = util.load_model(filename=checkpoint_path_file,\n                                     model=model,\n                                     optimizer=optimizer,\n                                     alpha_optimizer=None,\n                                     scheduler=scheduler)\n        starting_epoch = checkpoint['epoch']\n        ENV = checkpoint['ENV']\n        trainer.global_step = ENV['global_step']\n        logger.info(\"File %s loaded!\" % 
(checkpoint_path_file))\n\n    if args.data_parallel:\n        model = torch.nn.DataParallel(model)\n\n    if args.train:\n        train(starting_epoch, model, optimizer, scheduler, criterion, trainer, evaluator, ENV, data_loader)\n\n\nif __name__ == '__main__':\n    for arg in vars(args):\n        logger.info(\"%s: %s\" % (arg, getattr(args, arg)))\n    start = time.time()\n    main()\n    end = time.time()\n    cost = (end - start) / 86400\n    payload = \"Running Cost %.2f Days \\n\" % cost\n    logger.info(payload)\n"
  },
  {
    "path": "models/DenseNet.py",
    "content": "'''\nhttps://github.com/kuangliu/pytorch-cifar\nDenseNet in PyTorch.\n'''\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Bottleneck(nn.Module):\n    def __init__(self, in_planes, growth_rate):\n        super(Bottleneck, self).__init__()\n        self.bn1 = nn.BatchNorm2d(in_planes)\n        self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)\n        self.bn2 = nn.BatchNorm2d(4*growth_rate)\n        self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)\n\n    def forward(self, x):\n        out = self.conv1(F.relu(self.bn1(x)))\n        out = self.conv2(F.relu(self.bn2(out)))\n        out = torch.cat([out, x], 1)\n        return out\n\n\nclass Transition(nn.Module):\n    def __init__(self, in_planes, out_planes):\n        super(Transition, self).__init__()\n        self.bn = nn.BatchNorm2d(in_planes)\n        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)\n\n    def forward(self, x):\n        out = self.conv(F.relu(self.bn(x)))\n        out = F.avg_pool2d(out, 2)\n        return out\n\n\nclass DenseNet(nn.Module):\n    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):\n        super(DenseNet, self).__init__()\n        self.growth_rate = growth_rate\n\n        num_planes = 2*growth_rate\n        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)\n\n        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])\n        num_planes += nblocks[0]*growth_rate\n        out_planes = int(math.floor(num_planes*reduction))\n        self.trans1 = Transition(num_planes, out_planes)\n        num_planes = out_planes\n\n        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])\n        num_planes += nblocks[1]*growth_rate\n        out_planes = int(math.floor(num_planes*reduction))\n        self.trans2 = Transition(num_planes, out_planes)\n       
 num_planes = out_planes\n\n        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])\n        num_planes += nblocks[2]*growth_rate\n        out_planes = int(math.floor(num_planes*reduction))\n        self.trans3 = Transition(num_planes, out_planes)\n        num_planes = out_planes\n\n        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])\n        num_planes += nblocks[3]*growth_rate\n\n        self.bn = nn.BatchNorm2d(num_planes)\n        self.linear = nn.Linear(num_planes, num_classes)\n\n    def _make_dense_layers(self, block, in_planes, nblock):\n        layers = []\n        for i in range(nblock):\n            layers.append(block(in_planes, self.growth_rate))\n            in_planes += self.growth_rate\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        out = self.conv1(x)\n        out = self.trans1(self.dense1(out))\n        out = self.trans2(self.dense2(out))\n        out = self.trans3(self.dense3(out))\n        out = self.dense4(out)\n        out = F.avg_pool2d(F.relu(self.bn(out)), 4)\n        out = out.view(out.size(0), -1)\n        out = self.linear(out)\n        return out\n\n\ndef DenseNet121(num_classes=10):\n    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=32, num_classes=num_classes)\n\n\ndef DenseNet169(num_classes=10):\n    return DenseNet(Bottleneck, [6, 12, 32, 32], growth_rate=32, num_classes=num_classes)\n\n\ndef DenseNet201(num_classes=10):\n    return DenseNet(Bottleneck, [6, 12, 48, 32], growth_rate=32, num_classes=num_classes)\n\n\ndef DenseNet161(num_classes=10):\n    return DenseNet(Bottleneck, [6, 12, 36, 24], growth_rate=48, num_classes=num_classes)\n\n\ndef densenet_cifar():\n    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=12)\n"
  },
  {
    "path": "models/ResNet.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\n\nclass BasicBlock(nn.Module):\n    expansion = 1\n\n    def __init__(self, in_planes, planes, stride=1):\n        super(BasicBlock, self).__init__()\n        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n        self.bn1 = nn.BatchNorm2d(planes)\n        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n        self.bn2 = nn.BatchNorm2d(planes)\n\n        self.shortcut = nn.Sequential()\n        if stride != 1 or in_planes != self.expansion * planes:\n            self.shortcut = nn.Sequential(\n                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),\n                nn.BatchNorm2d(self.expansion * planes)\n            )\n\n    def forward(self, x):\n        out = F.relu(self.bn1(self.conv1(x)))\n        out = self.bn2(self.conv2(out))\n        out += self.shortcut(x)\n        out = F.relu(out)\n        return out\n\n\nclass Bottleneck(nn.Module):\n    expansion = 4\n\n    def __init__(self, in_planes, planes, stride=1):\n        super(Bottleneck, self).__init__()\n        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n        self.bn1 = nn.BatchNorm2d(planes)\n        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n        self.bn2 = nn.BatchNorm2d(planes)\n        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)\n        self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n\n        self.shortcut = nn.Sequential()\n        if stride != 1 or in_planes != self.expansion * planes:\n            self.shortcut = nn.Sequential(\n                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),\n                nn.BatchNorm2d(self.expansion * planes)\n            )\n\n    def forward(self, x):\n      
  out = F.relu(self.bn1(self.conv1(x)))\n        out = F.relu(self.bn2(self.conv2(out)))\n        out = self.bn3(self.conv3(out))\n        out += self.shortcut(x)\n        out = F.relu(out)\n        return out\n\n\nclass ResNet(nn.Module):\n    def __init__(self, block, num_blocks, num_classes=10):\n        super(ResNet, self).__init__()\n        self.in_planes = 64\n        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n        self.bn1 = nn.BatchNorm2d(64)\n        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n        self.linear = nn.Linear(512 * block.expansion, num_classes)\n\n    def _make_layer(self, block, planes, num_blocks, stride):\n        strides = [stride] + [1] * (num_blocks - 1)\n        layers = []\n        for stride in strides:\n            layers.append(block(self.in_planes, planes, stride))\n            self.in_planes = planes * block.expansion\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        out = F.relu(self.bn1(self.conv1(x)))\n        out = self.layer1(out)\n        out = self.layer2(out)\n        out = self.layer3(out)\n        out = self.layer4(out)\n        out = F.avg_pool2d(out, 4)\n        out = out.view(out.size(0), -1)\n        out = self.linear(out)\n        return out\n\n\ndef ResNet18(num_classes=10):\n    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes)\n\n\ndef ResNet34(num_classes=10):\n    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes)\n\n\ndef ResNet50(num_classes=10):\n    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes)\n\n\ndef ResNet101(num_classes=10):\n    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)\n\n\ndef ResNet152(num_classes=10):\n    
return ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)\n\n\ndef test():\n    net = ResNet18()\n    y = net(torch.randn(1, 3, 32, 32))\n    print(y.size())\n\n\nclass BasicConv2d(nn.Module):\n    def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):\n        super().__init__()\n        self.conv = nn.Conv2d(\n            in_planes, out_planes,\n            kernel_size=kernel_size, stride=stride,\n            padding=padding, bias=False\n        )\n        self.bn = nn.BatchNorm2d(\n            out_planes,\n            eps=0.001,\n            momentum=0.1,\n            affine=True\n        )\n        self.relu = nn.ReLU(inplace=False)\n\n    def forward(self, x):\n        x = self.conv(x)\n        x = self.bn(x)\n        x = self.relu(x)\n        return x\n\n\nclass Block35(nn.Module):\n\n    def __init__(self, scale=1.0):\n        super().__init__()\n\n        self.scale = scale\n\n        self.branch0 = BasicConv2d(256, 32, kernel_size=1, stride=1)\n\n        self.branch1 = nn.Sequential(\n            BasicConv2d(256, 32, kernel_size=1, stride=1),\n            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)\n        )\n\n        self.branch2 = nn.Sequential(\n            BasicConv2d(256, 32, kernel_size=1, stride=1),\n            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1),\n            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)\n        )\n\n        self.conv2d = nn.Conv2d(96, 256, kernel_size=1, stride=1)\n        self.relu = nn.ReLU(inplace=False)\n\n    def forward(self, x):\n        x0 = self.branch0(x)\n        x1 = self.branch1(x)\n        x2 = self.branch2(x)\n        out = torch.cat((x0, x1, x2), 1)\n        out = self.conv2d(out)\n        out = out * self.scale + x\n        out = self.relu(out)\n        return out\n\n\nclass Block17(nn.Module):\n\n    def __init__(self, scale=1.0):\n        super().__init__()\n\n        self.scale = scale\n\n        self.branch0 = BasicConv2d(896, 128, 
kernel_size=1, stride=1)\n\n        self.branch1 = nn.Sequential(\n            BasicConv2d(896, 128, kernel_size=1, stride=1),\n            BasicConv2d(128, 128, kernel_size=(1, 7), stride=1, padding=(0, 3)),\n            BasicConv2d(128, 128, kernel_size=(7, 1), stride=1, padding=(3, 0))\n        )\n\n        self.conv2d = nn.Conv2d(256, 896, kernel_size=1, stride=1)\n        self.relu = nn.ReLU(inplace=False)\n\n    def forward(self, x):\n        x0 = self.branch0(x)\n        x1 = self.branch1(x)\n        out = torch.cat((x0, x1), 1)\n        out = self.conv2d(out)\n        out = out * self.scale + x\n        out = self.relu(out)\n        return out\n\n\nclass Block8(nn.Module):\n\n    def __init__(self, scale=1.0, noReLU=False):\n        super().__init__()\n\n        self.scale = scale\n        self.noReLU = noReLU\n\n        self.branch0 = BasicConv2d(1792, 192, kernel_size=1, stride=1)\n\n        self.branch1 = nn.Sequential(\n            BasicConv2d(1792, 192, kernel_size=1, stride=1),\n            BasicConv2d(192, 192, kernel_size=(1, 3), stride=1, padding=(0, 1)),\n            BasicConv2d(192, 192, kernel_size=(3, 1), stride=1, padding=(1, 0))\n        )\n\n        self.conv2d = nn.Conv2d(384, 1792, kernel_size=1, stride=1)\n        if not self.noReLU:\n            self.relu = nn.ReLU(inplace=False)\n\n    def forward(self, x):\n        x0 = self.branch0(x)\n        x1 = self.branch1(x)\n        out = torch.cat((x0, x1), 1)\n        out = self.conv2d(out)\n        out = out * self.scale + x\n        if not self.noReLU:\n            out = self.relu(out)\n        return out\n\n\nclass Mixed_6a(nn.Module):\n\n    def __init__(self):\n        super().__init__()\n\n        self.branch0 = BasicConv2d(256, 384, kernel_size=3, stride=2)\n\n        self.branch1 = nn.Sequential(\n            BasicConv2d(256, 192, kernel_size=1, stride=1),\n            BasicConv2d(192, 192, kernel_size=3, stride=1, padding=1),\n            BasicConv2d(192, 256, kernel_size=3, 
stride=2)\n        )\n\n        self.branch2 = nn.MaxPool2d(3, stride=2)\n\n    def forward(self, x):\n        x0 = self.branch0(x)\n        x1 = self.branch1(x)\n        x2 = self.branch2(x)\n        out = torch.cat((x0, x1, x2), 1)\n        return out\n\n\nclass Mixed_7a(nn.Module):\n\n    def __init__(self):\n        super().__init__()\n\n        self.branch0 = nn.Sequential(\n            BasicConv2d(896, 256, kernel_size=1, stride=1),\n            BasicConv2d(256, 384, kernel_size=3, stride=2)\n        )\n\n        self.branch1 = nn.Sequential(\n            BasicConv2d(896, 256, kernel_size=1, stride=1),\n            BasicConv2d(256, 256, kernel_size=3, stride=2)\n        )\n\n        self.branch2 = nn.Sequential(\n            BasicConv2d(896, 256, kernel_size=1, stride=1),\n            BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1),\n            BasicConv2d(256, 256, kernel_size=3, stride=2)\n        )\n\n        self.branch3 = nn.MaxPool2d(3, stride=2)\n\n    def forward(self, x):\n        x0 = self.branch0(x)\n        x1 = self.branch1(x)\n        x2 = self.branch2(x)\n        x3 = self.branch3(x)\n        out = torch.cat((x0, x1, x2, x3), 1)\n        return out\n\n\nclass InceptionResnetV1(nn.Module):\n    def __init__(self, num_classes=10575, face_features=512, dropout_prob=0.6):\n        super().__init__()\n        self.num_classes = num_classes\n\n        # Define layers\n        self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)\n        self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)\n        self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1)\n        self.maxpool_3a = nn.MaxPool2d(3, stride=2)\n        self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)\n        self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)\n        self.conv2d_4b = BasicConv2d(192, 256, kernel_size=3, stride=2)\n        self.repeat_1 = nn.Sequential(\n            Block35(scale=0.17),\n            
Block35(scale=0.17),\n            Block35(scale=0.17),\n            Block35(scale=0.17),\n            Block35(scale=0.17),\n        )\n        self.mixed_6a = Mixed_6a()\n        self.repeat_2 = nn.Sequential(\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n        )\n        self.mixed_7a = Mixed_7a()\n        self.repeat_3 = nn.Sequential(\n            Block8(scale=0.20),\n            Block8(scale=0.20),\n            Block8(scale=0.20),\n            Block8(scale=0.20),\n            Block8(scale=0.20),\n        )\n        self.block8 = Block8(noReLU=True)\n        self.avgpool_1a = nn.AdaptiveAvgPool2d(1)\n        self.dropout = nn.Dropout(dropout_prob)\n        self.last_linear = nn.Linear(1792, face_features, bias=False)\n        self.last_bn = nn.BatchNorm1d(512, eps=0.001, momentum=0.1, affine=True)\n        self.fc = nn.Linear(512, self.num_classes)\n\n    def forward(self, x):\n        x = self.conv2d_1a(x)\n        x = self.conv2d_2a(x)\n        x = self.conv2d_2b(x)\n        x = self.maxpool_3a(x)\n        x = self.conv2d_3b(x)\n        x = self.conv2d_4a(x)\n        x = self.conv2d_4b(x)\n        x = self.repeat_1(x)\n        x = self.mixed_6a(x)\n        x = self.repeat_2(x)\n        x = self.mixed_7a(x)\n        x = self.repeat_3(x)\n        x = self.block8(x)\n        x = self.avgpool_1a(x)\n        x = self.dropout(x)\n        x = self.last_linear(x.view(x.shape[0], -1))\n        x = self.last_bn(x)\n        if self.training:\n            return self.fc(x)\n        else:\n            return F.normalize(x, p=2, dim=1)\n"
  },
  {
    "path": "models/ToyModel.py",
    "content": "import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ConvBrunch(nn.Module):\n    def __init__(self, in_planes, out_planes, kernel_size=3):\n        super(ConvBrunch, self).__init__()\n        padding = (kernel_size - 1) // 2\n        self.out_conv = nn.Sequential(\n            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, padding=padding),\n            nn.BatchNorm2d(out_planes),\n            nn.ReLU())\n\n    def forward(self, x):\n        return self.out_conv(x)\n\n\nclass ToyModel(nn.Module):\n    def __init__(self, num_classes=10):\n        super(ToyModel, self).__init__()\n        self.block1 = nn.Sequential(\n            ConvBrunch(3, 64, 3),\n            nn.MaxPool2d(kernel_size=2, stride=2),\n            ConvBrunch(64, 128, 3),\n            nn.MaxPool2d(kernel_size=2, stride=2),\n            ConvBrunch(128, 256, 3),\n            nn.MaxPool2d(kernel_size=2, stride=2))\n        self.global_avg_pool = nn.AdaptiveAvgPool2d((1, 1))\n        self.fc = nn.Linear(256, num_classes)\n\n    def forward(self, x):\n        x = self.block1(x)\n        x = self.global_avg_pool(x)\n        x = x.view(-1, 256)\n        x = self.fc(x)\n        return x\n"
  },
  {
    "path": "models/__init__.py",
    "content": "import mlconfig\nimport torch\nimport torch.nn as nn\nimport torchvision\n\nfrom . import DenseNet, ResNet, ToyModel, inception_resnet_v1\n\nmlconfig.register(torch.optim.SGD)\nmlconfig.register(torch.optim.Adam)\nmlconfig.register(torch.optim.lr_scheduler.MultiStepLR)\nmlconfig.register(torch.optim.lr_scheduler.CosineAnnealingLR)\nmlconfig.register(torch.optim.lr_scheduler.StepLR)\nmlconfig.register(torch.optim.lr_scheduler.ExponentialLR)\nmlconfig.register(torch.nn.CrossEntropyLoss)\n\n# Models\nmlconfig.register(ResNet.ResNet)\nmlconfig.register(ResNet.ResNet18)\nmlconfig.register(ResNet.ResNet34)\nmlconfig.register(ResNet.ResNet50)\nmlconfig.register(ResNet.ResNet101)\nmlconfig.register(ResNet.ResNet152)\nmlconfig.register(ToyModel.ToyModel)\nmlconfig.register(DenseNet.DenseNet121)\nmlconfig.register(inception_resnet_v1.InceptionResnetV1)\n# torchvision models\nmlconfig.register(torchvision.models.resnet18)\nmlconfig.register(torchvision.models.resnet50)\nmlconfig.register(torchvision.models.densenet121)\n\n# CUDA Options\nif torch.cuda.is_available():\n    device = torch.device('cuda')\nelse:\n    device = torch.device('cpu')\n\n\n@mlconfig.register\nclass FocalLoss(nn.Module):\n    def __init__(self, gamma=0, eps=1e-7):\n        super(FocalLoss, self).__init__()\n        self.gamma = gamma\n        self.eps = eps\n        self.ce = torch.nn.CrossEntropyLoss()\n\n    def forward(self, input, target):\n        logp = self.ce(input, target)\n        p = torch.exp(-logp)\n        loss = (1 - p) ** self.gamma * logp\n        return loss.mean()\n\n\ndef cross_entropy(input, target, size_average=True):\n    \"\"\" Cross entropy that accepts soft targets\n    Args:\n         pred: predictions for neural network\n         targets: targets, can be soft\n         size_average: if false, sum is returned instead of mean\n    Examples::\n        input = torch.FloatTensor([[1.1, 2.8, 1.3], [1.1, 2.1, 4.8]])\n        input = torch.autograd.Variable(out, 
requires_grad=True)\n        target = torch.FloatTensor([[0.05, 0.9, 0.05], [0.05, 0.05, 0.9]])\n        target = torch.autograd.Variable(y1)\n        loss = cross_entropy(input, target)\n        loss.backward()\n    \"\"\"\n    logsoftmax = torch.nn.LogSoftmax(dim=1)\n    if size_average:\n        return torch.mean(torch.sum(-target * logsoftmax(input), dim=1))\n    else:\n        return torch.sum(torch.sum(-target * logsoftmax(input), dim=1))\n\n\n@mlconfig.register\nclass CutMixCrossEntropyLoss(torch.nn.Module):\n    def __init__(self, size_average=True):\n        super().__init__()\n        self.size_average = size_average\n\n    def forward(self, input, target):\n        if len(target.size()) == 1:\n            target = torch.nn.functional.one_hot(target, num_classes=input.size(-1))\n            target = target.float().cuda()\n        return cross_entropy(input, target, self.size_average)\n"
  },
  {
    "path": "models/download.py",
    "content": "import hashlib\nimport os\nimport shutil\nimport sys\nimport tempfile\n\nfrom urllib.request import urlopen, Request\n\ntry:\n    from tqdm.auto import tqdm  # automatically select proper tqdm submodule if available\nexcept ImportError:\n    try:\n        from tqdm import tqdm\n    except ImportError:\n        # fake tqdm if it's not installed\n        class tqdm(object):  # type: ignore\n\n            def __init__(self, total=None, disable=False,\n                         unit=None, unit_scale=None, unit_divisor=None):\n                self.total = total\n                self.disable = disable\n                self.n = 0\n                # ignore unit, unit_scale, unit_divisor; they're just for real tqdm\n\n            def update(self, n):\n                if self.disable:\n                    return\n\n                self.n += n\n                if self.total is None:\n                    sys.stderr.write(\"\\r{0:.1f} bytes\".format(self.n))\n                else:\n                    sys.stderr.write(\"\\r{0:.1f}%\".format(100 * self.n / float(self.total)))\n                sys.stderr.flush()\n\n            def __enter__(self):\n                return self\n\n            def __exit__(self, exc_type, exc_val, exc_tb):\n                if self.disable:\n                    return\n\n                sys.stderr.write('\\n')\n\n\ndef download_url_to_file(url, dst, hash_prefix=None, progress=True):\n    r\"\"\"Download object at the given URL to a local path.\n    Args:\n        url (string): URL of the object to download\n        dst (string): Full path where object will be saved, e.g. 
`/tmp/temporary_file`\n        hash_prefix (string, optional): If not None, the SHA256 downloaded file should start with `hash_prefix`.\n            Default: None\n        progress (bool, optional): whether or not to display a progress bar to stderr\n            Default: True\n    Example:\n        >>> torch.hub.download_url_to_file('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth', '/tmp/temporary_file')\n    \"\"\"\n    file_size = None\n    # We use a different API for python2 since urllib(2) doesn't recognize the CA\n    # certificates in older Python\n    req = Request(url, headers={\"User-Agent\": \"torch.hub\"})\n    u = urlopen(req)\n    meta = u.info()\n    if hasattr(meta, 'getheaders'):\n        content_length = meta.getheaders(\"Content-Length\")\n    else:\n        content_length = meta.get_all(\"Content-Length\")\n    if content_length is not None and len(content_length) > 0:\n        file_size = int(content_length[0])\n\n    # We deliberately save it in a temp file and move it after\n    # download is complete. 
This prevents a local working checkpoint\n    # being overridden by a broken download.\n    dst = os.path.expanduser(dst)\n    dst_dir = os.path.dirname(dst)\n    f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)\n\n    try:\n        if hash_prefix is not None:\n            sha256 = hashlib.sha256()\n        with tqdm(total=file_size, disable=not progress,\n                  unit='B', unit_scale=True, unit_divisor=1024) as pbar:\n            while True:\n                buffer = u.read(8192)\n                if len(buffer) == 0:\n                    break\n                f.write(buffer)\n                if hash_prefix is not None:\n                    sha256.update(buffer)\n                pbar.update(len(buffer))\n\n        f.close()\n        if hash_prefix is not None:\n            digest = sha256.hexdigest()\n            if digest[:len(hash_prefix)] != hash_prefix:\n                raise RuntimeError('invalid hash value (expected \"{}\", got \"{}\")'\n                                   .format(hash_prefix, digest))\n        shutil.move(f.name, dst)\n    finally:\n        f.close()\n        if os.path.exists(f.name):\n            os.remove(f.name)\n"
  },
  {
    "path": "models/inception_resnet_v1.py",
    "content": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom .download import download_url_to_file\nif torch.cuda.is_available():\n    device = torch.device('cuda')\nelse:\n    device = torch.device('cpu')\n\n\nclass BasicConv2d(nn.Module):\n    def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):\n        super().__init__()\n        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,\n                              stride=stride, padding=padding, bias=False)\n        self.bn = nn.BatchNorm2d(out_planes, eps=0.001, momentum=0.1, affine=True)\n        self.relu = nn.ReLU(inplace=False)\n\n    def forward(self, x):\n        x = self.conv(x)\n        x = self.bn(x)\n        x = self.relu(x)\n        return x\n\n\nclass Block35(nn.Module):\n\n    def __init__(self, scale=1.0):\n        super().__init__()\n\n        self.scale = scale\n\n        self.branch0 = BasicConv2d(256, 32, kernel_size=1, stride=1)\n\n        self.branch1 = nn.Sequential(\n            BasicConv2d(256, 32, kernel_size=1, stride=1),\n            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)\n        )\n\n        self.branch2 = nn.Sequential(\n            BasicConv2d(256, 32, kernel_size=1, stride=1),\n            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1),\n            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)\n        )\n\n        self.conv2d = nn.Conv2d(96, 256, kernel_size=1, stride=1)\n        self.relu = nn.ReLU(inplace=False)\n\n    def forward(self, x):\n        x0 = self.branch0(x)\n        x1 = self.branch1(x)\n        x2 = self.branch2(x)\n        out = torch.cat((x0, x1, x2), 1)\n        out = self.conv2d(out)\n        out = out * self.scale + x\n        out = self.relu(out)\n        return out\n\n\nclass Block17(nn.Module):\n\n    def __init__(self, scale=1.0):\n        super().__init__()\n\n        self.scale = scale\n\n        self.branch0 = BasicConv2d(896, 128, 
kernel_size=1, stride=1)\n\n        self.branch1 = nn.Sequential(\n            BasicConv2d(896, 128, kernel_size=1, stride=1),\n            BasicConv2d(128, 128, kernel_size=(1, 7), stride=1, padding=(0, 3)),\n            BasicConv2d(128, 128, kernel_size=(7, 1), stride=1, padding=(3, 0))\n        )\n\n        self.conv2d = nn.Conv2d(256, 896, kernel_size=1, stride=1)\n        self.relu = nn.ReLU(inplace=False)\n\n    def forward(self, x):\n        x0 = self.branch0(x)\n        x1 = self.branch1(x)\n        out = torch.cat((x0, x1), 1)\n        out = self.conv2d(out)\n        out = out * self.scale + x\n        out = self.relu(out)\n        return out\n\n\nclass Block8(nn.Module):\n\n    def __init__(self, scale=1.0, noReLU=False):\n        super().__init__()\n\n        self.scale = scale\n        self.noReLU = noReLU\n\n        self.branch0 = BasicConv2d(1792, 192, kernel_size=1, stride=1)\n\n        self.branch1 = nn.Sequential(\n            BasicConv2d(1792, 192, kernel_size=1, stride=1),\n            BasicConv2d(192, 192, kernel_size=(1, 3), stride=1, padding=(0, 1)),\n            BasicConv2d(192, 192, kernel_size=(3, 1), stride=1, padding=(1, 0))\n        )\n\n        self.conv2d = nn.Conv2d(384, 1792, kernel_size=1, stride=1)\n        if not self.noReLU:\n            self.relu = nn.ReLU(inplace=False)\n\n    def forward(self, x):\n        x0 = self.branch0(x)\n        x1 = self.branch1(x)\n        out = torch.cat((x0, x1), 1)\n        out = self.conv2d(out)\n        out = out * self.scale + x\n        if not self.noReLU:\n            out = self.relu(out)\n        return out\n\n\nclass Mixed_6a(nn.Module):\n\n    def __init__(self):\n        super().__init__()\n\n        self.branch0 = BasicConv2d(256, 384, kernel_size=3, stride=2)\n\n        self.branch1 = nn.Sequential(\n            BasicConv2d(256, 192, kernel_size=1, stride=1),\n            BasicConv2d(192, 192, kernel_size=3, stride=1, padding=1),\n            BasicConv2d(192, 256, kernel_size=3, 
stride=2)\n        )\n\n        self.branch2 = nn.MaxPool2d(3, stride=2)\n\n    def forward(self, x):\n        x0 = self.branch0(x)\n        x1 = self.branch1(x)\n        x2 = self.branch2(x)\n        out = torch.cat((x0, x1, x2), 1)\n        return out\n\n\nclass Mixed_7a(nn.Module):\n\n    def __init__(self):\n        super().__init__()\n\n        self.branch0 = nn.Sequential(\n            BasicConv2d(896, 256, kernel_size=1, stride=1),\n            BasicConv2d(256, 384, kernel_size=3, stride=2)\n        )\n\n        self.branch1 = nn.Sequential(\n            BasicConv2d(896, 256, kernel_size=1, stride=1),\n            BasicConv2d(256, 256, kernel_size=3, stride=2)\n        )\n\n        self.branch2 = nn.Sequential(\n            BasicConv2d(896, 256, kernel_size=1, stride=1),\n            BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1),\n            BasicConv2d(256, 256, kernel_size=3, stride=2)\n        )\n\n        self.branch3 = nn.MaxPool2d(3, stride=2)\n\n    def forward(self, x):\n        x0 = self.branch0(x)\n        x1 = self.branch1(x)\n        x2 = self.branch2(x)\n        x3 = self.branch3(x)\n        out = torch.cat((x0, x1, x2, x3), 1)\n        return out\n\n\nclass InceptionResnetV1(nn.Module):\n    \"\"\"Inception Resnet V1 model with optional loading of pretrained weights.\n    Model parameters can be loaded based on pretraining on the VGGFace2 or CASIA-Webface\n    datasets. Pretrained state_dicts are automatically downloaded on model instantiation if\n    requested and cached in the torch cache. Subsequent instantiations use the cache rather than\n    redownloading.\n    Keyword Arguments:\n        pretrained {str} -- Optional pretraining dataset. Either 'vggface2' or 'casia-webface'.\n            (default: {None})\n        classify {bool} -- Whether the model should output classification probabilities or feature\n            embeddings. (default: {False})\n        num_classes {int} -- Number of output classes. 
If 'pretrained' is set and num_classes not\n            equal to that used for the pretrained model, the final linear layer will be randomly\n            initialized. (default: {None})\n        dropout_prob {float} -- Dropout probability. (default: {0.6})\n    \"\"\"\n    def __init__(self, pretrained=None, classify=False, num_classes=None, dropout_prob=0.6):\n        super().__init__()\n\n        # Set simple attributes\n        self.pretrained = pretrained\n        self.classify = classify\n        self.num_classes = num_classes\n\n        if pretrained == 'vggface2':\n            tmp_classes = 8631\n        elif pretrained == 'casia-webface':\n            tmp_classes = 10575\n        elif pretrained is None and self.classify and self.num_classes is None:\n            raise Exception('If \"pretrained\" is not specified and \"classify\" is True, \"num_classes\" must be specified')\n\n        # Define layers\n        self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)\n        self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)\n        self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1)\n        self.maxpool_3a = nn.MaxPool2d(3, stride=2)\n        self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)\n        self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)\n        self.conv2d_4b = BasicConv2d(192, 256, kernel_size=3, stride=2)\n        self.repeat_1 = nn.Sequential(\n            Block35(scale=0.17),\n            Block35(scale=0.17),\n            Block35(scale=0.17),\n            Block35(scale=0.17),\n            Block35(scale=0.17),\n        )\n        self.mixed_6a = Mixed_6a()\n        self.repeat_2 = nn.Sequential(\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            Block17(scale=0.10),\n            
Block17(scale=0.10),\n            Block17(scale=0.10),\n        )\n        self.mixed_7a = Mixed_7a()\n        self.repeat_3 = nn.Sequential(\n            Block8(scale=0.20),\n            Block8(scale=0.20),\n            Block8(scale=0.20),\n            Block8(scale=0.20),\n            Block8(scale=0.20),\n        )\n        self.block8 = Block8(noReLU=True)\n        self.avgpool_1a = nn.AdaptiveAvgPool2d(1)\n        self.dropout = nn.Dropout(dropout_prob)\n        self.last_linear = nn.Linear(1792, 512, bias=False)\n        self.last_bn = nn.BatchNorm1d(512, eps=0.001, momentum=0.1, affine=True)\n\n        if pretrained is not None:\n            self.logits = nn.Linear(512, tmp_classes)\n            load_weights(self, pretrained)\n        if self.num_classes is not None:\n            self.logits = nn.Linear(512, self.num_classes)\n\n    def forward(self, x):\n        \"\"\"Calculate embeddings or logits given a batch of input image tensors.\n        Arguments:\n            x {torch.tensor} -- Batch of image tensors representing faces.\n        Returns:\n            torch.tensor -- Batch of embedding vectors or multinomial logits.\n        \"\"\"\n        x = self.conv2d_1a(x)\n        x = self.conv2d_2a(x)\n        x = self.conv2d_2b(x)\n        x = self.maxpool_3a(x)\n        x = self.conv2d_3b(x)\n        x = self.conv2d_4a(x)\n        x = self.conv2d_4b(x)\n        x = self.repeat_1(x)\n        x = self.mixed_6a(x)\n        x = self.repeat_2(x)\n        x = self.mixed_7a(x)\n        x = self.repeat_3(x)\n        x = self.block8(x)\n        x = self.avgpool_1a(x)\n        x = self.dropout(x)\n        x = self.last_linear(x.view(x.shape[0], -1))\n        x = self.last_bn(x)\n        if self.training or self.classify:\n            x = self.logits(x)\n        else:\n            x = F.normalize(x, p=2, dim=1)\n        return x\n\n\ndef load_weights(mdl, name):\n    \"\"\"Download pretrained state_dict and load into model.\n    Arguments:\n        mdl {torch.nn.Module} -- Pytorch model.\n        name {str} -- 
Name of dataset that was used to generate pretrained state_dict.\n    Raises:\n        ValueError: If 'pretrained' not equal to 'vggface2' or 'casia-webface'.\n    \"\"\"\n    if name == 'vggface2':\n        path = 'https://github.com/timesler/facenet-pytorch/releases/download/v2.2.9/20180402-114759-vggface2.pt'\n    elif name == 'casia-webface':\n        path = 'https://github.com/timesler/facenet-pytorch/releases/download/v2.2.9/20180408-102900-casia-webface.pt'\n    else:\n        raise ValueError('Pretrained models only exist for \"vggface2\" and \"casia-webface\"')\n\n    model_dir = 'pretrained_checkpoints'\n    os.makedirs(model_dir, exist_ok=True)\n\n    cached_file = os.path.join(model_dir, os.path.basename(path))\n    if not os.path.exists(cached_file):\n        download_url_to_file(path, cached_file)\n\n    state_dict = torch.load(cached_file)\n    mdl.load_state_dict(state_dict)\n\n\ndef get_torch_home():\n    torch_home = os.path.expanduser(\n        os.getenv(\n            'TORCH_HOME',\n            os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')\n        )\n    )\n    return torch_home\n"
  },
  {
    "path": "perturbation.py",
    "content": "import argparse\nimport collections\nimport datetime\nimport os\nimport shutil\nimport time\nimport dataset\nimport mlconfig\nimport toolbox\nimport torch\nimport util\nimport madrys\nimport numpy as np\nfrom evaluator import Evaluator\nfrom tqdm import tqdm\nfrom trainer import Trainer\nmlconfig.register(madrys.MadrysLoss)\n\n# General Options\nparser = argparse.ArgumentParser(description='ClasswiseNoise')\nparser.add_argument('--seed', type=int, default=0, help='seed')\nparser.add_argument('--version', type=str, default=\"resnet18\")\nparser.add_argument('--exp_name', type=str, default=\"test_exp\")\nparser.add_argument('--config_path', type=str, default='configs/cifar10')\nparser.add_argument('--load_model', action='store_true', default=False)\nparser.add_argument('--data_parallel', action='store_true', default=False)\n# Datasets Options\nparser.add_argument('--train_batch_size', default=512, type=int, help='perturb step size')\nparser.add_argument('--eval_batch_size', default=512, type=int, help='perturb step size')\nparser.add_argument('--num_of_workers', default=8, type=int, help='workers for loader')\nparser.add_argument('--train_data_type', type=str, default='CIFAR10')\nparser.add_argument('--train_data_path', type=str, default='../datasets')\nparser.add_argument('--test_data_type', type=str, default='CIFAR10')\nparser.add_argument('--test_data_path', type=str, default='../datasets')\n# Perturbation Options\nparser.add_argument('--universal_train_portion', default=0.2, type=float)\nparser.add_argument('--universal_stop_error', default=0.5, type=float)\nparser.add_argument('--universal_train_target', default='train_subset', type=str)\nparser.add_argument('--train_step', default=10, type=int)\nparser.add_argument('--use_subset', action='store_true', default=False)\nparser.add_argument('--attack_type', default='min-min', type=str, choices=['min-min', 'min-max', 'random'], help='Attack type')\nparser.add_argument('--perturb_type', 
default='classwise', type=str, choices=['classwise', 'samplewise'], help='Perturb type')\nparser.add_argument('--patch_location', default='center', type=str, choices=['center', 'random'], help='Location of the noise')\nparser.add_argument('--noise_shape', default=[10, 3, 32, 32], nargs='+', type=int, help='noise shape')\nparser.add_argument('--epsilon', default=8, type=float, help='perturbation')\nparser.add_argument('--num_steps', default=1, type=int, help='perturb number of steps')\nparser.add_argument('--step_size', default=0.8, type=float, help='perturb step size')\nparser.add_argument('--random_start', action='store_true', default=False)\nargs = parser.parse_args()\n\n# Convert Eps\nargs.epsilon = args.epsilon / 255\nargs.step_size = args.step_size / 255\n\n# Set up Experiments\nif args.exp_name == '':\n    args.exp_name = 'exp_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S')\n\nexp_path = os.path.join(args.exp_name, args.version)\nlog_file_path = os.path.join(exp_path, args.version)\ncheckpoint_path = os.path.join(exp_path, 'checkpoints')\ncheckpoint_path_file = os.path.join(checkpoint_path, args.version)\nutil.build_dirs(exp_path)\nutil.build_dirs(checkpoint_path)\nlogger = util.setup_logger(name=args.version, log_file=log_file_path + \".log\")\n\n# CUDA Options\nlogger.info(\"PyTorch Version: %s\" % (torch.__version__))\nif torch.cuda.is_available():\n    torch.cuda.manual_seed(args.seed)\n    torch.backends.cudnn.enabled = True\n    torch.backends.cudnn.benchmark = True\n    device = torch.device('cuda')\n    device_list = [torch.cuda.get_device_name(i) for i in range(0, torch.cuda.device_count())]\n    logger.info(\"GPU List: %s\" % (device_list))\nelse:\n    device = torch.device('cpu')\n\n# Load Exp Configs\nconfig_file = os.path.join(args.config_path, args.version)+'.yaml'\nconfig = mlconfig.load(config_file)\nconfig.set_immutable()\nfor key in config:\n    logger.info(\"%s: %s\" % (key, config[key]))\nshutil.copyfile(config_file, os.path.join(exp_path, 
args.version+'.yaml'))\n\n\ndef train(starting_epoch, model, optimizer, scheduler, criterion, trainer, evaluator, ENV, data_loader):\n    for epoch in range(starting_epoch, config.epochs):\n        logger.info(\"\")\n        logger.info(\"=\"*20 + \"Training Epoch %d\" % (epoch) + \"=\"*20)\n\n        # Train\n        ENV['global_step'] = trainer.train(epoch, model, criterion, optimizer)\n        ENV['train_history'].append(trainer.acc_meters.avg*100)\n        scheduler.step()\n\n        # Eval\n        logger.info(\"=\"*20 + \"Eval Epoch %d\" % (epoch) + \"=\"*20)\n        evaluator.eval(epoch, model)\n        payload = ('Eval Loss:%.4f\\tEval acc: %.2f' % (evaluator.loss_meters.avg, evaluator.acc_meters.avg*100))\n        logger.info(payload)\n        ENV['eval_history'].append(evaluator.acc_meters.avg*100)\n        ENV['curren_acc'] = evaluator.acc_meters.avg*100\n\n        # Reset Stats\n        trainer._reset_stats()\n        evaluator._reset_stats()\n\n        # Save Model\n        target_model = model.module if args.data_parallel else model\n        util.save_model(ENV=ENV,\n                        epoch=epoch,\n                        model=target_model,\n                        optimizer=optimizer,\n                        scheduler=scheduler,\n                        filename=checkpoint_path_file)\n        logger.info('Model Saved at %s', checkpoint_path_file)\n    return\n\n\ndef universal_perturbation_eval(noise_generator, random_noise, data_loader, model, eval_target=args.universal_train_target):\n    loss_meter = util.AverageMeter()\n    err_meter = util.AverageMeter()\n    random_noise = random_noise.to(device)\n    model = model.to(device)\n    for i, (images, labels) in enumerate(data_loader[eval_target]):\n        images, labels = images.to(device, non_blocking=True), labels.to(device, non_blocking=True)\n        if random_noise is not None:\n            for i in range(len(labels)):\n                class_index = labels[i].item()\n                
noise = random_noise[class_index]\n                mask_cord, class_noise = noise_generator._patch_noise_extend_to_img(noise, image_size=images[i].shape, patch_location=args.patch_location)\n                images[i] += class_noise\n        pred = model(images)\n        err = (pred.data.max(1)[1] != labels.data).float().sum()\n        loss = torch.nn.CrossEntropyLoss()(pred, labels)\n        loss_meter.update(loss.item(), len(labels))\n        err_meter.update(err / len(labels))\n    return loss_meter.avg, err_meter.avg\n\n\ndef universal_perturbation(noise_generator, trainer, evaluator, model, criterion, optimizer, scheduler, random_noise, ENV):\n    # Class-Wise perturbation\n    # Generate Data loader\n    datasets_generator = dataset.DatasetGenerator(train_batch_size=args.train_batch_size,\n                                                  eval_batch_size=args.eval_batch_size,\n                                                  train_data_type=args.train_data_type,\n                                                  train_data_path=args.train_data_path,\n                                                  test_data_type=args.test_data_type,\n                                                  test_data_path=args.test_data_path,\n                                                  num_of_workers=args.num_of_workers,\n                                                  seed=args.seed, no_train_augments=True)\n\n    if args.use_subset:\n        data_loader = datasets_generator._split_validation_set(train_portion=args.universal_train_portion,\n                                                               train_shuffle=True, train_drop_last=True)\n    else:\n        data_loader = datasets_generator.getDataLoader(train_shuffle=True, train_drop_last=True)\n\n    condition = True\n    data_iter = iter(data_loader['train_dataset'])\n    logger.info('=' * 20 + 'Searching Universal Perturbation' + '=' * 20)\n    if hasattr(model, 'classify'):\n        model.classify = True\n    
while condition:\n        if args.attack_type == 'min-min' and not args.load_model:\n            # Train Batch for min-min noise\n            for j in range(0, args.train_step):\n                try:\n                    (images, labels) = next(data_iter)\n                except:\n                    data_iter = iter(data_loader['train_dataset'])\n                    (images, labels) = next(data_iter)\n\n                images, labels = images.to(device), labels.to(device)\n                # Add Class-wise Noise to each sample\n                train_imgs = []\n                for i, (image, label) in enumerate(zip(images, labels)):\n                    noise = random_noise[label.item()]\n                    mask_cord, class_noise = noise_generator._patch_noise_extend_to_img(noise, image_size=image.shape, patch_location=args.patch_location)\n                    train_imgs.append(images[i]+class_noise)\n                # Train\n                model.train()\n                for param in model.parameters():\n                    param.requires_grad = True\n                trainer.train_batch(torch.stack(train_imgs).to(device), labels, model, optimizer)\n\n        for i, (images, labels) in tqdm(enumerate(data_loader[args.universal_train_target]), total=len(data_loader[args.universal_train_target])):\n            images, labels, model = images.to(device), labels.to(device), model.to(device)\n            # Add Class-wise Noise to each sample\n            batch_noise, mask_cord_list = [], []\n            for i, (image, label) in enumerate(zip(images, labels)):\n                noise = random_noise[label.item()]\n                mask_cord, class_noise = noise_generator._patch_noise_extend_to_img(noise, image_size=image.shape, patch_location=args.patch_location)\n                batch_noise.append(class_noise)\n                mask_cord_list.append(mask_cord)\n\n            # Update universal perturbation\n            model.eval()\n            for param in 
model.parameters():\n                param.requires_grad = False\n\n            batch_noise = torch.stack(batch_noise).to(device)\n            if args.attack_type == 'min-min':\n                perturb_img, eta = noise_generator.min_min_attack(images, labels, model, optimizer, criterion, random_noise=batch_noise)\n            elif args.attack_type == 'min-max':\n                perturb_img, eta = noise_generator.min_max_attack(images, labels, model, optimizer, criterion, random_noise=batch_noise)\n            else:\n                raise('Invalid attack')\n\n            class_noise_eta = collections.defaultdict(list)\n            for i in range(len(eta)):\n                x1, x2, y1, y2 = mask_cord_list[i]\n                delta = eta[i][:, x1: x2, y1: y2]\n                class_noise_eta[labels[i].item()].append(delta.detach().cpu())\n\n            for key in class_noise_eta:\n                delta = torch.stack(class_noise_eta[key]).mean(dim=0) - random_noise[key]\n                class_noise = random_noise[key]\n                class_noise += delta\n                random_noise[key] = torch.clamp(class_noise, -args.epsilon, args.epsilon)\n\n        # Eval termination conditions\n        loss_avg, error_rate = universal_perturbation_eval(noise_generator, random_noise, data_loader, model, eval_target=args.universal_train_target)\n        logger.info('Loss: {:.4f} Acc: {:.2f}%'.format(loss_avg, 100 - error_rate*100))\n        random_noise = random_noise.detach()\n        ENV['random_noise'] = random_noise\n        if args.attack_type == 'min-min':\n            condition = error_rate > args.universal_stop_error\n        elif args.attack_type == 'min-max':\n            condition = error_rate < args.universal_stop_error\n    return random_noise\n\n\ndef samplewise_perturbation_eval(random_noise, data_loader, model, eval_target='train_dataset', mask_cord_list=[]):\n    loss_meter = util.AverageMeter()\n    err_meter = util.AverageMeter()\n    # random_noise = 
random_noise.to(device)\n    model = model.to(device)\n    idx = 0\n    for i, (images, labels) in enumerate(data_loader[eval_target]):\n        images, labels = images.to(device, non_blocking=True), labels.to(device, non_blocking=True)\n        if random_noise is not None:\n            for i, (image, label) in enumerate(zip(images, labels)):\n                if not torch.is_tensor(random_noise):\n                    sample_noise = torch.tensor(random_noise[idx]).to(device)\n                else:\n                    sample_noise = random_noise[idx].to(device)\n                c, h, w = image.shape[0], image.shape[1], image.shape[2]\n                mask = np.zeros((c, h, w), np.float32)\n                x1, x2, y1, y2 = mask_cord_list[idx]\n                mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()\n                sample_noise = torch.from_numpy(mask).to(device)\n                images[i] = images[i] + sample_noise\n                idx += 1\n        pred = model(images)\n        err = (pred.data.max(1)[1] != labels.data).float().sum()\n        loss = torch.nn.CrossEntropyLoss()(pred, labels)\n        loss_meter.update(loss.item(), len(labels))\n        err_meter.update(err / len(labels))\n    return loss_meter.avg, err_meter.avg\n\n\ndef sample_wise_perturbation(noise_generator, trainer, evaluator, model, criterion, optimizer, scheduler, random_noise, ENV):\n    datasets_generator = dataset.DatasetGenerator(train_batch_size=args.train_batch_size,\n                                                  eval_batch_size=args.eval_batch_size,\n                                                  train_data_type=args.train_data_type,\n                                                  train_data_path=args.train_data_path,\n                                                  test_data_type=args.test_data_type,\n                                                  test_data_path=args.test_data_path,\n                                                  
num_of_workers=args.num_of_workers,\n                                                  seed=args.seed, no_train_augments=True)\n\n    if args.train_data_type == 'ImageNetMini' and args.perturb_type == 'samplewise':\n        data_loader = datasets_generator._split_validation_set(0.2, train_shuffle=False, train_drop_last=False)\n        data_loader['train_dataset'] = data_loader['train_subset']\n    else:\n        data_loader = datasets_generator.getDataLoader(train_shuffle=False, train_drop_last=False)\n    mask_cord_list = []\n    idx = 0\n    for images, labels in data_loader['train_dataset']:\n        for i, (image, label) in enumerate(zip(images, labels)):\n            noise = random_noise[idx]\n            mask_cord, _ = noise_generator._patch_noise_extend_to_img(noise, image_size=image.shape, patch_location=args.patch_location)\n            mask_cord_list.append(mask_cord)\n            idx += 1\n\n    condition = True\n    train_idx = 0\n    data_iter = iter(data_loader['train_dataset'])\n    logger.info('=' * 20 + 'Searching Samplewise Perturbation' + '=' * 20)\n    while condition:\n        if args.attack_type == 'min-min' and not args.load_model:\n            # Train Batch for min-min noise\n            for j in tqdm(range(0, args.train_step), total=args.train_step):\n                try:\n                    (images, labels) = next(data_iter)\n                except:\n                    train_idx = 0\n                    data_iter = iter(data_loader['train_dataset'])\n                    (images, labels) = next(data_iter)\n\n                images, labels = images.to(device), labels.to(device)\n                # Add Sample-wise Noise to each sample\n                for i, (image, label) in enumerate(zip(images, labels)):\n                    sample_noise = random_noise[train_idx]\n                    c, h, w = image.shape[0], image.shape[1], image.shape[2]\n                    mask = np.zeros((c, h, w), np.float32)\n                    x1, x2, y1, y2 = 
mask_cord_list[train_idx]\n                    if type(sample_noise) is np.ndarray:\n                        mask[:, x1: x2, y1: y2] = sample_noise\n                    else:\n                        mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()\n                    # mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()\n                    sample_noise = torch.from_numpy(mask).to(device)\n                    images[i] = images[i] + sample_noise\n                    train_idx += 1\n\n                model.train()\n                for param in model.parameters():\n                    param.requires_grad = True\n                trainer.train_batch(images, labels, model, optimizer)\n\n        # Search For Noise\n        idx = 0\n        for i, (images, labels) in tqdm(enumerate(data_loader['train_dataset']), total=len(data_loader['train_dataset'])):\n            images, labels, model = images.to(device), labels.to(device), model.to(device)\n\n            # Add Sample-wise Noise to each sample\n            batch_noise, batch_start_idx = [], idx\n            for i, (image, label) in enumerate(zip(images, labels)):\n                sample_noise = random_noise[idx]\n                c, h, w = image.shape[0], image.shape[1], image.shape[2]\n                mask = np.zeros((c, h, w), np.float32)\n                x1, x2, y1, y2 = mask_cord_list[idx]\n                if type(sample_noise) is np.ndarray:\n                    mask[:, x1: x2, y1: y2] = sample_noise\n                else:\n                    mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()\n                # mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()\n                sample_noise = torch.from_numpy(mask).to(device)\n                batch_noise.append(sample_noise)\n                idx += 1\n\n            # Update sample-wise perturbation\n            model.eval()\n            for param in model.parameters():\n                param.requires_grad = False\n            batch_noise = 
torch.stack(batch_noise).to(device)\n            if args.attack_type == 'min-min':\n                perturb_img, eta = noise_generator.min_min_attack(images, labels, model, optimizer, criterion, random_noise=batch_noise)\n            elif args.attack_type == 'min-max':\n                perturb_img, eta = noise_generator.min_max_attack(images, labels, model, optimizer, criterion, random_noise=batch_noise)\n            else:\n                raise('Invalid attack')\n\n            for i, delta in enumerate(eta):\n                x1, x2, y1, y2 = mask_cord_list[batch_start_idx+i]\n                delta = delta[:, x1: x2, y1: y2]\n                if torch.is_tensor(random_noise):\n                    random_noise[batch_start_idx+i] = delta.detach().cpu().clone()\n                else:\n                    random_noise[batch_start_idx+i] = delta.detach().cpu().numpy()\n\n        # Eval termination conditions\n        loss_avg, error_rate = samplewise_perturbation_eval(random_noise, data_loader, model, eval_target='train_dataset',\n                                                            mask_cord_list=mask_cord_list)\n        logger.info('Loss: {:.4f} Acc: {:.2f}%'.format(loss_avg, 100 - error_rate*100))\n\n        if torch.is_tensor(random_noise):\n            random_noise = random_noise.detach()\n            ENV['random_noise'] = random_noise\n        if args.attack_type == 'min-min':\n            condition = error_rate > args.universal_stop_error\n        elif args.attack_type == 'min-max':\n            condition = error_rate < args.universal_stop_error\n\n    # Update Random Noise to shape\n    if torch.is_tensor(random_noise):\n        new_random_noise = []\n        for idx in range(len(random_noise)):\n            sample_noise = random_noise[idx]\n            c, h, w = image.shape[0], image.shape[1], image.shape[2]\n            mask = np.zeros((c, h, w), np.float32)\n            x1, x2, y1, y2 = mask_cord_list[idx]\n            mask[:, x1: x2, y1: y2] = 
sample_noise.cpu().numpy()\n            new_random_noise.append(torch.from_numpy(mask))\n        new_random_noise = torch.stack(new_random_noise)\n        return new_random_noise\n    else:\n        return random_noise\n\n\ndef main():\n    # Setup ENV\n    datasets_generator = dataset.DatasetGenerator(train_batch_size=args.train_batch_size,\n                                                  eval_batch_size=args.eval_batch_size,\n                                                  train_data_type=args.train_data_type,\n                                                  train_data_path=args.train_data_path,\n                                                  test_data_type=args.test_data_type,\n                                                  test_data_path=args.test_data_path,\n                                                  num_of_workers=args.num_of_workers,\n                                                  seed=args.seed)\n    data_loader = datasets_generator.getDataLoader()\n    model = config.model().to(device)\n    logger.info(\"param size = %fMB\", util.count_parameters_in_MB(model))\n    optimizer = config.optimizer(model.parameters())\n    scheduler = config.scheduler(optimizer)\n    criterion = config.criterion()\n    if args.perturb_type == 'samplewise':\n        train_target = 'train_dataset'\n    else:\n        if args.use_subset:\n            data_loader = datasets_generator._split_validation_set(train_portion=args.universal_train_portion,\n                                                                   train_shuffle=True, train_drop_last=True)\n            train_target = 'train_subset'\n        else:\n            data_loader = datasets_generator.getDataLoader(train_shuffle=True, train_drop_last=True)\n            train_target = 'train_dataset'\n\n    trainer = Trainer(criterion, data_loader, logger, config, target=train_target)\n    evaluator = Evaluator(data_loader, logger, config)\n    ENV = {'global_step': 0,\n           'best_acc': 0.0,\n      
     'curren_acc': 0.0,\n           'best_pgd_acc': 0.0,\n           'train_history': [],\n           'eval_history': [],\n           'pgd_eval_history': [],\n           'genotype_list': []}\n\n    if args.data_parallel:\n        model = torch.nn.DataParallel(model)\n\n    if args.load_model:\n        checkpoint = util.load_model(filename=checkpoint_path_file,\n                                     model=model,\n                                     optimizer=optimizer,\n                                     alpha_optimizer=None,\n                                     scheduler=scheduler)\n        ENV = checkpoint['ENV']\n        trainer.global_step = ENV['global_step']\n        logger.info(\"File %s loaded!\" % (checkpoint_path_file))\n\n    noise_generator = toolbox.PerturbationTool(epsilon=args.epsilon,\n                                               num_steps=args.num_steps,\n                                               step_size=args.step_size)\n\n    if args.attack_type == 'random':\n        noise = noise_generator.random_noise(noise_shape=args.noise_shape)\n        torch.save(noise, os.path.join(args.exp_name, 'perturbation.pt'))\n        logger.info(noise)\n        logger.info(noise.shape)\n        logger.info('Noise saved at %s' % (os.path.join(args.exp_name, 'perturbation.pt')))\n    elif args.attack_type == 'min-min' or args.attack_type == 'min-max':\n        if args.attack_type == 'min-max':\n            # min-max noise need model to converge first\n            train(0, model, optimizer, scheduler, criterion, trainer, evaluator, ENV, data_loader)\n        if args.random_start:\n            random_noise = noise_generator.random_noise(noise_shape=args.noise_shape)\n        else:\n            random_noise = torch.zeros(*args.noise_shape)\n        if args.perturb_type == 'samplewise':\n            noise = sample_wise_perturbation(noise_generator, trainer, evaluator, model, criterion, optimizer, scheduler, random_noise, ENV)\n        elif args.perturb_type == 
'classwise':\n            noise = universal_perturbation(noise_generator, trainer, evaluator, model, criterion, optimizer, scheduler, random_noise, ENV)\n        torch.save(noise, os.path.join(args.exp_name, 'perturbation.pt'))\n        logger.info(noise)\n        logger.info(noise.shape)\n        logger.info('Noise saved at %s' % (os.path.join(args.exp_name, 'perturbation.pt')))\n    else:\n        raise ValueError('Not implemented yet')\n    return\n\n\nif __name__ == '__main__':\n    for arg in vars(args):\n        logger.info(\"%s: %s\" % (arg, getattr(args, arg)))\n    start = time.time()\n    main()\n    end = time.time()\n    cost = (end - start) / 86400\n    payload = \"Running Cost %.2f Days \\n\" % cost\n    logger.info(payload)\n"
  },
  {
    "path": "requirements.txt",
    "content": "torch\ntorchvision\nmlconfig\nnumpy\ntqdm\n"
  },
  {
    "path": "scripts/cifar10/min-max-noise/classwise-noise/exp_setting.sh",
    "content": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-max\nexport perturb_type=classwise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=1\nexport universal_stop_error=0.8\nexport universal_train_target='train_subset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/cifar10/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10/${attack_type}-noise/${perturb_type}-noise\n"
  },
  {
    "path": "scripts/cifar10/min-max-noise/classwise-noise/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n# Remove previous files\necho $exp_path\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_path\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             10 3 32 32         \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "scripts/cifar10/min-max-noise/classwise-noise/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    # \"resnet18_augmentation\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    0.8\n    0.6\n    0.4\n    0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# # Submit Adv Training\n# for poison_rate in \"${poison_rate_arr[@]}\"\n#   do\n#     job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n#     echo $job_name\n#     sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\n# done\n"
  },
  {
    "path": "scripts/cifar10/min-max-noise/classwise-noise/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\nrm -rf $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10/min-max-noise/classwise-noise/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10/min-max-noise/samplewise-noise/exp_setting.sh",
    "content": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-max\nexport perturb_type=samplewise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=20\nexport universal_stop_error=0.9\nexport universal_train_target='train_dataset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/cifar10/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10/${attack_type}-noise/${perturb_type}-noise\n"
  },
  {
    "path": "scripts/cifar10/min-max-noise/samplewise-noise/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n# Remove previous files\necho $exp_path\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_path\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             50000 3 32 32      \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n"
  },
  {
    "path": "scripts/cifar10/min-max-noise/samplewise-noise/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    # \"resnet18_augmentation\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    0.8\n    0.6\n    0.4\n    0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n# \n# # Submit Adv Training\n# echo resnet18-madrys-1.0-${exp_args}\n# sbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-1.0 train.slurm resnet18_madrys 1.0 $scripts_path\n# echo resnet18-madrys-0.0-${exp_args}\n# sbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-0.0 train.slurm resnet18_madrys 0.0 $scripts_path\n"
  },
  {
    "path": "scripts/cifar10/min-max-noise/samplewise-noise/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10/min-max-noise/samplewise-noise/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10/min-min-noise/classwise-noise/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=classwise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=1\nexport universal_stop_error=0.01\nexport universal_train_target='train_subset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/cifar10/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10/${attack_type}-noise/${perturb_type}-noise\n"
  },
  {
    "path": "scripts/cifar10/min-min-noise/classwise-noise/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_path\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             10 3 32 32         \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "scripts/cifar10/min-min-noise/classwise-noise/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    \"resnet50\"\n    \"dense121\"\n    \"resnet18_augmentation\"\n    \"resnet18_add-uniform-noise\"\n    \"resnet18_classpoison\"\n    \"resnet18_add-uniform-noise-aug\"\n    \"resnet18_cutout\"\n    \"resnet18_cutmix\"\n    \"resnet18_mixup\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    0.8\n    0.6\n    0.4\n    0.2\n    0.1\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# Submit Adv Training\nfor poison_rate in \"${poison_rate_arr[@]}\"\n  do\n    job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n    echo $job_name\n    sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\ndone\n\n# echo resnet18-madrys-1.0-${exp_args}\n# sbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-1.0 train.slurm resnet18_madrys 1.0 $scripts_path\n# echo resnet18-madrys-0.0-${exp_args}\n# sbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-0.0 train.slurm resnet18_madrys 0.0 $scripts_path\n"
  },
  {
    "path": "scripts/cifar10/min-min-noise/classwise-noise/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10/min-min-noise/classwise-noise/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10/min-min-noise/samplewise-noise/exp_setting.sh",
    "content": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=samplewise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=20\nexport universal_stop_error=0.01\nexport universal_train_target='train_dataset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/cifar10/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10/${attack_type}-noise/${perturb_type}-noise\n"
  },
  {
    "path": "scripts/cifar10/min-min-noise/samplewise-noise/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             50000 3 32 32      \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n"
  },
  {
    "path": "scripts/cifar10/min-min-noise/samplewise-noise/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    \"resnet50\"\n    \"dense121\"\n    \"resnet18_augmentation\"\n    \"resnet18_add-uniform-noise\"\n    \"resnet18_classpoison\"\n    \"resnet18_add-uniform-noise-aug\"\n    \"resnet18_cutout\"\n    \"resnet18_cutmix\"\n    \"resnet18_mixup\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    0.8\n    0.6\n    0.4\n    0.2\n    0.1\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# # Submit Adv Training\n# for poison_rate in \"${poison_rate_arr[@]}\"\n#   do\n#     job_name=$exp_args-resnet18_madrys-${poison_rate}\n#     echo $job_name\n#     sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\n# done\n\n\necho resnet18-madrys-1.0-${exp_args}\nsbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-1.0 train.slurm resnet18_madrys 1.0 $scripts_path\necho resnet18-madrys-0.0-${exp_args}\nsbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-0.0 train.slurm resnet18_madrys 0.0 $scripts_path\n"
  },
  {
    "path": "scripts/cifar10/min-min-noise/samplewise-noise/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10/min-min-noise/samplewise-noise/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10/random-noise/classwise-noise/exp_setting.sh",
    "content": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=random\nexport perturb_type=classwise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=1\nexport universal_stop_error=0.9\nexport universal_train_target='train_subset'\nexport exp_args=${dataset_type}-eps=${epsilon}\nexport exp_path=experiments/cifar10/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10/${attack_type}-noise/${perturb_type}-noise\n"
  },
  {
    "path": "scripts/cifar10/random-noise/classwise-noise/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             10 3 32 32         \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n"
  },
  {
    "path": "scripts/cifar10/random-noise/classwise-noise/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    # \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    0.8\n    0.6\n    0.4\n    0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# # Submit Adv Training\n# echo ${attack_type}-${perturb_type}-resnet18-madrys-1.0\n# sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00                 \\\n#        --job-name ${attack_type}-${perturb_type}-resnet18-madrys-1.0  \\\n#        train.slurm resnet18_madrys 1.0 $scripts_path\n#\n# echo ${attack_type}-${perturb_type}-resnet18-madrys-0.0\n# sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00                 \\\n#        --job-name ${attack_type}-${perturb_type}-resnet18-madrys-0.0  \\\n#        train.slurm resnet18_madrys 0.0 $scripts_path\n"
  },
  {
    "path": "scripts/cifar10/random-noise/classwise-noise/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10/random-noise/classwise-noise/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10/random-noise/samplewise-noise/exp_setting.sh",
    "content": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=random\nexport perturb_type=samplewise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=20\nexport universal_stop_error=0.9\nexport universal_train_target='train_dataset'\nexport exp_args=${dataset_type}-eps=${epsilon}\nexport exp_path=experiments/cifar10/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10/${attack_type}-noise/${perturb_type}-noise\n"
  },
  {
    "path": "scripts/cifar10/random-noise/samplewise-noise/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_path\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             50000 3 32 32      \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n"
  },
  {
    "path": "scripts/cifar10/random-noise/samplewise-noise/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    # \"resnet18_augmentation\"\n# \"resnet18_denoise\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    0.8\n    0.6\n    0.4\n    0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n# \n# # Submit Adv Training\n# echo resnet18-madrys-1.0-${exp_args}\n# sbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-1.0 train.slurm resnet18_madrys 1.0 $scripts_path\n# echo resnet18-madrys-0.0-${exp_args}\n# sbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-0.0 train.slurm resnet18_madrys 0.0 $scripts_path\n"
  },
  {
    "path": "scripts/cifar10/random-noise/samplewise-noise/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10/random-noise/samplewise-noise/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-2/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=classwise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=1\nexport universal_stop_error=0.1\nexport universal_train_target='train_subset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}-2noise\nexport exp_path=experiments/cifar10-extension/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10-extension/${attack_type}-noise/${perturb_type}-noise-2\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-2/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             10 3 32 32         \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-2/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n    # \"resnet18_add-uniform-noise\"\n    # \"resnet18_add-uniform-noise-aug\"\n    # \"resnet18_classpoison\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# # Submit Adv Training\n# for poison_rate in \"${poison_rate_arr[@]}\"\n#   do\n#     job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n#     echo $job_name\n#     sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\n# done\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-2/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-2/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=16/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=classwise\nexport base_version=resnet18\nexport epsilon=16\nexport step_size=1.6\nexport num_steps=1\nexport universal_stop_error=0.01\nexport universal_train_target='train_subset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/cifar10-extension/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10-extension/${attack_type}-noise/${perturb_type}-noise-eps=16\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=16/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             10 3 32 32         \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=16/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    # \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n    # \"resnet18_add-uniform-noise\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# Submit Adv Training\nfor poison_rate in \"${poison_rate_arr[@]}\"\n  do\n    job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n    echo $job_name\n    sbatch --partition gpgpu --gres=gpu:1 --time 8:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\ndone\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=16/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=16/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=24/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=classwise\nexport base_version=resnet18\nexport epsilon=24\nexport step_size=2.4\nexport num_steps=1\nexport universal_stop_error=0.01\nexport universal_train_target='train_subset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/cifar10-extension/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10-extension/${attack_type}-noise/${perturb_type}-noise-eps=24\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=24/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             10 3 32 32         \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=24/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    # \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n    # \"resnet18_add-uniform-noise\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# Submit Adv Training\nfor poison_rate in \"${poison_rate_arr[@]}\"\n  do\n    job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n    echo $job_name\n    sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\ndone\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=24/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-eps=24/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch16/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=classwise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=1\nexport universal_stop_error=0.1\nexport universal_train_target='train_subset'\nexport patch_location='random'\nexport patch_size=16\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}-${patch_location}${patch_size}\nexport exp_path=experiments/cifar10-extension/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10-extension/${attack_type}-noise/${perturb_type}-noise-${patch_location}-patch${patch_size}\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch16/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             10 3 $patch_size $patch_size \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --patch_location          $patch_location    \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch16/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n    # \"resnet18_add-uniform-noise\"\n    # \"resnet18_classpoison\"\n    # \"resnet18_add-uniform-noise-aug\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    # 0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# # Submit Adv Training\n# for poison_rate in \"${poison_rate_arr[@]}\"\n#   do\n#     job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n#     echo $job_name\n#     sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\n# done\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch16/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch16/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch24/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=classwise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=1\nexport universal_stop_error=0.1\nexport universal_train_target='train_subset'\nexport patch_location='random'\nexport patch_size=24\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}-${patch_location}${patch_size}\nexport exp_path=experiments/cifar10-extension/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10-extension/${attack_type}-noise/${perturb_type}-noise-${patch_location}-patch${patch_size}\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch24/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             10 3 $patch_size $patch_size \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --patch_location          $patch_location    \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch24/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n    # \"resnet18_add-uniform-noise\"\n    # \"resnet18_classpoison\"\n    # \"resnet18_add-uniform-noise-aug\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    # 0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# # Submit Adv Training\n# for poison_rate in \"${poison_rate_arr[@]}\"\n#   do\n#     job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n#     echo $job_name\n#     sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\n# done\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch24/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch24/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch8/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=classwise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=1\nexport universal_stop_error=0.1\nexport universal_train_target='train_subset'\nexport patch_location='random'\nexport patch_size=8\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}-${patch_location}${patch_size}\nexport exp_path=experiments/cifar10-extension/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10-extension/${attack_type}-noise/${perturb_type}-noise-${patch_location}-patch${patch_size}\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch8/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             10 3 $patch_size $patch_size \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --patch_location          $patch_location    \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch8/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n    # \"resnet18_add-uniform-noise\"\n    # \"resnet18_classpoison\"\n    # \"resnet18_add-uniform-noise-aug\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    # 0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# # Submit Adv Training\n# for poison_rate in \"${poison_rate_arr[@]}\"\n#   do\n#     job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n#     echo $job_name\n#     sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\n# done\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch8/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-random-patch8/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-transfer-tiny-imagenet/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=classwise\nexport base_version=resnet18\nexport epsilon=16\nexport step_size=1.6\nexport num_steps=1\nexport universal_stop_error=0.1\nexport universal_train_target='train_subset'\nexport exp_args=TinyImageNet-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/cifar10-extension/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10-extension/${attack_type}-noise/${perturb_type}-noise-transfer-tiny-imagenet\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-transfer-tiny-imagenet/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    \"resnet50\"\n    \"dense121\"\n    \"resnet18_augmentation\"\n    \"resnet18_add-uniform-noise\"\n    \"resnet18_classpoison\"\n    # \"resnet18_classpoison_targeted\"\n\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    0.8\n    0.6\n    0.4\n    0.2\n    0.0\n)\n\necho $scripts_path\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# Submit Adv Training\nfor poison_rate in \"${poison_rate_arr[@]}\"\n  do\n    job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n    echo $job_name\n    sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\ndone\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-transfer-tiny-imagenet/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/classwise-noise-transfer-tiny-imagenet/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\npwd\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=16/exp_setting.sh",
    "content": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=samplewise\nexport base_version=resnet18\nexport epsilon=16\nexport step_size=1.6\nexport num_steps=20\nexport universal_stop_error=0.01\nexport universal_train_target='train_dataset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/cifar10-extension/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10-extension/${attack_type}-noise/${perturb_type}-noise-eps=16\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=16/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             50000 3 32 32      \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --random_start\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=16/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    # \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# Submit Adv Training\nfor poison_rate in \"${poison_rate_arr[@]}\"\n  do\n    job_name=$exp_args-resnet18_madrys-${poison_rate}\n    echo $job_name\n    sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\ndone\n\n\n# echo resnet18-madrys-1.0-${exp_args}\n# sbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-1.0 train.slurm resnet18_madrys 1.0 $scripts_path\n# echo resnet18-madrys-0.0-${exp_args}\n# sbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-0.0 train.slurm resnet18_madrys 0.0 $scripts_path\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=16/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=16/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=24/exp_setting.sh",
    "content": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=samplewise\nexport base_version=resnet18\nexport epsilon=24\nexport step_size=2.4\nexport num_steps=20\nexport universal_stop_error=0.01\nexport universal_train_target='train_dataset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/cifar10-extension/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10-extension/${attack_type}-noise/${perturb_type}-noise-eps=24\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=24/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             50000 3 32 32      \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --random_start\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=24/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    # \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# Submit Adv Training\nfor poison_rate in \"${poison_rate_arr[@]}\"\n  do\n    job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n    echo $job_name\n    sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\ndone\n\n\n# echo resnet18-madrys-1.0-${exp_args}\n# sbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-1.0 train.slurm resnet18_madrys 1.0 $scripts_path\n# echo resnet18-madrys-0.0-${exp_args}\n# sbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-0.0 train.slurm resnet18_madrys 0.0 $scripts_path\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=24/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-eps=24/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch16/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=samplewise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=1\nexport universal_stop_error=0.1\nexport universal_train_target='train_subset'\nexport patch_location='random'\nexport patch_size=16\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}-${patch_location}${patch_size}\nexport exp_path=experiments/cifar10-extension/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10-extension/${attack_type}-noise/${perturb_type}-noise-${patch_location}-patch${patch_size}\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch16/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             50000 3 $patch_size $patch_size \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --patch_location          $patch_location    \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch16/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n    # \"resnet18_add-uniform-noise\"\n    # \"resnet18_classpoison\"\n    # \"resnet18_add-uniform-noise-aug\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    # 0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# # Submit Adv Training\n# for poison_rate in \"${poison_rate_arr[@]}\"\n#   do\n#     job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n#     echo $job_name\n#     sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\n# done\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch16/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch16/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch24/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=samplewise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=1\nexport universal_stop_error=0.1\nexport universal_train_target='train_subset'\nexport patch_location='random'\nexport patch_size=24\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}-${patch_location}${patch_size}\nexport exp_path=experiments/cifar10-extension/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10-extension/${attack_type}-noise/${perturb_type}-noise-${patch_location}-patch${patch_size}\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch24/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             50000 3 $patch_size $patch_size \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --patch_location          $patch_location    \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch24/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n    # \"resnet18_add-uniform-noise\"\n    # \"resnet18_classpoison\"\n    # \"resnet18_add-uniform-noise-aug\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    # 0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# # Submit Adv Training\n# for poison_rate in \"${poison_rate_arr[@]}\"\n#   do\n#     job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n#     echo $job_name\n#     sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\n# done\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch24/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch24/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch8/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=samplewise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=1\nexport universal_stop_error=0.1\nexport universal_train_target='train_subset'\nexport patch_location='random'\nexport patch_size=8\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}-${patch_location}${patch_size}\nexport exp_path=experiments/cifar10-extension/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar10-extension/${attack_type}-noise/${perturb_type}-noise-${patch_location}-patch${patch_size}\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch8/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --noise_shape             50000 3 $patch_size $patch_size \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --patch_location          $patch_location    \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch8/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    # \"resnet50\"\n    # \"dense121\"\n    \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n    # \"resnet18_add-uniform-noise\"\n    # \"resnet18_classpoison\"\n    # \"resnet18_add-uniform-noise-aug\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    # 0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# # Submit Adv Training\n# for poison_rate in \"${poison_rate_arr[@]}\"\n#   do\n#     job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n#     echo $job_name\n#     sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\n# done\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch8/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar10-extension/min-min-noise/samplewise-noise-random-patch8/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar100/min-min-noise/classwise-noise/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar100\nexport dataset_type=CIFAR100\nexport poison_dataset_type=PoisonCIFAR100\nexport attack_type=min-min\nexport perturb_type=classwise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=1\nexport universal_stop_error=0.01\nexport universal_train_target='train_subset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/cifar100/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar100/${attack_type}-noise/${perturb_type}-noise\n"
  },
  {
    "path": "scripts/cifar100/min-min-noise/classwise-noise/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --test_data_type          $dataset_type      \\\n                        --noise_shape             100 3 32 32        \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "scripts/cifar100/min-min-noise/classwise-noise/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    \"resnet50\"\n    \"dense121\"\n    # \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n    # \"resnet18_add-uniform-noise\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 8:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n#\n# # Submit Adv Training\n# for poison_rate in \"${poison_rate_arr[@]}\"\n#   do\n#     job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n#     echo $job_name\n#     sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\n# done\n"
  },
  {
    "path": "scripts/cifar100/min-min-noise/classwise-noise/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --test_data_type          $dataset_type               \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar100/min-min-noise/classwise-noise/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --test_data_type          $dataset_type               \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar100/min-min-noise/samplewise-noise/exp_setting.sh",
    "content": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar100\nexport dataset_type=CIFAR100\nexport poison_dataset_type=PoisonCIFAR100\nexport attack_type=min-min\nexport perturb_type=samplewise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=20\nexport train_step=20\nexport universal_stop_error=0.01\nexport universal_train_target='train_dataset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/cifar100/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/cifar100/${attack_type}-noise/${perturb_type}-noise\n"
  },
  {
    "path": "scripts/cifar100/min-min-noise/samplewise-noise/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --test_data_type          $dataset_type      \\\n                        --noise_shape             50000 3 32 32      \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --train_step              $train_step        \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n"
  },
  {
    "path": "scripts/cifar100/min-min-noise/samplewise-noise/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    \"resnet50\"\n    \"dense121\"\n    # \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 8:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# # Submit Adv Training\n# for poison_rate in \"${poison_rate_arr[@]}\"\n#   do\n#     job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n#     echo $job_name\n#     sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\n# done\n\n\n# echo resnet18-madrys-1.0-${exp_args}\n# sbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-1.0 train.slurm resnet18_madrys 1.0 $scripts_path\n# echo resnet18-madrys-0.0-${exp_args}\n# sbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-0.0 train.slurm resnet18_madrys 0.0 $scripts_path\n"
  },
  {
    "path": "scripts/cifar100/min-min-noise/samplewise-noise/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --test_data_type          $dataset_type               \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar100/min-min-noise/samplewise-noise/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --test_data_type          $dataset_type               \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/cifar101/exp_setting.sh",
    "content": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/cifar10\nexport dataset_type=CIFAR10\nexport poison_dataset_type=PoisonCIFAR10\nexport attack_type=min-min\nexport perturb_type=samplewise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=20\nexport universal_stop_error=0.01\nexport universal_train_target='train_dataset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/cifar10/${attack_type}_${perturb_type}/${exp_args}\n"
  },
  {
    "path": "scripts/cifar101/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=1.0\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                experiments/cifar101_transfer \\\n                      --config_path             configs/cifar101            \\\n                      --train_data_type         PoisonCIFAR101              \\\n                      --test_data_type          PoisonCIFAR101              \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/face/min-min-noise/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n# Exp Setting\nexport config_path=configs/face\nexport dataset_path=../datasets/casia-112x112-protected-train\nexport test_dataset_path=../datasets/casia-112x112-protected-val\nexport dataset_type=WebFace\nexport poison_dataset_type=WebFace\nexport base_version=InceptionResnet\nexport attack_type=min-min\nexport perturb_type=classwise\nexport epsilon=16\nexport step_size=1.6\nexport num_steps=1\nexport train_step=30\nexport universal_stop_error=0.1\nexport universal_train_target='train_dataset'\nexport exp_path=experiments/face\nexport scripts_path=scripts/face\n"
  },
  {
    "path": "scripts/face/min-min-noise/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\nexp_name=${exp_path}/search_noise\necho $exp_name\n\n# Search Universal Perturbation and build datasets\ncd ../../../\n# rm -rf $exp_name\npwd\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_name          \\\n                        --version                 $base_version      \\\n                        --train_data_type         WebFace            \\\n                        --test_data_type          WebFace            \\\n                        --train_data_path         /home/lemonbear/DriveN/data/face-search      \\\n                        --test_data_path          /home/lemonbear/DriveN/data/face-search      \\\n                        --noise_shape             150 3 112 112      \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --train_step              $train_step        \\\n                        --train_batch_size        32                 \\\n                        --eval_batch_size         32                 \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n"
  },
  {
    "path": "scripts/face/min-min-noise/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}\necho $exp_name\n\n# Poison Training\ncd ../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_path         $dataset_path               \\\n                      --test_data_path          $test_dataset_path          \\\n                      --train_data_type         $dataset_type               \\\n                      --test_data_type          $dataset_type               \\\n                      --train_batch_size        64                         \\\n                      --eval_batch_size         64                         \\\n                      --train --data_parallel --train_face\n"
  },
  {
    "path": "scripts/face/min-min-noise/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"WebFace\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=8\n#SBATCH --mem=32G\n#SBATCH --time 72:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\nscripts_path=$2\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}\necho $exp_name\n\n# Poison Training\ncd ../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_path         $dataset_path               \\\n                      --test_data_path          $test_dataset_path          \\\n                      --train_data_type         $dataset_type               \\\n                      --test_data_type          $dataset_type               \\\n                      --train_batch_size        64                         \\\n                      --eval_batch_size         64                         \\\n                      --train --data_parallel\n"
  },
  {
    "path": "scripts/face/min-min-noise/train_clean.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n# Training Setting\nmodel_name=$1\nexp_name=${exp_path}/clean_train\necho $exp_name\n\ncd ../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_path         $dataset_path               \\\n                      --test_data_path          $test_dataset_path          \\\n                      --train_data_type         $dataset_type               \\\n                      --test_data_type          $dataset_type               \\\n                      --train_batch_size        512                         \\\n                      --eval_batch_size         512                         \\\n                      --train --train_face --data_parallel\n"
  },
  {
    "path": "scripts/face/min-min-noise/train_clean.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"WebFace-Clean\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=8\n#SBATCH --mem=32G\n#SBATCH --time 168:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\nscripts_path=$2\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\n# Training Setting\nmodel_name=$1\nexp_name=${exp_path}/clean_train\necho $exp_name\n\ncd ../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_path         $dataset_path               \\\n                      --test_data_path          $test_dataset_path          \\\n                      --train_data_type         $dataset_type               \\\n                      --test_data_type          $dataset_type               \\\n                      --train_batch_size        512                         \\\n                      --eval_batch_size         512                         \\\n                      --train --train_face --data_parallel\n"
  },
  {
    "path": "scripts/face/min-min-noise/train_protected.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n# Training Setting\nmodel_name=$1\nexp_name=${exp_path}/protected_train\necho $exp_name\n\ncd ../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_path         ../datasets/casia-112x112-protected \\\n                      --test_data_path          $test_dataset_path          \\\n                      --train_data_type         $dataset_type               \\\n                      --test_data_type          $dataset_type               \\\n                      --train_batch_size        512                         \\\n                      --eval_batch_size         512                         \\\n                      --train --train_face --data_parallel\n"
  },
  {
    "path": "scripts/face/min-min-noise/train_protected.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"WebFace-Protected\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=8\n#SBATCH --mem=32G\n#SBATCH --time 168:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\nscripts_path=$2\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\n# Training Setting\nmodel_name=$1\nexp_name=${exp_path}/protected_train\necho $exp_name\n\ncd ../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_path         $dataset_path               \\\n                      --test_data_path          $test_dataset_path          \\\n                      --train_data_type         $dataset_type               \\\n                      --test_data_type          $dataset_type               \\\n                      --train_batch_size        512                         \\\n                      --eval_batch_size         512                         \\\n                      --train --train_face --data_parallel\n"
  },
  {
    "path": "scripts/imagenet-mini/min-min-noise/classwise-noise/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/imagenet-mini\nexport dataset_path=../datasets/ILSVRC2012\nexport dataset_type=ImageNetMini\nexport poison_dataset_type=PoisonImageNetMini\nexport attack_type=min-min\nexport perturb_type=classwise\nexport base_version=resnet18\nexport epsilon=16\nexport step_size=1.6\nexport num_steps=1\nexport train_step=100\nexport universal_stop_error=0.1\nexport universal_train_target='train_subset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/imagenet-mini/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/imagenet-mini/${attack_type}-noise/${perturb_type}-noise\n"
  },
  {
    "path": "scripts/imagenet-mini/min-min-noise/classwise-noise/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_path\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_path         $dataset_path      \\\n                        --train_data_type         $dataset_type      \\\n                        --test_data_path          $dataset_path      \\\n                        --test_data_type          $dataset_type      \\\n                        --noise_shape             100 3 224 224      \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --train_step              $train_step        \\\n                        --train_batch_size        32                 \\\n                        --eval_batch_size         32                 \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "scripts/imagenet-mini/min-min-noise/classwise-noise/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    \"resnet50\"\n    \"dense121\"\n    # \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n    # \"resnet18_add-uniform-noise\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 48:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n"
  },
  {
    "path": "scripts/imagenet-mini/min-min-noise/classwise-noise/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_path         $dataset_path               \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --test_data_path          $dataset_path               \\\n                      --test_data_type          $dataset_type               \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/imagenet-mini/min-min-noise/classwise-noise/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=24\n#SBATCH --mem=32G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\n\n# Load EXP Setting\ncd $scripts_path\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_path         $dataset_path               \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --test_data_path          $dataset_path               \\\n                      --test_data_type          $dataset_type               \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train_batch_size 64 --eval_batch_size 64            \\\n                      --num_of_workers          24 \\\n                      --train\n"
  },
  {
    "path": "scripts/imagenet-mini/min-min-noise/samplewise-noise/exp_setting.sh",
    "content": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/imagenet-mini\nexport dataset_path=../datasets/ILSVRC2012\nexport dataset_type=ImageNetMini\nexport poison_dataset_type=PoisonImageNetMini\nexport attack_type=min-min\nexport perturb_type=samplewise\nexport base_version=resnet18\nexport epsilon=16\nexport step_size=1.6\nexport num_steps=20\nexport train_step=100\nexport universal_stop_error=0.1\nexport universal_train_target='train_dataset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/imagenet-mini/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/imagenet-mini/${attack_type}-noise/${perturb_type}-noise\n"
  },
  {
    "path": "scripts/imagenet-mini/min-min-noise/samplewise-noise/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_path\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_path         $dataset_path      \\\n                        --train_data_type         $dataset_type      \\\n                        --test_data_path          $dataset_path      \\\n                        --test_data_type          $dataset_type      \\\n                        --noise_shape             25879 3 224 224    \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --train_step              $train_step        \\\n                        --train_batch_size        32                 \\\n                        --eval_batch_size         32                 \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n"
  },
  {
    "path": "scripts/imagenet-mini/min-min-noise/samplewise-noise/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    \"resnet50\"\n    \"dense121\"\n    # \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 48:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n"
  },
  {
    "path": "scripts/imagenet-mini/min-min-noise/samplewise-noise/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_path         $dataset_path               \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --test_data_path          $dataset_path               \\\n                      --test_data_type          $dataset_type               \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train --train_portion 0.2\n"
  },
  {
    "path": "scripts/imagenet-mini/min-min-noise/samplewise-noise/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=24\n#SBATCH --mem=64G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_path         $dataset_path               \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --test_data_path          $dataset_path               \\\n                      --test_data_type          $dataset_type               \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train_batch_size 64 --eval_batch_size 64            \\\n                      --num_of_workers          24 \\\n                      --train --train_portion 0.2\n"
  },
  {
    "path": "scripts/svhn/min-min-noise/classwise-noise/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/svhn\nexport dataset_type=SVHN\nexport poison_dataset_type=PoisonSVHN\nexport attack_type=min-min\nexport perturb_type=classwise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=1\nexport universal_stop_error=0.01\nexport universal_train_target='train_subset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/svhn/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/svhn/${attack_type}-noise/${perturb_type}-noise\n"
  },
  {
    "path": "scripts/svhn/min-min-noise/classwise-noise/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_path\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --test_data_type          $dataset_type      \\\n                        --noise_shape             10 3 32 32         \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "scripts/svhn/min-min-noise/classwise-noise/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    \"resnet50\"\n    \"dense121\"\n    # \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n    # \"resnet18_add-uniform-noise\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=${attack_type}-${perturb_type}-$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 4:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# # Submit Adv Training\n# for poison_rate in \"${poison_rate_arr[@]}\"\n#   do\n#     job_name=${attack_type}-${perturb_type}-$exp_args-resnet18_madrys-${poison_rate}\n#     echo $job_name\n#     sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\n# done\n"
  },
  {
    "path": "scripts/svhn/min-min-noise/classwise-noise/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --test_data_type          $dataset_type               \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/svhn/min-min-noise/classwise-noise/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --test_data_type          $dataset_type               \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/svhn/min-min-noise/samplewise-noise/exp_setting.sh",
    "content": "#!/bin/bash\n\n\n\n# Exp Setting\nexport config_path=configs/svhn\nexport dataset_type=SVHN\nexport poison_dataset_type=PoisonSVHN\nexport attack_type=min-min\nexport perturb_type=samplewise\nexport base_version=resnet18\nexport epsilon=8\nexport step_size=0.8\nexport num_steps=20\nexport universal_stop_error=0.01\nexport universal_train_target='train_dataset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/svhn/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/svhn/${attack_type}-noise/${perturb_type}-noise\n"
  },
  {
    "path": "scripts/svhn/min-min-noise/samplewise-noise/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_type         $dataset_type      \\\n                        --test_data_type          $dataset_type      \\\n                        --noise_shape             73257 3 32 32      \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n"
  },
  {
    "path": "scripts/svhn/min-min-noise/samplewise-noise/submit.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Target Models\ndeclare -a type_arr=(\n    \"resnet18\"\n    \"resnet50\"\n    \"dense121\"\n    # \"resnet18_augmentation\"\n    # \"resnet18_denoise\"\n)\n\n# Poison Rates\ndeclare -a poison_rate_arr=(\n    1.0\n    # 0.8\n    # 0.6\n    # 0.4\n    # 0.2\n    0.0\n)\n\n\n# Submit Jobs\nfor model_name in \"${type_arr[@]}\"\ndo\n    for poison_rate in \"${poison_rate_arr[@]}\"\n    do\n      job_name=$exp_args-${model_name}-${poison_rate}\n      echo $job_name\n      sbatch --partition gpgpu --gres=gpu:1 --time 3:00:00 --job-name $job_name train.slurm $model_name $poison_rate $scripts_path\n    done\ndone\n\n\n# # Submit Adv Training\n# for poison_rate in \"${poison_rate_arr[@]}\"\n#   do\n#     job_name=$exp_args-resnet18_madrys-${poison_rate}\n#     echo $job_name\n#     sbatch --partition gpgpu --gres=gpu:1 --time 12:00:00 --job-name $job_name train.slurm resnet18_madrys $poison_rate $scripts_path\n# done\n\n\n# echo resnet18-madrys-1.0-${exp_args}\n# sbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-1.0 train.slurm resnet18_madrys 1.0 $scripts_path\n# echo resnet18-madrys-0.0-${exp_args}\n# sbatch --partition gpgpu --gres=gpu:1 --time 24:00:00 --job-name ${exp_args}-resnet18-madrys-0.0 train.slurm resnet18_madrys 0.0 $scripts_path\n"
  },
  {
    "path": "scripts/svhn/min-min-noise/samplewise-noise/train.sh",
    "content": "#!/bin/bash\n\n# Load EXP Setting\nsource exp_setting.sh\n\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --test_data_type          $dataset_type               \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/svhn/min-min-noise/samplewise-noise/train.slurm",
    "content": "#!/bin/bash\n#SBATCH --nodes 1\n#SBATCH --job-name=\"c100-universal\"\n#SBATCH --output=slurm-%A-%x.out\n#SBATCH --account=\"punim0784\"\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=4G\n#SBATCH --time 4:00:00\n#SBATCH --mail-type=ALL\n#SBATCH --mail-user=pineappleorcas@gmail.com\n\n# check that the script is launched with sbatch\nif [ \"x$SLURM_JOB_ID\" == \"x\" ]; then\n   echo \"You need to submit your job to the queuing system with sbatch\"\n   exit 1\nfi\n\n# Training Setting\nmodel_name=$1\npoison_rate=$2\nscripts_path=$3\n\n# Run the job from this directory:\ncd /data/gpfs/projects/punim0784/min-min-noise\ncd $scripts_path\n\n# Load EXP Setting\nsource exp_setting.sh\n\nexp_name=${exp_path}/poison_train_${poison_rate}\necho $exp_name\n\n# Poison Training\ncd ../../../../\nrm -rf ${exp_name}/${model_name}\npython3 -u main.py    --version                 $model_name                 \\\n                      --exp_name                $exp_name                   \\\n                      --config_path             $config_path                \\\n                      --train_data_type         $poison_dataset_type        \\\n                      --test_data_type          $dataset_type               \\\n                      --poison_rate             $poison_rate                \\\n                      --perturb_type            $perturb_type               \\\n                      --perturb_tensor_filepath ${exp_path}/perturbation.pt \\\n                      --train\n"
  },
  {
    "path": "scripts/tiny-imagenet/min-min-noise/classwise-noise/exp_setting.sh",
    "content": "#!/usr/bin/env bash\n\n\n\n# Exp Setting\nexport config_path=configs/tiny-imagenet\nexport dataset_path=../datasets/ILSVRC2012\nexport dataset_type=TinyImageNet\nexport poison_dataset_type=PoisonImageNetMini\nexport attack_type=min-min\nexport perturb_type=classwise\nexport base_version=resnet18\nexport epsilon=16\nexport step_size=1.6\nexport num_steps=1\nexport train_step=250\nexport universal_stop_error=0.1\nexport universal_train_target='train_subset'\nexport exp_args=${dataset_type}-eps=${epsilon}-se=${universal_stop_error}-base_version=${base_version}\nexport exp_path=experiments/tiny-imagenet/${attack_type}_${perturb_type}/${exp_args}\nexport scripts_path=scripts/tiny-imagenet/${attack_type}-noise/${perturb_type}-noise\n"
  },
  {
    "path": "scripts/tiny-imagenet/min-min-noise/classwise-noise/search_perturbation_noise.sh",
    "content": "#!/bin/bash\n\n# Load Exp Settings\nsource exp_setting.sh\n\n\n# Remove previous files\necho $exp_path\n\n\n# Search Universal Perturbation and build datasets\ncd ../../../../\npwd\nrm -rf $exp_name\npython3 perturbation.py --config_path             $config_path       \\\n                        --exp_name                $exp_path          \\\n                        --version                 $base_version      \\\n                        --train_data_path         $dataset_path      \\\n                        --train_data_type         $dataset_type      \\\n                        --test_data_path          $dataset_path      \\\n                        --test_data_type          $dataset_type      \\\n                        --noise_shape             1000 3 32 32       \\\n                        --epsilon                 $epsilon           \\\n                        --num_steps               $num_steps         \\\n                        --step_size               $step_size         \\\n                        --attack_type             $attack_type       \\\n                        --perturb_type            $perturb_type      \\\n                        --train_step              $train_step        \\\n                        --train_batch_size        32                 \\\n                        --eval_batch_size         32                 \\\n                        --universal_train_target  $universal_train_target\\\n                        --universal_stop_error    $universal_stop_error\\\n                        --use_subset\n"
  },
  {
    "path": "toolbox.py",
    "content": "import numpy as np\nimport torch\nfrom torch.autograd import Variable\n\nif torch.cuda.is_available():\n    device = torch.device('cuda')\nelse:\n    device = torch.device('cpu')\n\n\nclass PerturbationTool():\n    def __init__(self, seed=0, epsilon=0.03137254901, num_steps=20, step_size=0.00784313725):\n        self.epsilon = epsilon\n        self.num_steps = num_steps\n        self.step_size = step_size\n        self.seed = seed\n        np.random.seed(seed)\n\n    def random_noise(self, noise_shape=[10, 3, 32, 32]):\n        random_noise = torch.FloatTensor(*noise_shape).uniform_(-self.epsilon, self.epsilon).to(device)\n        return random_noise\n\n    def min_min_attack(self, images, labels, model, optimizer, criterion, random_noise=None, sample_wise=False):\n        if random_noise is None:\n            random_noise = torch.FloatTensor(*images.shape).uniform_(-self.epsilon, self.epsilon).to(device)\n\n        perturb_img = Variable(images.data + random_noise, requires_grad=True)\n        perturb_img = Variable(torch.clamp(perturb_img, 0, 1), requires_grad=True)\n        eta = random_noise\n        for _ in range(self.num_steps):\n            opt = torch.optim.SGD([perturb_img], lr=1e-3)\n            opt.zero_grad()\n            model.zero_grad()\n            if isinstance(criterion, torch.nn.CrossEntropyLoss):\n                if hasattr(model, 'classify'):\n                    model.classify = True\n                logits = model(perturb_img)\n                loss = criterion(logits, labels)\n            else:\n                logits, loss = criterion(model, perturb_img, labels, optimizer)\n            perturb_img.retain_grad()\n            loss.backward()\n            eta = self.step_size * perturb_img.grad.data.sign() * (-1)\n            perturb_img = Variable(perturb_img.data + eta, requires_grad=True)\n            eta = torch.clamp(perturb_img.data - images.data, -self.epsilon, self.epsilon)\n            perturb_img = 
Variable(images.data + eta, requires_grad=True)\n            perturb_img = Variable(torch.clamp(perturb_img, 0, 1), requires_grad=True)\n\n        return perturb_img, eta\n\n    def min_max_attack(self, images, labels, model, optimizer, criterion, random_noise=None, sample_wise=False):\n        if random_noise is None:\n            random_noise = torch.FloatTensor(*images.shape).uniform_(-self.epsilon, self.epsilon).to(device)\n\n        perturb_img = Variable(images.data + random_noise, requires_grad=True)\n        perturb_img = Variable(torch.clamp(perturb_img, 0, 1), requires_grad=True)\n        eta = random_noise\n        for _ in range(self.num_steps):\n            opt = torch.optim.SGD([perturb_img], lr=1e-3)\n            opt.zero_grad()\n            model.zero_grad()\n            if isinstance(criterion, torch.nn.CrossEntropyLoss):\n                logits = model(perturb_img)\n                loss = criterion(logits, labels)\n            else:\n                logits, loss = criterion(model, perturb_img, labels, optimizer)\n            loss.backward()\n\n            eta = self.step_size * perturb_img.grad.data.sign()\n            perturb_img = Variable(perturb_img.data + eta, requires_grad=True)\n            eta = torch.clamp(perturb_img.data - images.data, -self.epsilon, self.epsilon)\n            perturb_img = Variable(images.data + eta, requires_grad=True)\n            perturb_img = Variable(torch.clamp(perturb_img, 0, 1), requires_grad=True)\n\n        return perturb_img, eta\n\n    def _patch_noise_extend_to_img(self, noise, image_size=[3, 32, 32], patch_location='center'):\n        c, h, w = image_size[0], image_size[1], image_size[2]\n        mask = np.zeros((c, h, w), np.float32)\n        x_len, y_len = noise.shape[1], noise.shape[1]\n\n        if patch_location == 'center' or (h == w == x_len == y_len):\n            x = h // 2\n            y = w // 2\n        elif patch_location == 'random':\n            x = np.random.randint(x_len // 2, w - x_len 
// 2)\n            y = np.random.randint(y_len // 2, h - y_len // 2)\n        else:\n            raise('Invalid patch location')\n\n        x1 = np.clip(x - x_len // 2, 0, h)\n        x2 = np.clip(x + x_len // 2, 0, h)\n        y1 = np.clip(y - y_len // 2, 0, w)\n        y2 = np.clip(y + y_len // 2, 0, w)\n        if type(noise) is np.ndarray:\n            pass\n        else:\n            mask[:, x1: x2, y1: y2] = noise.cpu().numpy()\n        return ((x1, x2, y1, y2), torch.from_numpy(mask).to(device))\n"
  },
  {
    "path": "trainer.py",
    "content": "import time\nimport models\nimport torch\nimport util\n\nif torch.cuda.is_available():\n    device = torch.device('cuda')\nelse:\n    device = torch.device('cpu')\n\n\nclass Trainer():\n    def __init__(self, criterion, data_loader, logger, config, global_step=0,\n                 target='train_dataset'):\n        self.criterion = criterion\n        self.data_loader = data_loader\n        self.logger = logger\n        self.config = config\n        self.log_frequency = config.log_frequency if config.log_frequency is not None else 100\n        self.loss_meters = util.AverageMeter()\n        self.acc_meters = util.AverageMeter()\n        self.acc5_meters = util.AverageMeter()\n        self.global_step = global_step\n        self.target = target\n        print(self.target)\n\n    def _reset_stats(self):\n        self.loss_meters = util.AverageMeter()\n        self.acc_meters = util.AverageMeter()\n        self.acc5_meters = util.AverageMeter()\n\n    def train(self, epoch, model, criterion, optimizer, random_noise=None):\n        model.train()\n        for i, (images, labels) in enumerate(self.data_loader[self.target]):\n            images, labels = images.to(device, non_blocking=True), labels.to(device, non_blocking=True)\n            if random_noise is not None:\n                random_noise = random_noise.detach().to(device)\n                for i in range(len(labels)):\n                    class_index = labels[i].item()\n                    images[i] += random_noise[class_index].clone()\n                    images[i] = torch.clamp(images[i], 0, 1)\n            start = time.time()\n            log_payload = self.train_batch(images, labels, model, optimizer)\n            end = time.time()\n            time_used = end - start\n            if self.global_step % self.log_frequency == 0:\n                display = util.log_display(epoch=epoch,\n                                           global_step=self.global_step,\n                                       
    time_elapse=time_used,\n                                           **log_payload)\n                self.logger.info(display)\n            self.global_step += 1\n        return self.global_step\n\n    def train_batch(self, images, labels, model, optimizer):\n        model.zero_grad()\n        optimizer.zero_grad()\n        if isinstance(self.criterion, torch.nn.CrossEntropyLoss) or isinstance(self.criterion, models.CutMixCrossEntropyLoss):\n            logits = model(images)\n            loss = self.criterion(logits, labels)\n        else:\n            logits, loss = self.criterion(model, images, labels, optimizer)\n        if isinstance(self.criterion, models.CutMixCrossEntropyLoss):\n            _, labels = torch.max(labels.data, 1)\n        loss.backward()\n        grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), self.config.grad_clip)\n        optimizer.step()\n        if logits.shape[1] >= 5:\n            acc, acc5 = util.accuracy(logits, labels, topk=(1, 5))\n            acc, acc5 = acc.item(), acc5.item()\n        else:\n            acc, = util.accuracy(logits, labels, topk=(1,))\n            acc, acc5 = acc.item(), 1\n        self.loss_meters.update(loss.item(), labels.shape[0])\n        self.acc_meters.update(acc, labels.shape[0])\n        self.acc5_meters.update(acc5, labels.shape[0])\n        payload = {\"acc\": acc,\n                   \"acc_avg\": self.acc_meters.avg,\n                   \"loss\": loss,\n                   \"loss_avg\": self.loss_meters.avg,\n                   \"lr\": optimizer.param_groups[0]['lr'],\n                   \"|gn|\": grad_norm}\n        return payload\n"
  },
  {
    "path": "util.py",
    "content": "import logging\nimport os\n\nimport numpy as np\nimport torch\n\nif torch.cuda.is_available():\n    torch.backends.cudnn.enabled = True\n    torch.backends.cudnn.benchmark = True\n    torch.backends.cudnn.deterministic = True\n    device = torch.device('cuda')\nelse:\n    device = torch.device('cpu')\n\n\ndef _patch_noise_extend_to_img(noise, image_size=[3, 32, 32], patch_location='center'):\n    c, h, w = image_size[0], image_size[1], image_size[2]\n    mask = np.zeros((c, h, w), np.float32)\n    x_len, y_len = noise.shape[1], noise.shape[2]\n\n    if patch_location == 'center' or (h == w == x_len == y_len):\n        x = h // 2\n        y = w // 2\n    elif patch_location == 'random':\n        x = np.random.randint(x_len // 2, w - x_len // 2)\n        y = np.random.randint(y_len // 2, h - y_len // 2)\n    else:\n        raise('Invalid patch location')\n\n    x1 = np.clip(x - x_len // 2, 0, h)\n    x2 = np.clip(x + x_len // 2, 0, h)\n    y1 = np.clip(y - y_len // 2, 0, w)\n    y2 = np.clip(y + y_len // 2, 0, w)\n    mask[:, x1: x2, y1: y2] = noise\n    return mask\n\n\ndef setup_logger(name, log_file, level=logging.INFO):\n    \"\"\"To setup as many loggers as you want\"\"\"\n    formatter = logging.Formatter('%(asctime)s %(message)s')\n    console_handler = logging.StreamHandler()\n    console_handler.setFormatter(formatter)\n    file_handler = logging.FileHandler(log_file)\n    file_handler.setFormatter(formatter)\n    logger = logging.getLogger(name)\n    logger.setLevel(level)\n    logger.addHandler(file_handler)\n    logger.addHandler(console_handler)\n    return logger\n\n\ndef log_display(epoch, global_step, time_elapse, **kwargs):\n    display = 'epoch=' + str(epoch) + \\\n              '\\tglobal_step=' + str(global_step)\n    for key, value in kwargs.items():\n        if type(value) == str:\n            display = '\\t' + key + '=' + value\n        else:\n            display += '\\t' + str(key) + '=%.4f' % value\n    display += 
'\\ttime=%.2fit/s' % (1. / time_elapse)\n    return display\n\n\ndef accuracy(output, target, topk=(1,)):\n    maxk = max(topk)\n\n    batch_size = target.size(0)\n    _, pred = output.topk(maxk, 1, True, True)\n    pred = pred.t()\n    correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n    res = []\n    for k in topk:\n        correct_k = correct[:k].view(-1).float().sum(0)\n        res.append(correct_k.mul_(1/batch_size))\n    return res\n\n\ndef save_model(filename, epoch, model, optimizer, scheduler, save_best=False, **kwargs):\n    # Torch Save State Dict\n    state = {\n        'epoch': epoch+1,\n        'model_state_dict': model.state_dict(),\n        'optimizer_state_dict': optimizer.state_dict(),\n        'scheduler_state_dict': scheduler.state_dict() if scheduler is not None else None\n    }\n    for key, value in kwargs.items():\n        state[key] = value\n    torch.save(state, filename + '.pth')\n    filename += '_best.pth'\n    if save_best:\n        torch.save(state, filename)\n    return\n\n\ndef load_model(filename, model, optimizer, scheduler, **kwargs):\n    # Load Torch State Dict\n    filename = filename + '.pth'\n    checkpoints = torch.load(filename, map_location=device)\n    model.load_state_dict(checkpoints['model_state_dict'])\n    if optimizer is not None and checkpoints['optimizer_state_dict'] is not None:\n        optimizer.load_state_dict(checkpoints['optimizer_state_dict'])\n    if scheduler is not None and checkpoints['scheduler_state_dict'] is not None:\n        scheduler.load_state_dict(checkpoints['scheduler_state_dict'])\n    return checkpoints\n\n\ndef count_parameters_in_MB(model):\n    return sum(np.prod(v.size()) for name, v in model.named_parameters() if \"auxiliary_head\" not in name)/1e6\n\n\ndef build_dirs(path):\n    if not os.path.exists(path):\n        os.makedirs(path)\n    return\n\n\nclass AverageMeter(object):\n    \"\"\"Computes and stores the average and current value\"\"\"\n\n    def __init__(self):\n      
  self.reset()\n\n    def reset(self):\n        self.val = 0\n        self.avg = 0\n        self.sum = 0\n        self.count = 0\n        self.max = 0\n\n    def update(self, val, n=1):\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        self.avg = self.sum / self.count\n        self.max = max(self.max, val)\n\n\ndef onehot(size, target):\n    vec = torch.zeros(size, dtype=torch.float32)\n    vec[target] = 1.\n    return vec\n\n\ndef rand_bbox(size, lam):\n    if len(size) == 4:\n        W = size[2]\n        H = size[3]\n    elif len(size) == 3:\n        W = size[1]\n        H = size[2]\n    else:\n        raise Exception\n\n    cut_rat = np.sqrt(1. - lam)\n    cut_w = np.int(W * cut_rat)\n    cut_h = np.int(H * cut_rat)\n\n    # uniform\n    cx = np.random.randint(W)\n    cy = np.random.randint(H)\n\n    bbx1 = np.clip(cx - cut_w // 2, 0, W)\n    bby1 = np.clip(cy - cut_h // 2, 0, H)\n    bbx2 = np.clip(cx + cut_w // 2, 0, W)\n    bby2 = np.clip(cy + cut_h // 2, 0, H)\n\n    return bbx1, bby1, bbx2, bby2\n"
  }
]