[
  {
    "path": ".gitignore",
    "content": ".DS_Store\n# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm\n# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839\n\n# User-specific stuff\n.idea/**/workspace.xml\n.idea/**/tasks.xml\n.idea/**/usage.statistics.xml\n.idea/**/dictionaries\n.idea/**/shelf\n\n# Generated files\n.idea/**/contentModel.xml\n\n# Sensitive or high-churn files\n.idea/**/dataSources/\n.idea/**/dataSources.ids\n.idea/**/dataSources.local.xml\n.idea/**/sqlDataSources.xml\n.idea/**/dynamic.xml\n.idea/**/uiDesigner.xml\n.idea/**/dbnavigator.xml\n\n# Gradle\n.idea/**/gradle.xml\n.idea/**/libraries\n\n# Gradle and Maven with auto-import\n# When using Gradle or Maven with auto-import, you should exclude module files,\n# since they will be recreated, and may cause churn.  Uncomment if using\n# auto-import.\n# .idea/modules.xml\n# .idea/*.iml\n# .idea/modules\n# *.iml\n# *.ipr\n\n# CMake\ncmake-build-*/\n\n# Mongo Explorer plugin\n.idea/**/mongoSettings.xml\n\n# File-based project format\n*.iws\n\n# IntelliJ\nout/\n\n# mpeltonen/sbt-idea plugin\n.idea_modules/\n\n# JIRA plugin\natlassian-ide-plugin.xml\n\n# Cursive Clojure plugin\n.idea/replstate.xml\n\n# Crashlytics plugin (for Android Studio and IntelliJ)\ncom_crashlytics_export_strings.xml\ncrashlytics.properties\ncrashlytics-build.properties\nfabric.properties\n\n# Editor-based Rest Client\n.idea/httpRequests\n\n# Android studio 3.1+ serialized cache file\n.idea/caches/build_file_checksums.ser\n\n/models/\n"
  },
  {
    "path": ".idea/MADAN.iml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<module type=\"PYTHON_MODULE\" version=\"4\">\n  <component name=\"NewModuleRootManager\">\n    <content url=\"file://$MODULE_DIR$\" />\n    <orderEntry type=\"jdk\" jdkName=\"Remote Python 3.5.2 (sftp://luban@10.84.217.43:8022/usr/bin/python3)\" jdkType=\"Python SDK\" />\n    <orderEntry type=\"sourceFolder\" forTests=\"false\" />\n  </component>\n  <component name=\"TestRunnerService\">\n    <option name=\"projectConfiguration\" value=\"Twisted Trial\" />\n    <option name=\"PROJECT_TEST_RUNNER\" value=\"Twisted Trial\" />\n  </component>\n</module>"
  },
  {
    "path": ".idea/deployment.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"PublishConfigData\" autoUpload=\"Always\" serverName=\"MV_CyCADA_22G\">\n    <serverData>\n      <paths name=\"MV_CyCADA_22G\">\n        <serverdata>\n          <mappings>\n            <mapping deploy=\"/nfs/project/libo_i/MADAN\" local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n    </serverData>\n    <option name=\"myAutoUpload\" value=\"ALWAYS\" />\n  </component>\n</project>"
  },
  {
    "path": ".idea/inspectionProfiles/profiles_settings.xml",
    "content": "<component name=\"InspectionProjectProfileManager\">\n  <settings>\n    <option name=\"USE_PROJECT_PROFILE\" value=\"false\" />\n    <version value=\"1.0\" />\n  </settings>\n</component>"
  },
  {
    "path": ".idea/misc.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"JavaScriptSettings\">\n    <option name=\"languageLevel\" value=\"ES6\" />\n  </component>\n  <component name=\"ProjectRootManager\" version=\"2\" project-jdk-name=\"Remote Python 3.5.2 (sftp://luban@10.84.217.43:8022/usr/bin/python3)\" project-jdk-type=\"Python SDK\" />\n  <component name=\"PythonCompatibilityInspectionAdvertiser\">\n    <option name=\"version\" value=\"3\" />\n  </component>\n</project>"
  },
  {
    "path": ".idea/modules.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"ProjectModuleManager\">\n    <modules>\n      <module fileurl=\"file://$PROJECT_DIR$/.idea/MADAN.iml\" filepath=\"$PROJECT_DIR$/.idea/MADAN.iml\" />\n    </modules>\n  </component>\n</project>"
  },
  {
    "path": ".idea/remote-mappings.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"RemoteMappingsManager\">\n    <list>\n      <list>\n        <remote-mappings server-id=\"python@sftp://luban@10.84.217.43:8022/usr/bin/python3\">\n          <settings>\n            <list>\n              <mapping local-root=\"$PROJECT_DIR$\" remote-root=\"/nfs/project/libo_i/MADAN\" />\n            </list>\n          </settings>\n        </remote-mappings>\n      </list>\n    </list>\n  </component>\n</project>"
  },
  {
    "path": ".idea/vcs.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"VcsDirectoryMappings\">\n    <mapping directory=\"$PROJECT_DIR$\" vcs=\"Git\" />\n  </component>\n</project>"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2019 liljprime\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# MADAN\n\nA PyTorch implementation of [Multi-source Domain Adaptation for Semantic Segmentation](https://arxiv.org/abs/1910.12181)\n\nIf you use this code in your research please consider citing:\n\n```\n@InProceedings{zhao2019madan,\n   title = {Multi-source Domain Adaptation for Semantic Segmentation},\n   author = {Zhao, Sicheng and Li, Bo and Yue, Xiangyu and Gu, Yang and Xu, Pengfei and Hu, Runbo and Chai, Hua and Keutzer, Kurt},\n   booktitle = {Advances in Neural Information Processing Systems},\n   year = {2019}\n}\n```\n\n## Quick Look\n\nOur multi-source domain adaptation builds on the work [CyCADA](https://github.com/jhoffman/cycada_release) and [CycleGAN](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix). Since we focus on the Semantic Segmentation task, we remove the Digit Classification part in CyCADA.\n\nWe add the following modules and achieve significant improvements.\n\n1. Dynamic Semantic Consistency Module\n2. Adversarial Aggregation Module\n   1. Sub-domain Aggregation Discriminator\n   2. 
Cross-domain Cycle Discriminator\n\nWe also implement [MDAN](https://openreview.net/pdf?id=ryDNZZZAW) for the Semantic Segmentation task in PyTorch as our baseline for comparison.\n\n## Overall Structure\n\n![image-20190608104531451](http://ww4.sinaimg.cn/large/006tNc79ly1g3tjype7qlj31vo0u0hb1.jpg)\n\n## Setup\n\nCheck out this repo:\n\n```bash\ngit clone https://github.com/pikachusocute/MADAN.git\n```\n\nInstall Python3 requirements\n\n```bash\npip3 install -r requirements.txt\n```\n\n## Dynamic Adversarial Image Generation\n\nWe follow the approach in CyCADA: in the first step, we need to train the Image Adaptation module to transfer source images (GTA, Synthia or Multi-source) to \"source as target\".\n\n![image-20190608111738818](http://ww4.sinaimg.cn/large/006tNc79ly1g3tkvxw9rrj31r40e8kjl.jpg)\n\nWe refer to the Image Adaptation module from GTA to Cityscapes as GTA->Cityscapes in the following.\n\n#### GTA->Cityscapes\n\n```bash\ncd scripts/CycleGAN\nbash cyclegan_gta2cityscapes.sh\n```\n\nIn the training process, snapshot files will be stored in `cyclegan/checkpoints/[EXP_NAME]`.\n\nUsually, after we run for 20 epochs, there'll be a file `20_net_G_A.pth` in the previous folder path.\n\nThen we run the test process.\n\n```bash\nbash scripts/CycleGAN/test_templates.sh [EXP_NAME] 20 cycle_gan_semantic_fcn gta5_cityscapes\n```\n\nIn the multi-source case, both `20_net_G_A_1.pth` and `20_net_G_A_2.pth` exist. 
We use another script to run the test process.\n\n![image](https://tva1.sinaimg.cn/large/006y8mN6ly1g9cqt9m2kmj31j80skgsh.jpg)\n\n```bash\nbash scripts/CycleGAN/test_templates_cycle.sh [EXP_NAME] 20 test synthia_cityscapes gta5_cityscapes\n```\n\nA new dataset will be generated at `~/cyclegan/results/[EXP_NAME]/train_20`.\n\nAfter we obtain a new source-stylized dataset, we then train the segmenter on the new dataset.\n\n## Pixel Level Adaptation\n\nIn this part, we train our new segmenter on the new dataset.\n\n```bash\nln -s ~/cyclegan/results/[EXP_NAME]/train_20 ~/data/cyclegta5/[EXP_NAME]_TRAIN_60\n```\n\nThen we set `dataflag = [EXP_NAME]_TRAIN_60` to find the datasets' paths, and follow the instructions to train the segmenter to perform pixel level adaptation.\n\n```bash\nbash scripts/FCN/train_fcn8s_cyclesgta5_DSC.sh\n```\n\n## Feature Level Adaptation\n\nFor adaptation, we use\n\n```bash\nbash scripts/ADDA/adda_cyclegta2cs_score.sh\n```\n\nMake sure you choose the desired `src`, `tgt` and `datadir` first. In this process, you should load your `base_model` trained on the synthetic dataset and perform feature-level adaptation to the real scene dataset.\n\n### Our Model\n\nWe release our adaptation models in `./models`; you can use `scripts/eval_templates.sh` to evaluate their validity.\n\n1. [CycleGTA5_Dynamic_Semantic_Consistency](https://drive.google.com/file/d/1moGF7L2hkTHUPUzqsSxPwKNlHCHQm4Ms/view?usp=sharing)\n2. [CycleSYNTHIA_Dynamic_Semantic_Consistency](https://drive.google.com/file/d/19V5J1zyF3ct3247gSSr-u3WVkDJqQvUk/view?usp=sharing)\n3. [Multi_Source_SAD_CCD](https://drive.google.com/file/d/1xgmLwhsbwv-isy7R5FkNevVSH9gcMxuq/view?usp=sharing)\n\n### Transferred Dataset\n\nWe will release our transferred dataset soon, where our `CycleGTA5_Dynamic_Semantic_Consistency` model is trained to perform pixel level adaptation.\n"
  },
  {
    "path": "cycada/__init__.py",
    "content": ""
  },
  {
    "path": "cycada/data/__init__.py",
    "content": "from . import gta5, cityscapes, cyclegta5, synthia, cyclesynthia, cyclesynthia_cyclegta5, bdds\nfrom . import adda_datasets"
  },
  {
    "path": "cycada/data/adda_datasets.py",
    "content": "import os.path\n\nimport torch.utils.data\n\nfrom .data_loader import get_transform_dataset\nfrom ..transforms import augment_collate\n\n\nclass AddaDataLoader(object):\n\tdef __init__(self, net_transform, dataset, rootdir, downscale, crop_size=None, resize=None,\n\t             batch_size=1, shuffle=False, num_workers=2, half_crop=None, src_data_flag=None, small=False):\n\t\tself.dataset = dataset\n\t\tself.downscale = downscale\n\t\tself.resize = resize\n\t\tself.crop_size = crop_size\n\t\tself.half_crop = half_crop\n\t\tself.batch_size = batch_size\n\t\tself.shuffle = shuffle\n\t\tself.num_workers = num_workers\n\t\tassert len(self.dataset) == 2, 'Requires two datasets: source, target'\n\t\tsourcedir = os.path.join(rootdir, self.dataset[0])\n\t\ttargetdir = os.path.join(rootdir, self.dataset[1])\n\t\tself.source = get_transform_dataset(self.dataset[0], sourcedir, net_transform, downscale, resize, src_data_flag=src_data_flag, small=small)\n\t\tself.target = get_transform_dataset(self.dataset[1], targetdir, net_transform, downscale, resize, small=small)\n\t\tprint('Source length:', len(self.source), 'Target length:', len(self.target))\n\t\tself.n = max(len(self.source), len(self.target))  # make sure you see all images\n\t\tself.num = 0\n\t\tself.set_loader_src()\n\t\tself.set_loader_tgt()\n\t\n\tdef __iter__(self):\n\t\treturn self\n\t\n\tdef __next__(self):\n\t\treturn self.next()\n\t\n\tdef next(self):\n\t\tif self.num % len(self.iters_src) == 0:\n\t\t\tprint('restarting source dataset')\n\t\t\tself.set_loader_src()\n\t\tif self.num % len(self.iters_tgt) == 0:\n\t\t\tprint('restarting target dataset')\n\t\t\tself.set_loader_tgt()\n\t\t\n\t\timg_src, label_src = next(self.iters_src)\n\t\timg_tgt, label_tgt = next(self.iters_tgt)\n\t\t\n\t\tself.num += 1\n\t\treturn img_src, img_tgt, label_src, label_tgt\n\t\n\tdef __len__(self):\n\t\treturn min(len(self.source), len(self.target))\n\t\n\tdef set_loader_src(self):\n\t\tbatch_size = 
self.batch_size\n\t\tshuffle = self.shuffle\n\t\tnum_workers = self.num_workers\n\t\tif self.crop_size is not None or self.resize is not None:\n\t\t\tcollate_fn = lambda batch: augment_collate(batch, resize=self.resize, crop=self.crop_size,\n\t\t\t                                           halfcrop=self.half_crop, flip=True)\n\t\telse:\n\t\t\tcollate_fn = torch.utils.data.dataloader.default_collate\n\t\t\t\n\t\tself.loader_src = torch.utils.data.DataLoader(self.source,\n\t\t                                              batch_size=batch_size, shuffle=shuffle, num_workers=num_workers,\n\t\t                                              collate_fn=collate_fn, pin_memory=True)\n\t\tself.iters_src = iter(self.loader_src)\n\t\n\tdef set_loader_tgt(self):\n\t\tbatch_size = self.batch_size\n\t\tshuffle = self.shuffle\n\t\tnum_workers = self.num_workers\n\t\tif self.crop_size is not None or self.resize is not None:\n\t\t\tcollate_fn = lambda batch: augment_collate(batch, resize=self.resize, crop=self.crop_size,\n\t\t\t                                           halfcrop=self.half_crop, flip=True)\n\t\telse:\n\t\t\tcollate_fn = torch.utils.data.dataloader.default_collate\n\t\tself.loader_tgt = torch.utils.data.DataLoader(self.target,\n\t\t                                              batch_size=batch_size, shuffle=shuffle, num_workers=num_workers,\n\t\t                                              collate_fn=collate_fn, pin_memory=True)\n\t\tself.iters_tgt = iter(self.loader_tgt)\n"
  },
  {
    "path": "cycada/data/bdds.py",
    "content": "import os.path\n\nimport numpy as np\nimport torch.utils.data as data\nfrom PIL import Image\nfrom .util import classes, ignore_label, id2label\nfrom .data_loader import register_dataset_obj\n\n@register_dataset_obj('bdds')\nclass BDDS(data.Dataset):\n\tdef __init__(self, root, num_cls=19, split='train', remap_labels=True, transform=None, target_transform=None, data_flag=None):\n\t\tself.root = root\n\t\tself.split = split\n\t\tself.remap_labels = remap_labels\n\t\tself.transform = transform\n\t\tself.target_transform = target_transform\n\t\tself.classes = classes\n\t\tself.data_flag = data_flag\n\t\tself.num_cls = num_cls\n\t\tself.ids = self.collect_ids()\n\t\n\tdef collect_ids(self):\n\t\tsplits = []\n\t\tpath = os.path.join(self.root, \"images\", self.split)\n\t\tfiles = os.listdir(path)\n\t\tfor item in files:\n\t\t\tfip = os.path.join(path, item)\n\t\t\tsplits.append(fip.split('/')[-1])\n\t\t\n\t\treturn splits\n\t\n\tdef img_path(self, filename):\n\t\treturn os.path.join(self.root, \"images\", self.split, filename)\n\t\n\tdef label_path(self, filename):\n\t\treturn os.path.join(self.root, 'labels', self.split, \"{}_train_id.png\".format(filename[:-4]))\n\t\n\tdef __getitem__(self, index, debug=False):\n\t\tid = self.ids[index]\n\t\timg_path = self.img_path(id)\n\t\tlabel_path = self.label_path(id)\n\t\t\n\t\timg = Image.open(img_path).convert('RGB')\n\t\tif self.transform is not None:\n\t\t\timg = self.transform(img)\n\t\ttarget = Image.open(label_path)\n\t\tif self.target_transform is not None:\n\t\t\ttarget = self.target_transform(target)\n\t\treturn img, target\n\t\n\tdef __len__(self):\n\t\treturn len(self.ids)\n"
  },
  {
    "path": "cycada/data/cityscapes.py",
    "content": "import os.path\nimport sys\n\nimport numpy as np\nimport torch.utils.data as data\nfrom PIL import Image\nfrom .util import classes, ignore_label, id2label\nfrom .data_loader import DatasetParams, register_data_params, register_dataset_obj\n\ndef remap_labels_to_train_ids(arr):\n\tout = ignore_label * np.ones(arr.shape, dtype=np.uint8)\n\tfor id, label in id2label.items():\n\t\tout[arr == id] = int(label)\n\treturn out\n\n\n@register_data_params('cityscapes')\nclass CityScapesParams(DatasetParams):\n\tnum_channels = 3\n\timage_size = 1024\n\tmean = 0.5\n\tstd = 0.5\n\tnum_cls = 19\n\ttarget_transform = None\n\n\n@register_dataset_obj('cityscapes')\nclass Cityscapes(data.Dataset):\n\tdef __init__(self, root, num_cls=19, split='train', remap_labels=True, transform=None,\n\t             target_transform=None):\n\t\tself.root = root\n\t\tsys.path.append(root)\n\t\tself.split = split\n\t\tself.remap_labels = remap_labels\n\t\tself.ids = self.collect_ids()\n\t\tself.transform = transform\n\t\tself.target_transform = target_transform\n\t\tself.num_cls = num_cls\n\t\t\n\t\tself.id2label = id2label\n\t\tself.classes = classes\n\t\n\tdef collect_ids(self):\n\t\tim_dir = os.path.join(self.root, 'leftImg8bit', self.split)\n\t\tids = []\n\t\tfor dirpath, dirnames, filenames in os.walk(im_dir):\n\t\t\tfor filename in filenames:\n\t\t\t\tif filename.endswith('.png'):\n\t\t\t\t\tids.append('_'.join(filename.split('_')[:3]))\n\t\treturn ids\n\t\n\tdef img_path(self, id):\n\t\tfmt = 'leftImg8bit/{}/{}/{}_leftImg8bit.png'\n\t\tsubdir = id.split('_')[0]\n\t\tpath = fmt.format(self.split, subdir, id)\n\t\treturn os.path.join(self.root, path)\n\t\n\tdef label_path(self, id):\n\t\tfmt = 'gtFine/{}/{}/{}_gtFine_labelIds.png'\n\t\tsubdir = id.split('_')[0]\n\t\tpath = fmt.format(self.split, subdir, id)\n\t\treturn os.path.join(self.root, path)\n\t\n\tdef __getitem__(self, index, debug=False):\n\t\tid = self.ids[index]\n\t\timg = 
Image.open(self.img_path(id)).convert('RGB')\n\t\tif self.transform is not None:\n\t\t\timg = self.transform(img)\n\t\ttarget = Image.open(self.label_path(id)).convert('L')\n\t\tif self.remap_labels:\n\t\t\ttarget = np.asarray(target)\n\t\t\ttarget = remap_labels_to_train_ids(target)\n\t\t\ttarget = Image.fromarray(np.uint8(target), 'L')\n\t\tif self.target_transform is not None:\n\t\t\ttarget = self.target_transform(target)\n\t\treturn img, target\n\t\n\tdef __len__(self):\n\t\treturn len(self.ids)\n"
  },
  {
    "path": "cycada/data/cityscapes_labels.py",
    "content": "# function for colorizing a label image:\n# camera-ready\n\nimport numpy as np\n\n\ndef label_img_to_color(img):\n\tlabel_to_color = {\n\t\t0: [128, 64, 128],\n\t\t1: [244, 35, 232],\n\t\t2: [70, 70, 70],\n\t\t3: [102, 102, 156],\n\t\t4: [190, 153, 153],\n\t\t5: [153, 153, 153],\n\t\t6: [250, 170, 30],\n\t\t7: [220, 220, 0],\n\t\t8: [107, 142, 35],\n\t\t9: [152, 251, 152],\n\t\t10: [70, 130, 180],\n\t\t11: [220, 20, 60],\n\t\t12: [255, 0, 0],\n\t\t13: [0, 0, 142],\n\t\t14: [0, 0, 70],\n\t\t15: [0, 60, 100],\n\t\t16: [0, 80, 100],\n\t\t17: [0, 0, 230],\n\t\t18: [119, 11, 32]\n\t}\n\t\n\timg_height, img_width = img.shape\n\t\n\timg_color = np.zeros((img_height, img_width, 3))\n\tfor row in range(img_height):\n\t\tfor col in range(img_width):\n\t\t\tlabel = img[row, col]\n\t\t\timg_color[row, col] = np.array(label_to_color[label])\n\t\n\treturn img_color\n"
  },
  {
    "path": "cycada/data/cyclegan.py",
    "content": "import os\nfrom os.path import join\nimport glob\nfrom PIL import Image\n\nimport torch.utils.data as data\nfrom .data_loader import DatasetParams\nfrom .data_loader import register_dataset_obj, register_data_params\n\nclass CycleGANDataset(data.Dataset):\n    def __init__(self, root, regexp, transform=None, target_transform=None, \n            download=False):\n        self.root = root\n        self.transform = transform\n        self.target_transform = target_transform\n\n        self.image_paths, self.labels = self.find_images(regexp)\n\n    def find_images(self, regexp='*.png'):\n        basenames = sorted(glob.glob(join(self.root, regexp)))\n        image_paths = []\n        labels = []\n        for basename in basenames:\n            image_paths.append(os.path.join(self.root, basename))\n            labels.append(int(basename.split('/')[-1].split('_')[0]))\n        return image_paths, labels\n\n    def __getitem__(self, index):\n        im = Image.open(self.image_paths[index]) #.convert('L')\n        target = self.labels[index]\n\n        if self.transform is not None:\n            im = self.transform(im)\n        if self.target_transform is not None:\n            target = self.target_transform(target)\n\n        return im, target\n\n    def __len__(self):\n        return len(self.image_paths)\n\n\n@register_dataset_obj('svhn2mnist')\nclass Svhn2MNIST(CycleGANDataset):\n    def __init__(self, root, train=True, transform=None, target_transform=None, \n            download=False):\n        if not train:\n            print('No test set for svhn2mnist.')\n            self.image_paths = []\n        else:\n            super(Svhn2MNIST, self).__init__(root, '*_fake_B.png',\n                    transform=transform, target_transform=target_transform, \n                    download=download)\n\n@register_data_params('svhn2mnist')\nclass Svhn2MNISTParams(DatasetParams):\n    num_channels = 3\n    image_size = 32\n    mean = 0.5\n    std = 0.5\n    #mean 
= 0.1307\n    #std = 0.3081\n    \n    # mean and std (when scaled between [0,1])\n    #mean = 0.127 # ep50\n    #mean = 0.21 # ep100 -- more white pixels...\n    #std = 0.29\n\n    #mean = 0.21\n    #std = 0.2\n    \n    num_cls = 10\n    target_transform = None\n\n@register_dataset_obj('usps2mnist')\nclass Usps2Mnist(CycleGANDataset):\n    def __init__(self, root, train=True, transform=None, target_transform=None, \n            download=False):\n        if not train:\n            print('No test set for usps2mnist.')\n            self.image_paths = []\n        else:\n            super(Usps2Mnist, self).__init__(root, '*_fake_A.png',\n                    transform=transform, target_transform=target_transform, \n                    download=download)\n\n@register_data_params('usps2mnist')\nclass Usps2MnistParams(DatasetParams):\n    num_channels = 3\n    image_size = 16\n    #mean = 0.1307\n    #std = 0.3081\n    mean = 0.5\n    std = 0.5\n    num_cls = 10\n    target_transform = None\n\n\n@register_dataset_obj('mnist2usps')\nclass Mnist2Usps(CycleGANDataset):\n    def __init__(self, root, train=True, transform=None, target_transform=None, \n            download=False):\n        if not train:\n            print('No test set for mnist2usps.')\n            self.image_paths = []\n        else:\n            super(Mnist2Usps, self).__init__(root, '*_fake_B.png',\n                    transform=transform, target_transform=target_transform, \n                    download=download)\n\n@register_data_params('mnist2usps')\nclass Mnist2UspsParams(DatasetParams):\n    num_channels = 3\n    image_size = 16 # this seems wrong...\n    #mean = 0.25\n    #std = 0.37\n    \n    #mean = 0.1307\n    #std = 0.3081\n    mean = 0.5\n    std = 0.5\n    num_cls = 10\n    target_transform = None\n"
  },
  {
    "path": "cycada/data/cyclegta5.py",
    "content": "import os.path\n\nimport numpy as np\nfrom PIL import Image\n\nfrom .cityscapes import remap_labels_to_train_ids\nfrom .data_loader import register_dataset_obj\nfrom .gta5 import GTA5  # , LABEL2TRAIN\n\n\n@register_dataset_obj('cyclegta5')\nclass CycleGTA5(GTA5):\n\tdef collect_ids(self):\n\t\t# ids = GTA5.collect_ids(self)\n\t\texisting_ids = []\n\t\tif self.data_flag:\n\t\t\tpath = os.path.join(self.root, self.data_flag)\n\t\telse:\n\t\t\tpath = os.path.join(self.root, \"images\")\n\t\t\n\t\tfiles = os.listdir(path)\n\t\tfor item in files:\n\t\t\tfull_path = os.path.join(path, item)\n\t\t\tif os.path.exists(full_path) is False:\n\t\t\t\tcontinue\n\t\t\texisting_ids.append(full_path.split('/')[-1])\n\t\treturn sorted(existing_ids)\n\t\n\tdef __getitem__(self, index, debug=False):\n\t\tfilename = self.ids[index]\n\t\tif self.data_flag == '' or self.data_flag is None:\n\t\t\timg_path = os.path.join(self.root, \"images\", filename)\n\t\telse:\n\t\t\timg_path = os.path.join(self.root, self.data_flag, filename)\n\t\t\n\t\tif self.data_flag == '' or self.data_flag is None:\n\t\t\tlabel_path = os.path.join(self.root, 'labels_600x1080', filename)\n\t\telse:\n\t\t\tif filename.endswith('_fake_B.png'):\n\t\t\t\tlabel_path = os.path.join(self.root, 'labels_600x1080', filename.replace('_fake_B.png', '.png'))\n\t\t\telif filename.endswith('_fake_B_2.png'):\n\t\t\t\tlabel_path = os.path.join(self.root, 'labels_600x1080', filename.replace('_fake_B_2.png', '.png'))\n\t\t\t\t\n\t\timg = Image.open(img_path).convert('RGB')\n\t\ttarget = Image.open(label_path)\n\t\timg = img.resize(target.size, resample=Image.BILINEAR)\n\t\tif self.transform is not None:\n\t\t\timg = self.transform(img)\n\t\tif self.remap_labels:\n\t\t\ttarget = np.asarray(target)\n\t\t\ttarget = remap_labels_to_train_ids(target)\n\t\t\ttarget = Image.fromarray(target, 'L')\n\t\tif self.target_transform is not None:\n\t\t\ttarget = self.target_transform(target)\n\t\treturn img, target\n"
  },
  {
    "path": "cycada/data/cyclesynthia.py",
    "content": "import os.path\n\nimport numpy as np\nimport torch.utils.data as data\nfrom PIL import Image\n\nfrom .data_loader import DatasetParams, register_data_params, register_dataset_obj\n\nignore_label = 255\nid2label = {0: ignore_label,\n            1: 10,\n            2: 2,\n            3: 0,\n            4: 1,\n            5: 4,\n            6: 8,\n            7: 5,\n            8: 13,\n            9: 7,\n            10: 11,\n            11: 18,\n            12: 17,\n            13: ignore_label,\n            14: ignore_label,\n            15: 6,\n            16: 9,\n            17: 12,\n            18: 14,\n            19: 15,\n            20: 16,\n            21: 3,\n            22: ignore_label}\n\nclasses = ['road',\n           'sidewalk',\n           'building',\n           'wall',\n           'fence',\n           'pole',\n           'traffic light',\n           'traffic sign',\n           'vegetation',\n           'terrain',\n           'sky',\n           'person',\n           'rider',\n           'car',\n           'truck',\n           'bus',\n           'train',\n           'motorcycle',\n           'bicycle']\n\n\ndef syn_relabel(arr):\n\tout = ignore_label * np.ones(arr.shape, dtype=np.uint8)\n\tfor id, label in id2label.items():\n\t\tout[arr == id] = int(label)\n\treturn out\n\n\n@register_data_params('cyclesynthia')\nclass SYNTHIAParams(DatasetParams):\n\tnum_channels = 3\n\timage_size = 1024\n\tmean = 0.5\n\tstd = 0.5\n\tnum_cls = 19\n\ttarget_transform = None\n\n\n@register_dataset_obj('cyclesynthia')\nclass CycleSYNTHIA(data.Dataset):\n\t\n\tdef __init__(self, root, num_cls=19, split='train', remap_labels=True, transform=None, target_transform=None):\n\t\tself.root = root.replace('cycle', '')\n\t\tself.split = split\n\t\tself.remap_labels = remap_labels\n\t\tself.transform = transform\n\t\tself.target_transform = target_transform\n\t\tself.classes = classes\n\t\tself.num_cls = num_cls\n\t\tself.ids = self.collect_ids()\n\t\n\tdef 
collect_ids(self):\n\t\tsplits = []\n\t\tif self.data_flag:\n\t\t\tpath = os.path.join(self.root, self.data_flag)\n\t\telse:\n\t\t\tpath = os.path.join(self.root, 'Cycle')\n\t\tfiles = os.listdir(path)\n\t\tfor item in files:\n\t\t\tfip = os.path.join(path, item)\n\t\t\tif (fip.endswith('_fake_B_1.png') or fip.endswith('_fake_B.png')):\n\t\t\t\tsplits.append(fip.split('/')[-1])\n\t\t\n\t\treturn splits\n\t\n\tdef img_path(self, filename):\n\t\treturn os.path.join(self.root, filename)\n\t\n\tdef label_path(self, filename):\n\t\t# Case for loading images generated in multi-source cycle\n\t\t# In this case, you will generate fake_B_1 for cyclesynthia dataset and fake_B_2 for cyclegta5\n\t\tif filename.endswith('_fake_B_1.png'):\n\t\t\treturn os.path.join(self.root, 'GT', 'parsed_LABELS', filename.replace('_fake_B_1.png', '.png'))\n\t\telif filename.endswith('_fake_B.png'):\n\t\t\treturn os.path.join(self.root, 'GT', 'parsed_LABELS', filename.replace('_fake_B.png', '.png'))\n\t\n\tdef __getitem__(self, index, debug=False):\n\t\tid = self.ids[index]\n\t\timg_path = self.img_path(id)\n\t\tlabel_path = self.label_path(id)\n\t\timg = Image.open(img_path).convert('RGB')\n\t\tif self.transform is not None:\n\t\t\timg = self.transform(img)\n\t\ttarget = Image.open(label_path)\n\t\tif self.remap_labels:\n\t\t\ttarget = np.asarray(target)\n\t\t\ttarget = syn_relabel(target)\n\t\t\ttarget = Image.fromarray(target, 'L')\n\t\tif self.target_transform is not None:\n\t\t\ttarget = self.target_transform(target)\n\t\treturn img, target\n\t\n\tdef __len__(self):\n\t\treturn len(self.ids)\n"
  },
  {
    "path": "cycada/data/cyclesynthia_cyclegta5.py",
    "content": "import os.path\n\nimport numpy as np\nimport torch.utils.data as data\nfrom PIL import Image\n\nfrom .cityscapes import remap_labels_to_train_ids\nfrom .data_loader import DatasetParams, register_data_params, register_dataset_obj\n\nignore_label = 255\nid2label = {0: ignore_label,\n            1: 10,\n            2: 2,\n            3: 0,\n            4: 1,\n            5: 4,\n            6: 8,\n            7: 5,\n            8: 13,\n            9: 7,\n            10: 11,\n            11: 18,\n            12: 17,\n            13: ignore_label,\n            14: ignore_label,\n            15: 6,\n            16: 9,\n            17: 12,\n            18: 14,\n            19: 15,\n            20: 16,\n            21: 3,\n            22: ignore_label}\n\nclasses = ['road',\n           'sidewalk',\n           'building',\n           'wall',\n           'fence',\n           'pole',\n           'traffic light',\n           'traffic sign',\n           'vegetation',\n           'terrain',\n           'sky',\n           'person',\n           'rider',\n           'car',\n           'truck',\n           'bus',\n           'train',\n           'motorcycle',\n           'bicycle']\n\n\ndef syn_relabel(arr):\n\tout = ignore_label * np.ones(arr.shape, dtype=np.uint8)\n\tfor id, label in id2label.items():\n\t\tout[arr == id] = int(label)\n\treturn out\n\n\n@register_data_params('cyclesynthia_cyclegta5')\nclass SYNTHIAParams(DatasetParams):\n\tnum_channels = 3\n\timage_size = 1024\n\tmean = 0.5\n\tstd = 0.5\n\tnum_cls = 19\n\ttarget_transform = None\n\n# In this class, we iteratively load transferred images from cyclesynthia and cyclegta5\n@register_dataset_obj('cyclesynthia_cyclegta5')\nclass CycleSYNTHIACycleGTA5(data.Dataset):\n\t\n\tdef __init__(self, root, num_cls=19, split='train', remap_labels=True, transform=None, target_transform=None):\n\t\tself.dataset_name = os.path.basename(root)\n\t\tself.parent_path = root.replace(self.dataset_name, '')\n\t\tself.syn_name 
= os.path.join(self.parent_path, 'synthia')\n\t\tself.gta_name = os.path.join(self.parent_path, 'cyclegta5')\n\t\tself.remap_labels = remap_labels\n\t\tself.transform = transform\n\t\tself.target_transform = target_transform\n\t\tself.classes = classes\n\t\tself.num_cls = num_cls\n\t\tself.syn_ids = self.collect_ids('syn')\n\t\tself.gta_ids = self.collect_ids('gta')\n\t\n\tdef collect_ids(self, datasets_name):\n\t\tsplits = []\n\t\tif datasets_name == 'syn':\n\t\t\tfiles = os.listdir(self.syn_name)\n\t\t\tfor item in files:\n\t\t\t\tfip = os.path.join(self.syn_name, item)\n\t\t\t\tif (fip.endswith('_fake_B_1.png') or fip.endswith('_fake_B.png')):\n\t\t\t\t\tsplits.append(fip.split('/')[-1])\n\t\t\n\t\telif datasets_name == 'gta':\n\t\t\tfiles = os.listdir(self.gta_name)\n\t\t\tfor item in files:\n\t\t\t\tfip = os.path.join(self.gta_name, item)\n\t\t\t\tif (fip.endswith('_fake_B_2.png') or fip.endswith('_fake_B.png')):\n\t\t\t\t\tsplits.append(fip.split('/')[-1])\n\t\t\n\t\telse:\n\t\t\tprint(\"Don't Recognize {}\".format(datasets_name))\n\t\t\n\t\treturn splits\n\t\n\tdef img_path(self, prefix, filename):\n\t\treturn os.path.join(prefix, filename)\n\t\n\t# Case for loading images generated in multi-source cycle\n\t# In this case, you will generate fake_B_1 for cyclesynthia dataset and fake_B_2 for cyclegta5\n\tdef syn_label_path(self, filename):\n\t\tif filename.endswith('_fake_B_1.png'):\n\t\t\treturn os.path.join(\"/nfs/project/libo_i/MADAN/data/synthia\", 'GT', 'parsed_LABELS', filename.replace('_fake_B_1.png', '.png'))\n\t\telif filename.endswith('_fake_B.png'):\n\t\t\treturn os.path.join(\"/nfs/project/libo_i/MADAN/data/synthia\", 'GT', 'parsed_LABELS', filename.replace('_fake_B.png', '.png'))\n\t\n\tdef gta_label_path(self, filename):\n\t\tif filename.endswith('_fake_B_2.png'):\n\t\t\treturn os.path.join('/nfs/project/libo_i/MADAN/data/cyclegta5', 'labels', filename.replace('_fake_B_2.png', '.png'))\n\t\telif filename.endswith('_fake_B.png'):\n\t\t\treturn 
os.path.join('/nfs/project/libo_i/MADAN/data/cyclegta5', 'labels', filename.replace('_fake_B.png', '.png'))\n\t\n\tdef __getitem__(self, index, debug=False):\n\t\t# we iteratively load images from cyclesynthia and cyclegta5\n\t\tif index % 2:\n\t\t\tid = self.syn_ids[index % len(self.syn_ids)]\n\t\t\timg_path = self.img_path(self.syn_name, id)\n\t\t\tlabel_path = self.syn_label_path(id)\n\t\t\timg = Image.open(img_path).convert('RGB')\n\t\t\tif self.transform is not None:\n\t\t\t\timg = self.transform(img)\n\t\t\ttarget = Image.open(label_path)\n\t\t\tif self.remap_labels:\n\t\t\t\ttarget = np.asarray(target)\n\t\t\t\ttarget = syn_relabel(target)\n\t\t\t\ttarget = Image.fromarray(target, 'L')\n\t\t\tif self.target_transform is not None:\n\t\t\t\ttarget = self.target_transform(target)\n\t\t\n\t\telse:\n\t\t\tid = self.gta_ids[index % len(self.gta_ids)]\n\t\t\timg_path = self.img_path(self.gta_name, id)\n\t\t\tlabel_path = self.gta_label_path(id)\n\t\t\timg = Image.open(img_path).convert('RGB')\n\t\t\tif self.transform is not None:\n\t\t\t\timg = self.transform(img)\n\t\t\ttarget = Image.open(label_path)\n\t\t\tif self.remap_labels:\n\t\t\t\ttarget = np.asarray(target)\n\t\t\t\ttarget = remap_labels_to_train_ids(target)\n\t\t\t\ttarget = Image.fromarray(target, 'L')\n\t\t\tif self.target_transform is not None:\n\t\t\t\ttarget = self.target_transform(target)\n\t\t\n\t\t# if debug:\n\t\t# \tprint(self.__class__.__name__)\n\t\t# \tprint(\"IMG Path: {}\".format(img_path))\n\t\t# \tprint(\"Label Path: {}\".format(label_path))\n\t\t#\n\t\treturn img, target\n\t\n\tdef __len__(self):\n\t\treturn len(self.syn_ids) + len(self.gta_ids)\n"
  },
  {
    "path": "cycada/data/data_loader.py",
    "content": "from __future__ import print_function\n\nimport os\nfrom os.path import join\n\nimport numpy as np\nimport torch\nimport torch.utils.data as data\nfrom PIL import Image\nfrom torchvision import transforms\n\nfrom ..util import to_tensor_raw\n\n\ndef load_data(name, dset, batch=64, rootdir='', num_channels=3,\n              image_size=32, download=True, kwargs={}):\n\tis_train = (dset == 'train')\n\tif isinstance(name, list) and len(name) == 2:  # load adda data\n\t\tsrc_dataset = get_dataset(name[0], join(rootdir, name[0]), dset,\n\t\t                          image_size, num_channels, download=download)\n\t\ttgt_dataset = get_dataset(name[1], join(rootdir, name[1]), dset,\n\t\t                          image_size, num_channels, download=download)\n\t\tdataset = AddaDataset(src_dataset, tgt_dataset)\n\telse:\n\t\tdataset = get_dataset(name, rootdir, dset, image_size, num_channels,\n\t\t                      download=download)\n\tif len(dataset) == 0:\n\t\treturn None\n\tloader = torch.utils.data.DataLoader(dataset, batch_size=batch,\n\t                                     shuffle=is_train, **kwargs)\n\treturn loader\n\n\ndef get_transform_dataset(dataset_name, rootdir, net_transform, downscale, resize=None, src_data_flag=None, small=False):\n\tuser_paths = os.environ['PYTHONPATH'].split(os.pathsep)\n\ttransform, target_transform = get_transform2(dataset_name, net_transform, downscale, resize)\n\treturn get_fcn_dataset(dataset_name, rootdir, transform=transform, target_transform=target_transform, data_flag=src_data_flag, small=small)\n\n\nsizes = {'cyclesynthia_cyclegta5': 1280, 'cyclesynthia': 1280, 'cityscapes': 1280, 'gta5': 1280, 'cyclegta5': 1280, \"synthia\": 1280}\n\n\ndef get_orig_size(dataset_name):\n\t\"Size of images in the dataset for relative scaling.\"\n\ttry:\n\t\treturn sizes[dataset_name]\n\texcept:\n\t\traise Exception('Unknown dataset size:', dataset_name)\n\n\ndef get_transform2(dataset_name, net_transform, downscale, 
resize):\n\t\"Returns image and label transform to downscale, crop and prepare for net.\"\n\torig_size = get_orig_size(dataset_name)\n\ttransform = []\n\ttarget_transform = []\n\tif downscale is not None:\n\t\ttransform.append(transforms.Resize(orig_size // downscale))\n\t\ttarget_transform.append(transforms.Resize(orig_size // downscale, interpolation=Image.NEAREST))\n\t\n\tif resize is not None:\n\t\ttransform.extend([transforms.Resize([int(resize), int(int(resize) * 1.8)], interpolation=Image.BICUBIC)])\n\t\ttarget_transform.extend([transforms.Resize([int(resize), int(int(resize) * 1.8)], interpolation=Image.NEAREST)])\n\t\n\ttransform.extend([net_transform])\n\ttarget_transform.extend([to_tensor_raw])\n\t\n\ttransform = transforms.Compose(transform)\n\ttarget_transform = transforms.Compose(target_transform)\n\treturn transform, target_transform\n\n\ndef get_transform(params, image_size, num_channels):\n\t# Transforms for PIL Images: Gray <-> RGB\n\tGray2RGB = transforms.Lambda(lambda x: x.convert('RGB'))\n\tRGB2Gray = transforms.Lambda(lambda x: x.convert('L'))\n\t\n\ttransform = []\n\t# Does size request match original size?\n\tif not image_size == params.image_size:\n\t\ttransform.append(transforms.Resize(image_size))\n\t\n\t# Does number of channels requested match original?\n\tif not num_channels == params.num_channels:\n\t\tif num_channels == 1:\n\t\t\ttransform.append(RGB2Gray)\n\t\telif num_channels == 3:\n\t\t\ttransform.append(Gray2RGB)\n\t\telse:\n\t\t\tprint('NumChannels should be 1 or 3', num_channels)\n\t\t\traise Exception\n\t\n\ttransform += [transforms.ToTensor(),\n\t              transforms.Normalize((params.mean,), (params.std,))]\n\t\n\treturn transforms.Compose(transform)\n\n\ndef get_target_transform(params):\n\ttransform = params.target_transform\n\tt_uniform = transforms.Lambda(lambda x: x[:, 0]\n\tif isinstance(x, (list, np.ndarray)) and len(x) == 2 else x)\n\tif transform is None:\n\t\treturn t_uniform\n\telse:\n\t\treturn 
transforms.Compose([transform, t_uniform])\n\n\nclass AddaDataset(data.Dataset):\n\t\n\tdef __init__(self, src_data, tgt_data):\n\t\tself.src = src_data\n\t\tself.tgt = tgt_data\n\t\n\tdef __getitem__(self, index):\n\t\tns = len(self.src)\n\t\tnt = len(self.tgt)\n\t\txs, ys = self.src[index % ns]\n\t\txt, yt = self.tgt[index % nt]\n\t\treturn (xs, ys), (xt, yt)\n\t\n\tdef __len__(self):\n\t\treturn min(len(self.src), len(self.tgt))\n\n\ndata_params = {}\n\n\ndef register_data_params(name):\n\tdef decorator(cls):\n\t\tdata_params[name] = cls\n\t\treturn cls\n\t\n\treturn decorator\n\n\ndataset_obj = {}\n\n\ndef register_dataset_obj(name):\n\tdef decorator(cls):\n\t\tdataset_obj[name] = cls\n\t\treturn cls\n\t\n\treturn decorator\n\n\nclass DatasetParams(object):\n\t\"Class variables defined.\"\n\tnum_channels = 1\n\timage_size = 16\n\tmean = 0.1307\n\tstd = 0.3081\n\tnum_cls = 10\n\ttarget_transform = None\n\n\ndef get_dataset(name, rootdir, dset, image_size, num_channels, download=True):\n\tis_train = (dset == 'train')\n\tprint('get dataset:', name, rootdir, dset)\n\tparams = data_params[name]\n\ttransform = get_transform(params, image_size, num_channels)\n\ttarget_transform = get_target_transform(params)\n\treturn dataset_obj[name](rootdir, train=is_train, transform=transform,\n\t                         target_transform=target_transform, download=download)\n\n\ndef get_fcn_dataset(name, rootdir, **kwargs):\n\treturn dataset_obj[name](rootdir, **kwargs)\n"
  },
  {
    "path": "cycada/data/gta5.py",
    "content": "import os.path\n\nimport numpy as np\nimport scipy.io\nimport torch.utils.data as data\nfrom PIL import Image\n\nfrom .cityscapes import id2label as LABEL2TRAIN, remap_labels_to_train_ids\nfrom .data_loader import DatasetParams, register_data_params, register_dataset_obj\n\n\n@register_data_params('gta5')\nclass GTA5Params(DatasetParams):\n\tnum_channels = 3\n\timage_size = 1024\n\tmean = 0.5\n\tstd = 0.5\n\tnum_cls = 19\n\ttarget_transform = None\n\n\n@register_dataset_obj('gta5')\nclass GTA5(data.Dataset):\n\t\n\tdef __init__(self, root, num_cls=19, split='train', remap_labels=True, transform=None, target_transform=None, data_flag=None):\n\t\tself.root = root\n\t\tself.split = split\n\t\tself.remap_labels = remap_labels\n\t\tself.data_flag = data_flag\n\t\tself.ids = self.collect_ids()\n\t\tself.transform = transform\n\t\tself.target_transform = target_transform\n\t\tm = scipy.io.loadmat(os.path.join(self.root, 'mapping.mat'))\n\t\tfull_classes = [x[0] for x in m['classes'][0]]\n\t\tself.classes = []\n\t\tfor old_id, new_id in LABEL2TRAIN.items():\n\t\t\tif not new_id == 255 and old_id > 0:\n\t\t\t\tself.classes.append(full_classes[old_id])\n\t\tself.num_cls = num_cls\n\t\n\tdef collect_ids(self):\n\t\tsplits = scipy.io.loadmat(os.path.join(self.root, 'split.mat'))\n\t\tids = splits['{}Ids'.format(self.split)].squeeze()\n\t\treturn ids\n\t\n\tdef img_path(self, id):\n\t\tfilename = '{:05d}.png'.format(id)\n\t\treturn os.path.join(self.root, 'images', filename)\n\t\n\tdef label_path(self, id):\n\t\tfilename = '{:05d}.png'.format(id)\n\t\treturn os.path.join(self.root, 'labels', filename)\n\t\n\tdef __getitem__(self, index, debug=False):\n\t\tid = self.ids[index]\n\t\timg_path = self.img_path(id)\n\t\tlabel_path = self.label_path(id)\n\t\t\n\t\timg = Image.open(img_path).convert('RGB')\n\t\tif self.transform is not None:\n\t\t\timg = self.transform(img)\n\t\ttarget = Image.open(label_path)\n\t\tif self.remap_labels:\n\t\t\ttarget = 
np.asarray(target)\n\t\t\ttarget = remap_labels_to_train_ids(target)\n\t\t\ttarget = Image.fromarray(target, 'L')\n\t\tif self.target_transform is not None:\n\t\t\ttarget = self.target_transform(target)\n\t\treturn img, target\n\t\n\tdef __len__(self):\n\t\treturn len(self.ids)\n"
  },
  {
    "path": "cycada/data/rotater.py",
    "content": "class Rotater(object):\n\n    def __init__(self, dataset, orientations=6, transform=None,\n                 target_transform=None):\n        self.dataset = dataset\n        self.orientations = orientations\n        self.transform = transform\n        self.target_transform = target_transform\n\n    def __getitem__(self, index):\n        im, target = self.dataset[index]\n        rotation = index % self.orientations\n        degrees = 360 / self.orientations * rotation\n        im = im.rotate(degrees)\n        if self.transform is not None:\n            im = self.transform(im)\n        if self.target_transform is not None:\n            target = self.target_transform(target)\n        return im, target, degrees\n\n    def __len__(self):\n        return len(self.dataset)\n"
  },
  {
    "path": "cycada/data/synthia.py",
    "content": "import os.path\n\nimport numpy as np\nimport torch.utils.data as data\nfrom PIL import Image\nfrom .util import classes, ignore_label, id2label\nfrom .data_loader import DatasetParams, register_data_params, register_dataset_obj\n\ndef syn_relabel(arr):\n\tout = ignore_label * np.ones(arr.shape, dtype=np.uint8)\n\tfor id, label in id2label.items():\n\t\tout[arr == id] = int(label)\n\treturn out\n\n@register_data_params('synthia')\nclass SYNTHIAParams(DatasetParams):\n\tnum_channels = 3\n\timage_size = 1024\n\tmean = 0.5\n\tstd = 0.5\n\tnum_cls = 19\n\ttarget_transform = None\n\n\n@register_dataset_obj('synthia')\nclass SYNTHIA(data.Dataset):\n\t\n\tdef __init__(self, root, num_cls=19, split='train', remap_labels=True, transform=None, target_transform=None, data_flag=None, small=2):\n\t\tself.root = root\n\t\tself.split = split\n\t\tself.small = small\n\t\tself.remap_labels = remap_labels\n\t\tself.ids = self.collect_ids()\n\t\tself.transform = transform\n\t\tself.target_transform = target_transform\n\t\tself.classes = classes\n\t\tself.num_cls = num_cls\n\t\tself.data_flag = data_flag\n\t\n\tdef collect_ids(self):\n\t\tsplits = []\n\t\twith open(os.path.join(self.root, 'SYNTHIA_imagelist_{}.txt'.format(self.split))) as f:\n\t\t\tfor line in f:\n\t\t\t\tline = line.strip('\\n')\n\t\t\t\tsplits.append(line.split('/')[-1])\n\t\treturn splits\n\t\n\tdef img_path(self, filename):\n\t\tif self.small == 0:\n\t\t\treturn os.path.join(self.root, 'RGB_300x540', filename)\n\t\telif self.small == 1:\n\t\t\treturn os.path.join(self.root, 'RGB_600x1080', filename)\n\t\telse:\n\t\t\treturn os.path.join(self.root, 'RGB', filename)\n\t\n\tdef label_path(self, filename):\n\t\tif self.small == 0:\n\t\t\treturn os.path.join(self.root, 'GT', 'parsed_LABELS_300x540', filename)\n\t\telif self.small == 1:\n\t\t\treturn os.path.join(self.root, 'GT', 'parsed_LABELS_600x1080', filename)\n\t\telse:\n\t\t\treturn os.path.join(self.root, 'GT', 'parsed_LABELS', 
filename)\n\t\n\tdef __getitem__(self, index, debug=False):\n\t\tid = self.ids[index]\n\t\timg_path = self.img_path(id)\n\t\tlabel_path = self.label_path(id)\n\t\t\n\t\tif debug:\n\t\t\tprint(self.__class__.__name__)\n\t\t\tprint(\"IMG Path: {}\".format(img_path))\n\t\t\tprint(\"Label Path: {}\".format(label_path))\n\t\t\n\t\timg = Image.open(img_path).convert('RGB')\n\t\tif self.transform is not None:\n\t\t\timg = self.transform(img)\n\t\ttarget = Image.open(label_path)\n\t\t\n\t\tif self.remap_labels:\n\t\t\ttarget = np.asarray(target)\n\t\t\ttarget = syn_relabel(target)\n\t\t\ttarget = Image.fromarray(target, 'L')\n\t\tif self.target_transform is not None:\n\t\t\ttarget = self.target_transform(target)\n\t\treturn img, target\n\t\n\tdef __len__(self):\n\t\treturn len(self.ids)\n"
  },
  {
    "path": "cycada/data/util.py",
    "content": "import logging\nimport os.path\n\nimport requests\n\nlogger = logging.getLogger(__name__)\n\nignore_label = 255\nid2label = {0: ignore_label,\n            1: 10,\n            2: 2,\n            3: 0,\n            4: 1,\n            5: 4,\n            6: 8,\n            7: 5,\n            8: 13,\n            9: 7,\n            10: 11,\n            11: 18,\n            12: 17,\n            13: ignore_label,\n            14: ignore_label,\n            15: 6,\n            16: 9,\n            17: 12,\n            18: 14,\n            19: 15,\n            20: 16,\n            21: 3,\n            22: ignore_label}\n\nclasses = ['road',\n           'sidewalk',\n           'building',\n           'wall',\n           'fence',\n           'pole',\n           'traffic light',\n           'traffic sign',\n           'vegetation',\n           'terrain',\n           'sky',\n           'person',\n           'rider',\n           'car',\n           'truck',\n           'bus',\n           'train',\n           'motorcycle',\n           'bicycle']\n\npalette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,\n           220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,\n           0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]\n\ndef maybe_download(url, dest):\n    \"\"\"Download the url to dest if necessary, optionally checking file\n    integrity.\n    \"\"\"\n    if not os.path.exists(dest):\n        logger.info('Downloading %s to %s', url, dest)\n        download(url, dest)\n\n\ndef download(url, dest):\n    \"\"\"Download the url to dest, overwriting dest if it already exists.\"\"\"\n    response = requests.get(url, stream=True)\n    with open(dest, 'wb') as f:\n        for chunk in response.iter_content(chunk_size=1024):\n            if chunk:\n                f.write(chunk)\n\n"
  },
  {
    "path": "cycada/logging.yml",
    "content": "---\nversion: 1\ndisable_existing_loggers: False\nformatters:\n    simple:\n        format: \"[%(asctime)s] %(levelname)-8s %(message)s\"\n    color:\n        class: colorlog.ColoredFormatter\n        format: \"[%(asctime)s] %(log_color)s%(levelname)-8s%(reset)s %(message)s\"\n        log_colors:\n            DEBUG: \"cyan\"\n            INFO: \"green\"\n            WARNING: \"yellow\"\n            ERROR: \"red\"\n            CRITICAL: \"red,bg_white\"\n\nhandlers:\n    console:\n        class: cycada.util.TqdmHandler\n        level: INFO\n        formatter: color\n\n    file_handler:\n        class: logging.FileHandler\n        level: INFO\n        formatter: simple\n        encoding: utf8\n\nroot:\n    level: INFO\n    handlers: [console, file_handler]\n\n"
  },
  {
    "path": "cycada/models/MDAN.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nlogger = logging.getLogger(__name__)\n\n\nclass GradientReversalLayer(torch.autograd.Function):\n\t\"\"\"\n\tImplement the gradient reversal layer for the convenience of domain adaptation neural network.\n\tThe forward part is the identity function while the backward part is the negative function.\n\t\"\"\"\n\t\n\tdef forward(self, inputs):\n\t\treturn inputs\n\t\n\tdef backward(self, grad_output):\n\t\tgrad_input = grad_output.clone()\n\t\tgrad_input = -grad_input\n\t\treturn grad_input\n\n\nclass MDANet(nn.Module):\n\t\"\"\"\n\tMulti-layer perceptron with adversarial regularizer by domain classification.\n\t\"\"\"\n\t\n\tdef __init__(self, configs):\n\t\tsuper(MDANet, self).__init__()\n\t\t\n\t\tself.pooling_layer = nn.AdaptiveAvgPool2d((2, 2))\n\t\tself.dim_reduction = nn.Conv2d(4096, 512, kernel_size=1)\n\t\tnn.init.xavier_normal_(self.dim_reduction.weight)\n\t\tnn.init.constant_(self.dim_reduction.bias, 0.1)\n\t\tself.input_dim = configs[\"input_dim\"]\n\t\tself.num_hidden_layers = len(configs[\"hidden_layers\"])\n\t\tself.num_neurons = [] + [self.input_dim] + configs[\"hidden_layers\"]\n\t\tself.num_domains = configs[\"num_domains\"]\n\t\t# Parameters of hidden, fully-connected layers, feature learning component.\n\t\tself.hiddens = nn.ModuleList([nn.Linear(self.num_neurons[i], self.num_neurons[i + 1])\n\t\t                              for i in range(self.num_hidden_layers)])\n\t\t# Parameter of the final softmax classification layer.\n\t\tself.softmax = nn.Linear(self.num_neurons[-1], configs[\"num_classes\"])\n\t\t# Parameter of the domain classification layer, multiple sources single target domain adaptation.\n\t\tself.domains = nn.ModuleList([nn.Linear(self.num_neurons[-1], 2) for _ in range(self.num_domains)])\n\t\t# Gradient reversal layer.\n\t\tself.grls = [GradientReversalLayer() for _ in 
range(self.num_domains)]\n\t\n\tdef forward(self, sinputs_syn, sinputs_gta, tinputs):\n\t\t\"\"\"\n\t\t:param sinputs:     A list of k inputs from k source domains.\n\t\t:param tinputs:     Input from the target domain.\n\t\t:return:\n\t\t\"\"\"\n\t\tsinputs_gta = self.pooling_layer(sinputs_gta)\n\t\tsinputs_syn = self.pooling_layer(sinputs_syn)\n\t\ttinputs = self.pooling_layer(tinputs)\n\t\t\n\t\tsinputs_gta = self.dim_reduction(sinputs_gta)\n\t\tsinputs_syn = self.dim_reduction(sinputs_syn)\n\t\ttinputs = self.dim_reduction(tinputs)\n\t\t\n\t\tb = sinputs_gta.size()[0]\n\t\tsyn_relu, gta_relu, th_relu = sinputs_syn.view(b, -1), sinputs_gta.view(b, -1), tinputs.view(b, -1)\n\t\tassert (syn_relu[0].size()[0] == self.input_dim)\n\t\t\n\t\tfor hidden in self.hiddens:\n\t\t\tsyn_relu = F.relu(hidden(syn_relu))\n\t\t\tgta_relu = F.relu(hidden(gta_relu))\n\t\t\n\t\tfor hidden in self.hiddens:\n\t\t\tth_relu = F.relu(hidden(th_relu))\n\t\t\n\t\t# Classification probabilities on k source domains.\n\t\tlogprobs = []\n\t\tlogprobs.append(F.log_softmax(self.softmax(syn_relu), dim=1))\n\t\tlogprobs.append(F.log_softmax(self.softmax(gta_relu), dim=1))\n\t\t\n\t\t# Domain classification accuracies.\n\t\tsdomains, tdomains = [], []\n\t\tsdomains.append(F.log_softmax(self.domains[0](self.grls[0](syn_relu)), dim=1))\n\t\ttdomains.append(F.log_softmax(self.domains[0](self.grls[0](th_relu)), dim=1))\n\t\t\n\t\tsdomains.append(F.log_softmax(self.domains[1](self.grls[1](gta_relu)), dim=1))\n\t\ttdomains.append(F.log_softmax(self.domains[1](self.grls[1](th_relu)), dim=1))\n\t\t\n\t\treturn logprobs, sdomains, tdomains\n\t\n\tdef inference(self, inputs):\n\t\th_relu = inputs\n\t\tfor hidden in self.hiddens:\n\t\t\th_relu = F.relu(hidden(h_relu))\n\t\t# Classification probability.\n\t\tlogprobs = F.log_softmax(self.softmax(h_relu), dim=1)\n\t\treturn logprobs\n"
  },
  {
    "path": "cycada/models/__init__.py",
    "content": "from .models import get_model\nfrom .task_net import LeNet\nfrom .task_net import DTNClassifier\nfrom .adda_net import AddaNet\nfrom .fcn8s import VGG16_FCN8s, Discriminator\nfrom .drn import drn26\n"
  },
  {
    "path": "cycada/models/adda_net.py",
    "content": "\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init\nfrom .util import init_weights\nfrom .models import register_model, get_model \n\n@register_model('AddaNet')\nclass AddaNet(nn.Module):\n    \"Defines and Adda Network.\"\n    def __init__(self, num_cls=10, model='LeNet', src_weights_init=None,\n            weights_init=None):\n        super(AddaNet, self).__init__()\n        self.name = 'AddaNet'\n        self.base_model = model\n        self.num_cls = num_cls\n        self.cls_criterion = nn.CrossEntropyLoss()\n        self.gan_criterion = nn.CrossEntropyLoss()\n      \n        self.setup_net()\n        if weights_init is not None:\n            self.load(weights_init)\n        elif src_weights_init is not None:\n            self.load_src_net(src_weights_init)\n        else:\n            raise Exception('AddaNet must be initialized with weights.')\n        \n\n    def forward(self, x_s, x_t):\n        \"\"\"Pass source and target images through their\n        respective networks.\"\"\"\n        score_s, x_s = self.src_net(x_s, with_ft=True)\n        score_t, x_t = self.tgt_net(x_t, with_ft=True)\n\n        if self.discrim_feat:\n            d_s = self.discriminator(x_s)\n            d_t = self.discriminator(x_t)\n        else:\n            d_s = self.discriminator(score_s)\n            d_t = self.discriminator(score_t)\n        return score_s, score_t, d_s, d_t\n\n    def setup_net(self):\n        \"\"\"Setup source, target and discriminator networks.\"\"\"\n        self.src_net = get_model(self.base_model, num_cls=self.num_cls)\n        self.tgt_net = get_model(self.base_model, num_cls=self.num_cls)\n\n        input_dim = self.num_cls \n        self.discriminator = nn.Sequential(\n                nn.Linear(input_dim, 500),\n                nn.ReLU(),\n                nn.Linear(500, 500),\n                nn.ReLU(),\n                nn.Linear(500, 2),\n                )\n\n        self.image_size = 
self.src_net.image_size\n        self.num_channels = self.src_net.num_channels\n\n    def load(self, init_path):\n        \"Loads full src and tgt models.\"\n        net_init_dict = torch.load(init_path)\n        self.load_state_dict(net_init_dict)\n\n    def load_src_net(self, init_path):\n        \"\"\"Initialize source and target with source\n        weights.\"\"\"\n        self.src_net.load(init_path)\n        self.tgt_net.load(init_path)\n\n    def save(self, out_path):\n        torch.save(self.state_dict(), out_path)\n\n    def save_tgt_net(self, out_path):\n        torch.save(self.tgt_net.state_dict(), out_path)\n\n"
  },
  {
    "path": "cycada/models/drn.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nimport torchvision\n\nfrom .models import register_model\nfrom ..util import safe_load_state_dict\n\n__all__ = ['DRN', 'drn26', 'drn42', 'drn58']\n\nmodel_urls = {\n\t'drn26': 'https://tigress-web.princeton.edu/~fy/drn/models/drn26-ddedf421.pth',\n\t'drn42': 'https://tigress-web.princeton.edu/~fy/drn/models/drn42-9d336e8c.pth',\n\t'drn58': 'https://tigress-web.princeton.edu/~fy/drn/models/drn58-0a53a92c.pth'\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):\n\treturn nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n\t                 padding=padding, bias=False, dilation=dilation)\n\n\nclass BasicBlock(nn.Module):\n\texpansion = 1\n\t\n\tdef __init__(self, inplanes, planes, stride=1, downsample=None,\n\t             dilation=(1, 1), residual=True):\n\t\tsuper(BasicBlock, self).__init__()\n\t\tself.conv1 = conv3x3(inplanes, planes, stride,\n\t\t                     padding=dilation[0], dilation=dilation[0])\n\t\tself.bn1 = nn.BatchNorm2d(planes)\n\t\tself.relu = nn.ReLU(inplace=True)\n\t\tself.conv2 = conv3x3(planes, planes,\n\t\t                     padding=dilation[1], dilation=dilation[1])\n\t\tself.bn2 = nn.BatchNorm2d(planes)\n\t\tself.downsample = downsample\n\t\tself.stride = stride\n\t\tself.residual = residual\n\t\n\tdef forward(self, x):\n\t\tresidual = x\n\t\t\n\t\tout = self.conv1(x)\n\t\tout = self.bn1(out)\n\t\tout = self.relu(out)\n\t\t\n\t\tout = self.conv2(out)\n\t\tout = self.bn2(out)\n\t\t\n\t\tif self.downsample is not None:\n\t\t\tresidual = self.downsample(x)\n\t\tif self.residual:\n\t\t\tout += residual\n\t\tout = self.relu(out)\n\t\t\n\t\treturn out\n\n\nclass Bottleneck(nn.Module):\n\texpansion = 4\n\t\n\tdef __init__(self, inplanes, planes, stride=1, downsample=None,\n\t             dilation=(1, 1), residual=True):\n\t\tsuper(Bottleneck, self).__init__()\n\t\tself.conv1 = 
nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n\t\tself.bn1 = nn.BatchNorm2d(planes)\n\t\tself.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n\t\t                       padding=dilation[1], bias=False,\n\t\t                       dilation=dilation[1])\n\t\tself.bn2 = nn.BatchNorm2d(planes)\n\t\tself.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n\t\tself.bn3 = nn.BatchNorm2d(planes * 4)\n\t\tself.relu = nn.ReLU(inplace=True)\n\t\tself.downsample = downsample\n\t\tself.stride = stride\n\t\n\tdef forward(self, x):\n\t\tresidual = x\n\t\t\n\t\tout = self.conv1(x)\n\t\tout = self.bn1(out)\n\t\tout = self.relu(out)\n\t\t\n\t\tout = self.conv2(out)\n\t\tout = self.bn2(out)\n\t\tout = self.relu(out)\n\t\t\n\t\tout = self.conv3(out)\n\t\tout = self.bn3(out)\n\t\t\n\t\tif self.downsample is not None:\n\t\t\tresidual = self.downsample(x)\n\t\t\n\t\tout += residual\n\t\tout = self.relu(out)\n\t\t\n\t\treturn out\n\n\nclass DRN(nn.Module):\n\ttransform = torchvision.transforms.Compose([\n\t\ttorchvision.transforms.ToTensor(),\n\t\ttorchvision.transforms.Normalize(\n\t\t\tmean=[0.485, 0.456, 0.406],\n\t\t\tstd=[0.229, 0.224, 0.225]),\n\t])\n\t\n\tdef __init__(self, block, layers, num_cls=1000,\n\t             channels=(16, 32, 64, 128, 256, 512, 512, 512),\n\t             out_map=False, out_middle=False, pool_size=28,\n\t             weights_init=None, pretrained=True, finetune=False,\n\t             output_last_ft=False, modelname='drn26'):\n\t\tif output_last_ft:\n\t\t\tprint('DRN discrim feat not implemented, using scores')\n\t\t\n\t\tsuper(DRN, self).__init__()\n\t\tself.inplanes = channels[0]\n\t\tself.output_last_ft = output_last_ft\n\t\tself.out_map = out_map\n\t\tself.out_dim = channels[-1]\n\t\tself.out_middle = out_middle\n\t\tself.conv1 = nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3,\n\t\t                       bias=False)\n\t\tself.bn1 = nn.BatchNorm2d(channels[0])\n\t\tself.relu = 
nn.ReLU(inplace=True)\n\t\t\n\t\tself.layer1 = self._make_layer(BasicBlock, channels[0], layers[0], stride=1)\n\t\tself.layer2 = self._make_layer(BasicBlock, channels[1], layers[1], stride=2)\n\t\t\n\t\tself.layer3 = self._make_layer(block, channels[2], layers[2], stride=2)\n\t\tself.layer4 = self._make_layer(block, channels[3], layers[3], stride=2)\n\t\tself.layer5 = self._make_layer(block, channels[4], layers[4], dilation=2,\n\t\t                               new_level=False)\n\t\tself.layer6 = None if layers[5] == 0 else \\\n\t\t\tself._make_layer(block, channels[5], layers[5], dilation=4,\n\t\t\t                 new_level=False)\n\t\tself.layer7 = None if layers[6] == 0 else \\\n\t\t\tself._make_layer(BasicBlock, channels[6], layers[6], dilation=2,\n\t\t\t                 new_level=False, residual=False)\n\t\tself.layer8 = None if layers[7] == 0 else \\\n\t\t\tself._make_layer(BasicBlock, channels[7], layers[7], dilation=1,\n\t\t\t                 new_level=False, residual=False)\n\t\t\n\t\tif num_cls > 0:\n\t\t\tself.avgpool = nn.AvgPool2d(pool_size)\n\t\t\t# self.fc = nn.Linear(self.out_dim, num_classes)\n\t\t\tself.fc = nn.Conv2d(self.out_dim, num_cls, kernel_size=1,\n\t\t\t                    stride=1, padding=0, bias=True)\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.Conv2d):\n\t\t\t\tn = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n\t\t\t\tm.weight.data.normal_(0, math.sqrt(2. 
/ n))\n\t\t\telif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.weight.data.fill_(1)\n\t\t\t\tm.bias.data.zero_()\n\t\t\n\t\tif pretrained:\n\t\t\tif not weights_init is None:\n\t\t\t\tstate_dict = torch.load(weights_init)\n\t\t\t\tprint('Using state dict from', weights_init)\n\t\t\telse:\n\t\t\t\tstate_dict = model_zoo.load_url(model_urls[modelname])\n\t\t\t\n\t\t\tif finetune:\n\t\t\t\tdel state_dict['fc.weight']\n\t\t\t\tdel state_dict['fc.bias']\n\t\t\t\tsafe_load_state_dict(self, state_dict)\n\t\t\t\tprint('Finetune: remove last layer')\n\t\t\telse:\n\t\t\t\tself.load_state_dict(state_dict)\n\t\t\t\tprint('Loading full model')\n\t\n\tdef _make_layer(self, block, planes, blocks, stride=1, dilation=1,\n\t                new_level=True, residual=True):\n\t\tassert dilation == 1 or dilation % 2 == 0\n\t\tdownsample = None\n\t\tif stride != 1 or self.inplanes != planes * block.expansion:\n\t\t\tdownsample = nn.Sequential(\n\t\t\t\tnn.Conv2d(self.inplanes, planes * block.expansion,\n\t\t\t\t          kernel_size=1, stride=stride, bias=False),\n\t\t\t\tnn.BatchNorm2d(planes * block.expansion),\n\t\t\t)\n\t\t\n\t\tlayers = []\n\t\tlayers.append(block(\n\t\t\tself.inplanes, planes, stride, downsample,\n\t\t\tdilation=(1, 1) if dilation == 1 else (\n\t\t\t\tdilation // 2 if new_level else dilation, dilation),\n\t\t\tresidual=residual))\n\t\tself.inplanes = planes * block.expansion\n\t\tfor i in range(1, blocks):\n\t\t\tlayers.append(block(self.inplanes, planes, residual=residual,\n\t\t\t                    dilation=(dilation, dilation)))\n\t\t\n\t\treturn nn.Sequential(*layers)\n\t\n\tdef forward(self, x):\n\t\t_, _, h, w = x.size()\n\t\ty = list()\n\t\t\n\t\tx = self.conv1(x)\n\t\tx = self.bn1(x)\n\t\tx = self.relu(x)\n\t\tx = self.layer1(x)\n\t\ty.append(x)\n\t\tx = self.layer2(x)\n\t\ty.append(x)\n\t\t\n\t\tx = self.layer3(x)\n\t\ty.append(x)\n\t\t\n\t\tx = self.layer4(x)\n\t\ty.append(x)\n\t\t\n\t\tx = self.layer5(x)\n\t\ty.append(x)\n\t\t\n\t\tif self.layer6 is not 
None:\n\t\t\tx = self.layer6(x)\n\t\t\ty.append(x)\n\t\t\n\t\tif self.layer7 is not None:\n\t\t\tx = self.layer7(x)\n\t\t\ty.append(x)\n\t\t\n\t\tif self.layer8 is not None:\n\t\t\tx = self.layer8(x)\n\t\t\ty.append(x)\n\t\t\n\t\tif self.output_last_ft:\n\t\t\tft_to_save = x\n\t\t\n\t\tif self.out_map:\n\t\t\tx = self.fc(x)\n\t\t\tx = nn.functional.interpolate(x, (h, w), mode='bilinear', align_corners=True)\n\t\telse:\n\t\t\tx = self.avgpool(x)\n\t\t\tx = self.fc(x)\n\t\t\tx = x.view(x.size(0), -1)\n\t\t\n\t\tif self.out_middle:\n\t\t\treturn x, y\n\t\telif self.output_last_ft:\n\t\t\treturn x, ft_to_save\n\t\telse:\n\t\t\treturn x\n\n\n@register_model('drn26')\ndef drn26(pretrained=True, finetune=False, out_map=True, **kwargs):\n\tmodel = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], modelname='drn26',\n\t            out_map=out_map, finetune=finetune, **kwargs)\n\t# if pretrained:\n\t#    state_dict = model_zoo.load_url(model_urls['drn26'])\n\t#    if finetune:\n\t#        del state_dict['fc.weight']\n\t#        del state_dict['fc.bias']\n\t#        safe_load_state_dict(model, state_dict)\n\t#    else:\n\t#        model.load_state_dict(state_dict)\n\treturn model\n\n\n@register_model('drn42')\ndef drn42(pretrained=False, finetune=False, out_map=True, **kwargs):\n\tmodel = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], modelname='drn42',\n\t            out_map=out_map, finetune=finetune, **kwargs)\n\t# if pretrained:\n\t#    model.load_state_dict(model_zoo.load_url(model_urls['drn42']))\n\treturn model\n\n\ndef drn58(pretrained=False, **kwargs):\n\tmodel = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], **kwargs)\n\tif pretrained:\n\t\tmodel.load_state_dict(model_zoo.load_url(model_urls['drn58']))\n\treturn model\n"
  },
  {
    "path": "cycada/models/fcn8s.py",
    "content": "import numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torchvision\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn import init\nfrom torch.utils import model_zoo\nfrom torchvision.models import vgg\n\nfrom .models import register_model\n\n\ndef get_upsample_filter(size):\n\t\"\"\"Make a 2D bilinear kernel suitable for upsampling\"\"\"\n\tfactor = (size + 1) // 2\n\tif size % 2 == 1:\n\t\tcenter = factor - 1\n\telse:\n\t\tcenter = factor - 0.5\n\tog = np.ogrid[:size, :size]\n\tfilter = (1 - abs(og[0] - center) / factor) * \\\n\t         (1 - abs(og[1] - center) / factor)\n\treturn torch.from_numpy(filter).float()\n\n\nclass Bilinear(nn.Module):\n\t\n\tdef __init__(self, factor, num_channels):\n\t\tsuper().__init__()\n\t\tself.factor = factor\n\t\tfilter = get_upsample_filter(factor * 2)\n\t\tw = torch.zeros(num_channels, num_channels, factor * 2, factor * 2)\n\t\tfor i in range(num_channels):\n\t\t\tw[i, i] = filter\n\t\tself.register_buffer('w', w)\n\t\n\tdef forward(self, x):\n\t\treturn F.conv_transpose2d(x, Variable(self.w), stride=self.factor)\n\n\n@register_model('fcn8s')\nclass VGG16_FCN8s(nn.Module):\n\ttransform = torchvision.transforms.Compose([\n\t\ttorchvision.transforms.ToTensor(),\n\t\ttorchvision.transforms.Normalize(\n\t\t\tmean=[0.485, 0.456, 0.406],\n\t\t\tstd=[0.229, 0.224, 0.225]),\n\t])\n\t\n\tdef __init__(self, num_cls=19, pretrained=True, weights_init=None,\n\t             output_last_ft=False):\n\t\tsuper().__init__()\n\t\tself.output_last_ft = output_last_ft\n\t\tif weights_init:\n\t\t\tbatch_norm = False\n\t\telse:\n\t\t\tbatch_norm = True\n\t\tself.vgg = make_layers(vgg.cfg['D'], batch_norm=False)\n\t\tself.vgg_head = nn.Sequential(\n\t\t\tnn.Conv2d(512, 4096, 7),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.Dropout2d(p=0.5),\n\t\t\tnn.Conv2d(4096, 4096, 1),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.Dropout2d(p=0.5),\n\t\t\tnn.Conv2d(4096, num_cls, 1)\n\t\t)\n\t\tself.upscore2 = 
self.upscore_pool4 = Bilinear(2, num_cls)\n\t\tself.upscore8 = Bilinear(8, num_cls)\n\t\tself.score_pool4 = nn.Conv2d(512, num_cls, 1)\n\t\tfor param in self.score_pool4.parameters():\n\t\t\t# init.constant(param, 0)\n\t\t\tinit.constant_(param, 0)\n\t\tself.score_pool3 = nn.Conv2d(256, num_cls, 1)\n\t\tfor param in self.score_pool3.parameters():\n\t\t\t# init.constant(param, 0)\n\t\t\tinit.constant_(param, 0)\n\t\t\n\t\tif pretrained:\n\t\t\tif weights_init is not None:\n\t\t\t\tself.load_weights(torch.load(weights_init))\n\t\t\telse:\n\t\t\t\tself.load_base_weights()\n\t\n\tdef load_base_vgg(self, weights_state_dict):\n\t\tvgg_state_dict = self.get_dict_by_prefix(weights_state_dict, 'vgg.')\n\t\tself.vgg.load_state_dict(vgg_state_dict)\n\t\n\tdef load_vgg_head(self, weights_state_dict):\n\t\tvgg_head_state_dict = self.get_dict_by_prefix(weights_state_dict, 'vgg_head.')\n\t\tself.vgg_head.load_state_dict(vgg_head_state_dict)\n\t\n\tdef get_dict_by_prefix(self, weights_state_dict, prefix):\n\t\treturn {k[len(prefix):]: v\n\t\t        for k, v in weights_state_dict.items()\n\t\t        if k.startswith(prefix)}\n\t\n\tdef load_weights(self, weights_state_dict):\n\t\tself.load_base_vgg(weights_state_dict)\n\t\tself.load_vgg_head(weights_state_dict)\n\t\n\tdef split_vgg_head(self):\n\t\tself.classifier = list(self.vgg_head.children())[-1]\n\t\tself.vgg_head_feat = nn.Sequential(*list(self.vgg_head.children())[:-1])\n\t\n\tdef forward(self, x):\n\t\tinput = x\n\t\tx = F.pad(x, (99, 99, 99, 99), mode='constant', value=0)\n\t\tintermediates = {}\n\t\tfts_to_save = {16: 'pool3', 23: 'pool4'}\n\t\tfor i, module in enumerate(self.vgg):\n\t\t\tx = module(x)\n\t\t\tif i in fts_to_save:\n\t\t\t\tintermediates[fts_to_save[i]] = x\n\t\t\n\t\tft_to_save = 5  # Dropout before classifier\n\t\tlast_ft = {}\n\t\tfor i, module in enumerate(self.vgg_head):\n\t\t\tx = module(x)\n\t\t\tif i == ft_to_save:\n\t\t\t\tlast_ft = x\n\t\t\n\t\t_, _, h, w = x.size()\n\t\tupscore2 = 
self.upscore2(x)\n\t\tpool4 = intermediates['pool4']\n\t\tscore_pool4 = self.score_pool4(0.01 * pool4)\n\t\tscore_pool4c = _crop(score_pool4, upscore2, offset=5)\n\t\tfuse_pool4 = upscore2 + score_pool4c\n\t\tupscore_pool4 = self.upscore_pool4(fuse_pool4)\n\t\tpool3 = intermediates['pool3']\n\t\tscore_pool3 = self.score_pool3(0.0001 * pool3)\n\t\tscore_pool3c = _crop(score_pool3, upscore_pool4, offset=9)\n\t\tfuse_pool3 = upscore_pool4 + score_pool3c\n\t\tupscore8 = self.upscore8(fuse_pool3)\n\t\tscore = _crop(upscore8, input, offset=31)\n\t\tif self.output_last_ft:\n\t\t\treturn score, last_ft\n\t\telse:\n\t\t\treturn score\n\t\n\tdef load_base_weights(self):\n\t\t\"\"\"This is complicated because we converted the base model to be fully\n\t\tconvolutional, so some surgery needs to happen here.\"\"\"\n\t\tbase_state_dict = model_zoo.load_url(vgg.model_urls['vgg16'])\n\t\tvgg_state_dict = {k[len('features.'):]: v\n\t\t                  for k, v in base_state_dict.items()\n\t\t                  if k.startswith('features.')}\n\t\tself.vgg.load_state_dict(vgg_state_dict)\n\t\tvgg_head_params = self.vgg_head.parameters()\n\t\tfor k, v in base_state_dict.items():\n\t\t\tif not k.startswith('classifier.'):\n\t\t\t\tcontinue\n\t\t\tif k.startswith('classifier.6.'):\n\t\t\t\t# skip final classifier output\n\t\t\t\tcontinue\n\t\t\tvgg_head_param = next(vgg_head_params)\n\t\t\tvgg_head_param.data = v.view(vgg_head_param.size())\n\n\nclass VGG16_FCN8s_caffe(VGG16_FCN8s):\n\ttransform = torchvision.transforms.Compose([\n\t\ttorchvision.transforms.ToTensor(),\n\t\ttorchvision.transforms.Normalize(\n\t\t\tmean=[0.485, 0.458, 0.408],\n\t\t\tstd=[0.00392156862745098] * 3),\n\t\ttorchvision.transforms.Lambda(\n\t\t\tlambda x: torch.stack(torch.unbind(x, 1)[::-1], 1))\n\t])\n\t\n\tdef load_base_weights(self):\n\t\tbase_state_dict = model_zoo.load_url('https://s3-us-west-2.amazonaws.com/jcjohns-models/vgg16-00b39a1b.pth')\n\t\tvgg_state_dict = {k[len('features.'):]: v\n\t\t            
      for k, v in base_state_dict.items()\n\t\t                  if k.startswith('features.')}\n\t\tself.vgg.load_state_dict(vgg_state_dict)\n\t\tvgg_head_params = self.vgg_head.parameters()\n\t\tfor k, v in base_state_dict.items():\n\t\t\tif not k.startswith('classifier.'):\n\t\t\t\tcontinue\n\t\t\tif k.startswith('classifier.6.'):\n\t\t\t\t# skip final classifier output\n\t\t\t\tcontinue\n\t\t\tvgg_head_param = next(vgg_head_params)\n\t\t\tvgg_head_param.data = v.view(vgg_head_param.size())\n\n\nclass Discriminator(nn.Module):\n\tdef __init__(self, input_dim=4096, output_dim=2, pretrained=False, weights_init=''):\n\t\tsuper().__init__()\n\t\tdim1 = 1024 if input_dim == 4096 else 512\n\t\tdim2 = int(dim1 / 2)\n\t\tself.D = nn.Sequential(\n\t\t\tnn.Conv2d(input_dim, dim1, 1),\n\t\t\tnn.Dropout2d(p=0.5),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.Conv2d(dim1, dim2, 1),\n\t\t\tnn.Dropout2d(p=0.5),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.Conv2d(dim2, output_dim, 1)\n\t\t)\n\t\t\n\t\tif pretrained and weights_init is not None:\n\t\t\tself.load_weights(weights_init)\n\t\n\tdef forward(self, x):\n\t\td_score = self.D(x)\n\t\treturn d_score\n\t\n\tdef load_weights(self, weights):\n\t\tprint('Loading discriminator weights')\n\t\tself.load_state_dict(torch.load(weights))\n\n\nclass Transform_Module(nn.Module):\n\tdef __init__(self, input_dim=4096):\n\t\tsuper().__init__()\n\t\tself.transform = nn.Sequential(\n\t\t\tnn.Conv2d(input_dim, input_dim, 1),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\t# nn.Conv2d(input_dim, input_dim, 1),\n\t\t\t# nn.ReLU(inplace=True),\n\t\t)\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.Conv2d):\n\t\t\t\tinit_eye(m.weight)\n\t\t\t\tm.bias.data.zero_()\n\t\n\tdef forward(self, x):\n\t\tt_x = self.transform(x)\n\t\treturn t_x\n\n\ndef init_eye(tensor):\n\tif isinstance(tensor, Variable):\n\t\tinit_eye(tensor.data)\n\t\treturn tensor\n\treturn tensor.copy_(torch.eye(tensor.size(0), tensor.size(1)))\n\n\ndef _crop(input, shape, offset=0):\n\t_, _, 
h, w = shape.size()\n\treturn input[:, :, offset:offset + h, offset:offset + w].contiguous()\n\n\ndef make_layers(cfg, batch_norm=False):\n\t\"\"\"This is almost verbatim from torchvision.models.vgg, except that the\n\tMaxPool2d modules are configured with ceil_mode=True.\n\t\"\"\"\n\tlayers = []\n\tin_channels = 3\n\tfor v in cfg:\n\t\tif v == 'M':\n\t\t\tlayers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))\n\t\telse:\n\t\t\tconv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n\t\t\tmodules = [conv2d, nn.ReLU(inplace=True)]\n\t\t\tif batch_norm:\n\t\t\t\tmodules.insert(1, nn.BatchNorm2d(v))\n\t\t\tlayers.extend(modules)\n\t\t\tin_channels = v\n\treturn nn.Sequential(*layers)\n"
  },
  {
    "path": "cycada/models/models.py",
    "content": "import torch\n\nmodels = {}\ndef register_model(name):\n    def decorator(cls):\n        models[name] = cls\n        return cls\n    return decorator\n\n\ndef get_model(name, num_cls=10, **args):\n    net = models[name](num_cls=num_cls, **args)\n    if torch.cuda.is_available():\n        net = net.cuda()\n    return net\n"
  },
  {
    "path": "cycada/models/task_net.py",
    "content": "import torch\nimport torch.nn as nn\nfrom torch.nn import init\nfrom .models import register_model \nfrom .util import init_weights\nimport numpy as np\n\nclass TaskNet(nn.Module):\n\n    num_channels = 3\n    image_size = 32\n    name = 'TaskNet'\n\n    \"Basic class which does classification.\"\n    def __init__(self, num_cls=10, weights_init=None):\n        super(TaskNet, self).__init__()\n        self.num_cls = num_cls\n        self.setup_net()\n        self.criterion = nn.CrossEntropyLoss()\n        if weights_init is not None:\n            self.load(weights_init)\n        else:\n            init_weights(self)\n\n    def forward(self, x, with_ft=False):\n        x = self.conv_params(x)\n        x = x.view(x.size(0), -1)\n        x = self.fc_params(x)\n        score = self.classifier(x)\n        if with_ft:\n            return score, x\n        else:\n            return score\n\n    def setup_net(self):\n        \"\"\"Method to be implemented in each class.\"\"\"\n        pass\n\n    def load(self, init_path):\n        net_init_dict = torch.load(init_path)\n        self.load_state_dict(net_init_dict)\n\n    def save(self, out_path):\n        torch.save(self.state_dict(), out_path)\n\n@register_model('LeNet')\nclass LeNet(TaskNet):\n    \"Network used for MNIST or USPS experiments.\"    \n\n    num_channels = 1\n    image_size = 28\n    name = 'LeNet'\n    out_dim = 500 # dim of last feature layer\n\n    def setup_net(self):\n\n        self.conv_params = nn.Sequential(\n                nn.Conv2d(self.num_channels, 20, kernel_size=5),\n                nn.MaxPool2d(2),\n                nn.ReLU(),\n                nn.Conv2d(20, 50, kernel_size=5),\n                nn.Dropout2d(p=0.5),\n                nn.MaxPool2d(2),\n                nn.ReLU(),\n                )\n        \n        self.fc_params = nn.Linear(50*4*4, 500)\n        self.classifier = nn.Sequential(\n                nn.ReLU(),\n                nn.Dropout(p=0.5),\n                
nn.Linear(500, self.num_cls)\n                )\n\n\n@register_model('DTN')\nclass DTNClassifier(TaskNet):\n    \"Classifier used for SVHN->MNIST Experiment\"\n\n    num_channels = 3\n    image_size = 32\n    name = 'DTN'\n    out_dim = 512 # dim of last feature layer\n\n    def setup_net(self):\n        self.conv_params = nn.Sequential (\n                nn.Conv2d(self.num_channels, 64, kernel_size=5, stride=2, padding=2),\n                nn.BatchNorm2d(64),\n                nn.Dropout2d(0.1),\n                nn.ReLU(),\n                nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2),\n                nn.BatchNorm2d(128),\n                nn.Dropout2d(0.3),\n                nn.ReLU(),\n                nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2),\n                nn.BatchNorm2d(256),\n                nn.Dropout2d(0.5),\n                nn.ReLU()\n                )\n    \n        self.fc_params = nn.Sequential (\n                nn.Linear(256*4*4, 512),\n                nn.BatchNorm1d(512),\n                )\n\n        self.classifier = nn.Sequential(\n                nn.ReLU(),\n                nn.Dropout(),\n                nn.Linear(512, self.num_cls)\n                )\n"
  },
  {
    "path": "cycada/models/util.py",
    "content": "import torch.nn as nn\nfrom torch.nn import init\n\ndef init_weights(obj):\n    for m in obj.modules():\n        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n            init.xavier_normal_(m.weight)\n            m.bias.data.zero_()\n        elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\n            m.reset_parameters()\n"
  },
  {
    "path": "cycada/tools/__init__.py",
    "content": ""
  },
  {
    "path": "cycada/tools/train_adda_net.py",
    "content": "from __future__ import print_function\n\nimport os\nfrom os.path import join\nimport numpy as np\n\n# Import from torch\nimport torch\nimport torch.nn as nn \nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\n\n# Import from within Package \nfrom ..models.models import get_model\nfrom ..data.data_loader import load_data\nfrom ..tools.test_task_net import test\nfrom ..tools.util import make_variable\n\ndef train(loader_src, loader_tgt, net, opt_net, opt_dis, epoch):\n   \n    log_interval = 100 # specifies how often to display\n  \n    N = min(len(loader_src.dataset), len(loader_tgt.dataset)) \n    joint_loader = zip(loader_src, loader_tgt)\n      \n    net.train()\n   \n    last_update = -1\n    for batch_idx, ((data_s, _), (data_t, _)) in enumerate(joint_loader):\n        \n        # log basic adda train info\n        info_str = \"[Train Adda] Epoch: {} [{}/{} ({:.2f}%)]\".format(\n            epoch, batch_idx*len(data_t), N, 100 * batch_idx / N)\n   \n        ########################\n        # Setup data variables #\n        ########################\n        data_s = make_variable(data_s, requires_grad=False)\n        data_t = make_variable(data_t, requires_grad=False)\n        \n        ##########################\n        # Optimize discriminator #\n        ##########################\n\n        # zero gradients for optimizer\n        opt_dis.zero_grad()\n\n        # extract and concat features\n        score_s = net.src_net(data_s)\n        score_t = net.tgt_net(data_t)\n        f = torch.cat((score_s, score_t), 0)\n        \n        # predict with discriminator\n        pred_concat = net.discriminator(f)\n\n        # prepare real and fake labels: source=1, target=0\n        target_dom_s = make_variable(torch.ones(len(data_s)).long(), requires_grad=False)\n        target_dom_t = make_variable(torch.zeros(len(data_t)).long(), requires_grad=False)\n      
  label_concat = torch.cat((target_dom_s, target_dom_t), 0)\n\n        # compute loss for disciminator\n        loss_dis = net.gan_criterion(pred_concat, label_concat)\n        loss_dis.backward()\n\n        # optimize discriminator\n        opt_dis.step()\n\n        # compute discriminator acc\n        pred_dis = torch.squeeze(pred_concat.max(1)[1])\n        acc = (pred_dis == label_concat).float().mean()\n        \n        # log discriminator update info\n        info_str += \" acc: {:0.1f} D: {:.3f}\".format(acc.item()*100, loss_dis.item())\n\n        ###########################\n        # Optimize target network #\n        ###########################\n\n        # only update net if discriminator is strong\n        if acc.item() > 0.6:\n            \n            last_update = batch_idx\n        \n            # zero out optimizer gradients\n            opt_dis.zero_grad()\n            opt_net.zero_grad()\n\n            # extract target features\n            score_t = net.tgt_net(data_t)\n\n            # predict with discriinator\n            pred_tgt = net.discriminator(score_t)\n            \n            # create fake label\n            label_tgt = make_variable(torch.ones(pred_tgt.size(0)).long(), requires_grad=False)\n            \n            # compute loss for target network\n            loss_gan_t = net.gan_criterion(pred_tgt, label_tgt) \n            loss_gan_t.backward()\n\n            # optimize tgt network\n            opt_net.step()\n\n            # log net update info\n            info_str += \" G: {:.3f}\".format(loss_gan_t.item()) \n\n        ###########\n        # Logging #\n        ###########\n        if batch_idx % log_interval == 0:\n            print(info_str)\n\n    return last_update\n\n\ndef train_adda(src, tgt, model, num_cls, num_epoch=200,\n        batch=128, datadir=\"\", outdir=\"\", \n        src_weights=None, weights=None, lr=1e-5, betas=(0.9,0.999),\n        weight_decay=0):\n    \"\"\"Main function for training ADDA.\"\"\"\n\n    
###########################\n    # Setup cuda and networks #\n    ###########################\n\n    # setup cuda\n    if torch.cuda.is_available():\n        kwargs = {'num_workers': 1, 'pin_memory': True}\n    else:\n        kwargs = {}\n\n    # setup network \n    net = get_model('AddaNet', model=model, num_cls=num_cls,\n            src_weights_init=src_weights)\n    \n    # print network and arguments\n    print(net)\n    print('Training Adda {} model for {}->{}'.format(model, src, tgt))\n\n    #######################################\n    # Setup data for training and testing #\n    #######################################\n    train_src_data = load_data(src, 'train', batch=batch, \n        rootdir=join(datadir, src), num_channels=net.num_channels, \n        image_size=net.image_size, download=True, kwargs=kwargs)\n    train_tgt_data = load_data(tgt, 'train', batch=batch, \n        rootdir=join(datadir, tgt), num_channels=net.num_channels, \n        image_size=net.image_size, download=True, kwargs=kwargs)\n\n    ######################\n    # Optimization setup #\n    ######################\n \n    net_param = net.tgt_net.parameters()\n    opt_net = optim.Adam(net_param, lr=lr, weight_decay=weight_decay, betas=betas)\n    opt_dis = optim.Adam(net.discriminator.parameters(), lr=lr, \n            weight_decay=weight_decay, betas=betas)\n\n    ##############\n    # Train Adda #\n    ##############\n    for epoch in range(num_epoch):\n        err = train(train_src_data, train_tgt_data, net, opt_net, opt_dis, epoch) \n        if err == -1:\n            print(\"No suitable discriminator\")\n            break\n       \n    ##############\n    # Save Model #\n    ##############\n    os.makedirs(outdir, exist_ok=True)\n    outfile = join(outdir, 'adda_{:s}_net_{:s}_{:s}.pth'.format(\n        model, src, tgt))\n    print('Saving to', outfile)\n    net.save(outfile)\n\n"
  },
  {
    "path": "cycada/tools/train_task_net.py",
    "content": "from __future__ import print_function\n\nimport os\nfrom os.path import join\nimport numpy as np\nimport argparse\n\n# Import from torch\nimport torch\nimport torch.nn as nn \nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\n\n# Import from Cycada Package \nfrom ..models.models import get_model\nfrom ..data.data_loader import load_data\nfrom .test_task_net import test\nfrom .util import make_variable\n\ndef train_epoch(loader, net, opt_net, epoch):\n    log_interval = 100 # specifies how often to display\n    net.train()\n    for batch_idx, (data, target) in enumerate(loader):\n\n        # make data variables\n        data = make_variable(data, requires_grad=False)\n        target = make_variable(target, requires_grad=False)\n        \n        # zero out gradients\n        opt_net.zero_grad()\n       \n        # forward pass\n        score = net(data)\n        loss = net.criterion(score, target)\n        \n        # backward pass\n        loss.backward()\n        \n        # optimize classifier and representation\n        opt_net.step()\n       \n        # Logging\n        if batch_idx % log_interval == 0:\n            print('[Train] Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n                epoch, batch_idx * len(data), len(loader.dataset),\n                100. 
* batch_idx / len(loader), loss.item()), end=\"\")\n            pred = score.data.max(1)[1]\n            correct = pred.eq(target.data).cpu().sum()\n            acc = correct.item() / len(pred) * 100.0\n            print('  Acc: {:.2f}'.format(acc))\n\n\ndef train(data, datadir, model, num_cls, outdir='', \n        num_epoch=100, batch=128, \n        lr=1e-4, betas=(0.9, 0.999), weight_decay=0):\n    \"\"\"Train a classification net and evaluate on test set.\"\"\"\n\n    # Setup GPU Usage\n    if torch.cuda.is_available(): \n        kwargs = {'num_workers': 1, 'pin_memory': True}\n    else:\n        kwargs = {}\n\n    ############\n    # Load Net #\n    ############\n    net = get_model(model, num_cls=num_cls)\n    print('-------Training net--------')\n    print(net)\n\n    ############################\n    # Load train and test data # \n    ############################\n    train_data = load_data(data, 'train', batch=batch, \n        rootdir=datadir, num_channels=net.num_channels, \n        image_size=net.image_size, download=True, kwargs=kwargs)\n    \n    test_data = load_data(data, 'test', batch=batch, \n        rootdir=datadir, num_channels=net.num_channels, \n        image_size=net.image_size, download=True, kwargs=kwargs)\n   \n    ###################\n    # Setup Optimizer #\n    ###################\n    opt_net = optim.Adam(net.parameters(), lr=lr, betas=betas, \n            weight_decay=weight_decay)\n    \n    #########\n    # Train #\n    #########\n    print('Training {} model for {}'.format(model, data))\n    for epoch in range(num_epoch):\n        train_epoch(train_data, net, opt_net, epoch)\n    \n    ########\n    # Test #\n    ########\n    if test_data is not None:\n        print('Evaluating {}-{} model on {} test set'.format(model, data, data))\n        test(test_data, net)\n\n    ############\n    # Save net #\n    ############\n    os.makedirs(outdir, exist_ok=True)\n    outfile = join(outdir, '{:s}_net_{:s}.pth'.format(model, data))\n    
print('Saving to', outfile)\n    net.save(outfile)\n\n    return net\n"
  },
  {
    "path": "cycada/tools/util.py",
    "content": "from functools import partial\n\nimport torch\nfrom torch.autograd import Variable\n\n\ndef make_variable(tensor, volatile=False, requires_grad=True):\n\tif torch.cuda.is_available():\n\t\ttensor = tensor.cuda()\n\tif volatile:\n\t\trequires_grad = False\n\treturn Variable(tensor, volatile=volatile, requires_grad=requires_grad)\n\n\ndef pairwise_distance(x, y):\n\tif not len(x.shape) == len(y.shape):\n\t\traise ValueError('Both inputs should be matrices.')\n\t\n\tif x.shape[1] != y.shape[1]:\n\t\traise ValueError('The number of features should be the same.')\n\t\n\tx = x.view(x.shape[0], x.shape[1], 1)\n\ty = torch.transpose(y, 0, 1)\n\toutput = torch.sum((x - y) ** 2, 1)\n\toutput = torch.transpose(output, 0, 1)\n\t\n\treturn output\n\n\ndef gaussian_kernel_matrix(x, y, sigmas):\n\tsigmas = sigmas.view(sigmas.shape[0], 1)\n\tbeta = 1. / (2. * sigmas)\n\tdist = pairwise_distance(x, y).contiguous()\n\tdist_ = dist.view(1, -1)\n\ts = torch.matmul(beta, dist_)\n\t\n\treturn torch.sum(torch.exp(-s), 0).view_as(dist)\n\n\ndef maximum_mean_discrepancy(x, y, kernel=gaussian_kernel_matrix):\n\tcost = torch.mean(kernel(x, x))\n\tcost += torch.mean(kernel(y, y))\n\tcost -= 2 * torch.mean(kernel(x, y))\n\t\n\treturn cost\n\n\ndef mmd_loss(source_features, target_features):\n\tsigmas = [\n\t\t1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100,\n\t\t1e3, 1e4, 1e5, 1e6\n\t]\n\tgaussian_kernel = partial(\n\t\tgaussian_kernel_matrix, sigmas=Variable(torch.cuda.FloatTensor(sigmas))\n\t)\n\tloss_value = maximum_mean_discrepancy(source_features, target_features, kernel=gaussian_kernel)\n\tloss_value = loss_value\n\t\n\treturn loss_value\n"
  },
  {
    "path": "cycada/transforms.py",
    "content": "\"\"\"These random transforms extend the transforms provided in torchvision to\nallow for transforming multiple images at the same time. This ensures that the\nimages receive the same transformation, e.g. the provided images are either all\nmirrored or all left unchanged.\n\nFor example, this is useful in segmentation tasks, where a transformation to the\nimage necessitates that same transformation on the label.\n\"\"\"\n\nimport numbers\nimport random\n\nimport torch\nimport torchvision\n\n\nclass RandomCrop(object):\n\t\"\"\"Crops the given PIL.Image at a random location to have a region of\n\tthe given size. size can be a tuple (target_height, target_width)\n\tor an integer, in which case the target will be of a square shape (size, size)\n\t\"\"\"\n\t\n\tdef __init__(self, size):\n\t\tif isinstance(size, numbers.Number):\n\t\t\tself.size = (int(size), int(size))\n\t\telse:\n\t\t\tself.size = size\n\t\n\tdef __call__(self, tensors):\n\t\toutput = []\n\t\th, w = None, None\n\t\tth, tw = self.size\n\t\tfor tensor in tensors:\n\t\t\tif h is None and w is None:\n\t\t\t\t_, h, w = tensor.size()\n\t\t\telif tensor.size()[-2:] != (h, w):\n\t\t\t\tprint(tensor.size(), (h, w))\n\t\t\t\traise ValueError('Images must be same size')\n\t\tif w == tw and h == th:\n\t\t\treturn tensors\n\t\tx1 = random.randint(0, w - tw)\n\t\ty1 = random.randint(0, h - th)\n\t\tfor tensor in tensors:\n\t\t\toutput.append(tensor[..., y1:y1 + th, x1:x1 + tw].contiguous())\n\t\treturn output\n\n\nclass HalfCrop(object):\n\t\"\"\"Crops halt the given PIL.Image randomly takes left or right to have a region of\n\tthe given size. 
size can be a tuple (target_height, target_width)\n\tor an integer, in which case the target will be of a square shape (size, size)\n\t\"\"\"\n\t\n\tdef __call__(self, tensors):\n\t\toutput = []\n\t\ttw = tensors[0].size(-1)\n\t\ttw_half = tw // 2\n\t\tleft_side = random.randint(0, 1)\n\t\tx1 = 0 + left_side * tw_half  # random.randint(0, w - tw)\n\t\tfor tensor in tensors:\n\t\t\toutput.append(tensor[..., x1:x1 + tw_half].contiguous())\n\t\treturn output\n\n\nclass RandomHorizontalFlip(object):\n\t\"\"\"Randomly horizontally flips the given PIL.Image with a probability of 0.5\n\t\"\"\"\n\t\n\tdef __call__(self, tensors):\n\t\tif random.random() < 0.5:\n\t\t\toutput = []\n\t\t\tfor tensor in tensors:\n\t\t\t\tindices = torch.arange(tensor.size(-1) - 1, -1, -1).long()\n\t\t\t\toutput.append(tensor.index_select(-1, indices))\n\t\t\treturn output\n\t\treturn tensors\n\n\ndef augment_collate(batch, crop=None, halfcrop=None, flip=True, resize=None):\n\ttransforms = []\n\tif crop is not None:\n\t\ttransforms.append(RandomCrop(crop))\n\tif halfcrop is not None:\n\t\ttransforms.append(HalfCrop())\n\tif flip:\n\t\ttransforms.append(RandomHorizontalFlip())\n\t\n\ttransform = torchvision.transforms.Compose(transforms)\n\tbatch = [transform(x) for x in batch]\n\treturn torch.utils.data.dataloader.default_collate(batch)\n"
  },
  {
    "path": "cycada/util.py",
    "content": "import logging\nimport logging.config\nimport os.path\nfrom collections import OrderedDict\n\nimport numpy as np\nimport torch\nimport yaml\nfrom torch.nn.parameter import Parameter\nfrom tqdm import tqdm\n\n\nclass TqdmHandler(logging.StreamHandler):\n\n    def __init__(self):\n        logging.StreamHandler.__init__(self)\n\n    def emit(self, record):\n        msg = self.format(record)\n        tqdm.write(msg)\n\n\ndef config_logging(logfile=None):\n    path = os.path.join(os.path.dirname(__file__), 'logging.yml')\n    with open(path, 'r') as f:\n        config = yaml.load(f.read())\n    if logfile is None:\n        del config['handlers']['file_handler']\n        del config['root']['handlers'][-1]\n    else:\n        config['handlers']['file_handler']['filename'] = logfile\n    logging.config.dictConfig(config)\n\n\ndef to_tensor_raw(im):\n    return torch.from_numpy(np.array(im, np.int64, copy=False))\n\n\ndef safe_load_state_dict(net, state_dict):\n    \"\"\"Copies parameters and buffers from :attr:`state_dict` into\n    this module and its descendants. 
Any params in :attr:`state_dict`\n    that do not match the keys returned by :attr:`net`'s :func:`state_dict()`\n    method or have differing sizes are skipped.\n\n    Arguments:\n        state_dict (dict): A dict containing parameters and\n            persistent buffers.\n    \"\"\"\n    own_state = net.state_dict()\n    skipped = []\n    for name, param in state_dict.items():\n        if name not in own_state:\n            skipped.append(name)\n            continue\n        if isinstance(param, Parameter):\n            # backwards compatibility for serialized parameters\n            param = param.data\n        if own_state[name].size() != param.size():\n            skipped.append(name)\n            continue\n        own_state[name].copy_(param)\n\n    if skipped:\n        logging.info('Skipped loading some parameters: {}'.format(skipped))\n\ndef step_lr(optimizer, mult):\n    for param_group in optimizer.param_groups:\n        lr = param_group['lr']\n        param_group['lr'] = lr * mult\n"
  },
  {
    "path": "cyclegan/.gitignore",
    "content": ".DS_Store\ndebug*\ncheckpoints/\nresults/\nbuild/\ndist/\n*.png\ntorch.egg-info/\n*/**/__pycache__\ntorch/version.py\ntorch/csrc/generic/TensorMethods.cpp\ntorch/lib/*.so*\ntorch/lib/*.dylib*\ntorch/lib/*.h\ntorch/lib/build\ntorch/lib/tmp_install\ntorch/lib/include\ntorch/lib/torch_shm_manager\ntorch/csrc/cudnn/cuDNN.cpp\ntorch/csrc/nn/THNN.cwrap\ntorch/csrc/nn/THNN.cpp\ntorch/csrc/nn/THCUNN.cwrap\ntorch/csrc/nn/THCUNN.cpp\ntorch/csrc/nn/THNN_generic.cwrap\ntorch/csrc/nn/THNN_generic.cpp\ntorch/csrc/nn/THNN_generic.h\ndocs/src/**/*\ntest/data/legacy_modules.t7\ntest/data/gpu_tensors.pt\ntest/htmlcov\ntest/.coverage\n*/*.pyc\n*/**/*.pyc\n*/**/**/*.pyc\n*/**/**/**/*.pyc\n*/**/**/**/**/*.pyc\n*/*.so*\n*/**/*.so*\n*/**/*.dylib*\ntest/data/legacy_serialized.pt\n*~\n.idea\n"
  },
  {
    "path": "cyclegan/data/__init__.py",
    "content": "import sys\n\nimport torch.utils.data\nfrom data.base_data_loader import BaseDataLoader\n\nsys.path.append('/nfs/project/libo_i/MADAN')\nfrom cycada.transforms import augment_collate\n\n\ndef CreateDataLoader(opt):\n\tdata_loader = CustomDatasetDataLoader()\n\tprint(data_loader.name())\n\tdata_loader.initialize(opt)\n\treturn data_loader\n\n\ndef CreateDataset(opt):\n\tdataset = None\n\tif opt.dataset_mode == 'synthia_cityscapes':\n\t\tfrom data.synthia_cityscapes import SynthiaCityscapesDataset\n\t\tdataset = SynthiaCityscapesDataset()\n\telif opt.dataset_mode == 'gta5_cityscapes':\n\t\tfrom data.gta5_cityscapes import GTAVCityscapesDataset\n\t\tdataset = GTAVCityscapesDataset()\n\telif opt.dataset_mode == 'gta_synthia_cityscapes':\n\t\tfrom data.gta_synthia_cityscapes import GTASynthiaCityscapesDataset\n\t\tdataset = GTASynthiaCityscapesDataset()\n\telif opt.dataset_mode == 'merged_gta_synthia_cityscapes':\n\t\tfrom data.merged_gta_synthia_cityscapes import MergedGTASynthiaCityscapesDataset\n\t\tdataset = MergedGTASynthiaCityscapesDataset()\n\telse:\n\t\traise ValueError(\"Dataset [%s] not recognized.\" % opt.dataset_mode)\n\t\n\tprint(\"dataset [%s] was created\" % (dataset.name()))\n\tdataset.initialize(opt)\n\treturn dataset\n\n\nclass CustomDatasetDataLoader(BaseDataLoader):\n\tdef name(self):\n\t\treturn 'CustomDatasetDataLoader'\n\t\n\tdef initialize(self, opt):\n\t\tBaseDataLoader.initialize(self, opt)\n\t\tself.dataset = CreateDataset(opt)\n\t\tself.dataloader = torch.utils.data.DataLoader(\n\t\t\tself.dataset,\n\t\t\tbatch_size=opt.batchSize,\n\t\t\tshuffle=not opt.serial_batches,\n\t\t\tnum_workers=int(opt.nThreads))\n\t\n\tdef load_data(self):\n\t\treturn self\n\t\n\tdef __len__(self):\n\t\treturn min(len(self.dataset), self.opt.max_dataset_size)\n\t\n\tdef __iter__(self):\n\t\tfor i, data in enumerate(self.dataloader):\n\t\t\tif i * self.opt.batchSize >= self.opt.max_dataset_size:\n\t\t\t\tbreak\n\t\t\tyield data\n"
  },
  {
    "path": "cyclegan/data/base_data_loader.py",
    "content": "class BaseDataLoader():\n    def __init__(self):\n        pass\n\n    def initialize(self, opt):\n        self.opt = opt\n        pass\n\n    def load_data():\n        return None\n"
  },
  {
    "path": "cyclegan/data/base_dataset.py",
    "content": "import numpy as np\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nfrom PIL import Image\n\n\nclass BaseDataset(data.Dataset):\n\tdef __init__(self):\n\t\tsuper(BaseDataset, self).__init__()\n\t\n\tdef name(self):\n\t\treturn 'BaseDataset'\n\t\n\tdef initialize(self, opt):\n\t\tpass\n\n\n# TODO: add the crop part\ndef get_transform(opt):\n\ttransform_list = []\n\tif opt.resize_or_crop == 'resize_and_crop':\n\t\tosize = [int(opt.loadSize), int(opt.loadSize)]\n\t\ttransform_list.append(transforms.Resize(osize, interpolation=Image.BICUBIC))\n\t\ttransform_list.append(transforms.RandomCrop(opt.fineSize))\n\telif opt.resize_or_crop == 'resize_only':\n\t\tosize = [int(opt.loadSize), int(opt.loadSize)]\n\t\ttransform_list.append(transforms.Resize(osize, interpolation=Image.BICUBIC))\n\telif opt.resize_or_crop == 'crop':\n\t\ttransform_list.append(transforms.RandomCrop(opt.fineSize))\n\telif opt.resize_or_crop == 'scale_width':\n\t\ttransform_list.append(transforms.Resize(opt.loadSize, interpolation=Image.BICUBIC))\n\telif opt.resize_or_crop == 'scale_width_and_crop':\n\t\ttransform_list.append(transforms.Resize(opt.loadSize, interpolation=Image.BICUBIC))\n\t\ttransform_list.append(transforms.RandomCrop(opt.fineSize))\n\t\n\tif opt.isTrain and not opt.no_flip:\n\t\ttransform_list.append(transforms.RandomHorizontalFlip())\n\t\n\ttransform_list += [transforms.ToTensor(),\n\t                   transforms.Normalize((0.5, 0.5, 0.5),\n\t                                        (0.5, 0.5, 0.5))]\n\treturn transforms.Compose(transform_list)\n\n\ndef get_label_transform(opt):\n\ttransform_list = []\n\tif opt.resize_or_crop == 'resize_and_crop':\n\t\tosize = [opt.loadSize, opt.loadSize]\n\t\ttransform_list.append(transforms.Resize(osize, interpolation=Image.NEAREST))\n\t\ttransform_list.append(transforms.RandomCrop(opt.fineSize))\n\telif opt.resize_or_crop == 'resize_only':\n\t\tosize = [opt.loadSize, opt.loadSize]\n\t\ttransform_list.append(transforms.Resize(osize, interpolation=Image.NEAREST))\n\telif opt.resize_or_crop == 'crop':\n\t\ttransform_list.append(transforms.RandomCrop(opt.fineSize))\n\telif opt.resize_or_crop == 'scale_width':\n\t\ttransform_list.append(transforms.Resize(opt.loadSize, interpolation=Image.NEAREST))\n\telif opt.resize_or_crop == 'scale_width_and_crop':\n\t\ttransform_list.append(transforms.Resize(opt.loadSize, interpolation=Image.NEAREST))\n\t\ttransform_list.append(transforms.RandomCrop(opt.fineSize))\n\t# transform_list.append(transforms.RandomCrop(opt.fineSize))\n\t\n\tif opt.isTrain and not opt.no_flip:\n\t\ttransform_list.append(transforms.RandomHorizontalFlip())\n\t\n\ttransform_list.append(transforms.Lambda(lambda img: to_tensor_raw(img)))\n\treturn transforms.Compose(transform_list)\n\n\ndef __scale_width(img, target_width):\n\tow, oh = img.size\n\tif (ow == target_width):\n\t\treturn img\n\tw = target_width\n\th = int(target_width * oh / ow)\n\treturn img.resize((w, h), Image.BICUBIC)\n\n\ndef to_tensor_raw(im):\n\treturn torch.from_numpy(np.array(im, np.int64, copy=False))\n"
  },
  {
    "path": "cyclegan/data/cityscapes.py",
    "content": "import numpy as np\n\nignore_label = 255\nid2label = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,\n            3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,\n            7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,\n            14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,\n            18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,\n            28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}\npalette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,\n           220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,\n           0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]\nclasses = ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign',\n           'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',\n           'bicycle']\n\n\ndef remap_labels_to_train_ids(arr):\n\tout = ignore_label * np.ones(arr.shape, dtype=np.uint8)\n\tfor id, label in id2label.items():\n\t\tout[arr == id] = int(label)\n\treturn out\n"
  },
  {
    "path": "cyclegan/data/gta5_cityscapes.py",
    "content": "import os.path\nimport random\n\nimport numpy as np\nfrom PIL import Image\nfrom data.base_dataset import BaseDataset, get_label_transform, get_transform\nfrom data.cityscapes import remap_labels_to_train_ids\nfrom data.image_folder import make_cs_labels, make_dataset\n\nignore_label = 255\nid2label = {0: ignore_label,\n            1: 10,\n            2: 2,\n            3: 0,\n            4: 1,\n            5: 4,\n            6: 8,\n            7: 5,\n            8: 13,\n            9: 7,\n            10: 11,\n            11: 18,\n            12: 17,\n            13: ignore_label,\n            14: ignore_label,\n            15: 6,\n            16: 9,\n            17: 12,\n            18: 14,\n            19: 15,\n            20: 16,\n            21: 3,\n            22: ignore_label}\n\nclasses = ['road',\n           'sidewalk',\n           'building',\n           'wall',\n           'fence',\n           'pole',\n           'traffic light',\n           'traffic sign',\n           'vegetation',\n           'terrain',\n           'sky',\n           'person',\n           'rider',\n           'car',\n           'truck',\n           'bus',\n           'train',\n           'motorcycle',\n           'bicycle']\n\n\n# This dataset is used to conduct GTA->CityScapes images transfer procedure.\nclass GTAVCityscapesDataset(BaseDataset):\n\tdef initialize(self, opt):\n\t\tself.opt = opt\n\t\tself.root = opt.dataroot\n\t\tself.dir_A = os.path.join(opt.dataroot, 'gta5', 'images')\n\t\tself.dir_B = os.path.join(opt.dataroot, 'cityscapes', 'leftImg8bit')\n\t\tself.dir_A_label = os.path.join(opt.dataroot, 'gta5', 'labels')\n\t\tself.dir_B_label = os.path.join(opt.dataroot, 'cityscapes', 'gtFine')\n\t\t\n\t\tself.A_paths = make_dataset(self.dir_A)\n\t\tself.B_paths = make_dataset(self.dir_B)\n\t\t\n\t\tself.A_paths = sorted(self.A_paths)\n\t\tself.B_paths = sorted(self.B_paths)\n\t\tself.A_size = len(self.A_paths)\n\t\tself.B_size = 
len(self.B_paths)\n\t\t\n\t\tself.A_labels = make_dataset(self.dir_A_label)\n\t\tself.B_labels = make_cs_labels(self.dir_B_label)\n\t\t\n\t\tself.A_labels = sorted(self.A_labels)\n\t\tself.B_labels = sorted(self.B_labels)\n\t\t\n\t\tself.transform = get_transform(opt)\n\t\tself.label_transform = get_label_transform(opt)\n\t\n\tdef __getitem__(self, index):\n\t\tA_path = self.A_paths[index % self.A_size]\n\t\tif self.opt.serial_batches:\n\t\t\tindex_B = index % self.B_size\n\t\telse:\n\t\t\tindex_B = random.randint(0, self.B_size - 1)\n\t\tB_path = self.B_paths[index_B]\n\t\t\n\t\tA_label_path = self.A_labels[index % self.A_size]\n\t\tB_label_path = self.B_labels[index_B]\n\t\t\n\t\tA_label = Image.open(A_label_path)\n\t\tB_label = Image.open(B_label_path)\n\t\t\n\t\tA_label = np.asarray(A_label)\n\t\tA_label = remap_labels_to_train_ids(A_label)\n\t\t\n\t\tA_label = Image.fromarray(A_label, 'L')\n\t\tB_label = np.asarray(B_label)\n\t\tB_label = remap_labels_to_train_ids(B_label)\n\t\tB_label = Image.fromarray(B_label, 'L')\n\t\t\n\t\tA_img = Image.open(A_path).convert('RGB')\n\t\tB_img = Image.open(B_path).convert('RGB')\n\t\t\n\t\tA = self.transform(A_img)\n\t\tB = self.transform(B_img)\n\t\t\n\t\tA_label = self.label_transform(A_label)\n\t\tB_label = self.label_transform(B_label)\n\t\t\n\t\t# print(A_label.unique())\n\t\t# print(B_label.unique())\n\t\t\n\t\tif self.opt.which_direction == 'BtoA':\n\t\t\tinput_nc = self.opt.output_nc\n\t\t\toutput_nc = self.opt.input_nc\n\t\telse:\n\t\t\tinput_nc = self.opt.input_nc\n\t\t\toutput_nc = self.opt.output_nc\n\t\t\n\t\tif input_nc == 1:  # RGB to gray\n\t\t\ttmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114\n\t\t\tA = tmp.unsqueeze(0)\n\t\t\n\t\tif output_nc == 1:  # RGB to gray\n\t\t\ttmp = B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] 
* 0.114\n\t\t\tB = tmp.unsqueeze(0)\n\t\treturn {'A': A, 'B': B,\n\t\t        'A_paths': A_path, 'B_paths': B_path, 'A_label': A_label, 'B_label': B_label}\n\t\n\tdef __len__(self):\n\t\treturn max(self.A_size, self.B_size)\n\t\n\tdef name(self):\n\t\treturn 'GTA5_Cityscapes'\n"
  },
  {
    "path": "cyclegan/data/gta_synthia_cityscapes.py",
    "content": "import os.path\nimport random\n\nimport numpy as np\nfrom PIL import Image\nfrom data.base_dataset import BaseDataset, get_label_transform, get_transform\nfrom data.cityscapes import remap_labels_to_train_ids\nfrom data.image_folder import make_cs_labels, make_dataset\n\nignore_label = 255\nid2label = {0: ignore_label,\n            1: 10,\n            2: 2,\n            3: 0,\n            4: 1,\n            5: 4,\n            6: 8,\n            7: 5,\n            8: 13,\n            9: 7,\n            10: 11,\n            11: 18,\n            12: 17,\n            13: ignore_label,\n            14: ignore_label,\n            15: 6,\n            16: 9,\n            17: 12,\n            18: 14,\n            19: 15,\n            20: 16,\n            21: 3,\n            22: ignore_label}\n\nclasses = ['road',\n           'sidewalk',\n           'building',\n           'wall',\n           'fence',\n           'pole',\n           'traffic light',\n           'traffic sign',\n           'vegetation',\n           'terrain',\n           'sky',\n           'person',\n           'rider',\n           'car',\n           'truck',\n           'bus',\n           'train',\n           'motorcycle',\n           'bicycle']\n\n\ndef syn_relabel(arr):\n\tout = ignore_label * np.ones(arr.shape, dtype=np.uint8)\n\tfor id, label in id2label.items():\n\t\tout[arr == id] = int(label)\n\treturn out\n\n# This dataset is used to conduct double cyclegan for both GTAV->CityScapes and Synthia->CityScapes\nclass GTASynthiaCityscapesDataset(BaseDataset):\n\tdef initialize(self, opt):\n\t\t# SYNTHIA as dataset 1\n\t\t# GTAV as dataset 2\n\t\tself.opt = opt\n\t\tself.root = opt.dataroot\n\t\tself.dir_A_1 = os.path.join(opt.dataroot, 'synthia', 'RGB')\n\t\tself.dir_A_2 = os.path.join(opt.dataroot, 'gta5', 'images')\n\t\tself.dir_B = os.path.join(opt.dataroot, 'cityscapes', 'leftImg8bit')\n\t\tself.dir_A_label_1 = os.path.join(opt.dataroot, 'synthia', 'GT', 
'parsed_LABELS')\n\t\tself.dir_A_label_2 = os.path.join(opt.dataroot, 'gta5', 'labels')\n\t\t\n\t\tself.A_paths_1 = make_dataset(self.dir_A_1)\n\t\tself.A_paths_2 = make_dataset(self.dir_A_2)\n\t\tself.B_paths = make_dataset(self.dir_B)\n\t\t\n\t\tself.A_paths_1 = sorted(self.A_paths_1)\n\t\tself.A_paths_2 = sorted(self.A_paths_2)\n\t\t\n\t\tself.B_paths = sorted(self.B_paths)\n\t\t\n\t\tself.A_size_1 = len(self.A_paths_1)\n\t\tself.A_size_2 = len(self.A_paths_2)\n\t\t\n\t\tself.B_size = len(self.B_paths)\n\t\t\n\t\tself.A_labels_1 = make_dataset(self.dir_A_label_1)\n\t\tself.A_labels_2 = make_dataset(self.dir_A_label_2)\n\t\t\n\t\tself.A_labels_1 = sorted(self.A_labels_1)\n\t\tself.A_labels_2 = sorted(self.A_labels_2)\n\t\t\n\t\tself.transform = get_transform(opt)\n\t\tself.label_transform = get_label_transform(opt)\n\t\n\tdef __getitem__(self, index):\n\t\tA_path_1 = self.A_paths_1[index % self.A_size_1]\n\t\tA_path_2 = self.A_paths_2[index % self.A_size_2]\n\t\t\n\t\tif self.opt.serial_batches:\n\t\t\tindex_B = index % self.B_size\n\t\telse:\n\t\t\tindex_B = random.randint(0, self.B_size - 1)\n\t\t\n\t\tB_path = self.B_paths[index_B]\n\t\t\n\t\tA_label_path_1 = self.A_labels_1[index % self.A_size_1]\n\t\tA_label_path_2 = self.A_labels_2[index % self.A_size_2]\n\t\t\n\t\tA_label_1 = Image.open(A_label_path_1)\n\t\tA_label_2 = Image.open(A_label_path_2)\n\t\t\n\t\t# remaping label for synthia\n\t\tA_label_1 = np.asarray(A_label_1)\n\t\tA_label_1 = syn_relabel(A_label_1)\n\t\tA_label_1 = Image.fromarray(A_label_1, 'L')\n\t\t\n\t\t# remaping label for gta5\n\t\t\n\t\tA_label_2 = np.asarray(A_label_2)\n\t\tA_label_2 = remap_labels_to_train_ids(A_label_2)\n\t\tA_label_2 = Image.fromarray(A_label_2, 'L')\n\t\t\n\t\tA_img_1 = Image.open(A_path_1).convert('RGB')\n\t\tA_img_2 = Image.open(A_path_2).convert('RGB')\n\t\t\n\t\tB_img = Image.open(B_path).convert('RGB')\n\t\t\n\t\tA_1 = self.transform(A_img_1)\n\t\tA_2 = self.transform(A_img_2)\n\t\t\n\t\tB = 
self.transform(B_img)\n\t\t\n\t\tA_label_1 = self.label_transform(A_label_1)\n\t\tA_label_2 = self.label_transform(A_label_2)\n\t\t\n\t\tif self.opt.which_direction == 'BtoA':\n\t\t\tinput_nc = self.opt.output_nc\n\t\t\toutput_nc = self.opt.input_nc\n\t\telse:\n\t\t\tinput_nc = self.opt.input_nc\n\t\t\toutput_nc = self.opt.output_nc\n\t\t\n\t\tif input_nc == 1:  # RGB to gray\n\t\t\ttmp = A_1[0, ...] * 0.299 + A_1[1, ...] * 0.587 + A_1[2, ...] * 0.114\n\t\t\tA_1 = tmp.unsqueeze(0)\n\t\t\t\n\t\t\ttmp = A_2[0, ...] * 0.299 + A_2[1, ...] * 0.587 + A_2[2, ...] * 0.114\n\t\t\tA_2 = tmp.unsqueeze(0)\n\t\t\n\t\tif output_nc == 1:  # RGB to gray\n\t\t\ttmp = B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] * 0.114\n\t\t\tB = tmp.unsqueeze(0)\n\t\t\n\t\treturn {'A_1': A_1, 'A_2': A_2, 'B': B, 'A_paths_1': A_path_1, 'A_paths_2': A_path_2, 'B_paths': B_path, 'A_label_1': A_label_1,\n\t\t        'A_label_2': A_label_2}\n\t\n\tdef __len__(self):\n\t\treturn max(self.A_size_1, self.B_size, self.A_size_2)\n\t\n\tdef name(self):\n\t\treturn 'GTA5_Synthia_Cityscapes'\n"
  },
  {
    "path": "cyclegan/data/image_folder.py",
    "content": "###############################################################################\n# Code from\n# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py\n# Modified the original code so that it also loads images from the current\n# directory as well as the subdirectories\n###############################################################################\n\nimport torch.utils.data as data\n\nimport numpy as np\nfrom PIL import Image\nimport os\nimport os.path\n\nIMG_EXTENSIONS = [\n    '.jpg', '.JPG', '.jpeg', '.JPEG',\n    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',\n]\n\n\ndef is_image_file(filename):\n    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\ndef make_cs_labels(dir):\n    images = []\n    assert os.path.isdir(dir), '%s is not a valid directory' % dir\n\n    for root, _, fnames in sorted(os.walk(dir)):\n        for fname in fnames:\n            if is_image_file(fname):\n                path = os.path.join(root, fname)\n                if path.endswith(\"_gtFine_labelIds.png\"):\n                    images.append(path)\n\n    return list(set(images))\n\ndef make_dataset(dir):\n    images = []\n    assert os.path.isdir(dir), '%s is not a valid directory' % dir\n\n    for root, _, fnames in sorted(os.walk(dir)):\n        for fname in fnames:\n            if is_image_file(fname):\n                path = os.path.join(root, fname)\n                images.append(path)\n\n    return list(set(images))\n\ndef load_labels(dir, images):\n    if os.path.exists(os.path.join(dir, 'labels.txt')):\n        with open(os.path.join(dir, 'labels.txt'), 'r') as f:\n            data = f.read().splitlines()\n        parse = np.array([(x.split(' ')[0], int(x.split(' ')[1])) for x in data])\n        label_dict = dict(parse)\n        labels = []\n        for image in images:\n            im_id = image.split('/')[-1].split('.')[0]\n            labels.append(label_dict[im_id])\n        return labels\n    elif os.path.isdir(os.path.join(dir, 'labels')):\n        raise Exception('Not yet implemented load_labels for image folder')\n    else:\n        raise Exception('load_labels expects %s to contain labels.txt or labels folder' % dir)\n\ndef default_loader(path):\n    return Image.open(path).convert('RGB')\n\n\nclass ImageFolder(data.Dataset):\n\n    def __init__(self, root, transform=None, return_paths=False,\n                 loader=default_loader):\n        imgs = make_dataset(root)\n        if len(imgs) == 0:\n            raise(RuntimeError(\"Found 0 images in: \" + root + \"\\n\"\n                               \"Supported image extensions are: \" +\n                               \",\".join(IMG_EXTENSIONS)))\n\n        self.root = root\n        self.imgs = imgs\n        self.transform = transform\n        self.return_paths = return_paths\n        self.loader = loader\n\n    def __getitem__(self, index):\n        path = self.imgs[index]\n        img = self.loader(path)\n        if self.transform is not None:\n            img = self.transform(img)\n        if self.return_paths:\n            return img, path\n        else:\n            return img\n\n    def __len__(self):\n        return len(self.imgs)\n"
  },
  {
    "path": "cyclegan/data/synthia_cityscapes.py",
    "content": "import os.path\nimport random\n\nimport numpy as np\nfrom PIL import Image\nfrom data.base_dataset import BaseDataset, get_label_transform, get_transform\nfrom data.image_folder import make_cs_labels, make_dataset\n\nfrom data.cityscapes import remap_labels_to_train_ids\n\nignore_label = 255\nid2label = {0: ignore_label,\n            1: 10,\n            2: 2,\n            3: 0,\n            4: 1,\n            5: 4,\n            6: 8,\n            7: 5,\n            8: 13,\n            9: 7,\n            10: 11,\n            11: 18,\n            12: 17,\n            13: ignore_label,\n            14: ignore_label,\n            15: 6,\n            16: 9,\n            17: 12,\n            18: 14,\n            19: 15,\n            20: 16,\n            21: 3,\n            22: ignore_label}\n\nclasses = ['road',\n           'sidewalk',\n           'building',\n           'wall',\n           'fence',\n           'pole',\n           'traffic light',\n           'traffic sign',\n           'vegetation',\n           'terrain',\n           'sky',\n           'person',\n           'rider',\n           'car',\n           'truck',\n           'bus',\n           'train',\n           'motorcycle',\n           'bicycle']\n\n\ndef syn_relabel(arr):\n\tout = ignore_label * np.ones(arr.shape, dtype=np.uint8)\n\tfor id, label in id2label.items():\n\t\tout[arr == id] = int(label)\n\treturn out\n\n\nclass SynthiaCityscapesDataset(BaseDataset):\n\tdef initialize(self, opt):\n\t\tself.opt = opt\n\t\tself.root = opt.dataroot\n\t\tself.dir_A = os.path.join(opt.dataroot, 'synthia', 'RGB')\n\t\tself.dir_B = os.path.join(opt.dataroot, 'cityscapes', 'leftImg8bit')\n\t\tself.dir_A_label = os.path.join(opt.dataroot, 'synthia', 'GT', 'parsed_LABELS')\n\t\tself.dir_B_label = os.path.join(opt.dataroot, 'cityscapes', 'gtFine')\n\t\t\n\t\tself.A_paths = make_dataset(self.dir_A)\n\t\tself.B_paths = make_dataset(self.dir_B)\n\t\t\n\t\tself.A_paths = sorted(self.A_paths)\n\t\tself.B_paths 
= sorted(self.B_paths)\n\t\tself.A_size = len(self.A_paths)\n\t\tself.B_size = len(self.B_paths)\n\t\t\n\t\tself.A_labels = make_dataset(self.dir_A_label)\n\t\tself.B_labels = make_cs_labels(self.dir_B_label)\n\t\t\n\t\tself.A_labels = sorted(self.A_labels)\n\t\tself.B_labels = sorted(self.B_labels)\n\t\t\n\t\tself.transform = get_transform(opt)\n\t\tself.label_transform = get_label_transform(opt)\n\t\n\tdef __getitem__(self, index):\n\t\tA_path = self.A_paths[index % self.A_size]\n\t\tif self.opt.serial_batches:\n\t\t\tindex_B = index % self.B_size\n\t\telse:\n\t\t\tindex_B = random.randint(0, self.B_size - 1)\n\t\tB_path = self.B_paths[index_B]\n\t\t\n\t\tA_label_path = self.A_labels[index % self.A_size]\n\t\tB_label_path = self.B_labels[index_B]\n\t\t\n\t\tA_label = Image.open(A_label_path)\n\t\tB_label = Image.open(B_label_path)\n\t\t\n\t\tA_label = np.asarray(A_label)\n\t\tA_label = syn_relabel(A_label)\n\t\t\n\t\tA_label = Image.fromarray(A_label, 'L')\n\t\tB_label = np.asarray(B_label)\n\t\tB_label = remap_labels_to_train_ids(B_label)\n\t\tB_label = Image.fromarray(B_label, 'L')\n\t\t\n\t\tA_img = Image.open(A_path).convert('RGB')\n\t\tB_img = Image.open(B_path).convert('RGB')\n\t\t\n\t\tA = self.transform(A_img)\n\t\tB = self.transform(B_img)\n\t\t\n\t\tA_label = self.label_transform(A_label)\n\t\tB_label = self.label_transform(B_label)\n\t\t\n\t\tif self.opt.which_direction == 'BtoA':\n\t\t\tinput_nc = self.opt.output_nc\n\t\t\toutput_nc = self.opt.input_nc\n\t\telse:\n\t\t\tinput_nc = self.opt.input_nc\n\t\t\toutput_nc = self.opt.output_nc\n\t\t\n\t\tif input_nc == 1:  # RGB to gray\n\t\t\ttmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114\n\t\t\tA = tmp.unsqueeze(0)\n\t\t\n\t\tif output_nc == 1:  # RGB to gray\n\t\t\ttmp = B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] 
* 0.114\n\t\t\tB = tmp.unsqueeze(0)\n\t\treturn {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path, 'A_label': A_label, 'B_label': B_label}\n\t\n\tdef __len__(self):\n\t\treturn max(self.A_size, self.B_size)\n\t\n\tdef name(self):\n\t\treturn 'Synthia_Cityscapes'\n"
  },
  {
    "path": "cyclegan/environment.yml",
    "content": "name: pytorch-CycleGAN-and-pix2pix\nchannels:\n- peterjc123\n- defaults\ndependencies:\n- python=3.5.5\n- pytorch=0.3.1\n- scipy\n- pip:\n  - dominate==2.3.1\n  - git+https://github.com/pytorch/vision.git\n  - Pillow==5.0.0\n  - numpy==1.14.1\n  - visdom==0.1.7\n"
  },
  {
    "path": "cyclegan/models/__init__.py",
    "content": "import logging\n\ndef create_model(opt):\n\tmodel = None\n\tif opt.model == 'cycle_gan':\n\t\t# assert(opt.dataset_mode == 'unaligned')\n\t\tfrom .cycle_gan_model import CycleGANModel\n\t\tmodel = CycleGANModel()\n\telif opt.model == 'test':\n\t\tfrom .test_model import TestModel\n\t\tmodel = TestModel()\n\telif opt.model == 'multi_cycle_gan_semantic':\n\t\tfrom .multi_cycle_gan_semantic_model import CycleGANSemanticModel\n\t\tmodel = CycleGANSemanticModel()\n\telif opt.model == 'cycle_gan_semantic_fcn':\n\t\tfrom .cycle_gan_semantic_model import CycleGANSemanticModel\n\t\tmodel = CycleGANSemanticModel()\n\telse:\n\t\traise NotImplementedError('model [%s] not implemented.' % opt.model)\n\tmodel.initialize(opt)\n\tlogging.info(\"model [%s] was created\" % (model.name()))\n\treturn model\n"
  },
  {
    "path": "cyclegan/models/base_model.py",
    "content": "import os\nfrom collections import OrderedDict\n\nimport torch\n\nfrom . import networks\n\n\nclass BaseModel():\n\tdef name(self):\n\t\treturn 'BaseModel'\n\t\n\tdef initialize(self, opt):\n\t\tself.opt = opt\n\t\tself.gpu_ids = opt.gpu_ids\n\t\tself.isTrain = opt.isTrain\n\t\tself.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')\n\t\tself.save_dir = os.path.join(opt.checkpoints_dir, opt.name)\n\t\tif opt.resize_or_crop != 'scale_width':\n\t\t\ttorch.backends.cudnn.benchmark = True\n\t\tself.loss_names = []\n\t\tself.model_names = []\n\t\tself.visual_names = []\n\t\tself.image_paths = []\n\t\n\tdef set_input(self, input):\n\t\tself.input = input\n\t\n\tdef forward(self):\n\t\tpass\n\t\n\t# load and print networks; create shedulars\n\tdef setup(self, opt):\n\t\tif self.isTrain:\n\t\t\tself.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]\n\t\t\n\t\tif not self.isTrain or opt.continue_train:\n\t\t\tself.load_networks(opt.which_epoch)\n\t\tself.print_networks(opt.verbose)\n\t\n\t# make models eval mode during test time\n\tdef eval(self):\n\t\tfor name in self.model_names:\n\t\t\tif isinstance(name, str):\n\t\t\t\tnet = getattr(self, 'net' + name)\n\t\t\t\tnet.eval()\n\t\n\t# used in test time, wrapping `forward` in no_grad() so we don't save\n\t# intermediate steps for backprop\n\tdef test(self):\n\t\twith torch.no_grad():\n\t\t\tself.forward()\n\t\n\t# get image paths\n\tdef get_image_paths(self):\n\t\treturn self.image_paths\n\t\n\tdef optimize_parameters(self):\n\t\tpass\n\t\n\t# update learning rate (called once every epoch)\n\tdef update_learning_rate(self):\n\t\tfor scheduler in self.schedulers:\n\t\t\tscheduler.step()\n\t\tlr = self.optimizers[0].param_groups[0]['lr']\n\t\tprint('learning rate = %.7f' % lr)\n\t\n\t# return visualization images. 
train.py will display these images, and save the images to a html\n\tdef get_current_visuals(self):\n\t\tvisual_ret = OrderedDict()\n\t\tfor name in self.visual_names:\n\t\t\tif isinstance(name, str):\n\t\t\t\tvisual_ret[name] = getattr(self, name)\n\t\treturn visual_ret\n\t\n\t# return traning losses/errors. train.py will print out these errors as debugging information\n\tdef get_current_losses(self):\n\t\terrors_ret = OrderedDict()\n\t\tfor name in self.loss_names:\n\t\t\tif isinstance(name, str):\n\t\t\t\t# float(...) works for both scalar tensor and float number\n\t\t\t\terrors_ret[name] = float(getattr(self, 'loss_' + name))\n\t\treturn errors_ret\n\t\n\t# save models to the disk\n\tdef save_networks(self, which_epoch):\n\t\tfor name in self.model_names:\n\t\t\t# Don't save semantic consistency networks\n\t\t\tif isinstance(name, str) and (\"PixelCLS\" not in name):\n\t\t\t\tsave_filename = '%s_net_%s.pth' % (which_epoch, name)\n\t\t\t\tsave_path = os.path.join(self.save_dir, save_filename)\n\t\t\t\tnet = getattr(self, 'net' + name)\n\t\t\t\t\n\t\t\t\tif len(self.gpu_ids) > 0 and torch.cuda.is_available():\n\t\t\t\t\ttorch.save(net.module.cpu().state_dict(), save_path)\n\t\t\t\t\tnet.cuda(self.gpu_ids[0])\n\t\t\t\telse:\n\t\t\t\t\ttorch.save(net.cpu().state_dict(), save_path)\n\t\n\tdef __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):\n\t\tkey = keys[i]\n\t\tif i + 1 == len(keys):  # at the end, pointing to a parameter/buffer\n\t\t\tif module.__class__.__name__.startswith('InstanceNorm') and \\\n\t\t\t\t(key == 'running_mean' or key == 'running_var'):\n\t\t\t\tif getattr(module, key) is None:\n\t\t\t\t\tstate_dict.pop('.'.join(keys))\n\t\telse:\n\t\t\tself.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)\n\t\n\t# load models from the disk\n\tdef load_networks(self, which_epoch):\n\t\tfor name in self.model_names:\n\t\t\tif isinstance(name, str):\n\t\t\t\tload_filename = '%s_net_%s.pth' % (which_epoch, 
name)\n\t\t\t\tload_path = os.path.join(self.save_dir, load_filename)\n\t\t\t\t\n\t\t\t\tnet = getattr(self, 'net' + name)\n\t\t\t\tif isinstance(net, torch.nn.DataParallel):\n\t\t\t\t\tnet = net.module\n\t\t\t\tprint('loading the model from %s' % load_path)\n\t\t\t\t# if you are using PyTorch newer than 0.4 (e.g., built from\n\t\t\t\t# GitHub source), you can remove str() on self.device\n\t\t\t\tstate_dict = torch.load(load_path, map_location=str(self.device))\n\t\t\t\t# patch InstanceNorm checkpoints prior to 0.4\n\t\t\t\tfor key in list(state_dict.keys()):  # need to copy keys here because we mutate in loop\n\t\t\t\t\tself.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))\n\t\t\t\tnet.load_state_dict(state_dict)\n\t\n\t# print network information\n\tdef print_networks(self, verbose):\n\t\tprint('---------- Networks initialized -------------')\n\t\tfor name in self.model_names:\n\t\t\tif isinstance(name, str):\n\t\t\t\tnet = getattr(self, 'net' + name)\n\t\t\t\tnum_params = 0\n\t\t\t\tfor param in net.parameters():\n\t\t\t\t\tnum_params += param.numel()\n\t\t\t\tif verbose:\n\t\t\t\t\tprint(net)\n\t\t\t\tprint('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))\n\t\tprint('-----------------------------------------------')\n\t\n\t# set requies_grad=Fasle to avoid computation\n\tdef set_requires_grad(self, nets, requires_grad=False):\n\t\tif not isinstance(nets, list):\n\t\t\tnets = [nets]\n\t\tfor net in nets:\n\t\t\tif net is not None:\n\t\t\t\tfor param in net.parameters():\n\t\t\t\t\tparam.requires_grad = requires_grad\n"
  },
  {
    "path": "cyclegan/models/cycle_gan_model.py",
    "content": "import torch\nimport itertools\nfrom util.image_pool import ImagePool\nfrom .base_model import BaseModel\nfrom . import networks\n\n\nclass CycleGANModel(BaseModel):\n    def name(self):\n        return 'CycleGANModel'\n\n    def initialize(self, opt):\n        BaseModel.initialize(self, opt)\n\n        # specify the training losses you want to print out. The program will call base_model.get_current_losses\n        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']\n        # specify the images you want to save/display. The program will call base_model.get_current_visuals\n        visual_names_A = ['real_A', 'fake_B', 'rec_A']\n        visual_names_B = ['real_B', 'fake_A', 'rec_B']\n        if self.isTrain and self.opt.lambda_identity > 0.0:\n            visual_names_A.append('idt_A')\n            visual_names_B.append('idt_B')\n\n        self.visual_names = visual_names_A + visual_names_B\n        # specify the models you want to save to the disk. 
The program will call base_model.save_networks and base_model.load_networks\n        if self.isTrain:\n            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']\n        else:  # during test time, only load Gs\n            self.model_names = ['G_A', 'G_B']\n\n        # load/define networks\n        # The naming conversion is different from those used in the paper\n        # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)\n        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc,\n                                        opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids)\n        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc,\n                                        opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids)\n\n        if self.isTrain:\n            use_sigmoid = opt.no_lsgan\n            self.netD_A = networks.define_D(opt.output_nc, opt.ndf,\n                                            opt.which_model_netD,\n                                            opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids)\n            self.netD_B = networks.define_D(opt.input_nc, opt.ndf,\n                                            opt.which_model_netD,\n                                            opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids)\n\n        if self.isTrain:\n            self.fake_A_pool = ImagePool(opt.pool_size)\n            self.fake_B_pool = ImagePool(opt.pool_size)\n            # define loss functions\n            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)\n            self.criterionCycle = torch.nn.L1Loss()\n            self.criterionIdt = torch.nn.L1Loss()\n            # initialize optimizers\n            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),\n                                                lr=opt.lr, 
betas=(opt.beta1, 0.999))\n            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),\n                                                lr=opt.lr, betas=(opt.beta1, 0.999))\n            self.optimizers = []\n            self.optimizers.append(self.optimizer_G)\n            self.optimizers.append(self.optimizer_D)\n\n    def set_input(self, input):\n        AtoB = self.opt.which_direction == 'AtoB'\n        self.real_A = input['A' if AtoB else 'B'].to(self.device)\n        self.real_B = input['B' if AtoB else 'A'].to(self.device)\n        self.image_paths = input['A_paths' if AtoB else 'B_paths']\n\n    def forward(self):\n        self.fake_B = self.netG_A(self.real_A)\n        self.rec_A = self.netG_B(self.fake_B)\n\n        self.fake_A = self.netG_B(self.real_B)\n        self.rec_B = self.netG_A(self.fake_A)\n\n    def backward_D_basic(self, netD, real, fake):\n        # Real\n        pred_real = netD(real)\n        loss_D_real = self.criterionGAN(pred_real, True)\n        # Fake\n        pred_fake = netD(fake.detach())\n        loss_D_fake = self.criterionGAN(pred_fake, False)\n        # Combined loss\n        loss_D = (loss_D_real + loss_D_fake) * 0.5\n        # backward\n        loss_D.backward()\n        return loss_D\n\n    def backward_D_A(self):\n        fake_B = self.fake_B_pool.query(self.fake_B)\n        self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)\n\n    def backward_D_B(self):\n        fake_A = self.fake_A_pool.query(self.fake_A)\n        self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)\n\n    def backward_G(self):\n        lambda_idt = self.opt.lambda_identity\n        lambda_A = self.opt.lambda_A\n        lambda_B = self.opt.lambda_B\n        # Identity loss\n        if lambda_idt > 0:\n            # G_A should be identity if real_B is fed.\n            self.idt_A = self.netG_A(self.real_B)\n            self.loss_idt_A = 
self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt\n            # G_B should be identity if real_A is fed.\n            self.idt_B = self.netG_B(self.real_A)\n            self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt\n        else:\n            self.loss_idt_A = 0\n            self.loss_idt_B = 0\n\n        # GAN loss D_A(G_A(A))\n        self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)\n        # GAN loss D_B(G_B(B))\n        self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)\n        # Forward cycle loss\n        self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n        # Backward cycle loss\n        self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n        # combined loss\n        self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B\n        self.loss_G.backward()\n\n    def optimize_parameters(self):\n        # forward\n        self.forward()\n        # G_A and G_B\n        self.set_requires_grad([self.netD_A, self.netD_B], False)\n        self.optimizer_G.zero_grad()\n        self.backward_G()\n        self.optimizer_G.step()\n        # D_A and D_B\n        self.set_requires_grad([self.netD_A, self.netD_B], True)\n        self.optimizer_D.zero_grad()\n        self.backward_D_A()\n        self.backward_D_B()\n        self.optimizer_D.step()\n"
  },
  {
    "path": "cyclegan/models/cycle_gan_semantic_model.py",
    "content": "import itertools\nimport sys\n\nimport torch\nimport torch.nn.functional as F\nfrom util.image_pool import ImagePool\n\nfrom . import networks\nfrom .base_model import BaseModel\n\nsys.path.append('/nfs/project/libo_iMADAN')\nfrom cycada.models import get_model\n\n\nclass CycleGANSemanticModel(BaseModel):\n\tdef name(self):\n\t\treturn 'CycleGANModel'\n\t\n\tdef initialize(self, opt):\n\t\tBaseModel.initialize(self, opt)\n\t\t\n\t\t# specify the training losses you want to print out. The program will call base_model.get_current_losses\n\t\tself.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A',\n\t\t                   'D_B', 'G_B', 'cycle_B', 'idt_B',\n\t\t                   'sem_AB']\n\t\t\n\t\t# specify the images you want to save/display. The program will call base_model.get_current_visuals\n\t\tvisual_names_A = ['real_A', 'fake_B', 'rec_A']\n\t\tvisual_names_B = ['real_B', 'fake_A', 'rec_B']\n\t\tif self.isTrain and self.opt.lambda_identity > 0.0:\n\t\t\tvisual_names_A.append('idt_A')\n\t\t\tvisual_names_B.append('idt_B')\n\t\t\n\t\tself.visual_names = visual_names_A + visual_names_B\n\t\t# specify the models you want to save to the disk. 
The program will call base_model.save_networks and base_model.load_networks\n\t\tif self.isTrain:\n\t\t\tself.model_names = ['G_A', 'G_B', 'D_A', 'D_B']\n\t\t\n\t\telse:  # during test time, only load Gs\n\t\t\tself.model_names = ['G_A', 'G_B']\n\t\t\n\t\t# load/define networks\n\t\t# The naming convention is different from those used in the paper\n\t\t# Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)\n\t\tself.netG_A = networks.define_G(opt.input_nc, opt.output_nc,\n\t\t                                opt.ngf, opt.which_model_netG, opt.norm,\n\t\t                                not opt.no_dropout, opt.init_type, self.gpu_ids)\n\t\tself.netG_B = networks.define_G(opt.output_nc, opt.input_nc,\n\t\t                                opt.ngf, opt.which_model_netG, opt.norm,\n\t\t                                not opt.no_dropout, opt.init_type, self.gpu_ids)\n\t\t\n\t\tif self.isTrain:\n\t\t\tuse_sigmoid = opt.no_lsgan\n\t\t\tself.netD_A = networks.define_D(opt.output_nc, opt.ndf,\n\t\t\t                                opt.which_model_netD,\n\t\t\t                                opt.n_layers_D, opt.norm, use_sigmoid,\n\t\t\t                                opt.init_type, self.gpu_ids)\n\t\t\tself.netD_B = networks.define_D(opt.input_nc, opt.ndf,\n\t\t\t                                opt.which_model_netD,\n\t\t\t                                opt.n_layers_D, opt.norm, use_sigmoid,\n\t\t\t                                opt.init_type, self.gpu_ids)\n\t\t\t\n\t\t\t# Here for semantic consistency loss, load an FCN network as fs here.\n\t\t\tself.netPixelCLS = get_model(opt.weights_model_type, num_cls=opt.num_cls, pretrained=True, weights_init=opt.weights_init)\n\t\t\t# Specially initialize Pixel CLS network\n\t\t\tif len(self.gpu_ids) > 0:\n\t\t\t\tassert (torch.cuda.is_available())\n\t\t\t\tself.netPixelCLS.to(self.gpu_ids[0])\n\t\t\t\tself.netPixelCLS = torch.nn.DataParallel(self.netPixelCLS, self.gpu_ids)\n\t\t\n\t\tif self.isTrain:\n\t\t\tself.fake_A_pool = 
ImagePool(opt.pool_size)\n\t\t\tself.fake_B_pool = ImagePool(opt.pool_size)\n\t\t\t# define loss functions\n\t\t\tself.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)\n\t\t\tself.criterionCycle = torch.nn.L1Loss()\n\t\t\tself.criterionIdt = torch.nn.L1Loss()\n\t\t\t# self.criterionCLS = torch.nn.modules.CrossEntropyLoss()\n\t\t\tself.criterionSemantic = torch.nn.KLDivLoss(reduction='batchmean')\n\t\t\t# initialize optimizers\n\t\t\tself.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),\n\t\t\t                                    lr=opt.lr, betas=(opt.beta1, 0.999))\n\t\t\tself.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),\n\t\t\t                                    lr=opt.lr, betas=(opt.beta1, 0.999))\n\t\t\t\n\t\t\tself.optimizers = []\n\t\t\tself.optimizers.append(self.optimizer_G)\n\t\t\tself.optimizers.append(self.optimizer_D)\n\t\n\tdef set_input(self, input):\n\t\tAtoB = self.opt.which_direction == 'AtoB'\n\t\tself.real_A = input['A' if AtoB else 'B'].to(self.device)\n\t\tself.real_B = input['B' if AtoB else 'A'].to(self.device)\n\t\tself.image_paths = input['A_paths' if AtoB else 'B_paths']\n\t\tif 'A_label' in input and 'B_label' in input:\n\t\t\tself.input_A_label = input['A_label' if AtoB else 'B_label'].to(self.device)\n\t\t\tself.input_B_label = input['B_label' if AtoB else 'A_label'].to(self.device)\n\t\n\t# self.image_paths = input['B_paths'] # Hack!! 
forcing the labels to correspond to B domain\n\t\n\tdef forward(self):\n\t\tself.fake_B = self.netG_A(self.real_A)\n\t\tself.rec_A = self.netG_B(self.fake_B)\n\t\t\n\t\tself.fake_A = self.netG_B(self.real_B)\n\t\tself.rec_B = self.netG_A(self.fake_A)\n\t\t\n\t\tif self.isTrain:\n\t\t\t# Forward all four images through classifier\n\t\t\t# Keep predictions from fake images only\n\t\t\tself.pred_real_A = self.netPixelCLS(self.real_A)\n\t\t\t_, self.gt_pred_A = self.pred_real_A.max(1)\n\t\t\t\n\t\t\tself.pred_fake_B = self.netPixelCLS(self.fake_B)\n\t\t\t_, pfB = self.pred_fake_B.max(1)\n\t\n\tdef backward_D_basic(self, netD, real, fake):\n\t\t# Real\n\t\tpred_real = netD(real)\n\t\tloss_D_real = self.criterionGAN(pred_real, True)\n\t\t# Fake\n\t\tpred_fake = netD(fake.detach())\n\t\tloss_D_fake = self.criterionGAN(pred_fake, False)\n\t\t# Combined Loss\n\t\tloss_D = (loss_D_real + loss_D_fake) * 0.5\n\t\t# backward\n\t\tloss_D.backward()\n\t\treturn loss_D\n\t\n\tdef backward_PixelCLS(self):\n\t\tlabel_A = self.input_A_label\n\t\t# forward only real source image through semantic classifier\n\t\tpred_A = self.netPixelCLS(self.real_A)\n\t\tself.loss_PixelCLS = self.criterionSemantic(F.log_softmax(pred_A, dim=1), label_A.long())\n\t\tself.loss_PixelCLS.backward()\n\t\n\tdef backward_D_A(self):\n\t\tfake_B = self.fake_B_pool.query(self.fake_B)\n\t\tself.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)\n\t\n\tdef backward_D_B(self):\n\t\tfake_A = self.fake_A_pool.query(self.fake_A)\n\t\tself.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)\n\t\n\tdef backward_G(self, opt):\n\t\tlambda_idt = self.opt.lambda_identity\n\t\tlambda_A = self.opt.lambda_A\n\t\tlambda_B = self.opt.lambda_B\n\t\t# Identity loss\n\t\tif lambda_idt > 0:\n\t\t\t# G_A should be identity if real_B is fed.\n\t\t\tself.idt_A = self.netG_A(self.real_B)\n\t\t\tself.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt\n\t\t\t# G_B should be 
identity if real_A is fed.\n\t\t\tself.idt_B = self.netG_B(self.real_A)\n\t\t\tself.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt\n\t\telse:\n\t\t\tself.loss_idt_A = 0\n\t\t\tself.loss_idt_B = 0\n\t\t\n\t\t# GAN loss D_A(G_A(A))\n\t\tself.loss_G_A = 2 * self.criterionGAN(self.netD_A(self.fake_B), True)\n\t\t# GAN loss D_B(G_B(B))\n\t\tself.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)\n\t\t# Forward cycle loss\n\t\tself.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n\t\t# Backward cycle loss\n\t\tself.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n\t\t# combined loss standard cyclegan\n\t\tself.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B\n\t\t\n\t\t# real_A(syn)->fake_B->(fcn_frozen)->pred_fake_B == input_A_label\n\t\tif opt.semantic_loss:\n\t\t\tself.loss_sem_AB = opt.dynamic_weight * self.criterionSemantic(F.log_softmax(self.pred_fake_B, dim=1), F.softmax(self.pred_real_A,\n\t\t\t                                                                                                                 dim=1))\n\t\t\tself.loss_sem_AB = opt.general_semantic_weight * torch.div(self.loss_sem_AB, self.pred_fake_B.shape[1] * self.pred_fake_B.shape[2]\n\t\t\t                                                           * self.pred_fake_B.shape[3])\n\t\t\tself.loss_G += self.loss_sem_AB\n\t\t\n\t\tself.loss_G.backward()\n\t\n\tdef optimize_parameters(self, opt):\n\t\t# forward\n\t\tself.forward()\n\t\t# G_A and G_B\n\t\tself.set_requires_grad([self.netD_A, self.netD_B], False)\n\t\tself.optimizer_G.zero_grad()\n\t\t# self.optimizer_CLS.zero_grad()\n\t\tself.backward_G(opt)\n\t\tself.optimizer_G.step()\n\t\t# D_A and D_B\n\t\tself.set_requires_grad([self.netD_A, self.netD_B], True)\n\t\tself.optimizer_D.zero_grad()\n\t\tself.backward_D_A()\n\t\tself.backward_D_B()\n\t\tself.optimizer_D.step()\n"
  },
  {
    "path": "cyclegan/models/multi_cycle_gan_semantic_model.py",
    "content": "import itertools\nimport sys\n\nimport torch\nimport torch.nn.functional as F\nfrom util.image_pool import ImagePool\n\nfrom . import networks\nfrom .base_model import BaseModel\n\nsys.path.append('/nfs/project/libo_iMADAN')\nfrom cycada.models import get_model\n\n\nclass CycleGANSemanticModel(BaseModel):\n\tdef name(self):\n\t\treturn 'CycleGANModel'\n\t\n\tdef initialize(self, opt):\n\t\tBaseModel.initialize(self, opt)\n\t\t\n\t\tself.semantic_loss = opt.semantic_loss\n\t\t\n\t\t# specify the training losses you want to print out. The program will call base_model.get_current_losses\n\t\tself.loss_names = ['D_A_1', 'G_A_1', 'cycle_A_1', 'idt_A_1',\n\t\t                   'D_B_1', 'G_B_1', 'cycle_B_1', 'idt_B_1',\n\t\t                   'D_A_2', 'G_A_2', 'cycle_A_2', 'idt_A_2',\n\t\t                   'D_B_2', 'G_B_2', 'cycle_B_2', 'idt_B_2']\n\t\t\n\t\tif opt.SAD:\n\t\t\tself.loss_names.extend(['D_3_1', 'G_s1s2'])\n\t\t\n\t\tif opt.CCD or opt.HF_CCD:\n\t\t\tself.loss_names.extend(['D_21', 'G_s1s21'])\n\t\t\tself.loss_names.extend(['D_12', 'G_s2s12'])\n\t\t\n\t\tif self.semantic_loss:\n\t\t\tself.loss_names.extend(['sem_syn', 'sem_gta'])\n\t\t\n\t\t# specify the images you want to save/display. The program will call base_model.get_current_visuals\n\t\tvisual_names_A_1 = ['real_A_1', 'fake_B_1', 'rec_A_1']\n\t\tvisual_names_B_1 = ['real_B', 'fake_A_1', 'rec_B_1']\n\t\t\n\t\tvisual_names_A_2 = ['real_A_2', 'fake_B_2', 'rec_A_2']\n\t\tvisual_names_B_2 = ['fake_A_2', 'rec_B_2']\n\t\t\n\t\tif self.isTrain and self.opt.lambda_identity > 0.0:\n\t\t\tvisual_names_A_1.append('idt_A_1')\n\t\t\tvisual_names_B_1.append('idt_B_1')\n\t\t\t\n\t\t\tvisual_names_A_2.append('idt_A_2')\n\t\t\tvisual_names_B_2.append('idt_B_2')\n\t\t\n\t\tself.visual_names = visual_names_A_1 + visual_names_B_1 + visual_names_A_2 + visual_names_B_2\n\t\t# specify the models you want to save to the disk. 
The program will call base_model.save_networks and base_model.load_networks\n\t\tif self.isTrain:\n\t\t\t# self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']\n\t\t\tif opt.Shared_DT:\n\t\t\t\tself.model_names = ['G_A_1', 'G_B_1', 'D_A', 'D_B_1', 'D_B_2', 'G_A_2', 'G_B_2']\n\t\t\telse:\n\t\t\t\tself.model_names = ['G_A_1', 'G_B_1', 'D_A_1', 'D_B_1', 'G_A_2', 'G_B_2', 'D_A_2', 'D_B_2']\n\t\t\tif opt.SAD:\n\t\t\t\tself.model_names.append('D_3')\n\t\t\t\n\t\t\tif opt.CCD or opt.HF_CCD:\n\t\t\t\tself.model_names.append('D_12')\n\t\t\t\tself.model_names.append('D_21')\n\t\t\n\t\telse:  # during test time, only load Gs\n\t\t\tself.model_names = ['G_A_1', 'G_B_1', 'G_A_2', 'G_B_2']\n\t\t\n\t\t# load/define networks\n\t\t# The naming convention is different from those used in the paper\n\t\t# Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)\n\t\tself.netG_A_1 = networks.define_G(opt.input_nc, opt.output_nc,\n\t\t                                  opt.ngf, opt.which_model_netG, opt.norm,\n\t\t                                  not opt.no_dropout, opt.init_type, self.gpu_ids)\n\t\tself.netG_B_1 = networks.define_G(opt.output_nc, opt.input_nc,\n\t\t                                  opt.ngf, opt.which_model_netG, opt.norm,\n\t\t                                  not opt.no_dropout, opt.init_type, self.gpu_ids)\n\t\t\n\t\tself.netG_A_2 = networks.define_G(opt.input_nc, opt.output_nc,\n\t\t                                  opt.ngf, opt.which_model_netG, opt.norm,\n\t\t                                  not opt.no_dropout, opt.init_type, self.gpu_ids)\n\t\t\n\t\tself.netG_B_2 = networks.define_G(opt.output_nc, opt.input_nc,\n\t\t                                  opt.ngf, opt.which_model_netG, opt.norm,\n\t\t                                  not opt.no_dropout, opt.init_type, self.gpu_ids)\n\t\t\n\t\tif opt.semantic_loss:\n\t\t\tself.netPixelCLS_SYN = get_model(opt.weights_model_type, num_cls=opt.num_cls, pretrained=True, weights_init=opt.weights_syn)\n\t\t\tself.netPixelCLS_GTA = 
get_model(opt.weights_model_type, num_cls=opt.num_cls, pretrained=True, weights_init=opt.weights_gta)\n\t\t\tif len(self.gpu_ids) > 0:\n\t\t\t\tassert (torch.cuda.is_available())\n\t\t\t\tself.netPixelCLS_SYN.to(self.gpu_ids[0])\n\t\t\t\tself.netPixelCLS_SYN = torch.nn.DataParallel(self.netPixelCLS_SYN, self.gpu_ids)\n\t\t\t\tself.netPixelCLS_GTA.to(self.gpu_ids[0])\n\t\t\t\tself.netPixelCLS_GTA = torch.nn.DataParallel(self.netPixelCLS_GTA, self.gpu_ids)\n\t\t\n\t\tif self.isTrain:\n\t\t\tuse_sigmoid = opt.no_lsgan\n\t\t\tif opt.Shared_DT:\n\t\t\t\tself.netD_A = networks.define_D(opt.output_nc, opt.ndf,\n\t\t\t\t                                opt.which_model_netD,\n\t\t\t\t                                opt.n_layers_D, opt.norm, use_sigmoid,\n\t\t\t\t                                opt.init_type, self.gpu_ids)\n\t\t\telse:\n\t\t\t\tself.netD_A_1 = networks.define_D(opt.output_nc, opt.ndf,\n\t\t\t\t                                  opt.which_model_netD,\n\t\t\t\t                                  opt.n_layers_D, opt.norm, use_sigmoid,\n\t\t\t\t                                  opt.init_type, self.gpu_ids)\n\t\t\t\t\n\t\t\t\tself.netD_A_2 = networks.define_D(opt.output_nc, opt.ndf,\n\t\t\t\t                                  opt.which_model_netD,\n\t\t\t\t                                  opt.n_layers_D, opt.norm, use_sigmoid,\n\t\t\t\t                                  opt.init_type, self.gpu_ids)\n\t\t\t\n\t\t\tself.netD_B_1 = networks.define_D(opt.input_nc, opt.ndf,\n\t\t\t                                  opt.which_model_netD,\n\t\t\t                                  opt.n_layers_D, opt.norm, use_sigmoid,\n\t\t\t                                  opt.init_type, self.gpu_ids)\n\t\t\t\n\t\t\tself.netD_B_2 = networks.define_D(opt.input_nc, opt.ndf,\n\t\t\t                                  opt.which_model_netD,\n\t\t\t                                  opt.n_layers_D, opt.norm, use_sigmoid,\n\t\t\t                                  opt.init_type, 
self.gpu_ids)\n\t\t\t\n\t\t\tif opt.SAD:\n\t\t\t\tself.netD_3 = networks.define_D(opt.input_nc, opt.ndf,\n\t\t\t\t                                opt.which_model_netD,\n\t\t\t\t                                opt.n_layers_D, opt.norm, use_sigmoid,\n\t\t\t\t                                opt.init_type, self.gpu_ids)\n\t\t\tif opt.CCD or opt.HF_CCD:\n\t\t\t\tself.netD_12 = networks.define_D(opt.input_nc, opt.ndf,\n\t\t\t\t                                 opt.which_model_netD,\n\t\t\t\t                                 opt.n_layers_D, opt.norm, use_sigmoid,\n\t\t\t\t                                 opt.init_type, self.gpu_ids)\n\t\t\t\tself.netD_21 = networks.define_D(opt.input_nc, opt.ndf,\n\t\t\t\t                                 opt.which_model_netD,\n\t\t\t\t                                 opt.n_layers_D, opt.norm, use_sigmoid,\n\t\t\t\t                                 opt.init_type, self.gpu_ids)\n\t\t\n\t\tif self.isTrain:\n\t\t\tself.fake_A_1_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images\n\t\t\tself.fake_B_1_pool = ImagePool(opt.pool_size)\n\t\t\tself.fake_A_2_pool = ImagePool(opt.pool_size)\n\t\t\tself.fake_B_2_pool = ImagePool(opt.pool_size)\n\t\t\tself.fake_A_21_pool = ImagePool(opt.pool_size)\n\t\t\tself.fake_A_12_pool = ImagePool(opt.pool_size)\n\t\t\t# define loss functions\n\t\t\tself.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)\n\t\t\tself.criterionCycle = torch.nn.L1Loss()\n\t\t\tself.criterionIdt = torch.nn.L1Loss()\n\t\t\tself.criterionSemantic = torch.nn.KLDivLoss(reduction='batchmean')\n\t\t\t# initialize optimizers\n\t\t\tif opt.Shared_DT:\n\t\t\t\tself.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B_1.parameters(),\n\t\t\t\t                                                    self.netD_B_2.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))\n\t\t\telse:\n\t\t\t\tself.optimizer_D_1 = 
torch.optim.Adam(itertools.chain(self.netD_A_1.parameters(), self.netD_B_1.parameters()),\n\t\t\t\t                                      lr=opt.lr, betas=(opt.beta1, 0.999))\n\t\t\t\tself.optimizer_D_2 = torch.optim.Adam(itertools.chain(self.netD_A_2.parameters(), self.netD_B_2.parameters()),\n\t\t\t\t                                      lr=opt.lr, betas=(opt.beta1, 0.999))\n\t\t\t\n\t\t\tself.optimizer_G_1 = torch.optim.Adam(itertools.chain(self.netG_A_1.parameters(), self.netG_B_1.parameters()),\n\t\t\t                                      lr=opt.lr, betas=(opt.beta1, 0.999))\n\t\t\t\n\t\t\tself.optimizer_G_2 = torch.optim.Adam(itertools.chain(self.netG_A_2.parameters(), self.netG_B_2.parameters()),\n\t\t\t                                      lr=opt.lr, betas=(opt.beta1, 0.999))\n\t\t\t\n\t\t\tif opt.SAD:\n\t\t\t\tself.optimizer_D_3 = torch.optim.Adam(self.netD_3.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n\t\t\t\n\t\t\tif opt.CCD or opt.HF_CCD:\n\t\t\t\tself.optimizer_D_21 = torch.optim.Adam(self.netD_21.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n\t\t\t\tself.optimizer_D_12 = torch.optim.Adam(self.netD_12.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n\t\t\t\n\t\t\tself.optimizers = []\n\t\t\tself.optimizers.append(self.optimizer_G_1)\n\t\t\tself.optimizers.append(self.optimizer_G_2)\n\t\t\tif opt.Shared_DT:\n\t\t\t\tself.optimizers.append(self.optimizer_D)\n\t\t\telse:\n\t\t\t\tself.optimizers.append(self.optimizer_D_1)\n\t\t\t\tself.optimizers.append(self.optimizer_D_2)\n\t\t\t\n\t\t\tif opt.SAD:\n\t\t\t\tself.optimizers.append(self.optimizer_D_3)\n\t\t\tif opt.CCD or opt.HF_CCD:\n\t\t\t\tself.optimizers.append(self.optimizer_D_12)\n\t\t\t\tself.optimizers.append(self.optimizer_D_21)\n\t\n\tdef set_input(self, input):\n\t\tself.real_A_1 = input['A_1'].to(self.device)\n\t\tself.real_A_2 = input['A_2'].to(self.device)\n\t\tself.real_B = input['B'].to(self.device)\n\t\t\n\t\tself.image_paths_1 = input['A_paths_1']\n\t\tself.image_paths_2 = 
input['A_paths_2']\n\t\tself.image_paths = self.image_paths_1 + self.image_paths_2\n\t\tif 'A_label_1' in input and 'B_label' in input and 'A_label_2' in input:\n\t\t\tself.input_A_label_1 = input['A_label_1'].to(self.device)\n\t\t\tself.input_A_label_2 = input['A_label_2'].to(self.device)\n\t\t\tself.input_B_label = input['B_label'].to(self.device)\n\t\n\tdef forward(self, opt):\n\t\t# cycle for data input #1\n\t\tself.fake_B_1 = self.netG_A_1(self.real_A_1)\n\t\tself.rec_A_1 = self.netG_B_1(self.fake_B_1)\n\t\t\n\t\tself.fake_A_1 = self.netG_B_1(self.real_B)\n\t\tself.rec_B_1 = self.netG_A_1(self.fake_A_1)\n\t\t\n\t\t# cycle for data input #2\n\t\tself.fake_B_2 = self.netG_A_2(self.real_A_2)\n\t\tself.rec_A_2 = self.netG_B_2(self.fake_B_2)\n\t\t\n\t\tself.fake_A_2 = self.netG_B_2(self.real_B)\n\t\tself.rec_B_2 = self.netG_A_2(self.fake_A_2)\n\t\t\n\t\tif opt.CCD:\n\t\t\t# generate s21 for d21 branch\n\t\t\tself.fake_A_21 = self.netG_B_1(self.fake_B_2)\n\t\t\t# generate s12 for d12 branch\n\t\t\tself.fake_A_12 = self.netG_B_2(self.fake_B_1)\n\t\t\n\t\tif self.isTrain and self.semantic_loss:\n\t\t\t# Forward all four images through classifier\n\t\t\t# Keep predictions from fake images only\n\t\t\tself.pred_real_A_SYN = self.netPixelCLS_SYN(self.real_A_1)\n\t\t\t_, self.gt_pred_A_SYN = self.pred_real_A_SYN.max(1)\n\t\t\t\n\t\t\tself.pred_fake_B_SYN = self.netPixelCLS_SYN(self.fake_B_1)\n\t\t\t_, pfB_SYN = self.pred_fake_B_SYN.max(1)\n\t\t\t\n\t\t\tself.pred_real_A_GTA = self.netPixelCLS_GTA(self.real_A_2)\n\t\t\t_, self.gt_pred_A_GTA = self.pred_real_A_GTA.max(1)\n\t\t\t\n\t\t\tself.pred_fake_B_GTA = self.netPixelCLS_GTA(self.fake_B_2)\n\t\t\t_, pfB_GTA = self.pred_fake_B_GTA.max(1)\n\t\n\tdef backward_D_basic(self, netD, real, fake, SAD=False):\n\t\t# Real\n\t\tif SAD == False:\n\t\t\tpred_real = netD(real)\n\t\telse:\n\t\t\tpred_real = netD(real.detach())\n\t\t\n\t\tloss_D_real = self.criterionGAN(pred_real, True)\n\t\t# Fake\n\t\tpred_fake = 
netD(fake.detach())\n\t\tloss_D_fake = self.criterionGAN(pred_fake, False)\n\t\t# Combined loss\n\t\tloss_D = (loss_D_real + loss_D_fake) * 0.5\n\t\t# backward\n\t\tloss_D.backward()\n\t\treturn loss_D\n\t\n\tdef backward_D_A(self, Shared_DT):\n\t\t# data 1 A1->B\n\t\tfake_B_1 = self.fake_B_1_pool.query(self.fake_B_1)\n\t\tif Shared_DT:\n\t\t\tself.loss_D_A_1 = self.backward_D_basic(self.netD_A, self.real_B, fake_B_1)\n\t\telse:\n\t\t\tself.loss_D_A_1 = self.backward_D_basic(self.netD_A_1, self.real_B, fake_B_1)\n\t\t# data 2 A2->B\n\t\tfake_B_2 = self.fake_B_2_pool.query(self.fake_B_2)\n\t\tif Shared_DT:\n\t\t\tself.loss_D_A_2 = self.backward_D_basic(self.netD_A, self.real_B, fake_B_2)\n\t\telse:\n\t\t\tself.loss_D_A_2 = self.backward_D_basic(self.netD_A_2, self.real_B, fake_B_2)\n\t\n\tdef backward_D_B(self):\n\t\t# data 1 B->A1\n\t\tfake_A_1 = self.fake_A_1_pool.query(self.fake_A_1)\n\t\tself.loss_D_B_1 = self.backward_D_basic(self.netD_B_1, self.real_A_1, fake_A_1)\n\t\t\n\t\t# data 2 B->A2\n\t\tfake_A_2 = self.fake_A_2_pool.query(self.fake_A_2)\n\t\tself.loss_D_B_2 = self.backward_D_basic(self.netD_B_2, self.real_A_2, fake_A_2)\n\t\n\tdef backward_D(self, which_D):\n\t\tif which_D == 'SAD':\n\t\t\tfake_B_1 = self.fake_B_1_pool.query(self.fake_B_1)\n\t\t\tself.loss_D_3_1 = self.backward_D_basic(self.netD_3, self.fake_B_2, fake_B_1, SAD=True)\n\t\t\n\t\telif which_D == 'CCD_21':\n\t\t\tfake_A_21 = self.fake_A_21_pool.query(self.fake_A_21)\n\t\t\tself.loss_D_21 = self.backward_D_basic(self.netD_21, self.real_A_1, fake_A_21)\n\t\t\n\t\telif which_D == 'CCD_12':\n\t\t\tfake_A_12 = self.fake_A_12_pool.query(self.fake_A_12)\n\t\t\tself.loss_D_12 = self.backward_D_basic(self.netD_12, self.real_A_2, fake_A_12)\n\t\t\n\t\telse:\n\t\t\traise Exception(\"Invalid Choice {}\".format(which_D))\n\t\n\t# fake_B_2 = self.fake_B_pool.query(self.fake_B_2)\n\t# self.loss_D_3_2 = self.backward_D_basic(self.netD_3, self.fake_B_1, fake_B_2)\n\t\n\tdef backward_G(self, 
opt):\n\t\tlambda_idt = self.opt.lambda_identity\n\t\tlambda_A = self.opt.lambda_A\n\t\tlambda_B = self.opt.lambda_B\n\t\t# Identity loss\n\t\tif lambda_idt > 0:\n\t\t\tself.idt_A_1 = self.netG_A_1(self.real_B)\n\t\t\tself.loss_idt_A_1 = self.criterionIdt(self.idt_A_1, self.real_B) * lambda_B * lambda_idt\n\t\t\t\n\t\t\tself.idt_A_2 = self.netG_A_2(self.real_B)\n\t\t\tself.loss_idt_A_2 = self.criterionIdt(self.idt_A_2, self.real_B) * lambda_B * lambda_idt\n\t\t\t\n\t\t\tself.idt_B_1 = self.netG_B_1(self.real_A_1)\n\t\t\tself.loss_idt_B_1 = self.criterionIdt(self.idt_B_1, self.real_A_1) * lambda_A * lambda_idt\n\t\t\t\n\t\t\tself.idt_B_2 = self.netG_B_2(self.real_A_2)\n\t\t\tself.loss_idt_B_2 = self.criterionIdt(self.idt_B_2, self.real_A_2) * lambda_A * lambda_idt\n\t\t\n\t\telse:\n\t\t\tself.loss_idt_A_1 = 0\n\t\t\tself.loss_idt_A_2 = 0\n\t\t\tself.loss_idt_B_1 = 0\n\t\t\tself.loss_idt_B_2 = 0\n\t\t\n\t\tif opt.Shared_DT:\n\t\t\tself.loss_G_A_1 = 2 * self.criterionGAN(self.netD_A(self.fake_B_1), True)\n\t\t\tself.loss_G_A_2 = 2 * self.criterionGAN(self.netD_A(self.fake_B_2), True)\n\t\telse:\n\t\t\tself.loss_G_A_1 = 2 * self.criterionGAN(self.netD_A_1(self.fake_B_1), True)\n\t\t\tself.loss_G_A_2 = 2 * self.criterionGAN(self.netD_A_2(self.fake_B_2), True)\n\t\t\n\t\t# GAN loss D_B(G_B(B))\n\t\tself.loss_G_B_1 = self.criterionGAN(self.netD_B_1(self.fake_A_1), True)\n\t\tself.loss_G_B_2 = self.criterionGAN(self.netD_B_2(self.fake_A_2), True)\n\t\t\n\t\t# Forward cycle loss\n\t\tself.loss_cycle_A_1 = self.criterionCycle(self.rec_A_1, self.real_A_1) * lambda_A\n\t\tself.loss_cycle_A_2 = self.criterionCycle(self.rec_A_2, self.real_A_2) * lambda_A\n\t\t\n\t\t# Backward cycle loss\n\t\tself.loss_cycle_B_1 = self.criterionCycle(self.rec_B_1, self.real_B) * lambda_B\n\t\tself.loss_cycle_B_2 = self.criterionCycle(self.rec_B_2, self.real_B) * lambda_B\n\t\t\n\t\t# combined loss standard cyclegan\n\t\tself.loss_G_1 = self.loss_G_A_1 + self.loss_G_B_1 + self.loss_cycle_A_1 + 
self.loss_cycle_B_1 + self.loss_idt_A_1 + self.loss_idt_B_1\n\t\tself.loss_G_2 = self.loss_G_A_2 + self.loss_G_B_2 + self.loss_cycle_A_2 + self.loss_cycle_B_2 + self.loss_idt_A_2 + self.loss_idt_B_2\n\t\tself.loss_G = self.loss_G_1 + self.loss_G_2\n\t\t\n\t\tif opt.SAD:\n\t\t\t# D3 loss\n\t\t\tif opt.SAD_frozen_epoch != -1 and opt.current_epoch > opt.SAD_frozen_epoch:\n\t\t\t\tself.loss_G_s1s2 = self.criterionGAN(self.netD_3(self.fake_B_1), True)\n\t\t\telse:\n\t\t\t\tself.loss_G_s1s2 = 0\n\t\t\tself.loss_G += self.loss_G_s1s2\n\t\t\n\t\tif opt.CCD:\n\t\t\t# D21 loss\n\t\t\tif opt.CCD_frozen_epoch != -1 and opt.current_epoch > opt.CCD_frozen_epoch:\n\t\t\t\tself.loss_G_s1s21 = self.criterionGAN(self.netD_21(self.fake_A_21), True)\n\t\t\t\tself.loss_G += self.loss_G_s1s21 * opt.D1D2_weight\n\t\t\telse:\n\t\t\t\tself.loss_G_s1s21 = 0\n\t\t\t\n\t\t\tif opt.CCD_frozen_epoch != -1 and opt.current_epoch > opt.CCD_frozen_epoch:\n\t\t\t\tself.loss_G_s2s12 = self.criterionGAN(self.netD_12(self.fake_A_12), True)\n\t\t\t\tself.loss_G += self.loss_G_s2s12 * opt.D1D2_weight\n\t\t\telse:\n\t\t\t\tself.loss_G_s2s12 = 0\n\t\t\n\t\tif opt.semantic_loss:\n\t\t\tself.loss_sem_syn = opt.dynamic_weight * self.criterionSemantic(F.log_softmax(self.pred_fake_B_SYN, dim=1),\n\t\t\t                                                                F.softmax(self.pred_real_A_SYN, dim=1))\n\t\t\tself.loss_sem_gta = opt.dynamic_weight * self.criterionSemantic(F.log_softmax(self.pred_fake_B_GTA, dim=1),\n\t\t\t                                                                F.softmax(self.pred_real_A_GTA, dim=1))\n\t\t\tself.loss_G += opt.general_semantic_weight * torch.div(self.loss_sem_syn, self.pred_fake_B_SYN.shape[1] * self.pred_fake_B_SYN.shape[2]\n\t\t\t                                                       * self.pred_fake_B_SYN.shape[3])\n\t\t\tself.loss_G += opt.general_semantic_weight * torch.div(self.loss_sem_gta, self.pred_fake_B_GTA.shape[1] * self.pred_fake_B_GTA.shape[2]\n\t\t\t     
                                                  * self.pred_fake_B_GTA.shape[3])\n\t\t\n\t\tself.loss_G.backward()\n\t\n\tdef backward_HF_CCD(self, opt):\n\t\tself.fake_B_1 = self.netG_A_1(self.real_A_1)\n\t\tself.fake_B_2 = self.netG_A_2(self.real_A_2)\n\t\t# generate s21 for d21 branch\n\t\tself.fake_A_21 = self.netG_B_1(self.fake_B_2)\n\t\t# generate s12 for d12 branch\n\t\tself.fake_A_12 = self.netG_B_2(self.fake_B_1)\n\t\t\n\t\t# D12 loss\n\t\tif opt.CCD_frozen_epoch != -1 and opt.current_epoch > opt.CCD_frozen_epoch:\n\t\t\tself.loss_G_s2s12 = self.criterionGAN(self.netD_12(self.fake_A_12), True)\n\t\telse:\n\t\t\tself.loss_G_s2s12 = 0\n\t\t# D21 loss\n\t\tif opt.CCD_frozen_epoch != -1 and opt.current_epoch > opt.CCD_frozen_epoch:\n\t\t\tself.loss_G_s1s21 = self.criterionGAN(self.netD_21(self.fake_A_21), True)\n\t\telse:\n\t\t\tself.loss_G_s1s21 = 0\n\t\t\n\t\t# self.loss_G_s2s12 = self.criterionGAN(self.netD_12(self.fake_A_12), True)\n\t\t# self.loss_G_s1s21 = self.criterionGAN(self.netD_21(self.fake_A_21), True)\n\t\tself.loss_G_HF = self.loss_G_s1s21 * opt.CCD_weight + self.loss_G_s2s12 * opt.CCD_weight\n\t\t\n\t\tif isinstance(self.loss_G_HF, torch.Tensor):\n\t\t\tself.loss_G_HF.backward()\n\t\n\tdef optimize_parameters(self, opt):\n\t\t# forward\n\t\tself.forward(opt)\n\t\t# G_A and G_B\n\t\t# set D to false, back prop G's gradients\n\t\tif opt.Shared_DT:\n\t\t\tself.set_requires_grad([self.netD_A, self.netD_B_1, self.netD_B_2], False)\n\t\telse:\n\t\t\tself.set_requires_grad([self.netD_A_1, self.netD_B_1], False)\n\t\t\tself.set_requires_grad([self.netD_A_2, self.netD_B_2], False)\n\t\t\n\t\tif opt.SAD:\n\t\t\tself.set_requires_grad([self.netD_3], False)\n\t\t\n\t\tif opt.CCD or opt.HF_CCD:\n\t\t\tself.set_requires_grad([self.netD_21], False)\n\t\t\tself.set_requires_grad([self.netD_12], False)\n\t\t\n\t\tself.set_requires_grad([self.netG_A_1, self.netG_B_1], True)\n\t\tself.set_requires_grad([self.netG_A_2, self.netG_B_2], 
True)\n\t\t\n\t\tself.optimizer_G_1.zero_grad()\n\t\tself.optimizer_G_2.zero_grad()\n\t\t# self.optimizer_CLS.zero_grad()\n\t\tself.backward_G(opt)\n\t\tself.optimizer_G_1.step()\n\t\tself.optimizer_G_2.step()\n\t\t\n\t\tif opt.HF_CCD:\n\t\t\tself.optimizer_G_1.zero_grad()\n\t\t\tself.optimizer_G_2.zero_grad()\n\t\t\tself.set_requires_grad([self.netG_A_1, self.netG_A_2], True)\n\t\t\tself.set_requires_grad([self.netG_B_1, self.netG_B_2], False)\n\t\t\t\n\t\t\tself.backward_HF_CCD(opt)\n\t\t\tself.optimizer_G_1.step()\n\t\t\tself.optimizer_G_2.step()\n\t\t\n\t\t# D_A and D_B\n\t\tif opt.Shared_DT:\n\t\t\tself.set_requires_grad([self.netD_A, self.netD_B_1, self.netD_B_2], True)\n\t\telse:\n\t\t\tself.set_requires_grad([self.netD_A_1, self.netD_B_1], True)\n\t\t\tself.set_requires_grad([self.netD_A_2, self.netD_B_2], True)\n\t\t\n\t\tif opt.Shared_DT:\n\t\t\tself.optimizer_D.zero_grad()\n\t\telse:\n\t\t\tself.optimizer_D_1.zero_grad()\n\t\t\tself.optimizer_D_2.zero_grad()\n\t\t\n\t\tself.backward_D_B()\n\t\tself.backward_D_A(opt.Shared_DT)\n\t\tif opt.Shared_DT:\n\t\t\tself.optimizer_D.step()\n\t\telse:\n\t\t\tself.optimizer_D_1.step()\n\t\t\tself.optimizer_D_2.step()\n\t\t\n\t\tif opt.SAD:\n\t\t\tself.set_requires_grad([self.netD_3], True)\n\t\t\tself.optimizer_D_3.zero_grad()\n\t\t\tself.backward_D('SAD')\n\t\t\tself.optimizer_D_3.step()\n\t\t\n\t\tif opt.CCD or opt.HF_CCD:\n\t\t\tself.set_requires_grad([self.netD_21], True)\n\t\t\tself.optimizer_D_21.zero_grad()\n\t\t\tself.backward_D('CCD_21')\n\t\t\tself.optimizer_D_21.step()\n\t\t\t\n\t\t\tself.set_requires_grad([self.netD_12], True)\n\t\t\tself.optimizer_D_12.zero_grad()\n\t\t\tself.backward_D('CCD_12')\n\t\t\tself.optimizer_D_12.step()\n"
  },
  {
    "path": "cyclegan/models/networks.py",
    "content": "import functools\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init\nfrom torch.optim import lr_scheduler\n\n\n###############################################################################\n# Helper Functions\n###############################################################################\n\n\ndef get_norm_layer(norm_type='instance'):\n\tif norm_type == 'batch':\n\t\tnorm_layer = functools.partial(nn.BatchNorm2d, affine=True)\n\telif norm_type == 'instance':\n\t\tnorm_layer = functools.partial(nn.InstanceNorm2d, affine=False)\n\telif norm_type == 'none':\n\t\tnorm_layer = None\n\telse:\n\t\traise NotImplementedError('normalization layer [%s] is not found' % norm_type)\n\treturn norm_layer\n\n\ndef get_scheduler(optimizer, opt):\n\tif opt.lr_policy == 'lambda':\n\t\tdef lambda_rule(epoch):\n\t\t\tlr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)\n\t\t\treturn lr_l\n\t\t\n\t\tscheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)\n\telif opt.lr_policy == 'step':\n\t\tscheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)\n\telif opt.lr_policy == 'plateau':\n\t\tscheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)\n\telse:\n\t\treturn NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)\n\treturn scheduler\n\n\ndef init_weights(net, init_type='normal', gain=0.02):\n\tdef init_func(m):\n\t\tclassname = m.__class__.__name__\n\t\tif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n\t\t\tif init_type == 'normal':\n\t\t\t\tinit.normal_(m.weight.data, 0.0, gain)\n\t\t\telif init_type == 'xavier':\n\t\t\t\tinit.xavier_normal_(m.weight.data, gain=gain)\n\t\t\telif init_type == 'kaiming':\n\t\t\t\tinit.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n\t\t\telif init_type == 'orthogonal':\n\t\t\t\tinit.orthogonal_(m.weight.data, 
gain=gain)\n\t\t\telse:\n\t\t\t\traise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n\t\t\tif hasattr(m, 'bias') and m.bias is not None:\n\t\t\t\tinit.constant_(m.bias.data, 0.0)\n\t\telif classname.find('BatchNorm2d') != -1:\n\t\t\tinit.normal_(m.weight.data, 1.0, gain)\n\t\t\tinit.constant_(m.bias.data, 0.0)\n\t\n\tprint('initialize network with %s' % init_type)\n\tnet.apply(init_func)\n\n\ndef init_net(net, init_type='normal', gpu_ids=[]):\n\tif len(gpu_ids) > 0:\n\t\tassert (torch.cuda.is_available())\n\t\tnet.to(gpu_ids[0])\n\t\tnet = torch.nn.DataParallel(net, gpu_ids)\n\tinit_weights(net, init_type)\n\treturn net\n\n\ndef print_network(net):\n\tnum_params = 0\n\tfor param in net.parameters():\n\t\tnum_params += param.numel()\n\tprint(net)\n\tprint('Total number of parameters: %d' % num_params)\n\n\ndef define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='normal', gpu_ids=[]):\n\tnetG = None\n\tnorm_layer = get_norm_layer(norm_type=norm)\n\t\n\tif which_model_netG == 'resnet_9blocks':\n\t\tnetG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)\n\telif which_model_netG == 'resnet_6blocks':\n\t\tnetG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)\n\telif which_model_netG == 'unet_128':\n\t\tnetG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n\telif which_model_netG == 'unet_256':\n\t\tnetG = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n\telse:\n\t\traise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)\n\treturn init_net(netG, init_type, gpu_ids)\n\n\ndef define_D(input_nc, ndf, which_model_netD,\n             n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=[]):\n\tnetD = None\n\tnorm_layer = 
get_norm_layer(norm_type=norm)\n\t\n\tif which_model_netD == 'basic':\n\t\tnetD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid)\n\telif which_model_netD == 'n_layers':\n\t\tnetD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid)\n\telif which_model_netD == 'pixel':\n\t\tnetD = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid)\n\telse:\n\t\traise NotImplementedError('Discriminator model name [%s] is not recognized' %\n\t\t                          which_model_netD)\n\treturn init_net(netD, init_type, gpu_ids)\n\n\ndef define_C(output_nc, ndf, init_type='normal', gpu_ids=[]):\n\t# if output_nc == 3:\n\t#    netC = get_model('DTN', num_cls=10)\n\t# else:\n\t#    Exception('classifier only implemented for 32x32x3 images')\n\tnetC = Classifier(output_nc, ndf)\n\treturn init_net(netC, init_type, gpu_ids)\n\n\n##############################################################################\n# Classes\n##############################################################################\n\n\n# Defines the GAN loss which uses either LSGAN or the regular GAN.\n# When LSGAN is used, it is basically same as MSELoss,\n# but it abstracts away the need to create the target label tensor\n# that has the same size as the input\nclass GANLoss(nn.Module):\n\tdef __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0):\n\t\tsuper(GANLoss, self).__init__()\n\t\tself.register_buffer('real_label', torch.tensor(target_real_label))\n\t\tself.register_buffer('fake_label', torch.tensor(target_fake_label))\n\t\tif use_lsgan:\n\t\t\tself.loss = nn.MSELoss()\n\t\telse:\n\t\t\tself.loss = nn.BCELoss()\n\t\n\tdef get_target_tensor(self, input, target_is_real):\n\t\tif target_is_real:\n\t\t\ttarget_tensor = self.real_label\n\t\telse:\n\t\t\ttarget_tensor = self.fake_label\n\t\treturn target_tensor.expand_as(input)\n\t\n\tdef __call__(self, input, 
target_is_real):\n\t\ttarget_tensor = self.get_target_tensor(input, target_is_real)\n\t\treturn self.loss(input, target_tensor)\n\n\n# Defines the generator that consists of Resnet blocks between a few\n# downsampling/upsampling operations.\n# Code and idea originally from Justin Johnson's architecture.\n# https://github.com/jcjohnson/fast-neural-style/\nclass ResnetGenerator(nn.Module):\n\tdef __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):\n\t\tassert (n_blocks >= 0)\n\t\tsuper(ResnetGenerator, self).__init__()\n\t\tself.input_nc = input_nc\n\t\tself.output_nc = output_nc\n\t\tself.ngf = ngf\n\t\tif type(norm_layer) == functools.partial:\n\t\t\tuse_bias = norm_layer.func == nn.InstanceNorm2d\n\t\telse:\n\t\t\tuse_bias = norm_layer == nn.InstanceNorm2d\n\t\t\n\t\tmodel = [nn.ReflectionPad2d(3),\n\t\t         nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,\n\t\t                   bias=use_bias),\n\t\t         norm_layer(ngf),\n\t\t         nn.ReLU(True)]\n\t\t\n\t\tn_downsampling = 2\n\t\tfor i in range(n_downsampling):\n\t\t\tmult = 2 ** i\n\t\t\tmodel += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,\n\t\t\t                    stride=2, padding=1, bias=use_bias),\n\t\t\t          norm_layer(ngf * mult * 2),\n\t\t\t          nn.ReLU(True)]\n\t\t\n\t\tmult = 2 ** n_downsampling\n\t\tfor i in range(n_blocks):\n\t\t\tmodel += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]\n\t\t\n\t\tfor i in range(n_downsampling):\n\t\t\tmult = 2 ** (n_downsampling - i)\n\t\t\tmodel += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),\n\t\t\t                             kernel_size=3, stride=2,\n\t\t\t                             padding=1, output_padding=1,\n\t\t\t                             bias=use_bias),\n\t\t\t          norm_layer(int(ngf * mult / 2)),\n\t\t\t          nn.ReLU(True)]\n\t\tmodel += 
[nn.ReflectionPad2d(3)]\n\t\tmodel += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n\t\tmodel += [nn.Tanh()]\n\t\t\n\t\tself.model = nn.Sequential(*model)\n\t\n\tdef forward(self, input):\n\t\treturn self.model(input)\n\n\n# Define a resnet block\nclass ResnetBlock(nn.Module):\n\tdef __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n\t\tsuper(ResnetBlock, self).__init__()\n\t\tself.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)\n\t\n\tdef build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n\t\tconv_block = []\n\t\tp = 0\n\t\tif padding_type == 'reflect':\n\t\t\tconv_block += [nn.ReflectionPad2d(1)]\n\t\telif padding_type == 'replicate':\n\t\t\tconv_block += [nn.ReplicationPad2d(1)]\n\t\telif padding_type == 'zero':\n\t\t\tp = 1\n\t\telse:\n\t\t\traise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\t\t\n\t\tconv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),\n\t\t               norm_layer(dim),\n\t\t               nn.ReLU(True)]\n\t\tif use_dropout:\n\t\t\tconv_block += [nn.Dropout(0.5)]\n\t\t\n\t\tp = 0\n\t\tif padding_type == 'reflect':\n\t\t\tconv_block += [nn.ReflectionPad2d(1)]\n\t\telif padding_type == 'replicate':\n\t\t\tconv_block += [nn.ReplicationPad2d(1)]\n\t\telif padding_type == 'zero':\n\t\t\tp = 1\n\t\telse:\n\t\t\traise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\t\tconv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),\n\t\t               norm_layer(dim)]\n\t\t\n\t\treturn nn.Sequential(*conv_block)\n\t\n\tdef forward(self, x):\n\t\tout = x + self.conv_block(x)\n\t\treturn out\n\n\n# Defines the Unet generator.\n# |num_downs|: number of downsamplings in UNet. 
For example,\n# if |num_downs| == 7, image of size 128x128 will become of size 1x1\n# at the bottleneck\nclass UnetGenerator(nn.Module):\n\tdef __init__(self, input_nc, output_nc, num_downs, ngf=64,\n\t             norm_layer=nn.BatchNorm2d, use_dropout=False):\n\t\tsuper(UnetGenerator, self).__init__()\n\t\t\n\t\t# construct unet structure\n\t\tunet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)\n\t\tfor i in range(num_downs - 5):\n\t\t\tunet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer,\n\t\t\t                                     use_dropout=use_dropout)\n\t\tunet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n\t\tunet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n\t\tunet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n\t\tunet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)\n\t\t\n\t\tself.model = unet_block\n\t\n\tdef forward(self, input):\n\t\treturn self.model(input)\n\n\n# Defines the submodule with skip connection.\n# X -------------------identity---------------------- X\n#   |-- downsampling -- |submodule| -- upsampling --|\nclass UnetSkipConnectionBlock(nn.Module):\n\tdef __init__(self, outer_nc, inner_nc, input_nc=None,\n\t             submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):\n\t\tsuper(UnetSkipConnectionBlock, self).__init__()\n\t\tself.outermost = outermost\n\t\tif type(norm_layer) == functools.partial:\n\t\t\tuse_bias = norm_layer.func == nn.InstanceNorm2d\n\t\telse:\n\t\t\tuse_bias = norm_layer == nn.InstanceNorm2d\n\t\tif input_nc is None:\n\t\t\tinput_nc = outer_nc\n\t\tdownconv = 
nn.Conv2d(input_nc, inner_nc, kernel_size=4,\n\t\t                     stride=2, padding=1, bias=use_bias)\n\t\tdownrelu = nn.LeakyReLU(0.2, True)\n\t\tdownnorm = norm_layer(inner_nc)\n\t\tuprelu = nn.ReLU(True)\n\t\tupnorm = norm_layer(outer_nc)\n\t\t\n\t\tif outermost:\n\t\t\tupconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n\t\t\t                            kernel_size=4, stride=2,\n\t\t\t                            padding=1)\n\t\t\tdown = [downconv]\n\t\t\tup = [uprelu, upconv, nn.Tanh()]\n\t\t\tmodel = down + [submodule] + up\n\t\telif innermost:\n\t\t\tupconv = nn.ConvTranspose2d(inner_nc, outer_nc,\n\t\t\t                            kernel_size=4, stride=2,\n\t\t\t                            padding=1, bias=use_bias)\n\t\t\tdown = [downrelu, downconv]\n\t\t\tup = [uprelu, upconv, upnorm]\n\t\t\tmodel = down + up\n\t\telse:\n\t\t\tupconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n\t\t\t                            kernel_size=4, stride=2,\n\t\t\t                            padding=1, bias=use_bias)\n\t\t\tdown = [downrelu, downconv, downnorm]\n\t\t\tup = [uprelu, upconv, upnorm]\n\t\t\t\n\t\t\tif use_dropout:\n\t\t\t\tmodel = down + [submodule] + up + [nn.Dropout(0.5)]\n\t\t\telse:\n\t\t\t\tmodel = down + [submodule] + up\n\t\t\n\t\tself.model = nn.Sequential(*model)\n\t\n\tdef forward(self, x):\n\t\tif self.outermost:\n\t\t\treturn self.model(x)\n\t\telse:\n\t\t\treturn torch.cat([x, self.model(x)], 1)\n\n\n# Defines the PatchGAN discriminator with the specified arguments.\nclass NLayerDiscriminator(nn.Module):\n\tdef __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False):\n\t\tsuper(NLayerDiscriminator, self).__init__()\n\t\tif type(norm_layer) == functools.partial:\n\t\t\tuse_bias = norm_layer.func == nn.InstanceNorm2d\n\t\telse:\n\t\t\tuse_bias = norm_layer == nn.InstanceNorm2d\n\t\t\n\t\tkw = 4\n\t\tpadw = 1\n\t\tsequence = [\n\t\t\tnn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, 
padding=padw),\n\t\t\tnn.LeakyReLU(0.2, True)\n\t\t]\n\t\t\n\t\tnf_mult = 1\n\t\tnf_mult_prev = 1\n\t\tfor n in range(1, n_layers):\n\t\t\tnf_mult_prev = nf_mult\n\t\t\tnf_mult = min(2 ** n, 8)\n\t\t\tsequence += [\n\t\t\t\tnn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n\t\t\t\t          kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n\t\t\t\tnorm_layer(ndf * nf_mult),\n\t\t\t\tnn.LeakyReLU(0.2, True)\n\t\t\t]\n\t\t\n\t\tnf_mult_prev = nf_mult\n\t\tnf_mult = min(2 ** n_layers, 8)\n\t\tsequence += [\n\t\t\tnn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n\t\t\t          kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n\t\t\tnorm_layer(ndf * nf_mult),\n\t\t\tnn.LeakyReLU(0.2, True)\n\t\t]\n\t\t\n\t\tsequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]\n\t\t\n\t\tif use_sigmoid:\n\t\t\tsequence += [nn.Sigmoid()]\n\t\t\n\t\tself.model = nn.Sequential(*sequence)\n\t\n\tdef forward(self, input):\n\t\treturn self.model(input)\n\n\nclass PixelDiscriminator(nn.Module):\n\tdef __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False):\n\t\tsuper(PixelDiscriminator, self).__init__()\n\t\tif type(norm_layer) == functools.partial:\n\t\t\tuse_bias = norm_layer.func == nn.InstanceNorm2d\n\t\telse:\n\t\t\tuse_bias = norm_layer == nn.InstanceNorm2d\n\t\t\n\t\tself.net = [\n\t\t\tnn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),\n\t\t\tnn.LeakyReLU(0.2, True),\n\t\t\tnn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),\n\t\t\tnorm_layer(ndf * 2),\n\t\t\tnn.LeakyReLU(0.2, True),\n\t\t\tnn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]\n\t\t\n\t\tif use_sigmoid:\n\t\t\tself.net.append(nn.Sigmoid())\n\t\t\n\t\tself.net = nn.Sequential(*self.net)\n\t\n\tdef forward(self, input):\n\t\treturn self.net(input)\n\n\nclass Classifier(nn.Module):\n\tdef __init__(self, input_nc, ndf, norm_layer=nn.BatchNorm2d):\n\t\tsuper(Classifier, self).__init__()\n\t\t\n\t\tkw = 
3\n\t\tsequence = [\n\t\t\tnn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2),\n\t\t\tnn.LeakyReLU(0.2, True)\n\t\t]\n\t\t\n\t\tnf_mult = 1\n\t\tnf_mult_prev = 1\n\t\tfor n in range(3):\n\t\t\tnf_mult_prev = nf_mult\n\t\t\tnf_mult = min(2 ** n, 8)\n\t\t\tsequence += [\n\t\t\t\tnn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n\t\t\t\t          kernel_size=kw, stride=2),\n\t\t\t\tnorm_layer(ndf * nf_mult, affine=True),\n\t\t\t\tnn.LeakyReLU(0.2, True)\n\t\t\t]\n\t\tself.before_linear = nn.Sequential(*sequence)\n\t\t\n\t\tsequence = [\n\t\t\tnn.Linear(ndf * nf_mult, 1024),\n\t\t\tnn.Linear(1024, 10)\n\t\t]\n\t\t\n\t\tself.after_linear = nn.Sequential(*sequence)\n\t\n\tdef forward(self, x):\n\t\tbs = x.size(0)\n\t\tout = self.after_linear(self.before_linear(x).view(bs, -1))\n\t\treturn out\n#       return nn.functional.log_softmax(out, dim=1)\n"
  },
  {
    "path": "cyclegan/models/test_model.py",
    "content": "from . import networks\nfrom .base_model import BaseModel\n\n\nclass TestModel(BaseModel):\n\tdef name(self):\n\t\treturn 'TestModel'\n\t\n\tdef initialize(self, opt):\n\t\tassert (not opt.isTrain)\n\t\tBaseModel.initialize(self, opt)\n\t\t\n\t\t# specify the training losses you want to print out. The program will call base_model.get_current_losses\n\t\tself.loss_names = []\n\t\t# specify the images you want to save/display. The program will call base_model.get_current_visuals\n\t\tself.visual_names = ['real_A']\n\t\t# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks\n\t\t\n\t\tif opt.dataset_mode == 'synthia_cityscapes':\n\t\t\tself.model_names = ['G_A_1']\n\t\t\tself.visual_names.append('fake_B_1')\n\t\t\tself.netG_A_1 = networks.define_G(opt.input_nc, opt.output_nc,\n\t\t\t                                  opt.ngf, opt.which_model_netG,\n\t\t\t                                  opt.norm, not opt.no_dropout,\n\t\t\t                                  opt.init_type,\n\t\t\t                                  self.gpu_ids)\n\t\t\n\t\telif opt.dataset_mode == 'gta5_cityscapes':\n\t\t\tself.model_names = ['G_A_2']\n\t\t\tself.visual_names.append('fake_B_2')\n\t\t\tself.netG_A_2 = networks.define_G(opt.input_nc, opt.output_nc,\n\t\t\t                                  opt.ngf, opt.which_model_netG,\n\t\t\t                                  opt.norm, not opt.no_dropout,\n\t\t\t                                  opt.init_type,\n\t\t\t                                  self.gpu_ids)\n\t\n\tdef set_input(self, input):\n\t\t# we need to use single_dataset mode\n\t\tself.real_A = input['A'].to(self.device)\n\t\tself.image_paths = input['A_paths']\n\t\n\tdef forward(self):\n\t\tif hasattr(self, 'netG_A_1'):\n\t\t\tself.fake_B_1 = self.netG_A_1(self.real_A)\n\t\telif hasattr(self, 'netG_A_2'):\n\t\t\tself.fake_B_2 = self.netG_A_2(self.real_A)\n"
  },
  {
    "path": "cyclegan/options/__init__.py",
    "content": ""
  },
  {
    "path": "cyclegan/options/base_options.py",
    "content": "import argparse\nimport os\n\nimport torch\nfrom util import util\n\n\nclass BaseOptions():\n\tdef __init__(self):\n\t\tself.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\t\tself.initialized = False\n\t\n\tdef initialize(self):\n\t\tself.parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')\n\t\tself.parser.add_argument('--batchSize', type=int, default=1, help='input batch size')\n\t\tself.parser.add_argument('--loadSize', type=int, default=600, help='scale images to this size')\n\t\tself.parser.add_argument('--fineSize', type=int, default=600, help='then crop to this size')\n\t\tself.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')\n\t\tself.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')\n\t\tself.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')\n\t\tself.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')\n\t\tself.parser.add_argument('--which_model_netD', type=str, default='n_layers', help='selects model to use for netD')\n\t\tself.parser.add_argument('--which_model_netG', type=str, default='resnet_9blocks', help='selects model to use for netG')\n\t\tself.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')\n\t\tself.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')\n\t\tself.parser.add_argument('--name', type=str, default='experiment_name',\n\t\t                         help='name of the experiment. It decides where to store samples and models')\n\t\tself.parser.add_argument('--dataset_mode', type=str, default='unaligned',\n\t\t                         help='chooses how datasets are loaded. 
[unaligned | aligned | single]')\n\t\tself.parser.add_argument('--model', type=str, default='cycle_gan',\n\t\t                         help='chooses which model to use. cycle_gan, pix2pix, test')\n\t\tself.parser.add_argument('--weights_model_type', type=str, default='drn26',\n\t\t                         help='chooses which model to use. drn26, fcn8s')\n\t\tself.parser.add_argument('--num_cls', default=19, type=int)\n\t\tself.parser.add_argument('--max_epoch', default=20, type=int)\n\t\tself.parser.add_argument('--current_epoch', default=0, type=int)\n\t\tself.parser.add_argument('--weights_init', type=str)\n\t\tself.parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')\n\t\tself.parser.add_argument('--nThreads', default=16, type=int, help='# threads for loading data')\n\t\tself.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')\n\t\tself.parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')\n\t\tself.parser.add_argument('--serial_batches', action='store_true',\n\t\t                         help='if true, takes images in order to make batches, otherwise takes them randomly')\n\t\tself.parser.add_argument('--display_winsize', type=int, default=256, help='display window size')\n\t\tself.parser.add_argument('--display_id', type=int, default=0, help='window id of the web display')\n\t\tself.parser.add_argument('--display_server', type=str, default=\"http://localhost\", help='visdom server of the web display')\n\t\tself.parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')\n\t\tself.parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')\n\t\tself.parser.add_argument('--max_dataset_size', type=int, default=float(\"inf\"),\n\t\t                         help='Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, '\n\t\t                              'only a subset is loaded.')\n\t\tself.parser.add_argument('--resize_or_crop', type=str, default='scale_width_and_crop',\n\t\t                         help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')\n\t\tself.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')\n\t\tself.parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')\n\t\tself.parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')\n\t\tself.parser.add_argument('--suffix', default='', type=str,\n\t\t                         help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{which_model_netG}_size{loadSize}')\n\t\tself.parser.add_argument('--out_all', action='store_true', help='output all stylized images(fake_B_{})')\n\t\tself.parser.add_argument('--SAD', action='store_true', help='Sub-domain Aggregation Discriminator module')\n\t\tself.parser.add_argument('--CCD', action='store_true', help='Cross-domain Cycle Discriminator module')\n\t\tself.parser.add_argument('--CCD_weight', type=float, default=1, help='weight for cross domain cycle discriminator loss')\n\t\tself.parser.add_argument('--HF_CCD', action='store_true', help='Half Freeze Cross-domain Cycle Discriminator module')\n\t\tself.parser.add_argument('--CCD_frozen_epoch', type=int, default=-1)\n\t\tself.parser.add_argument('--SAD_frozen_epoch', type=int, default=-1)\n\t\tself.parser.add_argument('--Shared_DT', type=bool, default=True, help=\"Through \")\n\t\tself.parser.add_argument('--model_type', type=str, default='fcn8s', help=\"choose to load which type of model (fcn8s, drn26, deeplabv2)\")\n\t\tself.parser.add_argument('--semantic_loss', action='store_true', help='use semantic 
loss')\n\t\tself.parser.add_argument('--general_semantic_weight', type=float, default=0.2, help='weight for semantic loss')\n\t\tself.parser.add_argument('--weights_syn', type=str, default='', help='init weights for synthia')\n\t\tself.parser.add_argument('--weights_gta', type=str, default='', help='init weights for gta')\n\t\t\n\t\tself.parser.add_argument('--inference_script', type=str, default='', help='inference script')\n\t\tself.parser.add_argument('--dynamic_weight', type=float, default=10, help='Weight for Dynamic Semantic Loss(KL div) loss')\n\t\tself.initialized = True\n\t\n\tdef parse(self):\n\t\tif not self.initialized:\n\t\t\tself.initialize()\n\t\topt = self.parser.parse_args()\n\t\topt.isTrain = self.isTrain  # train or test\n\t\t\n\t\tstr_ids = opt.gpu_ids.split(',')\n\t\topt.gpu_ids = []\n\t\tfor str_id in str_ids:\n\t\t\tid = int(str_id)\n\t\t\tif id >= 0:\n\t\t\t\topt.gpu_ids.append(id)\n\t\t\n\t\t# set gpu ids\n\t\tif len(opt.gpu_ids) > 0:\n\t\t\ttorch.cuda.set_device(opt.gpu_ids[0])\n\t\t\n\t\targs = vars(opt)\n\t\t\n\t\tprint('------------ Options -------------')\n\t\tfor k, v in sorted(args.items()):\n\t\t\tprint('%s: %s' % (str(k), str(v)))\n\t\tprint('-------------- End ----------------')\n\t\t\n\t\tif opt.suffix:\n\t\t\tsuffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''\n\t\t\topt.name = opt.name + suffix\n\t\t# save to the disk\n\t\texpr_dir = os.path.join(opt.checkpoints_dir, opt.name)\n\t\tutil.mkdirs(expr_dir)\n\t\tfile_name = os.path.join(expr_dir, 'opt.txt')\n\t\twith open(file_name, 'wt') as opt_file:\n\t\t\topt_file.write('------------ Options -------------\\n')\n\t\t\tfor k, v in sorted(args.items()):\n\t\t\t\topt_file.write('%s: %s\\n' % (str(k), str(v)))\n\t\t\topt_file.write('-------------- End ----------------\\n')\n\t\tself.opt = opt\n\t\treturn self.opt\n"
  },
  {
    "path": "cyclegan/options/test_options.py",
    "content": "from .base_options import BaseOptions\n\n\nclass TestOptions(BaseOptions):\n    def initialize(self):\n        BaseOptions.initialize(self)\n        self.parser.add_argument('--ntest', type=int, default=float(\"inf\"), help='# of test examples.')\n        self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')\n        self.parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')\n        self.parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')\n        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')\n        self.parser.add_argument('--how_many', type=int, default=50, help='how many test images to run')\n        self.isTrain = False\n"
  },
  {
    "path": "cyclegan/options/train_options.py",
    "content": "from .base_options import BaseOptions\n\n\nclass TrainOptions(BaseOptions):\n\tdef initialize(self):\n\t\tBaseOptions.initialize(self)\n\t\tself.parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')\n\t\tself.parser.add_argument('--display_ncols', type=int, default=4,\n\t\t                         help='if positive, display all images in a single visdom web panel with certain number of images per row.')\n\t\tself.parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')\n\t\tself.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')\n\t\tself.parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')\n\t\tself.parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')\n\t\tself.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')\n\t\tself.parser.add_argument('--epoch_count', type=int, default=1,\n\t\t                         help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')\n\t\tself.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')\n\t\tself.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? 
set to latest to use latest cached model')\n\t\tself.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')\n\t\tself.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')\n\t\tself.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')\n\t\tself.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')\n\t\tself.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN')\n\t\tself.parser.add_argument('--lambda_A', type=float, default=1.0, help='weight for cycle loss (A -> B -> A)')\n\t\tself.parser.add_argument('--lambda_B', type=float, default=1.0, help='weight for cycle loss (B -> A -> B)')\n\t\tself.parser.add_argument('--lambda_identity', type=float, default=0,\n\t\t                         help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the '\n\t\t                              'identity mapping loss.'\n\t\t                              'For example, if the weight of the identity loss should be 10 times smaller than the weight of the '\n\t\t                              'reconstruction loss, please set lambda_identity = 0.1')\n\t\tself.parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')\n\t\tself.parser.add_argument('--no_html', action='store_true',\n\t\t                         help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')\n\t\tself.parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau')\n\t\tself.parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')\n\t\tself.isTrain = True\n"
  },
  {
    "path": "cyclegan/test.py",
    "content": "import os\nimport sys\n\nimport torch\nfrom models import create_model\nfrom options.test_options import TestOptions\nfrom util import html\nfrom util.visualizer import save_images\n\nfrom data import CreateDataLoader\nimport logging\n\nsys.path.append(\"/nfs/project/libo_i/MADAN\")\n\nif __name__ == '__main__':\n\topt = TestOptions().parse()\n\topt.serial_batches = True  # no shuffle\n\topt.no_flip = True  # no flip\n\topt.display_id = -1  # no visdom display\n\tdata_loader = CreateDataLoader(opt)\n\tdataset = data_loader.load_data()\n\tmodel = create_model(opt)\n\tmodel.setup(opt)\n\t# create website\n\tweb_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))\n\twebpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))\n\t# test\n\tfor i, data in enumerate(dataset):\n\t\tif i >= opt.how_many:\n\t\t\tbreak\n\t\t# check img size\n\t\tif i == 0:\n\t\t\tfor item in data.items():\n\t\t\t\tif isinstance(item[1], torch.Tensor):\n\t\t\t\t\tlogging.info(item[0], item[1].size())\n\t\t\n\t\tmodel.set_input(data)\n\t\tmodel.test()\n\t\tvisuals = model.get_current_visuals()\n\t\t# remove reductant files when outputing\n\t\tif opt.out_all:\n\t\t\tremove_list = []\n\t\t\tfor item in visuals:\n\t\t\t\tif 'fake_B' not in item:\n\t\t\t\t\tremove_list.append(item)\n\t\t\t\n\t\t\tfor rm_item in remove_list:\n\t\t\t\tdel visuals[rm_item]\n\t\t\n\t\timg_path = model.get_image_paths()\n\t\tif i % 5 == 0:\n\t\t\tlogging.info('processing (%04d)-th image...' % (i * opt.batchSize))\n\t\tif 'mul' in opt.model:\n\t\t\tsave_images(webpage.get_image_dir(), visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize, multi_flag=True)\n\t\telse:\n\t\t\tsave_images(webpage.get_image_dir(), visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)\n\n"
  },
  {
    "path": "cyclegan/train.py",
    "content": "import subprocess\nimport sys\nimport time\n\nsys.path.append(\"/nfs/project/libo_i/MADAN/cyclegan\")\nfrom options.train_options import TrainOptions\nfrom data import CreateDataLoader\nfrom models import create_model\nfrom util.visualizer import Visualizer\nimport torch\nimport logging\n\nif __name__ == '__main__':\n\topt = TrainOptions().parse()\n\tdata_loader = CreateDataLoader(opt)\n\tdataset = data_loader.load_data()\n\tdataset_size = len(data_loader)\n\tlogging.info('#training images = %d' % dataset_size)\n\tmodel = create_model(opt)\n\tmodel.setup(opt)\n\tvisualizer = Visualizer(opt)\n\ttotal_steps = 0\n\tfor epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):\n\t\tepoch_start_time = time.time()\n\t\titer_data_time = time.time()\n\t\tepoch_iter = 0\n\t\topt.current_epoch = epoch\n\t\tlogging.info(\"Current epoch update to {}\".format(opt.current_epoch))\n\t\tfor i, data in enumerate(dataset):\n\t\t\tif total_steps == 0:\n\t\t\t\tfor item in data.items():\n\t\t\t\t\tif isinstance(item[1], torch.Tensor):\n\t\t\t\t\t\tlogging.info(item[1].size())\n\t\t\titer_start_time = time.time()\n\t\t\tif total_steps % opt.print_freq == 0:\n\t\t\t\tt_data = iter_start_time - iter_data_time\n\t\t\tvisualizer.reset()\n\t\t\ttotal_steps += opt.batchSize\n\t\t\tepoch_iter += opt.batchSize\n\t\t\tmodel.set_input(data)\n\t\t\tmodel.optimize_parameters(opt)\n\t\t\t\n\t\t\tif total_steps % opt.display_freq == 0:\n\t\t\t\tsave_result = total_steps % opt.update_html_freq == 0\n\t\t\t\tvisualizer.display_current_results(model.get_current_visuals(), epoch, save_result)\n\t\t\t\n\t\t\tif total_steps % opt.print_freq == 0:\n\t\t\t\tlosses = model.get_current_losses()\n\t\t\t\tt = (time.time() - iter_start_time) / opt.batchSize\n\t\t\t\tvisualizer.print_current_losses(epoch, epoch_iter, losses, t, t_data)\n\t\t\t\tif opt.display_id > 0:\n\t\t\t\t\tvisualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, opt, losses)\n\t\t\t\n\t\t\tif 
total_steps % opt.save_latest_freq == 0:\n\t\t\t\tlogging.info('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))\n\t\t\t\tmodel.save_networks('latest')\n\t\t\titer_data_time = time.time()\n\t\t\n\t\tif epoch % opt.save_epoch_freq == 0:\n\t\t\tlogging.info('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))\n\t\t\tmodel.save_networks('latest')\n\t\t\tmodel.save_networks(epoch)\n\t\t\n\t\tlogging.info('End of epoch %d / %d \\t Time Taken: %d sec' % (epoch, opt.max_epoch, time.time() - epoch_start_time))\n\t\tmodel.update_learning_rate()\n"
  },
  {
    "path": "cyclegan/util/__init__.py",
    "content": ""
  },
  {
    "path": "cyclegan/util/get_data.py",
    "content": "from __future__ import print_function\nimport os\nimport tarfile\nimport requests\nfrom warnings import warn\nfrom zipfile import ZipFile\nfrom bs4 import BeautifulSoup\nfrom os.path import abspath, isdir, join, basename\n\n\nclass GetData(object):\n    \"\"\"\n\n    Download CycleGAN or Pix2Pix Data.\n\n    Args:\n        technique : str\n            One of: 'cyclegan' or 'pix2pix'.\n        verbose : bool\n            If True, print additional information.\n\n    Examples:\n        >>> from util.get_data import GetData\n        >>> gd = GetData(technique='cyclegan')\n        >>> new_data_path = gd.get(save_path='./datasets')  # options will be displayed.\n\n    \"\"\"\n\n    def __init__(self, technique='cyclegan', verbose=True):\n        url_dict = {\n            'pix2pix': 'https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets',\n            'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets'\n        }\n        self.url = url_dict.get(technique.lower())\n        self._verbose = verbose\n\n    def _print(self, text):\n        if self._verbose:\n            print(text)\n\n    @staticmethod\n    def _get_options(r):\n        soup = BeautifulSoup(r.text, 'lxml')\n        options = [h.text for h in soup.find_all('a', href=True)\n                   if h.text.endswith(('.zip', 'tar.gz'))]\n        return options\n\n    def _present_options(self):\n        r = requests.get(self.url)\n        options = self._get_options(r)\n        print('Options:\\n')\n        for i, o in enumerate(options):\n            print(\"{0}: {1}\".format(i, o))\n        choice = input(\"\\nPlease enter the number of the \"\n                       \"dataset above you wish to download:\")\n        return options[int(choice)]\n\n    def _download_data(self, dataset_url, save_path):\n        if not isdir(save_path):\n            os.makedirs(save_path)\n\n        base = basename(dataset_url)\n        temp_save_path = join(save_path, 
base)\n\n        with open(temp_save_path, \"wb\") as f:\n            r = requests.get(dataset_url)\n            f.write(r.content)\n\n        if base.endswith('.tar.gz'):\n            obj = tarfile.open(temp_save_path)\n        elif base.endswith('.zip'):\n            obj = ZipFile(temp_save_path, 'r')\n        else:\n            raise ValueError(\"Unknown File Type: {0}.\".format(base))\n\n        self._print(\"Unpacking Data...\")\n        obj.extractall(save_path)\n        obj.close()\n        os.remove(temp_save_path)\n\n    def get(self, save_path, dataset=None):\n        \"\"\"\n\n        Download a dataset.\n\n        Args:\n            save_path : str\n                A directory to save the data to.\n            dataset : str, optional\n                A specific dataset to download.\n                Note: this must include the file extension.\n                If None, options will be presented for you\n                to choose from.\n\n        Returns:\n            save_path_full : str\n                The absolute path to the downloaded data.\n\n        \"\"\"\n        if dataset is None:\n            selected_dataset = self._present_options()\n        else:\n            selected_dataset = dataset\n\n        save_path_full = join(save_path, selected_dataset.split('.')[0])\n\n        if isdir(save_path_full):\n            warn(\"\\n'{0}' already exists. Voiding Download.\".format(\n                save_path_full))\n        else:\n            self._print('Downloading Data...')\n            url = \"{0}/{1}\".format(self.url, selected_dataset)\n            self._download_data(url, save_path=save_path)\n\n        return abspath(save_path_full)\n"
  },
  {
    "path": "cyclegan/util/html.py",
    "content": "import dominate\nfrom dominate.tags import *\nimport os\n\n\nclass HTML:\n    def __init__(self, web_dir, title, reflesh=0):\n        self.title = title\n        self.web_dir = web_dir\n        self.img_dir = os.path.join(self.web_dir, 'images')\n        if not os.path.exists(self.web_dir):\n            os.makedirs(self.web_dir)\n        if not os.path.exists(self.img_dir):\n            os.makedirs(self.img_dir)\n        # print(self.img_dir)\n\n        self.doc = dominate.document(title=title)\n        if reflesh > 0:\n            with self.doc.head:\n                meta(http_equiv=\"reflesh\", content=str(reflesh))\n\n    def get_image_dir(self):\n        return self.img_dir\n\n    def add_header(self, str):\n        with self.doc:\n            h3(str)\n\n    def add_table(self, border=1):\n        self.t = table(border=border, style=\"table-layout: fixed;\")\n        self.doc.add(self.t)\n\n    def add_images(self, ims, txts, links, width=400):\n        self.add_table()\n        with self.t:\n            with tr():\n                for im, txt, link in zip(ims, txts, links):\n                    with td(style=\"word-wrap: break-word;\", halign=\"center\", valign=\"top\"):\n                        with p():\n                            with a(href=os.path.join('images', link)):\n                                img(style=\"width:%dpx\" % width, src=os.path.join('images', im))\n                            br()\n                            p(txt)\n\n    def save(self):\n        html_file = '%s/index.html' % self.web_dir\n        f = open(html_file, 'wt')\n        f.write(self.doc.render())\n        f.close()\n\n\nif __name__ == '__main__':\n    html = HTML('web/', 'test_html')\n    html.add_header('hello world')\n\n    ims = []\n    txts = []\n    links = []\n    for n in range(4):\n        ims.append('image_%d.png' % n)\n        txts.append('text_%d' % n)\n        links.append('image_%d.png' % n)\n    html.add_images(ims, txts, links)\n    
html.save()\n"
  },
  {
    "path": "cyclegan/util/image_pool.py",
    "content": "import random\nimport torch\n\n\nclass ImagePool():\n    def __init__(self, pool_size):\n        self.pool_size = pool_size\n        if self.pool_size > 0:\n            self.num_imgs = 0\n            self.images = []\n\n    def query(self, images):\n        if self.pool_size == 0:\n            return images\n        return_images = []\n        for image in images:\n            image = torch.unsqueeze(image.data, 0)\n            if self.num_imgs < self.pool_size:\n                self.num_imgs = self.num_imgs + 1\n                self.images.append(image)\n                return_images.append(image)\n            else:\n                p = random.uniform(0, 1)\n                if p > 0.5:\n                    random_id = random.randint(0, self.pool_size - 1)  # randint is inclusive\n                    tmp = self.images[random_id].clone()\n                    self.images[random_id] = image\n                    return_images.append(tmp)\n                else:\n                    return_images.append(image)\n        return_images = torch.cat(return_images, 0)\n        return return_images\n"
  },
  {
    "path": "cyclegan/util/util.py",
    "content": "from __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport torch\nfrom PIL import Image\n\n\n# Converts a Tensor into an image array (numpy)\n# |imtype|: the desired type of the converted numpy array\ndef tensor2im(input_image, imtype=np.uint8):\n\tif isinstance(input_image, torch.Tensor):\n\t\timage_tensor = input_image.data\n\telse:\n\t\treturn input_image\n\timage_numpy = image_tensor.cpu().float().numpy()\n\tif image_numpy.shape[0] == 1:\n\t\timage_numpy = np.tile(image_numpy, (3, 1, 1))\n\timage_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0\n\treturn image_numpy.astype(imtype)\n\n# def tensor2im(input_image, imtype=np.uint8):\n#     if isinstance(input_image, torch.Tensor):\n#         image_tensor = input_image.data\n#     else:\n#         return input_image\n#     image_numpy = image_tensor[0].cpu().float().numpy()\n#     if image_numpy.shape[0] == 1:\n#         image_numpy = np.tile(image_numpy, (3, 1, 1))\n#     image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0\n#     return image_numpy.astype(imtype)\n\n\ndef diagnose_network(net, name='network'):\n\tmean = 0.0\n\tcount = 0\n\tfor param in net.parameters():\n\t\tif param.grad is not None:\n\t\t\tmean += torch.mean(torch.abs(param.grad.data))\n\t\t\tcount += 1\n\tif count > 0:\n\t\tmean = mean / count\n\tprint(name)\n\tprint(mean)\n\n\ndef save_image(image_numpy, image_path):\n\timage_pil = Image.fromarray(image_numpy)\n\timage_pil.save(image_path)\n\n\ndef print_numpy(x, val=True, shp=False):\n\tx = x.astype(np.float64)\n\tif shp:\n\t\tprint('shape,', x.shape)\n\tif val:\n\t\tx = x.flatten()\n\t\tprint('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n\t\t\tnp.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))\n\n\ndef mkdirs(paths):\n\tif isinstance(paths, list) and not isinstance(paths, str):\n\t\tfor path in paths:\n\t\t\tmkdir(path)\n\telse:\n\t\tmkdir(paths)\n\n\ndef mkdir(path):\n\tif not 
os.path.exists(path):\n\t\tos.makedirs(path)\n"
  },
  {
    "path": "cyclegan/util/visualizer.py",
    "content": "import ntpath\nimport os\nimport time\n\nimport numpy as np\nfrom . import html, util\n\n\n# save image to the disk\ndef save_images(image_dir, visuals, image_path, aspect_ratio=1.0, width=256, multi_flag=False):\n\tfor i in range(len(image_path)):\n\t\tshort_path = ntpath.basename(image_path[i])\n\t\tname = os.path.splitext(short_path)[0]\n\t\t\n\t\tfor ind, (label, im_data) in enumerate(visuals.items()):\n\t\t\t# align visual names and real image name\n\t\t\tif multi_flag is True and (str(i + 1) not in label):\n\t\t\t\tcontinue\n\t\t\tim = util.tensor2im(im_data[i, :, :, :])\n\t\t\timage_name = '%s_%s.png' % (name, label)\n\t\t\tsave_path = os.path.join(image_dir, image_name)\n\t\t\th, w, _ = im.shape\n\t\t\tutil.save_image(im, save_path)\n\n\nclass Visualizer():\n\tdef __init__(self, opt):\n\t\tself.display_id = opt.display_id\n\t\tself.use_html = opt.isTrain and not opt.no_html\n\t\tself.win_size = opt.display_winsize\n\t\tself.name = opt.name\n\t\tself.opt = opt\n\t\tself.saved = False\n\t\tif self.display_id > 0:\n\t\t\timport visdom\n\t\t\tself.ncols = opt.display_ncols\n\t\t\tself.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port)\n\t\t\n\t\tif self.use_html:\n\t\t\tself.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')\n\t\t\tself.img_dir = os.path.join(self.web_dir, 'images')\n\t\t\tprint('create web directory %s...' 
% self.web_dir)\n\t\t\tutil.mkdirs([self.web_dir, self.img_dir])\n\t\tself.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')\n\t\twith open(self.log_name, \"a\") as log_file:\n\t\t\tnow = time.strftime(\"%c\")\n\t\t\tlog_file.write('================ Training Loss (%s) ================\\n' % now)\n\t\n\tdef reset(self):\n\t\tself.saved = False\n\t\n\t# |visuals|: dictionary of images to display or save\n\tdef display_current_results(self, visuals, epoch, save_result):\n\t\tif self.display_id > 0:  # show images in the browser\n\t\t\tncols = self.ncols\n\t\t\tif ncols > 0:\n\t\t\t\tncols = min(ncols, len(visuals))\n\t\t\t\th, w = next(iter(visuals.values())).shape[:2]\n\t\t\t\ttable_css = \"\"\"<style>\n                        table {border-collapse: separate; border-spacing:4px; white-space:nowrap; text-align:center}\n                        table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}\n                        </style>\"\"\" % (w, h)\n\t\t\t\ttitle = self.name\n\t\t\t\tlabel_html = ''\n\t\t\t\tlabel_html_row = ''\n\t\t\t\timages = []\n\t\t\t\tidx = 0\n\t\t\t\tfor label, image in visuals.items():\n\t\t\t\t\timage_numpy = util.tensor2im(image)\n\t\t\t\t\tlabel_html_row += '<td>%s</td>' % label\n\t\t\t\t\timages.append(image_numpy.transpose([2, 0, 1]))\n\t\t\t\t\tidx += 1\n\t\t\t\t\tif idx % ncols == 0:\n\t\t\t\t\t\tlabel_html += '<tr>%s</tr>' % label_html_row\n\t\t\t\t\t\tlabel_html_row = ''\n\t\t\t\twhite_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255\n\t\t\t\twhile idx % ncols != 0:\n\t\t\t\t\timages.append(white_image)\n\t\t\t\t\tlabel_html_row += '<td></td>'\n\t\t\t\t\tidx += 1\n\t\t\t\tif label_html_row != '':\n\t\t\t\t\tlabel_html += '<tr>%s</tr>' % label_html_row\n\t\t\t\t# pane col = image row\n\t\t\t\tself.vis.images(images, nrow=ncols, win=self.display_id + 1,\n\t\t\t\t                padding=2, opts=dict(title=title + ' images'))\n\t\t\t\tlabel_html = '<table>%s</table>' % 
label_html\n\t\t\t\tself.vis.text(table_css + label_html, win=self.display_id + 2,\n\t\t\t\t              opts=dict(title=title + ' labels'))\n\t\t\telse:\n\t\t\t\tidx = 1\n\t\t\t\tfor label, image in visuals.items():\n\t\t\t\t\timage_numpy = util.tensor2im(image)\n\t\t\t\t\tself.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),\n\t\t\t\t\t               win=self.display_id + idx)\n\t\t\t\t\tidx += 1\n\t\t\n\t\tif self.use_html and (save_result or not self.saved):  # save images to a html file\n\t\t\tself.saved = True\n\t\t\tfor label, image in visuals.items():\n\t\t\t\timage_numpy = util.tensor2im(image[0, :, :, :])\n\t\t\t\timg_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))\n\t\t\t\tutil.save_image(image_numpy, img_path)\n\t\t\t# update website\n\t\t\twebpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, reflesh=1)\n\t\t\tfor n in range(epoch, 0, -1):\n\t\t\t\twebpage.add_header('epoch [%d]' % n)\n\t\t\t\tims, txts, links = [], [], []\n\t\t\t\t\n\t\t\t\tfor label, image_numpy in visuals.items():\n\t\t\t\t\t# image_numpy = util.tensor2im(image)\n\t\t\t\t\timg_path = 'epoch%.3d_%s.png' % (n, label)\n\t\t\t\t\tims.append(img_path)\n\t\t\t\t\ttxts.append(label)\n\t\t\t\t\tlinks.append(img_path)\n\t\t\t\twebpage.add_images(ims, txts, links, width=self.win_size)\n\t\t\twebpage.save()\n\t\n\t# losses: dictionary of error labels and values\n\tdef plot_current_losses(self, epoch, counter_ratio, opt, losses):\n\t\tif not hasattr(self, 'plot_data'):\n\t\t\tself.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}\n\t\tself.plot_data['X'].append(epoch + counter_ratio)\n\t\tself.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])\n\t\tself.vis.line(\n\t\t\tX=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),\n\t\t\tY=np.array(self.plot_data['Y']),\n\t\t\topts={\n\t\t\t\t'title': self.name + ' loss over time',\n\t\t\t\t'legend': 
self.plot_data['legend'],\n\t\t\t\t'xlabel': 'epoch',\n\t\t\t\t'ylabel': 'loss'},\n\t\t\twin=self.display_id)\n\t\n\t# losses: same format as |losses| of plot_current_losses\n\tdef print_current_losses(self, epoch, i, losses, t, t_data):\n\t\tmessage = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, i, t, t_data)\n\t\tfor k, v in losses.items():\n\t\t\tmessage += '%s: %.3f ' % (k, v)\n\t\t\n\t\tprint(message)\n\t\twith open(self.log_name, \"a\") as log_file:\n\t\t\tlog_file.write('%s\\n' % message)\n"
  },
  {
    "path": "requirements.txt",
    "content": "scipy\ntorchvision\ntensorboardX\ntensorflow\nclick\ntqdm\nrequests\ncolorlog\npyyaml\ntorch>=1.1.0\ntorchvision>=0.3.0\ndominate>=2.3.1\nvisdom>=0.1.8.3\n"
  },
  {
    "path": "scripts/ADDA/adda_cyclegta2cs_feat.sh",
    "content": "#!/usr/bin/env bash\n\ngpu=0,1,2,3\n\n######################\n# loss weight params #\n######################\nlr=2e-5\nmomentum=0.9\nlambda_d=1\nlambda_g=0.1\n\nexport LC_ALL=C.UTF-8\nexport LANG=C.UTF-8\nexport PYTHONPATH='/usr/bin/python3'\n\n################\n# train params #\n################\nmax_iter=50000\ncrop=600\nsnapshot=5000\nbatch=8\n\nweight_share='weights_shared'\ndiscrim='discrim_feat'\n\n########\n# Data #\n########\nsrc='cyclegta5'\ntgt='cityscapes'\ndata_flag='V4_SEM_300'\ndatadir='/nfs/project/libo_i/cycada/data/'\n\n\nresdir=\"results/${src}_to_${tgt}/adda_sgd_${weight_share}_nolsgan_${discrim}_${data_flag}\"\n\n# init with pre-trained cyclegta5 model\n#model='drn26'\n#baseiter=115000\nmodel='fcn8s'\nbaseiter=100000\n\nbase_model=\"/nfs/project/libo_i/cycada/pretrained_models/GTA2CS_score_V4_SEM_300_net-itercurr.pth\"\ndiscrim_model=\"/nfs/project/libo_i/cycada/pretrained_models/new_Dis_cyclegta5_sem_v4_300x540_iter-iter5440_abv60.pth\"\noutdir=\"${resdir}/${model}/lr${lr}_crop${crop}_ld${lambda_d}_lg${lambda_g}_momentum${momentum}_${discrim}\"\n\necho $outdir\necho $base_model\n\ncd /nfs/project/libo_i/cycada\n\n# Run python script #\npython3 scripts/train_fcn_adda.py \\\n    ${outdir} \\\n    --dataset ${src} --dataset ${tgt} --datadir ${datadir} \\\n    --lr ${lr} --momentum ${momentum} --gpu ${gpu} \\\n    --lambda_d ${lambda_d} --lambda_g ${lambda_g} \\\n    --weights_init ${base_model} --model ${model} \\\n    --\"${weight_share}\" --${discrim} --no_lsgan \\\n    --max_iter ${max_iter} --batch ${batch} \\\n    --snapshot ${snapshot} --no_mmd_loss --small 0 --resize 300 --data_flag ${data_flag}\n"
  },
  {
    "path": "scripts/ADDA/adda_cyclegta2cs_score.sh",
    "content": "#!/usr/bin/env bash\n\ngpu=0,1,2,3\n\n######################\n# loss weight params #\n######################\nlr=2e-5\nmomentum=0.9\nlambda_d=1\nlambda_g=0.1\n\nexport LC_ALL=C.UTF-8\nexport LANG=C.UTF-8\nexport PYTHONPATH='/usr/bin/python3'\n\n################\n# train params #\n################\nmax_iter=25000\ncrop=600\nsnapshot=1000\nbatch=8\n\nweight_share='weights_shared'\ndiscrim='discrim_score'\n\n########\n# Data #\n########\nsrc='cyclegta5'\ntgt='cityscapes'\ndata_flag='V4_SEM_300'\ndatadir='/nfs/project/libo_i/cycada/data/'\n\n\nresdir=\"results/${src}_to_${tgt}/adda_sgd_${weight_share}_nolsgan_${discrim}_${data_flag}\"\n\n# init with pre-trained cyclegta5 model\n#model='drn26'\n#baseiter=115000\nmodel='fcn8s'\nbaseiter=100000\n\nbase_model=\"/nfs/project/libo_i/cycada/pretrained_models/cyclegta_V4_SEM_Final_best_model.pth\"\ndiscrim_model=\"/nfs/project/libo_i/cycada/pretrained_models/new_Dis_cyclegta5_sem_v4_300x540_iter-iter5440_abv60.pth\"\noutdir=\"${resdir}/${model}/lr${lr}_crop${crop}_ld${lambda_d}_lg${lambda_g}_momentum${momentum}_${discrim}\"\n\necho $outdir\necho $base_model\n\ncd /nfs/project/libo_i/cycada\n\n# Run python script #\npython3 scripts/train_fcn_adda.py \\\n    ${outdir} \\\n    --dataset ${src} --dataset ${tgt} --datadir ${datadir} \\\n    --lr ${lr} --momentum ${momentum} --gpu ${gpu} \\\n    --lambda_d ${lambda_d} --lambda_g ${lambda_g} \\\n    --weights_init ${base_model} --model ${model} \\\n    --\"${weight_share}\" --${discrim} --no_lsgan \\\n    --max_iter ${max_iter} --batch ${batch} --weights_discrim ${discrim_model} \\\n    --snapshot ${snapshot} --no_mmd_loss --small 0 --resize 300 --data_flag ${data_flag}\n"
  },
  {
    "path": "scripts/ADDA/adda_cyclesyn2cs_feat.sh",
    "content": "#!/usr/bin/env bash\n\ngpu=0,1,2,3\n\n######################\n# loss weight params #\n######################\nlr=1e-5\nmomentum=0.99\nlambda_d=1\nlambda_g=0.1\n\nexport LC_ALL=C.UTF-8\nexport LANG=C.UTF-8\nexport PYTHONPATH='/usr/bin/python3'\n\n################\n# train params #\n################\nmax_iter=100000\ncrop=800\nsnapshot=5000\nbatch=4\n\nweight_share='weights_shared'\ndiscrim='discrim_score'\n\n########\n# Data #\n########\nsrc='cyclesynthia'\ntgt='cityscapes'\ndata_flag='V2_SEM'\ndatadir='/nfs/project/libo_i/cycada/data/'\n\n\nresdir=\"results/${src}_to_${tgt}/adda_sgd/${weight_share}_nolsgan_${discrim}\"\n\n# init with pre-trained cyclegta5 model\n#model='drn26'\n#baseiter=115000\nmodel='fcn8s'\nbaseiter=100000\n\n\nbase_model=\"/nfs/project/libo_i/cycada/pretrained_models/cyclesynthia_V2_SEM_fcn8s-iter21000.pth\"\noutdir=\"${resdir}/${model}/lr${lr}_crop${crop}_ld${lambda_d}_lg${lambda_g}_momentum${momentum}\"\necho $outdir\necho $base_model\n\ncd /nfs/project/libo_i/cycada\n\n# Run python script #\npython3 scripts/train_fcn_adda.py ${outdir} \\\n    --dataset ${src} --dataset ${tgt} --datadir ${datadir} \\\n    --lr ${lr} --momentum ${momentum} --gpu ${gpu} \\\n    --lambda_d ${lambda_d} --lambda_g ${lambda_g} \\\n    --weights_init ${base_model} --model ${model} \\\n    --\"${weight_share}\" --${discrim} --no_lsgan \\\n    --max_iter ${max_iter} --crop_size ${crop} --batch ${batch} \\\n    --snapshot ${snapshot}\n"
  },
  {
    "path": "scripts/ADDA/adda_cyclesyn2cs_score.sh",
    "content": "#!/usr/bin/env bash\n\ngpu=0,1,2,3\n\n######################\n# loss weight params #\n######################\nlr=1e-5\nmomentum=0.99\nlambda_d=1\nlambda_g=0.1\n\nexport LC_ALL=C.UTF-8\nexport LANG=C.UTF-8\nexport PYTHONPATH='/usr/bin/python3'\n\n################\n# train params #\n################\nmax_iter=100000\ncrop=800\nsnapshot=5000\nbatch=4\n\nweight_share='weights_shared'\ndiscrim='discrim_score'\n\n########\n# Data #\n########\nsrc='cyclesynthia'\ntgt='cityscapes'\ndata_flag='V2_SEM'\ndatadir='/nfs/project/libo_i/cycada/data/'\n\n\nresdir=\"results/${src}_to_${tgt}/adda_sgd/${weight_share}_nolsgan_${discrim}\"\n\n# init with pre-trained cyclegta5 model\n#model='drn26'\n#baseiter=115000\nmodel='fcn8s'\nbaseiter=100000\n\n\nbase_model=\"/nfs/project/libo_i/cycada/pretrained_models/cyclesynthia_V2_SEM_fcn8s-iter21000.pth\"\noutdir=\"${resdir}/${model}/lr${lr}_crop${crop}_ld${lambda_d}_lg${lambda_g}_momentum${momentum}\"\necho $outdir\necho $base_model\n\ncd /nfs/project/libo_i/cycada\n\n# Run python script #\npython3 scripts/train_fcn_adda.py ${outdir} \\\n    --dataset ${src} --dataset ${tgt} --datadir ${datadir} \\\n    --lr ${lr} --momentum ${momentum} --gpu ${gpu} \\\n    --lambda_d ${lambda_d} --lambda_g ${lambda_g} \\\n    --weights_init ${base_model} --model ${model} \\\n    --\"${weight_share}\" --${discrim} --no_lsgan \\\n    --max_iter ${max_iter} --crop_size ${crop} --batch ${batch} \\\n    --snapshot ${snapshot}\n"
  },
  {
    "path": "scripts/ADDA/adda_templates.sh",
    "content": "#!/usr/bin/env bash\n\ngpu=0,1,2,3\n\n######################\n# loss weight params #\n######################\nlr=1e-5\nmomentum=0.99\nlambda_d=1\nlambda_g=0.1\n\nexport LC_ALL=C.UTF-8\nexport LANG=C.UTF-8\nexport PYTHONPATH='/usr/bin/python3'\n\n################\n# train params #\n################\nmax_iter=100000\ncrop=800\nsnapshot=5000\nbatch=4\n\nweight_share='weights_shared'\ndiscrim='discrim_score'\n\n########\n# Data #\n########\nsrc=$1\ntgt='cityscapes'\ndata_flag=$2\ndatadir='/nfs/project/libo_i/cycada/data/'\n\n\nresdir=\"results/${src}_to_${tgt}/adda_sgd/${weight_share}_nolsgan_${discrim}\"\n\n# init with pre-trained cyclegta5 model\n#model='drn26'\n#baseiter=115000\nmodel=$2\nbaseiter=$3\n\n\nbase_model=$4\noutdir=\"${resdir}/${model}/lr${lr}_crop${crop}_ld${lambda_d}_lg${lambda_g}_momentum${momentum}\"\necho $outdir\necho $base_model\n\ncd /nfs/project/libo_i/cycada\n\n# Run python script #\npython3 scripts/train_fcn_adda.py \\\n    ${outdir} \\\n    --dataset ${src} --dataset ${tgt} --datadir ${datadir} \\\n    --lr ${lr} --momentum ${momentum} --gpu ${gpu} \\\n    --lambda_d ${lambda_d} --lambda_g ${lambda_g} \\\n    --weights_init ${base_model} --model ${model} \\\n    --\"${weight_share}\" --${discrim} --no_lsgan \\\n    --max_iter ${max_iter} --crop_size ${crop} --batch ${batch} \\\n    --snapshot ${snapshot}\n"
  },
  {
    "path": "scripts/CycleGAN/cyclegan_gta2cityscapes.sh",
    "content": "#!/usr/bin/env bash\ncd /nfs/project/libo_i/MADAN/cyclegan\n\nsudo python3 train.py --name cyclegan_gta2cityscapes \\\n    --resize_or_crop scale_width_and_crop --loadSize 600 --fineSize 500 --which_model_netD n_layers --n_layers_D 3 \\\n    --model cycle_gan_semantic_fcn --no_flip --batchSize 2 --nThreads 8 \\\n    --dataset_mode gta5_cityscapes --dataroot /nfs/project/libo_i/MADAN/data \\\n    --model_type drn26 --weights_init /nfs/project/libo_i/MADAN/pretrained_models/drn26_cycada_cyclegta2cityscapes.pth \\\n    --semantic_loss --gpu 0"
  },
  {
    "path": "scripts/CycleGAN/cyclegan_gta_synthia2cityscapes.sh",
    "content": "#!/usr/bin/env bash\ncd /nfs/project/libo_i/MADAN/cyclegan\n\npython3 train.py --name cyclegan_gta_synthia2cityscapes_noIdentity \\\n    --resize_or_crop scale_width_and_crop --loadSize 500 --fineSize 400 \\\n    --model multi_cycle_gan_semantic --no_flip --batchSize 4 \\\n    --dataset_mode gta_synthia_cityscapes --dataroot /nfs/project/libo_i/MADAN/data \\\n    --DSC --general_semantic_weight 20 --CCD --SAD --CCD_weight 0.2 --SAD_frozen_epoch 5 --CCD_frozen_epoch 10 --max_epoch 40 \\\n    --weights_syn /nfs/project/libo_i/MADAN/pretrained_models/cyclesynthia_drn26_iter2000.pth \\\n    --weights_gta /nfs/project/libo_i/cycada/pretrained_models/drn26_cycada_cyclegta2cityscapes.pth \\\n    --gpu 0,1,2,3 --semantic_loss"
  },
  {
    "path": "scripts/CycleGAN/cyclegan_synthia2cityscapes.sh",
    "content": "#!/usr/bin/env bash\ncd /root/MADAN/cyclegan\n\npython3 train.py --name cycada_gta_synthia2cityscapes_noIdentity_D12D21D3_SEM_final_scale \\\n    --resize_or_crop scale_width_and_crop --loadSize 500 --fineSize 400 \\\n    --model multi_cycle_gan_semantic --no_flip --batchSize 4 \\\n    --dataset_mode gta_synthia_cityscapes --dataroot /nfs/project/libo_i/MADAN/data \\\n    --DSC --general_semantic_weight 20 --CCD --SAD --CCD_weight 0.2 --SAD_frozen_epoch 5 --CCD_frozen_epoch 10 --max_epoch 40 --gpu 0,1,2,3 \\\n    --weights_syn /nfs/project/libo_i/cycada/pretrained_models/cyclesynthia_V4_SEM_Final_iter_6000.pth \\\n    --weights_gta /nfs/project/libo_i/cycada/pretrained_models/drn26_cycada_cyclegta2cityscapes.pth \\\n    --gpu 0,1,2,3 --semantic_loss"
  },
  {
    "path": "scripts/CycleGAN/test_templates.sh",
    "content": "#!/usr/bin/env bash\n\nhow_many=100000\n\ncd /root/MADAN/cyclegan\nname=$1\nepoch=$2\n\npython3 test.py --name ${name} --resize_or_crop=None \\\n    --which_model_netD n_layers --n_layers_D 3 \\\n    --model $3 --loadSize 600 \\\n    --no_flip --batchSize 32 --nThreads 16 \\\n    --dataset_mode $4 --dataroot /nfs/project/libo_i/cycada/data \\\n    --which_direction AtoB \\\n    --phase train --out_all \\\n    --how_many ${how_many} --which_epoch ${epoch} --gpu 0"
  },
  {
    "path": "scripts/CycleGAN/test_templates_cycle.sh",
    "content": "#!/usr/bin/env bash\n# Sequentially load two generators(GTA, Synthia) and finish\nhow_many=100000\n\ncd /root/MADAN/cyclegan\nmodel=$1\nepoch=$2\n\npython3 test.py --name ${model} --resize_or_crop=None \\\n    --which_model_netD n_layers --n_layers_D 3 \\\n    --model $3 \\\n    --no_flip --batchSize 32 --nThreads 16 \\\n    --dataset_mode $4 --dataroot /nfs/project/libo_i/cycada/data \\\n    --which_direction AtoB \\\n    --phase train --out_all \\\n    --how_many ${how_many} --which_epoch ${epoch} --gpu 0\n\n\npython3 test.py --name ${model} --resize_or_crop=None \\\n    --which_model_netD n_layers --n_layers_D 3 \\\n    --model $3 \\\n    --no_flip --batchSize 32 --nThreads 16 \\\n    --dataset_mode $5 --dataroot /nfs/project/libo_i/cycada/data \\\n    --which_direction AtoB \\\n    --phase train --out_all \\\n    --how_many ${how_many} --which_epoch ${epoch} --gpu 0\n\n# cyclegan/test_templates_cycle.sh cycada_gta_synthia2cityscapes_noIdentity_D12D21D3_SEM_final_scale 15 test synthia_cityscapes gta5_cityscapes"
  },
  {
    "path": "scripts/FCN/train_fcn8s_cyclesgta5.sh",
    "content": "#!/usr/bin/env bash\ngpu=0,1,2,3\ndata=cyclegta5\nmodel=fcn8s\n\nexport LC_ALL=C.UTF-8\nexport LANG=C.UTF-8\n\ndatadir=/root/MADAN/data\nbatch=8\niterations=30000\nsnapshot=2000\nnum_cls=19\ndata_flag=V4_SEM_Final_Scale\n\ncd /root/MADAN\n\noutdir=/root/MADAN/results/${data}/${data}_${data_flag}/${model}\nmkdir -p results/${data}/${data}_${data_flag}/${model}\necho $outdir\n\npython3 scripts/train_fcn.py ${outdir} --model ${model} \\\n    --num_cls ${num_cls} --gpu ${gpu} \\\n    -b ${batch} --adam \\\n    --iterations ${iterations} \\\n    --datadir ${datadir} \\\n    --snapshot ${snapshot} \\\n    --dataset ${data} --data_flag ${data_flag}\n"
  },
  {
    "path": "scripts/FCN/train_fcn8s_cyclesynthia.sh",
    "content": "#!/usr/bin/env bash\ngpu=0,1,2,3\ndata=cyclesynthia\nmodel=fcn8s\n\nexport LC_ALL=C.UTF-8\nexport LANG=C.UTF-8\n\ndatadir=/root/MADAN/data\nbatch=28\niterations=20000\nsnapshot=1000\nnum_cls=19\ndata_flag=V4_SEM_Final_Scale\n\ncd /root/MADAN\n\noutdir=/root/MADAN/cycada/results/${data}/${data}_${data_flag}/${model}\nmkdir -p results/${data}/${data}_${data_flag}/${model}\necho $outdir\n\npython3 scripts/train_fcn.py ${outdir} --model ${model} \\\n    --num_cls ${num_cls} --gpu ${gpu} \\\n    -b ${batch} --adam \\\n    --iterations ${iterations} \\\n    --datadir ${datadir} \\\n    --snapshot ${snapshot} --small 1 \\\n    --dataset ${data} --data_flag ${data_flag}"
  },
  {
    "path": "scripts/eval_fcn.py",
    "content": "import os\nimport sys\n\nfrom torchvision.transforms import transforms\n\nsys.path.append('/nfs/project/libo_iMADAN')\nimport json\nimport click\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nfrom tqdm import *\n\nfrom cycada.data.data_loader import dataset_obj, get_fcn_dataset\nfrom cycada.models.models import get_model, models\nfrom cycada.util import to_tensor_raw\nimport torchvision\nfrom PIL import Image\n\nloader = transforms.Compose([\n\ttransforms.ToTensor()])\n\nunloader = transforms.ToPILImage()\n\n\ndef fmt_array(arr, fmt=','):\n\tstrs = ['{:.3f}'.format(x) for x in arr]\n\treturn fmt.join(strs)\n\n\ndef fast_hist(a, b, n):\n\tk = (a >= 0) & (a < n)\n\treturn np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n)\n\n\ndef result_stats(hist):\n\tacc_overall = np.diag(hist).sum() / hist.sum() * 100\n\tacc_percls = np.diag(hist) / (hist.sum(1) + 1e-8) * 100\n\tiu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist) + 1e-8) * 100\n\tfreq = hist.sum(1) / hist.sum()\n\tfwIU = (freq[freq > 0] * iu[freq > 0]).sum()\n\treturn acc_overall, acc_percls, iu, fwIU\n\n\n@click.command()\n@click.argument('path', type=click.Path(exists=True))\n@click.option('--dataset', default='cityscapes',\n              type=click.Choice(dataset_obj.keys()))\n@click.option('--datadir', default='',\n              type=click.Path(exists=True))\n@click.option('--model', default='fcn8s', type=click.Choice(models.keys()))\n@click.option('--gpu', default='0')\n@click.option('--num_cls', default=19)\n@click.option('--batch_size', default=16)\n@click.option('--loadSize', default=None)\n@click.option('--fineSize', default=None)\ndef main(path, dataset, datadir, model, gpu, num_cls, batch_size, loadSize, fineSize):\n\tos.environ['CUDA_VISIBLE_DEVICES'] = gpu\n\t\n\tnet = get_model(model, num_cls=num_cls, weights_init=path)\n\t\n\tstr_ids = gpu.split(',')\n\tgpu_ids = []\n\tfor str_id in str_ids:\n\t\tid = int(str_id)\n\t\tif id 
>= 0:\n\t\t\tgpu_ids.append(id)\n\t\n\t# set gpu ids\n\tif len(gpu_ids) > 0:\n\t\ttorch.cuda.set_device(gpu_ids[0])\n\t\tassert (torch.cuda.is_available())\n\t\tnet.to(gpu_ids[0])\n\t\tnet = torch.nn.DataParallel(net, gpu_ids)\n\t\n\tnet.eval()\n\t\n\tif (loadSize and fineSize) is not None:\n\t\tprint(\"Loading Center Crop DataLoader Transform\")\n\t\tdata_transform = torchvision.transforms.Compose([transforms.Resize([int(loadSize), int(int(fineSize) * 1.8)], interpolation=Image.BICUBIC),\n\t\t                                                 net.module.transform.transforms[0], net.module.transform.transforms[1]])\n\t\t\n\t\ttarget_transform = torchvision.transforms.Compose([transforms.Resize([int(loadSize), int(int(fineSize) * 1.8)], interpolation=Image.NEAREST),\n\t\t\t transforms.Lambda(lambda img: to_tensor_raw(img))])\n\t\n\telse:\n\t\tdata_transform = net.module.transform\n\t\ttarget_transform = torchvision.transforms.Compose([transforms.Lambda(lambda img: to_tensor_raw(img))])\n\t\n\tds = get_fcn_dataset(dataset, datadir, num_cls=num_cls, split='val', transform=data_transform, target_transform=target_transform)\n\tclasses = ds.classes\n\t\n\tloader = torch.utils.data.DataLoader(ds, num_workers=16, batch_size=batch_size)\n\n\terrs = []\n\thist = np.zeros((num_cls, num_cls))\n\tif len(loader) == 0:\n\t\tprint('Empty data loader')\n\t\treturn\n\titerations = tqdm(enumerate(loader))\n\tfor im_i, (im, label) in iterations:\n\t\tif im_i == 0:\n\t\t\tprint(im.size())\n\t\t\tprint(label.size())\n\t\t\n\t\tif im_i > 32:\n\t\t\tbreak\n\t\t\n\t\tim = Variable(im.cuda())\n\t\tscore = net(im).data\n\t\t_, preds = torch.max(score, 1)\n\t\thist += fast_hist(label.numpy().flatten(), preds.cpu().numpy().flatten(), num_cls)\n\t\tacc_overall, acc_percls, iu, fwIU = result_stats(hist)\n\t\titerations.set_postfix({'mIoU': ' {:0.2f}  fwIoU: {:0.2f} pixel acc: {:0.2f} per cls acc: {:0.2f}'.format(np.nanmean(iu), fwIU, acc_overall,\n\t\t                                               
                                                           np.nanmean(acc_percls))})\n\tprint()\n\t\n\tsynthia_metric_iu = 0\n\t\n\t# line = \"\"\n\tfor index, item in enumerate(classes):\n\t\tprint(classes[index], \" {:0.1f}\".format(iu[index]))\n\t\tif classes[index] != 'terrain' and classes[index] != 'truck' and classes[index] != 'train':\n\t\t\tsynthia_metric_iu += iu[index]\n\t\t\t# line += \" {:0.1f} &\".format(iu[index])\n\t\t\t\n\t# variable \"line\" is used for adding format results into latex grids\n\t# print(line)\n\t\n\tprint(np.nanmean(iu), fwIU, acc_overall, np.nanmean(acc_percls))\n\tprint(\"16 Class-Wise mIOU is {}\".format(synthia_metric_iu / 16))\n\tprint('Errors:', errs)\n\t\n\tcur_path = path.split('/')[-1]\n\tparent_path = path.replace(cur_path, '')\n\tresults_dict_path = os.path.join(parent_path, 'result.json')\n\tresults_dict = {}\n\tresults_dict[cur_path] = [np.nanmean(iu), synthia_metric_iu / 16]\n\t\n\tif os.path.exists(results_dict_path) is False:\n\t\twith open(results_dict_path, 'w') as fp:\n\t\t\tjson.dump(results_dict, fp)\n\telse:\n\t\twith open(results_dict_path, 'r') as fp:\n\t\t\texist_dict = json.load(fp)\n\t\t\n\t\twith open(results_dict_path, 'w') as fp:\n\t\t\texist_dict.update(results_dict)\n\t\t\tjson.dump(exist_dict, fp)\n\n\nif __name__ == '__main__':\n\tmain()\n"
  },
  {
    "path": "scripts/train_fcn.py",
    "content": "import logging\nimport os.path\nimport sys\nfrom collections import deque\n\nimport click\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torchvision\nfrom PIL import Image\nfrom tensorboardX import SummaryWriter\n\nsys.path.append('/nfs/project/libo_iMADAN')\n\nfrom cycada.data.data_loader import get_fcn_dataset as get_dataset\nfrom cycada.models import get_model\nfrom cycada.models.models import models\nfrom cycada.transforms import augment_collate\nfrom cycada.util import config_logging\nfrom cycada.util import to_tensor_raw, step_lr\nfrom cycada.tools.util import make_variable\n\n\ndef to_tensor_raw(im):\n\treturn torch.from_numpy(np.array(im, np.int64, copy=False))\n\n\ndef roundrobin_infinite(*loaders):\n\tif not loaders:\n\t\treturn\n\titers = [iter(loader) for loader in loaders]\n\twhile True:\n\t\tfor i in range(len(iters)):\n\t\t\tit = iters[i]\n\t\t\ttry:\n\t\t\t\tyield next(it)\n\t\t\texcept StopIteration:\n\t\t\t\titers[i] = iter(loaders[i])\n\t\t\t\tyield next(iters[i])\n\n\ndef supervised_loss(score, label, weights=None):\n\tloss_fn_ = torch.nn.NLLLoss2d(weight=weights, size_average=True, ignore_index=255)\n\tloss = loss_fn_(F.log_softmax(score), label)\n\treturn loss\n\n\n@click.command()\n@click.argument('output')\n@click.option('--dataset', required=True, multiple=True)\n@click.option('--datadir', default=\"\", type=click.Path(exists=True))\n@click.option('--batch_size', '-b', default=1)\n@click.option('--lr', '-l', default=0.001)\n@click.option('--step', type=int)\n@click.option('--iterations', '-i', default=100000)\n@click.option('--momentum', '-m', default=0.9)\n@click.option('--snapshot', '-s', default=5000)\n@click.option('--downscale', type=int)\n@click.option('--resize_to', type=int, default=720)\n@click.option('--augmentation/--no-augmentation', default=False)\n@click.option('--adam/--sgd', default=False)\n@click.option('--small', type=int, default=2)\n@click.option('--preprocessing', 
default=False)\n@click.option('--force_split', default=False)\n@click.option('--fyu/--torch', default=False)\n@click.option('--crop_size', default=720)\n@click.option('--weights', type=click.Path(exists=True))\n@click.option('--model_weights', type=click.Path(exists=True))\n@click.option('--model', default='fcn8s', type=click.Choice(models.keys()))\n@click.option('--num_cls', default=19, type=int)\n@click.option('--nthreads', default=8, type=int)\n@click.option('--gpu', default='0')\n@click.option('--start_step', default=0)\n@click.option('--data_flag', default='', type=str)\n@click.option('--rundir_flag', default='', type=str)\n@click.option('--serial_batches', type=bool, default=False, help='if true, takes images in order to make batches, otherwise takes them randomly')\ndef main(output, dataset, datadir, batch_size, lr, step, iterations,\n         momentum, snapshot, downscale, augmentation, fyu, crop_size,\n         weights, model, gpu, num_cls, nthreads, model_weights, data_flag,\n         serial_batches, resize_to, start_step, preprocessing, small, rundir_flag, force_split, adam):\n\tif weights is not None:\n\t\traise RuntimeError(\"weights don't work because eric is bad at coding\")\n\tos.environ['CUDA_VISIBLE_DEVICES'] = gpu\n\tconfig_logging()\n\tlogdir_flag = data_flag\n\tif rundir_flag != \"\":\n\t\tlogdir_flag += \"_{}\".format(rundir_flag)\n\t\n\tlogdir = 'runs/{:s}/{:s}/{:s}'.format(model, '-'.join(dataset), logdir_flag)\n\twriter = SummaryWriter(log_dir=logdir)\n\tif model == 'fcn8s':\n\t\tnet = get_model(model, num_cls=num_cls, weights_init=model_weights)\n\telse:\n\t\tnet = get_model(model, num_cls=num_cls, finetune=True, weights_init=model_weights)\n\tnet.cuda()\n\t\n\tstr_ids = gpu.split(',')\n\tgpu_ids = []\n\tfor str_id in str_ids:\n\t\tid = int(str_id)\n\t\tif id >= 0:\n\t\t\tgpu_ids.append(id)\n\t\n\t# set gpu ids\n\tif len(gpu_ids) > 0:\n\t\ttorch.cuda.set_device(gpu_ids[0])\n\t\tassert 
(torch.cuda.is_available())\n\t\tnet.to(gpu_ids[0])\n\t\tnet = torch.nn.DataParallel(net, gpu_ids)\n\t\n\ttransform = []\n\ttarget_transform = []\n\t\n\tif preprocessing:\n\t\ttransform.extend([torchvision.transforms.Resize([int(resize_to), int(int(resize_to) * 1.8)])])\n\t\ttarget_transform.extend([torchvision.transforms.Resize([int(resize_to), int(int(resize_to) * 1.8)], interpolation=Image.NEAREST)])\n\t\n\ttransform.extend([net.module.transform])\n\ttarget_transform.extend([to_tensor_raw])\n\ttransform = torchvision.transforms.Compose(transform)\n\ttarget_transform = torchvision.transforms.Compose(target_transform)\n\t\n\tif force_split:\n\t\tdatasets = []\n\t\tdatasets.append(\n\t\t\tget_dataset(dataset[0], os.path.join(datadir, dataset[0]), num_cls=num_cls, transform=transform, target_transform=target_transform,\n\t\t\t            data_flag=data_flag))\n\t\tdatasets.append(\n\t\t\tget_dataset(dataset[1], os.path.join(datadir, dataset[1]), num_cls=num_cls, transform=transform, target_transform=target_transform))\n\telse:\n\t\tdatasets = [get_dataset(name, os.path.join(datadir, name), num_cls=num_cls, transform=transform, target_transform=target_transform,\n\t\t                        data_flag=data_flag) for name in dataset]\n\t\n\tif weights is not None:\n\t\tweights = np.loadtxt(weights)\n\t\n\tif adam:\n\t\tprint(\"Using Adam\")\n\t\topt = torch.optim.Adam(net.module.parameters(), lr=1e-4)\n\telse:\n\t\tprint(\"Using SGD\")\n\t\topt = torch.optim.SGD(net.module.parameters(), lr=lr, momentum=momentum, weight_decay=0.0005)\n\t\n\tif augmentation:\n\t\tcollate_fn = lambda batch: augment_collate(batch, crop=crop_size, flip=True)\n\telse:\n\t\tcollate_fn = torch.utils.data.dataloader.default_collate\n\t\n\tloaders = [torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=not serial_batches, num_workers=nthreads, collate_fn=collate_fn,\n\t                                       pin_memory=True) for dataset in datasets]\n\titeration = 
start_step\n\tlosses = deque(maxlen=10)\n\t\n\tfor loader in loaders:\n\t\tloader.dataset.__getitem__(0, debug=True)\n\t\n\tfor im, label in roundrobin_infinite(*loaders):\n\t\t# Clear out gradients\n\t\topt.zero_grad()\n\t\t\n\t\t# load data/label\n\t\tim = make_variable(im, requires_grad=False)\n\t\tlabel = make_variable(label, requires_grad=False)\n\t\t\n\t\tif iteration == 0:\n\t\t\tprint(\"im size: {}\".format(im.size()))\n\t\t\tprint(\"label size: {}\".format(label.size()))\n\t\t\n\t\t# forward pass and compute loss\n\t\tpreds = net(im)\n\t\tloss = supervised_loss(preds, label)\n\t\t\n\t\t# backward pass\n\t\tloss.backward()\n\t\tlosses.append(loss.item())\n\t\t\n\t\t# step gradients\n\t\topt.step()\n\t\t\n\t\t# log results\n\t\tif iteration % 10 == 0:\n\t\t\tlogging.info('Iteration {}:\\t{}'.format(iteration, np.mean(losses)))\n\t\t\twriter.add_scalar('loss', np.mean(losses), iteration)\n\t\titeration += 1\n\t\tif step is not None and iteration % step == 0:\n\t\t\tlogging.info('Decreasing learning rate by 0.1.')\n\t\t\tstep_lr(opt, 0.1)\n\t\t\n\t\tif iteration % snapshot == 0:\n\t\t\ttorch.save(net.module.state_dict(),\n\t\t\t           '{}/iter_{}.pth'.format(output, iteration))\n\t\t\n\t\tif iteration >= iterations:\n\t\t\tlogging.info('Optimization complete.')\n\n\nif __name__ == '__main__':\n\tmain()\n"
  },
  {
    "path": "scripts/train_fcn_adda.py",
    "content": "import logging\nimport os\nimport os.path\nimport sys\nfrom collections import deque\nfrom datetime import datetime\n\nimport click\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tensorboardX import SummaryWriter\nfrom torch.autograd import Variable\n\nsys.path.append('/nfs/project/libo_iMADAN')\n\nfrom cycada.data.adda_datasets import AddaDataLoader\nfrom cycada.models import get_model\nfrom cycada.models.models import models\nfrom cycada.models import Discriminator\nfrom cycada.util import config_logging\nfrom cycada.tools.util import make_variable, mmd_loss\n\n\ndef check_label(label, num_cls):\n\t\"Check that no labels are out of range\"\n\tlabel_classes = np.unique(label.numpy().flatten())\n\tlabel_classes = label_classes[label_classes < 255]\n\tif len(label_classes) == 0:\n\t\tprint('All ignore labels')\n\t\treturn False\n\tclass_too_large = label_classes.max() > num_cls\n\tif class_too_large or label_classes.min() < 0:\n\t\tprint('Labels out of bound')\n\t\tprint(label_classes)\n\t\treturn False\n\treturn True\n\n\ndef forward_pass(net, discriminator, im, requires_grad=False, discrim_feat=False):\n\tif discrim_feat:\n\t\tscore, feat = net(im)\n\t\tdis_score = discriminator(feat)\n\telse:\n\t\tscore = net(im)\n\t\tdis_score = discriminator(score)\n\tif not requires_grad:\n\t\tscore = Variable(score.data, requires_grad=False)\n\t\n\treturn score, dis_score\n\n\ndef supervised_loss(score, label, weights=None):\n\tloss_fn_ = torch.nn.NLLLoss(weight=weights, reduction='mean', ignore_index=255)\n\tloss = loss_fn_(F.log_softmax(score, dim=1), label)\n\treturn loss\n\n\ndef discriminator_loss(score, target_val, lsgan=False):\n\tif lsgan:\n\t\tloss = 0.5 * torch.mean((score - target_val) ** 2)\n\telse:\n\t\t_, _, h, w = score.size()\n\t\ttarget_val_vec = Variable(target_val * torch.ones(1, h, w), requires_grad=False).long().cuda()\n\t\tloss = supervised_loss(score, target_val_vec)\n\treturn loss\n\n\ndef fast_hist(a, b, 
n):\n\tk = (a >= 0) & (a < n)\n\treturn np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n)\n\n\ndef seg_accuracy(score, label, num_cls):\n\t_, preds = torch.max(score.data, 1)\n\thist = fast_hist(label.cpu().numpy().flatten(),\n\t                 preds.cpu().numpy().flatten(), num_cls)\n\tintersections = np.diag(hist)\n\tunions = (hist.sum(1) + hist.sum(0) - np.diag(hist) + 1e-8) * 100\n\tacc = np.diag(hist).sum() / hist.sum()\n\treturn intersections, unions, acc\n\n\n@click.command()\n@click.argument('output')\n@click.option('--dataset', required=True, multiple=True)\n@click.option('--datadir', default=\"\", type=click.Path(exists=True))\n@click.option('--lr', '-l', default=0.0001)\n@click.option('--momentum', '-m', default=0.9)\n@click.option('--batch', default=1)\n@click.option('--snapshot', '-s', default=5000)\n@click.option('--downscale', type=int)\n@click.option('--resize', default=None, type=int)\n@click.option('--crop_size', default=None, type=int)\n@click.option('--half_crop', default=None)\n@click.option('--cls_weights', type=click.Path(exists=True))\n@click.option('--weights_discrim', type=click.Path(exists=True))\n@click.option('--weights_init', type=click.Path(exists=True))\n@click.option('--model', default='fcn8s', type=click.Choice(models.keys()))\n@click.option('--lsgan/--no_lsgan', default=False)\n@click.option('--num_cls', type=int, default=19)\n@click.option('--gpu', default='0')\n@click.option('--max_iter', default=10000)\n@click.option('--lambda_d', default=1.0)\n@click.option('--lambda_g', default=1.0)\n@click.option('--train_discrim_only', default=False)\n@click.option('--with_mmd_loss/--no_mmd_loss', default=False)\n@click.option('--discrim_feat/--discrim_score', default=False)\n@click.option('--weights_shared/--weights_unshared', default=False)\n@click.option('--data_flag', type=str, default=None)\n@click.option('--small', type=int, default=2)\ndef main(output, dataset, datadir, lr, momentum, snapshot, downscale, 
cls_weights, gpu,\n         weights_init, num_cls, lsgan, max_iter, lambda_d, lambda_g,\n         train_discrim_only, weights_discrim, crop_size, weights_shared,\n         discrim_feat, half_crop, batch, model, data_flag, resize, with_mmd_loss, small):\n\t# So data is sampled in consistent way\n\tnp.random.seed(1336)\n\ttorch.manual_seed(1336)\n\tlogdir = 'runs/{:s}/{:s}_to_{:s}/lr{:.1g}_ld{:.2g}_lg{:.2g}'.format(model, dataset[0],\n\t                                                                    dataset[1], lr, lambda_d, lambda_g)\n\tif weights_shared:\n\t\tlogdir += '_weights_shared'\n\telse:\n\t\tlogdir += '_weights_unshared'\n\tif discrim_feat:\n\t\tlogdir += '_discrim_feat'\n\telse:\n\t\tlogdir += '_discrim_score'\n\tlogdir += '/' + datetime.now().strftime('%Y_%b_%d-%H:%M')\n\twriter = SummaryWriter(log_dir=logdir)\n\t\n\tos.environ['CUDA_VISIBLE_DEVICES'] = gpu\n\tconfig_logging()\n\tprint('Train Discrim Only', train_discrim_only)\n\tif model == 'fcn8s':\n\t\tnet = get_model(model, num_cls=num_cls, pretrained=True, weights_init=weights_init, output_last_ft=discrim_feat)\n\telse:\n\t\tnet = get_model(model, num_cls=num_cls, finetune=True, pretrained=True, weights_init=weights_init, output_last_ft=discrim_feat)\n\t\n\tnet.cuda()\n\tstr_ids = gpu.split(',')\n\tgpu_ids = []\n\tfor str_id in str_ids:\n\t\tid = int(str_id)\n\t\tif id >= 0:\n\t\t\tgpu_ids.append(id)\n\t\n\t# set gpu ids\n\tif len(gpu_ids) > 0:\n\t\ttorch.cuda.set_device(gpu_ids[0])\n\t\tassert (torch.cuda.is_available())\n\t\tnet.to(gpu_ids[0])\n\t\tnet = torch.nn.DataParallel(net, gpu_ids)\n\t\n\tif weights_shared:\n\t\tnet_src = net  # shared weights\n\telse:\n\t\tnet_src = get_model(model, num_cls=num_cls, finetune=True, pretrained=True, weights_init=weights_init, output_last_ft=discrim_feat)\n\t\tnet_src.eval()\n\t\n\t# initialize Discrminator\n\todim = 1 if lsgan else 2\n\tidim = num_cls if not discrim_feat else 4096\n\tprint('Discrim_feat', discrim_feat, idim)\n\tprint('Discriminator init 
weights: ', weights_discrim)\n\tdiscriminator = Discriminator(input_dim=idim, output_dim=odim,\n\t                              pretrained=not (weights_discrim == None),\n\t                              weights_init=weights_discrim).cuda()\n\t\n\tdiscriminator.to(gpu_ids[0])\n\tdiscriminator = torch.nn.DataParallel(discriminator, gpu_ids)\n\t\n\tloader = AddaDataLoader(net.module.transform, dataset, datadir, downscale, resize=resize,\n\t                        crop_size=crop_size, half_crop=half_crop, batch_size=batch,\n\t                        shuffle=True, num_workers=16, src_data_flag=data_flag, small=small)\n\tprint('dataset', dataset)\n\t\n\t# Class weighted loss?\n\tif cls_weights is not None:\n\t\tweights = np.loadtxt(cls_weights)\n\telse:\n\t\tweights = None\n\t\n\t# setup optimizers\n\topt_dis = torch.optim.SGD(discriminator.module.parameters(), lr=lr,\n\t                          momentum=momentum, weight_decay=0.0005)\n\topt_rep = torch.optim.SGD(net.module.parameters(), lr=lr,\n\t                          momentum=momentum, weight_decay=0.0005)\n\t\n\titeration = 0\n\tnum_update_g = 0\n\tlast_update_g = -1\n\tlosses_super_s = deque(maxlen=100)\n\tlosses_super_t = deque(maxlen=100)\n\tlosses_dis = deque(maxlen=100)\n\tlosses_rep = deque(maxlen=100)\n\taccuracies_dom = deque(maxlen=100)\n\tintersections = np.zeros([100, num_cls])\n\tiu_deque = deque(maxlen=100)\n\tunions = np.zeros([100, num_cls])\n\taccuracy = deque(maxlen=100)\n\tprint('Max Iter:', max_iter)\n\t\n\tnet.train()\n\tdiscriminator.train()\n\t\n\tloader.loader_src.dataset.__getitem__(0, debug=True)\n\tloader.loader_tgt.dataset.__getitem__(0, debug=True)\n\t\n\twhile iteration < max_iter:\n\t\t\n\t\tfor im_s, im_t, label_s, label_t in loader:\n\t\t\t\n\t\t\tif iteration == 0:\n\t\t\t\tprint(\"IM S: {}\".format(im_s.size()))\n\t\t\t\tprint(\"Label S: {}\".format(label_s.size()))\n\t\t\t\tprint(\"IM T: {}\".format(im_t.size()))\n\t\t\t\tprint(\"Label T: 
{}\".format(label_t.size()))\n\t\t\t\n\t\t\tif iteration > max_iter:\n\t\t\t\tbreak\n\t\t\t\n\t\t\tinfo_str = 'Iteration {}: '.format(iteration)\n\t\t\t\n\t\t\tif not check_label(label_s, num_cls):\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t###########################\n\t\t\t# 1. Setup Data Variables #\n\t\t\t###########################\n\t\t\tim_s = make_variable(im_s, requires_grad=False)\n\t\t\tlabel_s = make_variable(label_s, requires_grad=False)\n\t\t\tim_t = make_variable(im_t, requires_grad=False)\n\t\t\tlabel_t = make_variable(label_t, requires_grad=False)\n\t\t\t\n\t\t\t#############################\n\t\t\t# 2. Optimize Discriminator #\n\t\t\t#############################\n\t\t\t\n\t\t\t# zero gradients for optimizer\n\t\t\topt_dis.zero_grad()\n\t\t\topt_rep.zero_grad()\n\t\t\t\n\t\t\t# extract features\n\t\t\tif discrim_feat:\n\t\t\t\tscore_s, feat_s = net_src(im_s)\n\t\t\t\tscore_s = Variable(score_s.data, requires_grad=False)\n\t\t\t\tf_s = Variable(feat_s.data, requires_grad=False)\n\t\t\telse:\n\t\t\t\tscore_s = Variable(net_src(im_s).data, requires_grad=False)\n\t\t\t\tf_s = score_s\n\t\t\t\n\t\t\tdis_score_s = discriminator(f_s)\n\t\t\t\n\t\t\tif discrim_feat:\n\t\t\t\tscore_t, feat_t = net(im_t)\n\t\t\t\tscore_t = Variable(score_t.data, requires_grad=False)\n\t\t\t\tf_t = Variable(feat_t.data, requires_grad=False)\n\t\t\telse:\n\t\t\t\tscore_t = Variable(net(im_t).data, requires_grad=False)\n\t\t\t\tf_t = score_t\n\t\t\tdis_score_t = discriminator(f_t)\n\t\t\t\n\t\t\tdis_pred_concat = torch.cat((dis_score_s, dis_score_t))\n\t\t\t\n\t\t\t# prepare real and fake labels\n\t\t\tbatch_t, _, h, w = dis_score_t.size()\n\t\t\tbatch_s, _, _, _ = dis_score_s.size()\n\t\t\tdis_label_concat = make_variable(\n\t\t\t\ttorch.cat(\n\t\t\t\t\t[torch.ones(batch_s, h, w).long(),\n\t\t\t\t\t torch.zeros(batch_t, h, w).long()]\n\t\t\t\t), requires_grad=False)\n\t\t\t\n\t\t\t# compute loss for discriminator\n\t\t\tloss_dis = supervised_loss(dis_pred_concat, 
dis_label_concat)\n\t\t\t(lambda_d * loss_dis).backward()\n\t\t\tlosses_dis.append(loss_dis.item())\n\t\t\t\n\t\t\t# optimize discriminator\n\t\t\topt_dis.step()\n\t\t\t\n\t\t\t# compute discriminator acc\n\t\t\tpred_dis = torch.squeeze(dis_pred_concat.max(1)[1])\n\t\t\tdom_acc = (pred_dis == dis_label_concat).float().mean().item()\n\t\t\taccuracies_dom.append(dom_acc * 100.)\n\t\t\t\n\t\t\t# add discriminator info to log\n\t\t\tinfo_str += \" domacc:{:0.1f}  D:{:.3f}\".format(np.mean(accuracies_dom),\n\t\t\t                                               np.mean(losses_dis))\n\t\t\twriter.add_scalar('loss/discriminator', np.mean(losses_dis), iteration)\n\t\t\twriter.add_scalar('acc/discriminator', np.mean(accuracies_dom), iteration)\n\t\t\t\n\t\t\t###########################\n\t\t\t# Optimize Target Network #\n\t\t\t########################### np.mean(accuracies_dom) > dom_acc_thresh\n\t\t\t\n\t\t\tdom_acc_thresh = 60\n\t\t\t\n\t\t\tif train_discrim_only and np.mean(accuracies_dom) > dom_acc_thresh:\n\t\t\t\tos.makedirs(output, exist_ok=True)\n\t\t\t\ttorch.save(discriminator.module.state_dict(),\n\t\t\t\t           '{}/discriminator_abv60.pth'.format(output, iteration))\n\t\t\t\tbreak\n\t\t\t\n\t\t\tif not train_discrim_only and np.mean(accuracies_dom) > dom_acc_thresh:\n\t\t\t\t\n\t\t\t\tlast_update_g = iteration\n\t\t\t\tnum_update_g += 1\n\t\t\t\tif num_update_g % 1 == 0:\n\t\t\t\t\tprint('Updating G with adversarial loss ({:d} times)'.format(num_update_g))\n\t\t\t\t\n\t\t\t\t# zero out optimizer gradients\n\t\t\t\topt_dis.zero_grad()\n\t\t\t\topt_rep.zero_grad()\n\t\t\t\t\n\t\t\t\t# extract features\n\t\t\t\tif discrim_feat:\n\t\t\t\t\tscore_t, feat_t = net(im_t)\n\t\t\t\t\tscore_t = Variable(score_t.data, requires_grad=False)\n\t\t\t\t\tf_t = feat_t\n\t\t\t\telse:\n\t\t\t\t\tscore_t = net(im_t)\n\t\t\t\t\tf_t = score_t\n\t\t\t\t\n\t\t\t\t# score_t = net(im_t)\n\t\t\t\tdis_score_t = discriminator(f_t)\n\t\t\t\t\n\t\t\t\t# create fake label\n\t\t\t\tbatch, _, 
h, w = dis_score_t.size()\n\t\t\t\ttarget_dom_fake_t = make_variable(torch.ones(batch, h, w).long(),\n\t\t\t\t                                  requires_grad=False)\n\t\t\t\t\n\t\t\t\t# compute loss for target net\n\t\t\t\tloss_gan_t = supervised_loss(dis_score_t, target_dom_fake_t)\n\t\t\t\t(lambda_g * loss_gan_t).backward()\n\t\t\t\tlosses_rep.append(loss_gan_t.item())\n\t\t\t\twriter.add_scalar('loss/generator', np.mean(losses_rep), iteration)\n\t\t\t\t\n\t\t\t\t# optimize target net\n\t\t\t\topt_rep.step()\n\t\t\t\t\n\t\t\t\t# log net update info\n\t\t\t\tinfo_str += ' G:{:.3f}'.format(np.mean(losses_rep))\n\t\t\t\n\t\t\tif (not train_discrim_only) and weights_shared and np.mean(accuracies_dom) > dom_acc_thresh:\n\t\t\t\tprint('Updating G using source supervised loss.')\n\t\t\t\t# zero out optimizer gradients\n\t\t\t\topt_dis.zero_grad()\n\t\t\t\topt_rep.zero_grad()\n\t\t\t\t\n\t\t\t\t# extract features\n\t\t\t\tif discrim_feat:\n\t\t\t\t\tscore_s, feat_s = net(im_s)\n\t\t\t\telse:\n\t\t\t\t\tscore_s = net(im_s)\n\t\t\t\t\n\t\t\t\tloss_supervised_s = supervised_loss(score_s, label_s, weights=weights)\n\t\t\t\t\n\t\t\t\tif with_mmd_loss:\n\t\t\t\t\tprint(\"Updating G using discrepancy loss\")\n\t\t\t\t\tlambda_discrepancy = 0.1\n\t\t\t\t\tloss_mmd = mmd_loss(feat_s, feat_t) * 0.5 + mmd_loss(score_s, score_t) * 0.5\n\t\t\t\t\tloss_supervised_s += lambda_discrepancy * loss_mmd\n\t\t\t\t\n\t\t\t\tloss_supervised_s.backward()\n\t\t\t\tlosses_super_s.append(loss_supervised_s.item())\n\t\t\t\tinfo_str += ' clsS:{:.2f}'.format(np.mean(losses_super_s))\n\t\t\t\twriter.add_scalar('loss/supervised/source', np.mean(losses_super_s), iteration)\n\t\t\t\t\n\t\t\t\t# optimize target net\n\t\t\t\topt_rep.step()\n\t\t\t\n\t\t\t# compute supervised losses for target -- monitoring only!!!no backward()\n\t\t\tloss_supervised_t = supervised_loss(score_t, label_t, weights=weights)\n\t\t\tlosses_super_t.append(loss_supervised_t.item())\n\t\t\tinfo_str += ' 
clsT:{:.2f}'.format(np.mean(losses_super_t))\n\t\t\twriter.add_scalar('loss/supervised/target', np.mean(losses_super_t), iteration)\n\t\t\t\n\t\t\t###########################\n\t\t\t# Log and compute metrics #\n\t\t\t###########################\n\t\t\tif iteration % 10 == 0 and iteration > 0:\n\t\t\t\t\n\t\t\t\t# compute metrics\n\t\t\t\tintersection, union, acc = seg_accuracy(score_t, label_t.data, num_cls)\n\t\t\t\tintersections = np.vstack([intersections[1:, :], intersection[np.newaxis, :]])\n\t\t\t\tunions = np.vstack([unions[1:, :], union[np.newaxis, :]])\n\t\t\t\taccuracy.append(acc.item() * 100)\n\t\t\t\tacc = np.mean(accuracy)\n\t\t\t\tmIoU = np.mean(np.maximum(intersections, 1) / np.maximum(unions, 1)) * 100\n\t\t\t\t\n\t\t\t\tiu = (intersection / union) * 10000\n\t\t\t\tiu_deque.append(np.nanmean(iu))\n\t\t\t\t\n\t\t\t\tinfo_str += ' acc:{:0.2f}  mIoU:{:0.2f}'.format(acc, np.mean(iu_deque))\n\t\t\t\twriter.add_scalar('metrics/acc', np.mean(accuracy), iteration)\n\t\t\t\twriter.add_scalar('metrics/mIoU', np.mean(mIoU), iteration)\n\t\t\t\tlogging.info(info_str)\n\t\t\t\n\t\t\titeration += 1\n\t\t\t\n\t\t\t################\n\t\t\t# Save outputs #\n\t\t\t################\n\t\t\t\n\t\t\t# every 500 iters save current model\n\t\t\tif iteration % 500 == 0:\n\t\t\t\tos.makedirs(output, exist_ok=True)\n\t\t\t\tif not train_discrim_only:\n\t\t\t\t\ttorch.save(net.module.state_dict(),\n\t\t\t\t\t           '{}/net-itercurr.pth'.format(output))\n\t\t\t\ttorch.save(discriminator.module.state_dict(),\n\t\t\t\t           '{}/discriminator-itercurr.pth'.format(output))\n\t\t\t\n\t\t\t# save labeled snapshots\n\t\t\tif iteration % snapshot == 0:\n\t\t\t\tos.makedirs(output, exist_ok=True)\n\t\t\t\tif not train_discrim_only:\n\t\t\t\t\ttorch.save(net.module.state_dict(),\n\t\t\t\t\t           '{}/net-iter{}.pth'.format(output, iteration))\n\t\t\t\ttorch.save(discriminator.module.state_dict(),\n\t\t\t\t           '{}/discriminator-iter{}.pth'.format(output, 
iteration))\n\t\t\t\n\t\t\tif iteration - last_update_g >= 3 * len(loader):\n\t\t\t\tprint('No suitable discriminator found -- returning.')\n\t\t\t\ttorch.save(net.module.state_dict(),\n\t\t\t\t           '{}/net-iter{}.pth'.format(output, iteration))\n\t\t\t\titeration = max_iter  # make sure outside loop breaks\n\t\t\t\tbreak\n\t\n\twriter.close()\n\n\nif __name__ == '__main__':\n\tmain()\n"
  },
  {
    "path": "scripts/train_fcn_mdan.py",
    "content": "import itertools\nimport json\nimport logging\nimport os.path\nimport subprocess\nimport sys\nfrom collections import deque\n\nimport click\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torchvision\nfrom PIL import Image\nfrom tensorboardX import SummaryWriter\n\nsys.path.append('/nfs/project/libo_iMADAN')\n\nfrom cycada.data.data_loader import get_fcn_dataset as get_dataset\nfrom cycada.models import get_model\nfrom cycada.models.models import models\nfrom cycada.models.MDAN import MDANet\nfrom cycada.transforms import augment_collate\nfrom cycada.util import config_logging\nfrom cycada.util import to_tensor_raw, step_lr\nfrom cycada.tools.util import make_variable\n\n\ndef to_tensor_raw(im):\n\treturn torch.from_numpy(np.array(im, np.int64, copy=False))\n\n\ndef roundrobin_infinite(*loaders):\n\tif not loaders:\n\t\treturn\n\titers = [iter(loader) for loader in loaders]\n\twhile True:\n\t\tfor i in range(len(iters)):\n\t\t\tit = iters[i]\n\t\t\ttry:\n\t\t\t\tyield next(it)\n\t\t\texcept StopIteration:\n\t\t\t\titers[i] = iter(loaders[i])\n\t\t\t\tyield next(iters[i])\n\n\ndef multi_source_infinite(loaders, target_loader):\n\tif not loaders:\n\t\treturn\n\titers_syn = iter(loaders[0])\n\titers_gta = iter(loaders[1])\n\titers_cs = iter(target_loader)\n\t\n\twhile True:\n\t\ttry:\n\t\t\tyield next(iters_syn), next(iters_gta), next(iters_cs)\n\t\texcept StopIteration:\n\t\t\titers_syn = iter(loaders[0])\n\t\t\titers_gta = iter(loaders[1])\n\t\t\titers_cs = iter(target_loader)\n\t\t\tyield next(iters_syn), next(iters_gta), next(iters_cs)\n\n\ndef supervised_loss(score, label, weights=None):\n\tloss_fn_ = torch.nn.NLLLoss2d(weight=weights, size_average=True, ignore_index=255)\n\tloss = loss_fn_(F.log_softmax(score), label)\n\treturn loss\n\n@click.command()\n@click.argument('output')\n@click.option('--dataset', required=True, multiple=True)\n@click.option('--target_name', required=True)\n@click.option('--datadir', 
default=\"\", type=click.Path(exists=True))\n@click.option('--batch_size', '-b', default=1)\n@click.option('--lr', '-l', default=0.001)\n@click.option('--iterations', '-i', default=100000)\n@click.option('--momentum', '-m', default=0.9)\n@click.option('--snapshot', '-s', default=5000)\n@click.option('--downscale', type=int)\n@click.option('--resize_to', type=int, default=720)\n@click.option('--augmentation/--no-augmentation', default=False)\n@click.option('--small', type=int, default=2)\n@click.option('--preprocessing', default=False)\n@click.option('--fyu/--torch', default=False)\n@click.option('--crop_size', default=720)\n@click.option('--weights', type=click.Path(exists=True))\n@click.option('--model_weights', type=click.Path(exists=True))\n@click.option('--model', default='fcn8s', type=click.Choice(models.keys()))\n@click.option('--num_cls', default=19, type=int)\n@click.option('--nthreads', default=16, type=int)\n@click.option('--gpu', default='0')\n@click.option('--start_step', default=0)\n@click.option('--data_flag', default='', type=str)\n@click.option('--rundir_flag', default='', type=str)\n@click.option('--serial_batches', type=bool, default=False, help='if true, takes images in order to make batches, otherwise takes them randomly')\ndef main(output, dataset, target_name, datadir, batch_size, lr, iterations,\n         momentum, snapshot, downscale, augmentation, fyu, crop_size,\n         weights, model, gpu, num_cls, nthreads, model_weights, data_flag, serial_batches, resize_to, start_step, preprocessing, small, rundir_flag):\n\tif weights is not None:\n\t\traise RuntimeError(\"weights don't work because eric is bad at coding\")\n\tos.environ['CUDA_VISIBLE_DEVICES'] = gpu\n\tconfig_logging()\n\tlogdir_flag = data_flag\n\tif rundir_flag != \"\":\n\t\tlogdir_flag += \"_{}\".format(rundir_flag)\n\t\n\tlogdir = 'runs/{:s}/{:s}/{:s}'.format(model, '-'.join(dataset), logdir_flag)\n\twriter = SummaryWriter(log_dir=logdir)\n\tif model == 'fcn8s':\n\t\tnet = 
get_model(model, num_cls=num_cls, weights_init=model_weights, output_last_ft=True)\n\telse:\n\t\tnet = get_model(model, num_cls=num_cls, finetune=True, weights_init=model_weights)\n\tnet.cuda()\n\t\n\tstr_ids = gpu.split(',')\n\tgpu_ids = []\n\tfor str_id in str_ids:\n\t\tid = int(str_id)\n\t\tif id >= 0:\n\t\t\tgpu_ids.append(id)\n\t\n\t# set gpu ids\n\tif len(gpu_ids) > 0:\n\t\ttorch.cuda.set_device(gpu_ids[0])\n\t\tassert (torch.cuda.is_available())\n\t\tnet.to(gpu_ids[0])\n\t\tnet = torch.nn.DataParallel(net, gpu_ids)\n\t\n\ttransform = []\n\ttarget_transform = []\n\t\n\tif preprocessing:\n\t\ttransform.extend([torchvision.transforms.Resize([int(resize_to), int(int(resize_to) * 1.8)], interpolation=Image.BICUBIC)])\n\t\ttarget_transform.extend([torchvision.transforms.Resize([int(resize_to), int(int(resize_to) * 1.8)], interpolation=Image.NEAREST)])\n\t\n\ttransform.extend([net.module.transform])\n\ttarget_transform.extend([to_tensor_raw])\n\ttransform = torchvision.transforms.Compose(transform)\n\ttarget_transform = torchvision.transforms.Compose(target_transform)\n\t\n\tdatasets = [get_dataset(name, os.path.join(datadir, name), num_cls=num_cls, transform=transform, target_transform=target_transform,\n\t                        data_flag=data_flag, small=small) for name in dataset]\n\t\n\ttarget_dataset = get_dataset(target_name, os.path.join(datadir, target_name), num_cls=num_cls, transform=transform,\n\t                             target_transform=target_transform,\n\t                             data_flag=data_flag, small=small)\n\t\n\tif weights is not None:\n\t\tweights = np.loadtxt(weights)\n\t\n\tif augmentation:\n\t\tcollate_fn = lambda batch: augment_collate(batch, crop=crop_size, flip=True)\n\telse:\n\t\tcollate_fn = torch.utils.data.dataloader.default_collate\n\t\n\tloaders = [torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=not serial_batches, num_workers=nthreads, collate_fn=collate_fn,\n\t                                       pin_memory=True, drop_last=True) for dataset in datasets]\n\t\n\ttarget_loader = torch.utils.data.DataLoader(target_dataset, batch_size=batch_size, shuffle=not serial_batches, num_workers=nthreads,\n\t                                            collate_fn=collate_fn,\n\t                                            pin_memory=True, drop_last=True)\n\titeration = start_step\n\tlosses = deque(maxlen=10)\n\tlosses_domain_syn = deque(maxlen=10)\n\tlosses_domain_gta = deque(maxlen=10)\n\tlosses_task = deque(maxlen=10)\n\t\n\tfor loader in loaders:\n\t\tloader.dataset.__getitem__(0, debug=True)\n\t\n\tinput_dim = 2048\n\tconfigs = {\"input_dim\": input_dim, \"hidden_layers\": [1000, 500, 100], \"num_classes\": 2, 'num_domains': 2, 'mode': 'dynamic', 'mu': 1e-2,\n\t           'gamma': 10.0}\n\t\n\tmdan = MDANet(configs).to(gpu_ids[0])\n\tmdan = torch.nn.DataParallel(mdan, gpu_ids)\n\tmdan.train()\n\t\n\topt = torch.optim.Adam(itertools.chain(mdan.module.parameters(), net.module.parameters()), lr=1e-4)\n\t\n\t# cnt = 0\n\tfor (im_syn, label_syn), (im_gta, label_gta), (im_cs, label_cs) in multi_source_infinite(loaders, target_loader):\n\t\t# cnt += 1\n\t\t# print(cnt)\n\t\t# Clear out gradients\n\t\topt.zero_grad()\n\t\t\n\t\t# load data/label\n\t\tim_syn = make_variable(im_syn, requires_grad=False)\n\t\tlabel_syn = make_variable(label_syn, requires_grad=False)\n\t\t\n\t\tim_gta = make_variable(im_gta, requires_grad=False)\n\t\tlabel_gta = make_variable(label_gta, requires_grad=False)\n\t\t\n\t\tim_cs = make_variable(im_cs, requires_grad=False)\n\t\tlabel_cs = make_variable(label_cs, requires_grad=False)\n\t\t\n\t\tif iteration == 0:\n\t\t\tprint(\"im_syn size: {}\".format(im_syn.size()))\n\t\t\tprint(\"label_syn size: {}\".format(label_syn.size()))\n\t\t\t\n\t\t\tprint(\"im_gta size: {}\".format(im_gta.size()))\n\t\t\tprint(\"label_gta size: {}\".format(label_gta.size()))\n\t\t\t\n\t\t\tprint(\"im_cs size: {}\".format(im_cs.size()))\n\t\t\tprint(\"label_cs size: {}\".format(label_cs.size()))\n\t\t\n\t\tif not (im_syn.size() == im_gta.size() == im_cs.size()):\n\t\t\tprint(im_syn.size())\n\t\t\tprint(im_gta.size())\n\t\t\tprint(im_cs.size())\n\t\t\n\t\t# forward pass and compute loss\n\t\tpreds_syn, ft_syn = net(im_syn)\n\t\t# pooled_ft_syn = avg_pool(ft_syn)\n\t\t\n\t\tpreds_gta, ft_gta = net(im_gta)\n\t\t# pooled_ft_gta = avg_pool(ft_gta)\n\t\t\n\t\tpreds_cs, ft_cs = net(im_cs)\n\t\t# pooled_ft_cs = avg_pool(ft_cs)\n\t\t\n\t\tloss_synthia = supervised_loss(preds_syn, label_syn)\n\t\tloss_gta = supervised_loss(preds_gta, label_gta)\n\t\t\n\t\tloss = loss_synthia + loss_gta\n\t\tlosses_task.append(loss.item())\n\t\t\n\t\tlogprobs, sdomains, tdomains = mdan(ft_syn, ft_gta, ft_cs)\n\t\t\n\t\tslabels = torch.ones(batch_size, requires_grad=False).type(torch.LongTensor).to(gpu_ids[0])\n\t\ttlabels = torch.zeros(batch_size, requires_grad=False).type(torch.LongTensor).to(gpu_ids[0])\n\t\t\n\t\t# TODO: increase task loss\n\t\t# Compute prediction accuracy on multiple training sources.\n\t\tdomain_losses = torch.stack([F.nll_loss(sdomains[j], slabels) + F.nll_loss(tdomains[j], tlabels) for j in range(configs['num_domains'])])\n\t\tlosses_domain_syn.append(domain_losses[0].item())\n\t\tlosses_domain_gta.append(domain_losses[1].item())\n\t\t\n\t\t# Different final loss function depending on different training modes.\n\t\tif configs['mode'] == \"maxmin\":\n\t\t\tloss = torch.max(loss) + configs['mu'] * torch.min(domain_losses)\n\t\telif configs['mode'] == \"dynamic\":\n\t\t\tloss = torch.log(torch.sum(torch.exp(configs['gamma'] * (loss + configs['mu'] * domain_losses)))) / configs['gamma']\n\t\t\n\t\t# backward pass\n\t\tloss.backward()\n\t\tlosses.append(loss.item())\n\t\t\n\t\ttorch.nn.utils.clip_grad_norm_(net.module.parameters(), 10)\n\t\ttorch.nn.utils.clip_grad_norm_(mdan.module.parameters(), 10)\n\t\t# step gradients\n\t\topt.step()\n\t\t\n\t\t# log results\n\t\tif iteration % 10 == 0:\n\t\t\tlogging.info(\n\t\t\t\t'Iteration {}:\\t{:.3f} Domain SYN: {:.3f} Domain GTA: {:.3f} Task: {:.3f}'.format(iteration, np.mean(losses),\n\t\t\t\t                                                                                  np.mean(losses_domain_syn),\n\t\t\t\t                                                                                  np.mean(losses_domain_gta), np.mean(losses_task)))\n\t\t\twriter.add_scalar('loss', np.mean(losses), iteration)\n\t\t\twriter.add_scalar('domain_syn', np.mean(losses_domain_syn), iteration)\n\t\t\twriter.add_scalar('domain_gta', np.mean(losses_domain_gta), iteration)\n\t\t\twriter.add_scalar('task', np.mean(losses_task), iteration)\n\t\titeration += 1\n\t\t\n\t\tif iteration % 500 == 0:\n\t\t\tos.makedirs(output, exist_ok=True)\n\t\t\ttorch.save(net.module.state_dict(), '{}/net-itercurr.pth'.format(output))\n\t\t\n\t\tif iteration % snapshot == 0:\n\t\t\ttorch.save(net.module.state_dict(), '{}/iter_{}.pth'.format(output, iteration))\n\t\t\n\t\tif iteration >= iterations:\n\t\t\tlogging.info('Optimization complete.')\n\n\nif __name__ == '__main__':\n\tmain()\n"
  },
  {
    "path": "tools/__init__.py",
    "content": ""
  },
  {
    "path": "tools/eval_templates.sh",
    "content": "#!/usr/bin/env bash\nexport LC_ALL=C.UTF-8\nexport LANG=C.UTF-8\ncd /nfs/project/libo_i/MADAN\n\nckpt_path=$1\ndatadir=/nfs/project/libo_i/MADAN/data/cityscapes\nmodel=fcn8s\nnum_cls=19\ngpu=0\n\nsudo python3 scripts/eval_fcn.py ${ckpt_path} \\\n        --dataset cityscapes \\\n        --datadir ${datadir} \\\n        --model ${model} --num_cls ${num_cls} \\\n        --gpu ${gpu}"
  }
]