Repository: Seanseattle/SMIS Branch: master Commit: b86a6c05acaa Files: 54 Total size: 225.2 KB Directory structure: gitextract_r57wyjgl/ ├── .gitignore ├── LICENSE.md ├── README.md ├── data/ │ ├── __init__.py │ ├── ade20k_dataset.py │ ├── base_dataset.py │ ├── cityscapes_dataset.py │ ├── coco_dataset.py │ ├── custom_dataset.py │ ├── deepfashion_dataset.py │ ├── facades_dataset.py │ ├── image_folder.py │ └── pix2pix_dataset.py ├── docs/ │ ├── README.md │ ├── b5m.js │ ├── glab.css │ ├── index.html │ ├── lib.js │ └── popup.js ├── models/ │ ├── __init__.py │ ├── networks/ │ │ ├── __init__.py │ │ ├── architecture.py │ │ ├── base_network.py │ │ ├── discriminator.py │ │ ├── encoder.py │ │ ├── generator.py │ │ ├── loss.py │ │ ├── normalization.py │ │ └── sync_batchnorm/ │ │ ├── __init__.py │ │ ├── batchnorm.py │ │ ├── batchnorm_reimpl.py │ │ ├── comm.py │ │ ├── replicate.py │ │ └── unittest.py │ ├── pix2pix_model.py │ └── smis_model.py ├── options/ │ ├── __init__.py │ ├── base_options.py │ ├── test_options.py │ └── train_options.py ├── requirements.txt ├── scripts/ │ ├── ade20k.sh │ ├── cityscapes.sh │ └── deepfashion.sh ├── test.py ├── train.py ├── trainers/ │ ├── __init__.py │ └── pix2pix_trainer.py └── util/ ├── __init__.py ├── coco.py ├── html.py ├── iter_counter.py ├── util.py └── visualizer.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ checkpoints/ results/ .idea/ *.tar.gz *.zip *.pkl *.pyc ================================================ FILE: LICENSE.md ================================================ ## creative commons # Attribution-NonCommercial-ShareAlike 4.0 International Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. 
Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. ### Using Creative Commons Public Licenses Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. * __Considerations for licensors:__ Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. [More considerations for licensors](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensors). * __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. 
If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees). ## Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. ### Section 1 – Definitions. a. __Adapted Material__ means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. 
For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. b. __Adapter's License__ means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. c. __BY-NC-SA Compatible License__ means a license listed at [creativecommons.org/compatiblelicenses](http://creativecommons.org/compatiblelicenses), approved by Creative Commons as essentially the equivalent of this Public License. d. __Copyright and Similar Rights__ means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. e. __Effective Technological Measures__ means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. f. __Exceptions and Limitations__ means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. g. __License Elements__ means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution, NonCommercial, and ShareAlike. h. __Licensed Material__ means the artistic or literary work, database, or other material to which the Licensor applied this Public License. i. 
__Licensed Rights__ means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. j. __Licensor__ means the individual(s) or entity(ies) granting rights under this Public License. k. __NonCommercial__ means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange. l. __Share__ means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. m. __Sui Generis Database Rights__ means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. n. __You__ means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. ### Section 2 – Scope. a. ___License grant.___ 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: A.
reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and B. produce, reproduce, and Share Adapted Material for NonCommercial purposes only. 2. __Exceptions and Limitations.__ For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 3. __Term.__ The term of this Public License is specified in Section 6(a). 4. __Media and formats; technical modifications allowed.__ The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material. 5. __Downstream recipients.__ A. __Offer from the Licensor – Licensed Material.__ Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. B. __Additional offer from the Licensor – Adapted Material.__ Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply. C. __No downstream restrictions.__ You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 6. 
__No endorsement.__ Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). b. ___Other rights.___ 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 2. Patent and trademark rights are not licensed under this Public License. 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes. ### Section 3 – License Conditions. Your exercise of the Licensed Rights is expressly made subject to the following conditions. a. ___Attribution.___ 1. If You Share the Licensed Material (including in modified form), You must: A. retain the following if it is supplied by the Licensor with the Licensed Material: i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); ii. a copyright notice; iii. a notice that refers to this Public License; iv. a notice that refers to the disclaimer of warranties; v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; B. 
indicate if You modified the Licensed Material and retain an indication of any previous modifications; and C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. b. ___ShareAlike.___ In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply. 1. The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-NC-SA Compatible License. 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material. 3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply. ### Section 4 – Sui Generis Database Rights. Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only; b. 
if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. ### Section 5 – Disclaimer of Warranties and Limitation of Liability. a. __Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.__ b. __To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.__ c. 
The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. ### Section 6 – Term and Termination. a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 2. upon express reinstatement by the Licensor. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. ### Section 7 – Other Terms and Conditions. a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. ### Section 8 – Interpretation. a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. b.
To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. ``` Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. Creative Commons may be contacted at [creativecommons.org](http://creativecommons.org/). 
``` ================================================ FILE: README.md ================================================ Semantically Multi-modal Image Synthesis --- ### [Project page](http://seanseattle.github.io/SMIS) / [Paper](https://arxiv.org/abs/2003.12697) / [Demo](https://www.youtube.com/watch?v=uarUonGi_ZU&t=2s) ![gif demo](docs/imgs/smis.gif) \ Semantically Multi-modal Image Synthesis(CVPR2020). \ Zhen Zhu, Zhiliang Xu, Ansheng You, Xiang Bai ### Requirements --- - torch>=1.0.0 - torchvision - dominate - dill - scikit-image - tqdm - opencv-python ### Getting Started ---- #### Data Preperation **DeepFashion** \ **Note:** We provide an example of the [DeepFashion](https://drive.google.com/open?id=1ckx35-mlMv57yzv47bmOCrWTm5l2X-zD) dataset. That is slightly different from the DeepFashion used in our paper due to the impact of the COVID-19. **Cityscapes** \ The Cityscapes dataset can be downloaded at [here](https://www.cityscapes-dataset.com/) **ADE20K** \ The ADE20K dataset can be downloaded at [here](http://sceneparsing.csail.mit.edu/) #### Test/Train the models Download the tar of the pretrained models from the [Google Drive Folder](https://drive.google.com/open?id=1og_9By_xdtnEd9-xawAj4jYbXR6A9deG). Save it in `checkpoints/` and unzip it. There are deepfashion.sh, cityscapes.sh and ade20k.sh in the scripts folder. Change the parameters like `--dataroot` and so on, then comment or uncomment some code to test/train model. And you can specify the `--test_mask` for SMIS test. ### Acknowledgments --- Our code is based on the popular [SPADE](https://github.com/NVlabs/SPADE) ================================================ FILE: data/__init__.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
""" import importlib import torch.utils.data from data.base_dataset import BaseDataset def find_dataset_using_name(dataset_name): # Given the option --dataset [datasetname], # the file "datasets/datasetname_dataset.py" # will be imported. dataset_filename = "data." + dataset_name + "_dataset" datasetlib = importlib.import_module(dataset_filename) # In the file, the class called DatasetNameDataset() will # be instantiated. It has to be a subclass of BaseDataset, # and it is case-insensitive. dataset = None target_dataset_name = dataset_name.replace('_', '') + 'dataset' for name, cls in datasetlib.__dict__.items(): if name.lower() == target_dataset_name.lower() \ and issubclass(cls, BaseDataset): dataset = cls if dataset is None: raise ValueError("In %s.py, there should be a subclass of BaseDataset " "with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name)) return dataset def get_option_setter(dataset_name): dataset_class = find_dataset_using_name(dataset_name) return dataset_class.modify_commandline_options def create_dataloader(opt): dataset = find_dataset_using_name(opt.dataset_mode) instance = dataset() instance.initialize(opt) print("dataset [%s] of size %d was created" % (type(instance).__name__, len(instance))) dataloader = torch.utils.data.DataLoader( instance, batch_size=opt.batchSize, shuffle=not opt.serial_batches, num_workers=int(opt.nThreads), drop_last=opt.isTrain ) return dataloader ================================================ FILE: data/ade20k_dataset.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
""" from data.pix2pix_dataset import Pix2pixDataset from data.image_folder import make_dataset class ADE20KDataset(Pix2pixDataset): @staticmethod def modify_commandline_options(parser, is_train): parser = Pix2pixDataset.modify_commandline_options(parser, is_train) parser.set_defaults(preprocess_mode='resize_and_crop') if is_train: parser.set_defaults(load_size=286) else: parser.set_defaults(load_size=256) parser.set_defaults(crop_size=256) parser.set_defaults(display_winsize=256) parser.set_defaults(label_nc=150) parser.set_defaults(contain_dontcare_label=True) parser.set_defaults(cache_filelist_read=False) parser.set_defaults(cache_filelist_write=False) parser.set_defaults(no_instance=True) return parser def get_paths(self, opt): root = opt.dataroot phase = 'val' if opt.phase == 'test' else 'train' all_images = make_dataset(root, recursive=True, read_cache=False, write_cache=False) image_paths = [] label_paths = [] for p in all_images: if '_%s_' % phase not in p: continue if p.endswith('.jpg'): image_paths.append(p) elif p.endswith('.png'): label_paths.append(p) instance_paths = [] # don't use instance map for ade20k return label_paths, image_paths, instance_paths # In ADE20k, 'unknown' label is of value 0. # Change the 'unknown' label to the last label to match other datasets. def postprocess(self, input_dict): label = input_dict['label'] label = label - 1 label[label == -1] = self.opt.label_nc ================================================ FILE: data/base_dataset.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
""" import torch.utils.data as data from PIL import Image import torchvision.transforms as transforms import numpy as np import random class BaseDataset(data.Dataset): def __init__(self): super(BaseDataset, self).__init__() @staticmethod def modify_commandline_options(parser, is_train): return parser def initialize(self, opt): pass def get_params(opt, size): w, h = size new_h = h new_w = w if opt.preprocess_mode == 'resize_and_crop': new_h = new_w = opt.load_size elif opt.preprocess_mode == 'scale_width_and_crop': new_w = opt.load_size new_h = opt.load_size * h // w elif opt.preprocess_mode == 'scale_shortside_and_crop': ss, ls = min(w, h), max(w, h) # shortside and longside width_is_shorter = w == ss ls = int(opt.load_size * ls / ss) new_w, new_h = (ss, ls) if width_is_shorter else (ls, ss) x = random.randint(0, np.maximum(0, new_w - opt.crop_size)) y = random.randint(0, np.maximum(0, new_h - opt.crop_size)) flip = random.random() > 0.5 return {'crop_pos': (x, y), 'flip': flip} def get_transform(opt, params, method=Image.BICUBIC, normalize=True, toTensor=True): transform_list = [] if 'resize' in opt.preprocess_mode: osize = [opt.load_size, opt.load_size] transform_list.append(transforms.Resize(osize, interpolation=method)) elif 'scale_width' in opt.preprocess_mode: transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, method))) elif 'scale_shortside' in opt.preprocess_mode: transform_list.append(transforms.Lambda(lambda img: __scale_shortside(img, opt.load_size, method))) if 'crop' in opt.preprocess_mode: transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size))) if opt.preprocess_mode == 'none': base = 32 transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method))) if opt.preprocess_mode == 'fixed': w = opt.crop_size h = round(opt.crop_size / opt.aspect_ratio) transform_list.append(transforms.Lambda(lambda img: __resize(img, w, h, method))) if opt.isTrain and not 
opt.no_flip: transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip']))) if toTensor: transform_list += [transforms.ToTensor()] if normalize: transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] return transforms.Compose(transform_list) def normalize(): return transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) def __resize(img, w, h, method=Image.BICUBIC): return img.resize((w, h), method) def __make_power_2(img, base, method=Image.BICUBIC): ow, oh = img.size h = int(round(oh / base) * base) w = int(round(ow / base) * base) if (h == oh) and (w == ow): return img return img.resize((w, h), method) def __scale_width(img, target_width, method=Image.BICUBIC): ow, oh = img.size if (ow == target_width): return img w = target_width h = int(target_width * oh / ow) return img.resize((w, h), method) def __scale_shortside(img, target_width, method=Image.BICUBIC): ow, oh = img.size ss, ls = min(ow, oh), max(ow, oh) # shortside and longside width_is_shorter = ow == ss if (ss == target_width): return img ls = int(target_width * ls / ss) nw, nh = (ss, ls) if width_is_shorter else (ls, ss) return img.resize((nw, nh), method) def __crop(img, pos, size): ow, oh = img.size x1, y1 = pos tw = th = size return img.crop((x1, y1, x1 + tw, y1 + th)) def __flip(img, flip): if flip: return img.transpose(Image.FLIP_LEFT_RIGHT) return img ================================================ FILE: data/cityscapes_dataset.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
""" import os.path from data.pix2pix_dataset import Pix2pixDataset from data.image_folder import make_dataset class CityscapesDataset(Pix2pixDataset): @staticmethod def modify_commandline_options(parser, is_train): parser = Pix2pixDataset.modify_commandline_options(parser, is_train) parser.set_defaults(preprocess_mode='fixed') parser.set_defaults(load_size=512) parser.set_defaults(crop_size=512) parser.set_defaults(display_winsize=512) parser.set_defaults(label_nc=35) parser.set_defaults(aspect_ratio=2.0) opt, _ = parser.parse_known_args() if hasattr(opt, 'num_upsampling_layers'): parser.set_defaults(num_upsampling_layers='more') return parser def get_paths(self, opt): root = opt.dataroot phase = 'val' if opt.phase == 'test' else 'train' label_dir = os.path.join(root, 'gtFine', phase) label_paths_all = make_dataset(label_dir, recursive=True) label_paths = [p for p in label_paths_all if p.endswith('_labelIds.png')] image_dir = os.path.join(root, 'leftImg8bit', phase) image_paths = make_dataset(image_dir, recursive=True) if not opt.no_instance: instance_paths = [p for p in label_paths_all if p.endswith('_instanceIds.png')] else: instance_paths = [] return label_paths, image_paths, instance_paths def paths_match(self, path1, path2): name1 = os.path.basename(path1) name2 = os.path.basename(path2) # compare the first 3 components, [city]_[id1]_[id2] return '_'.join(name1.split('_')[:3]) == \ '_'.join(name2.split('_')[:3]) ================================================ FILE: data/coco_dataset.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
""" import os.path from data.pix2pix_dataset import Pix2pixDataset from data.image_folder import make_dataset class CocoDataset(Pix2pixDataset): @staticmethod def modify_commandline_options(parser, is_train): parser = Pix2pixDataset.modify_commandline_options(parser, is_train) parser.add_argument('--coco_no_portraits', action='store_true') parser.set_defaults(preprocess_mode='resize_and_crop') if is_train: parser.set_defaults(load_size=286) else: parser.set_defaults(load_size=256) parser.set_defaults(crop_size=256) parser.set_defaults(display_winsize=256) parser.set_defaults(label_nc=182) parser.set_defaults(contain_dontcare_label=True) parser.set_defaults(cache_filelist_read=True) parser.set_defaults(cache_filelist_write=True) return parser def get_paths(self, opt): root = opt.dataroot phase = 'val' if opt.phase == 'test' else opt.phase label_dir = os.path.join(root, '%s_label' % phase) label_paths = make_dataset(label_dir, recursive=False, read_cache=True) if not opt.coco_no_portraits and opt.isTrain: label_portrait_dir = os.path.join(root, '%s_label_portrait' % phase) if os.path.isdir(label_portrait_dir): label_portrait_paths = make_dataset(label_portrait_dir, recursive=False, read_cache=True) label_paths += label_portrait_paths image_dir = os.path.join(root, '%s_img' % phase) image_paths = make_dataset(image_dir, recursive=False, read_cache=True) if not opt.coco_no_portraits and opt.isTrain: image_portrait_dir = os.path.join(root, '%s_img_portrait' % phase) if os.path.isdir(image_portrait_dir): image_portrait_paths = make_dataset(image_portrait_dir, recursive=False, read_cache=True) image_paths += image_portrait_paths if not opt.no_instance: instance_dir = os.path.join(root, '%s_inst' % phase) instance_paths = make_dataset(instance_dir, recursive=False, read_cache=True) if not opt.coco_no_portraits and opt.isTrain: instance_portrait_dir = os.path.join(root, '%s_inst_portrait' % phase) if os.path.isdir(instance_portrait_dir): instance_portrait_paths = 
make_dataset(instance_portrait_dir, recursive=False, read_cache=True) instance_paths += instance_portrait_paths else: instance_paths = [] return label_paths, image_paths, instance_paths ================================================ FILE: data/custom_dataset.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). """ from data.pix2pix_dataset import Pix2pixDataset from data.image_folder import make_dataset class CustomDataset(Pix2pixDataset): """ Dataset that loads images from directories Use option --label_dir, --image_dir, --instance_dir to specify the directories. The images in the directories are sorted in alphabetical order and paired in order. """ @staticmethod def modify_commandline_options(parser, is_train): parser = Pix2pixDataset.modify_commandline_options(parser, is_train) parser.set_defaults(preprocess_mode='resize_and_crop') load_size = 286 if is_train else 256 parser.set_defaults(load_size=load_size) parser.set_defaults(crop_size=256) parser.set_defaults(display_winsize=256) parser.set_defaults(label_nc=13) parser.set_defaults(contain_dontcare_label=False) parser.add_argument('--label_dir', type=str, required=True, help='path to the directory that contains label images') parser.add_argument('--image_dir', type=str, required=True, help='path to the directory that contains photo images') parser.add_argument('--instance_dir', type=str, default='', help='path to the directory that contains instance maps. 
Leave black if not exists') return parser def get_paths(self, opt): label_dir = opt.label_dir label_paths = make_dataset(label_dir, recursive=False, read_cache=True) image_dir = opt.image_dir image_paths = make_dataset(image_dir, recursive=False, read_cache=True) if len(opt.instance_dir) > 0: instance_dir = opt.instance_dir instance_paths = make_dataset(instance_dir, recursive=False, read_cache=True) else: instance_paths = [] assert len(label_paths) == len(image_paths), "The #images in %s and %s do not match. Is there something wrong?" return label_paths, image_paths, instance_paths ================================================ FILE: data/deepfashion_dataset.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). """ import os.path from data.pix2pix_dataset import Pix2pixDataset from data.image_folder import make_dataset class DeepfashionDataset(Pix2pixDataset): @staticmethod def modify_commandline_options(parser, is_train): parser = Pix2pixDataset.modify_commandline_options(parser, is_train) parser.set_defaults(preprocess_mode='resize_and_crop') parser.set_defaults(load_size=256) parser.set_defaults(crop_size=256) parser.set_defaults(display_winsize=256) parser.set_defaults(label_nc=8) opt, _ = parser.parse_known_args() return parser def get_paths(self, opt): root = opt.dataroot phase = 'test' if opt.phase == 'test' else 'train' label_dir = os.path.join(root, 'cihp_' + phase + '_mask') label_paths_all = make_dataset(label_dir, recursive=False) label_paths = [p for p in label_paths_all if p.endswith('.png')] image_dir = os.path.join(root, phase) image_paths = make_dataset(image_dir, recursive=False) instance_paths = [] return label_paths, image_paths, instance_paths ================================================ FILE: data/facades_dataset.py ================================================ """ 
Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). """ import os.path from data.pix2pix_dataset import Pix2pixDataset from data.image_folder import make_dataset class FacadesDataset(Pix2pixDataset): @staticmethod def modify_commandline_options(parser, is_train): parser = Pix2pixDataset.modify_commandline_options(parser, is_train) parser.set_defaults(dataroot='./dataset/facades/') parser.set_defaults(preprocess_mode='resize_and_crop') load_size = 286 if is_train else 256 parser.set_defaults(load_size=load_size) parser.set_defaults(crop_size=256) parser.set_defaults(display_winsize=256) parser.set_defaults(label_nc=13) parser.set_defaults(contain_dontcare_label=False) parser.set_defaults(no_instance=True) return parser def get_paths(self, opt): root = opt.dataroot phase = 'val' if opt.phase == 'test' else opt.phase label_dir = os.path.join(root, '%s_label' % phase) label_paths = make_dataset(label_dir, recursive=False, read_cache=True) image_dir = os.path.join(root, '%s_img' % phase) image_paths = make_dataset(image_dir, recursive=False, read_cache=True) instance_paths = [] return label_paths, image_paths, instance_paths ================================================ FILE: data/image_folder.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
""" ############################################################################### # Code from # https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py # Modified the original code so that it also loads images from the current # directory as well as the subdirectories ############################################################################### import torch.utils.data as data from PIL import Image import os IMG_EXTENSIONS = [ '.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff', '.webp' ] def is_image_file(filename): return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) def make_dataset_rec(dir, images): assert os.path.isdir(dir), '%s is not a valid directory' % dir for root, dnames, fnames in sorted(os.walk(dir, followlinks=True)): for fname in fnames: if is_image_file(fname): path = os.path.join(root, fname) images.append(path) def make_dataset(dir, recursive=False, read_cache=False, write_cache=False): images = [] if read_cache: possible_filelist = os.path.join(dir, 'files.list') if os.path.isfile(possible_filelist): with open(possible_filelist, 'r') as f: images = f.read().splitlines() return images if recursive: make_dataset_rec(dir, images) else: assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir for root, dnames, fnames in sorted(os.walk(dir)): for fname in fnames: if is_image_file(fname): path = os.path.join(root, fname) images.append(path) if write_cache: filelist_cache = os.path.join(dir, 'files.list') with open(filelist_cache, 'w') as f: for path in images: f.write("%s\n" % path) print('wrote filelist cache at %s' % filelist_cache) return images def default_loader(path): return Image.open(path).convert('RGB') class ImageFolder(data.Dataset): def __init__(self, root, transform=None, return_paths=False, loader=default_loader): imgs = make_dataset(root) if len(imgs) == 0: raise(RuntimeError("Found 0 images in: " + root + "\n" "Supported image 
extensions are: " + ",".join(IMG_EXTENSIONS))) self.root = root self.imgs = imgs self.transform = transform self.return_paths = return_paths self.loader = loader def __getitem__(self, index): path = self.imgs[index] img = self.loader(path) if self.transform is not None: img = self.transform(img) if self.return_paths: return img, path else: return img def __len__(self): return len(self.imgs) ================================================ FILE: data/pix2pix_dataset.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). """ from data.base_dataset import BaseDataset, get_params, get_transform from PIL import Image import util.util as util import os import cv2 as cv import numpy as np class Pix2pixDataset(BaseDataset): @staticmethod def modify_commandline_options(parser, is_train): parser.add_argument('--no_pairing_check', action='store_true', help='If specified, skip sanity check of correct label-image file pairing') return parser def initialize(self, opt): self.opt = opt label_paths, image_paths, instance_paths = self.get_paths(opt) util.natural_sort(label_paths) util.natural_sort(image_paths) if not opt.no_instance: util.natural_sort(instance_paths) label_paths = label_paths[:opt.max_dataset_size] image_paths = image_paths[:opt.max_dataset_size] instance_paths = instance_paths[:opt.max_dataset_size] self.label_paths = label_paths self.image_paths = image_paths self.instance_paths = instance_paths size = len(self.label_paths) self.dataset_size = size print(self.dataset_size) def get_paths(self, opt): label_paths = [] image_paths = [] instance_paths = [] assert False, "A subclass of Pix2pixDataset must override self.get_paths(self, opt)" return label_paths, image_paths, instance_paths def paths_match(self, path1, path2): filename1_without_ext = os.path.splitext(os.path.basename(path1))[0] 
filename2_without_ext = os.path.splitext(os.path.basename(path2))[0] return filename1_without_ext == filename2_without_ext def __getitem__(self, index): # Label Image label_path = self.label_paths[index] label = Image.open(label_path) # print(label_path) params = get_params(self.opt, label.size) transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False) label_tensor = transform_label(label) * 255.0 label_tensor[label_tensor == 255] = self.opt.label_nc # 'unknown' is opt.label_nc image_path = self.image_paths[index] assert self.paths_match(label_path, image_path), \ "The label_path %s and image_path %s don't match." % \ (label_path, image_path) image = Image.open(image_path) image = image.convert('RGB') transform_image = get_transform(self.opt, params) image_tensor = transform_image(image) # if using instance maps if self.opt.no_instance: instance_tensor = 0 else: instance_path = self.instance_paths[index] instance = Image.open(instance_path) if instance.mode == 'L': instance_tensor = transform_label(instance) * 255 instance_tensor = instance_tensor.long() else: instance_tensor = transform_label(instance) input_dict = {'label': label_tensor, 'instance': instance_tensor, 'image': image_tensor, 'path': image_path, } self.postprocess(input_dict) return input_dict def postprocess(self, input_dict): return input_dict def __len__(self): return self.dataset_size ================================================ FILE: docs/README.md ================================================ ================================================ FILE: docs/b5m.js ================================================ !function(a){var b,c,d="20140605180629",e="http://cdn.bang5mai.com/upload/plugin/assets/main",f="http://b5tcdn.bang5mai.com",g="http://un.114dianxin.com",h="http://p.b5m.com",i="http://ucenter.b5m.com",j="http://c.b5m.com",k={module_url:e+"/js/b5m.{module}.js?v="+d,getModuleUrl:function(a){return 
this.module_url.replace(/\{module\}/g,a)},paths:{jquery:{path:e+"/js/jquery-1.7.2.min.js",_export:function(){return a.$5m?a.$5m:(a.$5m=a.jQuery.noConflict(!0),a.$5m)}},"jquery-highcharts":{path:e+"/js/jquery-highcharts.js",_export:function(){return a.$5m=a.jQuery.noConflict(!0),a.$5m}}}};!function(d,e){function f(a,b){return u.call(a,b)}function g(a){return"[object Array]"===w.call(a)}function i(a,b){var c=document.getElementsByTagName("head")[0],d=document.createElement("script");d.type="text/javascript",d.async=!0,0!==a.indexOf("http://")&&(a=h+a),d.src=a,d.onload=d.onreadystatechange=function(){d.readyState&&"loaded"!==d.readyState&&"complete"!==d.readyState||(d.onload=d.onreadystatechange=null,b&&b(),d.parentNode.removeChild(d))},c.appendChild(d)}function j(a){for(var b=0,c=a.length;c>b;b++)if(!f(x,a[b]))return!1;return!0}function l(a){if(a){"string"==typeof a&&(a=[a]);for(var b=0,c=a.length;c>b;b++)f(z,a[b])||f(x,a[b])||f(B,a[b])||(z[a[b]]=!0,y.push(a[b]),setTimeout(function(){p()},1))}}function m(b){for(var c=b.dependencies||[],d=[],e=0,f=c.length;f>e;e++)d.push(x[c[e]]);return n(b.name,b.fn.apply(a,d)),setTimeout(function(){s()},1),!0}function n(a,b){x[a]=b,q(),s()}function o(a){if(a){var b=a.name;f(B,b)||(B[b]=!0,A.push(a))}}function p(a){if(!D){D=!0,"undefined"!=typeof a&&a||(a=y);var b=a.shift();if(!b)return void(D=!1);var c,d=k.paths[b]||k.getModuleUrl(b);"object"==typeof d&&(c=d._export,d=d.path),d?i(d,function(){"function"==typeof c&&n(b,c())}):e("module["+b+"] wait to export"),D=!1,p(a)}}function q(a){"undefined"!=typeof a&&a||(a=A);for(var b,c=-1;++ce;e++)d.push(x[c[e]]);return setTimeout(function(){b.fn.apply(a,d)},0),!0}function s(a){if("undefined"!=typeof a&&a||(a=C),0!==a.length)for(var b,c=-1;++c"1.4.3"&&"undefined"!=typeof jQuery.ajax&&(x.jquery=d.jQuery||d.$,d.$5m=x.jquery)}(),b=function(a,b,c,d){if(!f(x,a)||d&&d.force){if("function"==typeof b||g(b)&&0===b.length)return void n(a,b());var e={name:a,dependencies:b,fn:c},h=e.dependencies;return 
j(h)?void m(e):(l(h),void o(e))}},c=function(a,b){if(0!==arguments.length){if("function"==typeof a&&1===arguments.length)return void b();var c={dependencies:a,fn:b},d=c.dependencies;return j(d)?void r(c):(l(d),void t(c))}}}(a,function(a){window.console&&console.log(a)});var l=a["b5mshoppingassist"+d]={};!function(a){a.define=b,a.require=c,a.build_no=d,a.LOCATION=window.location||document.location,a.assets_base_url=e,a._=a.browser={checkBoxModel:function(){if("undefined"!=typeof a.boxModel)return a.boxModel;{var b=document.createElement("div");document.body}return b.style.cssText="visibility:hidden;border:0;width:1px;height:0;position:static;padding:0px;margin:0px;padding-left:1px;",document.body.appendChild(b),a.boxModel=this.boxModel=2===b.offsetWidth,document.body.removeChild(b),b=null,a.boxModel},isIE6:function(){var a=window.navigator.userAgent.toLowerCase(),b=/(msie) ([\w.]+)/.exec(a);return null!=b&&b[2]<7}(),isIE:function(){var a=window.navigator.userAgent.toLowerCase(),b=/(msie) ([\w.]+)/.exec(a);return null!=b}(),loadCss:function(){if(this.cssLoaded!==!0){var b=this.checkBoxModel(),c=(b?"b5m-plugin.css":"b5m-plugin.qks.css")+"?v="+a.build_no,e=document.createElement("link");e.rel="stylesheet",e.href=a.assets_base_url+"/css/"+(d?"default":"v5")+"/"+c,e.type="text/css",document.getElementsByTagName("head")[0].appendChild(e),this.cssLoaded=!0}},getDomain:function(b){var c=b||a.LOCATION.href;try{c=c.match(/([-\w]+\.\b(?:net\.cn|com\.hk|com\.cn|com|cn|net|org|cc|tv$|hk)\b)/)[1]}catch(d){c=a.LOCATION.hostname}return c}},a.domain=a._.getDomain()}(l),function(a,c){var 
l=["111.com.cn","12dian.com","136126.com","136buy.com","1626buy.com","1mall.com","20aj.com","228.com.cn","24dq.com","360buy.com","360hqb.com","360kxr.com","360mart.com","360zhai.com","365.com","3guo.cn","4006009207.com","513523.com","51buy.com","yixun.com","51fanli.com","51youpin.com","525j.com.cn","5366.com","55bbs.com","55tuan.com","5lux.com","5taoe.com","7cv.com","838buy.com","91pretty.com","99buy.com.cn","99read.com","99vk.com","afffff.com","ai356.com","aimer.com.cn","amazon.cn","aoliz.com","atopi.cn","bagtree.com","baidu.com","bairong.com","banggo.com","bearbuy.com.cn","behui.com","beifabook.com","beyond.cn","binggo.com","bookall.cn","bookschina.com","bookuu.com","burberry.com","buy007.com","buyjk.com","caomeipai.com","carinalaukl.com","cdg2006.com","chicbaza.com","chictalk.com.cn","chinadrtv.com","coo8.com","crucco.com","d1car.com","d1.com.cn","dahaodian.com","dahuozhan.com","damai.cn","dangdang.com","daoyao.com","daphne.cn","dazhongdianqi.com.cn","dhc.net.cn","dianping.com","didamall.com","diyimeili.com","do93.com","doin.cn","domoho.com","dooland.com","douban.com","duitang.com","duoduofarm.com","dwgou.com","easy361.com","efeihu.com","egu365.com","ehaier.com","eiboa.com","ej100.cn","enet.com.cn","epetbar.com","epinwei.com","epkmall.com","etam.com.cn","etao.com","fanrry.cn","faxianla.com","fc900.com","fclub.cn","fglady.cn","foryouforme.com","gaojie.com","gap.cn","ggooa.com","giftmart.com.cn","giordano.com","go2am.com","gome.com.cn","goodful.com","gotoread.com","goujiuwang.com","gqt168.com","guang.com","guangjiela.com","guopi.com","hany.cn","happigo.com","herbuy.com.cn","hitao.com","hmeili.com","hodo.cn","homecl.com","homevv.com","htjz.com","huilemai.com","huimai365.com","huolibaobao.com","huolida.com","hyj.com","iebaba.com","ihush.com","immyhome.com","imobile.com.cn","imoda.com","it168.com","itruelife.com","j1923.com","jacketman.cn","jd.com","jddiy.com","jianianle.com","jianke.com","jiapin.com","jiuhang.cn","jiuxian.com","jockey.cn","joyeth.com","jukangda","jum
ei.com","jxdyf.com","k121.com","kadang.com","keede.com","kela.cn","kimiss.com","kongfz.cn","kouclo.com","ladypk.com","lafaso.com","lamiu.com","laredoute.cn","lashou.com","learbetty.com","lebiao.net","lecake.com","ledaojia.com","leftlady.com","leho.com","letao.com","leyou.com.cn","lifevc.com","lifu520.com","lijiababy.com.cn","likeface.com","lingshi.com","lining.com","loobag.com","lookgee.com","lovo.cn","lqdjf.com","luce.com.cn","lucemall.com.cn","luckcart.com","luckigo.com","lusen.com","lvhezi.com","m18.com","m360.com.cn","m6go.com","maiakaweh.com","maichawang.com","maidq.com","maiduo.com","mailuntai.cn","maiwazi.com","maiweila.com","maoer360.com","mbaobao.com","mchepin.com","meici.com","meilishuo.com","meiribuy.com","meituan.com","meiyi.cn","menglu.com","mfplaza.com","misslele.com","miumiu365.com","mixr.cn","mmloo.com","mncake.com","mogujie.com","mojing8.com","mrzero.cn","mutnam.com","muyingzhijia.com","mycoo.cn","myrainbow.cn","myt.hk","nala.com.cn","nanjiren.com.cn","necool.com","new7.com","newegg.com.cn","no5.com.cn","nop.cn","nuanka.cn","nuomi.com","ochirly.com","ogage.cn","okbuy.com","okgolf.cn","okjee.com","onlylady.com","onlyts.cn","orange3c.com","ouku.com","oyeah.com.cn","paipai.com","paixie.net","pb89.com","pcbaby.com.cn","pchome.net","pchouse.com.cn","pclady.com.cn","pconline.com.cn","pcpop.com","pett.cn","popyj.com","pufung.com","pupai.cn","qinqinbaby.com","qiwang360.com","qplmall.com","qq.com","quwan.com","qxian.com","raccfawa.com","redbaby.com.cn","reneeze.com","ruci.cn","sasa.com","s.cn","sephora.cn","shopin.net","skinstorechina.com","so.com","soso_bak.com","strawberrynet.com","suning.com","t0001.com","t3.com.cn","tangrencun.cn","tankl.com","tao3c.com","taobao.com","taofanw.com","taoxie.com","tee7.com","tiantian.com","tmall.com","togj.com","tokyopretty.com","tonlion.com","topnewonline.cn","trura.com","tuan800.com","tymall.com.cn","u8518.com","uiyi.cn","ukool.com.cn","umanto.com","uniqlo.cn","urcosme.com","uya100.com","uzgood.com","v100.com.cn","vancl.c
om","vcotton.com","vegou.com","vico.cn","vivian.cn","vjia.com","vzi800.cn","walanwalan.com","wangpiao.com","wbiao.cn","weibo.com","weimituan.com","whendream.com","wine9.com","winekee.com","winenice.com","winxuan.com","wl.cn","womai.com","wowsai.com","woxihuan.com","wumeiwang.com","x.com.cn","xiaozhuren.com","xijie.com","xiu.com","yaahe.cn","yanyue.cn","yaofang.cn","yesky.com","yesmywine.com","yidianda.com","yihaodian.com","yhd.com","yintai.com","yizhedian.com","yohobuy.com","yoka.com","yooane.com","yougou.com","ywmei.com","zaihuni.com","zbird.com","zgcbb.com","zhimei.com","zhuangai.com","zm7.cn","zocai.com","zol.com.cn","zol.com","zuomee.com","zwzhome.com","lefeng.com","958shop.com","china-pub.com","wanggou.com","vip.com","baoyeah.com","monteamor.com","qjherb.com","moonbasa.com","ing2ing.com","womai.com","vmall.com","1688.com","etao.com","milier.com","xifuquan.com","sfbest.com","j1.com","liebo.com","esprit.cn","metromall.com.cn","pba.cn","shangpin.com","handuyishe.com","secoo.com","wangjiu.com","masamaso.com","vivian.cn","linkmasa.com","camel.com.cn","naruko.com.cn","sportica.cn","zhenpin.com","xiaomi.com","mi.com","letv.com","bosideng.cn","coolpad.cn","handu.com","ebay.com","staples.cn","feiniu.com","okhqb.com","meilele.com"],m=["ctrip.com","ly.com","lvmama.com","tuniu.com","qunar.com","uzai.com","mangocity.com"],n=["taobao.com","meituan.com","jumei.com","dianping.com","gaopeng.com","58.com","lashou.com","pztuan.com","liketuan.com","nuomi.com"],o=["ctrip.com","ly.com","lvmama.com","qunar.com","meituan.com","jumei.com","lashou.com","nuomi.com","dianping.com","gaopeng.qq.com","gaopeng.com","elong.com","mangocity.com","kuxun.cn","xiu.com","zhuna.cn","pztuan.com","liketuan.com","hao123.com","2345.com","sohu.com","sogou.com","duba.com","qq.com","rising.cn"],p=["taobao.com","sogou.com","2345.com","hao123.com","qzone.qq","autohome","xxhh","letv","jide123","pcauto","auto.sohu","pps","bitauto","duba.com","rising.cn","qq.com","baidu.com","youku.com","tudou.com","iqiyi.com","
sohu.com"],q=document.getElementById("b5mmain");q=q.src&&q.src.substring(q.src.indexOf("?")+1);var r=q.split("&");q={};for(var s,t=0,u=r.length;u>t;t++)s=r[t].split("="),q[s[0]]=s[1]||"";b("server",function(){return{server:h,cpsServer:j,ucenterserver:i,assets_base_url:e,assets_union_url:g,domain:a._.getDomain(),uuid:q.uuid,version:q.version,source:q.source,hostname:a.LOCATION.hostname}});for(var v=["maxthon3","firefox","liebao","360se","360jisu","chrome"],w=v.join(",").indexOf(q.source)>-1?!0:!1,x=!("11000"!=q.source&&"50000"!=q.source),y=a.isMall=!!l.join(",").match(new RegExp("\\b"+a.domain+"\\b")),z=a.isTour=!!m.join(",").match(new RegExp("\\b"+a.domain+"\\b")),A=a.isSl=!(x||!o.join(",").match(new RegExp("\\b"+a.domain+"\\b"))||a.browser.isIE&&q.ie32!=c&&!(a.browser.isIE&&parseInt(q.ie32,10)>0)),B=a.isTuan=!!n.join(",").match(new RegExp("\\b"+a.domain+"\\b")),C=!1,t=0;t=0){C=!0;break}var D=a.isNav=!(x||!C||"1"===q.nonav);if(k.paths.all={path:e+"/js/b5m.plugin.all.js?v="+d,_export:function(){return a}},k.paths.tg={path:f+"/js/flag.js?v="+Math.floor((new Date).getTime()/1e4),_export:function(){return window.__5_tg_}},k.paths.sejieall={path:e+"/js/b5m.plugin.sejie.all.js?v="+d,_export:function(){return a}},k.paths.rule={path:e+"/js/plugin/rule/sites/"+a.domain+"?v="+d,_export:function(){return a.rule}},k.paths.env={path:"/extension.do?method=js&buildno="+d+"&url="+encodeURIComponent(a.LOCATION.href)+"&acd="+(q.acd||"")+"&reason="+(q.reason||"")+"&source="+q.source+"&uuid="+q.uuid+"&domain="+a.domain+"&version="+q.version+"&site="+a.domain+(a.browser.isIE?"&t="+(new Date).getTime():""),_export:function(){return a.cookie=Function("return "+(a.env.cookie||"{}"))(),a.env}},k.paths.nav={path:e+"/js/b5m.nav.js?v="+d,_export:function(){return 
a.nav}},a.require(["server"],function(b){"b5m.com"==b.domain&&a.require(["env"],function(){})}),w||x||!A&&!y||6==q.reason&&"jd.com"!=a.domain||a.require(["sl"],function(a){a.run()}),a.require(["tg"],function(b){if(!b||b.embed){a.require(["adv","server"],function(a,b){a.server=b.server,a.run()});var c="15003,15004,15005,15006,15008,15009,15012,15013,15014,15015,15018,15020,15021,15022,15023,15025,15026,15027,15028,15029,15030,15031,15032,15033,15035,15036,15039,15041,20000,20001";D&&!w&&(0!==q.source.indexOf("15")||c.indexOf(q.source)>=0)&&a.require(["jquery-highcharts","nav","server","common"],function(b,c,d,f){b.extend(c,{server:d,common:f,uuid:q.uuid,acd:q.acd,source:q.source,domain:a.domain,host:a.LOCATION.host,assets_base_url:e,href:a.LOCATION.host+a.LOCATION.pathname,reason:q.reason}),setTimeout(function(){c.init()},30)})}}),y||z||B){a._.loadCss();var E=(new Date).getTime(),F=["jquery-highcharts","all","env"];d||(F=y||z||B?["jquery-highcharts","all","env","rule"]:["jquery-highcharts","all","env"],window.S=a),a.require(F,function(a,b,c){b.console.debug("load time --------------"+((new Date).getTime()-E)+"ms"),b.util.extend(b.constants,q,c,{ucenterserver:i,forwardBase:h+"/",assets_base_url:e+"/"}),b.filterChain=function(){this.index=-1,this.chain=arguments.length>0?Array.prototype.slice.call(arguments,0):[],"slice"in arguments[0]&&(this.chain=arguments[0])},b.filterChain.prototype.register=function(a){this.chain.push(a)},b.filterChain.prototype.insert=function(a){this.chain.splice(this.index+1,0,a)},b.filterChain.prototype.run=function(){this.index++,this.index Semantically Multi-modal Image Synthesis

Semantically Multi-modal Image Synthesis

Zhen Zhu*    Zhiliang Xu*    Ansheng You    Xiang Bai   

Huazhong University of Science and Technology    Peking University   

in CVPR 2020

arXiv    PyTorch

Abstract

In this paper, we focus on semantically multi-modal image synthesis (SMIS) task, namely, generating multi-modal images at the semantic level. Previous work seeks to use multiple class-specific generators, constraining its usage in datasets with a small number of classes. We instead propose a novel Group Decreasing Network (GroupDNet) that leverages group convolutions in the generator and progressively decreases the group numbers of the convolutions in the decoder. Consequently, GroupDNet is armed with much more controllability on translating semantic labels to natural images and has plausible high-quality yields for datasets with many classes. Experiments on several challenging datasets demonstrate the superiority of GroupDNet on performing the SMIS task. We also show that GroupDNet is capable of performing a wide range of interesting synthesis applications.

Video of Semantically Multi-modal Image Synthesis


Related Work


Thanks to other Demonstrations

================================================ FILE: docs/lib.js ================================================ /* This file contains only functions necessary for the article features The full library code and enhanced versions of the functions present here can be found at http://v2studio.com/k/code/lib/ ARRAY EXTENSIONS push(item [,...,item]) Mimics standard push for IE5, which doesn't implement it. find(value [, start]) searches array for value starting at start (if start is not provided, searches from the beginning). returns value index if found, otherwise returns -1; has(value) returns true if value is found in array, otherwise false; FUNCTIONAL map(list, func) traverses list, applying func to list, returning an array of values returned by func if func is not provided, the array item is returned itself. this is an easy way to transform fake arrays (e.g. the arguments object of a function or nodeList objects) into real javascript arrays. map also provides a safe way for traversing only an array's indexed items, ignoring its other properties. (as opposed to how for-in works) this is a simplified version of python's map. parameter order is different, only a single list (array) is accepted, and the parameters passed to func are different: func takes the current item, then, optionally, the current index and a reference to the list (so that func can modify list) filter(list, func) returns an array of values in list for which func is true if func is not specified the values are evaluated themselves, that is, filter will return an array of the values in list which evaluate to true this is a similar to python's filter, but parameter order is inverted DOM getElem(elem) returns an element in document. elem can be the id of such element or the element itself (in which case the function does nothing, merely returning it) this function is useful to enable other functions to take either an element directly or an element id as parameter. 
if elem is string and there's no element with such id, it throws an error. if elem is an object but not an Element, it's returned anyway hasClass(elem, className) Checks the class list of element elem or element of id elem for className, if found, returns true, otherwise false. The tested element can have multiple space-separated classes. className must be a single class (i.e. can't be a list). getElementsByClass(className [, tagName [, parentNode]]) Returns elements having class className, optionally being a tag tagName (otherwise any tag), optionally being a descendant of parentNode (otherwise the whole document is searched) DOM EVENTS listen(event,elem,func) x-browser function to add event listeners listens for event on elem with func event is string denoting the event name without the on- prefix. e.g. 'click' elem is either the element object or the element's id func is the function to call when the event is triggered in IE, func is wrapped and this wrapper passes in a W3CDOM_Event (a faux simplified Event object) mlisten(event, elem_list, func) same as listen but takes an element list (a NodeList, Array, etc) instead of an element. W3CDOM_Event(currentTarget) is a faux Event constructor. it should be passed in IE when a function expects a real Event object. For now it only implements the currentTarget property and the preventDefault method. The currentTarget value must be passed as a paremeter at the moment of construction. MISC CLEANING-AFTER-MICROSOFT STUFF isUndefined(v) returns true if [v] is not defined, false otherwise IE 5.0 does not support the undefined keyword, so we cannot do a direct comparison such as v===undefined. 
*/ // ARRAY EXTENSIONS if (!Array.prototype.push) Array.prototype.push = function() { for (var i=0; i 0: assert(torch.cuda.is_available()) net.cuda() net.init_weights(opt.init_type, opt.init_variance) return net def define_G(opt): netG_cls = find_network_using_name(opt.netG, 'generator') return create_network(netG_cls, opt) def define_D(opt): netD_cls = find_network_using_name(opt.netD, 'discriminator') return create_network(netD_cls, opt) def define_E(opt): netE_cls = find_network_using_name(opt.netE, 'encoder') return create_network(netE_cls, opt) ================================================ FILE: models/networks/architecture.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). """ import torch import torch.nn as nn import torch.nn.functional as F import torchvision import torch.nn.utils.spectral_norm as spectral_norm from models.networks.normalization import SPADE, GROUP_SPADE # ResNet block that uses SPADE. # It differs from the ResNet block of pix2pixHD in that # it takes in the segmentation map as input, learns the skip connection if necessary, # and applies normalization first and then convolution. # This architecture seemed like a standard architecture for unconditional or # class-conditional GAN architecture using residual block. # The code was inspired from https://github.com/LMescheder/GAN_stability. 
class SPADEV2ResnetBlock(nn.Module):
    """Grouped variant of the SPADE residual block.

    Both the 3x3 convolutions and the GROUP_SPADE normalization layers are
    split into `group_num` channel groups; a learned 1x1 shortcut is added
    only when the input and output channel counts differ.
    """

    def __init__(self, fin, fout, opt, group_num=8):
        super().__init__()
        self.learned_shortcut = fin != fout
        mid_nc = min(fin, fout)

        # Grouped convolutions of the residual branch.
        self.conv_0 = nn.Conv2d(fin, mid_nc, kernel_size=3, padding=1, groups=group_num)
        self.conv_1 = nn.Conv2d(mid_nc, fout, kernel_size=3, padding=1, groups=group_num)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False, groups=group_num)

        # Optionally wrap every convolution in spectral normalization.
        if 'spectral' in opt.norm_G:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)

        # Conditional normalization layers; the cityscapes pipeline also
        # feeds an instance map alongside the semantic map.
        spade_config_str = opt.norm_G.replace('spectral', '')
        use_instance = opt.dataset_mode == 'cityscapes'
        self.norm_0 = GROUP_SPADE(spade_config_str, fin, opt.semantic_nc, group_num,
                                  use_instance=use_instance, data_mode=opt.dataset_mode)
        self.norm_1 = GROUP_SPADE(spade_config_str, mid_nc, opt.semantic_nc, group_num,
                                  use_instance=use_instance, data_mode=opt.dataset_mode)
        if self.learned_shortcut:
            self.norm_s = GROUP_SPADE(spade_config_str, fin, opt.semantic_nc, group_num,
                                      use_instance=use_instance, data_mode=opt.dataset_mode)

    def forward(self, x, seg):
        # Residual branch: norm -> leaky ReLU -> grouped conv, twice; |seg|
        # is the semantic map the normalization is conditioned on.
        dx = self.conv_0(self.actvn(self.norm_0(x, seg)))
        dx = self.conv_1(self.actvn(self.norm_1(dx, seg)))
        return self.shortcut(x, seg) + dx

    def shortcut(self, x, seg):
        # Identity unless channel counts differ, then a normalized 1x1 conv.
        if not self.learned_shortcut:
            return x
        return self.conv_s(self.norm_s(x, seg))

    def actvn(self, x):
        return F.leaky_relu(x, 2e-1)


class SPADEResnetBlock(nn.Module):
    """Standard SPADE residual block (norm -> LReLU -> conv, twice) with an
    optional learned 1x1 shortcut when fin != fout."""

    def __init__(self, fin, fout, opt):
        super().__init__()
        self.learned_shortcut = fin != fout
        mid_nc = min(fin, fout)

        self.conv_0 = nn.Conv2d(fin, mid_nc, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(mid_nc, fout, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)

        # Apply spectral norm if requested by the norm_G option string.
        if 'spectral' in opt.norm_G:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)

        # SPADE layers conditioned on the semantic segmentation map.
        spade_config_str = opt.norm_G.replace('spectral', '')
        self.norm_0 = SPADE(spade_config_str, fin, opt.semantic_nc)
        self.norm_1 = SPADE(spade_config_str, mid_nc, opt.semantic_nc)
        if self.learned_shortcut:
            self.norm_s = SPADE(spade_config_str, fin, opt.semantic_nc)

    def forward(self, x, seg):
        # Note: unlike a plain ResNet block this also takes |seg| as input.
        dx = self.conv_0(self.actvn(self.norm_0(x, seg)))
        dx = self.conv_1(self.actvn(self.norm_1(dx, seg)))
        return self.shortcut(x, seg) + dx

    def shortcut(self, x, seg):
        if not self.learned_shortcut:
            return x
        return self.conv_s(self.norm_s(x, seg))

    def actvn(self, x):
        return F.leaky_relu(x, 2e-1)


# ResNet block used in pix2pixHD
# We keep the same architecture as pix2pixHD.
class ResnetBlock(nn.Module):
    # Plain (non-SPADE) residual block: pad -> conv -> activation -> pad -> conv
    # with an identity skip connection. `norm_layer` is a wrapper (see
    # get_nonspade_norm_layer) applied around each convolution.
    def __init__(self, dim, norm_layer, activation=nn.ReLU(False), kernel_size=3, groups=1):
        super().__init__()

        # NOTE(review): `activation` default is a shared module instance;
        # harmless here because ReLU is stateless, but worth confirming.
        pw = (kernel_size - 1) // 2
        self.conv_block = nn.Sequential(
            nn.ReflectionPad2d(pw),
            norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size, groups=groups)),
            activation,
            nn.ReflectionPad2d(pw),
            norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size, groups=groups))
        )

    def forward(self, x):
        y = self.conv_block(x)
        out = x + y
        return out


# VGG architecter, used for the perceptual loss using a pretrained VGG network
class VGG19(torch.nn.Module):
    # Frozen VGG-19 feature extractor; forward returns the activations of
    # five successive stages used by the perceptual (VGG) loss.
    def __init__(self, vgg_path, requires_grad=False):
        super().__init__()
        print(vgg_path)  # NOTE(review): debug print; consider logging instead.
        if vgg_path is None or vgg_path == '':
            # No local checkpoint given: download torchvision's pretrained weights.
            vgg_pretrained_features = torchvision.models.vgg19(pretrained=True).features
        else:
            # Load VGG-19 weights from the provided checkpoint path.
            vgg19 = torchvision.models.vgg19(pretrained=False)
            vgg19.load_state_dict(torch.load(vgg_path, map_location='cpu'))
            vgg_pretrained_features = vgg19.features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        # Split the feature stack into five slices (layer indices 0-29).
        for x in range(2):
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(2, 7):
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(7, 12):
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(12, 21):
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        for x in range(21, 30):
            self.slice5.add_module(str(x), vgg_pretrained_features[x])
        if not requires_grad:
            # Freeze all VGG parameters; the loss never trains them.
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        # Return intermediate activations of the five slices, shallow to deep.
        h_relu1 = self.slice1(X)
        h_relu2 = self.slice2(h_relu1)
        h_relu3 = self.slice3(h_relu2)
        h_relu4 = self.slice4(h_relu3)
        h_relu5 = self.slice5(h_relu4)
        out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
        return out


================================================
FILE: models/networks/base_network.py
================================================
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch.nn as nn
from torch.nn import init


class BaseNetwork(nn.Module):
    # Common base class for all networks: CLI-option hook, parameter
    # counting, and configurable weight initialization.
    def __init__(self):
        super(BaseNetwork, self).__init__()

    @staticmethod
    def modify_commandline_options(parser, is_train):
        # Subclasses may add their own options; the default is a no-op.
        return parser

    def print_network(self):
        # Print the network class name and total parameter count in millions.
        if isinstance(self, list):
            self = self[0]
        num_params = 0
        for param in self.parameters():
            num_params += param.numel()
        print('Network [%s] was created. Total number of parameters: %.1f million. '
              'To see the architecture, do print(network).'
              % (type(self).__name__, num_params / 1000000))

    def init_weights(self, init_type='normal', gain=0.02):
        # Initialize Conv/Linear weights per `init_type`; BatchNorm2d weights
        # are drawn around 1.0 and all biases are zeroed.
        def init_func(m):
            classname = m.__class__.__name__
            if classname.find('BatchNorm2d') != -1:
                if hasattr(m, 'weight') and m.weight is not None:
                    init.normal_(m.weight.data, 1.0, gain)
                if hasattr(m, 'bias') and m.bias is not None:
                    init.constant_(m.bias.data, 0.0)
            elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
                if init_type == 'normal':
                    init.normal_(m.weight.data, 0.0, gain)
                elif init_type == 'xavier':
                    init.xavier_normal_(m.weight.data, gain=gain)
                elif init_type == 'xavier_uniform':
                    init.xavier_uniform_(m.weight.data, gain=1.0)
                elif init_type == 'kaiming':
                    init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                elif init_type == 'orthogonal':
                    init.orthogonal_(m.weight.data, gain=gain)
                elif init_type == 'none':  # uses pytorch's default init method
                    m.reset_parameters()
                else:
                    raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
                if hasattr(m, 'bias') and m.bias is not None:
                    init.constant_(m.bias.data, 0.0)

        self.apply(init_func)

        # propagate to children
        for m in self.children():
            if hasattr(m, 'init_weights'):
                m.init_weights(init_type, gain)


================================================
FILE: models/networks/discriminator.py
================================================
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from models.networks.base_network import BaseNetwork
from models.networks.normalization import get_nonspade_norm_layer
import util.util as util


class MultiscaleDiscriminator(BaseNetwork):
    # Wraps opt.num_D sub-discriminators, each applied to a progressively
    # 2x-downsampled copy of the input.
    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser.add_argument('--netD_subarch', type=str, default='n_layer',
                            help='architecture of each discriminator')
        parser.add_argument('--num_D', type=int, default=2,
                            help='number of discriminators to be used in multiscale')
        opt, _ = parser.parse_known_args()

        # define properties of each discriminator of the multiscale discriminator
        subnetD = util.find_class_in_module(opt.netD_subarch + 'discriminator',
                                            'models.networks.discriminator')
        subnetD.modify_commandline_options(parser, is_train)
        return parser

    def __init__(self, opt):
        super().__init__()
        self.opt = opt

        for i in range(opt.num_D):
            subnetD = self.create_single_discriminator(opt)
            self.add_module('discriminator_%d' % i, subnetD)

    def create_single_discriminator(self, opt):
        # Only the 'n_layer' (PatchGAN) sub-architecture is supported.
        subarch = opt.netD_subarch
        if subarch == 'n_layer':
            netD = NLayerDiscriminator(opt)
        else:
            raise ValueError('unrecognized discriminator subarchitecture %s' % subarch)
        return netD

    def downsample(self, input):
        # Halve spatial resolution before feeding the next discriminator.
        return F.avg_pool2d(input, kernel_size=3,
                            stride=2, padding=[1, 1],
                            count_include_pad=False)

    # Returns list of lists of discriminator outputs.
    # The final result is of size opt.num_D x opt.n_layers_D
    def forward(self, input):
        result = []
        get_intermediate_features = not self.opt.no_ganFeat_loss
        for name, D in self.named_children():
            out = D(input)
            if not get_intermediate_features:
                out = [out]
            result.append(out)
            input = self.downsample(input)

        return result


# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(BaseNetwork):
    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser.add_argument('--n_layers_D', type=int, default=4,
                            help='# layers in each discriminator')
        return parser

    def __init__(self, opt):
        super().__init__()
        self.opt = opt

        kw = 4
        padw = int(np.ceil((kw - 1.0) / 2))
        nf = opt.ndf
        input_nc = self.compute_D_input_nc(opt)

        norm_layer = get_nonspade_norm_layer(opt, opt.norm_D)
        sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw),
                     nn.LeakyReLU(0.2, False)]]

        for n in range(1, opt.n_layers_D):
            nf_prev = nf
            nf = min(nf * 2, 512)
            # Final intermediate layer keeps stride 1 so the output map
            # does not shrink further while the receptive field grows.
            stride = 1 if n == opt.n_layers_D - 1 else 2
            sequence += [[norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw,
                                               stride=stride, padding=padw)),
                          nn.LeakyReLU(0.2, False)
                          ]]

        sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]

        # We divide the layers into groups to extract intermediate layer outputs
        for n in range(len(sequence)):
            self.add_module('model' + str(n), nn.Sequential(*sequence[n]))

    def compute_D_input_nc(self, opt):
        # The discriminator sees the label map concatenated with the image,
        # plus optional dontcare and instance channels.
        input_nc = opt.label_nc + opt.output_nc
        if opt.contain_dontcare_label:
            input_nc += 1
        if not opt.no_instance:
            input_nc += 1
        return input_nc

    def forward(self, input):
        results = [input]
        for submodel in self.children():
            intermediate_output = submodel(results[-1])
            results.append(intermediate_output)

        get_intermediate_features = not self.opt.no_ganFeat_loss
        if get_intermediate_features:
            # All intermediate activations, for the GAN feature-matching loss.
            return results[1:]
        else:
            return results[-1]


================================================
FILE: models/networks/encoder.py
================================================
"""
Copyright (C)
2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from models.networks.base_network import BaseNetwork
from models.networks.normalization import get_nonspade_norm_layer
import torch


class ConvEncoder(BaseNetwork):
    """ Same architecture as the image discriminator """

    # Grouped convolutional VAE encoder: every semantic class keeps its own
    # channel group end-to-end and gets its own 8-channel (mu, logvar) latent.
    def __init__(self, opt):
        super().__init__()
        kw = 3
        self.opt = opt
        pw = int(np.ceil((kw - 1.0) / 2))
        # Per-dataset channel width.
        # NOTE(review): `ndf` is unbound for any other dataset_mode and the
        # next lines would raise NameError — confirm supported modes upstream.
        if opt.dataset_mode == 'cityscapes':
            ndf = 350
        elif opt.dataset_mode == 'ade20k':
            ndf = 151 * 4
        elif opt.dataset_mode == 'deepfashion':
            ndf = 256
        norm_layer = get_nonspade_norm_layer(opt, opt.norm_E)
        # groups=semantic_nc: channels never mix across semantic classes.
        self.layer1 = norm_layer(nn.Conv2d(self.opt.semantic_nc * 3, ndf, kw, stride=2, padding=pw, groups=self.opt.semantic_nc))
        self.layer2 = norm_layer(nn.Conv2d(ndf * 1, ndf * 2, kw, stride=2, padding=pw, groups=self.opt.semantic_nc))
        self.layer3 = norm_layer(nn.Conv2d(ndf * 2, ndf * 4, kw, stride=2, padding=pw, groups=self.opt.semantic_nc))
        self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 8, kw, stride=2, padding=pw, groups=self.opt.semantic_nc))
        self.layer5 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=2, padding=pw, groups=self.opt.semantic_nc))
        if opt.crop_size >= 256:
            self.layer6 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=2, padding=pw, groups=self.opt.semantic_nc))

        self.so = s0 = 4
        # Convolutional (not linear) heads keep the grouped, spatial layout.
        self.fc_mu = nn.Conv2d(ndf * 8, 8 * self.opt.semantic_nc, stride=1, kernel_size=3, padding=1, groups=self.opt.semantic_nc)
        self.fc_var = nn.Conv2d(ndf * 8, 8 * self.opt.semantic_nc, stride=1, kernel_size=3, padding=1, groups=self.opt.semantic_nc)

        self.actvn = nn.LeakyReLU(0.2, False)

    def forward(self, x):
        # Returns (mu, logvar) feature maps for the reparameterization trick.
        bs = x.size(0)
        x = self.layer1(x)
        x = self.layer2(self.actvn(x))
        x = self.layer3(self.actvn(x))
        x = self.layer4(self.actvn(x))
        x = self.layer5(self.actvn(x))
        if self.opt.crop_size >= 256:
            x = self.layer6(self.actvn(x))
        x = self.actvn(x)
        # x = x.view(x.size(0), -1)
        mu = self.fc_mu(x)
        logvar = self.fc_var(x)

        return mu, logvar


class FcEncoder(BaseNetwork):
    """ Same architecture as the image discriminator """

    # Standard (non-grouped) VAE image encoder with linear latent heads.
    def __init__(self, opt):
        super().__init__()
        kw = 3
        pw = int(np.ceil((kw - 1.0) / 2))
        ndf = opt.ngf
        norm_layer = get_nonspade_norm_layer(opt, opt.norm_E)
        self.layer1 = norm_layer(nn.Conv2d(3, ndf, kw, stride=2, padding=pw))
        self.layer2 = norm_layer(nn.Conv2d(ndf * 1, ndf * 2, kw, stride=2, padding=pw))
        self.layer3 = norm_layer(nn.Conv2d(ndf * 2, ndf * 4, kw, stride=2, padding=pw))
        self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 8, kw, stride=2, padding=pw))
        self.layer5 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=2, padding=pw))
        if opt.crop_size >= 256:
            self.layer6 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=2, padding=pw))

        self.so = s0 = 4
        # NOTE(review): assumes the final feature map is s0 x s0 — confirm
        # for non-square / non-256 crop sizes.
        self.fc_mu = nn.Linear(ndf * 8 * s0 * s0, 256)
        self.fc_var = nn.Linear(ndf * 8 * s0 * s0, 256)

        self.actvn = nn.LeakyReLU(0.2, False)
        self.opt = opt

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(self.actvn(x))
        x = self.layer3(self.actvn(x))
        x = self.layer4(self.actvn(x))
        x = self.layer5(self.actvn(x))
        if self.opt.crop_size >= 256:
            x = self.layer6(self.actvn(x))
        x = self.actvn(x)

        x = x.view(x.size(0), -1)
        mu = self.fc_mu(x)
        logvar = self.fc_var(x)

        return mu, logvar


================================================
FILE: models/networks/generator.py
================================================
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
""" import torch import torch.nn as nn import torch.nn.functional as F from models.networks.base_network import BaseNetwork from models.networks.normalization import get_nonspade_norm_layer from models.networks.architecture import ResnetBlock as ResnetBlock from models.networks.architecture import SPADEResnetBlock as SPADEResnetBlock, SPADEV2ResnetBlock class SPADEBaseGenerator(BaseNetwork): @staticmethod def modify_commandline_options(parser, is_train): parser.set_defaults(norm_G='spectralspadesyncbatch3x3') parser.add_argument('--num_upsampling_layers', choices=('normal', 'more', 'most'), default='more', help="If 'more', adds upsampling layer between the two middle resnet blocks. If 'most', also add one more upsampling + resnet layer at the end of the generator") return parser def __init__(self, opt): super().__init__() self.opt = opt nf = opt.ngf self.sw, self.sh = self.compute_latent_vector_size(opt) if opt.use_vae: # In case of VAE, we will sample from random z vector self.fc = nn.Linear(opt.z_dim, 16 * nf * self.sw * self.sh) else: # Otherwise, we make the network deterministic by starting with # downsampled segmentation map instead of random z self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1) self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt) self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt) self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt) self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt) self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt) self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt) self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt) final_nc = nf if opt.num_upsampling_layers == 'most': self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt) final_nc = nf // 2 self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1) self.up = nn.Upsample(scale_factor=2) def compute_latent_vector_size(self, opt): if opt.num_upsampling_layers == 'normal': num_up_layers = 5 elif opt.num_upsampling_layers == 'more': num_up_layers = 6 elif 
opt.num_upsampling_layers == 'most': num_up_layers = 7 else: raise ValueError('opt.num_upsampling_layers [%s] not recognized' % opt.num_upsampling_layers) sw = opt.crop_size // (2 ** num_up_layers) sh = round(sw / opt.aspect_ratio) return sw, sh def forward(self, input, z=None): seg = input if self.opt.use_vae: # we sample z from unit normal and reshape the tensor if z is None: z = torch.randn(input.size(0), self.opt.z_dim, dtype=torch.float32, device=input.get_device()) x = self.fc(z) x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw) else: # we downsample segmap and run convolution x = F.interpolate(seg, size=(self.sh, self.sw)) x = self.fc(x) x = self.head_0(x, seg) x = self.up(x) x = self.G_middle_0(x, seg) if self.opt.num_upsampling_layers == 'more' or \ self.opt.num_upsampling_layers == 'most': x = self.up(x) x = self.G_middle_1(x, seg) x = self.up(x) x = self.up_0(x, seg) x = self.up(x) x = self.up_1(x, seg) x = self.up(x) x = self.up_2(x, seg) x = self.up(x) x = self.up_3(x, seg) if self.opt.num_upsampling_layers == 'most': x = self.up(x) x = self.up_4(x, seg) x = self.conv_img(F.leaky_relu(x, 2e-1)) x = F.tanh(x) return x class ADE20KGenerator(BaseNetwork): @staticmethod def modify_commandline_options(parser, is_train): parser.set_defaults(norm_G='spectralspadesyncbatch3x3') parser.add_argument('--num_upsampling_layers', choices=('normal', 'more', 'most'), default='more', help="If 'more', adds upsampling layer between the two middle resnet blocks. 
If 'most', also add one more upsampling + resnet layer at the end of the generator")

        return parser

    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        nf = opt.ngf

        self.sw, self.sh = self.compute_latent_vector_size(opt)

        # Latent is laid out per semantic class: 8 channels per class in,
        # 16 per class out, via a grouped 3x3 convolution.
        self.fc = nn.Conv2d(opt.semantic_nc * 8, self.opt.semantic_nc * 16,
                            kernel_size=3, padding=1, groups=self.opt.semantic_nc)
        self.head_0 = SPADEV2ResnetBlock(self.opt.semantic_nc * 16, self.opt.semantic_nc * 16, opt, self.opt.semantic_nc)

        self.G_middle_0 = SPADEV2ResnetBlock(16 * self.opt.semantic_nc, 32 * nf, opt, 16)
        self.G_middle_1 = SPADEV2ResnetBlock(32 * nf, 16 * nf, opt, 16)

        # Group counts shrink as channels progressively mix toward RGB.
        self.up_0 = SPADEV2ResnetBlock(16 * nf, 16 * nf, opt, 8)
        self.up_1 = SPADEV2ResnetBlock(16 * nf, 8 * nf, opt, 4)
        self.up_2 = SPADEV2ResnetBlock(8 * nf, 4 * nf, opt, 2)
        self.up_3 = SPADEV2ResnetBlock(4 * nf, 2 * nf, opt, 1)
        self.up_4 = SPADEV2ResnetBlock(2 * nf, 1 * nf, opt, 1)

        final_nc = nf

        if opt.num_upsampling_layers == 'most':
            # NOTE(review): this overwrites the self.up_4 defined above —
            # confirm the 'most' setting is actually used with this generator.
            self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
            final_nc = nf // 2

        self.conv_img = nn.Conv2d(final_nc * 2, 3, 3, padding=1)

        self.up = nn.Upsample(scale_factor=2)

    def compute_latent_vector_size(self, opt):
        # Initial latent spatial size from crop size and upsampling depth.
        if opt.num_upsampling_layers == 'normal':
            num_up_layers = 5
        elif opt.num_upsampling_layers == 'more':
            num_up_layers = 6
        elif opt.num_upsampling_layers == 'most':
            num_up_layers = 7
        else:
            raise ValueError('opt.num_upsampling_layers [%s] not recognized' % opt.num_upsampling_layers)

        sw = opt.crop_size // (2 ** num_up_layers)
        sh = round(sw / opt.aspect_ratio)

        return sw, sh

    def forward(self, input, z=None):
        seg = input
        if self.opt.use_vae:
            # we sample z from unit normal and reshape the tensor
            if z is None:
                z = torch.randn(input.size(0), self.opt.semantic_nc * 8, 4, 4,
                                dtype=torch.float32, device=input.get_device())
            x = self.fc(z)
            # x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
            x = x.view(input.size(0), -1, self.sh, self.sw)
        else:
            # we downsample segmap and run convolution
            x = F.interpolate(seg, size=(self.sh, self.sw))
            x = self.fc(x)

        x = self.head_0(x, seg)

        x = self.up(x)
        x = self.G_middle_0(x, seg)

        if self.opt.num_upsampling_layers == 'more' or \
           self.opt.num_upsampling_layers == 'most':
            x = self.up(x)

        x = self.G_middle_1(x, seg)

        x = self.up(x)
        x = self.up_0(x, seg)
        x = self.up(x)
        x = self.up_1(x, seg)
        x = self.up(x)
        x = self.up_2(x, seg)
        # x = self.up_3(x, seg)
        x = self.up(x)
        # edge = self.edge_gen(x)
        x = self.up_3(x, seg)
        # x = self.up_4(x, seg)
        # x = self.up_5(x, seg)

        if self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
            x = self.up_4(x, seg)

        x = self.conv_img(F.leaky_relu(x, 2e-1))
        x = F.tanh(x)

        return x


class CityscapesGenerator(BaseNetwork):
    # Grouped-SPADE generator for Cityscapes; interleaves semantic and
    # instance channels before the grouped convolutions (see forward).
    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser.set_defaults(norm_G='spectralspadesyncbatch3x3')
        parser.add_argument('--num_upsampling_layers',
                            choices=('normal', 'more', 'most'), default='more',
                            help="If 'more', adds upsampling layer between the two middle resnet blocks. If 'most', also add one more upsampling + resnet layer at the end of the generator")

        return parser

    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        nf = opt.ngf

        self.sw, self.sh = self.compute_latent_vector_size(opt)
        # print(self.opt.semantic_nc)
        self.fc = nn.Conv2d(opt.semantic_nc * 8, 16 * nf, kernel_size=3, padding=1, groups=self.opt.semantic_nc)
        self.head_0 = SPADEV2ResnetBlock(16 * nf, 16 * nf, opt, self.opt.semantic_nc)

        self.G_middle_0 = SPADEV2ResnetBlock(16 * nf, 16 * nf, opt, self.opt.semantic_nc)
        self.G_middle_1 = SPADEV2ResnetBlock(16 * nf, 16 * nf, opt, 20)

        # Hand-picked group counts for this dataset's channel widths.
        self.up_0 = SPADEV2ResnetBlock(16 * nf, 8 * nf, opt, 14)
        self.up_1 = SPADEV2ResnetBlock(8 * nf, 4 * nf, opt, 10)
        self.up_2 = SPADEV2ResnetBlock(4 * nf, 2 * nf, opt, 4)
        self.up_3 = SPADEV2ResnetBlock(2 * nf, 1 * nf, opt, 1)

        final_nc = nf

        if opt.num_upsampling_layers == 'most':
            self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
            final_nc = nf // 2

        self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)

        self.up = nn.Upsample(scale_factor=2)

    def compute_latent_vector_size(self, opt):
        if
opt.num_upsampling_layers == 'normal':
            num_up_layers = 5
        elif opt.num_upsampling_layers == 'more':
            num_up_layers = 6
        elif opt.num_upsampling_layers == 'most':
            num_up_layers = 7
        else:
            raise ValueError('opt.num_upsampling_layers [%s] not recognized' % opt.num_upsampling_layers)

        sw = opt.crop_size // (2 ** num_up_layers)
        sh = round(sw / opt.aspect_ratio)

        return sw, sh

    def forward(self, input, z=None):
        seg = input
        if self.opt.dataset_mode == 'cityscapes':
            # Interleave each semantic channel with a copy of the (last)
            # instance channel so grouped convs see per-class
            # (semantic, instance) pairs.
            with torch.no_grad():
                semantic = seg[:, :-1, :, :]
                instance = seg[:, -1, :, :].unsqueeze(dim=1).expand_as(semantic).unsqueeze(dim=2)
                semantic = semantic.unsqueeze(dim=2)
                seg = torch.cat((semantic, instance), dim=2)
                seg = seg.view(seg.size()[0], seg.size()[1] * seg.size()[2], seg.size()[3], seg.size()[4])
        if self.opt.use_vae:
            # we sample z from unit normal and reshape the tensor
            if z is None:
                z = torch.randn(input.size(0), self.opt.semantic_nc * 8, 4, 8,
                                dtype=torch.float32, device=input.get_device())
            x = self.fc(z)
            x = x.view(input.size(0), 16 * self.opt.ngf, self.sh, self.sw)
        else:
            # we downsample segmap and run convolution
            x = F.interpolate(seg, size=(self.sh, self.sw))
            x = self.fc(x)

        x = self.head_0(x, seg)

        x = self.up(x)
        x = self.G_middle_0(x, seg)

        if self.opt.num_upsampling_layers == 'more' or \
           self.opt.num_upsampling_layers == 'most':
            x = self.up(x)

        x = self.G_middle_1(x, seg)

        x = self.up(x)
        x = self.up_0(x, seg)
        x = self.up(x)
        x = self.up_1(x, seg)
        x = self.up(x)
        x = self.up_2(x, seg)
        x = self.up(x)
        # edge = self.edge_gen(x)
        x = self.up_3(x, seg)

        if self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
            x = self.up_4(x, seg)

        x = self.conv_img(F.leaky_relu(x, 2e-1))
        x = F.tanh(x)

        return x


class DeepFashionGenerator(BaseNetwork):
    # Grouped-SPADE generator for DeepFashion; group counts scale with the
    # number of semantic classes.
    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser.set_defaults(norm_G='spectralspadesyncbatch3x3')
        parser.add_argument('--num_upsampling_layers',
                            choices=('normal', 'more', 'most'), default='more',
                            help="If 'more', adds upsampling layer between the two middle resnet blocks. If 'most', also add one more upsampling + resnet layer at the end of the generator")

        return parser

    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        nf = opt.ngf

        self.sw, self.sh = self.compute_latent_vector_size(opt)
        self.fc = nn.Conv2d(opt.semantic_nc * 8, 16 * nf, kernel_size=3, padding=1, groups=8)
        self.head_0 = SPADEV2ResnetBlock(16 * nf, 16 * nf, opt, self.opt.semantic_nc)

        self.G_middle_0 = SPADEV2ResnetBlock(16 * nf, 16 * nf, opt, self.opt.semantic_nc)
        self.G_middle_1 = SPADEV2ResnetBlock(16 * nf, 16 * nf, opt, self.opt.semantic_nc // 2)

        self.up_0 = SPADEV2ResnetBlock(16 * nf, 8 * nf, opt, self.opt.semantic_nc // 2)
        self.up_1 = SPADEV2ResnetBlock(8 * nf, 4 * nf, opt, self.opt.semantic_nc // 4)
        self.up_2 = SPADEV2ResnetBlock(4 * nf, 2 * nf, opt, self.opt.semantic_nc // 4)
        self.up_3 = SPADEV2ResnetBlock(2 * nf, 1 * nf, opt, self.opt.semantic_nc // 8)

        final_nc = nf

        if opt.num_upsampling_layers == 'most':
            self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
            final_nc = nf // 2

        self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)

        self.up = nn.Upsample(scale_factor=2)

    def compute_latent_vector_size(self, opt):
        if opt.num_upsampling_layers == 'normal':
            num_up_layers = 5
        elif opt.num_upsampling_layers == 'more':
            num_up_layers = 6
        elif opt.num_upsampling_layers == 'most':
            num_up_layers = 7
        else:
            raise ValueError('opt.num_upsampling_layers [%s] not recognized' % opt.num_upsampling_layers)

        sw = opt.crop_size // (2 ** num_up_layers)
        sh = round(sw / opt.aspect_ratio)

        return sw, sh

    def forward(self, input, z=None):
        seg = input
        if self.opt.use_vae:
            # we sample z from unit normal and reshape the tensor
            if z is None:
                z = torch.randn(input.size(0), self.opt.semantic_nc * 8, 4, 4,
                                dtype=torch.float32, device=input.get_device())
                z = z.view(input.size()[0], self.opt.semantic_nc * 8, 4, 4)
            x = self.fc(z)
            x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
        else:
            # we downsample segmap and run convolution
            x = F.interpolate(seg, size=(self.sh, self.sw))
            x =
self.fc(x)

        x = self.head_0(x, seg)

        x = self.up(x)
        x = self.G_middle_0(x, seg)

        if self.opt.num_upsampling_layers == 'more' or \
           self.opt.num_upsampling_layers == 'most':
            x = self.up(x)

        x = self.G_middle_1(x, seg)

        x = self.up(x)
        x = self.up_0(x, seg)
        x = self.up(x)
        x = self.up_1(x, seg)
        x = self.up(x)
        x = self.up_2(x, seg)
        x = self.up(x)
        # edge = self.edge_gen(x)
        x = self.up_3(x, seg)

        if self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
            x = self.up_4(x, seg)

        x = self.conv_img(F.leaky_relu(x, 2e-1))
        x = F.tanh(x)

        return x


class Pix2PixHDGenerator(BaseNetwork):
    # Classic pix2pixHD generator: initial conv, strided downsampling,
    # residual blocks, transposed-conv upsampling, tanh output.
    @staticmethod
    def modify_commandline_options(parser, is_train):
        # parser.add_argument('--resnet_n_downsample', type=int, default=4, help='number of downsampling layers in netG')
        parser.add_argument('--resnet_n_blocks', type=int, default=9,
                            help='number of residual blocks in the global generator network')
        parser.add_argument('--resnet_kernel_size', type=int, default=3,
                            help='kernel size of the resnet block')
        parser.add_argument('--resnet_initial_kernel_size', type=int, default=7,
                            help='kernel size of the first convolution')
        # parser.set_defaults(norm_G='instance')
        parser.set_defaults(norm_G='spectralinstance')
        return parser

    def __init__(self, opt):
        super().__init__()
        input_nc = opt.label_nc + (1 if opt.contain_dontcare_label else 0) + (0 if opt.no_instance else 1)

        norm_layer = get_nonspade_norm_layer(opt, opt.norm_G)
        activation = nn.ReLU(False)

        model = []

        # initial conv
        model += [nn.ReflectionPad2d(opt.resnet_initial_kernel_size // 2),
                  norm_layer(nn.Conv2d(input_nc, opt.ngf,
                                       kernel_size=opt.resnet_initial_kernel_size,
                                       padding=0)),
                  activation]

        # downsample
        mult = 1
        for i in range(opt.resnet_n_downsample):
            model += [norm_layer(nn.Conv2d(opt.ngf * mult, opt.ngf * mult * 2,
                                           kernel_size=3, stride=2, padding=1)),
                      activation]
            mult *= 2

        # resnet blocks
        for i in range(opt.resnet_n_blocks):
            model += [ResnetBlock(opt.ngf * mult,
                                  norm_layer=norm_layer,
                                  activation=activation,
                                  kernel_size=opt.resnet_kernel_size)]

        # upsample
        for i in range(opt.resnet_n_downsample):
            nc_in = int(opt.ngf * mult)
            nc_out = int((opt.ngf * mult) / 2)
            model += [norm_layer(nn.ConvTranspose2d(nc_in, nc_out,
                                                    kernel_size=3, stride=2,
                                                    padding=1, output_padding=1)),
                      activation]
            mult = mult // 2

        # final output conv
        model += [nn.ReflectionPad2d(3),
                  nn.Conv2d(nc_out, opt.output_nc, kernel_size=7, padding=0),
                  nn.Tanh()]

        self.model = nn.Sequential(*model)

    def forward(self, input, z=None):
        return self.model(input)


================================================
FILE: models/networks/loss.py
================================================
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.networks.architecture import VGG19


# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor, opt=None):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        # Label tensors are cached lazily on first use (see getters below).
        self.real_label_tensor = None
        self.fake_label_tensor = None
        self.zero_tensor = None
        self.Tensor = tensor
        self.gan_mode = gan_mode
        self.opt = opt
        # Validate the mode up front; each supported mode needs no setup.
        if gan_mode == 'ls':
            pass
        elif gan_mode == 'original':
            pass
        elif gan_mode == 'w':
            pass
        elif gan_mode == 'hinge':
            pass
        else:
            raise ValueError('Unexpected gan_mode {}'.format(gan_mode))

    def get_target_tensor(self, input, target_is_real):
        # Return a label tensor broadcast to input's shape (cached 1-element).
        if target_is_real:
            if self.real_label_tensor is None:
                self.real_label_tensor = self.Tensor(1).fill_(self.real_label)
                self.real_label_tensor.requires_grad_(False)
            return self.real_label_tensor.expand_as(input)
        else:
            if self.fake_label_tensor is None:
self.fake_label_tensor = self.Tensor(1).fill_(self.fake_label) self.fake_label_tensor.requires_grad_(False) return self.fake_label_tensor.expand_as(input) def get_zero_tensor(self, input): if self.zero_tensor is None: self.zero_tensor = self.Tensor(1).fill_(0) self.zero_tensor.requires_grad_(False) return self.zero_tensor.expand_as(input) def loss(self, input, target_is_real, for_discriminator=True): if self.gan_mode == 'original': # cross entropy loss target_tensor = self.get_target_tensor(input, target_is_real) loss = F.binary_cross_entropy_with_logits(input, target_tensor) return loss elif self.gan_mode == 'ls': target_tensor = self.get_target_tensor(input, target_is_real) return F.mse_loss(input, target_tensor) elif self.gan_mode == 'hinge': if for_discriminator: if target_is_real: minval = torch.min(input - 1, self.get_zero_tensor(input)) loss = -torch.mean(minval) else: minval = torch.min(-input - 1, self.get_zero_tensor(input)) loss = -torch.mean(minval) else: assert target_is_real, "The generator's hinge loss must be aiming for real" loss = -torch.mean(input) return loss else: # wgan if target_is_real: return -input.mean() else: return input.mean() def __call__(self, input, target_is_real, for_discriminator=True): # computing loss is a bit complicated because |input| may not be # a tensor, but list of tensors in case of multiscale discriminator if isinstance(input, list): loss = 0 for pred_i in input: if isinstance(pred_i, list): pred_i = pred_i[-1] loss_tensor = self.loss(pred_i, target_is_real, for_discriminator) bs = 1 if len(loss_tensor.size()) == 0 else loss_tensor.size(0) new_loss = torch.mean(loss_tensor.view(bs, -1), dim=1) loss += new_loss return loss / len(input) else: return self.loss(input, target_is_real, for_discriminator) # Perceptual loss that uses a pretrained VGG network class VGGLoss(nn.Module): def __init__(self, gpu_ids, vgg_path=None): super(VGGLoss, self).__init__() self.vgg = VGG19(vgg_path).cuda() self.criterion = nn.L1Loss() 
# KL Divergence loss used in VAE with an image encoder
class KLDLoss(nn.Module):
    """KL(q(z|x) || N(0, I)) for the VAE image encoder; mu/logvar are the
    encoder's predicted Gaussian parameters."""

    def forward(self, mu, logvar):
        return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())


# Returns a function that creates a normalization function
# that does not condition on semantic map
def get_nonspade_norm_layer(opt, norm_type='instance'):
    """Return a wrapper that appends the requested (non-SPADE) norm to a layer.

    ``norm_type`` may be prefixed with ``'spectral'`` (e.g.
    ``'spectralinstance'``) to additionally apply spectral normalization to
    the wrapped layer.

    Fix: ``subnorm_type`` was previously only assigned inside the
    ``startswith('spectral')`` branch, so plain norm types such as
    ``'instance'`` or ``'none'`` raised NameError.
    """
    # helper function to get # output channels of the previous layer
    def get_out_channel(layer):
        if hasattr(layer, 'out_channels'):
            return getattr(layer, 'out_channels')
        return layer.weight.size(0)

    # this function will be returned
    def add_norm_layer(layer):
        nonlocal norm_type
        subnorm_type = norm_type  # default when there is no 'spectral' prefix
        if norm_type.startswith('spectral'):
            layer = spectral_norm(layer)
            subnorm_type = norm_type[len('spectral'):]

        if subnorm_type == 'none' or len(subnorm_type) == 0:
            return layer

        # remove bias in the previous layer, which is meaningless
        # since it has no effect after normalization
        if getattr(layer, 'bias', None) is not None:
            delattr(layer, 'bias')
            layer.register_parameter('bias', None)

        channels = get_out_channel(layer)
        if subnorm_type == 'batch':
            norm_layer = nn.BatchNorm2d(channels, affine=True)
        elif subnorm_type == 'sync_batch':
            norm_layer = SynchronizedBatchNorm2d(channels, affine=True)
        elif subnorm_type == 'instance':
            norm_layer = nn.InstanceNorm2d(channels, affine=False)
        elif subnorm_type == 'group':
            norm_layer = nn.GroupNorm(8, channels, affine=True)
        else:
            raise ValueError('normalization layer %s is not recognized' % subnorm_type)

        return nn.Sequential(layer, norm_layer)

    return add_norm_layer
class GROUP_SPADE(nn.Module):
    """Grouped variant of SPADE normalization.

    Normalizes ``x`` with a parameter-free norm, then modulates it with a
    per-pixel scale (gamma) and bias (beta) predicted from the semantic map
    by grouped convolutions, so each label group gets its own modulation
    weights.

    Args:
        config_text: ``'spade(norm)(ks)x(ks)'``, e.g. ``'spadeinstance3x3'``.
        norm_nc: channel count of the activations being normalized.
        label_nc: channel count of the input semantic map.
        group_num: conv group count; 0 means one group per label channel.
        use_instance: if True the segmap carries an extra channel per label,
            doubling the MLP's input dimension.
        data_mode: dataset preset selecting hidden width and grouping.

    Fix: the regex pattern is now a raw string -- ``'\\D'`` inside a plain
    literal is an invalid escape sequence (SyntaxWarning on recent Python).
    """

    def __init__(self, config_text, norm_nc, label_nc, group_num=0,
                 use_instance=False, data_mode='deepfashion'):
        super().__init__()
        if group_num == 0:
            group_num = label_nc

        assert config_text.startswith('spade')
        parsed = re.search(r'spade(\D+)(\d)x\d', config_text)
        param_free_norm_type = str(parsed.group(1))
        # NOTE(review): only the first kernel digit is used; 'spadeinstance3x5'
        # would silently behave like 3x3.
        ks = int(parsed.group(2))

        if param_free_norm_type == 'instance':
            self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        elif param_free_norm_type == 'syncbatch':
            self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False)
        elif param_free_norm_type == 'batch':
            self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
        elif param_free_norm_type == 'group':
            self.param_free_norm = nn.GroupNorm(label_nc, norm_nc)
        else:
            raise ValueError('%s is not a recognized param-free norm type in SPADE'
                             % param_free_norm_type)

        # The dimension of the intermediate embedding space.
        if use_instance:
            seg_in_dim = label_nc * 2
        else:
            seg_in_dim = label_nc
        pw = ks // 2

        if data_mode == 'deepfashion':
            nhidden = 128
            self.mlp_shared = nn.Sequential(
                nn.Conv2d(seg_in_dim, nhidden, kernel_size=ks, padding=pw, groups=group_num),
                nn.ReLU()
            )
            self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw, groups=group_num)
            self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw, groups=group_num)
        elif data_mode == 'cityscapes':
            nhidden = label_nc * group_num
            self.mlp_shared = nn.Sequential(
                nn.Conv2d(seg_in_dim, nhidden, kernel_size=ks, padding=pw, groups=label_nc),
                nn.ReLU()
            )
            self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw, groups=label_nc)
            self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw, groups=label_nc)
        elif data_mode == 'ade20k':
            # Keep nhidden divisible by the conv group count.
            if label_nc % group_num == 0:
                nhidden = label_nc * 2
            else:
                nhidden = label_nc * group_num
            self.mlp_shared = nn.Sequential(
                nn.Conv2d(seg_in_dim, nhidden, kernel_size=ks, padding=pw, groups=label_nc),
                nn.ReLU()
            )
            self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw, groups=group_num)
            self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw, groups=group_num)

    def forward(self, x, segmap):
        # Part 1. generate parameter-free normalized activations
        normalized = self.param_free_norm(x)

        # Part 2. produce scaling and bias conditioned on semantic map,
        # resized to the spatial size of x.
        segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
        actv = self.mlp_shared(segmap)
        gamma = self.mlp_gamma(actv)
        beta = self.mlp_beta(actv)

        # apply scale and bias
        out = normalized * (1 + gamma) + beta
        return out
class SPADE(nn.Module):
    """Spatially-adaptive denormalization (SPADE).

    First normalizes ``x`` with a parameter-free norm, then applies a
    per-pixel scale and bias predicted from the segmentation map.

    Args:
        config_text: ``'spade(norm)(ks)x(ks)'``, e.g. ``'spadesyncbatch3x3'``.
        norm_nc: channel count of the normalized activations.
        label_nc: channel count of the input semantic map.

    Fix: the regex pattern is now a raw string -- ``'\\D'`` inside a plain
    literal is an invalid escape sequence (SyntaxWarning on recent Python).
    """

    def __init__(self, config_text, norm_nc, label_nc):
        super().__init__()

        assert config_text.startswith('spade')
        parsed = re.search(r'spade(\D+)(\d)x\d', config_text)
        param_free_norm_type = str(parsed.group(1))
        ks = int(parsed.group(2))  # only the first kernel digit is used

        if param_free_norm_type == 'instance':
            self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        elif param_free_norm_type == 'syncbatch':
            self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False)
        elif param_free_norm_type == 'batch':
            self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
        else:
            raise ValueError('%s is not a recognized param-free norm type in SPADE'
                             % param_free_norm_type)

        # The dimension of the intermediate embedding space. Yes, hardcoded.
        nhidden = 128
        pw = ks // 2
        self.mlp_shared = nn.Sequential(
            nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
            nn.ReLU()
        )
        self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
        self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)

    def forward(self, x, segmap):
        # Part 1. generate parameter-free normalized activations
        normalized = self.param_free_norm(x)

        # Part 2. produce scaling and bias conditioned on semantic map,
        # resized to the spatial size of x.
        segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
        actv = self.mlp_shared(segmap)
        gamma = self.mlp_gamma(actv)
        beta = self.mlp_beta(actv)

        # apply scale and bias
        out = normalized * (1 + gamma) + beta
        return out
import collections
import contextlib

import torch
import torch.nn.functional as F

from torch.nn.modules.batchnorm import _BatchNorm

try:
    from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
except ImportError:
    # CPU-only / very old torch builds: sync mode is unusable (asserted below).
    ReduceAddCoalesced = Broadcast = None

try:
    # Prefer the jactorch implementations when that package is installed.
    from jactorch.parallel.comm import SyncMaster
    from jactorch.parallel.data_parallel import JacDataParallel as DataParallelWithCallback
except ImportError:
    from .comm import SyncMaster
    from .replicate import DataParallelWithCallback

__all__ = [
    'SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d',
    'patch_sync_batchnorm', 'convert_model'
]


def _sum_ft(tensor):
    """sum over the first and last dimention"""
    return tensor.sum(dim=0).sum(dim=-1)


def _unsqueeze_ft(tensor):
    """add new dimensions at the front and the tail"""
    return tensor.unsqueeze(0).unsqueeze(-1)


# Messages exchanged between the master copy and the slave copies:
# each child sends its per-channel sum / squared-sum / element count and the
# master replies with the global mean and inverse std.
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])


class _SynchronizedBatchNorm(_BatchNorm):
    # Base class implementing cross-GPU synchronized batch statistics; the
    # 1d/2d/3d subclasses below only add input-dimension checks.

    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
        assert ReduceAddCoalesced is not None, 'Can not use Synchronized Batch Normalization without CUDA support.'

        super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)

        # Master/slave plumbing is wired lazily per replica by
        # DataParallelWithCallback via __data_parallel_replicate__.
        self._sync_master = SyncMaster(self._data_parallel_master)

        self._is_parallel = False
        self._parallel_id = None
        self._slave_pipe = None

    def forward(self, input):
        # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
        if not (self._is_parallel and self.training):
            return F.batch_norm(
                input, self.running_mean, self.running_var, self.weight, self.bias,
                self.training, self.momentum, self.eps)

        # Resize the input to (B, C, -1).
        input_shape = input.size()
        input = input.view(input.size(0), self.num_features, -1)

        # Compute the sum and square-sum.
        sum_size = input.size(0) * input.size(2)
        input_sum = _sum_ft(input)
        input_ssum = _sum_ft(input ** 2)

        # Reduce-and-broadcast the statistics: copy 0 aggregates the partial
        # sums of every replica and broadcasts mean/inv_std back.
        if self._parallel_id == 0:
            mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
        else:
            mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))

        # Compute the output.
        if self.affine:
            # MJY:: Fuse the multiplication for speed.
            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
        else:
            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)

        # Reshape it.
        return output.view(input_shape)

    def __data_parallel_replicate__(self, ctx, copy_id):
        # Invoked once per replica; ctx is shared between all replicas of this
        # module so the master can hand out slave pipes.
        self._is_parallel = True
        self._parallel_id = copy_id

        # parallel_id == 0 means master device.
        if self._parallel_id == 0:
            ctx.sync_master = self._sync_master
        else:
            self._slave_pipe = ctx.sync_master.register_slave(copy_id)

    def _data_parallel_master(self, intermediates):
        """Reduce the sum and square-sum, compute the statistics, and broadcast it."""

        # Always using same "device order" makes the ReduceAdd operation faster.
        # Thanks to:: Tete Xiao (http://tetexiao.com/)
        intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())

        to_reduce = [i[1][:2] for i in intermediates]
        to_reduce = [j for i in to_reduce for j in i]  # flatten
        target_gpus = [i[1].sum.get_device() for i in intermediates]

        sum_size = sum([i[1].sum_size for i in intermediates])
        sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
        mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)

        broadcasted = Broadcast.apply(target_gpus, mean, inv_std)

        # Pair each copy id with its (mean, inv_std) slice of the broadcast.
        outputs = []
        for i, rec in enumerate(intermediates):
            outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))

        return outputs

    def _compute_mean_std(self, sum_, ssum, size):
        """Compute the mean and standard-deviation with sum and square-sum. This method
        also maintains the moving average on the master device."""
        assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
        mean = sum_ / size
        sumvar = ssum - sum_ * mean
        # Running stats use the unbiased estimate; normalization uses the
        # biased one, matching torch.nn.BatchNorm semantics.
        unbias_var = sumvar / (size - 1)
        bias_var = sumvar / size

        if hasattr(torch, 'no_grad'):
            with torch.no_grad():
                self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
                self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
        else:
            self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
            self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data

        # clamp lower-bounds the variance by eps before the inverse sqrt.
        return mean, bias_var.clamp(self.eps) ** -0.5
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
    r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a
    mini-batch.

    .. math::

        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in PyTorch BatchNorm1d, the mean and standard-deviation
    are reduced across all devices during training, so statistics stay exact
    under ``nn.DataParallel``.  For one-GPU or CPU-only use it behaves exactly
    like the built-in implementation.

    Args:
        num_features: num_features from an expected input of size
            `batch_size x num_features [x width]`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer learnable affine
            parameters. Default: ``True``

    Shape::
        - Input: :math:`(N, C)` or :math:`(N, C, L)`
        - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)

    Examples:
        >>> m = SynchronizedBatchNorm1d(100)
        >>> m = SynchronizedBatchNorm1d(100, affine=False)
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100)))
    """

    def _check_input_dim(self, input):
        if input.dim() != 2 and input.dim() != 3:
            raise ValueError('expected 2D or 3D input (got {}D input)'
                             .format(input.dim()))
        super(SynchronizedBatchNorm1d, self)._check_input_dim(input)


class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
    r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch
    of 3d inputs.

    .. math::

        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in PyTorch BatchNorm2d, the mean and standard-deviation
    are reduced across all devices during training (see SynchronizedBatchNorm1d
    for the rationale); statistics are computed over `(N, H, W)` slices.

    Args:
        num_features: num_features from an expected input of size
            batch_size x num_features x height x width
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer learnable affine
            parameters. Default: ``True``

    Shape::
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)

    Examples:
        >>> m = SynchronizedBatchNorm2d(100)
        >>> m = SynchronizedBatchNorm2d(100, affine=False)
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100, 35, 45)))
    """

    def _check_input_dim(self, input):
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))
        super(SynchronizedBatchNorm2d, self)._check_input_dim(input)


class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
    r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
    of 4d inputs.

    .. math::

        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in PyTorch BatchNorm3d, the mean and standard-deviation
    are reduced across all devices during training (see SynchronizedBatchNorm1d
    for the rationale); statistics are computed over `(N, D, H, W)` slices.

    Args:
        num_features: num_features from an expected input of size
            batch_size x num_features x depth x height x width
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer learnable affine
            parameters. Default: ``True``

    Shape::
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)

    Examples:
        >>> m = SynchronizedBatchNorm3d(100)
        >>> m = SynchronizedBatchNorm3d(100, affine=False)
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10)))
    """

    def _check_input_dim(self, input):
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))
        super(SynchronizedBatchNorm3d, self)._check_input_dim(input)


@contextlib.contextmanager
def patch_sync_batchnorm():
    """Context manager that monkey-patches ``nn.BatchNorm1d/2d/3d`` with the
    synchronized versions, restoring the originals on exit.

    Fix: restoration now happens in a ``finally`` block, so the patch is
    undone even when the ``with`` body raises.
    """
    import torch.nn as nn

    backup = nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d

    nn.BatchNorm1d = SynchronizedBatchNorm1d
    nn.BatchNorm2d = SynchronizedBatchNorm2d
    nn.BatchNorm3d = SynchronizedBatchNorm3d

    try:
        yield
    finally:
        nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d = backup
def convert_model(module):
    """Traverse ``module`` recursively and replace every
    ``torch.nn.modules.batchnorm.BatchNorm*d`` with its SynchronizedBatchNorm*d
    counterpart, re-wrapping a ``DataParallel`` as ``DataParallelWithCallback``
    so the sync hooks run on replication.

    Args:
        module: the input module needs to be convert to SyncBN model

    Examples:
        >>> import torch.nn as nn
        >>> import torchvision
        >>> # m is a standard pytorch model
        >>> m = torchvision.models.resnet18(True)
        >>> m = nn.DataParallel(m)
        >>> # after convert, m is using SyncBN
        >>> m = convert_model(m)
    """
    if isinstance(module, torch.nn.DataParallel):
        # Convert the wrapped module first, then re-wrap with the
        # callback-aware DataParallel.
        inner = convert_model(module.module)
        return DataParallelWithCallback(inner)

    converted = module
    bn_pairs = zip(
        [torch.nn.modules.batchnorm.BatchNorm1d,
         torch.nn.modules.batchnorm.BatchNorm2d,
         torch.nn.modules.batchnorm.BatchNorm3d],
        [SynchronizedBatchNorm1d,
         SynchronizedBatchNorm2d,
         SynchronizedBatchNorm3d])
    for torch_bn, sync_bn in bn_pairs:
        if not isinstance(module, torch_bn):
            continue
        # Rebuild the layer with identical hyper-parameters and copy over
        # the running statistics (and affine weights when present).
        converted = sync_bn(module.num_features, module.eps, module.momentum, module.affine)
        converted.running_mean = module.running_mean
        converted.running_var = module.running_var
        if module.affine:
            converted.weight.data = module.weight.data.clone().detach()
            converted.bias.data = module.bias.data.clone().detach()

    for name, child in module.named_children():
        converted.add_module(name, convert_model(child))

    return converted
class BatchNorm2dReimpl(nn.Module):
    """
    A re-implementation of batch normalization, used for testing the numerical
    stability of the synchronized version.

    Author: acgtyrant
    See also:
    https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super().__init__()

        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.weight = nn.Parameter(torch.empty(num_features))
        self.bias = nn.Parameter(torch.empty(num_features))
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()

    def reset_running_stats(self):
        self.running_mean.zero_()
        self.running_var.fill_(1)

    def reset_parameters(self):
        # Uniform weight / zero bias, matching _BatchNorm's historical init.
        self.reset_running_stats()
        init.uniform_(self.weight)
        init.zeros_(self.bias)

    def forward(self, input_):
        batchsize, channels, height, width = input_.size()
        numel = batchsize * height * width
        # Flatten to (C, N*H*W) so per-channel statistics are single reductions.
        input_ = input_.permute(1, 0, 2, 3).contiguous().view(channels, numel)
        sum_ = input_.sum(1)
        sum_of_square = input_.pow(2).sum(1)
        mean = sum_ / numel
        sumvar = sum_of_square - sum_ * mean

        self.running_mean = (
            (1 - self.momentum) * self.running_mean
            + self.momentum * mean.detach()
        )
        unbias_var = sumvar / (numel - 1)
        self.running_var = (
            (1 - self.momentum) * self.running_var
            + self.momentum * unbias_var.detach()
        )

        # Normalization itself uses the biased variance, like nn.BatchNorm2d.
        bias_var = sumvar / numel
        inv_std = 1 / (bias_var + self.eps).pow(0.5)
        output = (
            (input_ - mean.unsqueeze(1)) * inv_std.unsqueeze(1) *
            self.weight.unsqueeze(1) + self.bias.unsqueeze(1))

        return output.view(channels, batchsize, height, width).permute(1, 0, 2, 3).contiguous()


class FutureResult(object):
    """A thread-safe future implementation. Used only as one-to-one pipe."""

    def __init__(self):
        self._result = None
        self._lock = threading.Lock()
        self._cond = threading.Condition(self._lock)

    def put(self, result):
        with self._lock:
            # Fix: message previously read "has't".
            assert self._result is None, 'Previous result hasn\'t been fetched.'
            self._result = result
            self._cond.notify()

    def get(self):
        with self._lock:
            if self._result is None:
                self._cond.wait()

            res = self._result
            self._result = None
            return res


_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])


class SlavePipe(_SlavePipeBase):
    """Pipe for master-slave communication."""

    def run_slave(self, msg):
        # Send our message, wait for the master's reply, then ack with True.
        self.queue.put((self.identifier, msg))
        ret = self.result.get()
        self.queue.put(True)
        return ret


class SyncMaster(object):
    """An abstract `SyncMaster` object.

    - During the replication, as the data parallel will trigger an callback of each module, all slave devices should
    call `register(id)` and obtain an `SlavePipe` to communicate with the master.
    - During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected,
    and passed to a registered callback.
    - After receiving the messages, the master device should gather the information and determine to message passed
    back to each slave devices.
    """

    def __init__(self, master_callback):
        """
        Args:
            master_callback: a callback to be invoked after having collected messages from slave devices.
        """
        self._master_callback = master_callback
        self._queue = queue.Queue()
        self._registry = collections.OrderedDict()
        self._activated = False

    def __getstate__(self):
        # Queues/futures are not picklable; rebuild them on unpickling.
        return {'master_callback': self._master_callback}

    def __setstate__(self, state):
        self.__init__(state['master_callback'])

    def register_slave(self, identifier):
        """
        Register an slave device.

        Args:
            identifier: an identifier, usually is the device id.

        Returns: a `SlavePipe` object which can be used to communicate with the master device.
        """
        if self._activated:
            # A new round of registration invalidates the previous one.
            assert self._queue.empty(), 'Queue is not clean before next initialization.'
            self._activated = False
            self._registry.clear()
        future = FutureResult()
        self._registry[identifier] = _MasterRegistry(future)
        return SlavePipe(identifier, self._queue, future)

    def run_master(self, master_msg):
        """
        Main entry for the master device in each forward pass.
        The messages were first collected from each devices (including the master device), and then
        an callback will be invoked to compute the message to be sent back to each devices
        (including the master device).

        Args:
            master_msg: the message that the master want to send to itself. This will be placed as the first
            message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.

        Returns: the message to be sent back to the master device.
        """
        self._activated = True

        intermediates = [(0, master_msg)]
        for i in range(self.nr_slaves):
            intermediates.append(self._queue.get())

        results = self._master_callback(intermediates)
        assert results[0][0] == 0, 'The first result should belongs to the master.'

        # Deliver each slave's result through its registered future.
        for i, res in results:
            if i == 0:
                continue
            self._registry[i].result.put(res)

        # Wait for the True acknowledgement each slave pipe sends after reading.
        for i in range(self.nr_slaves):
            assert self._queue.get() is True

        return results[0][1]

    @property
    def nr_slaves(self):
        return len(self._registry)
""" master_copy = modules[0] nr_modules = len(list(master_copy.modules())) ctxs = [CallbackContext() for _ in range(nr_modules)] for i, module in enumerate(modules): for j, m in enumerate(module.modules()): if hasattr(m, '__data_parallel_replicate__'): m.__data_parallel_replicate__(ctxs[j], i) class DataParallelWithCallback(DataParallel): """ Data Parallel with a replication callback. An replication callback `__data_parallel_replicate__` of each module will be invoked after being created by original `replicate` function. The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` Examples: > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) # sync_bn.__data_parallel_replicate__ will be invoked. """ def replicate(self, module, device_ids): modules = super(DataParallelWithCallback, self).replicate(module, device_ids) execute_replication_callbacks(modules) return modules def patch_replication_callback(data_parallel): """ Monkey-patch an existing `DataParallel` object. Add the replication callback. Useful when you have customized `DataParallel` implementation. 
Examples: > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) > patch_replication_callback(sync_bn) # this is equivalent to > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) """ assert isinstance(data_parallel, DataParallel) old_replicate = data_parallel.replicate @functools.wraps(old_replicate) def new_replicate(module, device_ids): modules = old_replicate(module, device_ids) execute_replication_callbacks(modules) return modules data_parallel.replicate = new_replicate ================================================ FILE: models/networks/sync_batchnorm/unittest.py ================================================ # -*- coding: utf-8 -*- # File : unittest.py # Author : Jiayuan Mao # Email : maojiayuan@gmail.com # Date : 27/01/2018 # # This file is part of Synchronized-BatchNorm-PyTorch. # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch # Distributed under MIT License. import unittest import torch class TorchTestCase(unittest.TestCase): def assertTensorClose(self, x, y): adiff = float((x - y).abs().max()) if (y == 0).all(): rdiff = 'NaN' else: rdiff = float((adiff / y).abs().max()) message = ( 'Tensor close check failed\n' 'adiff={}\n' 'rdiff={}\n' ).format(adiff, rdiff) self.assertTrue(torch.allclose(x, y), message) ================================================ FILE: models/pix2pix_model.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
""" import torch import models.networks as networks import util.util as util import cv2 as cv import torch.nn.functional as F class Pix2pixModel(torch.nn.Module): @staticmethod def modify_commandline_options(parser, is_train): networks.modify_commandline_options(parser, is_train) return parser def __init__(self, opt): super().__init__() self.opt = opt self.FloatTensor = torch.cuda.FloatTensor if self.use_gpu() \ else torch.FloatTensor self.ByteTensor = torch.cuda.ByteTensor if self.use_gpu() \ else torch.ByteTensor self.netG, self.netD, self.netE = self.initialize_networks(opt) # set loss functions if opt.isTrain: self.criterionGAN = networks.GANLoss( opt.gan_mode, tensor=self.FloatTensor, opt=self.opt) self.criterionFeat = torch.nn.L1Loss() if not opt.no_vgg_loss: self.criterionVGG = networks.VGGLoss(self.opt.gpu_ids, self.opt.vgg_path) if opt.use_vae: self.KLDLoss = networks.KLDLoss() # Entry point for all calls involving forward pass # of deep networks. We used this approach since DataParallel module # can't parallelize custom functions, we branch to different # routines based on |mode|. 
def forward(self, data, mode): input_semantics, real_image = self.preprocess_input(data) if mode == 'generator': g_loss, generated = self.compute_generator_loss( input_semantics, real_image) return g_loss, generated elif mode == 'discriminator': d_loss = self.compute_discriminator_loss( input_semantics, real_image) return d_loss elif mode == 'encode_only': z, mu, logvar = self.encode_z(real_image) return mu, logvar elif mode == 'inference': with torch.no_grad(): fake_image = self.vis_test(input_semantics, real_image) return fake_image else: raise ValueError("|mode| is invalid") def create_optimizers(self, opt): G_params = list(self.netG.parameters()) if opt.use_vae: G_params += list(self.netE.parameters()) if opt.isTrain: D_params = list(self.netD.parameters()) if opt.no_TTUR: beta1, beta2 = opt.beta1, opt.beta2 G_lr, D_lr = opt.lr, opt.lr else: beta1, beta2 = 0, 0.9 G_lr, D_lr = opt.lr / 2, opt.lr * 2 optimizer_G = torch.optim.Adam(G_params, lr=G_lr, betas=(beta1, beta2)) optimizer_D = torch.optim.Adam(D_params, lr=D_lr, betas=(beta1, beta2)) return optimizer_G, optimizer_D def save(self, epoch): util.save_network(self.netG, 'G', epoch, self.opt) util.save_network(self.netD, 'D', epoch, self.opt) if self.opt.use_vae: util.save_network(self.netE, 'E', epoch, self.opt) ############################################################################ # Private helper methods ############################################################################ def initialize_networks(self, opt): netG = networks.define_G(opt) # print(netG) netD = networks.define_D(opt) if opt.isTrain else None netE = networks.define_E(opt) if opt.use_vae else None if not opt.isTrain or opt.continue_train: netG = util.load_network(netG, 'G', opt.which_epoch, opt) if opt.isTrain: netD = util.load_network(netD, 'D', opt.which_epoch, opt) if opt.use_vae: netE = util.load_network(netE, 'E', opt.which_epoch, opt) return netG, netD, netE # preprocess the input, such as moving the tensors to GPUs and # 
transforming the label map to one-hot encoding # |data|: dictionary of the input data def preprocess_input(self, data): # move to GPU and change data types data['label'] = data['label'].long() if self.use_gpu(): data['label'] = data['label'].cuda() data['instance'] = data['instance'].cuda() data['image'] = data['image'].cuda() # create one-hot label map label_map = data['label'] bs, _, h, w = label_map.size() nc = self.opt.label_nc + 1 if self.opt.contain_dontcare_label \ else self.opt.label_nc input_label = self.FloatTensor(bs, nc, h, w).zero_() input_semantics = input_label.scatter_(1, label_map, 1.0) # concatenate instance map if it exists if not self.opt.no_instance: inst_map = data['instance'] instance_edge_map = self.get_edges(inst_map) input_semantics = torch.cat((input_semantics, instance_edge_map), dim=1) return input_semantics, data['image'] def compute_generator_loss(self, input_semantics, real_image): G_losses = {} fake_image, KLD_loss = self.generate_fake( input_semantics, real_image, compute_kld_loss=self.opt.use_vae) if self.opt.use_vae: G_losses['KLD'] = KLD_loss pred_fake, pred_real = self.discriminate( input_semantics, fake_image, real_image) G_losses['GAN'] = self.criterionGAN(pred_fake, True, for_discriminator=False) if not self.opt.no_ganFeat_loss: num_D = len(pred_fake) GAN_Feat_loss = self.FloatTensor(1).fill_(0) for i in range(num_D): # for each discriminator # last output is the final prediction, so we exclude it num_intermediate_outputs = len(pred_fake[i]) - 1 for j in range(num_intermediate_outputs): # for each layer output unweighted_loss = self.criterionFeat( pred_fake[i][j], pred_real[i][j].detach()) GAN_Feat_loss += unweighted_loss * self.opt.lambda_feat / num_D G_losses['GAN_Feat'] = GAN_Feat_loss if not self.opt.no_vgg_loss: G_losses['VGG'] = self.criterionVGG(fake_image, real_image) \ * self.opt.lambda_vgg return G_losses, fake_image def compute_discriminator_loss(self, input_semantics, real_image): D_losses = {} with 
torch.no_grad(): fake_image, _ = self.generate_fake(input_semantics, real_image) fake_image = fake_image.detach() fake_image.requires_grad_() pred_fake, pred_real = self.discriminate( input_semantics, fake_image, real_image) D_losses['D_Fake'] = self.criterionGAN(pred_fake, False, for_discriminator=True) D_losses['D_real'] = self.criterionGAN(pred_real, True, for_discriminator=True) return D_losses def encode_z(self, real_image): mu, logvar = self.netE(real_image) z = self.reparameterize(mu, logvar) return z, mu, logvar def generate_fake(self, input_semantics, real_image, compute_kld_loss=False): z = None KLD_loss = None if self.opt.use_vae: z, mu, logvar = self.encode_z(real_image) if compute_kld_loss: KLD_loss = self.KLDLoss(mu, logvar) * self.opt.lambda_kld fake_image = self.netG(input_semantics, z=z) assert (not compute_kld_loss) or self.opt.use_vae, \ "You cannot compute KLD loss if opt.use_vae == False" return fake_image, KLD_loss def vis_test(self, input_semantics, times=1): fake_image = [] for j in range(times): fake_image.append(self.netG(input_semantics, z=None)) return fake_image # Given fake and real image, return the prediction of discriminator # for each fake and real image. def discriminate(self, input_semantics, fake_image, real_image): fake_concat = torch.cat([input_semantics, fake_image], dim=1) real_concat = torch.cat([input_semantics, real_image], dim=1) # In Batch Normalization, the fake and real images are # recommended to be in the same batch to avoid disparate # statistics in fake and real images. # So both fake and real images are fed to D all at once. 
fake_and_real = torch.cat([fake_concat, real_concat], dim=0) discriminator_out = self.netD(fake_and_real) pred_fake, pred_real = self.divide_pred(discriminator_out) return pred_fake, pred_real # Take the prediction of fake and real images from the combined batch def divide_pred(self, pred): # the prediction contains the intermediate outputs of multiscale GAN, # so it's usually a list if type(pred) == list: fake = [] real = [] for p in pred: fake.append([tensor[:tensor.size(0) // 2] for tensor in p]) real.append([tensor[tensor.size(0) // 2:] for tensor in p]) else: fake = pred[:pred.size(0) // 2] real = pred[pred.size(0) // 2:] return fake, real def get_edges(self, t): edge = self.ByteTensor(t.size()).zero_() edge[:, :, :, 1:] = edge[:, :, :, 1:] | (t[:, :, :, 1:] != t[:, :, :, :-1]).byte() edge[:, :, :, :-1] = edge[:, :, :, :-1] | (t[:, :, :, 1:] != t[:, :, :, :-1]).byte() edge[:, :, 1:, :] = edge[:, :, 1:, :] | (t[:, :, 1:, :] != t[:, :, :-1, :]).byte() edge[:, :, :-1, :] = edge[:, :, :-1, :] | (t[:, :, 1:, :] != t[:, :, :-1, :]).byte() return edge.float() def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps.mul(std) + mu def use_gpu(self): return len(self.opt.gpu_ids) > 0 ================================================ FILE: models/smis_model.py ================================================ import torch import models.networks as networks import util.util as util import cv2 as cv import torch.nn.functional as F import numpy as np class SmisModel(torch.nn.Module): @staticmethod def modify_commandline_options(parser, is_train): networks.modify_commandline_options(parser, is_train) return parser def __init__(self, opt): super().__init__() self.opt = opt self.FloatTensor = torch.cuda.FloatTensor if self.use_gpu() \ else torch.FloatTensor self.ByteTensor = torch.cuda.ByteTensor if self.use_gpu() \ else torch.ByteTensor self.netG, self.netD, self.netE = self.initialize_networks(opt) # set loss functions if 
opt.isTrain: self.criterionGAN = networks.GANLoss( opt.gan_mode, tensor=self.FloatTensor, opt=self.opt) self.criterionFeat = torch.nn.L1Loss() if not opt.no_vgg_loss: self.criterionVGG = networks.VGGLoss(self.opt.gpu_ids, self.opt.vgg_path) if opt.use_vae: self.KLDLoss = networks.KLDLoss() # Entry point for all calls involving forward pass # of deep networks. We used this approach since DataParallel module # can't parallelize custom functions, we branch to different # routines based on |mode|. def forward(self, data, mode): # input_semantics, real_image = self.preprocess_input(data) input_semantics, real_image = self.preprocess_input(data) if mode == 'generator': g_loss, generated = self.compute_generator_loss( input_semantics, real_image) return g_loss, generated elif mode == 'discriminator': d_loss = self.compute_discriminator_loss( input_semantics, real_image) return d_loss elif mode == 'encode_only': z, mu, logvar = self.encode_z(real_image) return mu, logvar elif mode == 'inference': with torch.no_grad(): if self.opt.test_mask != -1: fake_image = self.vis_test(input_semantics, times=self.opt.test_times, test_mask=self.opt.test_mask) else: fake_image = self.vis_test(input_semantics, times=self.opt.test_times) return fake_image else: raise ValueError("|mode| is invalid") def create_optimizers(self, opt): G_params = list(self.netG.parameters()) if opt.use_vae: G_params += list(self.netE.parameters()) # G_params += list(self.netE_edge.parameters()) if opt.isTrain: D_params = list(self.netD.parameters()) if opt.no_TTUR: beta1, beta2 = opt.beta1, opt.beta2 G_lr, D_lr = opt.lr, opt.lr else: beta1, beta2 = 0, 0.9 G_lr, D_lr = opt.lr / 2, opt.lr * 2 optimizer_G = torch.optim.Adam(G_params, lr=G_lr, betas=(beta1, beta2)) optimizer_D = torch.optim.Adam(D_params, lr=D_lr, betas=(beta1, beta2)) return optimizer_G, optimizer_D def save(self, epoch): util.save_network(self.netG, 'G', epoch, self.opt) util.save_network(self.netD, 'D', epoch, self.opt) if self.opt.use_vae: 
util.save_network(self.netE, 'E', epoch, self.opt) # util.save_network(self.netE_edge, 'E_edge', epoch, self.opt) ############################################################################ # Private helper methods ############################################################################ def initialize_networks(self, opt): netG = networks.define_G(opt) # if not opt.isTrain: # print(netG) netD = networks.define_D(opt) if opt.isTrain else None netE = networks.define_E(opt) if opt.use_vae and opt.isTrain else None if not opt.isTrain or opt.continue_train: netG = util.load_network(netG, 'G', opt.which_epoch, opt) if opt.isTrain: netD = util.load_network(netD, 'D', opt.which_epoch, opt) if opt.use_vae: netE = util.load_network(netE, 'E', opt.which_epoch, opt) # netE_edge = util.load_network(netE_edge, '') return netG, netD, netE # preprocess the input, such as moving the tensors to GPUs and # transforming the label map to one-hot encoding # |data|: dictionary of the input data def preprocess_input(self, data): # move to GPU and change data types data['label'] = data['label'].long() if self.use_gpu(): data['label'] = data['label'].cuda() data['instance'] = data['instance'].cuda() data['image'] = data['image'].cuda() # data['edge'] = data['edge'].cuda() # create one-hot label map label_map = data['label'] bs, _, h, w = label_map.size() nc = self.opt.label_nc + 1 if self.opt.contain_dontcare_label \ else self.opt.label_nc input_label = self.FloatTensor(bs, nc, h, w).zero_() input_semantics = input_label.scatter_(1, label_map, 1.0) # concatenate instance map if it exists if not self.opt.no_instance: inst_map = data['instance'] instance_edge_map = self.get_edges(inst_map) input_semantics = torch.cat((input_semantics, instance_edge_map), dim=1) return input_semantics, data['image'] def compute_generator_loss(self, input_semantics, real_image): G_losses = {} fake_image, KLD_loss, CODE_loss = self.generate_fake( input_semantics, real_image, 
compute_kld_loss=self.opt.use_vae) if self.opt.use_vae: G_losses['KLD'] = KLD_loss # G_losses['CODE'] = CODE_loss pred_fake, pred_real = self.discriminate( input_semantics, fake_image, real_image) G_losses['GAN'] = self.criterionGAN(pred_fake, True, for_discriminator=False) if not self.opt.no_ganFeat_loss: num_D = len(pred_fake) GAN_Feat_loss = self.FloatTensor(1).fill_(0) for i in range(num_D): # for each discriminator # last output is the final prediction, so we exclude it num_intermediate_outputs = len(pred_fake[i]) - 1 for j in range(num_intermediate_outputs): # for each layer output unweighted_loss = self.criterionFeat( pred_fake[i][j], pred_real[i][j].detach()) GAN_Feat_loss += unweighted_loss * self.opt.lambda_feat / num_D G_losses['GAN_Feat'] = GAN_Feat_loss if not self.opt.no_vgg_loss: G_losses['VGG'] = self.criterionVGG(fake_image, real_image) \ * self.opt.lambda_vgg return G_losses, fake_image def compute_discriminator_loss(self, input_semantics, real_image): D_losses = {} with torch.no_grad(): fake_image, _, _ = self.generate_fake(input_semantics, real_image) fake_image = fake_image.detach() fake_image.requires_grad_() pred_fake, pred_real = self.discriminate( input_semantics, fake_image, real_image) D_losses['D_Fake'] = self.criterionGAN(pred_fake, False, for_discriminator=True) D_losses['D_real'] = self.criterionGAN(pred_real, True, for_discriminator=True) return D_losses def encode_z(self, real_image): mu, logvar = self.netE(real_image) z = self.reparameterize(mu, logvar) return z, mu, logvar def trans_img(self, input_semantics, real_image): images = None seg_range = input_semantics.size()[1] if self.opt.dataset_mode == 'cityscapes': seg_range -= 1 for i in range(input_semantics.size(0)): resize_image = None for n in range(0, seg_range): seg_image = real_image[i] * input_semantics[i][n] # resize seg_image c_sum = seg_image.sum(dim=0) y_seg = c_sum.sum(dim=0) x_seg = c_sum.sum(dim=1) y_id = y_seg.nonzero() if y_id.size()[0] == 0: seg_image = 
seg_image.unsqueeze(dim=0) # resize_image = torch.cat((resize_image, seg_image), dim=0) if resize_image is None: resize_image = seg_image else: resize_image = torch.cat((resize_image, seg_image), dim=1) continue # print(y_id) y_min = y_id[0][0] y_max = y_id[-1][0] x_id = x_seg.nonzero() x_min = x_id[0][0] x_max = x_id[-1][0] seg_image = seg_image.unsqueeze(dim=0) # print(x_min, x_max, y_min, y_max) if self.opt.dataset_mode == 'cityscapes': seg_image = F.interpolate(seg_image[:, :, x_min:x_max + 1, y_min:y_max + 1], size=[256, 512]) else: seg_image = F.interpolate(seg_image[:, :, x_min:x_max + 1, y_min:y_max + 1], size=[256, 256]) # seg_image = F.interpolate(seg_image[:, :, x_min:x_max + 1, y_min:y_max + 1], scale_factor=256 / max(y_max-y_min, x_max-x_min)) # seg_image = F.interpolate(seg_image[:, :, x_min:x_max + 1, y_min:y_max + 1], size=[256, 256]) if resize_image is None: resize_image = seg_image else: resize_image = torch.cat((resize_image, seg_image), dim=1) if images is None: images = resize_image else: images = torch.cat((images, resize_image), dim=0) return images def generate_fake(self, input_semantics, real_image, compute_kld_loss=False): z = None KLD_loss = None if self.opt.use_vae: images = self.trans_img(input_semantics, real_image) z, mu, logvar = self.encode_z(images) CODE_loss = None if compute_kld_loss: KLD_loss = self.KLDLoss(mu, logvar) * self.opt.lambda_kld fake_image = self.netG(input_semantics, z=z) assert (not compute_kld_loss) or self.opt.use_vae, \ "You cannot compute KLD loss if opt.use_vae == False" return fake_image, KLD_loss, CODE_loss def vis_test(self, input_semantics, times=1, test_mask=None): fake_image = [] if self.opt.dataset_mode == 'cityscapes': z = torch.randn(input_semantics.size(0), self.opt.label_nc, 8, 4 * 8).cuda() for i in range(times): if test_mask is not None: z[:, test_mask, :, :] = torch.randn(input_semantics.size(0), 8, 4*8) else: z = torch.randn(input_semantics.size(0), self.opt.label_nc, 8, 4 * 8).cuda() 
fake_image.append( self.netG(input_semantics, z=z.view(input_semantics.size(0), self.opt.label_nc * 8, 4, 8))) else: z = torch.randn(input_semantics.size(0), self.opt.semantic_nc, 8, 4 * 4) for i in range(times): if test_mask is not None: z[:, test_mask, :, :] = torch.randn(input_semantics.size(0), 8, 16) else: z = torch.randn(input_semantics.size(0), self.opt.semantic_nc, 8, 4 * 4) fake_image.append(self.netG(input_semantics, z=z.view(input_semantics.size(0), self.opt.semantic_nc * 8, 4, 4).cuda())) return fake_image # Given fake and real image, return the prediction of discriminator # for each fake and real image. def discriminate(self, input_semantics, fake_image, real_image): fake_concat = torch.cat([input_semantics, fake_image], dim=1) real_concat = torch.cat([input_semantics, real_image], dim=1) # In Batch Normalization, the fake and real images are # recommended to be in the same batch to avoid disparate # statistics in fake and real images. # So both fake and real images are fed to D all at once. 
fake_and_real = torch.cat([fake_concat, real_concat], dim=0) discriminator_out = self.netD(fake_and_real) pred_fake, pred_real = self.divide_pred(discriminator_out) return pred_fake, pred_real # Take the prediction of fake and real images from the combined batch def divide_pred(self, pred): # the prediction contains the intermediate outputs of multiscale GAN, # so it's usually a list if type(pred) == list: fake = [] real = [] for p in pred: fake.append([tensor[:tensor.size(0) // 2] for tensor in p]) real.append([tensor[tensor.size(0) // 2:] for tensor in p]) else: fake = pred[:pred.size(0) // 2] real = pred[pred.size(0) // 2:] return fake, real def get_edges(self, t): edge = self.ByteTensor(t.size()).zero_() edge[:, :, :, 1:] = edge[:, :, :, 1:] | (t[:, :, :, 1:] != t[:, :, :, :-1]).byte() edge[:, :, :, :-1] = edge[:, :, :, :-1] | (t[:, :, :, 1:] != t[:, :, :, :-1]).byte() edge[:, :, 1:, :] = edge[:, :, 1:, :] | (t[:, :, 1:, :] != t[:, :, :-1, :]).byte() edge[:, :, :-1, :] = edge[:, :, :-1, :] | (t[:, :, 1:, :] != t[:, :, :-1, :]).byte() return edge.float() def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps.mul(std) + mu def use_gpu(self): return len(self.opt.gpu_ids) > 0 ================================================ FILE: options/__init__.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). """ ================================================ FILE: options/base_options.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
""" import sys import argparse import os from util import util import torch import models import data import pickle class BaseOptions(): def __init__(self): self.initialized = False def initialize(self, parser): # experiment specifics parser.add_argument('--name', type=str, default='label2coco', help='name of the experiment. It decides where to store samples and models') parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') parser.add_argument('--model', type=str, default='smis', help='which model to use, msis or pix2pix') parser.add_argument('--norm_G', type=str, default='spectralinstance', help='instance normalization or batch normalization') parser.add_argument('--norm_D', type=str, default='spectralinstance', help='instance normalization or batch normalization') parser.add_argument('--norm_E', type=str, default='spectralinstance', help='instance normalization or batch normalization') parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') # input/output sizes parser.add_argument('--batchSize', type=int, default=1, help='input batch size') parser.add_argument('--preprocess_mode', type=str, default='scale_width_and_crop', help='scaling and cropping of images at load time.', choices=( "resize_and_crop", "crop", "scale_width", "scale_width_and_crop", "scale_shortside", "scale_shortside_and_crop", "fixed", "none")) parser.add_argument('--load_size', type=int, default=1024, help='Scale images to this size. The final image will be cropped to --crop_size.') parser.add_argument('--crop_size', type=int, default=512, help='Crop to the width of crop_size (after initially scaling the images to load_size.)') parser.add_argument('--aspect_ratio', type=float, default=1.0, help='The ratio width/height. 
The final height of the load image will be crop_size/aspect_ratio') parser.add_argument('--label_nc', type=int, default=182, help='# of input label classes without unknown class. If you have unknown class as class label, specify --contain_dopntcare_label.') parser.add_argument('--contain_dontcare_label', action='store_true', help='if the label map contains dontcare label (dontcare=255)') parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels') # for setting inputs parser.add_argument('--dataroot', type=str, default='./datasets/cityscapes/') parser.add_argument('--dataset_mode', type=str, default='coco') parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data argumentation') parser.add_argument('--nThreads', default=0, type=int, help='# threads for loading data') parser.add_argument('--max_dataset_size', type=int, default=sys.maxsize, help='Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, only a subset is loaded.') parser.add_argument('--load_from_opt_file', action='store_true', help='load the options from checkpoints and use that as default') parser.add_argument('--cache_filelist_write', action='store_true', help='saves the current filelist into a text file, so that it loads faster') parser.add_argument('--cache_filelist_read', action='store_true', help='reads from the file list cache') # for displays parser.add_argument('--display_winsize', type=int, default=400, help='display window size') # for generator parser.add_argument('--netG', type=str, default='spade', help='selects model to use for netG (pix2pixhd | spade)') parser.add_argument('--netE', type=str, default='conv') parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer') parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]') parser.add_argument('--init_variance', type=float, default=0.02, help='variance of the initialization distribution') parser.add_argument('--z_dim', type=int, default=256, help="dimension of the latent z vector") # for instance-wise features parser.add_argument('--no_instance', action='store_true', help='if specified, do *not* add instance map as input') parser.add_argument('--nef', type=int, default=16, help='# of encoder filters in the first conv layer') parser.add_argument('--use_vae', default=True, help='use encoder and vae loss') parser.add_argument('--vgg_path', type=str, default='') parser.add_argument('--clean_code', action='store_true') parser.add_argument('--test_type', type=str, default='visual', help='visual | FID | LPIPS | Mask LPIPS | IS') parser.add_argument('--test_times', type=int, default=1, ) parser.add_argument('--test_mask', type=int, default=-1, ) parser.add_argument('--no_spectral', action='store_true') parser.add_argument('--resnet_n_downsample', type=int, default=3) 
self.initialized = True return parser def gather_options(self): # initialize parser with basic options if not self.initialized: parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser = self.initialize(parser) # get the basic options opt, unknown = parser.parse_known_args() # modify model-related parser options model_name = opt.model model_option_setter = models.get_option_setter(model_name) parser = model_option_setter(parser, self.isTrain) # modify dataset-related parser options dataset_mode = opt.dataset_mode dataset_option_setter = data.get_option_setter(dataset_mode) parser = dataset_option_setter(parser, self.isTrain) opt, unknown = parser.parse_known_args() # if there is opt_file, load it. # The previous default options will be overwritten if opt.load_from_opt_file: parser = self.update_options_from_file(parser, opt) opt = parser.parse_args() self.parser = parser return opt def print_options(self, opt): message = '' message += '----------------- Options ---------------\n' for k, v in sorted(vars(opt).items()): comment = '' default = self.parser.get_default(k) if v != default: comment = '\t[default: %s]' % str(default) message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) message += '----------------- End -------------------' # print(message) def option_file_path(self, opt, makedir=False): expr_dir = os.path.join(opt.checkpoints_dir, opt.name) if makedir: util.mkdirs(expr_dir) file_name = os.path.join(expr_dir, 'opt') return file_name def save_options(self, opt): file_name = self.option_file_path(opt, makedir=True) with open(file_name + '.txt', 'wt') as opt_file: for k, v in sorted(vars(opt).items()): comment = '' default = self.parser.get_default(k) if v != default: comment = '\t[default: %s]' % str(default) opt_file.write('{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)) with open(file_name + '.pkl', 'wb') as opt_file: pickle.dump(opt, opt_file) def update_options_from_file(self, parser, opt): new_opt = 
self.load_options(opt) for k, v in sorted(vars(opt).items()): if hasattr(new_opt, k) and v != getattr(new_opt, k): new_val = getattr(new_opt, k) parser.set_defaults(**{k: new_val}) return parser def load_options(self, opt): file_name = self.option_file_path(opt, makedir=False) new_opt = pickle.load(open(file_name + '.pkl', 'rb')) return new_opt def parse(self, save=False): opt = self.gather_options() opt.isTrain = self.isTrain # train or test self.print_options(opt) if opt.isTrain: self.save_options(opt) # Set semantic_nc based on the option. # This will be convenient in many places if opt.model == 'smis' and opt.dataset_mode == 'cityscapes': opt.semantic_nc = opt.label_nc + \ (1 if opt.contain_dontcare_label else 0) else: opt.semantic_nc = opt.label_nc + \ (1 if opt.contain_dontcare_label else 0) \ + (0 if opt.no_instance else 1) # set gpu ids str_ids = opt.gpu_ids.split(',') opt.gpu_ids = [] for str_id in str_ids: id = int(str_id) if id >= 0: opt.gpu_ids.append(id) if len(opt.gpu_ids) > 0: torch.cuda.set_device(opt.gpu_ids[0]) assert len(opt.gpu_ids) == 0 or opt.batchSize % len(opt.gpu_ids) == 0, \ "Batch size %d is wrong. It must be a multiple of # GPUs %d." \ % (opt.batchSize, len(opt.gpu_ids)) self.opt = opt return self.opt ================================================ FILE: options/test_options.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). """ from .base_options import BaseOptions class TestOptions(BaseOptions): def initialize(self, parser): BaseOptions.initialize(self, parser) parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.') parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? 
set to latest to use latest cached model') parser.add_argument('--how_many', type=int, default=float("inf"), help='how many test images to run') parser.set_defaults(preprocess_mode='scale_width_and_crop', crop_size=256, load_size=256, display_winsize=256) parser.set_defaults(serial_batches=True) parser.set_defaults(no_flip=True) parser.set_defaults(phase='test') self.isTrain = False return parser ================================================ FILE: options/train_options.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). """ from .base_options import BaseOptions class TrainOptions(BaseOptions): def initialize(self, parser): BaseOptions.initialize(self, parser) # for displays parser.add_argument('--display_freq', type=int, default=200, help='frequency of showing training results on screen') parser.add_argument('--print_freq', type=int, default=200, help='frequency of showing training results on console') parser.add_argument('--many_test_freq', type=int, default=1000, help='frequency of showing training results on console') parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results') parser.add_argument('--save_epoch_freq', type=int, default=50, help='frequency of saving checkpoints at the end of epochs') parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') parser.add_argument('--debug', action='store_true', help='only do one epoch and displays at each iteration') parser.add_argument('--tf_log', action='store_true', help='if specified, use tensorboard logging. 
Requires tensorflow installed') # for training parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') parser.add_argument('--niter', type=int, default=10, help='# of iter at starting learning rate. This is NOT the total #epochs. Totla #epochs is niter + niter_decay') parser.add_argument('--niter_decay', type=int, default=5, help='# of iter to linearly decay learning rate to zero') parser.add_argument('--optimizer', type=str, default='adam') parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') parser.add_argument('--beta2', type=float, default=0.999, help='momentum term of adam') parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam') parser.add_argument('--D_steps_per_G', type=int, default=1, help='number of discriminator iterations per generator iterations.') # for discriminators parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer') parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss') parser.add_argument('--lambda_vgg', type=float, default=10.0, help='weight for vgg loss') parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss') parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss') parser.add_argument('--gan_mode', type=str, default='hinge', help='(ls|original|hinge)') parser.add_argument('--netD', type=str, default='multiscale', help='(n_layers|multiscale|image)') parser.add_argument('--no_TTUR', action='store_true', help='Use TTUR training scheme') parser.add_argument('--lambda_kld', type=float, default=0.05) # parser.add_argument('--', type=int, default=6) self.isTrain = 
True return parser ================================================ FILE: requirements.txt ================================================ torch>=1.0.0 torchvision dominate>=2.3.1 dill scikit-image ================================================ FILE: scripts/ade20k.sh ================================================ #python train.py --name ade20k_smis --dataset_mode ade20k --dataroot /home/zlxu/data/ADEChallengeData2016 --no_instance \ #--gpu_ids 0,1,2,3 --ngf 64 --batchSize 4 --use_vae --niter 100 --niter_decay 100 --model smis --netE conv --netG ADE20K # python test.py --name ade20k_smis --dataset_mode ade20k --dataroot /home/zlxu/data/ADEChallengeData2016 --no_instance \ --gpu_ids 0 --ngf 64 --batchSize 2 --model smis --netG ADE20K ================================================ FILE: scripts/cityscapes.sh ================================================ #!/usr/bin/env bash #python train.py --name cityscapes_smis --dataset_mode cityscapes --dataroot /home/zlxu/data/cityscapes \ #--gpu_ids 0,1,2,3 --ngf 280 --batchSize 4 --niter 100 --niter_decay 100 --netG Cityscapes --model smis --netE conv --use_vae python test.py --name cityscapes_smis --dataset_mode cityscapes --dataroot /home/zlxu/data/cityscapes \ --gpu_ids 1 --ngf 280 --batchSize 4 --netG Cityscapes --model smis ================================================ FILE: scripts/deepfashion.sh ================================================ #python train.py --name deepfashion_smis --dataset_mode deepfashion --dataroot /home/zlxu/data/deepfashion --no_instance \ #--gpu_ids 0,1,2,3 --ngf 160 --batchSize 8 --use_vae --niter 60 --niter_decay 40 --model smis --netE conv --netG deepfashion python test.py --name deepfashion_smis --dataset_mode deepfashion --dataroot /home/zlxu/data/deepfashion --no_instance \ --gpu_ids 1 --ngf 160 --batchSize 4 --model smis --netG deepfashion ================================================ FILE: test.py ================================================ """ Copyright (C) 2019 
NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). """ import os from collections import OrderedDict import data from options.test_options import TestOptions from models.pix2pix_model import Pix2pixModel from models.smis_model import SmisModel from util.visualizer import Visualizer from util import html from tqdm import tqdm opt = TestOptions().parse() # print(opt) dataloader = data.create_dataloader(opt) if opt.model == 'smis': model = SmisModel(opt) elif opt.model == 'pix2pix': model = Pix2pixModel(opt) model.eval() visualizer = Visualizer(opt) # create a webpage that summarizes the all results web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch)) webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch)) for i, data_i in tqdm(enumerate(dataloader)): generated = model(data_i, mode='inference') img_path = data_i['path'] for b in range(generated[0].shape[0]): if opt.test_times == 1: visuals = OrderedDict([('synthesized_image', generated[0][b])]) else: visuals = OrderedDict([('input_label', data_i['label'][b]), ('real_image', data_i['image'][b]), ]) for t in range(len(generated)): visuals['synthesized_image_' + str(t)] = generated[t][b] visualizer.save_images(webpage, visuals, img_path[b:b + 1]) webpage.save() ================================================ FILE: train.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
""" import sys from collections import OrderedDict from options.train_options import TrainOptions import data from util.iter_counter import IterationCounter from util.visualizer import Visualizer from trainers.pix2pix_trainer import Pix2PixTrainer # parse options opt = TrainOptions().parse() # print options to help debugging print(' '.join(sys.argv)) # load the dataset dataloader = data.create_dataloader(opt) # create trainer for our model trainer = Pix2PixTrainer(opt) # create tool for counting iterations iter_counter = IterationCounter(opt, len(dataloader)) # create tool for visualization visualizer = Visualizer(opt) for epoch in iter_counter.training_epochs(): iter_counter.record_epoch_start(epoch) for i, data_i in enumerate(dataloader, start=iter_counter.epoch_iter): iter_counter.record_one_iteration() # Training # train generator if i % opt.D_steps_per_G == 0: trainer.run_generator_one_step(data_i) # train discriminator trainer.run_discriminator_one_step(data_i) # Visualizations if iter_counter.needs_printing(): losses = trainer.get_latest_losses() visualizer.print_current_errors(epoch, iter_counter.epoch_iter, losses, iter_counter.time_per_iter) visualizer.plot_current_errors(losses, iter_counter.total_steps_so_far) if iter_counter.needs_displaying(): visuals = OrderedDict([('input_label', data_i['label']), ('synthesized_image', trainer.get_latest_generated()), ('real_image', data_i['image'])]) visualizer.display_current_results(visuals, epoch, iter_counter.total_steps_so_far) if iter_counter.needs_saving(): print('saving the latest model (epoch %d, total_steps %d)' % (epoch, iter_counter.total_steps_so_far)) trainer.save('latest') iter_counter.record_current_iter() trainer.update_learning_rate(epoch) iter_counter.record_epoch_end() if epoch % opt.save_epoch_freq == 0 or \ epoch == iter_counter.total_epochs: print('saving the model at the end of epoch %d, iters %d' % (epoch, iter_counter.total_steps_so_far)) trainer.save('latest') trainer.save(epoch) 
print('Training was successfully finished.') ================================================ FILE: trainers/__init__.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). """ ================================================ FILE: trainers/pix2pix_trainer.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). """ from models.networks.sync_batchnorm import DataParallelWithCallback from models.pix2pix_model import Pix2pixModel from models.smis_model import SmisModel import os class Pix2PixTrainer(): """ Trainer creates the model and optimizers, and uses them to updates the weights of the network while reporting losses and the latest visuals to visualize the progress in training. 
""" def __init__(self, opt): self.opt = opt if self.opt.model == 'pix2pix': self.pix2pix_model = Pix2pixModel(opt) elif self.opt.model == 'smis': self.pix2pix_model = SmisModel(opt) print(self.pix2pix_model) with open(os.path.join(opt.checkpoints_dir, opt.name, 'model.txt'), 'w') as f: f.write(self.pix2pix_model.__str__()) if len(opt.gpu_ids) > 0: self.pix2pix_model = DataParallelWithCallback(self.pix2pix_model, device_ids=opt.gpu_ids) self.pix2pix_model_on_one_gpu = self.pix2pix_model.module else: self.pix2pix_model_on_one_gpu = self.pix2pix_model self.generated = None if opt.isTrain: self.optimizer_G, self.optimizer_D = \ self.pix2pix_model_on_one_gpu.create_optimizers(opt) self.old_lr = opt.lr def run_generator_one_step(self, data): self.optimizer_G.zero_grad() g_losses, generated = self.pix2pix_model(data, mode='generator') g_loss = sum(g_losses.values()).mean() g_loss.backward() self.optimizer_G.step() self.g_losses = g_losses self.generated = generated def run_discriminator_one_step(self, data): self.optimizer_D.zero_grad() d_losses = self.pix2pix_model(data, mode='discriminator') d_loss = sum(d_losses.values()).mean() d_loss.backward() self.optimizer_D.step() self.d_losses = d_losses def clean_grad(self): self.optimizer_D.zero_grad() self.optimizer_G.zero_grad() def get_latest_losses(self): return {**self.g_losses, **self.d_losses} def get_latest_generated(self): return self.generated def update_learning_rate(self, epoch): self.update_learning_rate(epoch) def save(self, epoch): self.pix2pix_model_on_one_gpu.save(epoch) ################################################################## # Helper functions ################################################################## def update_learning_rate(self, epoch): if epoch > self.opt.niter: lrd = self.opt.lr / self.opt.niter_decay new_lr = self.old_lr - lrd else: new_lr = self.old_lr if new_lr != self.old_lr: if self.opt.no_TTUR: new_lr_G = new_lr new_lr_D = new_lr else: new_lr_G = new_lr / 2 new_lr_D = new_lr * 2 
            # Apply the (possibly TTUR-split) rates to every param group.
            for param_group in self.optimizer_D.param_groups:
                param_group['lr'] = new_lr_D
            for param_group in self.optimizer_G.param_groups:
                param_group['lr'] = new_lr_G
            print('update learning rate: %f -> %f' % (self.old_lr, new_lr))
            self.old_lr = new_lr


================================================
FILE: util/__init__.py
================================================
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""


================================================
FILE: util/coco.py
================================================
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""


def id2label(id):
    # Maps a raw COCO-Stuff label id to its class name. Raw id 182 is
    # 'unlabeled' (remapped to key 0); every other id is shifted up by one.
    if id == 182:
        id = 0
    else:
        id = id + 1
    labelmap = \
        {0: 'unlabeled', 1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorcycle',
         5: 'airplane', 6: 'bus', 7: 'train', 8: 'truck', 9: 'boat',
         10: 'traffic light', 11: 'fire hydrant', 12: 'street sign',
         13: 'stop sign', 14: 'parking meter', 15: 'bench', 16: 'bird',
         17: 'cat', 18: 'dog', 19: 'horse', 20: 'sheep', 21: 'cow',
         22: 'elephant', 23: 'bear', 24: 'zebra', 25: 'giraffe', 26: 'hat',
         27: 'backpack', 28: 'umbrella', 29: 'shoe', 30: 'eye glasses',
         31: 'handbag', 32: 'tie', 33: 'suitcase', 34: 'frisbee', 35: 'skis',
         36: 'snowboard', 37: 'sports ball', 38: 'kite', 39: 'baseball bat',
         40: 'baseball glove', 41: 'skateboard', 42: 'surfboard',
         43: 'tennis racket', 44: 'bottle', 45: 'plate', 46: 'wine glass',
         47: 'cup', 48: 'fork', 49: 'knife', 50: 'spoon', 51: 'bowl',
         52: 'banana', 53: 'apple', 54: 'sandwich', 55: 'orange',
         56: 'broccoli', 57: 'carrot', 58: 'hot dog', 59: 'pizza',
         60: 'donut', 61: 'cake', 62: 'chair', 63: 'couch',
         64: 'potted plant', 65: 'bed', 66: 'mirror', 67: 'dining table',
         68: 'window', 69: 'desk', 70: 'toilet', 71: 'door', 72: 'tv',
         73: 'laptop', 74: 'mouse', 75: 'remote',
76: 'keyboard', 77: 'cell phone', 78: 'microwave', 79: 'oven', 80: 'toaster', 81: 'sink', 82: 'refrigerator', 83: 'blender', 84: 'book', 85: 'clock', 86: 'vase', 87: 'scissors', 88: 'teddy bear', 89: 'hair drier', 90: 'toothbrush', 91: 'hair brush', # Last class of Thing 92: 'banner', # Beginning of Stuff 93: 'blanket', 94: 'branch', 95: 'bridge', 96: 'building-other', 97: 'bush', 98: 'cabinet', 99: 'cage', 100: 'cardboard', 101: 'carpet', 102: 'ceiling-other', 103: 'ceiling-tile', 104: 'cloth', 105: 'clothes', 106: 'clouds', 107: 'counter', 108: 'cupboard', 109: 'curtain', 110: 'desk-stuff', 111: 'dirt', 112: 'door-stuff', 113: 'fence', 114: 'floor-marble', 115: 'floor-other', 116: 'floor-stone', 117: 'floor-tile', 118: 'floor-wood', 119: 'flower', 120: 'fog', 121: 'food-other', 122: 'fruit', 123: 'furniture-other', 124: 'grass', 125: 'gravel', 126: 'ground-other', 127: 'hill', 128: 'house', 129: 'leaves', 130: 'light', 131: 'mat', 132: 'metal', 133: 'mirror-stuff', 134: 'moss', 135: 'mountain', 136: 'mud', 137: 'napkin', 138: 'net', 139: 'paper', 140: 'pavement', 141: 'pillow', 142: 'plant-other', 143: 'plastic', 144: 'platform', 145: 'playingfield', 146: 'railing', 147: 'railroad', 148: 'river', 149: 'road', 150: 'rock', 151: 'roof', 152: 'rug', 153: 'salad', 154: 'sand', 155: 'sea', 156: 'shelf', 157: 'sky-other', 158: 'skyscraper', 159: 'snow', 160: 'solid-other', 161: 'stairs', 162: 'stone', 163: 'straw', 164: 'structural-other', 165: 'table', 166: 'tent', 167: 'textile-other', 168: 'towel', 169: 'tree', 170: 'vegetable', 171: 'wall-brick', 172: 'wall-concrete', 173: 'wall-other', 174: 'wall-panel', 175: 'wall-stone', 176: 'wall-tile', 177: 'wall-wood', 178: 'water-other', 179: 'waterdrops', 180: 'window-blind', 181: 'window-other', 182: 'wood'} if id in labelmap: return labelmap[id] else: return 'unknown' ================================================ FILE: util/html.py ================================================ """ Copyright (C) 2019 NVIDIA 
Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). """ import datetime import dominate from dominate.tags import * import os class HTML: def __init__(self, web_dir, title, refresh=0): if web_dir.endswith('.html'): web_dir, html_name = os.path.split(web_dir) else: web_dir, html_name = web_dir, 'index.html' self.title = title self.web_dir = web_dir self.html_name = html_name self.img_dir = os.path.join(self.web_dir, 'images') if len(self.web_dir) > 0 and not os.path.exists(self.web_dir): os.makedirs(self.web_dir) if len(self.web_dir) > 0 and not os.path.exists(self.img_dir): os.makedirs(self.img_dir) self.doc = dominate.document(title=title) with self.doc: h1(datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y")) if refresh > 0: with self.doc.head: meta(http_equiv="refresh", content=str(refresh)) def get_image_dir(self): return self.img_dir def add_header(self, str): with self.doc: h3(str) def add_table(self, border=1): self.t = table(border=border, style="table-layout: fixed;") self.doc.add(self.t) def add_images(self, ims, txts, links, width=512): self.add_table() with self.t: with tr(): for im, txt, link in zip(ims, txts, links): with td(style="word-wrap: break-word;", halign="center", valign="top"): with p(): with a(href=os.path.join('images', link)): img(style="width:%dpx" % (width), src=os.path.join('images', im)) br() p(txt.encode('utf-8')) def save(self): html_file = os.path.join(self.web_dir, self.html_name) f = open(html_file, 'wt') f.write(self.doc.render()) f.close() if __name__ == '__main__': html = HTML('web/', 'test_html') html.add_header('hello world') ims = [] txts = [] links = [] for n in range(4): ims.append('image_%d.jpg' % n) txts.append('text_%d' % n) links.append('image_%d.jpg' % n) html.add_images(ims, txts, links) html.save() ================================================ FILE: util/iter_counter.py ================================================ """ 
Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). """ import os import time import numpy as np # Helper class that keeps track of training iterations class IterationCounter(): def __init__(self, opt, dataset_size): self.opt = opt self.dataset_size = dataset_size self.first_epoch = 1 self.total_epochs = opt.niter + opt.niter_decay self.epoch_iter = 0 # iter number within each epoch self.iter_record_path = os.path.join(self.opt.checkpoints_dir, self.opt.name, 'iter.txt') if opt.isTrain and opt.continue_train: try: self.first_epoch, self.epoch_iter = np.loadtxt( self.iter_record_path, delimiter=',', dtype=int) print('Resuming from epoch %d at iteration %d' % (self.first_epoch, self.epoch_iter)) except: print('Could not load iteration record at %s. Starting from beginning.' % self.iter_record_path) self.total_steps_so_far = (self.first_epoch - 1) * dataset_size + self.epoch_iter # return the iterator of epochs for the training def training_epochs(self): return range(self.first_epoch, self.total_epochs + 1) def record_epoch_start(self, epoch): self.epoch_start_time = time.time() self.epoch_iter = 0 self.last_iter_time = time.time() self.current_epoch = epoch def record_one_iteration(self): current_time = time.time() # the last remaining batch is dropped (see data/__init__.py), # so we can assume batch size is always opt.batchSize self.time_per_iter = (current_time - self.last_iter_time) / self.opt.batchSize self.last_iter_time = current_time self.total_steps_so_far += self.opt.batchSize self.epoch_iter += self.opt.batchSize def record_epoch_end(self): current_time = time.time() self.time_per_epoch = current_time - self.epoch_start_time print('End of epoch %d / %d \t Time Taken: %d sec' % (self.current_epoch, self.total_epochs, self.time_per_epoch)) if self.current_epoch % self.opt.save_epoch_freq == 0: np.savetxt(self.iter_record_path, (self.current_epoch 
+ 1, 0), delimiter=',', fmt='%d') print('Saved current iteration count at %s.' % self.iter_record_path) def record_current_iter(self): np.savetxt(self.iter_record_path, (self.current_epoch, self.epoch_iter), delimiter=',', fmt='%d') print('Saved current iteration count at %s.' % self.iter_record_path) def needs_saving(self): return (self.total_steps_so_far % self.opt.save_latest_freq) < self.opt.batchSize def needs_printing(self): return (self.total_steps_so_far % self.opt.print_freq) < self.opt.batchSize def needs_displaying(self): return (self.total_steps_so_far % self.opt.display_freq) < self.opt.batchSize def needs_many_test(self): return (self.total_steps_so_far % self.opt.many_test_freq) < self.opt.batchSize ================================================ FILE: util/util.py ================================================ """ Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
""" import re import importlib import torch from argparse import Namespace import numpy as np from PIL import Image import os import argparse import dill as pickle import util.coco def save_obj(obj, name): with open(name, 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name): with open(name, 'rb') as f: return pickle.load(f) # returns a configuration for creating a generator # |default_opt| should be the opt of the current experiment # |**kwargs|: if any configuration should be overriden, it can be specified here def copyconf(default_opt, **kwargs): conf = argparse.Namespace(**vars(default_opt)) for key in kwargs: print(key, kwargs[key]) setattr(conf, key, kwargs[key]) return conf def tile_images(imgs, picturesPerRow=4): """ Code borrowed from https://stackoverflow.com/questions/26521365/cleanly-tile-numpy-array-of-images-stored-in-a-flattened-1d-format/26521997 """ # Padding if imgs.shape[0] % picturesPerRow == 0: rowPadding = 0 else: rowPadding = picturesPerRow - imgs.shape[0] % picturesPerRow if rowPadding > 0: imgs = np.concatenate([imgs, np.zeros((rowPadding, *imgs.shape[1:]), dtype=imgs.dtype)], axis=0) # Tiling Loop (The conditionals are not necessary anymore) tiled = [] for i in range(0, imgs.shape[0], picturesPerRow): tiled.append(np.concatenate([imgs[j] for j in range(i, i + picturesPerRow)], axis=1)) tiled = np.concatenate(tiled, axis=0) return tiled # Converts a Tensor into a Numpy array # |imtype|: the desired type of the converted numpy array def tensor2im(image_tensor, imtype=np.uint8, normalize=True, tile=False): if isinstance(image_tensor, list): image_numpy = [] for i in range(len(image_tensor)): image_numpy.append(tensor2im(image_tensor[i], imtype, normalize)) return image_numpy if image_tensor.dim() == 4: # transform each image in the batch images_np = [] for b in range(image_tensor.size(0)): one_image = image_tensor[b] one_image_np = tensor2im(one_image) images_np.append(one_image_np.reshape(1, *one_image_np.shape)) 
images_np = np.concatenate(images_np, axis=0) if tile: images_tiled = tile_images(images_np) return images_tiled else: return images_np if image_tensor.dim() == 2: image_tensor = image_tensor.unsqueeze(0) image_numpy = image_tensor.detach().cpu().float().numpy() if normalize: image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 else: image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0 image_numpy = np.clip(image_numpy, 0, 255) if image_numpy.shape[2] == 1: image_numpy = image_numpy[:, :, 0] return image_numpy.astype(imtype) # Converts a one-hot tensor into a colorful label map def tensor2label(label_tensor, n_label, imtype=np.uint8, tile=False): if label_tensor.dim() == 4: # transform each image in the batch images_np = [] for b in range(label_tensor.size(0)): one_image = label_tensor[b] one_image_np = tensor2label(one_image, n_label, imtype) images_np.append(one_image_np.reshape(1, *one_image_np.shape)) images_np = np.concatenate(images_np, axis=0) if tile: images_tiled = tile_images(images_np) return images_tiled else: images_np = images_np[0] return images_np if label_tensor.dim() == 1: return np.zeros((64, 64, 3), dtype=np.uint8) if n_label == 0: return tensor2im(label_tensor, imtype) label_tensor = label_tensor.cpu().float() if label_tensor.size()[0] > 1: # label_tensor = label_tensor.max(0, keepdim=True)[1] label_tensor = label_tensor[0].unsqueeze(dim=0) label_tensor = Colorize(n_label)(label_tensor) label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0)) result = label_numpy.astype(imtype) return result def save_image(image_numpy, image_path, create_dir=False): if create_dir: os.makedirs(os.path.dirname(image_path), exist_ok=True) if len(image_numpy.shape) == 2: image_numpy = np.expand_dims(image_numpy, axis=2) if image_numpy.shape[2] == 1: image_numpy = np.repeat(image_numpy, 3, 2) image_pil = Image.fromarray(image_numpy) # save to png image_pil.save(image_path.replace('.jpg', '.png')) def mkdirs(paths): if isinstance(paths, 
list) and not isinstance(paths, str): for path in paths: mkdir(path) else: mkdir(paths) def mkdir(path): if not os.path.exists(path): os.makedirs(path) def atoi(text): return int(text) if text.isdigit() else text def natural_keys(text): ''' alist.sort(key=natural_keys) sorts in human order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in the comments) ''' return [atoi(c) for c in re.split('(\d+)', text)] def natural_sort(items): items.sort(key=natural_keys) def str2bool(v): if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Boolean value expected.') def find_class_in_module(target_cls_name, module): target_cls_name = target_cls_name.replace('_', '').lower() clslib = importlib.import_module(module) cls = None for name, clsobj in clslib.__dict__.items(): if name.lower() == target_cls_name: cls = clsobj if cls is None: print("In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, target_cls_name)) exit(0) return cls def save_network(net, label, epoch, opt): save_filename = '%s_net_%s.pth' % (epoch, label) save_path = os.path.join(opt.checkpoints_dir, opt.name, save_filename) torch.save(net.cpu().state_dict(), save_path) if len(opt.gpu_ids) and torch.cuda.is_available(): net.cuda() def load_network(net, label, epoch, opt): save_filename = '%s_net_%s.pth' % (epoch, label) save_dir = os.path.join(opt.checkpoints_dir, opt.name) save_path = os.path.join(save_dir, save_filename) weights = torch.load(save_path) net.load_state_dict(weights) return net ############################################################################### # Code from # https://github.com/ycszen/pytorch-seg/blob/master/transform.py # Modified so it complies with the Citscape label map colors ############################################################################### def uint82bin(n, count=8): """returns 
    the binary of integer n, count refers to amount of bits"""
    return ''.join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])


def labelcolormap(N):
    # Build an (N, 3) uint8 color table for label ids 0..N-1.
    if N == 35:  # cityscape
        # Fixed official Cityscapes palette.
        cmap = np.array([(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (111, 74, 0), (81, 0, 81),
                         (128, 64, 128), (244, 35, 232), (250, 170, 160), (230, 150, 140), (70, 70, 70), (102, 102, 156), (190, 153, 153),
                         (180, 165, 180), (150, 100, 100), (150, 120, 90), (153, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0),
                         (107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),
                         (0, 60, 100), (0, 0, 90), (0, 0, 110), (0, 80, 100), (0, 0, 230), (119, 11, 32), (0, 0, 142)],
                        dtype=np.uint8)
    else:
        # Procedural palette: spread the bits of (i + 1) across the R/G/B
        # channels, 3 bits at a time, XOR-folded from MSB down, so nearby
        # ids get visually distinct colors.
        cmap = np.zeros((N, 3), dtype=np.uint8)
        for i in range(N):
            r, g, b = 0, 0, 0
            id = i + 1  # let's give 0 a color
            for j in range(7):
                str_id = uint82bin(id)
                r = r ^ (np.uint8(str_id[-1]) << (7 - j))
                g = g ^ (np.uint8(str_id[-2]) << (7 - j))
                b = b ^ (np.uint8(str_id[-3]) << (7 - j))
                id = id >> 3
            cmap[i, 0] = r
            cmap[i, 1] = g
            cmap[i, 2] = b

        if N == 182:  # COCO
            # Override a few classes with conventional colors.
            important_colors = {
                'sea': (54, 62, 167),
                'sky-other': (95, 219, 255),
                'tree': (140, 104, 47),
                'clouds': (170, 170, 170),
                'grass': (29, 195, 49)
            }
            for i in range(N):
                name = util.coco.id2label(i)
                if name in important_colors:
                    color = important_colors[name]
                    cmap[i] = np.array(list(color))
    return cmap


class Colorize(object):
    # Callable that converts a (1, H, W) integer label map into a
    # (3, H, W) uint8 color image using labelcolormap.
    def __init__(self, n=35):
        self.cmap = labelcolormap(n)
        self.n = n
        self.cmap = torch.from_numpy(self.cmap[:n])

    def __call__(self, gray_image):
        size = gray_image.size()
        color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)

        # Paint every pixel belonging to `label` with its palette color.
        for label in range(0, len(self.cmap)):
            mask = (label == gray_image[0]).cpu()
            color_image[0][mask] = self.cmap[label][0]
            color_image[1][mask] = self.cmap[label][1]
            color_image[2][mask] = self.cmap[label][2]

        return color_image


================================================
FILE: util/visualizer.py
================================================
"""
Copyright (C)
2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import os
import ntpath
import time
from . import util
from . import html
import scipy.misc
import numpy as np
import torch
try:
    from StringIO import StringIO  # Python 2.7
except ImportError:
    from io import BytesIO  # Python 3.x


class Visualizer():
    # Routes losses and image results to TensorBoard (tf_log), an HTML page
    # (use_html), and a plain-text loss log.

    def __init__(self, opt):
        self.opt = opt
        self.tf_log = opt.isTrain and opt.tf_log
        self.use_html = opt.isTrain and not opt.no_html
        self.win_size = opt.display_winsize
        self.name = opt.name
        if self.tf_log:
            # Imported lazily so tensorflow is only required when tf_log is on.
            import tensorflow as tf
            self.tf = tf
            self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, 'logs')
            self.writer = tf.summary.FileWriter(self.log_dir)

        if self.use_html:
            self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            print('create web directory %s...' % self.web_dir)
            util.mkdirs([self.web_dir, self.img_dir])
        if opt.isTrain:
            self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
            with open(self.log_name, "a") as log_file:
                now = time.strftime("%c")
                log_file.write('================ Training Loss (%s) ================\n' % now)

    # |visuals|: dictionary of images to display or save
    def display_current_results(self, visuals, epoch, step):

        ## convert tensors to numpy arrays
        visuals = self.convert_visuals_to_numpy(visuals)

        if self.tf_log:  # show images in tensorboard output
            img_summaries = []
            for label, image_numpy in visuals.items():
                # Write the image to a string
                try:
                    s = StringIO()
                except:
                    s = BytesIO()
                if len(image_numpy.shape) >= 4:
                    image_numpy = image_numpy[0]
                # NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2;
                # this path only works with old SciPy — confirm pinned version.
                scipy.misc.toimage(image_numpy).save(s, format="jpeg")
                # Create an Image object
                img_sum = self.tf.Summary.Image(encoded_image_string=s.getvalue(),
                                                height=image_numpy.shape[0],
                                                width=image_numpy.shape[1])
                # Create a Summary value
                img_summaries.append(self.tf.Summary.Value(tag=label, image=img_sum))

            # Create and write Summary
            summary = self.tf.Summary(value=img_summaries)
            self.writer.add_summary(summary, step)

        if self.use_html:  # save images to a html file
            for label, image_numpy in visuals.items():
                # print(label, image_numpy.shape)
                if isinstance(image_numpy, list):
                    for i in range(len(image_numpy)):
                        img_path = os.path.join(self.img_dir, 'epoch%.3d_iter%.3d_%s_%d.png' % (epoch, step, label, i))
                        util.save_image(image_numpy[i], img_path)
                else:
                    img_path = os.path.join(self.img_dir, 'epoch%.3d_iter%.3d_%s.png' % (epoch, step, label))
                    if len(image_numpy.shape) >= 4:
                        image_numpy = image_numpy[0]
                    util.save_image(image_numpy, img_path)

            # update website: rebuild the index from the newest epoch down.
            webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=5)
            for n in range(epoch, 0, -1):
                webpage.add_header('epoch [%d]' % n)
                ims = []
                txts = []
                links = []

                for label, image_numpy in visuals.items():
                    if isinstance(image_numpy, list):
                        for i in range(len(image_numpy)):
                            img_path = 'epoch%.3d_iter%.3d_%s_%d.png' % (n, step, label, i)
                            ims.append(img_path)
                            txts.append(label+str(i))
                            links.append(img_path)
                    else:
                        img_path = 'epoch%.3d_iter%.3d_%s.png' % (n, step, label)
                        ims.append(img_path)
                        txts.append(label)
                        links.append(img_path)
                # Split long rows in two so the table stays readable.
                if len(ims) < 10:
                    webpage.add_images(ims, txts, links, width=self.win_size)
                else:
                    num = int(round(len(ims)/2.0))
                    webpage.add_images(ims[:num], txts[:num], links[:num], width=self.win_size)
                    webpage.add_images(ims[num:], txts[num:], links[num:], width=self.win_size)
            webpage.save()

    # errors: dictionary of error labels and values
    def plot_current_errors(self, errors, step):
        if self.tf_log:
            for tag, value in errors.items():
                value = value.mean().float()
                summary = self.tf.Summary(value=[self.tf.Summary.Value(tag=tag, simple_value=value)])
                self.writer.add_summary(summary, step)

    # errors: same format as |errors| of plotCurrentErrors
    def print_current_errors(self, epoch, i, errors, t):
        message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
        for k, v in errors.items():
            #print(v)
            #if v != 0:
            v = v.mean().float()
            message += '%s: %.3f ' % (k, v)

        print(message)
        with open(self.log_name, "a") as log_file:
            log_file.write('%s\n' % message)

    def convert_visuals_to_numpy(self, visuals):
        # Label maps get colorized; everything else goes through tensor2im.
        for key, t in visuals.items():
            tile = self.opt.batchSize > 8
            if 'input_label' in key:
                t = util.tensor2label(t, self.opt.label_nc + 2, tile=tile)
            else:
                t = util.tensor2im(t, tile=tile)
            visuals[key] = t
        return visuals

    # save image to the disk
    def save_images(self, webpage, visuals, image_path):
        visuals = self.convert_visuals_to_numpy(visuals)

        image_dir = webpage.get_image_dir()
        short_path = ntpath.basename(image_path[0])
        name = os.path.splitext(short_path)[0]

        webpage.add_header(name)
        ims = []
        txts = []
        links = []
        length = len(visuals)
        i = 0
        if self.opt.dataset_mode == 'cityscapes':
            image_len = 512
        else:
            image_len = 256
        # NOTE(review): the strip height is hard-coded to 256 even for
        # cityscapes (512-wide panels) — confirm all rendered images are
        # 256 px tall, otherwise the assignment below will fail.
        whole_image = np.zeros((256, length * image_len, 3), dtype=np.uint8)
        for label, image_numpy in visuals.items():
            whole_image[:, (i * image_len): (i + 1) * image_len, :] = image_numpy
            i += 1
        image_name = os.path.join('%s.png' % (name))
        save_path = os.path.join(image_dir, image_name)
        util.save_image(whole_image, save_path, create_dir=True)