[
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "<div id=\"top\"></div>\n<!--\n*** Thanks for checking out the Best-README-Template. If you have a suggestion\n*** that would make this better, please fork the repo and create a pull request\n*** or simply open an issue with the tag \"enhancement\".\n*** Don't forget to give the project a star!\n*** Thanks again! Now go create something AMAZING! :D\n-->\n\n\n\n<!-- PROJECT SHIELDS -->\n<!--\n*** I'm using markdown \"reference style\" links for readability.\n*** Reference links are enclosed in brackets [ ] instead of parentheses ( ).\n*** See the bottom of this document for the declaration of the reference variables\n*** for contributors-url, forks-url, etc. This is an optional, concise syntax you may use.\n*** https://www.markdownguide.org/basic-syntax/#reference-style-links\n-->\n<!-- [![Contributors][contributors-shield]][contributors-url]\n[![Forks][forks-shield]][forks-url]\n[![Stargazers][stars-shield]][stars-url]\n[![Issues][issues-shield]][issues-url]\n[![MIT License][license-shield]][license-url]\n[![LinkedIn][linkedin-shield]][linkedin-url] -->\n\n\n\n<!-- PROJECT LOGO -->\n<br />\n<!-- <div align=\"center\">\n  <a href=\"https://github.com/othneildrew/Best-README-Template\">\n    <img src=\"images/logo.png\" alt=\"Logo\" width=\"80\" height=\"80\">\n  </a>\n\n  <h3 align=\"center\">Best-README-Template</h3>\n\n  <p align=\"center\">\n    An awesome README template to jumpstart your projects!\n    <br />\n    <a href=\"https://github.com/othneildrew/Best-README-Template\"><strong>Explore the docs »</strong></a>\n    <br />\n    <br />\n    <a href=\"https://github.com/othneildrew/Best-README-Template\">View Demo</a>\n    ·\n    <a href=\"https://github.com/othneildrew/Best-README-Template/issues\">Report Bug</a>\n    ·\n    <a href=\"https://github.com/othneildrew/Best-README-Template/issues\">Request Feature</a>\n  </p>\n</div> -->\n\n\n\n<!-- TABLE OF CONTENTS -->\n<!-- <details>\n  <summary>Table of Contents</summary>\n  <ol>\n    <li>\n      <a href=\"#about-the-project\">CAST</a>\n      <ul>\n        <li><a href=\"#built-with\">Built With</a></li>\n      </ul>\n    </li>\n    <li>\n      <a href=\"#getting-started\">Getting Started</a>\n      <ul>\n        <li><a href=\"#prerequisites\">Prerequisites</a></li>\n        <li><a href=\"#installation\">Installation</a></li>\n      </ul>\n    </li>\n    <li><a href=\"#usage\">Usage</a></li>\n    <li><a href=\"#roadmap\">Roadmap</a></li>\n    <li><a href=\"#contributing\">Contributing</a></li>\n    <li><a href=\"#license\">License</a></li>\n    <li><a href=\"#contact\">Contact</a></li>\n    <li><a href=\"#acknowledgments\">Acknowledgments</a></li>\n  </ol>\n</details> -->\n\n\n\n<!-- ABOUT THE PROJECT -->\n## Domain Enhanced Arbitrary Image Style Transfer via Contrastive Learning (CAST) <br> A Unified Arbitrary Style Transfer Framework via Adaptive Contrastive Learning (UCAST)\n\n<!-- ![teaser](./Images/teaser.png) -->\n![teaser](./Images/teaser.png)\n\n\nWe provide our PyTorch implementation of the paper ''Domain Enhanced Arbitrary Image Style Transfer via Contrastive Learning''(SIGGRAPH 2022) , which is a simple yet powerful model for arbitrary image style transfer, and ''A Unified Arbitrary Style Transfer Framework via Adaptive Contrastive Learning''(ACM Transactions on Graphics) , which is a improved arbitrary style style transfer method.\n\nIn this work, we tackle the challenging problem of arbitrary image style transfer using a novel style feature representation learning method.\nA suitable style representation, as a key component in image stylization tasks, is essential to achieve satisfactory results.\nExisting deep neural network based approaches achieve reasonable results with the guidance from second-order statistics such as Gram matrix of content features.\nHowever, they do not leverage sufficient style information, which results in artifacts such as local distortions and style inconsistency.\nTo address these issues, we propose to learn style representation directly from image features instead of their second-order statistics, by analyzing the similarities and differences between multiple styles and considering the style distribution.\n\nFor details see the papers [CAST](http://arxiv.org/abs/2205.09542) , [UCAST](https://arxiv.org/abs/2303.12710), and the [video](https://youtu.be/3RG2yjLKTus)\n\n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n\n\n\n<!-- ### Built With -->\n<!-- \nThis section should list any major frameworks/libraries used to bootstrap your project. Leave any add-ons/plugins for the acknowledgements section. Here are a few examples.\n\n* [Next.js](https://nextjs.org/)\n* [React.js](https://reactjs.org/)\n* [Vue.js](https://vuejs.org/)\n* [Angular](https://angular.io/)\n* [Svelte](https://svelte.dev/)\n* [Laravel](https://laravel.com)\n* [Bootstrap](https://getbootstrap.com)\n* [JQuery](https://jquery.com)\n\n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n -->\n\n\n<!-- GETTING STARTED -->\n## Getting Started\n\n### Prerequisites\n\nPython 3.6 or above.\n\nPyTorch 1.6 or above\n\nFor packages, see requirements.txt.\n\n  ```sh\n  pip install -r requirements.txt\n  ```\n\n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n\n### Installation\n\n   Clone the repo\n   ```sh\n   git clone https://github.com/zyxElsa/CAST_pytorch.git\n   ```\n\n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n\n### Datasets\n\n   Then put your content images in ./datasets/{datasets_name}/testA, and style images in ./datasets/{datasets_name}/testB.\n   \n   Example directory hierarchy:\n   ```sh\n      CAST_pytorch\n      |--- datasets\n             |--- {datasets_name}\n                   |--- trainA\n                   |--- trainB\n                   |--- testA\n                   |--- testB\n                   \n      Then, call --dataroot ./datasets/{datasets_name}\n   ```\n\n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n\n### Train\n\n   Train the CAST model:\n   ```sh\n   python train.py --dataroot ./datasets/{dataset_name} --name {model_name}\n   ```\n   \n   The pretrained style classification model is saved at ./models/style_vgg.pth.\n   \n   Google Drive: Check [here](https://drive.google.com/file/d/12JKlL6QsVWkz6Dag54K59PAZigFBS6PQ/view?usp=sharing)\n   \n   The pretrained content encoder is saved at ./models/vgg_normalised.pth.\n   \n   Google Drive: Check [here](https://drive.google.com/file/d/1DKYRWJUKbmrvEba56tuihy1N6VrNZFwl/view?usp=sharing)\n   \n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n\n### Test\n\n   Test the CAST or UCAST model:\n   \n   ```sh\n   python test.py --dataroot ./datasets/{dataset_name} --name {model_name}\n   ```\n   \n   The pretrained model is saved at ./checkpoints/CAST_model/*.pth.\n   \n   BaiduNetdisk: Check [CAST model](https://pan.baidu.com/s/12oPk3195fntMEHdlsHNwkQ) (passwd：cast) \n   \n   Google Drive: Download [CAST model](https://drive.google.com/file/d/11dZqu95QfnAgkzgR1NTJfQutz8JlwRY8/view?usp=sharing) and [UCAST model](https://drive.google.com/file/d/1rU8haiPG2BDhh5BNSwngjMKBKdutDYTJ/view?usp=sharing) (for video style transfer).\n\n   \n   \n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n\n\n### Citation\n   \n   ```sh\n   @inproceedings{zhang2020cast,\n   author = {Zhang, Yuxin and Tang, Fan and Dong, Weiming and Huang, Haibin and Ma, Chongyang and Lee, Tong-Yee and Xu, Changsheng},\n   title = {Domain Enhanced Arbitrary Image Style Transfer via Contrastive Learning},\n   booktitle = {ACM SIGGRAPH},\n   year = {2022}}\n   ```\n\n   ```sh\n  @article{zhang2023unified,\n    title={A Unified Arbitrary Style Transfer Framework via Adaptive Contrastive Learning},\n    author={Zhang, Yuxin and Tang, Fan and Dong, Weiming and Huang, Haibin and Ma, Chongyang and Lee, Tong-Yee and Xu, Changsheng},\n    journal={ACM Transactions on Graphics},\n    year={2023},\n    publisher={ACM New York, NY}\n  }\n   ```\n   \n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n\n\n\n<!-- \n<!-- USAGE EXAMPLES -->\n<!-- ## Usage\n\nUse this space to show useful examples of how a project can be used. Additional screenshots, code examples and demos work well in this space. You may also link to more resources.\n\n_For more examples, please refer to the [Documentation](https://example.com)_\n\n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p> -->\n\n\n\n<!-- ROADMAP -->\n<!-- ## Roadmap\n\n- [x] Add Changelog\n- [x] Add back to top links\n- [ ] Add Additional Templates w/ Examples\n- [ ] Add \"components\" document to easily copy & paste sections of the readme\n- [ ] Multi-language Support\n    - [ ] Chinese\n    - [ ] Spanish\n\nSee the [open issues](https://github.com/othneildrew/Best-README-Template/issues) for a full list of proposed features (and known issues).\n\n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p> -->\n\n\n\n<!-- CONTRIBUTING -->\n<!-- ## Contributing -->\n\n<!-- Contributions are what make the open source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**.\n\nIf you have a suggestion that would make this better, please fork the repo and create a pull request. You can also simply open an issue with the tag \"enhancement\".\nDon't forget to give the project a star! Thanks again!\n\n1. Fork the Project\n2. Create your Feature Branch (`git checkout -b feature/AmazingFeature`)\n3. Commit your Changes (`git commit -m 'Add some AmazingFeature'`)\n4. Push to the Branch (`git push origin feature/AmazingFeature`)\n5. Open a Pull Request\n -->\n<!-- <p align=\"right\">(<a href=\"#top\">back to top</a>)</p> -->\n\n\n\n\n<!-- LICENSE -->\n<!-- ## License -->\n<!-- \nDistributed under the MIT License. See `LICENSE.txt` for more information.\n -->\n<!-- <p align=\"right\">(<a href=\"#top\">back to top</a>)</p> -->\n\n\n\n<!-- CONTACT -->\n## Contact\n\nPlease feel free to open an issue or contact us personally if you have questions, need help, or need explanations. Write to one of the following email addresses, and maybe put one other in the cc:\n\nzhangyuxin2020@ia.ac.cn\n\n\n<!-- \nYour Name - [@your_twitter](https://twitter.com/your_username) - email@example.com\n\nProject Link: [https://github.com/your_username/repo_name](https://github.com/your_username/repo_name)\n -->\n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n\n\n\n<!-- ACKNOWLEDGMENTS -->\n<!-- ## Acknowledgments -->\n<!-- \nUse this space to list resources you find helpful and would like to give credit to. I've included a few of my favorites to kick things off!\n\n* [Choose an Open Source License](https://choosealicense.com)\n* [GitHub Emoji Cheat Sheet](https://www.webpagefx.com/tools/emoji-cheat-sheet)\n* [Malven's Flexbox Cheatsheet](https://flexbox.malven.co/)\n* [Malven's Grid Cheatsheet](https://grid.malven.co/)\n* [Img Shields](https://shields.io)\n* [GitHub Pages](https://pages.github.com)\n* [Font Awesome](https://fontawesome.com)\n* [React Icons](https://react-icons.github.io/react-icons/search) -->\n\n<!-- <p align=\"right\">(<a href=\"#top\">back to top</a>)</p> -->\n\n\n\n<!-- MARKDOWN LINKS & IMAGES -->\n<!-- https://www.markdownguide.org/basic-syntax/#reference-style-links -->\n[contributors-shield]: https://img.shields.io/github/contributors/othneildrew/Best-README-Template.svg?style=for-the-badge\n[contributors-url]: https://github.com/othneildrew/Best-README-Template/graphs/contributors\n[forks-shield]: https://img.shields.io/github/forks/othneildrew/Best-README-Template.svg?style=for-the-badge\n[forks-url]: https://github.com/othneildrew/Best-README-Template/network/members\n[stars-shield]: https://img.shields.io/github/stars/othneildrew/Best-README-Template.svg?style=for-the-badge\n[stars-url]: https://github.com/othneildrew/Best-README-Template/stargazers\n[issues-shield]: https://img.shields.io/github/issues/othneildrew/Best-README-Template.svg?style=for-the-badge\n[issues-url]: https://github.com/othneildrew/Best-README-Template/issues\n[license-shield]: https://img.shields.io/github/license/othneildrew/Best-README-Template.svg?style=for-the-badge\n[license-url]: https://github.com/othneildrew/Best-README-Template/blob/master/LICENSE.txt\n[linkedin-shield]: https://img.shields.io/badge/-LinkedIn-black.svg?style=for-the-badge&logo=linkedin&colorB=555\n[linkedin-url]: https://linkedin.com/in/othneildrew\n[product-screenshot]: images/screenshot.png\n"
  },
  {
    "path": "data/__init__.py",
    "content": "\"\"\"This package includes all the modules related to data loading and preprocessing\n\n To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.\n You need to implement four functions:\n    -- <__init__>:                      initialize the class, first call BaseDataset.__init__(self, opt).\n    -- <__len__>:                       return the size of dataset.\n    -- <__getitem__>:                   get a data point from data loader.\n    -- <modify_commandline_options>:    (optionally) add dataset-specific options and set default options.\n\nNow you can use the dataset class by specifying flag '--dataset_mode dummy'.\nSee our template dataset class 'template_dataset.py' for more details.\n\"\"\"\nimport importlib\nimport torch.utils.data\nfrom data.base_dataset import BaseDataset\n\n\ndef find_dataset_using_name(dataset_name):\n    \"\"\"Import the module \"data/[dataset_name]_dataset.py\".\n\n    In the file, the class called DatasetNameDataset() will\n    be instantiated. It has to be a subclass of BaseDataset,\n    and it is case-insensitive.\n    \"\"\"\n    dataset_filename = \"data.\" + dataset_name + \"_dataset\"\n    datasetlib = importlib.import_module(dataset_filename)\n\n    dataset = None\n    target_dataset_name = dataset_name.replace('_', '') + 'dataset'\n    for name, cls in datasetlib.__dict__.items():\n        if name.lower() == target_dataset_name.lower() \\\n           and issubclass(cls, BaseDataset):\n            dataset = cls\n\n    if dataset is None:\n        raise NotImplementedError(\"In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase.\" % (dataset_filename, target_dataset_name))\n\n    return dataset\n\n\ndef get_option_setter(dataset_name):\n    \"\"\"Return the static method <modify_commandline_options> of the dataset class.\"\"\"\n    dataset_class = find_dataset_using_name(dataset_name)\n    return dataset_class.modify_commandline_options\n\n\ndef create_dataset(opt):\n    \"\"\"Create a dataset given the option.\n\n    This function wraps the class CustomDatasetDataLoader.\n        This is the main interface between this package and 'train.py'/'test.py'\n\n    Example:\n        >>> from data import create_dataset\n        >>> dataset = create_dataset(opt)\n    \"\"\"\n    data_loader = CustomDatasetDataLoader(opt)\n    dataset = data_loader.load_data()\n    return dataset\n\n\nclass CustomDatasetDataLoader():\n    \"\"\"Wrapper class of Dataset class that performs multi-threaded data loading\"\"\"\n\n    def __init__(self, opt):\n        \"\"\"Initialize this class\n\n        Step 1: create a dataset instance given the name [dataset_mode]\n        Step 2: create a multi-threaded data loader.\n        \"\"\"\n        self.opt = opt\n        dataset_class = find_dataset_using_name(opt.dataset_mode)\n        self.dataset = dataset_class(opt)\n        print(\"dataset [%s] was created\" % type(self.dataset).__name__)\n        self.dataloader = torch.utils.data.DataLoader(\n            self.dataset,\n            batch_size=opt.batch_size,\n            shuffle=not opt.serial_batches,\n            num_workers=int(opt.num_threads),\n            drop_last=True if opt.isTrain else False,\n        )\n\n    def set_epoch(self, epoch):\n        self.dataset.current_epoch = epoch\n\n    def load_data(self):\n        return self\n\n    def __len__(self):\n        \"\"\"Return the number of data in the dataset\"\"\"\n        return min(len(self.dataset), self.opt.max_dataset_size)\n\n    def __iter__(self):\n        \"\"\"Return a batch of data\"\"\"\n        for i, data in enumerate(self.dataloader):\n            if i * self.opt.batch_size >= self.opt.max_dataset_size:\n                break\n            yield data\n"
  },
  {
    "path": "data/base_dataset.py",
    "content": "\"\"\"This module implements an abstract base class (ABC) 'BaseDataset' for datasets.\n\nIt also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.\n\"\"\"\nimport random\nimport numpy as np\nimport torch.utils.data as data\nfrom PIL import Image\nimport torchvision.transforms as transforms\nfrom abc import ABC, abstractmethod\n\n\nclass BaseDataset(data.Dataset, ABC):\n    \"\"\"This class is an abstract base class (ABC) for datasets.\n\n    To create a subclass, you need to implement the following four functions:\n    -- <__init__>:                      initialize the class, first call BaseDataset.__init__(self, opt).\n    -- <__len__>:                       return the size of dataset.\n    -- <__getitem__>:                   get a data point.\n    -- <modify_commandline_options>:    (optionally) add dataset-specific options and set default options.\n    \"\"\"\n\n    def __init__(self, opt):\n        \"\"\"Initialize the class; save the options in the class\n\n        Parameters:\n            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n        \"\"\"\n        self.opt = opt\n        self.root = opt.dataroot\n        self.current_epoch = 0\n\n    @staticmethod\n    def modify_commandline_options(parser, is_train):\n        \"\"\"Add new dataset-specific options, and rewrite default values for existing options.\n\n        Parameters:\n            parser          -- original option parser\n            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.\n\n        Returns:\n            the modified parser.\n        \"\"\"\n        return parser\n\n    @abstractmethod\n    def __len__(self):\n        \"\"\"Return the total number of images in the dataset.\"\"\"\n        return 0\n\n    @abstractmethod\n    def __getitem__(self, index):\n        \"\"\"Return a data point and its metadata information.\n\n        Parameters:\n            index - - a random integer for data indexing\n\n        Returns:\n            a dictionary of data with their names. It ususally contains the data itself and its metadata information.\n        \"\"\"\n        pass\n\n\ndef get_params(opt, size):\n    w, h = size\n    new_h = h\n    new_w = w\n    if opt.preprocess == 'resize_and_crop':\n        new_h = new_w = opt.load_size\n    elif opt.preprocess == 'scale_width_and_crop':\n        new_w = opt.load_size\n        new_h = opt.load_size * h // w\n\n    x = random.randint(0, np.maximum(0, new_w - opt.crop_size))\n    y = random.randint(0, np.maximum(0, new_h - opt.crop_size))\n\n    flip = random.random() > 0.5\n\n    return {'crop_pos': (x, y), 'flip': flip}\n\n\ndef get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):\n    transform_list = []\n    if grayscale:\n        transform_list.append(transforms.Grayscale(1))\n    if 'fixsize' in opt.preprocess:\n        transform_list.append(transforms.Resize(params[\"size\"], method))\n    if 'resize' in opt.preprocess:\n        osize = [opt.load_size, opt.load_size]\n        if \"gta2cityscapes\" in opt.dataroot:\n            osize[0] = opt.load_size // 2\n        transform_list.append(transforms.Resize(osize, method))\n    elif 'scale_width' in opt.preprocess:\n        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))\n    elif 'scale_shortside' in opt.preprocess:\n        transform_list.append(transforms.Lambda(lambda img: __scale_shortside(img, opt.load_size, opt.crop_size, method)))\n\n    if 'zoom' in opt.preprocess:\n        if params is None:\n            transform_list.append(transforms.Lambda(lambda img: __random_zoom(img, opt.load_size, opt.crop_size, method)))\n        else:\n            transform_list.append(transforms.Lambda(lambda img: __random_zoom(img, opt.load_size, opt.crop_size, method, factor=params[\"scale_factor\"])))\n\n    if 'crop' in opt.preprocess:\n        if params is None or 'crop_pos' not in params:\n            transform_list.append(transforms.RandomCrop(opt.crop_size))\n        else:\n            transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))\n\n    if 'patch' in opt.preprocess:\n        transform_list.append(transforms.Lambda(lambda img: __patch(img, params['patch_index'], opt.crop_size)))\n\n    if 'trim' in opt.preprocess:\n        transform_list.append(transforms.Lambda(lambda img: __trim(img, opt.crop_size)))\n\n    # if opt.preprocess == 'none':\n    transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))\n\n    if not opt.no_flip:\n        if params is None or 'flip' not in params:\n            transform_list.append(transforms.RandomHorizontalFlip())\n        elif 'flip' in params:\n            transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))\n\n    if convert:\n        transform_list += [transforms.ToTensor()]\n        if grayscale:\n            transform_list += [transforms.Normalize((0.5,), (0.5,))]\n        else:\n            transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n    return transforms.Compose(transform_list)\n\n\ndef __make_power_2(img, base, method=Image.BICUBIC):\n    ow, oh = img.size\n    h = int(round(oh / base) * base)\n    w = int(round(ow / base) * base)\n    if h == oh and w == ow:\n        return img\n\n    return img.resize((w, h), method)\n\n\ndef __random_zoom(img, target_width, crop_width, method=Image.BICUBIC, factor=None):\n    if factor is None:\n        zoom_level = np.random.uniform(0.8, 1.0, size=[2])\n    else:\n        zoom_level = (factor[0], factor[1])\n    iw, ih = img.size\n    zoomw = max(crop_width, iw * zoom_level[0])\n    zoomh = max(crop_width, ih * zoom_level[1])\n    img = img.resize((int(round(zoomw)), int(round(zoomh))), method)\n    return img\n\n\ndef __scale_shortside(img, target_width, crop_width, method=Image.BICUBIC):\n    ow, oh = img.size\n    shortside = min(ow, oh)\n    if shortside >= target_width:\n        return img\n    else:\n        scale = target_width / shortside\n        return img.resize((round(ow * scale), round(oh * scale)), method)\n\n\ndef __trim(img, trim_width):\n    ow, oh = img.size\n    if ow > trim_width:\n        xstart = np.random.randint(ow - trim_width)\n        xend = xstart + trim_width\n    else:\n        xstart = 0\n        xend = ow\n    if oh > trim_width:\n        ystart = np.random.randint(oh - trim_width)\n        yend = ystart + trim_width\n    else:\n        ystart = 0\n        yend = oh\n    return img.crop((xstart, ystart, xend, yend))\n\n\ndef __scale_width(img, target_width, crop_width, method=Image.BICUBIC):\n    ow, oh = img.size\n    if ow == target_width and oh >= crop_width:\n        return img\n    w = target_width\n    h = int(max(target_width * oh / ow, crop_width))\n    return img.resize((w, h), method)\n\n\ndef __crop(img, pos, size):\n    ow, oh = img.size\n    x1, y1 = pos\n    tw = th = size\n    if (ow > tw or oh > th):\n        return img.crop((x1, y1, x1 + tw, y1 + th))\n    return img\n\n\ndef __patch(img, index, size):\n    ow, oh = img.size\n    nw, nh = ow // size, oh // size\n    roomx = ow - nw * size\n    roomy = oh - nh * size\n    startx = np.random.randint(int(roomx) + 1)\n    starty = np.random.randint(int(roomy) + 1)\n\n    index = index % (nw * nh)\n    ix = index // nh\n    iy = index % nh\n    gridx = startx + ix * size\n    gridy = starty + iy * size\n    return img.crop((gridx, gridy, gridx + size, gridy + size))\n\n\ndef __flip(img, flip):\n    if flip:\n        return img.transpose(Image.FLIP_LEFT_RIGHT)\n    return img\n\n\ndef __print_size_warning(ow, oh, w, h):\n    \"\"\"Print warning information about image size(only print once)\"\"\"\n    if not hasattr(__print_size_warning, 'has_printed'):\n        print(\"The image size needs to be a multiple of 4. \"\n              \"The loaded image size was (%d, %d), so it was adjusted to \"\n              \"(%d, %d). This adjustment will be done to all images \"\n              \"whose sizes are not multiples of 4\" % (ow, oh, w, h))\n        __print_size_warning.has_printed = True\n"
  },
  {
    "path": "data/image_folder.py",
    "content": "\"\"\"A modified image folder class\n\nWe modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)\nso that this class can load images from both current directory and its subdirectories.\n\"\"\"\n\nimport torch.utils.data as data\n\nfrom PIL import Image\nimport os\nimport os.path\n\nIMG_EXTENSIONS = [\n    '.jpg', '.JPG', '.jpeg', '.JPEG',\n    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',\n    '.tif', '.TIF', '.tiff', '.TIFF',\n]\n\n\ndef is_image_file(filename):\n    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\n\ndef make_dataset(dir, max_dataset_size=float(\"inf\")):\n    images = []\n    assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir\n\n    for root, _, fnames in sorted(os.walk(dir, followlinks=True)):\n        for fname in fnames:\n            if is_image_file(fname):\n                path = os.path.join(root, fname)\n                images.append(path)\n    return images[:min(max_dataset_size, len(images))]\n\n\ndef default_loader(path):\n    return Image.open(path).convert('RGB')\n\n\nclass ImageFolder(data.Dataset):\n\n    def __init__(self, root, transform=None, return_paths=False,\n                 loader=default_loader):\n        imgs = make_dataset(root)\n        if len(imgs) == 0:\n            raise(RuntimeError(\"Found 0 images in: \" + root + \"\\n\"\n                               \"Supported image extensions are: \" + \",\".join(IMG_EXTENSIONS)))\n\n        self.root = root\n        self.imgs = imgs\n        self.transform = transform\n        self.return_paths = return_paths\n        self.loader = loader\n\n    def __getitem__(self, index):\n        path = self.imgs[index]\n        img = self.loader(path)\n        if self.transform is not None:\n            img = self.transform(img)\n        if self.return_paths:\n            return img, path\n        else:\n            return img\n\n    def __len__(self):\n        return len(self.imgs)\n"
  },
  {
    "path": "data/unaligned_dataset.py",
    "content": "import os.path\nfrom data.base_dataset import BaseDataset, get_transform\nfrom data.image_folder import make_dataset\nfrom PIL import Image\nimport random\nimport util.util as util\n\n\nclass UnalignedDataset(BaseDataset):\n    \"\"\"\n    This dataset class can load unaligned/unpaired datasets.\n\n    It requires two directories to host training images from domain A '/path/to/data/trainA'\n    and from domain B '/path/to/data/trainB' respectively.\n    You can train the model with the dataset flag '--dataroot /path/to/data'.\n    Similarly, you need to prepare two directories:\n    '/path/to/data/testA' and '/path/to/data/testB' during test time.\n    \"\"\"\n\n    def __init__(self, opt):\n        \"\"\"Initialize this dataset class.\n\n        Parameters:\n            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions\n        \"\"\"\n        BaseDataset.__init__(self, opt)\n        self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')  # create a path '/path/to/data/trainA'\n        self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')  # create a path '/path/to/data/trainB'\n        if opt.phase == \"test\" and not os.path.exists(self.dir_A) \\\n           and os.path.exists(os.path.join(opt.dataroot, \"valA\")):\n            self.dir_A = os.path.join(opt.dataroot, \"valA\")\n            self.dir_B = os.path.join(opt.dataroot, \"valB\")\n\n        self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))   # load images from '/path/to/data/trainA'\n        self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))    # load images from '/path/to/data/trainB'\n        self.A_size = len(self.A_paths)  # get the size of dataset A\n        self.B_size = len(self.B_paths)  # get the size of dataset B\n\n    def __getitem__(self, index):\n        \"\"\"Return a data point and its metadata information.\n\n        Parameters:\n            index (int)      -- a random integer for data indexing\n\n        Returns a dictionary that contains A, B, A_paths and B_paths\n            A (tensor)       -- an image in the input domain\n            B (tensor)       -- its corresponding image in the target domain\n            A_paths (str)    -- image paths\n            B_paths (str)    -- image paths\n        \"\"\"\n        A_path = self.A_paths[index % self.A_size]  # make sure index is within then range\n        if self.opt.serial_batches:   # make sure index is within then range\n            index_B = index % self.B_size\n        else:   # randomize the index for domain B to avoid fixed pairs.\n            index_B = random.randint(0, self.B_size - 1)\n        B_path = self.B_paths[index_B]\n        A_img = Image.open(A_path).convert('RGB')\n        B_img = Image.open(B_path).convert('RGB')\n\n        # Apply image transformation\n        # For FastCUT mode, if in finetuning phase (learning rate is decaying),\n        # do not perform resize-crop data augmentation of CycleGAN.\n#        print('current_epoch', self.current_epoch)\n        is_finetuning = self.opt.isTrain and self.current_epoch > self.opt.n_epochs\n        modified_opt = util.copyconf(self.opt, load_size=self.opt.crop_size if is_finetuning else self.opt.load_size)\n        transform = get_transform(modified_opt)\n        A = transform(A_img)\n        B = transform(B_img)\n\n        return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}\n\n    def __len__(self):\n        \"\"\"Return the total number of images in the dataset.\n\n        As we have two datasets with potentially different number of images,\n        we take a maximum of\n        \"\"\"\n        return max(self.A_size, self.B_size)\n"
  },
  {
    "path": "experiments/__init__.py",
    "content": "import os\nimport importlib\n\n\ndef find_launcher_using_name(launcher_name):\n    # cur_dir = os.path.dirname(os.path.abspath(__file__))\n    # pythonfiles = glob.glob(cur_dir + '/**/*.py')\n    launcher_filename = \"experiments.{}_launcher\".format(launcher_name)\n    launcherlib = importlib.import_module(launcher_filename)\n\n    # In the file, the class called LauncherNameLauncher() will\n    # be instantiated. It has to be a subclass of BaseLauncher,\n    # and it is case-insensitive.\n    launcher = None\n    target_launcher_name = launcher_name.replace('_', '') + 'launcher'\n    for name, cls in launcherlib.__dict__.items():\n        if name.lower() == target_launcher_name.lower():\n            launcher = cls\n\n    if launcher is None:\n        raise ValueError(\"In %s.py, there should be a subclass of BaseLauncher \"\n                         \"with class name that matches %s in lowercase.\" %\n                         (launcher_filename, target_launcher_name))\n\n    return launcher\n\n\nif __name__ == \"__main__\":\n    import sys\n    import pickle\n\n    assert len(sys.argv) >= 3\n\n    name = sys.argv[1]\n    Launcher = find_launcher_using_name(name)\n\n    cache = \"/tmp/tmux_launcher/{}\".format(name)\n    if os.path.isfile(cache):\n        instance = pickle.load(open(cache, 'r'))\n    else:\n        instance = Launcher()\n\n    cmd = sys.argv[2]\n    if cmd == \"launch\":\n        instance.launch()\n    elif cmd == \"stop\":\n        instance.stop()\n    elif cmd == \"send\":\n        expid = int(sys.argv[3])\n        cmd = int(sys.argv[4])\n        instance.send_command(expid, cmd)\n\n    os.makedirs(\"/tmp/tmux_launcher/\", exist_ok=True)\n    pickle.dump(instance, open(cache, 'w'))\n"
  },
  {
    "path": "experiments/__main__.py",
    "content": "import os\nimport importlib\n\n\ndef find_launcher_using_name(launcher_name):\n    # cur_dir = os.path.dirname(os.path.abspath(__file__))\n    # pythonfiles = glob.glob(cur_dir + '/**/*.py')\n    launcher_filename = \"experiments.{}_launcher\".format(launcher_name)\n    launcherlib = importlib.import_module(launcher_filename)\n\n    # In the file, the class called LauncherNameLauncher() will\n    # be instantiated. It has to be a subclass of BaseLauncher,\n    # and it is case-insensitive.\n    launcher = None\n    # target_launcher_name = launcher_name.replace('_', '') + 'launcher'\n    for name, cls in launcherlib.__dict__.items():\n        if name.lower() == \"launcher\":\n            launcher = cls\n\n    if launcher is None:\n        raise ValueError(\"In %s.py, there should be a class named Launcher\")\n\n    return launcher\n\n\nif __name__ == \"__main__\":\n    import argparse\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('name')\n    parser.add_argument('cmd')\n    parser.add_argument('id', nargs='+', type=str)\n    parser.add_argument('--mode', default=None)\n    parser.add_argument('--which_epoch', default=None)\n    parser.add_argument('--continue_train', action='store_true')\n    parser.add_argument('--subdir', default='')\n    parser.add_argument('--title', default='')\n    parser.add_argument('--gpu_id', default=None, type=int)\n    parser.add_argument('--phase', default='test')\n\n    opt = parser.parse_args()\n\n    name = opt.name\n    Launcher = find_launcher_using_name(name)\n\n    instance = Launcher()\n\n    cmd = opt.cmd\n    ids = 'all' if 'all' in opt.id else [int(i) for i in opt.id]\n    if cmd == \"launch\":\n        instance.launch(ids, continue_train=opt.continue_train)\n    elif cmd == \"stop\":\n        instance.stop()\n    elif cmd == \"send\":\n        assert False\n    elif cmd == \"close\":\n        instance.close()\n    elif cmd == \"dry\":\n        instance.dry()\n    elif cmd == \"relaunch\":\n        instance.close()\n        instance.launch(ids, continue_train=opt.continue_train)\n    elif cmd == \"run\" or cmd == \"train\":\n        assert len(ids) == 1, '%s is invalid for run command' % (' '.join(opt.id))\n        expid = ids[0]\n        instance.run_command(instance.commands(), expid,\n                             continue_train=opt.continue_train,\n                             gpu_id=opt.gpu_id)\n    elif cmd == 'launch_test':\n        instance.launch(ids, test=True)\n    elif cmd == \"run_test\" or cmd == \"test\":\n        test_commands = instance.test_commands()\n        if ids == \"all\":\n            ids = list(range(len(test_commands)))\n        for expid in ids:\n            instance.run_command(test_commands, expid, opt.which_epoch,\n                                 gpu_id=opt.gpu_id)\n            if expid < len(ids) - 1:\n                os.system(\"sleep 5s\")\n    elif cmd == \"print_names\":\n        instance.print_names(ids, test=False)\n    elif cmd == \"print_test_names\":\n        instance.print_names(ids, test=True)\n    elif cmd == \"create_comparison_html\":\n        instance.create_comparison_html(name, ids, opt.subdir, opt.title, opt.phase)\n    else:\n        raise ValueError(\"Command not recognized\")\n"
  },
  {
    "path": "models/MSP.py",
    "content": "import numpy as np\nimport torch.nn as nn\nimport torch\nfrom torch.nn.parameter import Parameter\nimport torch.nn.functional as F\nfrom .torch_utils import concat_all_gather, get_world_size\n\n\nclass StyleExtractor(nn.Module):\n    \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n    def __init__(self, encoder, gpu_ids = []):\n        \"\"\"Construct a PatchGAN discriminator\n\n        Parameters:\n            input_nc (int)  -- the number of channels in input images\n            ndf (int)       -- the number of filters in the last conv layer\n            n_layers (int)  -- the number of conv layers in the discriminator\n            norm_layer      -- normalization layer\n        \"\"\"\n        super(StyleExtractor, self).__init__()\n        enc_layers = list(encoder.children())\n        self.enc_1 = nn.Sequential(*enc_layers[:6])  # input -> relu1_1\n        self.enc_2 = nn.Sequential(*enc_layers[6:13])  # relu1_1 -> relu2_1\n        self.enc_3 = nn.Sequential(*enc_layers[13:20])  # relu2_1 -> relu3_1\n        self.enc_4 = nn.Sequential(*enc_layers[20:33])  # relu3_1 -> relu4_1\n        self.enc_5 = nn.Sequential(*enc_layers[33:46])  # relu4_1 -> relu5_1\n        self.enc_6 = nn.Sequential(*enc_layers[46:70])  # relu5_1 -> maxpool\n\n        # fix the encoder\n        for name in ['enc_1', 'enc_2','enc_3', 'enc_4', 'enc_5', 'enc_6']:\n            for param in getattr(self, name).parameters():\n                param.requires_grad = True\n\n        # Class Activation Map\n        # self.gap_fc0 = nn.Linear(64, 1, bias=False)\n        # self.gmp_fc0 = nn.Linear(64, 1, bias=False)\n        # self.gap_fc1 = nn.Linear(128, 1, bias=False)\n        # self.gmp_fc1 = nn.Linear(128, 1, bias=False)\n        # self.gap_fc2 = nn.Linear(256, 1, bias=False)\n        # self.gmp_fc2 = nn.Linear(256, 1, bias=False)\n        # self.gap_fc3 = nn.Linear(512, 1, bias=False)\n        # self.gmp_fc3 = nn.Linear(512, 1, bias=False)\n        # self.gap_fc4 = nn.Linear(512, 1, bias=False)\n        # self.gmp_fc4 = nn.Linear(512, 1, bias=False)\n        # self.gap_fc5 = nn.Linear(512, 1, bias=False)\n        # self.gmp_fc5 = nn.Linear(512, 1, bias=False)\n        self.conv1x1_0 = nn.Conv2d(128, 64, kernel_size=1, stride=1, bias=True)\n        self.conv1x1_1 = nn.Conv2d(256, 128, kernel_size=1, stride=1, bias=True)\n        self.conv1x1_2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, bias=True)\n        self.conv1x1_3 = nn.Conv2d(1024, 512, kernel_size=1, stride=1, bias=True)\n        self.conv1x1_4 = nn.Conv2d(1024, 512, kernel_size=1, stride=1, bias=True)\n        self.conv1x1_5 = nn.Conv2d(1024, 512, kernel_size=1, stride=1, bias=True)\n        self.relu = nn.ReLU(True)\n        \n    # extract relu1_1, relu2_1, relu3_1, relu4_1 from input image\n    def encode_with_intermediate(self, input):\n        results = [input]\n        for i in range(6):\n            func = getattr(self, 'enc_{:d}'.format(i + 1))\n            results.append(func(results[-1]))\n        return results[1:]\n\n    def forward(self, input, index):\n        \"\"\"Standard forward.\"\"\"\n        feats = self.encode_with_intermediate(input)\n        codes = []\n        for x in index:\n            code = feats[x].clone()\n            gap = torch.nn.functional.adaptive_avg_pool2d(code, (1,1))\n            gmp = torch.nn.functional.adaptive_max_pool2d(code, (1,1))            \n            conv1x1 = getattr(self, 'conv1x1_{:d}'.format(x))\n            code = torch.cat([gap, gmp], 1)\n            code = self.relu(conv1x1(code))\n            codes.append(code)\n        return codes \n\n\n\nclass Projector(nn.Module):\n    def __init__(self, projector, gpu_ids = []):\n        super(Projector, self).__init__()\n        self.projector0 = nn.Sequential(\n            nn.Linear(64, 1024),\n            nn.ReLU(True),\n            #nn.Dropout(),\n            nn.Linear(1024, 2048),\n            nn.ReLU(True),\n            nn.Linear(2048, 2048),\n        )\n        self.projector1 = nn.Sequential(\n            #nn.Dropout(),\n            nn.Linear(128, 1024),\n            nn.ReLU(True),\n            #nn.Dropout(),\n            nn.Linear(1024, 2048),\n            nn.ReLU(True),\n            nn.Linear(2048, 2048),\n        )\n        self.projector2 = nn.Sequential(\n            #nn.Dropout(),\n            nn.Linear(256,1024),\n            nn.ReLU(True),\n            #nn.Dropout(),\n            nn.Linear(1024, 2048),\n            nn.ReLU(True),\n            nn.Linear(2048, 2048),\n        )\n        self.projector3 = nn.Sequential(\n            #nn.Dropout(),\n            nn.Linear(512, 1024),\n            nn.ReLU(True),\n            #nn.Dropout(),\n            nn.Linear(1024, 2048),\n            nn.ReLU(True),\n            nn.Linear(2048, 2048),\n        )\n        self.projector4 = nn.Sequential(\n            #nn.Dropout(),\n            nn.Linear(512, 1024),\n            nn.ReLU(True),\n            #nn.Dropout(),\n            nn.Linear(1024, 2048),\n            nn.ReLU(True),\n            nn.Linear(2048, 2048),\n        )\n        self.projector5 = nn.Sequential(\n            #nn.Dropout(),\n            nn.Linear(512, 1024),\n            nn.ReLU(True),\n            #nn.Dropout(),\n            nn.Linear(1024, 2048),\n            nn.ReLU(True),\n            nn.Linear(2048, 2048),\n        )\n\n    def forward(self, input, index):\n        \"\"\"Standard forward.\"\"\"\n        num = 0\n        projections = []\n        for x in index:\n            projector = getattr(self, 'projector{:d}'.format(x))        \n            code = input[num].view(input[num].size(0), -1)\n            projection = projector(code).view(code.size(0), -1)\n            projection = nn.functional.normalize(projection)\n            projections.append(projection)\n            num += 1\n        return projections\n\n\ndef make_layers(cfg, batch_norm=True):\n    layers = []\n    in_channels = 3\n    for v in cfg:\n        if v == 'M':\n            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n        else:\n            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n            if batch_norm:\n                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n            else:\n                layers += [conv2d, nn.ReLU(inplace=True)]\n            in_channels = v\n    return nn.Sequential(*layers)\n\nvgg = make_layers([3, 64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', \n          512, 512, 512, 512, 'M', 512, 512, 'M', 512, 512, 'M'])\n\nclass InfoNCELoss(nn.Module):\n\n    def __init__(self, temperature, feature_dim, queue_size):\n        super().__init__()\n        self.tau = temperature\n        self.queue_size = queue_size\n        self.world_size = get_world_size()\n        data0 = torch.randn(2048, queue_size)\n        data0 = F.normalize(data0, dim=0)\n        data1 = torch.randn(2048, queue_size)\n        data1 = F.normalize(data1, dim=0)\n        data2 = torch.randn(2048, queue_size)\n        data2 = F.normalize(data2, dim=0)\n        data3 = torch.randn(2048, queue_size)\n        data3 = F.normalize(data3, dim=0)\n        data4 = torch.randn(2048, queue_size)\n        data4 = F.normalize(data4, dim=0)\n        data5 = torch.randn(2048, queue_size)\n        data5 = F.normalize(data5, dim=0)\n        \n        self.register_buffer(\"queue_data_A0\", data0)\n        self.register_buffer(\"queue_ptr_A0\", torch.zeros(1, dtype=torch.long))\n        self.register_buffer(\"queue_data_B0\", data0)\n        self.register_buffer(\"queue_ptr_B0\", torch.zeros(1, dtype=torch.long))\n\n        self.register_buffer(\"queue_data_A2\", data2)\n        self.register_buffer(\"queue_ptr_A2\", torch.zeros(1, dtype=torch.long))\n        self.register_buffer(\"queue_data_B2\", data2)\n        self.register_buffer(\"queue_ptr_B2\", torch.zeros(1, dtype=torch.long))\n        \n        self.register_buffer(\"queue_data_A4\", data4)\n        self.register_buffer(\"queue_ptr_A4\", torch.zeros(1, dtype=torch.long))\n        self.register_buffer(\"queue_data_B4\", data4)\n        self.register_buffer(\"queue_ptr_B4\", torch.zeros(1, dtype=torch.long))\n        \n        self.register_buffer(\"queue_data_A1\", data1)\n        self.register_buffer(\"queue_ptr_A1\", torch.zeros(1, dtype=torch.long))\n        self.register_buffer(\"queue_data_B1\", data1)\n        self.register_buffer(\"queue_ptr_B1\", torch.zeros(1, dtype=torch.long))\n\n        self.register_buffer(\"queue_data_A3\", data3)\n        self.register_buffer(\"queue_ptr_A3\", torch.zeros(1, dtype=torch.long))\n        self.register_buffer(\"queue_data_B3\", data3)\n        self.register_buffer(\"queue_ptr_B3\", torch.zeros(1, dtype=torch.long))        \n\n        self.register_buffer(\"queue_data_A5\", data5)\n        self.register_buffer(\"queue_ptr_A5\", torch.zeros(1, dtype=torch.long))\n        self.register_buffer(\"queue_data_B5\", data5)\n        self.register_buffer(\"queue_ptr_B5\", torch.zeros(1, dtype=torch.long))\n\n    def forward(self, query, key, style = 'real'):\n        # positive logits: Nx1\n        l_pos = torch.einsum(\"nc,nc->n\", (query, key)).unsqueeze(-1)\n\n        # negative logits: NxK\n        if style == 'real_A0':\n            queue = self.queue_data_A0.clone().detach()        \n        elif style == 'real_A1':\n            queue = self.queue_data_A1.clone().detach()\n        elif style == 'real_A2':\n            queue = self.queue_data_A2.clone().detach()\n        elif style == 'real_A3':\n            queue = self.queue_data_A3.clone().detach()\n        elif style == 'real_A4':\n            queue = self.queue_data_A4.clone().detach()\n        elif style == 'real_A5':\n            queue = self.queue_data_A5.clone().detach()\n        elif style == 'fake_A':\n            queue = self.queue_data_fake_A.clone().detach()  \n        elif style == 'real_B0':\n            queue = self.queue_data_B0.clone().detach()\n        elif style == 'real_B1':\n            queue = self.queue_data_B1.clone().detach()\n        elif style == 'real_B2':\n            queue = self.queue_data_B2.clone().detach()\n        elif style == 'real_B3':\n            queue = self.queue_data_B3.clone().detach()\n        elif style == 'real_B4':\n            queue = self.queue_data_B4.clone().detach()\n        elif style == 'real_B5':\n            queue = self.queue_data_B5.clone().detach()\n        elif style == 'fake_B':\n            queue = self.queue_data_fake_B.clone().detach()           \n        else:\n            raise NotImplementedError('QUEUE: style is not recognized')\n        l_neg = torch.einsum(\"nc,ck->nk\", (query, queue))\n\n        # logits: Nx(1+K)\n        logits = torch.cat((l_pos, l_neg), dim=1)\n\n        # labels: positive key indicators\n        labels = torch.zeros(logits.size(0), dtype=torch.long, device=query.device)\n\n        return F.cross_entropy(logits / self.tau, labels)\n\n    @torch.no_grad()\n    def dequeue_and_enqueue(self, keys, style = 'real'):\n        # gather from all gpus\n        if self.world_size > 1:\n            keys = concat_all_gather(keys, self.world_size)\n        batch_size = keys.size(0)\n        # replace the keys at ptr (dequeue and enqueue)\n        if style == 'real_A0':\n            ptr = int(self.queue_ptr_A0)\n            assert self.queue_size % batch_size == 0\n            self.queue_data_A0[:, ptr:ptr + batch_size] = keys.T\n            self.queue_ptr_A0[0] = (ptr + batch_size) % self.queue_size\n        elif style == 'real_A1':\n            ptr = int(self.queue_ptr_A1)\n            assert self.queue_size % batch_size == 0\n            self.queue_data_A1[:, ptr:ptr + batch_size] = keys.T\n            self.queue_ptr_A1[0] = (ptr + batch_size) % self.queue_size\n        elif style == 'real_A2':\n            ptr = int(self.queue_ptr_A2)\n            assert self.queue_size % batch_size == 0\n            self.queue_data_A2[:, ptr:ptr + batch_size] = keys.T\n            self.queue_ptr_A2[0] = (ptr + batch_size) % self.queue_size\n        elif style == 'real_A3':\n            ptr = int(self.queue_ptr_A3)\n            assert self.queue_size % batch_size == 0\n            self.queue_data_A3[:, ptr:ptr + batch_size] = keys.T\n            self.queue_ptr_A3[0] = (ptr + batch_size) % self.queue_size\n        elif style == 'real_A4':\n            ptr = int(self.queue_ptr_A4)\n            assert self.queue_size % batch_size == 0\n            self.queue_data_A4[:, ptr:ptr + batch_size] = keys.T\n            self.queue_ptr_A4[0] = (ptr + batch_size) % self.queue_size\n        elif style == 'real_A5':\n            ptr = int(self.queue_ptr_A5)\n            assert self.queue_size % batch_size == 0\n            self.queue_data_A5[:, ptr:ptr + batch_size] = keys.T\n            self.queue_ptr_A5[0] = (ptr + batch_size) % self.queue_size\n        elif style == 'real_B0':\n            ptr = int(self.queue_ptr_B0)\n            assert self.queue_size % batch_size == 0\n            self.queue_data_B0[:, ptr:ptr + batch_size] = keys.T\n            self.queue_ptr_B0[0] = (ptr + batch_size) % self.queue_size\n        elif style == 'real_B1':\n            ptr = int(self.queue_ptr_B1)\n            assert self.queue_size % batch_size == 0\n            self.queue_data_B1[:, ptr:ptr + batch_size] = keys.T\n            self.queue_ptr_B1[0] = (ptr + batch_size) % self.queue_size\n        elif style == 'real_B2':\n            ptr = int(self.queue_ptr_B2)\n            assert self.queue_size % batch_size == 0\n            self.queue_data_B2[:, ptr:ptr + batch_size] = keys.T\n            self.queue_ptr_B2[0] = (ptr + batch_size) % self.queue_size\n        elif style == 'real_B3':\n            ptr = int(self.queue_ptr_B3)\n            assert self.queue_size % batch_size == 0\n            self.queue_data_B3[:, ptr:ptr + batch_size] = keys.T\n            self.queue_ptr_B3[0] = (ptr + batch_size) % self.queue_size\n        elif style == 'real_B4':\n            ptr = int(self.queue_ptr_B4)\n            assert self.queue_size % batch_size == 0\n            self.queue_data_B4[:, ptr:ptr + batch_size] = keys.T\n            self.queue_ptr_B4[0] = (ptr + batch_size) % self.queue_size\n        elif style == 'real_B5':\n            ptr = int(self.queue_ptr_B5)\n            assert self.queue_size % batch_size == 0\n            self.queue_data_B5[:, ptr:ptr + batch_size] = keys.T\n            self.queue_ptr_B5[0] = (ptr + batch_size) % self.queue_size\n        else:\n            raise NotImplementedError('QUEUE: style is not recognized')\n\n"
  },
  {
    "path": "models/__init__.py",
    "content": "\"\"\"This package contains modules related to objective functions, optimizations, and network architectures.\n\nTo add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.\nYou need to implement the following five functions:\n    -- <__init__>:                      initialize the class; first call BaseModel.__init__(self, opt).\n    -- <set_input>:                     unpack data from dataset and apply preprocessing.\n    -- <forward>:                       produce intermediate results.\n    -- <optimize_parameters>:           calculate loss, gradients, and update network weights.\n    -- <modify_commandline_options>:    (optionally) add model-specific options and set default options.\n\nIn the function <__init__>, you need to define four lists:\n    -- self.loss_names (str list):          specify the training losses that you want to plot and save.\n    -- self.model_names (str list):         define networks used in our training.\n    -- self.visual_names (str list):        specify the images that you want to display and save.\n    -- self.optimizers (optimizer list):    define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an usage.\n\nNow you can use the model class by specifying flag '--model dummy'.\nSee our template model class 'template_model.py' for more details.\n\"\"\"\n\nimport importlib\nfrom models.base_model import BaseModel\n\n\ndef find_model_using_name(model_name):\n    \"\"\"Import the module \"models/[model_name]_model.py\".\n\n    In the file, the class called DatasetNameModel() will\n    be instantiated. It has to be a subclass of BaseModel,\n    and it is case-insensitive.\n    \"\"\"\n    model_filename = \"models.\" + model_name + \"_model\"\n    modellib = importlib.import_module(model_filename)\n    model = None\n    target_model_name = model_name.replace('_', '') + 'model'\n    for name, cls in modellib.__dict__.items():\n        if name.lower() == target_model_name.lower() \\\n           and issubclass(cls, BaseModel):\n            model = cls\n\n    if model is None:\n        print(\"In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase.\" % (model_filename, target_model_name))\n        exit(0)\n\n    return model\n\n\ndef get_option_setter(model_name):\n    \"\"\"Return the static method <modify_commandline_options> of the model class.\"\"\"\n    model_class = find_model_using_name(model_name)\n    return model_class.modify_commandline_options\n\n\ndef create_model(opt):\n    \"\"\"Create a model given the option.\n\n    This function warps the class CustomDatasetDataLoader.\n    This is the main interface between this package and 'train.py'/'test.py'\n\n    Example:\n        >>> from models import create_model\n        >>> model = create_model(opt)\n    \"\"\"\n    model = find_model_using_name(opt.model)\n    instance = model(opt)\n    print(\"model [%s] was created\" % type(instance).__name__)\n    return instance\n"
  },
  {
    "path": "models/base_model.py",
    "content": "import os\nimport torch\nfrom collections import OrderedDict\nfrom abc import ABC, abstractmethod\nfrom . import networks\n\n\nclass BaseModel(ABC):\n    \"\"\"This class is an abstract base class (ABC) for models.\n    To create a subclass, you need to implement the following five functions:\n        -- <__init__>:                      initialize the class; first call BaseModel.__init__(self, opt).\n        -- <set_input>:                     unpack data from dataset and apply preprocessing.\n        -- <forward>:                       produce intermediate results.\n        -- <optimize_parameters>:           calculate losses, gradients, and update network weights.\n        -- <modify_commandline_options>:    (optionally) add model-specific options and set default options.\n    \"\"\"\n\n    def __init__(self, opt):\n        \"\"\"Initialize the BaseModel class.\n\n        Parameters:\n            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n\n        When creating your custom class, you need to implement your own initialization.\n        In this fucntion, you should first call <BaseModel.__init__(self, opt)>\n        Then, you need to define four lists:\n            -- self.loss_names (str list):          specify the training losses that you want to plot and save.\n            -- self.model_names (str list):         specify the images that you want to display and save.\n            -- self.visual_names (str list):        define networks used in our training.\n            -- self.optimizers (optimizer list):    define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.\n        \"\"\"\n        self.opt = opt\n        self.gpu_ids = opt.gpu_ids\n        self.isTrain = opt.isTrain\n        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')  # get device name: CPU or GPU\n        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)  # save all the checkpoints to save_dir\n        if opt.preprocess != 'scale_width':  # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.\n            torch.backends.cudnn.benchmark = True\n        self.loss_names = []\n        self.model_names = []\n        self.visual_names = []\n        self.optimizers = []\n        self.image_paths = []\n        self.metric = 0  # used for learning rate policy 'plateau'\n\n    @staticmethod\n    def dict_grad_hook_factory(add_func=lambda x: x):\n        saved_dict = dict()\n\n        def hook_gen(name):\n            def grad_hook(grad):\n                saved_vals = add_func(grad)\n                saved_dict[name] = saved_vals\n            return grad_hook\n        return hook_gen, saved_dict\n\n    @staticmethod\n    def modify_commandline_options(parser, is_train):\n        \"\"\"Add new model-specific options, and rewrite default values for existing options.\n\n        Parameters:\n            parser          -- original option parser\n            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.\n\n        Returns:\n            the modified parser.\n        \"\"\"\n        return parser\n\n    @abstractmethod\n    def set_input(self, input):\n        \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n        Parameters:\n            input (dict): includes the data itself and its metadata information.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def forward(self):\n        \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\n        pass\n\n    @abstractmethod\n    def optimize_parameters(self):\n        \"\"\"Calculate losses, gradients, and update network weights; called in every training iteration\"\"\"\n        pass\n\n    def setup(self, opt):\n        \"\"\"Load and print networks; create schedulers\n\n        Parameters:\n            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions\n        \"\"\"\n        if self.isTrain:\n            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]\n        if not self.isTrain or opt.continue_train:\n            load_suffix = opt.epoch\n            self.load_networks(load_suffix)\n\n        self.print_networks(opt.verbose)\n\n    def parallelize(self):\n        for name in self.model_names:\n            if isinstance(name, str):\n                net = getattr(self, 'net' + name)\n                setattr(self, 'net' + name, torch.nn.DataParallel(net, self.opt.gpu_ids))\n\n    def data_dependent_initialize(self, data):\n        pass\n\n    def eval(self):\n        \"\"\"Make models eval mode during test time\"\"\"\n        for name in self.model_names:\n            if isinstance(name, str):\n                net = getattr(self, 'net' + name)\n                net.eval()\n\n    def test(self):\n        \"\"\"Forward function used in test time.\n\n        This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop\n        It also calls <compute_visuals> to produce additional visualization results\n        \"\"\"\n        with torch.no_grad():\n            self.forward()\n            self.compute_visuals()\n\n    def compute_visuals(self):\n        \"\"\"Calculate additional output images for visdom and HTML visualization\"\"\"\n        pass\n\n    def get_image_paths(self):\n        \"\"\" Return image paths that are used to load current data\"\"\"\n        return self.image_paths\n\n    def update_learning_rate(self):\n        \"\"\"Update learning rates for all the networks; called at the end of every epoch\"\"\"\n        for scheduler in self.schedulers:\n            if self.opt.lr_policy == 'plateau':\n                scheduler.step(self.metric)\n            else:\n                scheduler.step()\n\n        lr = self.optimizers[0].param_groups[0]['lr']\n        print('learning rate = %.7f' % lr)\n\n    def get_current_visuals(self):\n        \"\"\"Return visualization images. train.py will display these images with visdom, and save the images to a HTML\"\"\"\n        visual_ret = OrderedDict()\n        for name in self.visual_names:\n            if isinstance(name, str):\n                visual_ret[name] = getattr(self, name)\n        return visual_ret\n\n    def get_current_losses(self):\n        \"\"\"Return traning losses / errors. train.py will print out these errors on console, and save them to a file\"\"\"\n        errors_ret = OrderedDict()\n        for name in self.loss_names:\n            if isinstance(name, str):\n                errors_ret[name] = float(getattr(self, 'loss_' + name))  # float(...) works for both scalar tensor and float number\n        return errors_ret\n\n    def save_networks(self, epoch):\n        \"\"\"Save all the networks to the disk.\n\n        Parameters:\n            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)\n        \"\"\"\n        for name in self.model_names:\n            if isinstance(name, str):\n                save_filename = '%s_net_%s.pth' % (epoch, name)\n                save_path = os.path.join(self.save_dir, save_filename)\n                net = getattr(self, 'net' + name)\n\n                if len(self.gpu_ids) > 0 and torch.cuda.is_available():\n                    torch.save(net.module.cpu().state_dict(), save_path)\n                    net.cuda(self.gpu_ids[0])\n                else:\n                    torch.save(net.cpu().state_dict(), save_path)\n\n    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):\n        \"\"\"Fix InstanceNorm checkpoints incompatibility (prior to 0.4)\"\"\"\n        key = keys[i]\n        if i + 1 == len(keys):  # at the end, pointing to a parameter/buffer\n            if module.__class__.__name__.startswith('InstanceNorm') and \\\n                    (key == 'running_mean' or key == 'running_var'):\n                if getattr(module, key) is None:\n                    state_dict.pop('.'.join(keys))\n            if module.__class__.__name__.startswith('InstanceNorm') and \\\n               (key == 'num_batches_tracked'):\n                state_dict.pop('.'.join(keys))\n        else:\n            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)\n\n    def load_networks(self, epoch):\n        \"\"\"Load all the networks from the disk.\n\n        Parameters:\n            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)\n        \"\"\"\n        for name in self.model_names:\n            if isinstance(name, str):\n                load_filename = '%s_net_%s.pth' % (epoch, name)\n                if self.opt.isTrain and self.opt.pretrained_name is not None:\n                    load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)\n                else:\n                    load_dir = self.save_dir\n\n                load_path = os.path.join(load_dir, load_filename)\n                net = getattr(self, 'net' + name)\n                if isinstance(net, torch.nn.DataParallel):\n                    net = net.module\n                print('loading the model from %s' % load_path)\n                # if you are using PyTorch newer than 0.4 (e.g., built from\n                # GitHub source), you can remove str() on self.device\n                state_dict = torch.load(load_path, map_location=str(self.device))\n                if hasattr(state_dict, '_metadata'):\n                    del state_dict._metadata\n\n                # patch InstanceNorm checkpoints prior to 0.4\n                # for key in list(state_dict.keys()):  # need to copy keys here because we mutate in loop\n                #    self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))\n                net.load_state_dict(state_dict)\n\n    def print_networks(self, verbose):\n        \"\"\"Print the total number of parameters in the network and (if verbose) network architecture\n\n        Parameters:\n            verbose (bool) -- if verbose: print the network architecture\n        \"\"\"\n        print('---------- Networks initialized -------------')\n        for name in self.model_names:\n            if isinstance(name, str):\n                net = getattr(self, 'net' + name)\n                num_params = 0\n                for param in net.parameters():\n                    num_params += param.numel()\n                if verbose:\n                    print(net)\n                print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))\n        print('-----------------------------------------------')\n\n    def set_requires_grad(self, nets, requires_grad=False):\n        \"\"\"Set requies_grad=Fasle for all the networks to avoid unnecessary computations\n        Parameters:\n            nets (network list)   -- a list of networks\n            requires_grad (bool)  -- whether the networks require gradients or not\n        \"\"\"\n        if not isinstance(nets, list):\n            nets = [nets]\n        for net in nets:\n            if net is not None:\n                for param in net.parameters():\n                    param.requires_grad = requires_grad\n\n    def generate_visuals_for_evaluation(self, data, mode):\n        return {}\n"
  },
  {
    "path": "models/cast_model.py",
    "content": "import itertools\r\nimport torch\r\nfrom .base_model import BaseModel\r\nfrom . import networks\r\nfrom . import net\r\nfrom . import MSP\r\nimport util.util as util\r\nfrom util.image_pool import ImagePool\r\nimport torch.nn as nn\r\nfrom torch.nn import init\r\nimport kornia.augmentation as K\r\n\r\nclass CASTModel(BaseModel):\r\n    \"\"\" This class implements CAST model.\r\n    This code is inspired by DCLGAN\r\n    \"\"\"\r\n\r\n    @staticmethod\r\n    def modify_commandline_options(parser, is_train=True):\r\n        \"\"\"  Configures options specific for CAST \"\"\"\r\n        parser.add_argument('--CAST_mode', type=str, default=\"CAST\", choices='CAST')\r\n        parser.add_argument('--lambda_GAN_G_A', type=float, default=0.1, help='weight for GAN loss：GAN(G(Ic, Is))')\r\n        parser.add_argument('--lambda_GAN_G_B', type=float, default=0.1, help='weight for GAN loss：GAN(G(Is, Ic))')\r\n\r\n        parser.add_argument('--lambda_GAN_D_A', type=float, default=1.0, help='weight for GAN loss：GAN(G(Is, Ic))')\r\n        parser.add_argument('--lambda_GAN_D_B', type=float, default=1.0, help='weight for GAN loss：GAN(G(Ic, Is))')\r\n        \r\n        parser.add_argument('--lambda_NCE_G', type=float, default=0.05, help='weight for NCE loss: NCE(G(Ic, Is), Is)')\r\n        parser.add_argument('--lambda_NCE_D', type=float, default=1.0, help='weight for NCE loss: NCE(I, I+, I-)')\r\n        \r\n        parser.add_argument('--lambda_CYC', type=float, default=4.0, help='weight for l1 reconstructe loss:||Ic - G(G(Ic, Is),Ic)||')\r\n        \r\n        parser.add_argument('--nce_layers', type=str, default='0,1,2,3', help='compute NCE loss on which layers')\r\n\r\n        parser.set_defaults(pool_size=0)  # no image pooling\r\n\r\n        opt, _ = parser.parse_known_args()\r\n\r\n        # Set default parameters for CAST.\r\n        if opt.CAST_mode.lower() == \"cast\":\r\n            pass\r\n        else:\r\n            raise ValueError(opt.CAST_mode)\r\n\r\n        return parser\r\n\r\n    def __init__(self, opt):\r\n        BaseModel.__init__(self, opt)\r\n\r\n        # specify the training losses you want to print out.\r\n        # The training/test scripts will call <BaseModel.get_current_losses>\r\n        self.loss_names = ['G']\r\n        self.visual_names = ['real_A', 'fake_B', 'real_B']\r\n        \r\n\r\n        if self.opt.lambda_GAN_G_A > 0.0 and self.isTrain:\r\n            self.loss_names += [ 'G_A']\r\n        if self.opt.lambda_GAN_G_B > 0.0 and self.isTrain:\r\n            self.loss_names += [ 'G_B']\r\n\r\n        if self.opt.lambda_GAN_D_A > 0.0 and self.isTrain:\r\n            self.loss_names += ['D_A']\r\n        if self.opt.lambda_GAN_D_B > 0.0 and self.isTrain:\r\n            self.loss_names += ['D_B']\r\n\r\n        if self.opt.lambda_NCE_G > 0.0 and self.isTrain:\r\n            self.loss_names += [ 'G_NCE_style']\r\n\r\n        if self.opt.lambda_NCE_D > 0.0 and self.isTrain:\r\n            self.loss_names += [ 'NCE_D']\r\n\r\n        if self.opt.lambda_CYC > 0.0 and self.isTrain:\r\n            self.visual_names += ['rec_A', 'rec_B']\r\n            self.loss_names += ['cyc']\r\n\r\n        if self.isTrain:\r\n            self.model_names = ['AE','Dec_A', 'Dec_B', 'D', 'P_style', 'D_A', 'D_B']\r\n        else:  # during test time, only load G\r\n            self.model_names = ['AE','Dec_A', 'Dec_B']\r\n\r\n        # define networks \r\n        \r\n        vgg = net.vgg\r\n        vgg.load_state_dict(torch.load('models/vgg_normalised.pth'))\r\n        vgg = nn.Sequential(*list(vgg.children())[:31]) \r\n        self.netAE = net.ADAIN_Encoder(vgg, self.gpu_ids)\r\n        self.netDec_A = net.Decoder(self.gpu_ids)\r\n        self.netDec_B = net.Decoder(self.gpu_ids)  \r\n        init_net(self.netAE, 'normal', 0.02, self.gpu_ids)  \r\n        init_net(self.netDec_A, 'normal', 0.02, self.gpu_ids)  \r\n        init_net(self.netDec_B, 'normal', 0.02, self.gpu_ids)\r\n\r\n        if self.isTrain:       \r\n            style_vgg = MSP.vgg\r\n            style_vgg.load_state_dict(torch.load('models/style_vgg.pth'))\r\n            style_vgg = nn.Sequential(*list(style_vgg.children()))\r\n            self.netD = MSP.StyleExtractor(style_vgg, self.gpu_ids)  \r\n            self.netP_style = MSP.Projector(self.gpu_ids)  \r\n            init_net(self.netD, 'normal', 0.02, self.gpu_ids) \r\n            init_net(self.netP_style, 'normal', 0.02, self.gpu_ids)\r\n            \r\n            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D,\r\n                                            opt.crop_size, opt.feature_dim, opt.max_conv_dim,\r\n                                            opt.normD, opt.init_type, opt.init_gain, opt.no_antialias,\r\n                                            self.gpu_ids, opt)\r\n            self.netD_B = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D,\r\n                                            opt.crop_size, opt.feature_dim, opt.max_conv_dim,\r\n                                            opt.normD, opt.init_type, opt.init_gain, opt.no_antialias,\r\n                                            self.gpu_ids, opt)        \r\n\r\n            self.fake_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images\r\n            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)\r\n            self.nce_layers = [int(i) for i in self.opt.nce_layers.split(',')]\r\n\r\n            self.nce_loss = MSP.InfoNCELoss(opt.temperature, opt.hypersphere_dim, \r\n                                             opt.queue_size).to(self.device)\r\n            self.mse_loss = nn.MSELoss()\r\n            \r\n            self.patch_sampler = K.RandomResizedCrop((256,256),scale=(0.8,1.0),ratio=(0.75,1.33)).to(self.device)\r\n            \r\n            self.criterionCyc = torch.nn.L1Loss().to(self.device)\r\n            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netAE.parameters(), self.netDec_A.parameters(), self.netDec_B.parameters()),\r\n                                                lr=opt.lr_G, betas=(opt.beta1, opt.beta2))\r\n            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),\r\n                                                lr=opt.lr_D, betas=(opt.beta1, opt.beta2))\r\n            self.optimizer_D_NCE = torch.optim.Adam(itertools.chain(self.netD.parameters(), self.netP_style.parameters()),\r\n                                                lr=opt.lr_D_NCE, betas=(opt.beta1, opt.beta2))\r\n            self.optimizers.append(self.optimizer_G)\r\n            self.optimizers.append(self.optimizer_D)\r\n            self.optimizers.append(self.optimizer_D_NCE)\r\n\r\n\r\n    def optimize_parameters(self):\r\n        # forward\r\n        self.forward()\r\n\r\n        # update D\r\n        if self.opt.lambda_GAN_D_A > 0.0 or self.opt.lambda_GAN_D_B > 0.0:\r\n            self.set_requires_grad([self.netD_A, self.netD_B], True)\r\n            self.set_requires_grad([self.netD, self.netP_style, self.netAE, self.netDec_A,self.netDec_B ], False)\r\n            self.optimizer_D.zero_grad()\r\n            self.loss_D = self.backward_D()\r\n            self.loss_D.backward(retain_graph=True)\r\n            self.optimizer_D.step()\r\n        \r\n        # update MSP\r\n        if self.opt.lambda_NCE_D > 0.0:\r\n            self.set_requires_grad([self.netD, self.netP_style], True)\r\n            self.set_requires_grad([self.netAE, self.netDec_A,self.netDec_B, self.netD_A, self.netD_B ], False)\r\n            self.optimizer_D_NCE.zero_grad()\r\n            self.loss_NCE_D = self.backward_D_NCEloss()\r\n            self.loss_NCE_D.backward(retain_graph=True)\r\n            self.optimizer_D_NCE.step()\r\n\r\n        # update G\r\n        self.set_requires_grad([self.netD, self.netP_style, self.netD_A, self.netD_B], False)\r\n        self.set_requires_grad([self.netAE, self.netDec_A,self.netDec_B], True)\r\n        self.optimizer_G.zero_grad()\r\n        self.loss_G = self.compute_G_loss()\r\n        self.loss_G.backward()\r\n        self.optimizer_G.step()\r\n            \r\n\r\n    def set_input(self, input):\r\n        \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\r\n        Parameters:\r\n            input (dict): include the data itself and its metadata information.\r\n        The option 'direction' can be used to swap domain A and domain B.\r\n        \"\"\"\r\n        AtoB = self.opt.direction == 'AtoB'\r\n        self.real_A = input['A' if AtoB else 'B'].to(self.device)\r\n        self.real_B = input['B' if AtoB else 'A'].to(self.device)\r\n        self.image_paths = input['A_paths' if AtoB else 'B_paths']\r\n\r\n    def forward(self):\r\n        \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\r\n\r\n        self.real_A_feat = self.netAE(self.real_A, self.real_B)  # G_A(A)\r\n        self.fake_B = self.netDec_B(self.real_A_feat)\r\n        if self.isTrain: \r\n            self.real_B_feat = self.netAE(self.real_B, self.real_A)  # G_A(A)\r\n            self.fake_A = self.netDec_A(self.real_B_feat)\r\n            if self.opt.lambda_CYC > 0.0:\r\n                self.rec_A_feat = self.netAE(self.fake_B, self.real_A)\r\n                self.rec_B_feat = self.netAE(self.fake_A, self.real_B)\r\n                self.rec_A = self.netDec_A(self.rec_A_feat)\r\n                self.rec_B = self.netDec_B(self.rec_B_feat)\r\n\r\n    def backward_D_basic(self, netD, content,style, fake):\r\n        \"\"\"Calculate GAN loss for the discriminator\r\n        Parameters:\r\n            netD (network)      -- the discriminator D\r\n            real (tensor array) -- real images\r\n            fake (tensor array) -- images generated by a generator\r\n\r\n        Return the discriminator loss.\r\n        We also call loss_D.backward() to calculate the gradients.\r\n        \"\"\"\r\n       \r\n        loss_D_real = loss_D_fake = 0\r\n        # Real\r\n        pred_real = netD(style)\r\n        loss_D_real = self.criterionGAN(pred_real, True)\r\n        # Fake\r\n        pred_fake = netD(fake.detach())\r\n        loss_D_fake = self.criterionGAN(pred_fake, False)\r\n\r\n        # Combined loss and calculate gradients\r\n        loss_D = (loss_D_real + loss_D_fake)*0.5        \r\n        return loss_D\r\n        \r\n    def backward_D_NCEloss(self):\r\n        \"\"\"\r\n        Calculate NCE loss for the discriminator\r\n        \"\"\"\r\n        #query_A = query_B =0.0\r\n        real_A = self.netD(self.patch_sampler(self.real_A), self.nce_layers)\r\n        real_B = self.netD(self.patch_sampler(self.real_B), self.nce_layers)\r\n        real_Ax = self.netD(self.patch_sampler(self.real_A), self.nce_layers)\r\n        real_Bx = self.netD(self.patch_sampler(self.real_B), self.nce_layers)\r\n\r\n        query_A = self.netP_style(real_A, self.nce_layers)\r\n        query_B = self.netP_style(real_B, self.nce_layers)\r\n        query_Ax = self.netP_style(real_Ax, self.nce_layers)  \r\n        query_Bx = self.netP_style(real_Bx, self.nce_layers) \r\n\r\n        num = 0\r\n        loss_D_cont_A = 0\r\n        loss_D_cont_B = 0\r\n        for x in self.nce_layers:\r\n            #self.nce_loss.dequeue_and_enqueue(query_A[num], 'real_A{:d}'.format(x))\r\n            self.nce_loss.dequeue_and_enqueue(query_B[num], 'real_B{:d}'.format(x))\r\n            #loss_D_cont_A += self.nce_loss(query_A[num], query_Ax[num], 'real_B{:d}'.format(x))\r\n            loss_D_cont_B += self.nce_loss(query_B[num], query_Bx[num], 'real_B{:d}'.format(x))\r\n            num += 1\r\n        \r\n        loss_NCE_D  = (loss_D_cont_A + loss_D_cont_B) * 0.5 * self.opt.lambda_NCE_D\r\n        return loss_NCE_D\r\n\r\n    def backward_D(self):\r\n        \"\"\"Calculate GAN loss for discriminator D\"\"\"\r\n        if self.opt.lambda_GAN_D_B > 0.0:\r\n            fake_B = self.fake_pool.query(self.fake_B)\r\n            self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, self.real_B, fake_B) * self.opt.lambda_GAN_D_B\r\n        else:\r\n            self.loss_D_B = 0\r\n\r\n        if self.opt.lambda_GAN_D_A > 0.0:\r\n            fake_A = self.fake_pool.query(self.fake_A)\r\n            self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, self.real_A, fake_A) * self.opt.lambda_GAN_D_A\r\n\r\n        else:\r\n            self.loss_D_A = 0\r\n\r\n        self.loss_D = (self.loss_D_B + self.loss_D_A) * 0.5\r\n        return self.loss_D\r\n\r\n    def compute_G_loss(self):\r\n        \"\"\"Calculate GAN and NCE loss for the generator\"\"\"\r\n        # First, G(A) should fake the discriminator\r\n        if self.opt.lambda_GAN_G_A > 0.0:\r\n            pred_fakeB = self.netD_B(self.fake_B)\r\n            self.loss_G_A = self.criterionGAN(pred_fakeB, True).mean() * self.opt.lambda_GAN_G_A\r\n        else:\r\n            self.loss_G_A = 0.0\r\n\r\n        if self.opt.lambda_GAN_G_B > 0.0:\r\n            pred_fakeA = self.netD_A(self.fake_A)\r\n            self.loss_G_B = self.criterionGAN(pred_fakeA, True).mean() * self.opt.lambda_GAN_G_B\r\n        else:\r\n            self.loss_G_B = 0.0\r\n\r\n        # Calculate the style contrastive loss.\r\n        if self.opt.lambda_NCE_G > 0.0:\r\n            real_A = self.patch_sampler(self.real_A)\r\n            real_B = self.patch_sampler(self.real_B)\r\n            fake_A = self.patch_sampler(self.fake_A)\r\n            fake_B = self.patch_sampler(self.fake_B)\r\n\r\n            key_A = self.netP_style(self.netD(real_A, self.nce_layers),self.nce_layers)\r\n            key_B = self.netP_style(self.netD(real_B, self.nce_layers),self.nce_layers)\r\n            query_A = self.netP_style(self.netD(fake_A, self.nce_layers),self.nce_layers)\r\n            query_B = self.netP_style(self.netD(fake_B, self.nce_layers),self.nce_layers)\r\n\r\n            num = 0\r\n            self.loss_G_NCE_style_A = 0\r\n            self.loss_G_NCE_style_B = 0\r\n            for x in self.nce_layers:\r\n                #self.loss_G_NCE_style_A += self.nce_loss(query_A[num], key_A[num], 'real_B{:d}'.format(x))\r\n                self.loss_G_NCE_style_B += self.nce_loss(query_B[num], key_B[num], 'real_B{:d}'.format(x))\r\n                num += 1\r\n        else:\r\n            self.loss_G_NCE_style_A = 0\r\n            self.loss_G_NCE_style_B = 0\r\n        self.loss_G_NCE_style = (self.loss_G_NCE_style_A + self.loss_G_NCE_style_B) * 0.5 * self.opt.lambda_NCE_G\r\n        \r\n        #L1 Cycle Loss\r\n        if self.opt.lambda_CYC > 0.0:\r\n            self.loss_cyc_A = self.criterionCyc(self.rec_A, self.real_A) * self.opt.lambda_CYC\r\n            self.loss_cyc_B = self.criterionCyc(self.rec_B, self.real_B) * self.opt.lambda_CYC\r\n        else:\r\n            self.loss_cyc_A = 0\r\n            self.loss_cyc_B = 0\r\n        self.loss_cyc = (self.loss_cyc_A + self.loss_cyc_B) * 0.5\r\n\r\n        self.loss_G = self.loss_cyc + self.loss_G_NCE_style + (self.loss_G_A + self.loss_G_B) * 0.5\r\n        \r\n        return self.loss_G\r\n\r\n\r\ndef init_weights(net, init_type='normal', init_gain=0.02):\r\n    \"\"\"Initialize network weights.\r\n\r\n    Parameters:\r\n        net (network)   -- network to be initialized\r\n        init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\r\n        init_gain (float)    -- scaling factor for normal, xavier and orthogonal.\r\n\r\n    We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might\r\n    work better for some applications. Feel free to try yourself.\r\n    \"\"\"\r\n    def init_func(m):  # define the initialization function\r\n        classname = m.__class__.__name__\r\n        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\r\n            if init_type == 'normal':\r\n                init.normal_(m.weight.data, 0.0, init_gain)\r\n            elif init_type == 'xavier':\r\n                init.xavier_normal_(m.weight.data, gain=init_gain)\r\n            elif init_type == 'kaiming':\r\n                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\r\n            elif init_type == 'orthogonal':\r\n                init.orthogonal_(m.weight.data, gain=init_gain)\r\n            else:\r\n                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\r\n            if hasattr(m, 'bias') and m.bias is not None:\r\n                init.constant_(m.bias.data, 0.0)\r\n        elif classname.find('BatchNorm2d') != -1:  # BatchNorm Layer's weight is not a matrix; only normal distribution applies.\r\n            init.normal_(m.weight.data, 1.0, init_gain)\r\n            init.constant_(m.bias.data, 0.0)\r\n\r\n    print('initialize network with %s' % init_type)\r\n    net.apply(init_func)  # apply the initialization function <init_func>\r\n\r\n\r\ndef init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):\r\n    \"\"\"Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights\r\n    Parameters:\r\n        net (network)      -- the network to be initialized\r\n        init_type (str)    -- the name of an initialization method: normal | xavier | kaiming | orthogonal\r\n        gain (float)       -- scaling factor for normal, xavier and orthogonal.\r\n        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2\r\n\r\n    Return an initialized network.\r\n    \"\"\"\r\n    if len(gpu_ids) > 0:\r\n        assert(torch.cuda.is_available())\r\n        net.to(gpu_ids[0])\r\n        net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs\r\n    init_weights(net, init_type, init_gain=init_gain)\r\n    return net\r\n"
  },
  {
    "path": "models/net.py",
    "content": "import torch.nn as nn\nimport torch\n\nvgg = nn.Sequential(\n    nn.Conv2d(3, 3, (1, 1)),\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(3, 64, (3, 3)),\n    nn.ReLU(),  # relu1-1\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(64, 64, (3, 3)),\n    nn.ReLU(),  # relu1-2\n    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(64, 128, (3, 3)),\n    nn.ReLU(),  # relu2-1\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(128, 128, (3, 3)),\n    nn.ReLU(),  # relu2-2\n    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(128, 256, (3, 3)),\n    nn.ReLU(),  # relu3-1\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(256, 256, (3, 3)),\n    nn.ReLU(),  # relu3-2\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(256, 256, (3, 3)),\n    nn.ReLU(),  # relu3-3\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(256, 256, (3, 3)),\n    nn.ReLU(),  # relu3-4\n    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(256, 512, (3, 3)),\n    nn.ReLU(),  # relu4-1, this is the last layer used\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(512, 512, (3, 3)),\n    nn.ReLU(),  # relu4-2\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(512, 512, (3, 3)),\n    nn.ReLU(),  # relu4-3\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(512, 512, (3, 3)),\n    nn.ReLU(),  # relu4-4\n    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(512, 512, (3, 3)),\n    nn.ReLU(),  # relu5-1\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(512, 512, (3, 3)),\n    nn.ReLU(),  # relu5-2\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(512, 512, (3, 3)),\n    nn.ReLU(),  # relu5-3\n    nn.ReflectionPad2d((1, 1, 1, 1)),\n    nn.Conv2d(512, 512, (3, 3)),\n    nn.ReLU()  # relu5-4\n)\n\n\nclass ADAIN_Encoder(nn.Module):\n    def __init__(self, encoder, gpu_ids=[]):\n        super(ADAIN_Encoder, self).__init__()\n        enc_layers = list(encoder.children())\n        self.enc_1 = nn.Sequential(*enc_layers[:4])  # input -> relu1_1 64\n        self.enc_2 = nn.Sequential(*enc_layers[4:11])  # relu1_1 -> relu2_1 128\n        self.enc_3 = nn.Sequential(*enc_layers[11:18])  # relu2_1 -> relu3_1 256\n        self.enc_4 = nn.Sequential(*enc_layers[18:31])  # relu3_1 -> relu4_1 512\n        \n        self.mse_loss = nn.MSELoss()\n\n        # fix the encoder\n        for name in ['enc_1', 'enc_2', 'enc_3', 'enc_4']:\n            for param in getattr(self, name).parameters():\n                param.requires_grad = False\n\n    # extract relu1_1, relu2_1, relu3_1, relu4_1 from input image\n    def encode_with_intermediate(self, input):\n        results = [input]\n        for i in range(4):\n            func = getattr(self, 'enc_{:d}'.format(i + 1))\n            results.append(func(results[-1]))\n        return results[1:]\n\n    def calc_mean_std(self, feat, eps=1e-5):\n        # eps is a small value added to the variance to avoid divide-by-zero.\n        size = feat.size()\n        assert (len(size) == 4)\n        N, C = size[:2]\n        feat_var = feat.view(N, C, -1).var(dim=2) + eps\n        feat_std = feat_var.sqrt().view(N, C, 1, 1)\n        feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)\n        return feat_mean, feat_std\n\n    def adain(self, content_feat, style_feat):\n        assert (content_feat.size()[:2] == style_feat.size()[:2])\n        size = content_feat.size()\n        style_mean, style_std = self.calc_mean_std(style_feat)\n        content_mean, content_std = self.calc_mean_std(content_feat)\n\n        normalized_feat = (content_feat - content_mean.expand(\n            size)) / content_std.expand(size)\n        return normalized_feat * style_std.expand(size) + style_mean.expand(size)\n\n    def forward(self, content, style, encoded_only = False):\n        style_feats = self.encode_with_intermediate(style)\n        content_feats = self.encode_with_intermediate(content)\n        if encoded_only:\n            return content_feats[-1], style_feats[-1]\n        else:\n            adain_feat = self.adain(content_feats[-1], style_feats[-1])\n            return  adain_feat\n\nclass Decoder(nn.Module):\n    def __init__(self, gpu_ids=[]):\n        super(Decoder, self).__init__()\n        decoder = [\n            nn.ReflectionPad2d((1, 1, 1, 1)),\n            nn.Conv2d(512, 256, (3, 3)),\n            nn.ReLU(), # 256\n            nn.Upsample(scale_factor=2, mode='nearest'),\n            nn.ReflectionPad2d((1, 1, 1, 1)),\n            nn.Conv2d(256, 256, (3, 3)),\n            nn.ReLU(),\n            nn.ReflectionPad2d((1, 1, 1, 1)),\n            nn.Conv2d(256, 256, (3, 3)),\n            nn.ReLU(),\n            nn.ReflectionPad2d((1, 1, 1, 1)),\n            nn.Conv2d(256, 256, (3, 3)),\n            nn.ReLU(),\n            nn.ReflectionPad2d((1, 1, 1, 1)),\n            nn.Conv2d(256, 128, (3, 3)),\n            nn.ReLU(),# 128\n            nn.Upsample(scale_factor=2, mode='nearest'),\n            nn.ReflectionPad2d((1, 1, 1, 1)),\n            nn.Conv2d(128, 128, (3, 3)),\n            nn.ReLU(),\n            nn.ReflectionPad2d((1, 1, 1, 1)),\n            nn.Conv2d(128, 64, (3, 3)),\n            nn.ReLU(),# 64\n            nn.Upsample(scale_factor=2, mode='nearest'),\n            nn.ReflectionPad2d((1, 1, 1, 1)),\n            nn.Conv2d(64, 64, (3, 3)),\n            nn.ReLU(),\n            nn.ReflectionPad2d((1, 1, 1, 1)),\n            nn.Conv2d(64, 3, (3, 3))\n            ]\n        self.decoder = nn.Sequential(*decoder)\n\n    def forward(self, adain_feat):\n        fake_image = self.decoder(adain_feat)\n\n        return fake_image\n"
  },
  {
    "path": "models/networks.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nimport functools\nfrom torch.optim import lr_scheduler\nimport numpy as np\nfrom torch.nn.parameter import Parameter\n###############################################################################\n# Helper Functions\n###############################################################################\n\n\ndef get_filter(filt_size=3):\n    if(filt_size == 1):\n        a = np.array([1., ])\n    elif(filt_size == 2):\n        a = np.array([1., 1.])\n    elif(filt_size == 3):\n        a = np.array([1., 2., 1.])\n    elif(filt_size == 4):\n        a = np.array([1., 3., 3., 1.])\n    elif(filt_size == 5):\n        a = np.array([1., 4., 6., 4., 1.])\n    elif(filt_size == 6):\n        a = np.array([1., 5., 10., 10., 5., 1.])\n    elif(filt_size == 7):\n        a = np.array([1., 6., 15., 20., 15., 6., 1.])\n\n    filt = torch.Tensor(a[:, None] * a[None, :])\n    filt = filt / torch.sum(filt)\n\n    return filt\n\n\nclass Downsample(nn.Module):\n    def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):\n        super(Downsample, self).__init__()\n        self.filt_size = filt_size\n        self.pad_off = pad_off\n        self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]\n        self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]\n        self.stride = stride\n        self.off = int((self.stride - 1) / 2.)\n        self.channels = channels\n\n        filt = get_filter(filt_size=self.filt_size)\n        self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))\n\n        self.pad = get_pad_layer(pad_type)(self.pad_sizes)\n\n    def forward(self, inp):\n        if(self.filt_size == 1):\n            if(self.pad_off == 0):\n                return inp[:, :, ::self.stride, ::self.stride]\n            else:\n                return self.pad(inp)[:, :, ::self.stride, ::self.stride]\n        else:\n            return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])\n\n\nclass Upsample2(nn.Module):\n    def __init__(self, scale_factor, mode='nearest'):\n        super().__init__()\n        self.factor = scale_factor\n        self.mode = mode\n\n    def forward(self, x):\n        return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode)\n\n\nclass Upsample(nn.Module):\n    def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):\n        super(Upsample, self).__init__()\n        self.filt_size = filt_size\n        self.filt_odd = np.mod(filt_size, 2) == 1\n        self.pad_size = int((filt_size - 1) / 2)\n        self.stride = stride\n        self.off = int((self.stride - 1) / 2.)\n        self.channels = channels\n\n        filt = get_filter(filt_size=self.filt_size) * (stride**2)\n        self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))\n\n        self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])\n\n    def forward(self, inp):\n        ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]\n        if(self.filt_odd):\n            return ret_val\n        else:\n            return ret_val[:, :, :-1, :-1]\n\n\ndef get_pad_layer(pad_type):\n    if(pad_type in ['refl', 'reflect']):\n        PadLayer = nn.ReflectionPad2d\n    elif(pad_type in ['repl', 'replicate']):\n        PadLayer = nn.ReplicationPad2d\n    elif(pad_type == 'zero'):\n        PadLayer = nn.ZeroPad2d\n    else:\n        print('Pad type [%s] not recognized' % pad_type)\n    return PadLayer\n\n\nclass Identity(nn.Module):\n    def forward(self, x):\n        return x\n\n\ndef get_norm_layer(norm_type='instance'):\n    \"\"\"Return a normalization layer\n\n    Parameters:\n        norm_type (str) -- the name of the normalization layer: batch | instance | none\n\n    For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).\n    For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.\n    \"\"\"\n    if norm_type == 'batch':\n        norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)\n    elif norm_type == 'instance':\n        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\n    elif norm_type == 'none':\n        def norm_layer(x):\n            return Identity()\n    else:\n        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)\n    return norm_layer\n\n\ndef get_scheduler(optimizer, opt):\n    \"\"\"Return a learning rate scheduler\n\n    Parameters:\n        optimizer          -- the optimizer of the network\n        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions．　\n                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine\n\n    For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs\n    and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.\n    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.\n    See https://pytorch.org/docs/stable/optim.html for more details.\n    \"\"\"\n    if opt.lr_policy == 'linear':\n        def lambda_rule(epoch):\n            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)\n            return lr_l\n        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)\n    elif opt.lr_policy == 'step':\n        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)\n    elif opt.lr_policy == 'plateau':\n        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)\n    elif opt.lr_policy == 'cosine':\n        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)\n    else:\n        return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)\n    return scheduler\n\n\ndef init_weights(net, init_type='kaiming', init_gain=0.02, debug=False):\n    \"\"\"Initialize network weights.\n\n    Parameters:\n        net (network)   -- network to be initialized\n        init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n        init_gain (float)    -- scaling factor for normal, xavier and orthogonal.\n\n    We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might\n    work better for some applications. Feel free to try yourself.\n    \"\"\"\n    def init_func(m):  # define the initialization function\n        classname = m.__class__.__name__\n        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n            if debug:\n                print(classname)\n            if init_type == 'normal':\n                init.normal_(m.weight.data, 0.0, init_gain)\n            elif init_type == 'xavier':\n                init.xavier_normal_(m.weight.data, gain=init_gain)\n            elif init_type == 'kaiming':\n                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n            elif init_type == 'orthogonal':\n                init.orthogonal_(m.weight.data, gain=init_gain)\n            else:\n                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n            if hasattr(m, 'bias') and m.bias is not None:\n                init.constant_(m.bias.data, 0.0)\n        elif classname.find('BatchNorm2d') != -1:  # BatchNorm Layer's weight is not a matrix; only normal distribution applies.\n            init.normal_(m.weight.data, 1.0, init_gain)\n            init.constant_(m.bias.data, 0.0)\n\n    net.apply(init_func)  # apply the initialization function <init_func>\n\n\ndef init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True):\n    \"\"\"Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights\n    Parameters:\n        net (network)      -- the network to be initialized\n        init_type (str)    -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n        gain (float)       -- scaling factor for normal, xavier and orthogonal.\n        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2\n\n    Return an initialized network.\n    \"\"\"\n    if len(gpu_ids) > 0:\n        assert(torch.cuda.is_available())\n        net.to(gpu_ids[0])\n        # if not amp:\n        # net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs for non-AMP training\n    if initialize_weights:\n        init_weights(net, init_type, init_gain=init_gain, debug=debug)\n    return net\n\n\ndef define_D(input_nc, ndf, netD, n_layers_D=3, image_size = 256, feature_dim = 256, max_conv_dim = 512,norm='batch', init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):\n    \"\"\"Create a discriminator\n\n    Parameters:\n        input_nc (int)     -- the number of channels in input images\n        ndf (int)          -- the number of filters in the first conv layer\n        netD (str)         -- the architecture's name: basic | n_layers | pixel\n        n_layers_D (int)   -- the number of conv layers in the discriminator; effective when netD=='n_layers'\n        norm (str)         -- the type of normalization layers used in the network.\n        init_type (str)    -- the name of the initialization method.\n        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.\n        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2\n\n    Returns a discriminator\n\n    Our current implementation provides three types of discriminators:\n        [basic]: 'PatchGAN' classifier described in the original pix2pix paper.\n        It can classify whether 70×70 overlapping patches are real or fake.\n        Such a patch-level discriminator architecture has fewer parameters\n        than a full-image discriminator and can work on arbitrarily-sized images\n        in a fully convolutional fashion.\n\n        [n_layers]: With this mode, you cna specify the number of conv layers in the discriminator\n        with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)\n\n        [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.\n        It encourages greater color diversity but has no effect on spatial statistics.\n\n    The discriminator has been initialized by <init_net>. It uses Leaky RELU for non-linearity.\n    \"\"\"\n    net = None\n    norm_layer = get_norm_layer(norm_type=norm)\n\n    if netD == 'basic':  # default PatchGAN classifier\n        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, image_size = image_size, norm_layer=norm_layer, no_antialias=no_antialias,)\n    elif netD == 'n_layers':  # more options\n        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias,)\n    elif netD == 'pixel':     # classify if each pixel is real or fake\n        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)\n    elif 'stylegan2' in netD:\n        net = StyleGAN2Discriminator(input_nc, ndf, n_layers_D, no_antialias=no_antialias, opt=opt)\n    elif netD == 'NCE':  # default PatchGAN classifier\n        net = NCEDiscriminator(input_nc, ndf, n_layers=3, image_size = image_size, feature_dim = feature_dim, max_conv_dim = max_conv_dim, norm_layer=norm_layer, no_antialias=no_antialias,)\n    else:\n        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)\n     \n    return init_net(net, init_type, init_gain, gpu_ids,\n                    initialize_weights=('stylegan2' not in netD))\n\n\n##############################################################################\n# Classes\n##############################################################################\n\nclass GANLoss(nn.Module):\n    \"\"\"Define different GAN objectives.\n\n    The GANLoss class abstracts away the need to create the target label tensor\n    that has the same size as the input.\n    \"\"\"\n\n    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):\n        \"\"\" Initialize the GANLoss class.\n\n        Parameters:\n            gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.\n            target_real_label (bool) - - label for a real image\n            target_fake_label (bool) - - label of a fake image\n\n        Note: Do not use sigmoid as the last layer of Discriminator.\n        LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\n        \"\"\"\n        super(GANLoss, self).__init__()\n        self.register_buffer('real_label', torch.tensor(target_real_label))\n        self.register_buffer('fake_label', torch.tensor(target_fake_label))\n        self.gan_mode = gan_mode\n        if gan_mode == 'lsgan':\n            self.loss = nn.MSELoss()\n        elif gan_mode == 'vanilla':\n            self.loss = nn.BCEWithLogitsLoss()\n        elif gan_mode in ['wgangp', 'nonsaturating']:\n            self.loss = None\n        elif gan_mode == \"hinge\":\n            self.loss = None\n        else:\n            raise NotImplementedError('gan mode %s not implemented' % gan_mode)\n\n    def get_target_tensor(self, prediction, target_is_real):\n        \"\"\"Create label tensors with the same size as the input.\n\n        Parameters:\n            prediction (tensor) - - tpyically the prediction from a discriminator\n            target_is_real (bool) - - if the ground truth label is for real images or fake images\n\n        Returns:\n            A label tensor filled with ground truth label, and with the size of the input\n        \"\"\"\n\n        if target_is_real:\n            target_tensor = self.real_label\n        else:\n            target_tensor = self.fake_label\n        return target_tensor.expand_as(prediction)\n\n    def __call__(self, prediction, target_is_real):\n        \"\"\"Calculate loss given Discriminator's output and grount truth labels.\n\n        Parameters:\n            prediction (tensor) - - tpyically the prediction output from a discriminator\n            target_is_real (bool) - - if the ground truth label is for real images or fake images\n\n        Returns:\n            the calculated loss.\n        \"\"\"\n        bs = prediction.size(0)\n        if self.gan_mode in ['lsgan', 'vanilla']:\n            target_tensor = self.get_target_tensor(prediction, target_is_real)\n            loss = self.loss(prediction, target_tensor)\n        elif self.gan_mode == 'wgangp':\n            if target_is_real:\n                loss = -prediction.mean()\n            else:\n                loss = prediction.mean()\n        elif self.gan_mode == 'nonsaturating':\n            if target_is_real:\n                loss = F.softplus(-prediction).view(bs, -1).mean(dim=1)\n            else:\n                loss = F.softplus(prediction).view(bs, -1).mean(dim=1)\n        elif self.gan_mode == 'hinge':\n            if target_is_real:\n                minvalue = torch.min(prediction - 1, torch.zeros(prediction.shape).to(prediction.device))\n                loss = -torch.mean(minvalue)\n            else:\n                minvalue = torch.min(-prediction - 1,torch.zeros(prediction.shape).to(prediction.device))\n                loss = -torch.mean(minvalue)\n        return loss\n\n\ndef cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):\n    \"\"\"Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028\n\n    Arguments:\n        netD (network)              -- discriminator network\n        real_data (tensor array)    -- real images\n        fake_data (tensor array)    -- generated images from the generator\n        device (str)                -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')\n        type (str)                  -- if we mix real and fake data or not [real | fake | mixed].\n        constant (float)            -- the constant used in formula ( | |gradient||_2 - constant)^2\n        lambda_gp (float)           -- weight for this loss\n\n    Returns the gradient penalty loss\n    \"\"\"\n    if lambda_gp > 0.0:\n        if type == 'real':   # either use real images, fake images, or a linear interpolation of two.\n            interpolatesv = real_data\n        elif type == 'fake':\n            interpolatesv = fake_data\n        elif type == 'mixed':\n            alpha = torch.rand(real_data.shape[0], 1, device=device)\n            alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)\n            interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)\n        else:\n            raise NotImplementedError('{} not implemented'.format(type))\n        interpolatesv.requires_grad_(True)\n        disc_interpolates = netD(interpolatesv)\n        gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,\n                                        grad_outputs=torch.ones(disc_interpolates.size()).to(device),\n                                        create_graph=True, retain_graph=True, only_inputs=True)\n        gradients = gradients[0].view(real_data.size(0), -1)  # flat the data\n        gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp        # added eps\n        return gradient_penalty, gradients\n    else:\n        return 0.0, None\n\n\nclass Normalize(nn.Module):\n\n    def __init__(self, power=2):\n        super(Normalize, self).__init__()\n        self.power = power\n\n    def forward(self, x):\n        norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)\n        out = x.div(norm + 1e-7)\n        return out\n\n##################################################################################\n# Sequential Models\n##################################################################################\n\n\nclass ResBlocks(nn.Module):\n    def __init__(self, num_blocks, dim, norm='inst', activation='relu', pad_type='zero', nz=0):\n        super(ResBlocks, self).__init__()\n        self.model = []\n        for i in range(num_blocks):\n            self.model += [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type, nz=nz)]\n        self.model = nn.Sequential(*self.model)\n\n    def forward(self, x):\n        return self.model(x)\n\n\n##################################################################################\n# Basic Blocks\n##################################################################################\ndef cat_feature(x, y):\n    y_expand = y.view(y.size(0), y.size(1), 1, 1).expand(\n        y.size(0), y.size(1), x.size(2), x.size(3))\n    x_cat = torch.cat([x, y_expand], 1)\n    return x_cat\n\nclass Conv2dBlock(nn.Module):\n    def __init__(self, input_dim, output_dim, kernel_size, stride,\n                 padding=0, norm='none', activation='relu', pad_type='zero'):\n        super(Conv2dBlock, self).__init__()\n        self.use_bias = True\n        # initialize padding\n        if pad_type == 'reflect':\n            self.pad = nn.ReflectionPad2d(padding)\n        elif pad_type == 'zero':\n            self.pad = nn.ZeroPad2d(padding)\n        else:\n            assert 0, \"Unsupported padding type: {}\".format(pad_type)\n\n        # initialize normalization\n        norm_dim = output_dim\n        if norm == 'batch':\n            self.norm = nn.BatchNorm2d(norm_dim)\n        elif norm == 'inst':\n            self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=False)\n        elif norm == 'ln':\n            self.norm = LayerNorm(norm_dim)\n        elif norm == 'none':\n            self.norm = None\n        else:\n            assert 0, \"Unsupported normalization: {}\".format(norm)\n\n        # initialize activation\n        if activation == 'relu':\n            self.activation = nn.ReLU(inplace=True)\n        elif activation == 'lrelu':\n            self.activation = nn.LeakyReLU(0.2, inplace=True)\n        elif activation == 'prelu':\n            self.activation = nn.PReLU()\n        elif activation == 'selu':\n            self.activation = nn.SELU(inplace=True)\n        elif activation == 'tanh':\n            self.activation = nn.Tanh()\n        elif activation == 'none':\n            self.activation = None\n        else:\n            assert 0, \"Unsupported activation: {}\".format(activation)\n\n        # initialize convolution\n        self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)\n\n    def forward(self, x):\n        x = self.conv(self.pad(x))\n        if self.norm:\n            x = self.norm(x)\n        if self.activation:\n            x = self.activation(x)\n        return x\n\n\nclass LinearBlock(nn.Module):\n    def __init__(self, input_dim, output_dim, norm='none', activation='relu'):\n        super(LinearBlock, self).__init__()\n        use_bias = True\n        # initialize fully connected layer\n        self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)\n\n        # initialize normalization\n        norm_dim = output_dim\n        if norm == 'batch':\n            self.norm = nn.BatchNorm1d(norm_dim)\n        elif norm == 'inst':\n            self.norm = nn.InstanceNorm1d(norm_dim)\n        elif norm == 'ln':\n            self.norm = LayerNorm(norm_dim)\n        elif norm == 'none':\n            self.norm = None\n        else:\n            assert 0, \"Unsupported normalization: {}\".format(norm)\n\n        # initialize activation\n        if activation == 'relu':\n            self.activation = nn.ReLU(inplace=True)\n        elif activation == 'lrelu':\n            self.activation = nn.LeakyReLU(0.2, inplace=True)\n        elif activation == 'prelu':\n            self.activation = nn.PReLU()\n        elif activation == 'selu':\n            self.activation = nn.SELU(inplace=True)\n        elif activation == 'tanh':\n            self.activation = nn.Tanh()\n        elif activation == 'none':\n            self.activation = None\n        else:\n            assert 0, \"Unsupported activation: {}\".format(activation)\n\n    def forward(self, x):\n        out = self.fc(x)\n        if self.norm:\n            out = self.norm(out)\n        if self.activation:\n            out = self.activation(out)\n        return out\n\n##################################################################################\n# Normalization layers\n##################################################################################\n\n\nclass LayerNorm(nn.Module):\n    def __init__(self, num_features, eps=1e-5, affine=True):\n        super(LayerNorm, self).__init__()\n        self.num_features = num_features\n        self.affine = affine\n        self.eps = eps\n\n        if self.affine:\n            self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())\n            self.beta = nn.Parameter(torch.zeros(num_features))\n\n    def forward(self, x):\n        shape = [-1] + [1] * (x.dim() - 1)\n        mean = x.view(x.size(0), -1).mean(1).view(*shape)\n        std = x.view(x.size(0), -1).std(1).view(*shape)\n        x = (x - mean) / (std + self.eps)\n\n        if self.affine:\n            shape = [1, -1] + [1] * (x.dim() - 2)\n            x = x * self.gamma.view(*shape) + self.beta.view(*shape)\n        return x\n\nclass NLayerDiscriminator(nn.Module):\n    \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n    def __init__(self, input_nc, ndf=64, n_layers=3, image_size = 256,norm_layer=nn.BatchNorm2d, no_antialias=False):\n        \"\"\"Construct a PatchGAN discriminator\n\n        Parameters:\n            input_nc (int)  -- the number of channels in input images\n            ndf (int)       -- the number of filters in the last conv layer\n            n_layers (int)  -- the number of conv layers in the discriminator\n            norm_layer      -- normalization layer\n        \"\"\"\n        super(NLayerDiscriminator, self).__init__()\n        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters\n            use_bias = norm_layer.func == nn.InstanceNorm2d\n        else:\n            use_bias = norm_layer == nn.InstanceNorm2d\n\n        kw = 4\n        padw = 1\n        if(no_antialias):\n            sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n        else:\n            sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]\n        nf_mult = 1\n        nf_mult_prev = 1\n        for n in range(1, n_layers):  # gradually increase the number of filters\n            nf_mult_prev = nf_mult\n            nf_mult = min(2 ** n, 8)\n            if(no_antialias):\n                sequence += [\n                    nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n                    norm_layer(ndf * nf_mult),\n                    nn.LeakyReLU(0.2, True)\n                ]\n            else:\n                sequence += [\n                    nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n                    norm_layer(ndf * nf_mult),\n                    nn.LeakyReLU(0.2, True),\n                    Downsample(ndf * nf_mult)]\n\n        nf_mult_prev = nf_mult\n        nf_mult = min(2 ** n_layers, 8)\n        sequence += [\n            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n            norm_layer(ndf * nf_mult),\n            nn.LeakyReLU(0.2, True)\n        ]\n\n        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]  # output 1 channel prediction map\n        self.model = nn.Sequential(*sequence)\n\n\n    def forward(self, input):\n        \"\"\"Standard forward.\"\"\"\n        logit = self.model(input) \n        return logit\n\nclass PixelDiscriminator(nn.Module):\n    \"\"\"Defines a 1x1 PatchGAN discriminator (pixelGAN)\"\"\"\n\n    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):\n        \"\"\"Construct a 1x1 PatchGAN discriminator\n\n        Parameters:\n            input_nc (int)  -- the number of channels in input images\n            ndf (int)       -- the number of filters in the last conv layer\n            norm_layer      -- normalization layer\n        \"\"\"\n        super(PixelDiscriminator, self).__init__()\n        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters\n            use_bias = norm_layer.func == nn.InstanceNorm2d\n        else:\n            use_bias = norm_layer == nn.InstanceNorm2d\n\n        self.net = [\n            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),\n            nn.LeakyReLU(0.2, True),\n            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),\n            norm_layer(ndf * 2),\n            nn.LeakyReLU(0.2, True),\n            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]\n\n        self.net = nn.Sequential(*self.net)\n\n    def forward(self, input):\n        \"\"\"Standard forward.\"\"\"\n        return self.net(input)\n\nclass PatchDiscriminator(NLayerDiscriminator):\n    \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):\n        super().__init__(input_nc, ndf, 2, norm_layer, no_antialias)\n\n    def forward(self, input):\n        B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3)\n        size = 16\n        Y = H // size\n        X = W // size\n        input = input.view(B, C, Y, size, X, size)\n        input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size)\n        return super().forward(input)\n\nclass GroupedChannelNorm(nn.Module):\n    def __init__(self, num_groups):\n        super().__init__()\n        self.num_groups = num_groups\n\n    def forward(self, x):\n        shape = list(x.shape)\n        new_shape = [shape[0], self.num_groups, shape[1] // self.num_groups] + shape[2:]\n        x = x.view(*new_shape)\n        mean = x.mean(dim=2, keepdim=True)\n        std = x.std(dim=2, keepdim=True)\n        x_norm = (x - mean) / (std + 1e-7)\n        return x_norm.view(*shape)\n"
  },
  {
    "path": "models/torch_utils.py",
    "content": "import os\nimport random\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\n\n@torch.no_grad()\ndef concat_all_gather(tensor, world_size):\n    tensors_gather = [\n        torch.ones_like(tensor) for _ in range(world_size)]\n    torch.distributed.all_gather(tensors_gather, tensor, async_op=False)\n\n    output = torch.cat(tensors_gather, dim=0)\n    return output\n\n\ndef get_rank(group=None):\n    try:\n        return torch.distributed.get_rank(group)\n    except:\n        return 0\n\n\ndef get_world_size(group=None):\n    try:\n        return torch.distributed.get_world_size(group)\n    except:\n        return 1\n\n\ndef kaiming_init(mod):\n    if isinstance(mod, (nn.Conv2d, nn.Linear)):\n        if mod.weight.requires_grad:\n            nn.init.kaiming_normal_(mod.weight, a=0.2, mode=\"fan_in\")\n        if mod.bias is not None:\n            nn.init.zeros_(mod.bias)\n\n\ndef set_seed(seed):\n    random.seed(seed)\n    np.random.seed(seed)\n    torch.manual_seed(seed)\n    if torch.cuda.is_available():\n        torch.cuda.manual_seed(seed)\n        torch.cuda.manual_seed_all(seed)\n    os.environ[\"PYTHONHASHSEED\"] = str(seed)\n\n\n@torch.no_grad()\ndef update_average(net, net_ema, m=0.999):\n    net = net.module if hasattr(net, \"module\") else net\n    for p, p_ema in zip(net.parameters(), net_ema.parameters()):\n        p_ema.data.mul_(m).add_((1.0 - m) * p.detach().data)\n\n\ndef warmup_learning_rate(optimizer, lr, train_step, warmup_step):\n    if train_step > warmup_step or warmup_step == 0:\n        return lr\n    ratio = min(1.0, train_step/warmup_step)\n    lr_w = ratio * lr\n    for param_group in optimizer.param_groups:\n        param_group[\"lr\"] = lr_w\n    return lr_w\n"
  },
  {
    "path": "options/__init__.py",
    "content": "\"\"\"This package options includes option modules: training options, test options, and basic options (used in both training and test).\"\"\"\n"
  },
  {
    "path": "options/base_options.py",
    "content": "import argparse\nimport os\nfrom util import util\nimport torch\nimport models\nimport data\n\n\nclass BaseOptions():\n    \"\"\"This class defines options used during both training and test time.\n\n    It also implements several helper functions such as parsing, printing, and saving the options.\n    It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.\n    \"\"\"\n\n    def __init__(self, cmd_line=None):\n        \"\"\"Reset the class; indicates the class hasn't been initailized\"\"\"\n        self.initialized = False\n        self.cmd_line = None\n        if cmd_line is not None:\n            self.cmd_line = cmd_line.split()\n\n    def initialize(self, parser):\n        \"\"\"Define the common options that are used in both training and test.\"\"\"\n        # basic parameters\n        parser.add_argument('--dataroot', default='placeholder', help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')\n        parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')\n        parser.add_argument('--easy_label', type=str, default='experiment_name', help='Interpretable name')\n        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')\n        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')\n        # model parameters\n        parser.add_argument('--model', type=str, default='cast', help='chooses which model to use.')\n        parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')\n        parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')\n        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')\n        parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')\n        parser.add_argument('--netD', type=str, default='basic', choices=['basic', 'n_layers', 'pixel', 'patch', 'tilestylegan2', 'stylegan2'], help='specify discriminator architecture. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')\n        parser.add_argument('--netG', type=str, default='resnet_9blocks', choices=['resnet_9blocks', 'resnet_6blocks', 'unet_256', 'unet_128', 'stylegan2', 'smallstylegan2', 'resnet_cat', 'cluit', 'SA2_2'], help='specify generator architecture')\n        parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')\n        parser.add_argument('--normG', type=str, default='instance', choices=['instance', 'batch', 'none'], help='instance normalization or batch normalization for G')\n        parser.add_argument('--normD', type=str, default='instance', choices=['instance', 'batch', 'none'], help='instance normalization or batch normalization for D')\n        parser.add_argument('--init_type', type=str, default='xavier', choices=['normal', 'xavier', 'kaiming', 'orthogonal'], help='network initialization')\n        parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')\n        parser.add_argument('--no_dropout', type=util.str2bool, nargs='?', const=True, default=True,\n                            help='no dropout for the generator')\n        parser.add_argument('--no_antialias', action='store_true', help='if specified, use stride=2 convs instead of antialiased-downsampling (sad)')\n        parser.add_argument('--no_antialias_up', action='store_true', help='if specified, use [upconv(learned filter)] instead of [upconv(hard-coded [1,3,3,1] filter), conv]')\n        # Model parameters.\n        parser.add_argument(\"--style-dim\", default=256, type=int)\n        parser.add_argument(\"--feature_dim\", default=256, type=int)\n        parser.add_argument(\"--hypersphere-dim\", default=256, type=int)\n        parser.add_argument(\"--queue-size\", default=4096, type=int)\n        parser.add_argument(\"--temperature\", default=0.07, type=float)\n        parser.add_argument(\"--max_conv_dim\", default=512, type=int)\n        # dataset parameters\n        parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')\n        parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')\n        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')\n        parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')\n        parser.add_argument('--batch_size', type=int, default=1, help='input batch size')\n        parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')\n        parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')\n        parser.add_argument('--max_dataset_size', type=int, default=float(\"inf\"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')\n        parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')\n        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')\n        parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')\n        parser.add_argument('--random_scale_max', type=float, default=3.0,\n                            help='(used for single image translation) Randomly scale the image by the specified factor as data augmentation.')\n       # additional parameters\n        parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')\n        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')\n        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')\n\n        self.initialized = True\n        return parser\n\n    def gather_options(self):\n        \"\"\"Initialize our parser with basic options(only once).\n        Add additional model-specific and dataset-specific options.\n        These options are defined in the <modify_commandline_options> function\n        in model and dataset classes.\n        \"\"\"\n        if not self.initialized:  # check if it has been initialized\n            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n            parser = self.initialize(parser)\n\n        # get the basic options\n        if self.cmd_line is None:\n            opt, _ = parser.parse_known_args()\n        else:\n            opt, _ = parser.parse_known_args(self.cmd_line)\n\n        # modify model-related parser options\n        model_name = opt.model\n        model_option_setter = models.get_option_setter(model_name)\n        parser = model_option_setter(parser, self.isTrain)\n        if self.cmd_line is None:\n            opt, _ = parser.parse_known_args()  # parse again with new defaults\n        else:\n            opt, _ = parser.parse_known_args(self.cmd_line)  # parse again with new defaults\n\n        # modify dataset-related parser options\n        dataset_name = opt.dataset_mode\n        dataset_option_setter = data.get_option_setter(dataset_name)\n        parser = dataset_option_setter(parser, self.isTrain)\n\n        # save and return the parser\n        self.parser = parser\n        if self.cmd_line is None:\n            return parser.parse_args()\n        else:\n            return parser.parse_args(self.cmd_line)\n\n    def print_options(self, opt):\n        \"\"\"Print and save options\n\n        It will print both current options and default values(if different).\n        It will save options into a text file / [checkpoints_dir] / opt.txt\n        \"\"\"\n        message = ''\n        message += '----------------- Options ---------------\\n'\n        for k, v in sorted(vars(opt).items()):\n            comment = ''\n            default = self.parser.get_default(k)\n            if v != default:\n                comment = '\\t[default: %s]' % str(default)\n            message += '{:>25}: {:<30}{}\\n'.format(str(k), str(v), comment)\n        message += '----------------- End -------------------'\n        print(message)\n\n        # save to the disk\n        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)\n        util.mkdirs(expr_dir)\n        file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))\n        try:\n            with open(file_name, 'wt') as opt_file:\n                opt_file.write(message)\n                opt_file.write('\\n')\n        except PermissionError as error:\n            print(\"permission error {}\".format(error))\n            pass\n\n    def parse(self):\n        \"\"\"Parse our options, create checkpoints directory suffix, and set up gpu device.\"\"\"\n        opt = self.gather_options()\n        opt.isTrain = self.isTrain   # train or test\n\n        # process opt.suffix\n        if opt.suffix:\n            suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''\n            opt.name = opt.name + suffix\n\n        self.print_options(opt)\n\n        # set gpu ids\n        str_ids = opt.gpu_ids.split(',')\n        opt.gpu_ids = []\n        for str_id in str_ids:\n            id = int(str_id)\n            if id >= 0:\n                opt.gpu_ids.append(id)\n        if len(opt.gpu_ids) > 0:\n            torch.cuda.set_device(opt.gpu_ids[0])\n\n        self.opt = opt\n        return self.opt\n"
  },
  {
    "path": "options/test_options.py",
    "content": "from .base_options import BaseOptions\n\n\nclass TestOptions(BaseOptions):\n    \"\"\"This class includes test options.\n\n    It also includes shared options defined in BaseOptions.\n    \"\"\"\n\n    def initialize(self, parser):\n        parser = BaseOptions.initialize(self, parser)  # define shared options\n        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')\n        parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')\n        # Dropout and Batchnorm has different behavioir during training and test.\n        parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')\n        # Set the default = 5000 to test the whole test set.\n        parser.add_argument('--num_test', type=int, default=5000, help='how many test images to run')\n\n        # To avoid cropping, the load_size should be the same as crop_size\n        parser.set_defaults(load_size=parser.get_default('crop_size'))\n        self.isTrain = False\n        return parser\n"
  },
  {
    "path": "options/train_options.py",
    "content": "from .base_options import BaseOptions\n\n\nclass TrainOptions(BaseOptions):\n    \"\"\"This class includes training options.\n\n    It also includes shared options defined in BaseOptions.\n    \"\"\"\n\n    def initialize(self, parser):\n        parser = BaseOptions.initialize(self, parser)\n        # visdom and HTML visualization parameters\n        parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')\n        parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')\n        parser.add_argument('--display_id', type=int, default=None, help='window id of the web display. Default is random window id')\n        parser.add_argument('--display_server', type=str, default=\"http://localhost\", help='visdom server of the web display')\n        parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is \"main\")')\n        parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')\n        parser.add_argument('--update_html_freq', type=int, default=100, help='frequency of saving training results to html')\n        parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')\n        parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')\n        # network saving and loading parameters\n        parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')\n        parser.add_argument('--save_epoch_freq', type=int, default=50, help='frequency of saving checkpoints at the end of epochs')\n        parser.add_argument('--evaluation_freq', type=int, default=5000, help='evaluation freq')\n        parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')\n        parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')\n        parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')\n        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')\n        parser.add_argument('--pretrained_name', type=str, default=None, help='resume training from another checkpoint')\n        # training parameters\n        parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs with the initial learning rate')\n        parser.add_argument('--n_epochs_decay', type=int, default=200, help='number of epochs to linearly decay learning rate to zero')\n        parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')\n        parser.add_argument('--beta2', type=float, default=0.999, help='momentum term of adam')\n        parser.add_argument('--lr_G', type=float, default=0.0001, help='initial learning rate for adam')\n        parser.add_argument('--lr_D', type=float, default=0.0001, help='initial learning rate for adam')\n        parser.add_argument('--lr_D_NCE', type=float, default=0.0001, help='initial learning rate for adam')\n        parser.add_argument('--gan_mode', type=str, default='hinge', help='the type of GAN objective. [vanilla| lsgan | wgangp| hinge]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')\n        parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')\n        parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')\n        parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')\n\n        self.isTrain = True\n        return parser\n"
  },
  {
    "path": "requirements.txt",
    "content": "torch>=1.6.0\ntorchvision>=0.7.0\ndominate>=2.4.0\nvisdom>=0.1.8.8\npackaging\nGPUtil>=1.4.0\nscipy\nPillow>=6.1.0\nnumpy>=1.16.4\nkornia\n"
  },
  {
    "path": "test.py",
    "content": "import os\nfrom options.test_options import TestOptions\nfrom data import create_dataset\nfrom models import create_model\nfrom util.visualizer import save_images\nfrom util import html\nimport util.util as util\n\n\nif __name__ == '__main__':\n    opt = TestOptions().parse()  # get test options\n    # hard-code some parameters for test\n    opt.num_threads = 0   # test code only supports num_threads = 1\n    opt.batch_size = 1    # test code only supports batch_size = 1\n    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.\n    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.\n    opt.display_id = -1   # no visdom display; the test code saves the results to a HTML file.\n    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options\n    # train_dataset = create_dataset(util.copyconf(opt, phase=\"train\"))\n    model = create_model(opt)      # create a model given opt.model and other options\n    # create a webpage for viewing the results\n    web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch))  # define the website directory\n    print('creating web directory', web_dir)\n    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))\n\n    for i, data in enumerate(dataset):\n        if i == 0:\n            model.setup(opt)               # regular setup: load and print networks; create schedulers\n            model.parallelize()\n            if opt.eval:\n                model.eval()\n        if i >= opt.num_test:  # only apply our model to opt.num_test images.\n            break\n        model.set_input(data)  # unpack data from data loader\n        model.test()           # run inference\n        visuals = model.get_current_visuals()  # get image results\n        img_path = model.get_image_paths()     # get image paths\n        if i % 5 == 0:  # save images to an HTML file\n            print('processing (%04d)-th image... %s' % (i, img_path))\n        save_images(webpage, visuals, img_path, width=opt.display_winsize)\n    webpage.save()  # save the HTML\n"
  },
  {
    "path": "train.py",
    "content": "import time\nimport torch\nfrom options.train_options import TrainOptions\nfrom data import create_dataset\nfrom models import create_model\nfrom util.visualizer import Visualizer\n\n\nif __name__ == '__main__':\n    opt = TrainOptions().parse()   # get training options\n    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options\n    dataset_size = len(dataset)    # get the number of images in the dataset.\n\n    model = create_model(opt)      # create a model given opt.model and other options\n    print('The number of training images = %d' % dataset_size)\n\n    visualizer = Visualizer(opt)   # create a visualizer that display/save images and plots\n    opt.visualizer = visualizer\n    total_iters = 0                # the total number of training iterations\n\n    optimize_time = 0.1\n    times = []\n    for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):    # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>\n        epoch_start_time = time.time()  # timer for entire epoch\n        iter_data_time = time.time()    # timer for data loading per iteration\n        epoch_iter = 0                  # the number of training iterations in current epoch, reset to 0 every epoch\n        visualizer.reset()              # reset the visualizer: make sure it saves the results to HTML at least once every epoch\n\n        dataset.set_epoch(epoch)\n        for i, data in enumerate(dataset):  # inner loop within one epoch\n            iter_start_time = time.time()  # timer for computation per iteration\n            if total_iters % opt.print_freq == 0:\n                t_data = iter_start_time - iter_data_time\n\n            batch_size = data[\"A\"].size(0)\n            total_iters += batch_size\n            epoch_iter += batch_size\n            if len(opt.gpu_ids) > 0:\n                torch.cuda.synchronize()\n            optimize_start_time = time.time()\n            if epoch == opt.epoch_count and i == 0:\n                model.setup(opt)               # regular setup: load and print networks; create schedulers\n                model.parallelize()\n            model.set_input(data)  # unpack data from dataset and apply preprocessing\n            model.optimize_parameters()   # calculate loss functions, get gradients, update network weights\n            if len(opt.gpu_ids) > 0:\n                torch.cuda.synchronize()\n            optimize_time = (time.time() - optimize_start_time) / batch_size * 0.005 + 0.995 * optimize_time\n\n            if total_iters % opt.display_freq == 0:   # display images on visdom and save images to a HTML file\n                save_result = total_iters % opt.update_html_freq == 0\n                model.compute_visuals()\n                visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)\n\n            if total_iters % opt.print_freq == 0:    # print training losses and save logging information to the disk\n                losses = model.get_current_losses()\n                visualizer.print_current_losses(epoch, epoch_iter, losses, optimize_time, t_data)\n                if opt.display_id is None or opt.display_id > 0:\n                    visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)\n\n            if total_iters % opt.save_latest_freq == 0:   # cache our latest model every <save_latest_freq> iterations\n                print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))\n                print(opt.name)  # it's useful to occasionally show the experiment name on console\n                save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'\n                model.save_networks(save_suffix)\n\n            iter_data_time = time.time()\n\n        if epoch % opt.save_epoch_freq == 0:              # cache our model every <save_epoch_freq> epochs\n            print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))\n            model.save_networks(str(epoch)+'_'+str(total_iters))\n            model.save_networks(epoch)\n\n        print('End of epoch %d / %d \\t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))\n        model.update_learning_rate()                     # update learning rates at the end of every epoch.\n"
  },
  {
    "path": "util/__init__.py",
    "content": "\"\"\"This package includes a miscellaneous collection of useful helper functions.\"\"\"\nfrom util import *\n"
  },
  {
    "path": "util/get_data.py",
    "content": "from __future__ import print_function\nimport os\nimport tarfile\nimport requests\nfrom warnings import warn\nfrom zipfile import ZipFile\nfrom bs4 import BeautifulSoup\nfrom os.path import abspath, isdir, join, basename\n\n\nclass GetData(object):\n    \"\"\"A Python script for downloading CycleGAN or pix2pix datasets.\n\n    Parameters:\n        technique (str) -- One of: 'cyclegan' or 'pix2pix'.\n        verbose (bool)  -- If True, print additional information.\n\n    Examples:\n        >>> from util.get_data import GetData\n        >>> gd = GetData(technique='cyclegan')\n        >>> new_data_path = gd.get(save_path='./datasets')  # options will be displayed.\n\n    Alternatively, You can use bash scripts: 'scripts/download_pix2pix_model.sh'\n    and 'scripts/download_cyclegan_model.sh'.\n    \"\"\"\n\n    def __init__(self, technique='cyclegan', verbose=True):\n        url_dict = {\n            'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/',\n            'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets'\n        }\n        self.url = url_dict.get(technique.lower())\n        self._verbose = verbose\n\n    def _print(self, text):\n        if self._verbose:\n            print(text)\n\n    @staticmethod\n    def _get_options(r):\n        soup = BeautifulSoup(r.text, 'lxml')\n        options = [h.text for h in soup.find_all('a', href=True)\n                   if h.text.endswith(('.zip', 'tar.gz'))]\n        return options\n\n    def _present_options(self):\n        r = requests.get(self.url)\n        options = self._get_options(r)\n        print('Options:\\n')\n        for i, o in enumerate(options):\n            print(\"{0}: {1}\".format(i, o))\n        choice = input(\"\\nPlease enter the number of the \"\n                       \"dataset above you wish to download:\")\n        return options[int(choice)]\n\n    def _download_data(self, dataset_url, save_path):\n        if not isdir(save_path):\n            os.makedirs(save_path)\n\n        base = basename(dataset_url)\n        temp_save_path = join(save_path, base)\n\n        with open(temp_save_path, \"wb\") as f:\n            r = requests.get(dataset_url)\n            f.write(r.content)\n\n        if base.endswith('.tar.gz'):\n            obj = tarfile.open(temp_save_path)\n        elif base.endswith('.zip'):\n            obj = ZipFile(temp_save_path, 'r')\n        else:\n            raise ValueError(\"Unknown File Type: {0}.\".format(base))\n\n        self._print(\"Unpacking Data...\")\n        obj.extractall(save_path)\n        obj.close()\n        os.remove(temp_save_path)\n\n    def get(self, save_path, dataset=None):\n        \"\"\"\n\n        Download a dataset.\n\n        Parameters:\n            save_path (str) -- A directory to save the data to.\n            dataset (str)   -- (optional). A specific dataset to download.\n                            Note: this must include the file extension.\n                            If None, options will be presented for you\n                            to choose from.\n\n        Returns:\n            save_path_full (str) -- the absolute path to the downloaded data.\n\n        \"\"\"\n        if dataset is None:\n            selected_dataset = self._present_options()\n        else:\n            selected_dataset = dataset\n\n        save_path_full = join(save_path, selected_dataset.split('.')[0])\n\n        if isdir(save_path_full):\n            warn(\"\\n'{0}' already exists. Voiding Download.\".format(\n                save_path_full))\n        else:\n            self._print('Downloading Data...')\n            url = \"{0}/{1}\".format(self.url, selected_dataset)\n            self._download_data(url, save_path=save_path)\n\n        return abspath(save_path_full)\n"
  },
  {
    "path": "util/html.py",
    "content": "import dominate\nfrom dominate.tags import meta, h3, table, tr, td, p, a, img, br\nimport os\n\n\nclass HTML:\n    \"\"\"This HTML class allows us to save images and write texts into a single HTML file.\n\n     It consists of functions such as <add_header> (add a text header to the HTML file),\n     <add_images> (add a row of images to the HTML file), and <save> (save the HTML to the disk).\n     It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.\n    \"\"\"\n\n    def __init__(self, web_dir, title, refresh=0):\n        \"\"\"Initialize the HTML classes\n\n        Parameters:\n            web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir/images/\n            title (str)   -- the webpage name\n            refresh (int) -- how often the website refresh itself; if 0; no refreshing\n        \"\"\"\n        self.title = title\n        self.web_dir = web_dir\n        self.img_dir = os.path.join(self.web_dir, 'images')\n        if not os.path.exists(self.web_dir):\n            os.makedirs(self.web_dir)\n        if not os.path.exists(self.img_dir):\n            os.makedirs(self.img_dir)\n\n        self.doc = dominate.document(title=title)\n        if refresh > 0:\n            with self.doc.head:\n                meta(http_equiv=\"refresh\", content=str(refresh))\n\n    def get_image_dir(self):\n        \"\"\"Return the directory that stores images\"\"\"\n        return self.img_dir\n\n    def add_header(self, text):\n        \"\"\"Insert a header to the HTML file\n\n        Parameters:\n            text (str) -- the header text\n        \"\"\"\n        with self.doc:\n            h3(text)\n\n    def add_images(self, ims, txts, links, width=400):\n        \"\"\"add images to the HTML file\n\n        Parameters:\n            ims (str list)   -- a list of image paths\n            txts (str list)  -- a list of image names shown on the website\n            links (str list) --  a list of hyperref links; when you click an image, it will redirect you to a new page\n        \"\"\"\n        self.t = table(border=1, style=\"table-layout: fixed;\")  # Insert a table\n        self.doc.add(self.t)\n        with self.t:\n            with tr():\n                for im, txt, link in zip(ims, txts, links):\n                    with td(style=\"word-wrap: break-word;\", halign=\"center\", valign=\"top\"):\n                        with p():\n                            with a(href=os.path.join('images', link)):\n                                img(style=\"width:%dpx\" % width, src=os.path.join('images', im))\n                            br()\n                            p(txt)\n\n    def save(self):\n        \"\"\"save the current content to the HMTL file\"\"\"\n        html_file = '%s/index.html' % self.web_dir\n        f = open(html_file, 'wt')\n        f.write(self.doc.render())\n        f.close()\n\n\nif __name__ == '__main__':  # we show an example usage here.\n    html = HTML('web/', 'test_html')\n    html.add_header('hello world')\n\n    ims, txts, links = [], [], []\n    for n in range(4):\n        ims.append('image_%d.png' % n)\n        txts.append('text_%d' % n)\n        links.append('image_%d.png' % n)\n    html.add_images(ims, txts, links)\n    html.save()\n"
  },
  {
    "path": "util/image_pool.py",
    "content": "import random\nimport torch\n\n\nclass ImagePool():\n    \"\"\"This class implements an image buffer that stores previously generated images.\n\n    This buffer enables us to update discriminators using a history of generated images\n    rather than the ones produced by the latest generators.\n    \"\"\"\n\n    def __init__(self, pool_size):\n        \"\"\"Initialize the ImagePool class\n\n        Parameters:\n            pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created\n        \"\"\"\n        self.pool_size = pool_size\n        if self.pool_size > 0:  # create an empty pool\n            self.num_imgs = 0\n            self.images = []\n\n    def query(self, images):\n        \"\"\"Return an image from the pool.\n\n        Parameters:\n            images: the latest generated images from the generator\n\n        Returns images from the buffer.\n\n        By 50/100, the buffer will return input images.\n        By 50/100, the buffer will return images previously stored in the buffer,\n        and insert the current images to the buffer.\n        \"\"\"\n        if self.pool_size == 0:  # if the buffer size is 0, do nothing\n            return images\n        return_images = []\n        for image in images:\n            image = torch.unsqueeze(image.data, 0)\n            if self.num_imgs < self.pool_size:   # if the buffer is not full; keep inserting current images to the buffer\n                self.num_imgs = self.num_imgs + 1\n                self.images.append(image)\n                return_images.append(image)\n            else:\n                p = random.uniform(0, 1)\n                if p > 0.5:  # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer\n                    random_id = random.randint(0, self.pool_size - 1)  # randint is inclusive\n                    tmp = self.images[random_id].clone()\n                    self.images[random_id] = image\n                    return_images.append(tmp)\n                else:       # by another 50% chance, the buffer will return the current image\n                    return_images.append(image)\n        return_images = torch.cat(return_images, 0)   # collect all the images and return\n        return return_images\n"
  },
  {
    "path": "util/util.py",
    "content": "\"\"\"This module contains simple helper functions \"\"\"\nfrom __future__ import print_function\nimport torch\nimport numpy as np\nfrom PIL import Image\nimport os\nimport importlib\nimport argparse\nfrom argparse import Namespace\nimport torchvision\n\n\ndef str2bool(v):\n    if isinstance(v, bool):\n        return v\n    if v.lower() in ('yes', 'true', 't', 'y', '1'):\n        return True\n    elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n        return False\n    else:\n        raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef copyconf(default_opt, **kwargs):\n    conf = Namespace(**vars(default_opt))\n    for key in kwargs:\n        setattr(conf, key, kwargs[key])\n    return conf\n\n\ndef find_class_in_module(target_cls_name, module):\n    target_cls_name = target_cls_name.replace('_', '').lower()\n    clslib = importlib.import_module(module)\n    cls = None\n    for name, clsobj in clslib.__dict__.items():\n        if name.lower() == target_cls_name:\n            cls = clsobj\n\n    assert cls is not None, \"In %s, there should be a class whose name matches %s in lowercase without underscore(_)\" % (module, target_cls_name)\n\n    return cls\n\n\ndef tensor2im(input_image, imtype=np.uint8):\n    \"\"\"\"Converts a Tensor array into a numpy image array.\n\n    Parameters:\n        input_image (tensor) --  the input image tensor array\n        imtype (type)        --  the desired type of the converted numpy array\n    \"\"\"\n    if not isinstance(input_image, np.ndarray):\n        if isinstance(input_image, torch.Tensor):  # get the data from a variable\n            image_tensor = input_image.data\n        else:\n            return input_image\n        image_numpy = image_tensor[0].clamp(-1.0, 1.0).cpu().float().numpy()  # convert it into a numpy array\n        if image_numpy.shape[0] == 1:  # grayscale to RGB\n            image_numpy = np.tile(image_numpy, (3, 1, 1))\n        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: tranpose and scaling\n    else:  # if it is a numpy array, do nothing\n        image_numpy = input_image\n    return image_numpy.astype(imtype)\n\n\ndef diagnose_network(net, name='network'):\n    \"\"\"Calculate and print the mean of average absolute(gradients)\n\n    Parameters:\n        net (torch network) -- Torch network\n        name (str) -- the name of the network\n    \"\"\"\n    mean = 0.0\n    count = 0\n    for param in net.parameters():\n        if param.grad is not None:\n            mean += torch.mean(torch.abs(param.grad.data))\n            count += 1\n    if count > 0:\n        mean = mean / count\n    print(name)\n    print(mean)\n\n\ndef save_image(image_numpy, image_path, aspect_ratio=1.0):\n    \"\"\"Save a numpy image to the disk\n\n    Parameters:\n        image_numpy (numpy array) -- input numpy array\n        image_path (str)          -- the path of the image\n    \"\"\"\n\n    image_pil = Image.fromarray(image_numpy)\n    h, w, _ = image_numpy.shape\n\n    if aspect_ratio is None:\n        pass\n    elif aspect_ratio > 1.0:\n        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)\n    elif aspect_ratio < 1.0:\n        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)\n    image_pil.save(image_path)\n\n\ndef print_numpy(x, val=True, shp=False):\n    \"\"\"Print the mean, min, max, median, std, and size of a numpy array\n\n    Parameters:\n        val (bool) -- if print the values of the numpy array\n        shp (bool) -- if print the shape of the numpy array\n    \"\"\"\n    x = x.astype(np.float64)\n    if shp:\n        print('shape,', x.shape)\n    if val:\n        x = x.flatten()\n        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n            np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))\n\n\ndef mkdirs(paths):\n    \"\"\"create empty directories if they don't exist\n\n    Parameters:\n        paths (str list) -- a list of directory paths\n    \"\"\"\n    if isinstance(paths, list) and not isinstance(paths, str):\n        for path in paths:\n            mkdir(path)\n    else:\n        mkdir(paths)\n\n\ndef mkdir(path):\n    \"\"\"create a single empty directory if it didn't exist\n\n    Parameters:\n        path (str) -- a single directory path\n    \"\"\"\n    if not os.path.exists(path):\n        os.makedirs(path)\n\n\ndef correct_resize_label(t, size):\n    device = t.device\n    t = t.detach().cpu()\n    resized = []\n    for i in range(t.size(0)):\n        one_t = t[i, :1]\n        one_np = np.transpose(one_t.numpy().astype(np.uint8), (1, 2, 0))\n        one_np = one_np[:, :, 0]\n        one_image = Image.fromarray(one_np).resize(size, Image.NEAREST)\n        resized_t = torch.from_numpy(np.array(one_image)).long()\n        resized.append(resized_t)\n    return torch.stack(resized, dim=0).to(device)\n\n\ndef correct_resize(t, size, mode=Image.BICUBIC):\n    device = t.device\n    t = t.detach().cpu()\n    resized = []\n    for i in range(t.size(0)):\n        one_t = t[i:i + 1]\n        one_image = Image.fromarray(tensor2im(one_t)).resize(size, Image.BICUBIC)\n        resized_t = torchvision.transforms.functional.to_tensor(one_image) * 2 - 1.0\n        resized.append(resized_t)\n    return torch.stack(resized, dim=0).to(device)\n"
  },
  {
    "path": "util/visualizer.py",
    "content": "import numpy as np\nimport os\nimport sys\nimport ntpath\nimport time\nfrom . import util, html\nfrom subprocess import Popen, PIPE\n\nif sys.version_info[0] == 2:\n    VisdomExceptionBase = Exception\nelse:\n    VisdomExceptionBase = ConnectionError\n\n\ndef save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):\n    \"\"\"Save images to the disk.\n\n    Parameters:\n        webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details)\n        visuals (OrderedDict)    -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs\n        image_path (str)         -- the string is used to create image paths\n        aspect_ratio (float)     -- the aspect ratio of saved images\n        width (int)              -- the images will be resized to width x width\n\n    This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.\n    \"\"\"\n    image_dir = webpage.get_image_dir()\n    short_path = ntpath.basename(image_path[0])\n    name = os.path.splitext(short_path)[0]\n\n    webpage.add_header(name)\n    ims, txts, links = [], [], []\n\n    for label, im_data in visuals.items():\n        im = util.tensor2im(im_data)\n        image_name = '%s_%s.png' % (name, label)\n        os.makedirs(os.path.join(image_dir, label), exist_ok=True)\n        save_path = os.path.join(image_dir, image_name)\n        util.save_image(im, save_path, aspect_ratio=aspect_ratio)\n        ims.append(image_name)\n        txts.append(label)\n        links.append(image_name)\n    webpage.add_images(ims, txts, links, width=width)\n\n\nclass Visualizer():\n    \"\"\"This class includes several functions that can display/save images and print/save logging information.\n\n    It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.\n    \"\"\"\n\n    def __init__(self, opt):\n        \"\"\"Initialize the Visualizer class\n\n        Parameters:\n            opt -- stores all the experiment flags; needs to be a subclass of BaseOptions\n        Step 1: Cache the training/test options\n        Step 2: connect to a visdom server\n        Step 3: create an HTML object for saveing HTML filters\n        Step 4: create a logging file to store training losses\n        \"\"\"\n        self.opt = opt  # cache the option\n        if opt.display_id is None:\n            self.display_id = np.random.randint(100000) * 10  # just a random display id\n        else:\n            self.display_id = opt.display_id\n        self.use_html = opt.isTrain and not opt.no_html\n        self.win_size = opt.display_winsize\n        self.name = opt.name\n        self.port = opt.display_port\n        self.saved = False\n        if self.display_id > 0:  # connect to a visdom server given <display_port> and <display_server>\n            import visdom\n            self.plot_data = {}\n            self.ncols = opt.display_ncols\n            if \"tensorboard_base_url\" not in os.environ:\n                self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)\n            else:\n                self.vis = visdom.Visdom(port=2004,\n                                         base_url=os.environ['tensorboard_base_url'] + '/visdom')\n            if not self.vis.check_connection():\n                self.create_visdom_connections()\n\n        if self.use_html:  # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/\n            self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')\n            self.img_dir = os.path.join(self.web_dir, 'images')\n            print('create web directory %s...' % self.web_dir)\n            util.mkdirs([self.web_dir, self.img_dir])\n        # create a logging file to store training losses\n        self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')\n        with open(self.log_name, \"a\") as log_file:\n            now = time.strftime(\"%c\")\n            log_file.write('================ Training Loss (%s) ================\\n' % now)\n\n    def reset(self):\n        \"\"\"Reset the self.saved status\"\"\"\n        self.saved = False\n\n    def create_visdom_connections(self):\n        \"\"\"If the program could not connect to Visdom server, this function will start a new server at port < self.port > \"\"\"\n        cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port\n        print('\\n\\nCould not connect to Visdom server. \\n Trying to start a server....')\n        print('Command: %s' % cmd)\n        Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)\n\n    def display_current_results(self, visuals, epoch, save_result):\n        \"\"\"Display current results on visdom; save current results to an HTML file.\n\n        Parameters:\n            visuals (OrderedDict) - - dictionary of images to display or save\n            epoch (int) - - the current epoch\n            save_result (bool) - - if save the current results to an HTML file\n        \"\"\"\n        if self.display_id > 0:  # show images in the browser using visdom\n            ncols = self.ncols\n            if ncols > 0:        # show all the images in one visdom panel\n                ncols = min(ncols, len(visuals))\n                h, w = next(iter(visuals.values())).shape[:2]\n                table_css = \"\"\"<style>\n                        table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}\n                        table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}\n                        </style>\"\"\" % (w, h)  # create a table css\n                # create a table of images.\n                title = self.name\n                label_html = ''\n                label_html_row = ''\n                images = []\n                idx = 0\n                for label, image in visuals.items():\n                    image_numpy = util.tensor2im(image)\n                    label_html_row += '<td>%s</td>' % label\n                    images.append(image_numpy.transpose([2, 0, 1]))\n                    idx += 1\n                    if idx % ncols == 0:\n                        label_html += '<tr>%s</tr>' % label_html_row\n                        label_html_row = ''\n                white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255\n                while idx % ncols != 0:\n                    images.append(white_image)\n                    label_html_row += '<td></td>'\n                    idx += 1\n                if label_html_row != '':\n                    label_html += '<tr>%s</tr>' % label_html_row\n                try:\n                    self.vis.images(images, ncols, 2, self.display_id + 1,\n                                    None, dict(title=title + ' images'))\n                    label_html = '<table>%s</table>' % label_html\n                    self.vis.text(table_css + label_html, win=self.display_id + 2,\n                                  opts=dict(title=title + ' labels'))\n                except VisdomExceptionBase:\n                    self.create_visdom_connections()\n\n            else:     # show each image in a separate visdom panel;\n                idx = 1\n                try:\n                    for label, image in visuals.items():\n                        image_numpy = util.tensor2im(image)\n                        self.vis.image(\n                            image_numpy.transpose([2, 0, 1]),\n                            self.display_id + idx,\n                            None,\n                            dict(title=label)\n                        )\n                        idx += 1\n                except VisdomExceptionBase:\n                    self.create_visdom_connections()\n\n        if self.use_html and (save_result or not self.saved):  # save images to an HTML file if they haven't been saved.\n            self.saved = True\n            # save images to the disk\n            for label, image in visuals.items():\n                image_numpy = util.tensor2im(image)\n                img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))\n                util.save_image(image_numpy, img_path)\n\n            # update website\n            webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=0)\n            for n in range(epoch, 0, -1):\n                webpage.add_header('epoch [%d]' % n)\n                ims, txts, links = [], [], []\n\n                for label, image_numpy in visuals.items():\n                    image_numpy = util.tensor2im(image)\n                    img_path = 'epoch%.3d_%s.png' % (n, label)\n                    ims.append(img_path)\n                    txts.append(label)\n                    links.append(img_path)\n                webpage.add_images(ims, txts, links, width=self.win_size)\n            webpage.save()\n\n    def plot_current_losses(self, epoch, counter_ratio, losses):\n        \"\"\"display the current losses on visdom display: dictionary of error labels and values\n\n        Parameters:\n            epoch (int)           -- current epoch\n            counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1\n            losses (OrderedDict)  -- training losses stored in the format of (name, float) pairs\n        \"\"\"\n        if len(losses) == 0:\n            return\n\n        plot_name = '_'.join(list(losses.keys()))\n\n        if plot_name not in self.plot_data:\n            self.plot_data[plot_name] = {'X': [], 'Y': [], 'legend': list(losses.keys())}\n\n        plot_data = self.plot_data[plot_name]\n        plot_id = list(self.plot_data.keys()).index(plot_name)\n\n        plot_data['X'].append(epoch + counter_ratio)\n        plot_data['Y'].append([losses[k] for k in plot_data['legend']])\n        try:\n            self.vis.line(\n                X=np.stack([np.array(plot_data['X'])] * len(plot_data['legend']), 1),\n                Y=np.array(plot_data['Y']),\n                opts={\n                    'title': self.name,\n                    'legend': plot_data['legend'],\n                    'xlabel': 'epoch',\n                    'ylabel': 'loss'},\n                win=self.display_id - plot_id)\n        except VisdomExceptionBase:\n            self.create_visdom_connections()\n\n    # losses: same format as |losses| of plot_current_losses\n    def print_current_losses(self, epoch, iters, losses, t_comp, t_data):\n        \"\"\"print current losses on console; also save the losses to the disk\n\n        Parameters:\n            epoch (int) -- current epoch\n            iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)\n            losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n            t_comp (float) -- computational time per data point (normalized by batch_size)\n            t_data (float) -- data loading time per data point (normalized by batch_size)\n        \"\"\"\n        message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)\n        for k, v in losses.items():\n            message += '%s: %.3f ' % (k, v)\n\n        print(message)  # print the message\n        with open(self.log_name, \"a\") as log_file:\n            log_file.write('%s\\n' % message)  # save the message\n"
  }
]