[
  {
    "path": ".gitignore",
    "content": "__pycache__/\nexperiments/\nresults/\nresult/\nresult\nlog/\nlog\n\ndata_samples/\ncheckpoints/\n\n*.pkl\n*.pt\n*.pth\n*.jpg\n*.png\n*.state\n*.event\n"
  },
  {
    "path": "README.md",
    "content": "This is an offical implementation of the CVPR2022's paper [Learning the Degradation Distribution for Blind Image Super-Resolution](https://arxiv.org/abs/2203.04962). This repo also contains the implementations of many other blind SR methods in [config](codes/config/), including CinGAN, CycleSR, DSGAN-SR, etc.\n\nIf you find this repo useful for your work, please cite our paper:\n```\n@inproceedings{PDMSR,\n  title={Learning the Degradation Distribution for Blind Image Super-Resolution},\n  author={Zhengxiong Luo and Yan Huang and and Shang Li and Liang Wang and Tieniu Tan},\n  booktitle={CVPR},\n  year={2022}\n}\n```\n\nThe codes are built on the basis of [BasicSR](https://github.com/xinntao/BasicSR).\n\n## Dependences\n1. lpips (pip install --user lpips)\n2. matlab (to support the evaluation of NIQE). The details about installing a matlab API for python can refer to [here](https://ww2.mathworks.cn/help/matlab/matlab_external/install-the-matlab-engine-for-python.html)\n\n## Datasets\nThe datasets in NTIRE2017 and NTIRE2018 can be downloaded from [here](https://data.vision.ee.ethz.ch/cvl/DIV2K/). The datasets in NTIRE2020 can be downloaded from the [competition site](https://competitions.codalab.org/competitions/22220).\n\n## Start up\nWe provide the checkpoints in in [Google drive](https://drive.google.com/drive/folders/1bVMGaGF7yLyQhM0xmRVMD2SolOtgLvxO?usp=sharing) and [BaiduYun](https://pan.baidu.com/s/1BcYcX0yCS-3-6XqT4BgYAQ?pwd=ovmw)(password: ovmw). Please download them into the [checkpoints](checkpoints/) directoty. To get a quick start:\n\n```bash\ncd codes/config/PDM-SR/\npython3 inference.py --opt options/test/2020Track2.yml\n```"
  },
  {
    "path": "codes/config/BSRGAN/README.md",
    "content": "This repo currently only supports the test of [BSRGAN](https://arxiv.org/abs/2103.14006). The training related codes may be added in the future. "
  },
  {
    "path": "codes/config/BSRGAN/archs/__init__.py",
    "content": "import importlib\nimport os\nimport os.path as osp\n\nfrom utils.registry import ARCH_REGISTRY, LOSS_REGISTRY, LR_SCHEDULER_REGISTRY\n\narch_folder = osp.dirname(osp.abspath(__file__))\narch_filenames = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(arch_folder)\n    if v.endswith(\".py\")\n]\n# import all the arch modules\n_arch_modules = [\n    importlib.import_module(f\"archs.{file_name}\") for file_name in arch_filenames\n]\n\n\ndef build_network(net_opt):\n    which_network = net_opt[\"which_network\"]\n    net = ARCH_REGISTRY.get(which_network)(**net_opt[\"setting\"])\n    return net\n\n\ndef build_loss(loss_opt):\n    loss_type = loss_opt.pop(\"type\")\n    loss = LOSS_REGISTRY.get(loss_type)(**loss_opt)\n    return loss\n\ndef build_scheduler(optimizer, scheduler_opt):\n    scheduler_type = scheduler_opt.pop(\"type\")\n    scheduler = LR_SCHEDULER_REGISTRY.get(scheduler_type)(optimizer, **scheduler_opt)\n    return scheduler\n"
  },
  {
    "path": "codes/config/BSRGAN/archs/discriminator.py",
    "content": "import torch\nimport torch.nn as nn\nimport torchvision\nimport functools\n\nfrom utils.registry import ARCH_REGISTRY\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG128(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512 * 4 * 4, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG32(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = 
nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass PatchGANDiscriminator(nn.Module):\n    \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n    def __init__(self, in_c, nf, nb, stride=1, norm_layer=nn.InstanceNorm2d):\n        \"\"\"Construct a PatchGAN discriminator\n\n        Parameters:\n            input_nc (int)  -- the number of channels in input images\n            ndf (int)       -- the number of filters in the last conv layer\n            n_layers (int)  -- the number of conv layers in the discriminator\n            norm_layer      -- normalization layer\n        \"\"\"\n        super().__init__()\n        if (\n            type(norm_layer) == functools.partial\n        ):  # no need to use bias as BatchNorm2d has affine parameters\n            use_bias = norm_layer.func == nn.InstanceNorm2d\n        else:\n            use_bias = norm_layer == nn.InstanceNorm2d\n\n        kw = 3\n        padw = 1\n        sequence = [\n            nn.Conv2d(in_c, nf, kernel_size=kw, stride=1, padding=padw),\n            nn.LeakyReLU(0.2, True),\n        ]\n        nf_mult = 1\n        nf_mult_prev = 1\n        for n in range(1, nb):  # gradually increase the number of filters\n            nf_mult_prev = nf_mult\n            nf_mult = min(2 ** n, 8)\n            sequence += [\n                nn.Conv2d(\n                    nf * nf_mult_prev,\n                    nf * nf_mult,\n                    kernel_size=kw,\n                    stride=stride,\n                    padding=padw,\n                    bias=use_bias,\n                ),\n                norm_layer(nf * nf_mult),\n                nn.LeakyReLU(0.2, True),\n            ]\n\n        nf_mult_prev = nf_mult\n        nf_mult = min(2 ** nb, 8)\n        sequence += [\n            nn.Conv2d(\n                nf * nf_mult_prev,\n                nf * nf_mult,\n                kernel_size=kw,\n                stride=1,\n                padding=padw,\n                bias=use_bias,\n            ),\n            norm_layer(nf * nf_mult),\n            nn.LeakyReLU(0.2, True),\n        ]\n\n        sequence += [\n            nn.Conv2d(nf * nf_mult, nf, kernel_size=kw, stride=1, padding=padw)\n        ]  # output 1 
channel prediction map\n        self.model = nn.Sequential(*sequence)\n\n    def forward(self, input):\n        \"\"\"Standard forward.\"\"\"\n        return self.model(input)\n"
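\n\nif __name__ == \"__main__\":\n    # shape sanity checks (a sketch; nf=64 and the input sizes are assumptions)\n    print(DiscriminatorVGG128(in_nc=3, nf=64)(torch.randn(2, 3, 128, 128)).shape)  # torch.Size([2, 1])\n    print(DiscriminatorVGG32(in_nc=3, nf=64)(torch.randn(2, 3, 32, 32)).shape)  # torch.Size([2, 1])\n    # the PatchGAN variant keeps spatial structure and returns an nf-channel map\n    print(PatchGANDiscriminator(in_c=3, nf=64, nb=3, stride=2)(torch.randn(2, 3, 128, 128)).shape)  # torch.Size([2, 64, 32, 32])\n"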
  },
  {
    "path": "codes/config/BSRGAN/archs/edsr.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(\n        self,\n        rgb_range,\n        rgb_mean=(0.4488, 0.4371, 0.4040),\n        rgb_std=(1.0, 1.0, 1.0),\n        sign=-1,\n    ):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\n\n\n@ARCH_REGISTRY.register()\nclass EDSR(nn.Module):\n    def __init__(self, nb, nf, res_scale=0.1, upscale=4, conv=default_conv):\n        super(EDSR, self).__init__()\n\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        scale = upscale\n       
 act = nn.ReLU(True)\n        # url_name = 'r{}f{}x{}'.format(nb, nf, upscale)\n        # if url_name in url:\n        #     self.url = url[url_name]\n        # else:\n        #     self.url = None\n        self.sub_mean = MeanShift(255.0, sign=-1)\n        self.add_mean = MeanShift(255.0, sign=1)\n\n        # define head module\n        m_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, n_feats, kernel_size, act=act, res_scale=res_scale)\n            for _ in range(n_resblocks)\n        ]\n        m_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = nn.Sequential(*m_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x * 255.0)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = self.tail(res)\n        x = self.add_mean(x) / 255.0\n\n        return x\n"
  },
  {
    "path": "codes/config/BSRGAN/archs/loss.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport lpips as lp\n\nfrom utils.registry import LOSS_REGISTRY\n\nfrom .vgg import VGGFeatureExtractor\n\n\n@LOSS_REGISTRY.register()\nclass GaussGuided(nn.Module):\n    def __init__(self, ksize, sigma):\n        super().__init__()\n\n        ax = torch.arange(0, ksize) - ksize//2\n        xx, yy = torch.meshgrid(ax, ax)\n        dis = (xx ** 2 + yy ** 2)\n        dis = torch.exp(-dis / sigma ** 2)\n        dis = dis / dis.sum()\n\n        self.register_buffer(\"gauss\", dis.view(1, ksize**2, 1, 1))\n    \n    def forward(self, kernel):\n\n        return F.mse_loss(self.gauss, kernel)\n\n@LOSS_REGISTRY.register()\nclass PerceptualLossLPIPS(nn.Module):\n    def __init__(self, net=\"alex\", normalize=True):\n        super().__init__()\n        self.fn = lp.LPIPS(net=net, spatial=True)\n        for p in self.fn.parameters():\n            p.requires_grad = False\n        \n        self.normalize = normalize\n    \n    def forward(self, res, ref):\n        return self.fn(res, ref, normalize=self.normalize).mean(), None\n\n\n@LOSS_REGISTRY.register()\nclass MSELoss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.mse_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass L1Loss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.l1_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass GANLoss(nn.Module):\n    \"\"\"Define GAN loss.\n    Args:\n        gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.\n        real_label_val (float): The value for real label. Default: 1.0.\n        fake_label_val (float): The value for fake label. Default: 0.0.\n    \"\"\"\n\n    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):\n        super(GANLoss, self).__init__()\n        self.gan_type = gan_type\n        self.real_label_val = real_label_val\n        self.fake_label_val = fake_label_val\n\n        if self.gan_type == \"vanilla\":\n            self.loss = nn.BCEWithLogitsLoss()\n        elif self.gan_type == \"lsgan\":\n            self.loss = nn.MSELoss()\n        elif self.gan_type == \"wgan\":\n            self.loss = self._wgan_loss\n        elif self.gan_type == \"wgan_softplus\":\n            self.loss = self._wgan_softplus_loss\n        elif self.gan_type == \"hinge\":\n            self.loss = nn.ReLU()\n        else:\n            raise NotImplementedError(f\"GAN type {self.gan_type} is not implemented.\")\n\n    def _wgan_loss(self, input, target):\n        \"\"\"wgan loss.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return -input.mean() if target else input.mean()\n\n    def _wgan_softplus_loss(self, input, target):\n        \"\"\"wgan loss with soft plus. 
softplus is a smooth approximation to the\n        ReLU function.\n        In StyleGAN2, it is called:\n            Logistic loss for discriminator;\n            Non-saturating loss for generator.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return F.softplus(-input).mean() if target else F.softplus(input).mean()\n\n    def get_target_label(self, input, target_is_real):\n        \"\"\"Get target label.\n        Args:\n            input (Tensor): Input tensor.\n            target_is_real (bool): Whether the target is real or fake.\n        Returns:\n            (bool | Tensor): Target tensor. Return bool for wgan, otherwise,\n                return Tensor.\n        \"\"\"\n\n        if self.gan_type in [\"wgan\", \"wgan_softplus\"]:\n            return target_is_real\n        target_val = self.real_label_val if target_is_real else self.fake_label_val\n        return input.new_ones(input.size()) * target_val\n\n    def forward(self, input, target_is_real, is_disc=False):\n        \"\"\"\n        Args:\n            input (Tensor): The input for the loss module, i.e., the network\n                prediction.\n            target_is_real (bool): Whether the targe is real or fake.\n            is_disc (bool): Whether the loss for discriminators or not.\n                Default: False.\n        Returns:\n            Tensor: GAN loss value.\n        \"\"\"\n        target_label = self.get_target_label(input, target_is_real)\n        if self.gan_type == \"hinge\":\n            if is_disc:  # for discriminators in hinge-gan\n                input = -input if target_is_real else input\n                loss = self.loss(1 + input).mean()\n            else:  # for generators in hinge-gan\n                loss = -input.mean()\n        else:  # other gan types\n            loss = self.loss(input, target_label)\n\n        return loss\n\n\n@LOSS_REGISTRY.register()\nclass PerceptualLoss(nn.Module):\n    \"\"\"Perceptual loss with commonly used style loss.\n    Args:\n        layer_weights (dict): The weight for each layer of vgg feature.\n            Here is an example: {'conv5_4': 1.}, which means the conv5_4\n            feature layer (before relu5_4) will be extracted with weight\n            1.0 in calculting losses.\n        vgg_type (str): The type of vgg network used as feature extractor.\n            Default: 'vgg19'.\n        use_input_norm (bool):  If True, normalize the input image in vgg.\n            Default: True.\n        range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].\n            Default: False.\n        perceptual_weight (float): If `perceptual_weight > 0`, the perceptual\n            loss will be calculated and the loss will multiplied by the\n            weight. Default: 1.0.\n        style_weight (float): If `style_weight > 0`, the style loss will be\n            calculated and the loss will multiplied by the weight.\n            Default: 0.\n        criterion (str): Criterion used for perceptual loss. 
Default: 'l1'.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_weights,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        perceptual_weight=1.0,\n        style_weight=0.0,\n        criterion=\"l1\",\n    ):\n        super(PerceptualLoss, self).__init__()\n        self.perceptual_weight = perceptual_weight\n        self.style_weight = style_weight\n        self.layer_weights = layer_weights\n        self.vgg = VGGFeatureExtractor(\n            layer_name_list=list(layer_weights.keys()),\n            vgg_type=vgg_type,\n            use_input_norm=use_input_norm,\n            range_norm=range_norm,\n        )\n\n        self.criterion_type = criterion\n        if self.criterion_type == \"l1\":\n            self.criterion = torch.nn.L1Loss()\n        elif self.criterion_type == \"l2\":\n            self.criterion = torch.nn.L2loss()\n        elif self.criterion_type == \"fro\":\n            self.criterion = None\n        else:\n            raise NotImplementedError(f\"{criterion} criterion has not been supported.\")\n\n    def forward(self, x, gt):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, c, h, w).\n            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        # extract vgg features\n        x_features = self.vgg(x)\n        gt_features = self.vgg(gt.detach())\n\n        # calculate perceptual loss\n        if self.perceptual_weight > 0:\n            percep_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    percep_loss += (\n                        torch.norm(x_features[k] - gt_features[k], p=\"fro\")\n                        * self.layer_weights[k]\n                    )\n                else:\n                    percep_loss += (\n                        self.criterion(x_features[k], gt_features[k])\n                        * self.layer_weights[k]\n                    )\n            percep_loss *= self.perceptual_weight\n        else:\n            percep_loss = None\n\n        # calculate style loss\n        if self.style_weight > 0:\n            style_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    style_loss += (\n                        torch.norm(\n                            self._gram_mat(x_features[k])\n                            - self._gram_mat(gt_features[k]),\n                            p=\"fro\",\n                        )\n                        * self.layer_weights[k]\n                    )\n                else:\n                    style_loss += (\n                        self.criterion(\n                            self._gram_mat(x_features[k]),\n                            self._gram_mat(gt_features[k]),\n                        )\n                        * self.layer_weights[k]\n                    )\n            style_loss *= self.style_weight\n        else:\n            style_loss = None\n\n        return percep_loss, style_loss\n\n    def _gram_mat(self, x):\n        \"\"\"Calculate Gram matrix.\n        Args:\n            x (torch.Tensor): Tensor with shape of (n, c, h, w).\n        Returns:\n            torch.Tensor: Gram matrix.\n        \"\"\"\n        n, c, h, w = x.size()\n        features = x.view(n, c, w * h)\n        features_t = features.transpose(1, 2)\n        gram = features.bmm(features_t) / (c * h * 
w)\n        return gram\n\n\n@LOSS_REGISTRY.register()\nclass CharbonnierLoss(nn.Module):\n    \"\"\"Charbonnier Loss (L1)\"\"\"\n\n    def __init__(self, eps=1e-6):\n        super(CharbonnierLoss, self).__init__()\n        self.eps = eps\n\n    def forward(self, x, y):\n        diff = x - y\n        loss = torch.mean(torch.sqrt(diff * diff + self.eps))\n        return loss\n\n\nclass GradientPenaltyLoss(nn.Module):\n    def __init__(self, device=torch.device(\"cpu\")):\n        super(GradientPenaltyLoss, self).__init__()\n        self.register_buffer(\"grad_outputs\", torch.Tensor())\n        self.grad_outputs = self.grad_outputs.to(device)\n\n    def get_grad_outputs(self, input):\n        if self.grad_outputs.size() != input.size():\n            self.grad_outputs.resize_(input.size()).fill_(1.0)\n        return self.grad_outputs\n\n    def forward(self, interp, interp_crit):\n        grad_outputs = self.get_grad_outputs(interp_crit)\n        grad_interp = torch.autograd.grad(\n            outputs=interp_crit,\n            inputs=interp,\n            grad_outputs=grad_outputs,\n            create_graph=True,\n            retain_graph=True,\n            only_inputs=True,\n        )[0]\n        grad_interp = grad_interp.view(grad_interp.size(0), -1)\n        grad_interp_norm = grad_interp.norm(2, dim=1)\n\n        loss = ((grad_interp_norm - 1) ** 2).mean()\n        return loss\n"
  },
  {
    "path": "codes/config/BSRGAN/archs/lr_scheduler.py",
    "content": "import math\nfrom collections import Counter, defaultdict\n\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\n\nfrom utils.registry import LR_SCHEDULER_REGISTRY\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass LinearDecayLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        decay_prop,\n        total_steps,\n        last_epoch=-1,\n    ):\n        self.decay_prop = decay_prop\n        self.total_steps = total_steps\n\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n\n        return [\n            group[\"initial_lr\"]\n            * (1 - (self.last_epoch + 1) * self.decay_prop / self.total_steps)\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass MultiStepRestartLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        milestones,\n        restarts=None,\n        weights=None,\n        gamma=0.1,\n        clear_state=False,\n        last_epoch=-1,\n    ):\n        self.milestones = Counter(milestones)\n        self.gamma = gamma\n        self.clear_state = clear_state\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch in self.restarts:\n            if self.clear_state:\n                self.optimizer.state = defaultdict(dict)\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        if self.last_epoch not in self.milestones:\n            return [group[\"lr\"] for group in self.optimizer.param_groups]\n        return [\n            group[\"lr\"] * self.gamma ** self.milestones[self.last_epoch]\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass CosineAnnealingRestartLR(_LRScheduler):\n    def __init__(\n        self, optimizer, T_period, restarts=None, weights=None, eta_min=0, last_epoch=-1\n    ):\n        self.T_period = T_period\n        self.T_max = self.T_period[0]  # current T period\n        self.eta_min = eta_min\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        self.last_restart = 0\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch == 0:\n            return self.base_lrs\n        elif self.last_epoch in self.restarts:\n            self.last_restart = self.last_epoch\n            self.T_max = self.T_period[self.restarts.index(self.last_epoch) + 1]\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        elif (self.last_epoch - self.last_restart - 1 - self.T_max) % (\n            2 * self.T_max\n        ) == 0:\n            return [\n                group[\"lr\"]\n                + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2\n                for base_lr, group in 
zip(self.base_lrs, self.optimizer.param_groups)\n            ]\n        return [\n            (1 + math.cos(math.pi * (self.last_epoch - self.last_restart) / self.T_max))\n            / (\n                1\n                + math.cos(\n                    math.pi * ((self.last_epoch - self.last_restart) - 1) / self.T_max\n                )\n            )\n            * (group[\"lr\"] - self.eta_min)\n            + self.eta_min\n            for group in self.optimizer.param_groups\n        ]\n"
  },
  {
    "path": "codes/config/BSRGAN/archs/module_util.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\n\ndef initialize_weights(net_l, scale=1):\n    if not isinstance(net_l, list):\n        net_l = [net_l]\n    for net in net_l:\n        for m in net.modules():\n            if isinstance(m, nn.Conv2d):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale  # for residual block\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                init.constant_(m.weight, 1)\n                init.constant_(m.bias.data, 0.0)\n\n\ndef make_layer(block, n_layers):\n    layers = []\n    for _ in range(n_layers):\n        layers.append(block())\n    return nn.Sequential(*layers)\n\n\nclass ResidualBlock_noBN(nn.Module):\n    \"\"\"Residual block w/o BN\n    ---Conv-ReLU-Conv-+-\n     |________________|\n    \"\"\"\n\n    def __init__(self, nf=64):\n        super(ResidualBlock_noBN, self).__init__()\n        self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n\n        # initialization\n        initialize_weights([self.conv1, self.conv2], 0.1)\n\n    def forward(self, x):\n        identity = x\n        out = F.relu(self.conv1(x), inplace=True)\n        out = self.conv2(out)\n        return identity + out\n\n\ndef flow_warp(x, flow, interp_mode=\"bilinear\", padding_mode=\"zeros\"):\n    \"\"\"Warp an image or feature map with optical flow\n    Args:\n        x (Tensor): size (N, C, H, W)\n        flow (Tensor): size (N, H, W, 2), normal value\n        interp_mode (str): 'nearest' or 'bilinear'\n        padding_mode (str): 'zeros' or 'border' or 'reflection'\n\n    Returns:\n        Tensor: warped image or feature map\n    \"\"\"\n    assert x.size()[-2:] == flow.size()[1:3]\n    B, C, H, W = x.size()\n    # mesh grid\n    grid_y, grid_x = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))\n    grid = torch.stack((grid_x, grid_y), 2).float()  # W(x), H(y), 2\n    grid.requires_grad = False\n    grid = grid.type_as(x)\n    vgrid = grid + flow\n    # scale grid to [-1,1]\n    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(W - 1, 1) - 1.0\n    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(H - 1, 1) - 1.0\n    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)\n    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode)\n    return output\n"
  },
  {
    "path": "codes/config/BSRGAN/archs/rcan.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\nclass CALayer(nn.Module):\n    def __init__(self, channel, reduction=16):\n        super(CALayer, self).__init__()\n        # global average pooling: feature --> point\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        # feature channel downscale and upscale --> channel weight\n        self.conv_du = nn.Sequential(\n            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),\n            
nn.ReLU(inplace=True),\n            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),\n            nn.Sigmoid(),\n        )\n\n    def forward(self, x):\n        y = self.avg_pool(x)\n        y = self.conv_du(y)\n        return x * y\n\n\n## Residual Channel Attention Block (RCAB)\nclass RCAB(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        reduction,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(RCAB, self).__init__()\n        modules_body = []\n        for i in range(2):\n            modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                modules_body.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                modules_body.append(act)\n        modules_body.append(CALayer(n_feat, reduction))\n        self.body = nn.Sequential(*modules_body)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x)\n        # res = self.body(x).mul(self.res_scale)\n        res += x\n        return res\n\n\n## Residual Group (RG)\nclass ResidualGroup(nn.Module):\n    def __init__(\n        self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks\n    ):\n        super(ResidualGroup, self).__init__()\n        modules_body = []\n        modules_body = [\n            RCAB(\n                conv,\n                n_feat,\n                kernel_size,\n                reduction,\n                bias=True,\n                bn=False,\n                act=nn.ReLU(True),\n                res_scale=1,\n            )\n            for _ in range(n_resblocks)\n        ]\n        modules_body.append(conv(n_feat, n_feat, kernel_size))\n        self.body = nn.Sequential(*modules_body)\n\n    def forward(self, x):\n        res = self.body(x)\n        res += x\n        return res\n\n\n## Residual Channel Attention Network (RCAN)\n@ARCH_REGISTRY.register()\nclass RCAN(nn.Module):\n    def __init__(self, ng, nb, nf, reduction=16, upscale=4, conv=default_conv):\n        super(RCAN, self).__init__()\n\n        n_resgroups = ng\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        reduction = reduction\n        scale = upscale\n\n        act = nn.ReLU(True)\n\n        # RGB mean for DIV2K\n        rgb_mean = (0.4488, 0.4371, 0.4040)\n        rgb_std = (1.0, 1.0, 1.0)\n        self.sub_mean = MeanShift(1.0, rgb_mean, rgb_std, -1)\n\n        # define head module\n        modules_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        modules_body = [\n            ResidualGroup(\n                conv,\n                n_feats,\n                kernel_size,\n                reduction,\n                act=act,\n                res_scale=1.0,\n                n_resblocks=nb,\n            )\n            for _ in range(ng)\n        ]\n\n        modules_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        modules_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.add_mean = MeanShift(1.0, rgb_mean, rgb_std, 1)\n\n        self.head = nn.Sequential(*modules_head)\n        self.body = nn.Sequential(*modules_body)\n        self.tail = nn.Sequential(*modules_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = 
self.tail(res)\n        x = self.add_mean(x)\n\n        return x\n\n    def load_state_dict(self, state_dict, strict=False):\n        own_state = self.state_dict()\n        for name, param in state_dict.items():\n            if name in own_state:\n                if isinstance(param, nn.Parameter):\n                    param = param.data\n                try:\n                    own_state[name].copy_(param)\n                except Exception:\n                    if name.find(\"tail\") >= 0:\n                        print(\"Replacing the pre-trained upsampler with a new one...\")\n                    else:\n                        raise RuntimeError(\n                            \"While copying the parameter named {}, \"\n                            \"whose dimensions in the model are {} and \"\n                            \"whose dimensions in the checkpoint are {}.\".format(\n                                name, own_state[name].size(), param.size()\n                            )\n                        )\n            elif strict:\n                if name.find(\"tail\") == -1:\n                    raise KeyError('unexpected key \"{}\" in state_dict'.format(name))\n\n        if strict:\n            missing = set(own_state.keys()) - set(state_dict.keys())\n            if len(missing) > 0:\n                raise KeyError('missing keys in state_dict: \"{}\"'.format(missing))\n
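\n\nif __name__ == \"__main__\":\n    # quick shape check (a sketch; ng/nb/nf follow the common RCAN defaults)\n    net = RCAN(ng=10, nb=20, nf=64, upscale=4)\n    print(net(torch.rand(1, 3, 24, 24)).shape)  # torch.Size([1, 3, 96, 96])\n"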
  },
  {
    "path": "codes/config/BSRGAN/archs/rrdb.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\nclass ResidualDenseBlock_5C(nn.Module):\n    def __init__(self, nf=64, gc=32, bias=True):\n        super(ResidualDenseBlock_5C, self).__init__()\n        # gc: growth channel, i.e. intermediate channels\n        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)\n        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)\n        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1\n        )\n\n    def forward(self, x):\n        x1 = self.lrelu(self.conv1(x))\n        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))\n        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))\n        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))\n        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))\n        return x5 * 0.2 + x\n\n\nclass RRDB(nn.Module):\n    \"\"\"Residual in Residual Dense Block\"\"\"\n\n    def __init__(self, nf, gc=32):\n        super(RRDB, self).__init__()\n        self.RDB1 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB2 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB3 = ResidualDenseBlock_5C(nf, gc)\n\n    def forward(self, x):\n        out = self.RDB1(x)\n        out = self.RDB2(out)\n        out = self.RDB3(out)\n        return out * 0.2 + x\n\n\n@ARCH_REGISTRY.register()\nclass RRDBNet(nn.Module):\n    def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4):\n        super(RRDBNet, self).__init__()\n        self.upscale = upscale\n        RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.RRDB_trunk = make_layer(RRDB_block_f, nb)\n        self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        #### upsampling\n        self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        if upscale == 4:\n            self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.conv_first(x)\n        trunk = self.trunk_conv(self.RRDB_trunk(fea))\n        fea = fea + trunk\n\n        if self.upscale == 2 or self.upscale == 3:\n            fea = self.lrelu(\n                self.upconv1(\n                    F.interpolate(fea, scale_factor=self.upscale, mode=\"nearest\")\n                )\n            )\n        if self.upscale == 4:\n            fea = self.lrelu(\n                self.upconv1(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n            fea = self.lrelu(\n                self.upconv2(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n        out = self.conv_last(self.lrelu(self.HRconv(fea)))\n\n        return out\n"
  },
  {
    "path": "codes/config/BSRGAN/archs/srresnet.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\n@ARCH_REGISTRY.register()\nclass MSRResNet(nn.Module):\n    \"\"\"modified SRResNet\"\"\"\n\n    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):\n        super(MSRResNet, self).__init__()\n        self.upscale = upscale\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        basic_block = functools.partial(ResidualBlock_noBN, nf=nf)\n        self.recon_trunk = make_layer(basic_block, nb)\n\n        # upsampling\n        if self.upscale == 2:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n        elif self.upscale == 3:\n            self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(3)\n        elif self.upscale == 4:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1\n        )\n        if self.upscale == 4:\n            initialize_weights(self.upconv2, 0.1)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv_first(x))\n        out = self.recon_trunk(fea)\n\n        if self.upscale == 4:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n        elif self.upscale == 3 or self.upscale == 2:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n\n        out = self.conv_last(self.lrelu(self.HRconv(out)))\n        base = F.interpolate(\n            x, scale_factor=self.upscale, mode=\"bilinear\", align_corners=False\n        )\n        out += base\n        return out\n"
  },
  {
    "path": "codes/config/BSRGAN/archs/translator.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\n@ARCH_REGISTRY.register()\nclass Translator(nn.Module):\n    def __init__(self, in_nc, out_nc, nf, nb, scale=4, conv=default_conv):\n        super().__init__()\n\n        self.scale = scale\n\n        # define head module\n        if scale >= 1:\n            m_head = [conv(in_nc, nf, 3)]\n        else:\n            s = int(1 / scale)\n            m_head = [nn.Conv2d(in_nc, nf, kernel_size=2 * s + 1, stride=s, padding=s)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, nf, 3, act=nn.ReLU(True), res_scale=1) for _ in range(nb)\n        ]\n        m_body.append(conv(nf, nf, 3))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, nf, act=False) if scale > 1 else nn.Identity(),\n            conv(nf, out_nc, 3),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = 
nn.Sequential(*m_tail)\n\n    def forward(self, x):\n        x = self.head(x)\n        f = self.body(x)\n        x = f + x\n        x = self.tail(x)\n        return x\n
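\n\nif __name__ == \"__main__\":\n    # shape sketch: with scale < 1 the strided head downsamples, so this maps HR -> LR\n    # (the in_nc/out_nc/nf/nb values here are illustrative assumptions)\n    net = Translator(in_nc=3, out_nc=3, nf=64, nb=8, scale=0.25)\n    print(net(torch.rand(1, 3, 64, 64)).shape)  # torch.Size([1, 3, 16, 16])\n"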
  },
  {
    "path": "codes/config/BSRGAN/archs/vgg.py",
    "content": "import os\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn as nn\nfrom torchvision.models import vgg as vgg\n\nfrom utils.registry import ARCH_REGISTRY\n\nVGG_PRETRAIN_PATH = \"checkpoints/pretrained_models/vgg19-dcbb9e9d.pth\"\nNAMES = {\n    \"vgg11\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg13\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg16\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"pool5\",\n    ],\n    \"vgg19\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"conv3_4\",\n        \"relu3_4\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"conv4_4\",\n        \"relu4_4\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"conv5_4\",\n        \"relu5_4\",\n        \"pool5\",\n    ],\n}\n\n\ndef insert_bn(names):\n    \"\"\"Insert bn layer after each conv.\n    Args:\n        names (list): The list of layer names.\n    Returns:\n        list: The list of layer names with bn layers.\n    \"\"\"\n    names_bn = []\n    for name in names:\n        names_bn.append(name)\n        if \"conv\" in name:\n            position = name.replace(\"conv\", \"\")\n            names_bn.append(\"bn\" + position)\n    return names_bn\n\n\n@ARCH_REGISTRY.register()\nclass VGGFeatureExtractor(nn.Module):\n    \"\"\"VGG network for feature extraction.\n    In this implementation, we allow users to choose whether use normalization\n    in the input feature and the type of vgg network. 
Note that the pretrained\n    path must fit the vgg type.\n    Args:\n        layer_name_list (list[str]): Forward function returns the corresponding\n            features according to the layer_name_list.\n            Example: {'relu1_1', 'relu2_1', 'relu3_1'}.\n        vgg_type (str): Set the type of vgg network. Default: 'vgg19'.\n        use_input_norm (bool): If True, normalize the input image. Importantly,\n            the input feature must in the range [0, 1]. Default: True.\n        range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].\n            Default: False.\n        requires_grad (bool): If true, the parameters of VGG network will be\n            optimized. Default: False.\n        remove_pooling (bool): If true, the max pooling operations in VGG net\n            will be removed. Default: False.\n        pooling_stride (int): The stride of max pooling operation. Default: 2.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_name_list,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        requires_grad=False,\n        remove_pooling=False,\n        pooling_stride=2,\n    ):\n        super(VGGFeatureExtractor, self).__init__()\n\n        self.layer_name_list = layer_name_list\n        self.use_input_norm = use_input_norm\n        self.range_norm = range_norm\n\n        self.names = NAMES[vgg_type.replace(\"_bn\", \"\")]\n        if \"bn\" in vgg_type:\n            self.names = insert_bn(self.names)\n\n        # only borrow layers that will be used to avoid unused params\n        max_idx = 0\n        for v in layer_name_list:\n            idx = self.names.index(v)\n            if idx > max_idx:\n                max_idx = idx\n\n        if os.path.exists(VGG_PRETRAIN_PATH):\n            vgg_net = getattr(vgg, vgg_type)(pretrained=False)\n            state_dict = torch.load(\n                VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage\n            )\n            vgg_net.load_state_dict(state_dict)\n        else:\n            vgg_net = getattr(vgg, vgg_type)(pretrained=True)\n\n        features = vgg_net.features[: max_idx + 1]\n\n        modified_net = OrderedDict()\n        for k, v in zip(self.names, features):\n            if \"pool\" in k:\n                # if remove_pooling is true, pooling operation will be removed\n                if remove_pooling:\n                    continue\n                else:\n                    # in some cases, we may want to change the default stride\n                    modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride)\n            else:\n                modified_net[k] = v\n\n        self.vgg_net = nn.Sequential(modified_net)\n\n        if not requires_grad:\n            self.vgg_net.eval()\n            for param in self.parameters():\n                param.requires_grad = False\n        else:\n            self.vgg_net.train()\n            for param in self.parameters():\n                param.requires_grad = True\n\n        if self.use_input_norm:\n            # the mean is for image with range [0, 1]\n            self.register_buffer(\n                \"mean\", torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)\n            )\n            # the std is for image with range [0, 1]\n            self.register_buffer(\n                \"std\", torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)\n            )\n\n    def forward(self, x):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, 
c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        if self.range_norm:\n            x = (x + 1) / 2\n        if self.use_input_norm:\n            x = (x - self.mean) / self.std\n\n        output = {}\n        for key, layer in self.vgg_net._modules.items():\n            x = layer(x)\n            if key in self.layer_name_list:\n                output[key] = x.clone()\n\n        return output\n"
  },
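  {
    "path": "codes/config/BSRGAN/archs/vgg_example.py",
    "content": "\"\"\"A minimal usage sketch for VGGFeatureExtractor, for illustration only.\n\nAssumptions: it is run from codes/config/BSRGAN/ like the other scripts, the\nextractor lives in archs/vgg.py, and the layer names and input size below are\narbitrary examples. Instantiating may download the torchvision VGG19 weights.\n\"\"\"\nimport sys\n\nimport torch\n\nsys.path.append(\"../../\")\nfrom archs.vgg import VGGFeatureExtractor\n\n# inputs are expected in [0, 1] when use_input_norm=True (the default)\nextractor = VGGFeatureExtractor(\n    layer_name_list=[\"relu1_1\", \"relu2_1\", \"relu3_1\"],\n    vgg_type=\"vgg19\",\n)\n\nx = torch.rand(1, 3, 128, 128)  # fake image batch in [0, 1]\nwith torch.no_grad():\n    feats = extractor(x)  # dict: layer name -> feature map\nfor name, feat in feats.items():\n    print(name, tuple(feat.shape))\n"
  },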
  {
    "path": "codes/config/BSRGAN/count_flops.py",
    "content": "import argparse\nimport sys\n\nimport torch\nfrom torchsummaryX import summary\n\nsys.path.append(\"../../\")\nimport utils.option as option\nfrom models import create_model\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"--opt\",\n    type=str,\n    default=\"options/setting1/test/test_setting1_x4.yml\",\n    help=\"Path to option YMAL file of Predictor.\",\n)\nargs = parser.parse_args()\nopt = option.parse(args.opt, root_path=\".\", is_train=True)\n\nopt = option.dict_to_nonedict(opt)\nmodel = create_model(opt)\n\ntest_tensor = torch.randn(1, 3, 270, 180).cuda()\nfor name, net in model.networks.items():\n    summary(net.cuda(), x=test_tensor)\n    print(\"Above are results for net {}\".format(name))\n    input()\n"
  },
  {
    "path": "codes/config/BSRGAN/inference.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport os.path as osp\nimport random\nimport sys\nimport cv2\nfrom collections import defaultdict\nfrom glob import glob\nfrom tqdm import tqdm\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom data.data_sampler import DistIterSampler\nfrom metrics import IQA\nfrom models import create_model\n\n\n\n#### options\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"-opt\",\n    type=str,\n    default=\"options/test/2020Track2.yml\",\n    help=\"Path to options YMAL file.\",\n)\nparser.add_argument(\"-input_dir\", type=str, default=\"../../../data_samples/LR\")\nparser.add_argument(\"-output_dir\", type=str, default=\"../../../data_samples/BSRGAN\")\nargs = parser.parse_args()\nopt = option.parse(args.opt, is_train=False)\n\nopt = option.dict_to_nonedict(opt)\n\nmodel = create_model(opt)\n\nif not osp.exists(args.output_dir):\n    os.makedirs(args.output_dir)\n\ntest_files = glob(osp.join(args.input_dir, \"*\"))\nfor inx, path in tqdm(enumerate(test_files)):\n    name = path.split(\"/\")[-1].split(\".\")[0]\n\n    img = cv2.imread(path)[:, :, [2, 1, 0]]\n    img = img.transpose(2, 0, 1)[None] / 255\n    img_t = torch.as_tensor(np.ascontiguousarray(img)).float()\n\n    model.test({\"src\": img_t})\n    outdict = model.get_current_visuals()\n\n    sr = outdict[\"sr\"]\n    sr_im = util.tensor2img(sr)\n\n    save_path = osp.join(args.output_dir, \"{}_x{}.png\".format(name, opt[\"scale\"]))\n    cv2.imwrite(save_path, sr_im)\n"
  },
  {
    "path": "codes/config/BSRGAN/models/__init__.py",
    "content": "import importlib\nimport logging\nimport os\nimport os.path as osp\n\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\nmodel_folder = osp.dirname(__file__)\nmodel_names = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(model_folder)\n    if v.endswith(\"_model.py\")\n]\n_model_modules = [\n    importlib.import_module(f\"models.{file_name}\") for file_name in model_names\n]\n\n\ndef create_model(opt, **kwarg):\n    model = opt[\"model\"]\n    m = MODEL_REGISTRY.get(model)(opt, **kwarg)\n    logger.info(\"Model [{:s}] is created.\".format(m.__class__.__name__))\n    return m\n"
  },
  {
    "path": "codes/config/BSRGAN/models/base_model.py",
    "content": "import logging\nimport os\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\n\nfrom archs import build_loss, build_network, build_scheduler\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass BaseModel:\n    def __init__(self, opt):\n\n        self.opt = opt\n\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n            self.world_size = torch.distributed.get_world_size()\n        else:\n            self.rank = 0  # non dist training\n\n        self.device = torch.device(\"cuda\" if opt[\"gpu_ids\"] is not None else \"cpu\")\n        self.is_train = opt[\"is_train\"]\n        self.log_dict = OrderedDict()\n\n        self.data_names = []\n        self.networks = {}\n\n        self.optimizers = {}\n        self.schedulers = {}\n\n    def setup_train(self, train_opt):\n        # define losses\n        loss_opt = train_opt[\"losses\"]\n        self.losses = self.build_losses(loss_opt)\n\n        # build optmizers\n        optimizer_opts = train_opt[\"optimizers\"]\n        self.optimizers = self.build_optimizers(optimizer_opts)\n\n        # set schedulers\n        scheduler_opts = train_opt[\"schedulers\"]\n        self.schedulers = self.build_schedulers(scheduler_opts)\n\n        # set to training state\n        self.set_network_state(self.networks.keys(), \"train\")\n\n    def feed_data(self, data):\n        pass\n\n    def optimize_parameters(self):\n        pass\n\n    def get_current_visuals(self):\n        pass\n\n    def get_current_losses(self):\n        pass\n\n    def print_network(self):\n        pass\n\n    def save(self, label):\n        pass\n\n    def load(self):\n        pass\n\n    def build_network(self, net_opt):\n\n        net = build_network(net_opt)\n\n        if isinstance(net, nn.Module):\n            net = self.model_to_device(net)\n\n            if net_opt.get(\"pretrain\"):\n                pretrain = net_opt.pop(\"pretrain\")\n                self.load_network(net, pretrain[\"path\"], pretrain[\"strict_load\"])\n\n            self.print_network(net)\n        return net\n\n    def build_losses(self, loss_opt):\n        losses = {}\n\n        defined_loss_names = list(loss_opt.keys())\n        assert set(defined_loss_names).issubset(set(self.loss_names))\n\n        for name in defined_loss_names:\n            loss_conf = loss_opt.get(name)\n            if loss_conf[\"weight\"] > 0:\n                self.loss_weights[name] = loss_conf.pop(\"weight\")\n                losses[name] = build_loss(loss_conf).to(self.device)\n\n        return losses\n\n    def build_optimizers(self, optim_opts):\n        optimizers = {}\n\n        if \"default\" in optim_opts.keys():\n            default_optim = optim_opts.pop(\"default\")\n\n        defined_optimizer_names = list(optim_opts.keys())\n        assert set(defined_optimizer_names).issubset(self.networks.keys())\n\n        for name in defined_optimizer_names:\n            optim_opt = optim_opts[name]\n            if optim_opt is None:\n                optim_opt = default_optim.copy()\n\n            params = []\n            for v in self.networks[name].parameters():\n                if v.requires_grad:\n                    params.append(v)\n\n            optim_type = optim_opt.pop(\"type\")\n            optimizer = getattr(torch.optim, optim_type)(params=params, **optim_opt)\n            optimizers[name] = optimizer\n\n  
      return optimizers\n\n    def build_schedulers(self, scheduler_opts):\n        \"\"\"Set up scheduler.\"\"\"\n        schedulers = {}\n        if \"default\" in scheduler_opts.keys():\n            default_opt = scheduler_opts.pop(\"default\")\n\n        for name in self.optimizers.keys():\n            scheduler_opt = scheduler_opts[name]\n            if scheduler_opt is None:\n                scheduler_opt = default_opt.copy()\n\n            schedulers[name] = build_scheduler(self.optimizers[name], scheduler_opt)\n\n        return schedulers\n\n    def model_to_device(self, net):\n        \"\"\"Model to device. It also warps models with DistributedDataParallel\n        or DataParallel.\n        Args:\n            net (nn.Module)\n        \"\"\"\n        net = net.to(self.device)\n        if self.opt[\"dist\"]:\n            net = DistributedDataParallel(net, device_ids=[torch.cuda.current_device()])\n        else:\n            net = DataParallel(net)\n        return net\n\n    def print_network(self, net):\n        # Generator\n        s, n = self.get_network_description(net)\n        if isinstance(net, nn.DataParallel) or isinstance(net, DistributedDataParallel):\n            net_struc_str = \"{} - {}\".format(\n                net.__class__.__name__, net.module.__class__.__name__\n            )\n        else:\n            net_struc_str = \"{}\".format(net.__class__.__name__)\n        if self.rank <= 0:\n            logger.info(\n                \"Network G structure: {}, with parameters: {:,d}\".format(\n                    net_struc_str, n\n                )\n            )\n            logger.info(s)\n\n    def set_optimizer(self, names, operation):\n        for name in names:\n            getattr(self.optimizers[name], operation)()\n\n    def set_requires_grad(self, names, requires_grad):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                for v in self.networks[name].parameters():\n                    v.requires_grad = requires_grad\n\n    def set_network_state(self, names, state):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                getattr(self.networks[name], state)()\n\n    def clip_grad_norm(self, names, norm):\n        for name in names:\n            nn.utils.clip_grad_norm_(self.networks[name].parameters(), max_norm=norm)\n\n    def _set_lr(self, lr_groups_l):\n        \"\"\"set learning rate for warmup,\n        lr_groups_l: list for lr_groups. 
each for a optimizer\"\"\"\n        for optimizer, lr_groups in zip(self.optimizers, lr_groups_l):\n            for param_group, lr in zip(optimizer.param_groups, lr_groups):\n                param_group[\"lr\"] = lr\n\n    def _get_init_lr(self):\n        # get the initial lr, which is set by the scheduler\n        init_lr_groups_l = []\n        for optimizer in self.optimizers:\n            init_lr_groups_l.append([v[\"initial_lr\"] for v in optimizer.param_groups])\n        return init_lr_groups_l\n\n    def update_learning_rate(self, cur_iter, warmup_iter=-1):\n        for _, scheduler in self.schedulers.items():\n            scheduler.step()\n        #### set up warm up learning rate\n        if cur_iter < warmup_iter:\n            # get initial lr for each group\n            init_lr_g_l = self._get_init_lr()\n            # modify warming-up learning rates\n            warm_up_lr_l = []\n            for init_lr_g in init_lr_g_l:\n                warm_up_lr_l.append([v / warmup_iter * cur_iter for v in init_lr_g])\n            # set learning rate\n            self._set_lr(warm_up_lr_l)\n\n    def get_current_learning_rate(self):\n        # return self.schedulers[0].get_lr()[0]\n        return list(self.optimizers.values())[0].param_groups[0][\"lr\"]\n\n    def get_network_description(self, network):\n        \"\"\"Get the string and total parameters of the network\"\"\"\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        s = str(network)\n        n = sum(map(lambda x: x.numel(), network.parameters()))\n        return s, n\n\n    def save_network(self, network, network_label, iter_label):\n        save_filename = \"{}_{}.pth\".format(iter_label, network_label)\n        save_path = os.path.join(self.opt[\"path\"][\"models\"], save_filename)\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        state_dict = network.state_dict()\n        for key, param in state_dict.items():\n            state_dict[key] = param.cpu()\n        torch.save(state_dict, save_path)\n\n    def save(self, iter_label):\n        for name in self.optimizers.keys():\n            self.save_network(self.networks[name], name, iter_label)\n\n    def load_network(self, network, load_path, strict=True):\n        if load_path is not None:\n            if isinstance(network, nn.DataParallel) or isinstance(\n                network, DistributedDataParallel\n            ):\n                network = network.module\n            load_net = torch.load(load_path)\n            load_net_clean = OrderedDict()  # remove unnecessary 'module.'\n            for k, v in load_net.items():\n                if k.startswith(\"module.\"):\n                    load_net_clean[k[7:]] = v\n                else:\n                    load_net_clean[k] = v\n            network.load_state_dict(load_net_clean, strict=strict)\n\n    def save_training_state(self, epoch, iter_step):\n        \"\"\"Saves training state during training, which will be used for resuming\"\"\"\n        state = {\"epoch\": epoch, \"iter\": iter_step, \"schedulers\": {}, \"optimizers\": {}}\n        for k, s in self.schedulers.items():\n            state[\"schedulers\"][k] = s.state_dict()\n        for k, o in self.optimizers.items():\n            state[\"optimizers\"][k] = o.state_dict()\n        save_filename = \"{}.state\".format(iter_step)\n        
save_path = os.path.join(self.opt[\"path\"][\"training_state\"], save_filename)\n        torch.save(state, save_path)\n\n    def resume_training(self, resume_state):\n        \"\"\"Resume the optimizers and schedulers for training\"\"\"\n        resume_optimizers = resume_state[\"optimizers\"]\n        resume_schedulers = resume_state[\"schedulers\"]\n        assert len(resume_optimizers) == len(\n            self.optimizers\n        ), \"Wrong lengths of optimizers\"\n        assert len(resume_schedulers) == len(\n            self.schedulers\n        ), \"Wrong lengths of schedulers\"\n        for name, o in resume_optimizers.items():\n            self.optimizers[name].load_state_dict(o)\n        for name, s in resume_schedulers.items():\n            self.schedulers[name].load_state_dict(s)\n\n    def reduce_loss_dict(self, loss_dict):\n        \"\"\"reduce loss dict.\n        In distributed training, it averages the losses among different GPUs .\n        Args:\n            loss_dict (OrderedDict): Loss dict.\n        \"\"\"\n        with torch.no_grad():\n            if self.opt[\"dist\"]:\n                keys = []\n                losses = []\n                for name, value in loss_dict.items():\n                    keys.append(name)\n                    losses.append(value)\n                losses = torch.stack(losses, 0)\n                torch.distributed.reduce(losses, dst=0)\n                if self.rank == 0:\n                    losses /= self.world_size\n                loss_dict = {key: loss for key, loss in zip(keys, losses)}\n\n            log_dict = OrderedDict()\n            for name, value in loss_dict.items():\n                log_dict[name] = value.mean().item()\n\n            return log_dict\n\n    def get_current_log(self):\n        return self.log_dict\n"
  },
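  {
    "path": "codes/config/BSRGAN/models/README.md",
    "content": "A rough sketch of the `train` options that `BaseModel.setup_train` consumes (key names follow `base_model.py`; the concrete types and values below are illustrative and must exist in the corresponding registries):\n\n```yaml\ntrain:\n  losses:\n    sr_pix:\n      type: L1Loss      # looked up in LOSS_REGISTRY; built only if weight > 0\n      weight: 1.0\n  optimizers:\n    default:            # copied for every network whose entry is left empty\n      type: Adam        # any torch.optim class name\n      lr: 0.0001\n    netSR: ~\n  schedulers:\n    default:\n      type: MultiStepLR # must be registered in LR_SCHEDULER_REGISTRY\n      milestones: [200000]\n      gamma: 0.5\n    netSR: ~\n```\n\nAn entry of `~` (None) for a network makes `build_optimizers`/`build_schedulers` fall back to the `default` block."
  },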
  {
    "path": "codes/config/BSRGAN/models/sr_model.py",
    "content": "import logging\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.registry import MODEL_REGISTRY\n\nfrom .base_model import BaseModel\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass SRModel(BaseModel):\n    def __init__(self, opt):\n        super().__init__(opt)\n\n        self.data_names = [\"lr\", \"hr\"]\n\n        self.network_names = [\"netSR\"]\n        self.networks = {}\n\n        self.loss_names = [\"sr_adv\", \"sr_pix\", \"sr_percep\"]\n        self.loss_weights = {}\n        self.losses = {}\n        self.optimizers = {}\n\n        # define networks and load pretrained models\n        nets_opt = opt[\"networks\"]\n        defined_network_names = list(nets_opt.keys())\n        assert set(defined_network_names).issubset(set(self.network_names))\n\n        for name in defined_network_names:\n            setattr(self, name, self.build_network(nets_opt[name]))\n            self.networks[name] = getattr(self, name)\n\n        if self.is_train:\n            # setup loss, optimizers, schedulers\n            self.setup_train(opt[\"train\"])\n\n    def feed_data(self, data):\n\n        self.lr = data[\"src\"].to(self.device)\n        self.hr = data[\"tgt\"].to(self.device)\n\n    def forward(self):\n\n        self.sr = self.netSR(self.lr)\n\n    def optimize_parameters(self, step):\n\n        self.forward()\n\n        loss_dict = OrderedDict()\n\n        l_sr = 0\n\n        sr_pix = self.losses[\"sr_pix\"](self.hr, self.sr)\n        loss_dict[\"sr_pix\"] = sr_pix\n        l_sr += self.loss_weights[\"sr_pix\"] * sr_pix\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD\"], False)\n            sr_adv_g = self.calculate_rgan_loss_G(\n                self.netD, self.losses[\"sr_adv\"], self.hr, self.sr\n            )\n            loss_dict[\"sr_adv_g\"] = sr_adv_g\n            l_sr += self.loss_weights[\"sr_adv\"] * sr_adv_g\n\n        if self.losses.get(\"sr_percep\"):\n            sr_percep, sr_style = self.losses[\"sr_percep\"](self.hr, self.sr)\n            loss_dict[\"sr_percep\"] = sr_percep\n            if sr_style is not None:\n                loss_dict[\"sr_style\"] = sr_style\n                l_sr += self.loss_weights[\"sr_percep\"] * sr_style\n            l_sr += self.loss_weights[\"sr_percep\"] * sr_percep\n\n        self.set_optimizer(names=[\"netSR\"], operation=\"zero_grad\")\n        l_sr.backward()\n        self.set_optimizer(names=[\"netSR\"], operation=\"step\")\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD\"], True)\n            sr_adv_d = self.calculate_rgan_loss_D(\n                self.netD, self.losses[\"sr_adv\"], self.hr, self.sr\n            )\n            loss_dict[\"sr_adv_d\"] = sr_adv_d\n\n            self.optimizers[\"netD\"].zero_grad()\n            sr_adv_d.backward()\n            self.optimizers[\"netD\"].step()\n\n        self.log_dict = self.reduce_loss_dict(loss_dict)\n\n    def calculate_rgan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n        loss_real = criterion(\n            d_pred_real - d_pred_fake.detach().mean(), True, is_disc=False\n        )\n        loss_fake = criterion(\n            d_pred_fake - d_pred_real.detach().mean(), False, is_disc=False\n        )\n\n        loss = (loss_real + loss_fake) / 2\n\n        return loss\n\n    def calculate_rgan_loss_G(self, netD, criterion, real, fake):\n\n        
d_pred_fake = netD(fake)\n        d_pred_real = netD(real).detach()\n        loss_real = criterion(d_pred_real - d_pred_fake.mean(), False, is_disc=False)\n        loss_fake = criterion(d_pred_fake - d_pred_real.mean(), True, is_disc=False)\n\n        loss = (loss_real + loss_fake) / 2\n\n        return loss\n\n    def test(self, data, crop_size=None):\n        self.real_lr = data[\"src\"].to(self.device)\n        self.netSR.eval()\n        with torch.no_grad():\n            if crop_size is None:\n                self.fake_real_hr = self.netSR(self.real_lr)\n            else:\n                self.fake_real_hr = self.crop_test(self.real_lr, crop_size)\n        self.netSR.train()\n    \n    def crop_test(self, lr, crop_size):\n        b, c, h, w = lr.shape\n        scale = self.opt[\"scale\"]\n\n        h_start = list(range(0, h-crop_size, crop_size))\n        w_start = list(range(0, w-crop_size, crop_size))\n\n        sr1 = torch.zeros(b, c, int(h*scale), int(w* scale), device=self.device) - 1\n        for hs in h_start:\n            for ws in w_start:\n                lr_patch = lr[:, :, hs: hs+crop_size, ws: ws+crop_size]\n                sr_patch = self.netSR(lr_patch)\n\n                sr1[:, :, \n                    int(hs*scale):int((hs+crop_size)*scale),\n                    int(ws*scale):int((ws+crop_size)*scale)\n                ] = sr_patch\n        \n        h_end = list(range(h, crop_size, -crop_size))\n        w_end = list(range(w, crop_size, -crop_size))\n\n        sr2 = torch.zeros(b, c, int(h*scale), int(w* scale), device=self.device) - 1\n        for hd in h_end:\n            for wd in w_end:\n                lr_patch = lr[:, :, hd-crop_size:hd, wd-crop_size:wd]\n                sr_patch = self.netSR(lr_patch)\n\n                sr2[:, :, \n                    int((hd-crop_size)*scale):int(hd*scale),\n                    int((wd-crop_size)*scale):int(wd*scale)\n                ] = sr_patch\n\n        mask1 = (\n            (sr1 == -1).float() * 0 + \n            (sr2 == -1).float() * 1 + \n            ((sr1 > 0) * (sr2 > 0)).float() * 0.5\n        )\n\n        mask2 = (\n            (sr1 == -1).float() * 1 + \n            (sr2 == -1).float() * 0 + \n            ((sr1 > 0) * (sr2 > 0)).float() * 0.5\n        )\n\n        sr = mask1 * sr1 + mask2 * sr2\n\n        return sr\n            \n    def get_current_visuals(self, need_GT=True):\n        out_dict = OrderedDict()\n        out_dict[\"lr\"] = self.real_lr.detach()[0].float().cpu()\n        out_dict[\"sr\"] = self.fake_real_hr.detach()[0].float().cpu()\n        return out_dict\n"
  },
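  {
    "path": "codes/config/BSRGAN/models/tiled_test_example.py",
    "content": "\"\"\"A minimal sketch of tiled inference via SRModel.test(..., crop_size=...).\n\ncrop_test runs two tiling passes (anchored at the top-left and at the\nbottom-right corners) and blends them, so images whose sides are not a\nmultiple of crop_size are still fully covered. Assumptions: run from\ncodes/config/BSRGAN/ with a GPU, the checkpoint from the top-level README\ndownloaded, and the option file and sizes below chosen only for illustration.\n\"\"\"\nimport sys\n\nimport torch\n\nsys.path.append(\"../../\")\nimport utils.option as option\nfrom models import create_model\n\nopt = option.parse(\"options/test/2020Track2.yml\", is_train=False)\nopt = option.dict_to_nonedict(opt)\nmodel = create_model(opt)\n\nlr = torch.rand(1, 3, 250, 250)  # side deliberately not divisible by crop_size\nmodel.test({\"src\": lr}, crop_size=100)\nsr = model.get_current_visuals()[\"sr\"]\nprint(tuple(sr.shape))  # (3, 1000, 1000) for scale 4\n"
  },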
  {
    "path": "codes/config/BSRGAN/options/test/2017Track2_2020Track1.yml",
    "content": "#### general settings\nname: 2017Track2_2020Track1\nuse_tb_logger: false\nmodel: SRModel\nscale: 4\ngpu_ids: [6]\n\nmetrics: [psnr, ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2017Track2\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  test2:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: RRDBNet\n    setting:\n      in_nc: 3\n      out_nc: 3\n      nf: 64\n      nb: 23\n      gc: 32\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/BSRGAN/BSRGAN.pth\n      strict_load: true\n"
  },
  {
    "path": "codes/config/BSRGAN/options/test/2018Track2_2018Track4.yml",
    "content": "#### general settings\nname: 2018Track2_2018Track4\nuse_tb_logger: false\nmodel: SRModel\nscale: 4\ngpu_ids: [6]\n\nmetrics: [best_psnr, best_ssim, best_lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  test2:\n    name: 2018Track4\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: RRDBNet\n    setting:\n      in_nc: 3\n      out_nc: 3\n      nf: 64\n      nb: 23\n      gc: 32\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/BSRGAN/BSRGAN.pth\n      strict_load: true\n"
  },
  {
    "path": "codes/config/BSRGAN/options/test/2020Track2.yml",
    "content": "#### general settings\nname: 2020Track2\nuse_tb_logger: false\nmodel: SRModel\nscale: 4\ngpu_ids: [0]\n\nmetrics: [niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2020Track2\n    mode: SingleDataset\n    data_type: lmdb\n    dataroot: /home/lzx/SRDatasets/NTIRE2020/track2/test.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: RRDBNet\n    setting:\n      in_nc: 3\n      out_nc: 3\n      nf: 64\n      nb: 23\n      gc: 32\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/BSRGAN/BSRGAN.pth\n      strict_load: true\n"
  },
  {
    "path": "codes/config/BSRGAN/test.py",
    "content": "import argparse\nimport logging\nimport os.path\nimport sys\nimport time\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\nfrom utils import bgr2ycbcr, imresize\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=False)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    util.mkdirs(\n        (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n    )\n\n    os.system(\"rm ./result\")\n    os.symlink(os.path.join(opt[\"path\"][\"results_root\"], \"..\"), \"./result\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    torch.backends.cudnn.benchmark = True\n\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"test_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO,\n        screen=True,\n        tofile=True,\n    )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    logger = logging.getLogger(\"base\")\n    logger.info(option.dict2str(opt))\n\n    # Create test dataset and dataloader\n    test_datasets = []\n    test_loaders = []\n\n    for phase, dataset_opt in 
sorted(opt[\"datasets\"].items()):\n\n        test_set = create_dataset(dataset_opt)\n        test_loader = create_dataloader(test_set, dataset_opt, opt[\"dist\"])\n\n        if rank == 0:\n            logger.info(\n                \"Number of test images in [{:s}]: {:d}\".format(\n                    dataset_opt[\"name\"], len(test_set)\n                )\n            )\n        test_datasets.append(test_set)\n        test_loaders.append(test_loader)\n\n    # load pretrained model by default\n    model = create_model(opt)\n\n    for test_dataset, test_loader in zip(test_datasets, test_loaders):\n\n        test_set_name = test_dataset.opt[\"name\"]\n        dataset_dir = os.path.join(opt[\"path\"][\"results_root\"], test_set_name)\n\n        if rank == 0:\n            logger.info(\"\\nTesting [{:s}]...\".format(test_set_name))\n            util.mkdir(dataset_dir)\n\n        validate(\n            model,\n            test_dataset,\n            test_loader,\n            opt,\n            measure,\n            dataset_dir,\n            test_set_name,\n            logger,\n        )\n\n\ndef validate(\n    model, dataset, dist_loader, opt, measure, dataset_dir, test_set_name, logger\n):\n\n    test_results = {}\n    test_results_y = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n        test_results_y[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        test_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        img_path = test_data[\"src_path\"][0]\n        img_name = img_path.split(\"/\")[-1].split(\".\")[0]\n\n        model.test(test_data)\n        visuals = model.get_current_visuals()\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n\n        suffix = opt[\"suffix\"]\n        if suffix:\n            save_img_path = os.path.join(dataset_dir, img_name + suffix + \".png\")\n        else:\n            save_img_path = os.path.join(dataset_dir, img_name + \".png\")\n        util.save_img(sr_img, save_img_path)\n\n        message = \"img:{:15s}; \".format(img_name)\n\n        crop_border = opt[\"crop_border\"] if opt[\"crop_border\"] else opt[\"scale\"]\n\n        if crop_border == 0:\n            cropped_sr_img = sr_img\n        else:\n            cropped_sr_img = sr_img[\n                crop_border:-crop_border, crop_border:-crop_border, :\n            ]\n\n        if \"tgt\" in test_data.keys():\n            gt_img = util.tensor2img(test_data[\"tgt\"][0].double().cpu())\n\n            if crop_border == 0:\n                cropped_gt_img = gt_img\n            else:\n                cropped_gt_img = gt_img[\n                    crop_border:-crop_border, crop_border:-crop_border, :\n                ]\n        else:\n            gt_img = None\n            cropped_gt_img = None\n\n        message += \"Scores - \"\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n            message += \"{}: {:.6f}; \".format(k, v)\n\n        if sr_img.shape[2] == 3:  # RGB image\n            sr_img_y = bgr2ycbcr(sr_img, only_y=True)\n            if crop_border == 0:\n                cropped_sr_img_y = sr_img_y * 255\n            else:\n                
cropped_sr_img_y = (\n                    sr_img_y[crop_border:-crop_border, crop_border:-crop_border] * 255\n                )\n            if gt_img is not None:\n                gt_img_y = bgr2ycbcr(gt_img, only_y=True)\n                if crop_border == 0:\n                    cropped_gt_img_y = gt_img_y * 255\n                else:\n                    cropped_gt_img_y = (\n                        gt_img_y[crop_border:-crop_border, crop_border:-crop_border]\n                        * 255\n                    )\n            else:\n                gt_img_y = None\n                cropped_gt_img_y = None\n\n            message += \"Y Scores - \"\n            scores = measure(\n                res=cropped_sr_img_y, ref=cropped_gt_img_y, metrics=opt[\"metrics\"]\n            )\n            for k, v in scores.items():\n                test_results_y[k][idx] = v\n                message += \"{}: {:.6f}; \".format(k, v)\n\n        logger.info(message)\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n        for k, v in test_results_y.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    # log\n    avg_results = {}\n    message = \"Average Results for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger.info(message)\n\n    avg_results_y = {}\n    message = \"Average Results on Y channel for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results_y.items():\n            avg_results_y[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results_y[k])\n\n        logger.info(message)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/config/BSRGAN/train.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport random\nimport sys\nimport time\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef setup_dataloaer(opt, logger):\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        rank = 0\n        world_size = 1\n\n    for phase, dataset_opt in opt[\"datasets\"].items():\n        if phase == \"train\":\n            train_set = create_dataset(dataset_opt)\n            train_loader = create_dataloader(train_set, dataset_opt, opt[\"dist\"])\n            total_iters = opt[\"train\"][\"niter\"]\n            total_epochs = total_iters // (len(train_loader) - 1) + 1\n            if rank == 0:\n                logger.info(\n                    \"Number of train images: {:,d}, iters: {:,d}\".format(\n                        len(train_set), len(train_loader)\n                    )\n                )\n                logger.info(\n                    \"Total epochs needed: {:d} for iters {:,d}\".format(\n                        total_epochs, opt[\"train\"][\"niter\"]\n                    )\n                )\n\n        elif phase == \"val\":\n            val_set = create_dataset(dataset_opt)\n            val_loader = create_dataloader(val_set, dataset_opt, opt[\"dist\"])\n            if rank == 0:\n                logger.info(\n                    \"Number of val images in [{:s}]: {:d}\".format(\n                        dataset_opt[\"name\"], len(val_set)\n                    )\n                )\n        else:\n            raise NotImplementedError(\"Phase [{:s}] is not recognized.\".format(phase))\n\n    assert train_loader is not None\n    assert val_loader is not None\n\n    return train_set, train_loader, val_set, val_loader, total_iters, total_epochs\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=True)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = 
torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    if opt[\"train\"].get(\"resume_state\", None) is None:\n        util.mkdir_and_rename(\n            opt[\"path\"][\"experiments_root\"]\n        )  # rename experiment folder if exists\n        util.mkdirs(\n            (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n        )\n        os.system(\"rm ./log\")\n        os.symlink(os.path.join(opt[\"path\"][\"experiments_root\"], \"..\"), \"./log\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: \\\n            {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    seed = opt[\"train\"][\"manual_seed\"]\n    if seed is None:\n        util.set_random_seed(rank)\n\n    torch.backends.cudnn.benchmark = True\n    # torch.backends.cudnn.deterministic = True\n\n    # setup tensorboard and val logger\n    if rank == 0:\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger = SummaryWriter(log_dir=\"log/{}/tb_logger/\".format(opt[\"name\"]))\n\n        util.setup_logger(\n            \"val\",\n            opt[\"path\"][\"log\"],\n            \"val_\" + opt[\"name\"],\n            level=logging.INFO,\n            screen=True,\n            tofile=True,\n        )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    # config loggers. 
\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"train_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO if rank == 0 else logging.ERROR,\n        screen=True,\n        tofile=True,\n    )\n\n    logger = logging.getLogger(\"base\")\n    if rank == 0:\n        logger.info(option.dict2str(opt))\n\n    # create dataset\n    (\n        train_set,\n        train_loader,\n        val_set,\n        val_loader,\n        total_iters,\n        total_epochs,\n    ) = setup_dataloader(opt, logger)\n\n    # create model\n    model = create_model(opt)\n\n    # loading resume state if exists\n    if opt[\"train\"].get(\"resume_state\", None):\n        # distributed resuming: all load into default GPU\n        device_id = gpu\n        resume_state = torch.load(\n            opt[\"train\"][\"resume_state\"],\n            map_location=lambda storage, loc: storage.cuda(device_id),\n        )\n\n        logger.info(\n            \"Resuming training from epoch: {}, iter: {}.\".format(\n                resume_state[\"epoch\"], resume_state[\"iter\"]\n            )\n        )\n\n        start_epoch = resume_state[\"epoch\"]\n        current_step = resume_state[\"iter\"]\n        model.resume_training(resume_state)  # handle optimizers and schedulers\n\n    else:\n        current_step = 0\n        start_epoch = 0\n\n    logger.info(\n        \"Start training from epoch: {:d}, iter: {:d}\".format(start_epoch, current_step)\n    )\n    data_time, iter_time = time.time(), time.time()\n    avg_data_time = avg_iter_time = 0\n    count = 0\n    for epoch in range(start_epoch, total_epochs + 1):\n        for _, train_data in enumerate(train_loader):\n\n            current_step += 1\n            count += 1\n            if current_step > total_iters:\n                break\n\n            data_time = time.time() - data_time\n            avg_data_time = (avg_data_time * (count - 1) + data_time) / count\n\n            model.feed_data(train_data)\n            model.optimize_parameters(current_step)\n            model.update_learning_rate(\n                current_step, warmup_iter=opt[\"train\"][\"warmup_iter\"]\n            )\n\n            iter_time = time.time() - iter_time\n            avg_iter_time = (avg_iter_time * (count - 1) + iter_time) / count\n\n            # log\n            if current_step % opt[\"logger\"][\"print_freq\"] == 0:\n                logs = model.get_current_log()\n                message = (\n                    f\"<epoch:{epoch:3d}, iter:{current_step:8,d}, \"\n                    f\"lr:{model.get_current_learning_rate():.3e}> \"\n                )\n\n                message += f'[time (data): {avg_iter_time:.3f} ({avg_data_time:.3f})] '\n                for k, v in logs.items():\n                    message += \"{:s}: {:.4e}; \".format(k, v)\n                    # tensorboard logger\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        if rank == 0:\n                            tb_logger.add_scalar(k, v, current_step)\n                logger.info(message)\n\n            # validation\n            if current_step % opt[\"train\"][\"val_freq\"] == 0:\n\n                avg_results = validate(\n                    model, val_set, val_loader, opt, measure, epoch, current_step\n                )\n\n                # tensorboard logger (avg_results is only defined right after\n                # validation, so log it inside this branch)\n                if rank == 0:\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        for k, v in avg_results.items():\n                            tb_logger.add_scalar(k, v, current_step)\n\n            # save models and training states\n            if current_step % opt[\"logger\"][\"save_checkpoint_freq\"] == 0:\n                if rank == 0:\n                    logger.info(\"Saving models and training states.\")\n                    model.save(current_step)\n                    model.save_training_state(epoch, current_step)\n\n            data_time = time.time()\n            iter_time = time.time()\n\n    if rank == 0:\n        logger.info(\"Saving the final model.\")\n        model.save(\"latest\")\n        logger.info(\"End of training.\")\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger.close()\n\n\ndef validate(model, dataset, dist_loader, opt, measure, epoch, current_step):\n\n    test_results = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    if rank == 0:\n        pbar = tqdm(total=len(dataset), leave=False, dynamic_ncols=True)\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        val_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        LR_img = val_data[\"src\"]\n        lr_img = util.tensor2img(LR_img)  # save LR image for reference\n\n        model.test(val_data)\n        visuals = model.get_current_visuals()\n\n        # Save images for reference\n        img_name = val_data[\"src_path\"][0].split(\"/\")[-1].split(\".\")[0]\n        img_dir = os.path.join(opt[\"path\"][\"val_images\"], img_name)\n\n        util.mkdir(img_dir)\n        save_lr_path = os.path.join(img_dir, \"{:s}_LR.png\".format(img_name))\n        util.save_img(lr_img, save_lr_path)\n\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n        save_img_path = os.path.join(\n            img_dir, \"{:s}_{:d}.png\".format(img_name, current_step)\n        )\n        util.save_img(sr_img, save_img_path)\n\n        if \"fake_lr\" in visuals.keys():\n            fake_lr_img = util.tensor2img(visuals[\"fake_lr\"])\n            save_img_path = os.path.join(\n                img_dir, f\"fake_lr_{current_step:d}.png\"\n            )\n            util.save_img(fake_lr_img, save_img_path)\n\n        # calculate scores\n        crop_size = opt[\"scale\"]\n        cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        if \"tgt\" in val_data.keys():\n            gt_img = util.tensor2img(val_data[\"tgt\"])\n            cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        else:\n            cropped_gt_img = gt_img = None\n\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n\n        if rank == 0:\n            for _ in range(world_size):\n                pbar.update(1)\n    if rank == 0:\n        pbar.close()\n\n    # log\n    avg_results = {}\n    message = \"<epoch:{:3d}, iter:{:8,d}> Average scores:\\t\".format(\n        epoch, current_step\n    )\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger_val = logging.getLogger(\"val\")  # validation logger\n        logger_val.info(message)\n\n    del test_results\n    torch.cuda.empty_cache()\n    return avg_results\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/config/Bicubic/README.md",
    "content": "We use the same bicubic interpolation as that in matlab"
  },
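  {
    "path": "codes/config/Bicubic/archs/bicubic_example.py",
    "content": "\"\"\"A minimal sketch showing that the BicuBic arch is plain bicubic upsampling:\nit just wraps utils.resize_utils.imresize, the PyTorch port of MATLAB's\nimresize. Assumes it is run from codes/config/Bicubic/; the input size is an\narbitrary example.\n\"\"\"\nimport sys\n\nimport torch\n\nsys.path.append(\"../../\")\nfrom archs.bicubic import BicuBic\n\nnet = BicuBic(upscale=4)\nlr = torch.rand(1, 3, 32, 32)\nwith torch.no_grad():\n    sr = net(lr)\nprint(tuple(sr.shape))  # (1, 3, 128, 128)\n"
  },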
  {
    "path": "codes/config/Bicubic/archs/__init__.py",
    "content": "import importlib\nimport os\nimport os.path as osp\n\nfrom utils.registry import ARCH_REGISTRY, LOSS_REGISTRY, LR_SCHEDULER_REGISTRY\n\narch_folder = osp.dirname(osp.abspath(__file__))\narch_filenames = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(arch_folder)\n    if v.endswith(\".py\")\n]\n# import all the arch modules\n_arch_modules = [\n    importlib.import_module(f\"archs.{file_name}\") for file_name in arch_filenames\n]\n\n\ndef build_network(net_opt):\n    which_network = net_opt[\"which_network\"]\n    net = ARCH_REGISTRY.get(which_network)(**net_opt[\"setting\"])\n    return net\n\n\ndef build_loss(loss_opt):\n    loss_type = loss_opt.pop(\"type\")\n    loss = LOSS_REGISTRY.get(loss_type)(**loss_opt)\n    return loss\n\ndef build_scheduler(optimizer, scheduler_opt):\n    scheduler_type = scheduler_opt.pop(\"type\")\n    scheduler = LR_SCHEDULER_REGISTRY.get(scheduler_type)(optimizer, **scheduler_opt)\n    return scheduler\n"
  },
  {
    "path": "codes/config/Bicubic/archs/bicubic.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\nfrom utils.resize_utils import imresize\n\n\n@ARCH_REGISTRY.register()\nclass BicuBic(nn.Module):\n    def __init__(self, upscale=4):\n        super().__init__()\n\n        self.empty = nn.Parameter(torch.FloatTensor([0.0]))\n        self.upscale = upscale\n\n    def forward(self, x):\n        y  = imresize(x, self.upscale)\n        return y\n"
  },
  {
    "path": "codes/config/Bicubic/archs/discriminator.py",
    "content": "import torch\nimport torch.nn as nn\nimport torchvision\nimport functools\n\nfrom utils.registry import ARCH_REGISTRY\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG128(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512 * 4 * 4, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG32(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = 
nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass PatchGANDiscriminator(nn.Module):\n    \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n    def __init__(self, in_c, nf, nb, stride=1, norm_layer=nn.InstanceNorm2d):\n        \"\"\"Construct a PatchGAN discriminator\n\n        Parameters:\n            in_c (int)      -- the number of channels in input images\n            nf (int)        -- the number of filters in the first conv layer\n            nb (int)        -- the number of conv layers in the discriminator\n            stride (int)    -- the stride of the intermediate conv layers\n            norm_layer      -- normalization layer\n        \"\"\"\n        super().__init__()\n        if (\n            type(norm_layer) == functools.partial\n        ):  # no need to use bias as BatchNorm2d has affine parameters\n            use_bias = norm_layer.func == nn.InstanceNorm2d\n        else:\n            use_bias = norm_layer == nn.InstanceNorm2d\n\n        kw = 3\n        padw = 1\n        sequence = [\n            nn.Conv2d(in_c, nf, kernel_size=kw, stride=1, padding=padw),\n            nn.LeakyReLU(0.2, True),\n        ]\n        nf_mult = 1\n        nf_mult_prev = 1\n        for n in range(1, nb):  # gradually increase the number of filters\n            nf_mult_prev = nf_mult\n            nf_mult = min(2 ** n, 8)\n            sequence += [\n                nn.Conv2d(\n                    nf * nf_mult_prev,\n                    nf * nf_mult,\n                    kernel_size=kw,\n                    stride=stride,\n                    padding=padw,\n                    bias=use_bias,\n                ),\n                norm_layer(nf * nf_mult),\n                nn.LeakyReLU(0.2, True),\n            ]\n\n        nf_mult_prev = nf_mult\n        nf_mult = min(2 ** nb, 8)\n        sequence += [\n            nn.Conv2d(\n                nf * nf_mult_prev,\n                nf * nf_mult,\n                kernel_size=kw,\n                stride=1,\n                padding=padw,\n                bias=use_bias,\n            ),\n            norm_layer(nf * nf_mult),\n            nn.LeakyReLU(0.2, True),\n        ]\n\n        sequence += [\n            nn.Conv2d(nf * nf_mult, nf, kernel_size=kw, stride=1, padding=padw)\n        ]  # output an nf-
channel prediction map\n        self.model = nn.Sequential(*sequence)\n\n    def forward(self, input):\n        \"\"\"Standard forward.\"\"\"\n        return self.model(input)\n"
  },
  {
    "path": "codes/config/Bicubic/archs/edsr.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(\n        self,\n        rgb_range,\n        rgb_mean=(0.4488, 0.4371, 0.4040),\n        rgb_std=(1.0, 1.0, 1.0),\n        sign=-1,\n    ):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\n\n\n@ARCH_REGISTRY.register()\nclass EDSR(nn.Module):\n    def __init__(self, nb, nf, res_scale=0.1, upscale=4, conv=default_conv):\n        super(EDSR, self).__init__()\n\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        scale = upscale\n       
 act = nn.ReLU(True)\n        # url_name = 'r{}f{}x{}'.format(nb, nf, upscale)\n        # if url_name in url:\n        #     self.url = url[url_name]\n        # else:\n        #     self.url = None\n        self.sub_mean = MeanShift(255.0, sign=-1)\n        self.add_mean = MeanShift(255.0, sign=1)\n\n        # define head module\n        m_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, n_feats, kernel_size, act=act, res_scale=res_scale)\n            for _ in range(n_resblocks)\n        ]\n        m_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = nn.Sequential(*m_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x * 255.0)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = self.tail(res)\n        x = self.add_mean(x) / 255.0\n\n        return x\n"
  },
  {
    "path": "codes/config/Bicubic/archs/loss.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport lpips as lp\n\nfrom utils.registry import LOSS_REGISTRY\n\nfrom .vgg import VGGFeatureExtractor\n\n\n@LOSS_REGISTRY.register()\nclass GaussGuided(nn.Module):\n    def __init__(self, ksize, sigma):\n        super().__init__()\n\n        ax = torch.arange(0, ksize) - ksize//2\n        xx, yy = torch.meshgrid(ax, ax)\n        dis = (xx ** 2 + yy ** 2)\n        dis = torch.exp(-dis / sigma ** 2)\n        dis = dis / dis.sum()\n\n        self.register_buffer(\"gauss\", dis.view(1, ksize**2, 1, 1))\n    \n    def forward(self, kernel):\n\n        return F.mse_loss(self.gauss, kernel)\n\n@LOSS_REGISTRY.register()\nclass PerceptualLossLPIPS(nn.Module):\n    def __init__(self, net=\"alex\", normalize=True):\n        super().__init__()\n        self.fn = lp.LPIPS(net=net, spatial=True)\n        for p in self.fn.parameters():\n            p.requires_grad = False\n        \n        self.normalize = normalize\n    \n    def forward(self, res, ref):\n        return self.fn(res, ref, normalize=self.normalize).mean(), None\n\n\n@LOSS_REGISTRY.register()\nclass MSELoss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.mse_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass L1Loss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.l1_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass GANLoss(nn.Module):\n    \"\"\"Define GAN loss.\n    Args:\n        gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.\n        real_label_val (float): The value for real label. Default: 1.0.\n        fake_label_val (float): The value for fake label. Default: 0.0.\n    \"\"\"\n\n    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):\n        super(GANLoss, self).__init__()\n        self.gan_type = gan_type\n        self.real_label_val = real_label_val\n        self.fake_label_val = fake_label_val\n\n        if self.gan_type == \"vanilla\":\n            self.loss = nn.BCEWithLogitsLoss()\n        elif self.gan_type == \"lsgan\":\n            self.loss = nn.MSELoss()\n        elif self.gan_type == \"wgan\":\n            self.loss = self._wgan_loss\n        elif self.gan_type == \"wgan_softplus\":\n            self.loss = self._wgan_softplus_loss\n        elif self.gan_type == \"hinge\":\n            self.loss = nn.ReLU()\n        else:\n            raise NotImplementedError(f\"GAN type {self.gan_type} is not implemented.\")\n\n    def _wgan_loss(self, input, target):\n        \"\"\"wgan loss.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return -input.mean() if target else input.mean()\n\n    def _wgan_softplus_loss(self, input, target):\n        \"\"\"wgan loss with soft plus. 
softplus is a smooth approximation to the\n        ReLU function.\n        In StyleGAN2, it is called:\n            Logistic loss for discriminator;\n            Non-saturating loss for generator.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return F.softplus(-input).mean() if target else F.softplus(input).mean()\n\n    def get_target_label(self, input, target_is_real):\n        \"\"\"Get target label.\n        Args:\n            input (Tensor): Input tensor.\n            target_is_real (bool): Whether the target is real or fake.\n        Returns:\n            (bool | Tensor): Target tensor. Return bool for wgan, otherwise,\n                return Tensor.\n        \"\"\"\n\n        if self.gan_type in [\"wgan\", \"wgan_softplus\"]:\n            return target_is_real\n        target_val = self.real_label_val if target_is_real else self.fake_label_val\n        return input.new_ones(input.size()) * target_val\n\n    def forward(self, input, target_is_real, is_disc=False):\n        \"\"\"\n        Args:\n            input (Tensor): The input for the loss module, i.e., the network\n                prediction.\n            target_is_real (bool): Whether the target is real or fake.\n            is_disc (bool): Whether the loss is computed for discriminators.\n                Default: False.\n        Returns:\n            Tensor: GAN loss value.\n        \"\"\"\n        target_label = self.get_target_label(input, target_is_real)\n        if self.gan_type == \"hinge\":\n            if is_disc:  # for discriminators in hinge-gan\n                input = -input if target_is_real else input\n                loss = self.loss(1 + input).mean()\n            else:  # for generators in hinge-gan\n                loss = -input.mean()\n        else:  # other gan types\n            loss = self.loss(input, target_label)\n\n        return loss\n\n\n@LOSS_REGISTRY.register()\nclass PerceptualLoss(nn.Module):\n    \"\"\"Perceptual loss with commonly used style loss.\n    Args:\n        layer_weights (dict): The weight for each layer of vgg feature.\n            Here is an example: {'conv5_4': 1.}, which means the conv5_4\n            feature layer (before relu5_4) will be extracted with weight\n            1.0 in calculating losses.\n        vgg_type (str): The type of vgg network used as feature extractor.\n            Default: 'vgg19'.\n        use_input_norm (bool): If True, normalize the input image in vgg.\n            Default: True.\n        range_norm (bool): If True, normalize images from range [-1, 1] to [0, 1].\n            Default: False.\n        perceptual_weight (float): If `perceptual_weight > 0`, the perceptual\n            loss will be calculated and multiplied by the\n            weight. Default: 1.0.\n        style_weight (float): If `style_weight > 0`, the style loss will be\n            calculated and multiplied by the weight.\n            Default: 0.\n        criterion (str): Criterion used for perceptual loss. Default: 'l1'.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_weights,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        perceptual_weight=1.0,\n        style_weight=0.0,\n        criterion=\"l1\",\n    ):\n        super(PerceptualLoss, self).__init__()\n        self.perceptual_weight = perceptual_weight\n        self.style_weight = style_weight\n        self.layer_weights = layer_weights\n        self.vgg = VGGFeatureExtractor(\n            layer_name_list=list(layer_weights.keys()),\n            vgg_type=vgg_type,\n            use_input_norm=use_input_norm,\n            range_norm=range_norm,\n        )\n\n        self.criterion_type = criterion\n        if self.criterion_type == \"l1\":\n            self.criterion = torch.nn.L1Loss()\n        elif self.criterion_type == \"l2\":\n            self.criterion = torch.nn.MSELoss()\n        elif self.criterion_type == \"fro\":\n            self.criterion = None\n        else:\n            raise NotImplementedError(f\"{criterion} criterion is not supported.\")\n\n    def forward(self, x, gt):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, c, h, w).\n            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        # extract vgg features\n        x_features = self.vgg(x)\n        gt_features = self.vgg(gt.detach())\n\n        # calculate perceptual loss\n        if self.perceptual_weight > 0:\n            percep_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    percep_loss += (\n                        torch.norm(x_features[k] - gt_features[k], p=\"fro\")\n                        * self.layer_weights[k]\n                    )\n                else:\n                    percep_loss += (\n                        self.criterion(x_features[k], gt_features[k])\n                        * self.layer_weights[k]\n                    )\n            percep_loss *= self.perceptual_weight\n        else:\n            percep_loss = None\n\n        # calculate style loss\n        if self.style_weight > 0:\n            style_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    style_loss += (\n                        torch.norm(\n                            self._gram_mat(x_features[k])\n                            - self._gram_mat(gt_features[k]),\n                            p=\"fro\",\n                        )\n                        * self.layer_weights[k]\n                    )\n                else:\n                    style_loss += (\n                        self.criterion(\n                            self._gram_mat(x_features[k]),\n                            self._gram_mat(gt_features[k]),\n                        )\n                        * self.layer_weights[k]\n                    )\n            style_loss *= self.style_weight\n        else:\n            style_loss = None\n\n        return percep_loss, style_loss\n\n    def _gram_mat(self, x):\n        \"\"\"Calculate Gram matrix.\n        Args:\n            x (torch.Tensor): Tensor with shape of (n, c, h, w).\n        Returns:\n            torch.Tensor: Gram matrix.\n        \"\"\"\n        n, c, h, w = x.size()\n        features = x.view(n, c, w * h)\n        features_t = features.transpose(1, 2)\n        gram = features.bmm(features_t) / (c * h * 
w)\n        return gram\n\n\n@LOSS_REGISTRY.register()\nclass CharbonnierLoss(nn.Module):\n    \"\"\"Charbonnier Loss (L1)\"\"\"\n\n    def __init__(self, eps=1e-6):\n        super(CharbonnierLoss, self).__init__()\n        self.eps = eps\n\n    def forward(self, x, y):\n        diff = x - y\n        loss = torch.mean(torch.sqrt(diff * diff + self.eps))\n        return loss\n\n\nclass GradientPenaltyLoss(nn.Module):\n    def __init__(self, device=torch.device(\"cpu\")):\n        super(GradientPenaltyLoss, self).__init__()\n        self.register_buffer(\"grad_outputs\", torch.Tensor())\n        self.grad_outputs = self.grad_outputs.to(device)\n\n    def get_grad_outputs(self, input):\n        if self.grad_outputs.size() != input.size():\n            self.grad_outputs.resize_(input.size()).fill_(1.0)\n        return self.grad_outputs\n\n    def forward(self, interp, interp_crit):\n        grad_outputs = self.get_grad_outputs(interp_crit)\n        grad_interp = torch.autograd.grad(\n            outputs=interp_crit,\n            inputs=interp,\n            grad_outputs=grad_outputs,\n            create_graph=True,\n            retain_graph=True,\n            only_inputs=True,\n        )[0]\n        grad_interp = grad_interp.view(grad_interp.size(0), -1)\n        grad_interp_norm = grad_interp.norm(2, dim=1)\n\n        loss = ((grad_interp_norm - 1) ** 2).mean()\n        return loss\n"
  },
  {
    "path": "codes/config/Bicubic/archs/lr_scheduler.py",
    "content": "import math\nfrom collections import Counter, defaultdict\n\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\n\nfrom utils.registry import LR_SCHEDULER_REGISTRY\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass LinearDecayLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        decay_prop,\n        total_steps,\n        last_epoch=-1,\n    ):\n        self.decay_prop = decay_prop\n        self.total_steps = total_steps\n\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n\n        return [\n            group[\"initial_lr\"]\n            * (1 - (self.last_epoch + 1) * self.decay_prop / self.total_steps)\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass MultiStepRestartLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        milestones,\n        restarts=None,\n        weights=None,\n        gamma=0.1,\n        clear_state=False,\n        last_epoch=-1,\n    ):\n        self.milestones = Counter(milestones)\n        self.gamma = gamma\n        self.clear_state = clear_state\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch in self.restarts:\n            if self.clear_state:\n                self.optimizer.state = defaultdict(dict)\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        if self.last_epoch not in self.milestones:\n            return [group[\"lr\"] for group in self.optimizer.param_groups]\n        return [\n            group[\"lr\"] * self.gamma ** self.milestones[self.last_epoch]\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass CosineAnnealingRestartLR(_LRScheduler):\n    def __init__(\n        self, optimizer, T_period, restarts=None, weights=None, eta_min=0, last_epoch=-1\n    ):\n        self.T_period = T_period\n        self.T_max = self.T_period[0]  # current T period\n        self.eta_min = eta_min\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        self.last_restart = 0\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch == 0:\n            return self.base_lrs\n        elif self.last_epoch in self.restarts:\n            self.last_restart = self.last_epoch\n            self.T_max = self.T_period[self.restarts.index(self.last_epoch) + 1]\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        elif (self.last_epoch - self.last_restart - 1 - self.T_max) % (\n            2 * self.T_max\n        ) == 0:\n            return [\n                group[\"lr\"]\n                + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2\n                for base_lr, group in 
zip(self.base_lrs, self.optimizer.param_groups)\n            ]\n        return [\n            (1 + math.cos(math.pi * (self.last_epoch - self.last_restart) / self.T_max))\n            / (\n                1\n                + math.cos(\n                    math.pi * ((self.last_epoch - self.last_restart) - 1) / self.T_max\n                )\n            )\n            * (group[\"lr\"] - self.eta_min)\n            + self.eta_min\n            for group in self.optimizer.param_groups\n        ]\n"
  },
  {
    "path": "codes/config/Bicubic/archs/module_util.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\n\ndef initialize_weights(net_l, scale=1):\n    if not isinstance(net_l, list):\n        net_l = [net_l]\n    for net in net_l:\n        for m in net.modules():\n            if isinstance(m, nn.Conv2d):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale  # for residual block\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                init.constant_(m.weight, 1)\n                init.constant_(m.bias.data, 0.0)\n\n\ndef make_layer(block, n_layers):\n    layers = []\n    for _ in range(n_layers):\n        layers.append(block())\n    return nn.Sequential(*layers)\n\n\nclass ResidualBlock_noBN(nn.Module):\n    \"\"\"Residual block w/o BN\n    ---Conv-ReLU-Conv-+-\n     |________________|\n    \"\"\"\n\n    def __init__(self, nf=64):\n        super(ResidualBlock_noBN, self).__init__()\n        self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n\n        # initialization\n        initialize_weights([self.conv1, self.conv2], 0.1)\n\n    def forward(self, x):\n        identity = x\n        out = F.relu(self.conv1(x), inplace=True)\n        out = self.conv2(out)\n        return identity + out\n\n\ndef flow_warp(x, flow, interp_mode=\"bilinear\", padding_mode=\"zeros\"):\n    \"\"\"Warp an image or feature map with optical flow\n    Args:\n        x (Tensor): size (N, C, H, W)\n        flow (Tensor): size (N, H, W, 2), normal value\n        interp_mode (str): 'nearest' or 'bilinear'\n        padding_mode (str): 'zeros' or 'border' or 'reflection'\n\n    Returns:\n        Tensor: warped image or feature map\n    \"\"\"\n    assert x.size()[-2:] == flow.size()[1:3]\n    B, C, H, W = x.size()\n    # mesh grid\n    grid_y, grid_x = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))\n    grid = torch.stack((grid_x, grid_y), 2).float()  # W(x), H(y), 2\n    grid.requires_grad = False\n    grid = grid.type_as(x)\n    vgrid = grid + flow\n    # scale grid to [-1,1]\n    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(W - 1, 1) - 1.0\n    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(H - 1, 1) - 1.0\n    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)\n    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode)\n    return output\n"
  },
  {
    "path": "codes/config/Bicubic/archs/rcan.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\nclass CALayer(nn.Module):\n    def __init__(self, channel, reduction=16):\n        super(CALayer, self).__init__()\n        # global average pooling: feature --> point\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        # feature channel downscale and upscale --> channel weight\n        self.conv_du = nn.Sequential(\n            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),\n            
nn.ReLU(inplace=True),\n            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),\n            nn.Sigmoid(),\n        )\n\n    def forward(self, x):\n        y = self.avg_pool(x)\n        y = self.conv_du(y)\n        return x * y\n\n\n## Residual Channel Attention Block (RCAB)\nclass RCAB(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        reduction,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(RCAB, self).__init__()\n        modules_body = []\n        for i in range(2):\n            modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                modules_body.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                modules_body.append(act)\n        modules_body.append(CALayer(n_feat, reduction))\n        self.body = nn.Sequential(*modules_body)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x)\n        # res = self.body(x).mul(self.res_scale)\n        res += x\n        return res\n\n\n## Residual Group (RG)\nclass ResidualGroup(nn.Module):\n    def __init__(\n        self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks\n    ):\n        super(ResidualGroup, self).__init__()\n        modules_body = []\n        modules_body = [\n            RCAB(\n                conv,\n                n_feat,\n                kernel_size,\n                reduction,\n                bias=True,\n                bn=False,\n                act=nn.ReLU(True),\n                res_scale=1,\n            )\n            for _ in range(n_resblocks)\n        ]\n        modules_body.append(conv(n_feat, n_feat, kernel_size))\n        self.body = nn.Sequential(*modules_body)\n\n    def forward(self, x):\n        res = self.body(x)\n        res += x\n        return res\n\n\n## Residual Channel Attention Network (RCAN)\n@ARCH_REGISTRY.register()\nclass RCAN(nn.Module):\n    def __init__(self, ng, nb, nf, reduction=16, upscale=4, conv=default_conv):\n        super(RCAN, self).__init__()\n\n        n_resgroups = ng\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        reduction = reduction\n        scale = upscale\n\n        act = nn.ReLU(True)\n\n        # RGB mean for DIV2K\n        rgb_mean = (0.4488, 0.4371, 0.4040)\n        rgb_std = (1.0, 1.0, 1.0)\n        self.sub_mean = MeanShift(1.0, rgb_mean, rgb_std, -1)\n\n        # define head module\n        modules_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        modules_body = [\n            ResidualGroup(\n                conv,\n                n_feats,\n                kernel_size,\n                reduction,\n                act=act,\n                res_scale=1.0,\n                n_resblocks=nb,\n            )\n            for _ in range(ng)\n        ]\n\n        modules_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        modules_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.add_mean = MeanShift(1.0, rgb_mean, rgb_std, 1)\n\n        self.head = nn.Sequential(*modules_head)\n        self.body = nn.Sequential(*modules_body)\n        self.tail = nn.Sequential(*modules_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = 
self.tail(res)\n        x = self.add_mean(x)\n\n        return x\n\n    def load_state_dict(self, state_dict, strict=False):\n        own_state = self.state_dict()\n        for name, param in state_dict.items():\n            if name in own_state:\n                if isinstance(param, nn.Parameter):\n                    param = param.data\n                try:\n                    own_state[name].copy_(param)\n                except Exception:\n                    if name.find(\"tail\") >= 0:\n                        print(\"Replacing pre-trained upsampler with a new one...\")\n                    else:\n                        raise RuntimeError(\n                            \"While copying the parameter named {}, \"\n                            \"whose dimensions in the model are {} and \"\n                            \"whose dimensions in the checkpoint are {}.\".format(\n                                name, own_state[name].size(), param.size()\n                            )\n                        )\n            elif strict:\n                if name.find(\"tail\") == -1:\n                    raise KeyError('unexpected key \"{}\" in state_dict'.format(name))\n\n        if strict:\n            missing = set(own_state.keys()) - set(state_dict.keys())\n            if len(missing) > 0:\n                raise KeyError('missing keys in state_dict: \"{}\"'.format(missing))\n"
  },
  {
    "path": "codes/config/Bicubic/archs/rrdb.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\nclass ResidualDenseBlock_5C(nn.Module):\n    def __init__(self, nf=64, gc=32, bias=True):\n        super(ResidualDenseBlock_5C, self).__init__()\n        # gc: growth channel, i.e. intermediate channels\n        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)\n        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)\n        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1\n        )\n\n    def forward(self, x):\n        x1 = self.lrelu(self.conv1(x))\n        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))\n        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))\n        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))\n        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))\n        return x5 * 0.2 + x\n\n\nclass RRDB(nn.Module):\n    \"\"\"Residual in Residual Dense Block\"\"\"\n\n    def __init__(self, nf, gc=32):\n        super(RRDB, self).__init__()\n        self.RDB1 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB2 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB3 = ResidualDenseBlock_5C(nf, gc)\n\n    def forward(self, x):\n        out = self.RDB1(x)\n        out = self.RDB2(out)\n        out = self.RDB3(out)\n        return out * 0.2 + x\n\n\n@ARCH_REGISTRY.register()\nclass RRDBNet(nn.Module):\n    def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4):\n        super(RRDBNet, self).__init__()\n        self.upscale = upscale\n        RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.RRDB_trunk = make_layer(RRDB_block_f, nb)\n        self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        #### upsampling\n        self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        if upscale == 4:\n            self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.conv_first(x)\n        trunk = self.trunk_conv(self.RRDB_trunk(fea))\n        fea = fea + trunk\n\n        if self.upscale == 2 or self.upscale == 3:\n            fea = self.lrelu(\n                self.upconv1(\n                    F.interpolate(fea, scale_factor=self.upscale, mode=\"nearest\")\n                )\n            )\n        if self.upscale == 4:\n            fea = self.lrelu(\n                self.upconv1(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n            fea = self.lrelu(\n                self.upconv2(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n        out = self.conv_last(self.lrelu(self.HRconv(fea)))\n\n        return out\n"
  },
  {
    "path": "codes/config/Bicubic/archs/srresnet.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\n@ARCH_REGISTRY.register()\nclass MSRResNet(nn.Module):\n    \"\"\"modified SRResNet\"\"\"\n\n    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):\n        super(MSRResNet, self).__init__()\n        self.upscale = upscale\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        basic_block = functools.partial(ResidualBlock_noBN, nf=nf)\n        self.recon_trunk = make_layer(basic_block, nb)\n\n        # upsampling\n        if self.upscale == 2:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n        elif self.upscale == 3:\n            self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(3)\n        elif self.upscale == 4:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1\n        )\n        if self.upscale == 4:\n            initialize_weights(self.upconv2, 0.1)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv_first(x))\n        out = self.recon_trunk(fea)\n\n        if self.upscale == 4:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n        elif self.upscale == 3 or self.upscale == 2:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n\n        out = self.conv_last(self.lrelu(self.HRconv(out)))\n        base = F.interpolate(\n            x, scale_factor=self.upscale, mode=\"bilinear\", align_corners=False\n        )\n        out += base\n        return out\n"
  },
  {
    "path": "codes/config/Bicubic/archs/vgg.py",
    "content": "import os\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn as nn\nfrom torchvision.models import vgg as vgg\n\nfrom utils.registry import ARCH_REGISTRY\n\nVGG_PRETRAIN_PATH = \"checkpoints/pretrained_models/vgg19-dcbb9e9d.pth\"\nNAMES = {\n    \"vgg11\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg13\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg16\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"pool5\",\n    ],\n    \"vgg19\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"conv3_4\",\n        \"relu3_4\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"conv4_4\",\n        \"relu4_4\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"conv5_4\",\n        \"relu5_4\",\n        \"pool5\",\n    ],\n}\n\n\ndef insert_bn(names):\n    \"\"\"Insert bn layer after each conv.\n    Args:\n        names (list): The list of layer names.\n    Returns:\n        list: The list of layer names with bn layers.\n    \"\"\"\n    names_bn = []\n    for name in names:\n        names_bn.append(name)\n        if \"conv\" in name:\n            position = name.replace(\"conv\", \"\")\n            names_bn.append(\"bn\" + position)\n    return names_bn\n\n\n@ARCH_REGISTRY.register()\nclass VGGFeatureExtractor(nn.Module):\n    \"\"\"VGG network for feature extraction.\n    In this implementation, we allow users to choose whether use normalization\n    in the input feature and the type of vgg network. 
Note that the pretrained\n    path must fit the vgg type.\n    Args:\n        layer_name_list (list[str]): Forward function returns the corresponding\n            features according to the layer_name_list.\n            Example: {'relu1_1', 'relu2_1', 'relu3_1'}.\n        vgg_type (str): Set the type of vgg network. Default: 'vgg19'.\n        use_input_norm (bool): If True, normalize the input image. Importantly,\n            the input feature must be in the range [0, 1]. Default: True.\n        range_norm (bool): If True, normalize images from range [-1, 1] to [0, 1].\n            Default: False.\n        requires_grad (bool): If True, the parameters of the VGG network will be\n            optimized. Default: False.\n        remove_pooling (bool): If True, the max pooling operations in the VGG net\n            will be removed. Default: False.\n        pooling_stride (int): The stride of max pooling operation. Default: 2.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_name_list,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        requires_grad=False,\n        remove_pooling=False,\n        pooling_stride=2,\n    ):\n        super(VGGFeatureExtractor, self).__init__()\n\n        self.layer_name_list = layer_name_list\n        self.use_input_norm = use_input_norm\n        self.range_norm = range_norm\n\n        self.names = NAMES[vgg_type.replace(\"_bn\", \"\")]\n        if \"bn\" in vgg_type:\n            self.names = insert_bn(self.names)\n\n        # only borrow layers that will be used to avoid unused params\n        max_idx = 0\n        for v in layer_name_list:\n            idx = self.names.index(v)\n            if idx > max_idx:\n                max_idx = idx\n\n        if os.path.exists(VGG_PRETRAIN_PATH):\n            vgg_net = getattr(vgg, vgg_type)(pretrained=False)\n            state_dict = torch.load(\n                VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage\n            )\n            vgg_net.load_state_dict(state_dict)\n        else:\n            vgg_net = getattr(vgg, vgg_type)(pretrained=True)\n\n        features = vgg_net.features[: max_idx + 1]\n\n        modified_net = OrderedDict()\n        for k, v in zip(self.names, features):\n            if \"pool\" in k:\n                # if remove_pooling is true, pooling operation will be removed\n                if remove_pooling:\n                    continue\n                else:\n                    # in some cases, we may want to change the default stride\n                    modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride)\n            else:\n                modified_net[k] = v\n\n        self.vgg_net = nn.Sequential(modified_net)\n\n        if not requires_grad:\n            self.vgg_net.eval()\n            for param in self.parameters():\n                param.requires_grad = False\n        else:\n            self.vgg_net.train()\n            for param in self.parameters():\n                param.requires_grad = True\n\n        if self.use_input_norm:\n            # the mean is for image with range [0, 1]\n            self.register_buffer(\n                \"mean\", torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)\n            )\n            # the std is for image with range [0, 1]\n            self.register_buffer(\n                \"std\", torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)\n            )\n\n    def forward(self, x):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, 
c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        if self.range_norm:\n            x = (x + 1) / 2\n        if self.use_input_norm:\n            x = (x - self.mean) / self.std\n\n        output = {}\n        for key, layer in self.vgg_net._modules.items():\n            x = layer(x)\n            if key in self.layer_name_list:\n                output[key] = x.clone()\n\n        return output\n"
  },
  {
    "path": "codes/config/Bicubic/count_flops.py",
    "content": "import argparse\nimport sys\n\nimport torch\nfrom torchsummaryX import summary\n\nsys.path.append(\"../../\")\nimport utils.option as option\nfrom models import create_model\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"--opt\",\n    type=str,\n    default=\"options/setting1/test/test_setting1_x4.yml\",\n    help=\"Path to option YMAL file of Predictor.\",\n)\nargs = parser.parse_args()\nopt = option.parse(args.opt, root_path=\".\", is_train=True)\n\nopt = option.dict_to_nonedict(opt)\nmodel = create_model(opt)\n\ntest_tensor = torch.randn(1, 3, 270, 180).cuda()\nfor name, net in model.networks.items():\n    summary(net.cuda(), x=test_tensor)\n    print(\"Above are results for net {}\".format(name))\n    input()\n"
  },
  {
    "path": "codes/config/Bicubic/inference.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport os.path as osp\nimport random\nimport sys\nimport cv2\nfrom collections import defaultdict\nfrom glob import glob\nfrom tqdm import tqdm\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom data.data_sampler import DistIterSampler\nfrom metrics import IQA\nfrom models import create_model\n\n\n\n#### options\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"-opt\",\n    type=str,\n    default=\"options/test/2020Track2.yml\",\n    help=\"Path to options YMAL file.\",\n)\nparser.add_argument(\"-input_dir\", type=str, default=\"../../../data_samples/LR\")\nparser.add_argument(\"-output_dir\", type=str, default=\"../../../data_samples/BSRGAN\")\nargs = parser.parse_args()\nopt = option.parse(args.opt, is_train=False)\n\nopt = option.dict_to_nonedict(opt)\n\nmodel = create_model(opt)\n\nif not osp.exists(args.output_dir):\n    os.makedirs(args.output_dir)\n\ntest_files = glob(osp.join(args.input_dir, \"*\"))\nfor inx, path in tqdm(enumerate(test_files)):\n    name = path.split(\"/\")[-1].split(\".\")[0]\n\n    img = cv2.imread(path)[:, :, [2, 1, 0]]\n    img = img.transpose(2, 0, 1)[None] / 255\n    img_t = torch.as_tensor(np.ascontiguousarray(img)).float()\n\n    model.test({\"src\": img_t}, crop_size=512)\n    outdict = model.get_current_visuals()\n\n    sr = outdict[\"sr\"]\n    sr_im = util.tensor2img(sr)\n\n    save_path = osp.join(args.output_dir, \"{}_x{}.png\".format(name, opt[\"scale\"]))\n    cv2.imwrite(save_path, sr_im)\n"
  },
  {
    "path": "codes/config/Bicubic/models/__init__.py",
    "content": "import importlib\nimport logging\nimport os\nimport os.path as osp\n\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\nmodel_folder = osp.dirname(__file__)\nmodel_names = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(model_folder)\n    if v.endswith(\"_model.py\")\n]\n_model_modules = [\n    importlib.import_module(f\"models.{file_name}\") for file_name in model_names\n]\n\n\ndef create_model(opt, **kwarg):\n    model = opt[\"model\"]\n    m = MODEL_REGISTRY.get(model)(opt, **kwarg)\n    logger.info(\"Model [{:s}] is created.\".format(m.__class__.__name__))\n    return m\n"
  },
  {
    "path": "codes/config/Bicubic/models/base_model.py",
    "content": "import logging\nimport os\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\n\nfrom archs import build_loss, build_network, build_scheduler\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass BaseModel:\n    def __init__(self, opt):\n\n        self.opt = opt\n\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n            self.world_size = torch.distributed.get_world_size()\n        else:\n            self.rank = 0  # non dist training\n\n        self.device = torch.device(\"cuda\" if opt[\"gpu_ids\"] is not None else \"cpu\")\n        self.is_train = opt[\"is_train\"]\n        self.log_dict = OrderedDict()\n\n        self.data_names = []\n        self.networks = {}\n\n        self.optimizers = {}\n        self.schedulers = {}\n\n    def setup_train(self, train_opt):\n        # define losses\n        loss_opt = train_opt[\"losses\"]\n        self.losses = self.build_losses(loss_opt)\n\n        # build optmizers\n        optimizer_opts = train_opt[\"optimizers\"]\n        self.optimizers = self.build_optimizers(optimizer_opts)\n\n        # set schedulers\n        scheduler_opts = train_opt[\"schedulers\"]\n        self.schedulers = self.build_schedulers(scheduler_opts)\n\n        # set to training state\n        self.set_network_state(self.networks.keys(), \"train\")\n\n    def feed_data(self, data):\n        pass\n\n    def optimize_parameters(self):\n        pass\n\n    def get_current_visuals(self):\n        pass\n\n    def get_current_losses(self):\n        pass\n\n    def print_network(self):\n        pass\n\n    def save(self, label):\n        pass\n\n    def load(self):\n        pass\n\n    def build_network(self, net_opt):\n\n        net = build_network(net_opt)\n\n        if isinstance(net, nn.Module):\n            net = self.model_to_device(net)\n\n            if net_opt.get(\"pretrain\"):\n                pretrain = net_opt.pop(\"pretrain\")\n                self.load_network(net, pretrain[\"path\"], pretrain[\"strict_load\"])\n\n            self.print_network(net)\n        return net\n\n    def build_losses(self, loss_opt):\n        losses = {}\n\n        defined_loss_names = list(loss_opt.keys())\n        assert set(defined_loss_names).issubset(set(self.loss_names))\n\n        for name in defined_loss_names:\n            loss_conf = loss_opt.get(name)\n            if loss_conf[\"weight\"] > 0:\n                self.loss_weights[name] = loss_conf.pop(\"weight\")\n                losses[name] = build_loss(loss_conf).to(self.device)\n\n        return losses\n\n    def build_optimizers(self, optim_opts):\n        optimizers = {}\n\n        if \"default\" in optim_opts.keys():\n            default_optim = optim_opts.pop(\"default\")\n\n        defined_optimizer_names = list(optim_opts.keys())\n        assert set(defined_optimizer_names).issubset(self.networks.keys())\n\n        for name in defined_optimizer_names:\n            optim_opt = optim_opts[name]\n            if optim_opt is None:\n                optim_opt = default_optim.copy()\n\n            params = []\n            for v in self.networks[name].parameters():\n                if v.requires_grad:\n                    params.append(v)\n\n            optim_type = optim_opt.pop(\"type\")\n            optimizer = getattr(torch.optim, optim_type)(params=params, **optim_opt)\n            optimizers[name] = optimizer\n\n  
      return optimizers\n\n    def build_schedulers(self, scheduler_opts):\n        \"\"\"Set up schedulers.\"\"\"\n        schedulers = {}\n        if \"default\" in scheduler_opts.keys():\n            default_opt = scheduler_opts.pop(\"default\")\n\n        for name in self.optimizers.keys():\n            scheduler_opt = scheduler_opts[name]\n            if scheduler_opt is None:\n                scheduler_opt = default_opt.copy()\n\n            schedulers[name] = build_scheduler(self.optimizers[name], scheduler_opt)\n\n        return schedulers\n\n    def model_to_device(self, net):\n        \"\"\"Model to device. It also wraps models with DistributedDataParallel\n        or DataParallel.\n        Args:\n            net (nn.Module)\n        \"\"\"\n        net = net.to(self.device)\n        if self.opt[\"dist\"]:\n            net = DistributedDataParallel(net, device_ids=[torch.cuda.current_device()])\n        else:\n            net = DataParallel(net)\n        return net\n\n    def print_network(self, net):\n        s, n = self.get_network_description(net)\n        if isinstance(net, nn.DataParallel) or isinstance(net, DistributedDataParallel):\n            net_struc_str = \"{} - {}\".format(\n                net.__class__.__name__, net.module.__class__.__name__\n            )\n        else:\n            net_struc_str = \"{}\".format(net.__class__.__name__)\n        if self.rank <= 0:\n            logger.info(\n                \"Network structure: {}, with parameters: {:,d}\".format(\n                    net_struc_str, n\n                )\n            )\n            logger.info(s)\n\n    def set_optimizer(self, names, operation):\n        for name in names:\n            getattr(self.optimizers[name], operation)()\n\n    def set_requires_grad(self, names, requires_grad):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                for v in self.networks[name].parameters():\n                    v.requires_grad = requires_grad\n\n    def set_network_state(self, names, state):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                getattr(self.networks[name], state)()\n\n    def clip_grad_norm(self, names, norm):\n        for name in names:\n            nn.utils.clip_grad_norm_(self.networks[name].parameters(), max_norm=norm)\n\n    def _set_lr(self, lr_groups_l):\n        \"\"\"Set learning rate for warmup.\n        lr_groups_l: list of lr_groups, 
one entry per optimizer.\"\"\"\n        for optimizer, lr_groups in zip(self.optimizers.values(), lr_groups_l):\n            for param_group, lr in zip(optimizer.param_groups, lr_groups):\n                param_group[\"lr\"] = lr\n\n    def _get_init_lr(self):\n        # get the initial lr, which is set by the scheduler\n        init_lr_groups_l = []\n        for optimizer in self.optimizers.values():\n            init_lr_groups_l.append([v[\"initial_lr\"] for v in optimizer.param_groups])\n        return init_lr_groups_l\n\n    def update_learning_rate(self, cur_iter, warmup_iter=-1):\n        for _, scheduler in self.schedulers.items():\n            scheduler.step()\n        #### set up warm up learning rate\n        if cur_iter < warmup_iter:\n            # get initial lr for each group\n            init_lr_g_l = self._get_init_lr()\n            # modify warming-up learning rates\n            warm_up_lr_l = []\n            for init_lr_g in init_lr_g_l:\n                warm_up_lr_l.append([v / warmup_iter * cur_iter for v in init_lr_g])\n            # set learning rate\n            self._set_lr(warm_up_lr_l)\n\n    def get_current_learning_rate(self):\n        # return self.schedulers[0].get_lr()[0]\n        return list(self.optimizers.values())[0].param_groups[0][\"lr\"]\n\n    def get_network_description(self, network):\n        \"\"\"Get the string and total parameters of the network\"\"\"\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        s = str(network)\n        n = sum(map(lambda x: x.numel(), network.parameters()))\n        return s, n\n\n    def save_network(self, network, network_label, iter_label):\n        save_filename = \"{}_{}.pth\".format(iter_label, network_label)\n        save_path = os.path.join(self.opt[\"path\"][\"models\"], save_filename)\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        state_dict = network.state_dict()\n        for key, param in state_dict.items():\n            state_dict[key] = param.cpu()\n        torch.save(state_dict, save_path)\n\n    def save(self, iter_label):\n        for name in self.optimizers.keys():\n            self.save_network(self.networks[name], name, iter_label)\n\n    def load_network(self, network, load_path, strict=True):\n        if load_path is not None:\n            if isinstance(network, nn.DataParallel) or isinstance(\n                network, DistributedDataParallel\n            ):\n                network = network.module\n            load_net = torch.load(load_path)\n            load_net_clean = OrderedDict()  # remove unnecessary 'module.'\n            for k, v in load_net.items():\n                if k.startswith(\"module.\"):\n                    load_net_clean[k[7:]] = v\n                else:\n                    load_net_clean[k] = v\n            network.load_state_dict(load_net_clean, strict=strict)\n\n    def save_training_state(self, epoch, iter_step):\n        \"\"\"Saves training state during training, which will be used for resuming\"\"\"\n        state = {\"epoch\": epoch, \"iter\": iter_step, \"schedulers\": {}, \"optimizers\": {}}\n        for k, s in self.schedulers.items():\n            state[\"schedulers\"][k] = s.state_dict()\n        for k, o in self.optimizers.items():\n            state[\"optimizers\"][k] = o.state_dict()\n        save_filename = \"{}.state\".format(iter_step)\n        save_path = os.path.join(self.opt[\"path\"][\"training_state\"], save_filename)\n        torch.save(state, save_path)\n\n    def resume_training(self, resume_state):\n        \"\"\"Resume the optimizers and schedulers for training\"\"\"\n        resume_optimizers = resume_state[\"optimizers\"]\n        resume_schedulers = resume_state[\"schedulers\"]\n        assert len(resume_optimizers) == len(\n            self.optimizers\n        ), \"Wrong lengths of optimizers\"\n        assert len(resume_schedulers) == len(\n            self.schedulers\n        ), \"Wrong lengths of schedulers\"\n        for name, o in resume_optimizers.items():\n            self.optimizers[name].load_state_dict(o)\n        for name, s in resume_schedulers.items():\n            self.schedulers[name].load_state_dict(s)\n\n    def reduce_loss_dict(self, loss_dict):\n        \"\"\"Reduce loss dict.\n        In distributed training, it averages the losses among different GPUs.\n        Args:\n            loss_dict (OrderedDict): Loss dict.\n        \"\"\"\n        with torch.no_grad():\n            if self.opt[\"dist\"]:\n                keys = []\n                losses = []\n                for name, value in loss_dict.items():\n                    keys.append(name)\n                    losses.append(value)\n                losses = torch.stack(losses, 0)\n                torch.distributed.reduce(losses, dst=0)\n                if self.rank == 0:\n                    losses /= self.world_size\n                loss_dict = {key: loss for key, loss in zip(keys, losses)}\n\n            log_dict = OrderedDict()\n            for name, value in loss_dict.items():\n                log_dict[name] = value.mean().item()\n\n            return log_dict\n\n    def get_current_log(self):\n        return self.log_dict\n"
  },
  {
    "path": "codes/config/Bicubic/models/sr_model.py",
    "content": "import logging\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.registry import MODEL_REGISTRY\n\nfrom .base_model import BaseModel\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass SRModel(BaseModel):\n    def __init__(self, opt):\n        super().__init__(opt)\n\n        self.data_names = [\"lr\", \"hr\"]\n\n        self.network_names = [\"netSR\"]\n        self.networks = {}\n\n        self.loss_names = [\"sr_adv\", \"sr_pix\", \"sr_percep\"]\n        self.loss_weights = {}\n        self.losses = {}\n        self.optimizers = {}\n\n        # define networks and load pretrained models\n        nets_opt = opt[\"networks\"]\n        defined_network_names = list(nets_opt.keys())\n        assert set(defined_network_names).issubset(set(self.network_names))\n\n        for name in defined_network_names:\n            setattr(self, name, self.build_network(nets_opt[name]))\n            self.networks[name] = getattr(self, name)\n\n        if self.is_train:\n            # setup loss, optimizers, schedulers\n            self.setup_train(opt[\"train\"])\n\n    def feed_data(self, data):\n\n        self.lr = data[\"src\"].to(self.device)\n        self.hr = data[\"tgt\"].to(self.device)\n\n    def forward(self):\n\n        self.sr = self.netSR(self.lr)\n\n    def optimize_parameters(self, step):\n\n        self.forward()\n\n        loss_dict = OrderedDict()\n\n        l_sr = 0\n\n        sr_pix = self.losses[\"sr_pix\"](self.hr, self.sr)\n        loss_dict[\"sr_pix\"] = sr_pix\n        l_sr += self.loss_weights[\"sr_pix\"] * sr_pix\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD\"], False)\n            sr_adv_g = self.calculate_rgan_loss_G(\n                self.netD, self.losses[\"sr_adv\"], self.hr, self.sr\n            )\n            loss_dict[\"sr_adv_g\"] = sr_adv_g\n            l_sr += self.loss_weights[\"sr_adv\"] * sr_adv_g\n\n        if self.losses.get(\"sr_percep\"):\n            sr_percep, sr_style = self.losses[\"sr_percep\"](self.hr, self.sr)\n            loss_dict[\"sr_percep\"] = sr_percep\n            if sr_style is not None:\n                loss_dict[\"sr_style\"] = sr_style\n                l_sr += self.loss_weights[\"sr_percep\"] * sr_style\n            l_sr += self.loss_weights[\"sr_percep\"] * sr_percep\n\n        self.set_optimizer(names=[\"netSR\"], operation=\"zero_grad\")\n        l_sr.backward()\n        self.set_optimizer(names=[\"netSR\"], operation=\"step\")\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD\"], True)\n            sr_adv_d = self.calculate_rgan_loss_D(\n                self.netD, self.losses[\"sr_adv\"], self.hr, self.sr\n            )\n            loss_dict[\"sr_adv_d\"] = sr_adv_d\n\n            self.optimizers[\"netD\"].zero_grad()\n            sr_adv_d.backward()\n            self.optimizers[\"netD\"].step()\n\n        self.log_dict = self.reduce_loss_dict(loss_dict)\n\n    def calculate_rgan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n        loss_real = criterion(\n            d_pred_real - d_pred_fake.detach().mean(), True, is_disc=False\n        )\n        loss_fake = criterion(\n            d_pred_fake - d_pred_real.detach().mean(), False, is_disc=False\n        )\n\n        loss = (loss_real + loss_fake) / 2\n\n        return loss\n\n    def calculate_rgan_loss_G(self, netD, criterion, real, fake):\n\n        
\n        d_pred_fake = netD(fake)\n        d_pred_real = netD(real).detach()\n        loss_real = criterion(d_pred_real - d_pred_fake.mean(), False, is_disc=False)\n        loss_fake = criterion(d_pred_fake - d_pred_real.mean(), True, is_disc=False)\n\n        loss = (loss_real + loss_fake) / 2\n\n        return loss\n\n    def test(self, data, crop_size=None):\n        self.real_lr = data[\"src\"].to(self.device)\n        self.netSR.eval()\n        with torch.no_grad():\n            if crop_size is None:\n                self.fake_real_hr = self.netSR(self.real_lr)\n            else:\n                self.fake_real_hr = self.crop_test(self.real_lr, crop_size)\n        self.netSR.train()\n\n    def crop_test(self, lr, crop_size):\n        # Super-resolve large inputs patch by patch: one pass of patches aligned\n        # to the top-left corner and one aligned to the bottom-right corner,\n        # then blend the two passes so the whole image is covered.\n        b, c, h, w = lr.shape\n        scale = self.opt[\"scale\"]\n\n        h_start = list(range(0, h - crop_size, crop_size))\n        w_start = list(range(0, w - crop_size, crop_size))\n\n        # -1 is a sentinel for pixels not covered by this pass\n        sr1 = torch.zeros(b, c, int(h * scale), int(w * scale), device=self.device) - 1\n        for hs in h_start:\n            for ws in w_start:\n                lr_patch = lr[:, :, hs: hs + crop_size, ws: ws + crop_size]\n                sr_patch = self.netSR(lr_patch)\n\n                sr1[:, :,\n                    int(hs * scale):int((hs + crop_size) * scale),\n                    int(ws * scale):int((ws + crop_size) * scale)\n                ] = sr_patch\n\n        h_end = list(range(h, crop_size, -crop_size))\n        w_end = list(range(w, crop_size, -crop_size))\n\n        sr2 = torch.zeros(b, c, int(h * scale), int(w * scale), device=self.device) - 1\n        for hd in h_end:\n            for wd in w_end:\n                lr_patch = lr[:, :, hd - crop_size:hd, wd - crop_size:wd]\n                sr_patch = self.netSR(lr_patch)\n\n                sr2[:, :,\n                    int((hd - crop_size) * scale):int(hd * scale),\n                    int((wd - crop_size) * scale):int(wd * scale)\n                ] = sr_patch\n\n        # Use whichever pass covers a pixel; average where both do. Coverage is\n        # tested against the -1 sentinel (rather than > 0), since valid SR\n        # values can be non-positive.\n        mask1 = (\n            (sr1 == -1).float() * 0 +\n            (sr2 == -1).float() * 1 +\n            ((sr1 != -1) * (sr2 != -1)).float() * 0.5\n        )\n\n        mask2 = (\n            (sr1 == -1).float() * 1 +\n            (sr2 == -1).float() * 0 +\n            ((sr1 != -1) * (sr2 != -1)).float() * 0.5\n        )\n\n        sr = mask1 * sr1 + mask2 * sr2\n\n        return sr\n\n    def get_current_visuals(self, need_GT=True):\n        out_dict = OrderedDict()\n        out_dict[\"lr\"] = self.real_lr.detach()[0].float().cpu()\n        out_dict[\"sr\"] = self.fake_real_hr.detach()[0].float().cpu()\n        return out_dict\n"
  },
  {
    "path": "codes/config/Bicubic/options/test/2017Track2_2020Track1.yml",
    "content": "#### general settings\nname: Bicubic_2017Track2_2020Track1\nuse_tb_logger: false\nmodel: SRModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [psnr, ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2017Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  test5:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: BicuBic\n    setting:\n      upscale: 4\n    pretrain: \n      path: ~\n      strict_load: true\n"
  },
  {
    "path": "codes/config/Bicubic/options/test/2018Track2_2020Track4.yml",
    "content": "#### general settings\nname: Bicubic_2018Track2_2018Track4\nuse_tb_logger: false\nmodel: SRModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [best_psnr, best_ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  test2:\n    name: 2018Track4\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: BicuBic\n    setting:\n      upscale: 4\n    pretrain: \n      path: ~\n      strict_load: true\n"
  },
  {
    "path": "codes/config/Bicubic/options/test/2020Track2.yml",
    "content": "#### general settings\nname: 2020Track2\nuse_tb_logger: false\nmodel: SRModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2020Track2\n    mode: SingleDataset\n    data_type: lmdb\n    dataroot: /home/lzx/SRDatasets/NTIRE2020/track2/test.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: BicuBic\n    setting:\n      upscale: 4\n    pretrain: \n      path: ~\n      strict_load: true"
  },
  {
    "path": "codes/config/Bicubic/test.py",
    "content": "import argparse\nimport logging\nimport os.path\nimport sys\nimport time\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\nfrom utils import bgr2ycbcr, imresize\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=False)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    util.mkdirs(\n        (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n    )\n\n    os.system(\"rm ./result\")\n    os.symlink(os.path.join(opt[\"path\"][\"results_root\"], \"..\"), \"./result\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    torch.backends.cudnn.benchmark = True\n\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"test_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO,\n        screen=True,\n        tofile=True,\n    )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    logger = logging.getLogger(\"base\")\n    logger.info(option.dict2str(opt))\n\n    # Create test dataset and dataloader\n    test_datasets = []\n    test_loaders = []\n\n    for phase, dataset_opt in 
sorted(opt[\"datasets\"].items()):\n\n        test_set = create_dataset(dataset_opt)\n        test_loader = create_dataloader(test_set, dataset_opt, opt[\"dist\"])\n\n        if rank == 0:\n            logger.info(\n                \"Number of test images in [{:s}]: {:d}\".format(\n                    dataset_opt[\"name\"], len(test_set)\n                )\n            )\n        test_datasets.append(test_set)\n        test_loaders.append(test_loader)\n\n    # load pretrained model by default\n    model = create_model(opt)\n\n    for test_dataset, test_loader in zip(test_datasets, test_loaders):\n\n        test_set_name = test_dataset.opt[\"name\"]\n        dataset_dir = os.path.join(opt[\"path\"][\"results_root\"], test_set_name)\n\n        if rank == 0:\n            logger.info(\"\\nTesting [{:s}]...\".format(test_set_name))\n            util.mkdir(dataset_dir)\n\n        validate(\n            model,\n            test_dataset,\n            test_loader,\n            opt,\n            measure,\n            dataset_dir,\n            test_set_name,\n            logger,\n        )\n\n\ndef validate(\n    model, dataset, dist_loader, opt, measure, dataset_dir, test_set_name, logger\n):\n\n    test_results = {}\n    test_results_y = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n        test_results_y[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        test_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        img_path = test_data[\"src_path\"][0]\n        img_name = img_path.split(\"/\")[-1].split(\".\")[0]\n\n        model.test(test_data)\n        visuals = model.get_current_visuals()\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n\n        suffix = opt[\"suffix\"]\n        if suffix:\n            save_img_path = os.path.join(dataset_dir, img_name + suffix + \".png\")\n        else:\n            save_img_path = os.path.join(dataset_dir, img_name + \".png\")\n        util.save_img(sr_img, save_img_path)\n\n        message = \"img:{:15s}; \".format(img_name)\n\n        crop_border = opt[\"crop_border\"] if opt[\"crop_border\"] else opt[\"scale\"]\n\n        if crop_border == 0:\n            cropped_sr_img = sr_img\n        else:\n            cropped_sr_img = sr_img[\n                crop_border:-crop_border, crop_border:-crop_border, :\n            ]\n\n        if \"tgt\" in test_data.keys():\n            gt_img = util.tensor2img(test_data[\"tgt\"][0].double().cpu())\n\n            if crop_border == 0:\n                cropped_gt_img = gt_img\n            else:\n                cropped_gt_img = gt_img[\n                    crop_border:-crop_border, crop_border:-crop_border, :\n                ]\n        else:\n            gt_img = None\n            cropped_gt_img = None\n\n        message += \"Scores - \"\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n            message += \"{}: {:.6f}; \".format(k, v)\n\n        if sr_img.shape[2] == 3:  # RGB image\n            sr_img_y = bgr2ycbcr(sr_img, only_y=True)\n            if crop_border == 0:\n                cropped_sr_img_y = sr_img_y * 255\n            else:\n                
cropped_sr_img_y = (\n                    sr_img_y[crop_border:-crop_border, crop_border:-crop_border] * 255\n                )\n            if gt_img is not None:\n                gt_img_y = bgr2ycbcr(gt_img, only_y=True)\n                if crop_border == 0:\n                    cropped_gt_img_y = gt_img_y * 255\n                else:\n                    cropped_gt_img_y = (\n                        gt_img_y[crop_border:-crop_border, crop_border:-crop_border]\n                        * 255\n                    )\n            else:\n                gt_img_y = None\n                cropped_gt_img_y = None\n\n            message += \"Y Scores - \"\n            scores = measure(\n                res=cropped_sr_img_y, ref=cropped_gt_img_y, metrics=opt[\"metrics\"]\n            )\n            for k, v in scores.items():\n                test_results_y[k][idx] = v\n                message += \"{}: {:.6f}; \".format(k, v)\n\n        logger.info(message)\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n        for k, v in test_results_y.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    # log\n    avg_results = {}\n    message = \"Average Results for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger.info(message)\n\n    avg_results_y = {}\n    message = \"Average Results on Y channel for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        # note: accumulate into avg_results_y, not avg_results\n        for k, v in test_results_y.items():\n            avg_results_y[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results_y[k])\n\n        logger.info(message)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/config/Bicubic/train.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport random\nimport sys\nimport time\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef setup_dataloaer(opt, logger):\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        rank = 0\n        world_size = 1\n\n    for phase, dataset_opt in opt[\"datasets\"].items():\n        if phase == \"train\":\n            train_set = create_dataset(dataset_opt)\n            train_loader = create_dataloader(train_set, dataset_opt, opt[\"dist\"])\n            total_iters = opt[\"train\"][\"niter\"]\n            total_epochs = total_iters // (len(train_loader) - 1) + 1\n            if rank == 0:\n                logger.info(\n                    \"Number of train images: {:,d}, iters: {:,d}\".format(\n                        len(train_set), len(train_loader)\n                    )\n                )\n                logger.info(\n                    \"Total epochs needed: {:d} for iters {:,d}\".format(\n                        total_epochs, opt[\"train\"][\"niter\"]\n                    )\n                )\n\n        elif phase == \"val\":\n            val_set = create_dataset(dataset_opt)\n            val_loader = create_dataloader(val_set, dataset_opt, opt[\"dist\"])\n            if rank == 0:\n                logger.info(\n                    \"Number of val images in [{:s}]: {:d}\".format(\n                        dataset_opt[\"name\"], len(val_set)\n                    )\n                )\n        else:\n            raise NotImplementedError(\"Phase [{:s}] is not recognized.\".format(phase))\n\n    assert train_loader is not None\n    assert val_loader is not None\n\n    return train_set, train_loader, val_set, val_loader, total_iters, total_epochs\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=True)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = 
\n\n    ngpus_per_node = torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    if opt[\"train\"].get(\"resume_state\", None) is None:\n        util.mkdir_and_rename(\n            opt[\"path\"][\"experiments_root\"]\n        )  # rename experiment folder if exists\n        util.mkdirs(\n            (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n        )\n        os.system(\"rm ./log\")\n        os.symlink(os.path.join(opt[\"path\"][\"experiments_root\"], \"..\"), \"./log\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    seed = opt[\"train\"][\"manual_seed\"]\n    if seed is None:\n        # fall back to a per-process seed when no manual seed is given\n        seed = rank\n    util.set_random_seed(seed)\n\n    torch.backends.cudnn.benchmark = True\n    # torch.backends.cudnn.deterministic = True\n\n    # setup tensorboard and val logger\n    if rank == 0:\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger = SummaryWriter(log_dir=\"log/{}/tb_logger/\".format(opt[\"name\"]))\n\n        util.setup_logger(\n            \"val\",\n            opt[\"path\"][\"log\"],\n            \"val_\" + opt[\"name\"],\n            level=logging.INFO,\n            screen=True,\n            tofile=True,\n        )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)
\n\n    # configure the base logger; logging does not work before this call\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"train_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO if rank == 0 else logging.ERROR,\n        screen=True,\n        tofile=True,\n    )\n\n    logger = logging.getLogger(\"base\")\n    if rank == 0:\n        logger.info(option.dict2str(opt))\n\n    # create dataset\n    (\n        train_set,\n        train_loader,\n        val_set,\n        val_loader,\n        total_iters,\n        total_epochs,\n    ) = setup_dataloader(opt, logger)\n\n    # create model\n    model = create_model(opt)\n\n    # loading resume state if exists\n    if opt[\"train\"].get(\"resume_state\", None):\n        # distributed resuming: all load into default GPU\n        device_id = gpu\n        resume_state = torch.load(\n            opt[\"train\"][\"resume_state\"],\n            map_location=lambda storage, loc: storage.cuda(device_id),\n        )\n\n        logger.info(\n            \"Resuming training from epoch: {}, iter: {}.\".format(\n                resume_state[\"epoch\"], resume_state[\"iter\"]\n            )\n        )\n\n        start_epoch = resume_state[\"epoch\"]\n        current_step = resume_state[\"iter\"]\n        model.resume_training(resume_state)  # handle optimizers and schedulers\n\n    else:\n        current_step = 0\n        start_epoch = 0\n\n    logger.info(\n        \"Start training from epoch: {:d}, iter: {:d}\".format(start_epoch, current_step)\n    )\n    data_time, iter_time = time.time(), time.time()\n    avg_data_time = avg_iter_time = 0\n    count = 0\n    for epoch in range(start_epoch, total_epochs + 1):\n        for _, train_data in enumerate(train_loader):\n\n            current_step += 1\n            count += 1\n            if current_step > total_iters:\n                break\n\n            data_time = time.time() - data_time\n            avg_data_time = (avg_data_time * (count - 1) + data_time) / count\n\n            model.feed_data(train_data)\n            model.optimize_parameters(current_step)\n            model.update_learning_rate(\n                current_step, warmup_iter=opt[\"train\"][\"warmup_iter\"]\n            )\n\n            iter_time = time.time() - iter_time\n            avg_iter_time = (avg_iter_time * (count - 1) + iter_time) / count\n\n            # log\n            if current_step % opt[\"logger\"][\"print_freq\"] == 0:\n                logs = model.get_current_log()\n                message = (\n                    f\"<epoch:{epoch:3d}, iter:{current_step:8,d}, \"\n                    f\"lr:{model.get_current_learning_rate():.3e}> \"\n                )\n\n                message += f'[time (data): {avg_iter_time:.3f} ({avg_data_time:.3f})] '\n                for k, v in logs.items():\n                    message += \"{:s}: {:.4e}; \".format(k, v)\n                    # tensorboard logger\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        if rank == 0:\n                            tb_logger.add_scalar(k, v, current_step)\n                logger.info(message)\n\n            # validation\n            if current_step % opt[\"train\"][\"val_freq\"] == 0:\n\n                avg_results = validate(\n                    model, val_set, val_loader, opt, measure, epoch, current_step\n                )\n\n                # tensorboard logger (avg_results only exists right after validation)\n                if rank == 0 and opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:
\n                    for k, v in avg_results.items():\n                        tb_logger.add_scalar(k, v, current_step)\n\n            # save models and training states\n            if current_step % opt[\"logger\"][\"save_checkpoint_freq\"] == 0:\n                if rank == 0:\n                    logger.info(\"Saving models and training states.\")\n                    model.save(current_step)\n                    model.save_training_state(epoch, current_step)\n\n            data_time = time.time()\n            iter_time = time.time()\n\n    if rank == 0:\n        logger.info(\"Saving the final model.\")\n        model.save(\"latest\")\n        logger.info(\"End of training.\")\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger.close()\n\n\ndef validate(model, dataset, dist_loader, opt, measure, epoch, current_step):\n\n    test_results = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    if rank == 0:\n        pbar = tqdm(total=len(dataset), leave=False, dynamic_ncols=True)\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        val_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        LR_img = val_data[\"src\"]\n        lr_img = util.tensor2img(LR_img)  # save LR image for reference\n\n        model.test(val_data)\n        visuals = model.get_current_visuals()\n\n        # Save images for reference\n        img_name = val_data[\"src_path\"][0].split(\"/\")[-1].split(\".\")[0]\n        img_dir = os.path.join(opt[\"path\"][\"val_images\"], img_name)\n\n        util.mkdir(img_dir)\n        save_lr_path = os.path.join(img_dir, \"{:s}_LR.png\".format(img_name))\n        util.save_img(lr_img, save_lr_path)\n\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n        save_img_path = os.path.join(\n            img_dir, \"{:s}_{:d}.png\".format(img_name, current_step)\n        )\n        util.save_img(sr_img, save_img_path)\n\n        if \"fake_lr\" in visuals.keys():\n            fake_lr_img = util.tensor2img(visuals[\"fake_lr\"])\n            save_img_path = os.path.join(\n                img_dir, f\"fake_lr_{current_step:d}.png\"\n            )\n            util.save_img(fake_lr_img, save_img_path)\n\n        # calculate scores\n        crop_size = opt[\"scale\"]\n        cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        if \"tgt\" in val_data.keys():\n            gt_img = util.tensor2img(val_data[\"tgt\"])\n            cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        else:\n            cropped_gt_img = gt_img = None\n\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n\n        if rank == 0:\n            for _ in range(world_size):\n                pbar.update(1)\n    if rank == 0:\n        pbar.close()\n\n    # log\n    avg_results = {}\n    message = \"<epoch:{:3d}, iter:{:8,d}> Average scores:\t\".format(\n        epoch, current_step\n    )\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)
message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger_val = logging.getLogger(\"val\")  # validation logger\n        logger_val.info(message)\n    \n    del test_results\n    torch.cuda.empty_cache()\n    return avg_results\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/config/Bulat/README.md",
    "content": "This repo supports the training and testing of ECCV paper [To learn image super-resolution, use a GAN to learn how to do image degradation firs](https://arxiv.org/abs/1807.11458)"
  },
  {
    "path": "codes/config/Bulat/archs/__init__.py",
    "content": "import importlib\nimport os\nimport os.path as osp\n\nfrom utils.registry import ARCH_REGISTRY, LOSS_REGISTRY, LR_SCHEDULER_REGISTRY\n\narch_folder = osp.dirname(osp.abspath(__file__))\narch_filenames = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(arch_folder)\n    if v.endswith(\".py\")\n]\n# import all the arch modules\n_arch_modules = [\n    importlib.import_module(f\"archs.{file_name}\") for file_name in arch_filenames\n]\n\n\ndef build_network(net_opt):\n    which_network = net_opt[\"which_network\"]\n    net = ARCH_REGISTRY.get(which_network)(**net_opt[\"setting\"])\n    return net\n\n\ndef build_loss(loss_opt):\n    loss_type = loss_opt.pop(\"type\")\n    loss = LOSS_REGISTRY.get(loss_type)(**loss_opt)\n    return loss\n\ndef build_scheduler(optimizer, scheduler_opt):\n    scheduler_type = scheduler_opt.pop(\"type\")\n    scheduler = LR_SCHEDULER_REGISTRY.get(scheduler_type)(optimizer, **scheduler_opt)\n    return scheduler\n"
  },
  {
    "path": "codes/config/Bulat/archs/deg_arch.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\nfrom .edsr import default_conv, BasicBlock, ResBlock, Upsampler\n\n\n@ARCH_REGISTRY.register()\nclass DegModel(nn.Module):\n    def __init__(self, nb, nf, scale=4, zero_tail=False, conv=default_conv):\n        super().__init__()\n\n        self.scale = scale\n\n        # define head module\n        m_head = [nn.Conv2d(4, nf, kernel_size=2 * scale + 1, stride=scale, padding=scale)]\n        n_head = [nn.Linear(64, 128**2)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, nf, 3, act=nn.ReLU(True), res_scale=1) for _ in range(nb)\n        ]\n        m_body.append(conv(nf, nf, 3))\n\n        # define tail module\n        m_tail = [\n            conv(nf, 3, 3)\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = nn.Sequential(*m_tail)\n\n        self.head_noise = nn.Sequential(*n_head)\n\n        if zero_tail:\n            nn.init.constant_(self.tail[-1].weight, 0)\n            nn.init.constant_(self.tail[-1].bias, 0)\n\n    def forward(self, x):\n        B, C, H, W = x.shape\n        noise = torch.randn(B, 64).to(x.device)\n        noise = self.head_noise(noise).view(B, 1, H, W)\n\n        f = self.head(torch.cat([x, noise], 1))\n        f = self.body(f)\n        f = self.tail(f)\n\n        if self.scale == 1:\n            x = f + x\n        else:\n            x = f + F.interpolate(x, scale_factor=1 / self.scale)\n        return x\n"
  },
  {
    "path": "codes/config/Bulat/archs/discriminator.py",
    "content": "import torch\nimport torch.nn as nn\nimport torchvision\nimport functools\n\nfrom utils.registry import ARCH_REGISTRY\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG128(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512 * 4 * 4, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG32(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = 
nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass PatchGANDiscriminator(nn.Module):\n    \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n    def __init__(self, in_c, nf, nb, stride=2, norm_layer=nn.InstanceNorm2d):\n        \"\"\"Construct a PatchGAN discriminator\n\n        Parameters:\n            input_nc (int)  -- the number of channels in input images\n            ndf (int)       -- the number of filters in the last conv layer\n            n_layers (int)  -- the number of conv layers in the discriminator\n            norm_layer      -- normalization layer\n        \"\"\"\n        super().__init__()\n        if (\n            type(norm_layer) == functools.partial\n        ):  # no need to use bias as BatchNorm2d has affine parameters\n            use_bias = norm_layer.func == nn.InstanceNorm2d\n        else:\n            use_bias = norm_layer == nn.InstanceNorm2d\n\n        kw = 4\n        padw = 1\n        sequence = [\n            nn.Conv2d(in_c, nf, kernel_size=kw, stride=stride, padding=padw),\n            nn.LeakyReLU(0.2, True),\n        ]\n        nf_mult = 1\n        nf_mult_prev = 1\n        for n in range(1, nb):  # gradually increase the number of filters\n            nf_mult_prev = nf_mult\n            nf_mult = min(2 ** n, 8)\n            sequence += [\n                nn.Conv2d(\n                    nf * nf_mult_prev,\n                    nf * nf_mult,\n                    kernel_size=kw,\n                    stride=2,\n                    padding=padw,\n                    bias=use_bias,\n                ),\n                norm_layer(nf * nf_mult),\n                nn.LeakyReLU(0.2, True),\n            ]\n\n        nf_mult_prev = nf_mult\n        nf_mult = min(2 ** nb, 8)\n        sequence += [\n            nn.Conv2d(\n                nf * nf_mult_prev,\n                nf * nf_mult,\n                kernel_size=kw,\n                stride=1,\n                padding=padw,\n                bias=use_bias,\n            ),\n            norm_layer(nf * nf_mult),\n            nn.LeakyReLU(0.2, True),\n        ]\n\n        sequence += [\n            nn.Conv2d(nf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)\n        ]  # output 1 
channel prediction map\n        self.model = nn.Sequential(*sequence)\n\n    def forward(self, input):\n        \"\"\"Standard forward.\"\"\"\n        return self.model(input)\n"
  },
  {
    "path": "codes/config/Bulat/archs/edsr.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(\n        self,\n        rgb_range,\n        rgb_mean=(0.4488, 0.4371, 0.4040),\n        rgb_std=(1.0, 1.0, 1.0),\n        sign=-1,\n    ):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\n\n\n@ARCH_REGISTRY.register()\nclass EDSR(nn.Module):\n    def __init__(self, nb, nf, res_scale=0.1, upscale=4, conv=default_conv):\n        super(EDSR, self).__init__()\n\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        scale = upscale\n       
 act = nn.ReLU(True)\n        # url_name = 'r{}f{}x{}'.format(nb, nf, upscale)\n        # if url_name in url:\n        #     self.url = url[url_name]\n        # else:\n        #     self.url = None\n        self.sub_mean = MeanShift(255.0, sign=-1)\n        self.add_mean = MeanShift(255.0, sign=1)\n\n        # define head module\n        m_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, n_feats, kernel_size, act=act, res_scale=res_scale)\n            for _ in range(n_resblocks)\n        ]\n        m_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = nn.Sequential(*m_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x * 255.0)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = self.tail(res)\n        x = self.add_mean(x) / 255.0\n\n        return x\n"
  },
  {
    "path": "codes/config/Bulat/archs/loss.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport lpips as lp\n\nfrom utils.registry import LOSS_REGISTRY\n\nfrom .vgg import VGGFeatureExtractor\n\n@LOSS_REGISTRY.register()\nclass ColorLoss(nn.Module):\n    def __init__(self, gauss_opt=None, pool_opt=None, stride=1, recursion=1, loss_type=\"l1\"):\n        super().__init__()\n        \n        self.stride = stride\n        self.recursion = recursion\n        self.loss_type = loss_type\n\n        self.gauss_opt = gauss_opt\n        if gauss_opt is not None:\n            ksize = gauss_opt[\"ksize\"]\n            if gauss_opt.get(\"sigma\", None) is None:\n                sigma = ksize / 6\n            ax = torch.arange(0, ksize) - (ksize - 1) / 2\n            xx, yy = torch.meshgrid(ax, ax)\n            dis = (xx ** 2 + yy ** 2)\n            dis = torch.exp(-dis / 2 / sigma ** 2)\n            dis = dis / dis.sum()\n\n            weight = dis.view(1, 1, ksize, ksize).repeat(3, 1, 1, 1)\n            self.register_buffer(\"weight\", weight)\n        \n        self.pool_opt = pool_opt\n        if pool_opt is not None:\n            ksize = pool_opt[\"ksize\"]\n            self.pool = nn.AvgPool2d(ksize, stride)\n    \n    def forward(self, src, tgt):\n        for i in range(self.recursion):\n            if self.gauss_opt is not None:\n                tgt = F.conv2d(tgt, self.weight, stride=self.stride, padding=self.ksize//2, groups=3)\n            if self.pool_opt is not None:\n                tgt = self.pool(tgt)\n        if self.loss_type == \"l1\":\n            loss = F.l1_loss(src, tgt)\n        elif self.loss_type == \"mse\":\n            loss = F.mse_loss(src, tgt)\n        return loss\n\n@LOSS_REGISTRY.register()\nclass GaussGuided(nn.Module):\n    def __init__(self, ksize, sigma):\n        super().__init__()\n\n        ax = torch.arange(0, ksize) - ksize//2\n        xx, yy = torch.meshgrid(ax, ax)\n        dis = (xx ** 2 + yy ** 2)\n        dis = torch.exp(-dis / sigma ** 2)\n        dis = dis / dis.sum()\n\n        self.register_buffer(\"gauss\", dis.view(1, ksize**2, 1, 1))\n    \n    def forward(self, kernel):\n\n        return F.mse_loss(self.gauss, kernel)\n\n@LOSS_REGISTRY.register()\nclass PerceptualLossLPIPS(nn.Module):\n    def __init__(self, net=\"alex\", normalize=True):\n        super().__init__()\n        self.fn = lp.LPIPS(net=net, spatial=True)\n        for p in self.fn.parameters():\n            p.requires_grad = False\n        \n        self.normalize = normalize\n    \n    def forward(self, res, ref):\n        return self.fn(res, ref, normalize=self.normalize).mean(), None\n\n\n@LOSS_REGISTRY.register()\nclass MSELoss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.mse_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass L1Loss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.l1_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass GANLoss(nn.Module):\n    \"\"\"Define GAN loss.\n    Args:\n        gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.\n        real_label_val (float): The value for real label. Default: 1.0.\n        fake_label_val (float): The value for fake label. 
Default: 0.0.\n    \"\"\"\n\n    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):\n        super(GANLoss, self).__init__()\n        self.gan_type = gan_type\n        self.real_label_val = real_label_val\n        self.fake_label_val = fake_label_val\n\n        if self.gan_type == \"vanilla\":\n            self.loss = nn.BCEWithLogitsLoss()\n        elif self.gan_type == \"lsgan\":\n            self.loss = nn.MSELoss()\n        elif self.gan_type == \"wgan\":\n            self.loss = self._wgan_loss\n        elif self.gan_type == \"wgan_softplus\":\n            self.loss = self._wgan_softplus_loss\n        elif self.gan_type == \"hinge\":\n            self.loss = nn.ReLU()\n        else:\n            raise NotImplementedError(f\"GAN type {self.gan_type} is not implemented.\")\n\n    def _wgan_loss(self, input, target):\n        \"\"\"wgan loss.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return -input.mean() if target else input.mean()\n\n    def _wgan_softplus_loss(self, input, target):\n        \"\"\"wgan loss with soft plus. softplus is a smooth approximation to the\n        ReLU function.\n        In StyleGAN2, it is called:\n            Logistic loss for discriminator;\n            Non-saturating loss for generator.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return F.softplus(-input).mean() if target else F.softplus(input).mean()\n\n    def get_target_label(self, input, target_is_real):\n        \"\"\"Get target label.\n        Args:\n            input (Tensor): Input tensor.\n            target_is_real (bool): Whether the target is real or fake.\n        Returns:\n            (bool | Tensor): Target tensor. 
Return bool for wgan, otherwise,\n                return Tensor.\n        \"\"\"\n\n        if self.gan_type in [\"wgan\", \"wgan_softplus\"]:\n            return target_is_real\n        target_val = self.real_label_val if target_is_real else self.fake_label_val\n        return input.new_ones(input.size()) * target_val\n\n    def forward(self, input, target_is_real, is_disc=False):\n        \"\"\"\n        Args:\n            input (Tensor): The input for the loss module, i.e., the network\n                prediction.\n            target_is_real (bool): Whether the target is real or fake.\n            is_disc (bool): Whether the loss is for discriminators or not.\n                Default: False.\n        Returns:\n            Tensor: GAN loss value.\n        \"\"\"\n        target_label = self.get_target_label(input, target_is_real)\n        if self.gan_type == \"hinge\":\n            if is_disc:  # for discriminators in hinge-gan\n                input = -input if target_is_real else input\n                loss = self.loss(1 + input).mean()\n            else:  # for generators in hinge-gan\n                loss = -input.mean()\n        else:  # other gan types\n            loss = self.loss(input, target_label)\n\n        return loss\n\n\n@LOSS_REGISTRY.register()\nclass PerceptualLoss(nn.Module):\n    \"\"\"Perceptual loss with commonly used style loss.\n    Args:\n        layer_weights (dict): The weight for each layer of vgg feature.\n            Here is an example: {'conv5_4': 1.}, which means the conv5_4\n            feature layer (before relu5_4) will be extracted with weight\n            1.0 in calculating losses.\n        vgg_type (str): The type of vgg network used as feature extractor.\n            Default: 'vgg19'.\n        use_input_norm (bool):  If True, normalize the input image in vgg.\n            Default: True.\n        range_norm (bool): If True, normalize images from range [-1, 1] to [0, 1].\n            Default: False.\n        perceptual_weight (float): If `perceptual_weight > 0`, the perceptual\n            loss will be calculated and multiplied by the\n            weight. Default: 1.0.\n        style_weight (float): If `style_weight > 0`, the style loss will be\n            calculated and multiplied by the weight.\n            Default: 0.\n        criterion (str): Criterion used for perceptual loss.
 Default: 'l1'.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_weights,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        perceptual_weight=1.0,\n        style_weight=0.0,\n        criterion=\"l1\",\n    ):\n        super(PerceptualLoss, self).__init__()\n        self.perceptual_weight = perceptual_weight\n        self.style_weight = style_weight\n        self.layer_weights = layer_weights\n        self.vgg = VGGFeatureExtractor(\n            layer_name_list=list(layer_weights.keys()),\n            vgg_type=vgg_type,\n            use_input_norm=use_input_norm,\n            range_norm=range_norm,\n        )\n\n        self.criterion_type = criterion\n        if self.criterion_type == \"l1\":\n            self.criterion = torch.nn.L1Loss()\n        elif self.criterion_type == \"l2\":\n            # torch has no nn.L2loss; MSELoss is the L2 criterion\n            self.criterion = torch.nn.MSELoss()\n        elif self.criterion_type == \"fro\":\n            self.criterion = None\n        else:\n            raise NotImplementedError(f\"{criterion} criterion is not supported.\")\n\n    def forward(self, x, gt):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, c, h, w).\n            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        # extract vgg features\n        x_features = self.vgg(x)\n        gt_features = self.vgg(gt.detach())\n\n        # calculate perceptual loss\n        if self.perceptual_weight > 0:\n            percep_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    percep_loss += (\n                        torch.norm(x_features[k] - gt_features[k], p=\"fro\")\n                        * self.layer_weights[k]\n                    )\n                else:\n                    percep_loss += (\n                        self.criterion(x_features[k], gt_features[k])\n                        * self.layer_weights[k]\n                    )\n            percep_loss *= self.perceptual_weight\n        else:\n            percep_loss = None\n\n        # calculate style loss\n        if self.style_weight > 0:\n            style_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    style_loss += (\n                        torch.norm(\n                            self._gram_mat(x_features[k])\n                            - self._gram_mat(gt_features[k]),\n                            p=\"fro\",\n                        )\n                        * self.layer_weights[k]\n                    )\n                else:\n                    style_loss += (\n                        self.criterion(\n                            self._gram_mat(x_features[k]),\n                            self._gram_mat(gt_features[k]),\n                        )\n                        * self.layer_weights[k]\n                    )\n            style_loss *= self.style_weight\n        else:\n            style_loss = None\n\n        return percep_loss, style_loss\n\n    def _gram_mat(self, x):\n        \"\"\"Calculate Gram matrix.\n        Args:\n            x (torch.Tensor): Tensor with shape of (n, c, h, w).\n        Returns:\n            torch.Tensor: Gram matrix.\n        \"\"\"\n        n, c, h, w = x.size()\n        features = x.view(n, c, w * h)\n        features_t = features.transpose(1, 2)\n        gram = features.bmm(features_t) / (c * h * 
w)\n        return gram\n\n\n@LOSS_REGISTRY.register()\nclass CharbonnierLoss(nn.Module):\n    \"\"\"Charbonnier Loss (L1)\"\"\"\n\n    def __init__(self, eps=1e-6):\n        super(CharbonnierLoss, self).__init__()\n        self.eps = eps\n\n    def forward(self, x, y):\n        diff = x - y\n        loss = torch.mean(torch.sqrt(diff * diff + self.eps))\n        return loss\n\n\nclass GradientPenaltyLoss(nn.Module):\n    def __init__(self, device=torch.device(\"cpu\")):\n        super(GradientPenaltyLoss, self).__init__()\n        self.register_buffer(\"grad_outputs\", torch.Tensor())\n        self.grad_outputs = self.grad_outputs.to(device)\n\n    def get_grad_outputs(self, input):\n        if self.grad_outputs.size() != input.size():\n            self.grad_outputs.resize_(input.size()).fill_(1.0)\n        return self.grad_outputs\n\n    def forward(self, interp, interp_crit):\n        grad_outputs = self.get_grad_outputs(interp_crit)\n        grad_interp = torch.autograd.grad(\n            outputs=interp_crit,\n            inputs=interp,\n            grad_outputs=grad_outputs,\n            create_graph=True,\n            retain_graph=True,\n            only_inputs=True,\n        )[0]\n        grad_interp = grad_interp.view(grad_interp.size(0), -1)\n        grad_interp_norm = grad_interp.norm(2, dim=1)\n\n        loss = ((grad_interp_norm - 1) ** 2).mean()\n        return loss\n"
  },
  {
    "path": "codes/config/Bulat/archs/lr_scheduler.py",
    "content": "import math\nfrom collections import Counter, defaultdict\n\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\n\nfrom utils.registry import LR_SCHEDULER_REGISTRY\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass LinearDecayLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        decay_prop,\n        total_steps,\n        last_epoch=-1,\n    ):\n        self.decay_prop = decay_prop\n        self.total_steps = total_steps\n\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n\n        return [\n            group[\"initial_lr\"]\n            * (1 - (self.last_epoch + 1) * self.decay_prop / self.total_steps)\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass MultiStepRestartLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        milestones,\n        restarts=None,\n        weights=None,\n        gamma=0.1,\n        clear_state=False,\n        last_epoch=-1,\n    ):\n        self.milestones = Counter(milestones)\n        self.gamma = gamma\n        self.clear_state = clear_state\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch in self.restarts:\n            if self.clear_state:\n                self.optimizer.state = defaultdict(dict)\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        if self.last_epoch not in self.milestones:\n            return [group[\"lr\"] for group in self.optimizer.param_groups]\n        return [\n            group[\"lr\"] * self.gamma ** self.milestones[self.last_epoch]\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass CosineAnnealingRestartLR(_LRScheduler):\n    def __init__(\n        self, optimizer, T_period, restarts=None, weights=None, eta_min=0, last_epoch=-1\n    ):\n        self.T_period = T_period\n        self.T_max = self.T_period[0]  # current T period\n        self.eta_min = eta_min\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        self.last_restart = 0\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch == 0:\n            return self.base_lrs\n        elif self.last_epoch in self.restarts:\n            self.last_restart = self.last_epoch\n            self.T_max = self.T_period[self.restarts.index(self.last_epoch) + 1]\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        elif (self.last_epoch - self.last_restart - 1 - self.T_max) % (\n            2 * self.T_max\n        ) == 0:\n            return [\n                group[\"lr\"]\n                + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2\n                for base_lr, group in 
zip(self.base_lrs, self.optimizer.param_groups)\n            ]\n        return [\n            (1 + math.cos(math.pi * (self.last_epoch - self.last_restart) / self.T_max))\n            / (\n                1\n                + math.cos(\n                    math.pi * ((self.last_epoch - self.last_restart) - 1) / self.T_max\n                )\n            )\n            * (group[\"lr\"] - self.eta_min)\n            + self.eta_min\n            for group in self.optimizer.param_groups\n        ]\n"
  },
  {
    "path": "codes/config/Bulat/archs/module_util.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\n\ndef initialize_weights(net_l, scale=1):\n    if not isinstance(net_l, list):\n        net_l = [net_l]\n    for net in net_l:\n        for m in net.modules():\n            if isinstance(m, nn.Conv2d):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale  # for residual block\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                init.constant_(m.weight, 1)\n                init.constant_(m.bias.data, 0.0)\n\n\ndef make_layer(block, n_layers):\n    layers = []\n    for _ in range(n_layers):\n        layers.append(block())\n    return nn.Sequential(*layers)\n\n\nclass ResidualBlock_noBN(nn.Module):\n    \"\"\"Residual block w/o BN\n    ---Conv-ReLU-Conv-+-\n     |________________|\n    \"\"\"\n\n    def __init__(self, nf=64):\n        super(ResidualBlock_noBN, self).__init__()\n        self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n\n        # initialization\n        initialize_weights([self.conv1, self.conv2], 0.1)\n\n    def forward(self, x):\n        identity = x\n        out = F.relu(self.conv1(x), inplace=True)\n        out = self.conv2(out)\n        return identity + out\n\n\ndef flow_warp(x, flow, interp_mode=\"bilinear\", padding_mode=\"zeros\"):\n    \"\"\"Warp an image or feature map with optical flow\n    Args:\n        x (Tensor): size (N, C, H, W)\n        flow (Tensor): size (N, H, W, 2), normal value\n        interp_mode (str): 'nearest' or 'bilinear'\n        padding_mode (str): 'zeros' or 'border' or 'reflection'\n\n    Returns:\n        Tensor: warped image or feature map\n    \"\"\"\n    assert x.size()[-2:] == flow.size()[1:3]\n    B, C, H, W = x.size()\n    # mesh grid\n    grid_y, grid_x = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))\n    grid = torch.stack((grid_x, grid_y), 2).float()  # W(x), H(y), 2\n    grid.requires_grad = False\n    grid = grid.type_as(x)\n    vgrid = grid + flow\n    # scale grid to [-1,1]\n    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(W - 1, 1) - 1.0\n    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(H - 1, 1) - 1.0\n    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)\n    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode)\n    return output\n"
  },
  {
    "path": "codes/config/Bulat/archs/rcan.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\nclass CALayer(nn.Module):\n    def __init__(self, channel, reduction=16):\n        super(CALayer, self).__init__()\n        # global average pooling: feature --> point\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        # feature channel downscale and upscale --> channel weight\n        self.conv_du = nn.Sequential(\n            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),\n            
nn.ReLU(inplace=True),\n            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),\n            nn.Sigmoid(),\n        )\n\n    def forward(self, x):\n        y = self.avg_pool(x)\n        y = self.conv_du(y)\n        return x * y\n\n\n## Residual Channel Attention Block (RCAB)\nclass RCAB(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        reduction,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(RCAB, self).__init__()\n        modules_body = []\n        for i in range(2):\n            modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                modules_body.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                modules_body.append(act)\n        modules_body.append(CALayer(n_feat, reduction))\n        self.body = nn.Sequential(*modules_body)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x)\n        # res = self.body(x).mul(self.res_scale)\n        res += x\n        return res\n\n\n## Residual Group (RG)\nclass ResidualGroup(nn.Module):\n    def __init__(\n        self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks\n    ):\n        super(ResidualGroup, self).__init__()\n        modules_body = [\n            RCAB(\n                conv,\n                n_feat,\n                kernel_size,\n                reduction,\n                bias=True,\n                bn=False,\n                act=act,\n                res_scale=res_scale,\n            )\n            for _ in range(n_resblocks)\n        ]\n        modules_body.append(conv(n_feat, n_feat, kernel_size))\n        self.body = nn.Sequential(*modules_body)\n\n    def forward(self, x):\n        res = self.body(x)\n        res += x\n        return res\n\n\n## Residual Channel Attention Network (RCAN)\n@ARCH_REGISTRY.register()\nclass RCAN(nn.Module):\n    def __init__(self, ng, nb, nf, reduction=16, upscale=4, conv=default_conv):\n        super(RCAN, self).__init__()\n\n        n_feats = nf\n        kernel_size = 3\n        scale = upscale\n\n        act = nn.ReLU(True)\n\n        # RGB mean for DIV2K\n        rgb_mean = (0.4488, 0.4371, 0.4040)\n        rgb_std = (1.0, 1.0, 1.0)\n        self.sub_mean = MeanShift(1.0, rgb_mean, rgb_std, -1)\n\n        # define head module\n        modules_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        modules_body = [\n            ResidualGroup(\n                conv,\n                n_feats,\n                kernel_size,\n                reduction,\n                act=act,\n                res_scale=1.0,\n                n_resblocks=nb,\n            )\n            for _ in range(ng)\n        ]\n\n        modules_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        modules_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.add_mean = MeanShift(1.0, rgb_mean, rgb_std, 1)\n\n        self.head = nn.Sequential(*modules_head)\n        self.body = nn.Sequential(*modules_body)\n        self.tail = nn.Sequential(*modules_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = 
self.tail(res)\n        x = self.add_mean(x)\n\n        return x\n\n    def load_state_dict(self, state_dict, strict=False):\n        own_state = self.state_dict()\n        for name, param in state_dict.items():\n            if name in own_state:\n                if isinstance(param, nn.Parameter):\n                    param = param.data\n                try:\n                    own_state[name].copy_(param)\n                except Exception:\n                    if name.find(\"tail\") >= 0:\n                        print(\"Replace pre-trained upsampler to new one...\")\n                    else:\n                        raise RuntimeError(\n                            \"While copying the parameter named {}, \"\n                            \"whose dimensions in the model are {} and \"\n                            \"whose dimensions in the checkpoint are {}.\".format(\n                                name, own_state[name].size(), param.size()\n                            )\n                        )\n            elif strict:\n                if name.find(\"tail\") == -1:\n                    raise KeyError('unexpected key \"{}\" in state_dict'.format(name))\n\n        if strict:\n            missing = set(own_state.keys()) - set(state_dict.keys())\n            if len(missing) > 0:\n                raise KeyError('missing keys in state_dict: \"{}\"'.format(missing))\n"
  },
  {
    "path": "codes/config/Bulat/archs/rrdb.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\nclass ResidualDenseBlock_5C(nn.Module):\n    def __init__(self, nf=64, gc=32, bias=True):\n        super(ResidualDenseBlock_5C, self).__init__()\n        # gc: growth channel, i.e. intermediate channels\n        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)\n        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)\n        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1\n        )\n\n    def forward(self, x):\n        x1 = self.lrelu(self.conv1(x))\n        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))\n        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))\n        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))\n        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))\n        return x5 * 0.2 + x\n\n\nclass RRDB(nn.Module):\n    \"\"\"Residual in Residual Dense Block\"\"\"\n\n    def __init__(self, nf, gc=32):\n        super(RRDB, self).__init__()\n        self.RDB1 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB2 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB3 = ResidualDenseBlock_5C(nf, gc)\n\n    def forward(self, x):\n        out = self.RDB1(x)\n        out = self.RDB2(out)\n        out = self.RDB3(out)\n        return out * 0.2 + x\n\n\n@ARCH_REGISTRY.register()\nclass RRDBNet(nn.Module):\n    def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4):\n        super(RRDBNet, self).__init__()\n        self.upscale = upscale\n        RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.RRDB_trunk = make_layer(RRDB_block_f, nb)\n        self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        #### upsampling\n        self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        if upscale == 4:\n            self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.conv_first(x)\n        trunk = self.trunk_conv(self.RRDB_trunk(fea))\n        fea = fea + trunk\n\n        if self.upscale == 2 or self.upscale == 3:\n            fea = self.lrelu(\n                self.upconv1(\n                    F.interpolate(fea, scale_factor=self.upscale, mode=\"nearest\")\n                )\n            )\n        if self.upscale == 4:\n            fea = self.lrelu(\n                self.upconv1(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n            fea = self.lrelu(\n                self.upconv2(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n        out = self.conv_last(self.lrelu(self.HRconv(fea)))\n\n        return out\n"
  },
  {
    "path": "codes/config/Bulat/archs/srresnet.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\n@ARCH_REGISTRY.register()\nclass MSRResNet(nn.Module):\n    \"\"\"modified SRResNet\"\"\"\n\n    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):\n        super(MSRResNet, self).__init__()\n        self.upscale = upscale\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        basic_block = functools.partial(ResidualBlock_noBN, nf=nf)\n        self.recon_trunk = make_layer(basic_block, nb)\n\n        # upsampling\n        if self.upscale == 2:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n        elif self.upscale == 3:\n            self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(3)\n        elif self.upscale == 4:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1\n        )\n        if self.upscale == 4:\n            initialize_weights(self.upconv2, 0.1)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv_first(x))\n        out = self.recon_trunk(fea)\n\n        if self.upscale == 4:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n        elif self.upscale == 3 or self.upscale == 2:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n\n        out = self.conv_last(self.lrelu(self.HRconv(out)))\n        base = F.interpolate(\n            x, scale_factor=self.upscale, mode=\"bilinear\", align_corners=False\n        )\n        out += base\n        return out\n"
  },
  {
    "path": "codes/config/Bulat/archs/translator.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\n@ARCH_REGISTRY.register()\nclass Translator(nn.Module):\n    def __init__(self, nb, nf, scale=4, zero_tail=False, conv=default_conv):\n        super().__init__()\n\n        self.scale = scale\n\n        # define head module\n        if scale >= 1:\n            m_head = [conv(3, nf, 3)]\n        else:\n            s = int(1 / scale)\n            m_head = [nn.Conv2d(3, nf, kernel_size=2 * s + 1, stride=s, padding=s)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, nf, 3, act=nn.ReLU(True), res_scale=1) for _ in range(nb)\n        ]\n        m_body.append(conv(nf, nf, 3))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, nf, act=False) if scale > 1 else nn.Identity(),\n            conv(nf, 3, 3),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = 
nn.Sequential(*m_tail)\n\n        if zero_tail:\n            nn.init.constant_(self.tail[-1].weight, 0)\n            nn.init.constant_(self.tail[-1].bias, 0)\n\n    def forward(self, x):\n\n        f = self.head(x)\n        f = self.body(f)\n        f = self.tail(f)\n\n        if self.scale == 1:\n            x = f + x\n        else:\n            x = f + F.interpolate(x, scale_factor=self.scale)\n        return x\n"
  },
  {
    "path": "codes/config/Bulat/archs/vgg.py",
    "content": "import os\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn as nn\nfrom torchvision.models import vgg as vgg\n\nfrom utils.registry import ARCH_REGISTRY\n\nVGG_PRETRAIN_PATH = \"checkpoints/pretrained_models/vgg19-dcbb9e9d.pth\"\nNAMES = {\n    \"vgg11\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg13\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg16\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"pool5\",\n    ],\n    \"vgg19\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"conv3_4\",\n        \"relu3_4\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"conv4_4\",\n        \"relu4_4\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"conv5_4\",\n        \"relu5_4\",\n        \"pool5\",\n    ],\n}\n\n\ndef insert_bn(names):\n    \"\"\"Insert bn layer after each conv.\n    Args:\n        names (list): The list of layer names.\n    Returns:\n        list: The list of layer names with bn layers.\n    \"\"\"\n    names_bn = []\n    for name in names:\n        names_bn.append(name)\n        if \"conv\" in name:\n            position = name.replace(\"conv\", \"\")\n            names_bn.append(\"bn\" + position)\n    return names_bn\n\n\n@ARCH_REGISTRY.register()\nclass VGGFeatureExtractor(nn.Module):\n    \"\"\"VGG network for feature extraction.\n    In this implementation, we allow users to choose whether use normalization\n    in the input feature and the type of vgg network. 
Note that the pretrained\n    path must fit the vgg type.\n    Args:\n        layer_name_list (list[str]): Forward function returns the corresponding\n            features according to the layer_name_list.\n            Example: ['relu1_1', 'relu2_1', 'relu3_1'].\n        vgg_type (str): Set the type of vgg network. Default: 'vgg19'.\n        use_input_norm (bool): If True, normalize the input image. Importantly,\n            the input feature must be in the range [0, 1]. Default: True.\n        range_norm (bool): If True, normalize images from range [-1, 1] to [0, 1].\n            Default: False.\n        requires_grad (bool): If True, the parameters of the VGG network will be\n            optimized. Default: False.\n        remove_pooling (bool): If True, the max pooling operations in the VGG net\n            will be removed. Default: False.\n        pooling_stride (int): The stride of the max pooling operation. Default: 2.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_name_list,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        requires_grad=False,\n        remove_pooling=False,\n        pooling_stride=2,\n    ):\n        super(VGGFeatureExtractor, self).__init__()\n\n        self.layer_name_list = layer_name_list\n        self.use_input_norm = use_input_norm\n        self.range_norm = range_norm\n\n        self.names = NAMES[vgg_type.replace(\"_bn\", \"\")]\n        if \"bn\" in vgg_type:\n            self.names = insert_bn(self.names)\n\n        # only borrow layers that will be used to avoid unused params\n        max_idx = 0\n        for v in layer_name_list:\n            idx = self.names.index(v)\n            if idx > max_idx:\n                max_idx = idx\n\n        if os.path.exists(VGG_PRETRAIN_PATH):\n            vgg_net = getattr(vgg, vgg_type)(pretrained=False)\n            state_dict = torch.load(\n                VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage\n            )\n            vgg_net.load_state_dict(state_dict)\n        else:\n            vgg_net = getattr(vgg, vgg_type)(pretrained=True)\n\n        features = vgg_net.features[: max_idx + 1]\n\n        modified_net = OrderedDict()\n        for k, v in zip(self.names, features):\n            if \"pool\" in k:\n                # if remove_pooling is true, pooling operation will be removed\n                if remove_pooling:\n                    continue\n                else:\n                    # in some cases, we may want to change the default stride\n                    modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride)\n            else:\n                modified_net[k] = v\n\n        self.vgg_net = nn.Sequential(modified_net)\n\n        if not requires_grad:\n            self.vgg_net.eval()\n            for param in self.parameters():\n                param.requires_grad = False\n        else:\n            self.vgg_net.train()\n            for param in self.parameters():\n                param.requires_grad = True\n\n        if self.use_input_norm:\n            # the mean is for image with range [0, 1]\n            self.register_buffer(\n                \"mean\", torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)\n            )\n            # the std is for image with range [0, 1]\n            self.register_buffer(\n                \"std\", torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)\n            )\n\n    def forward(self, x):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, 
c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        if self.range_norm:\n            x = (x + 1) / 2\n        if self.use_input_norm:\n            x = (x - self.mean) / self.std\n\n        output = {}\n        for key, layer in self.vgg_net._modules.items():\n            x = layer(x)\n            if key in self.layer_name_list:\n                output[key] = x.clone()\n\n        return output\n"
  },
  {
    "path": "codes/config/Bulat/count_flops.py",
    "content": "import argparse\nimport sys\n\nimport torch\nfrom torchsummaryX import summary\n\nsys.path.append(\"../../\")\nimport utils.option as option\nfrom models import create_model\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"--opt\",\n    type=str,\n    default=\"options/setting1/test/test_setting1_x4.yml\",\n    help=\"Path to option YMAL file of Predictor.\",\n)\nargs = parser.parse_args()\nopt = option.parse(args.opt, root_path=\".\", is_train=True)\n\nopt = option.dict_to_nonedict(opt)\nmodel = create_model(opt)\n\ntest_tensor = torch.randn(1, 3, 270, 180).cuda()\nfor name, net in model.networks.items():\n    summary(net.cuda(), x=test_tensor)\n    print(\"Above are results for net {}\".format(name))\n    input()\n"
  },
  {
    "path": "codes/config/Bulat/inference.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport os.path as osp\nimport random\nimport sys\nimport cv2\nfrom collections import defaultdict\nfrom glob import glob\nfrom tqdm import tqdm\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom data.data_sampler import DistIterSampler\nfrom metrics import IQA\nfrom models import create_model\n\n\n\n#### options\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"-opt\",\n    type=str,\n    default=\"options/test/2020Track2.yml\",\n    help=\"Path to options YMAL file.\",\n)\nparser.add_argument(\"-input_dir\", type=str, default=\"../../../data_samples/LR\")\nparser.add_argument(\"-output_dir\", type=str, default=\"../../../data_samples/BSRGAN\")\nargs = parser.parse_args()\nopt = option.parse(args.opt, is_train=False)\n\nopt = option.dict_to_nonedict(opt)\n\nmodel = create_model(opt)\n\nif not osp.exists(args.output_dir):\n    os.makedirs(args.output_dir)\n\ntest_files = glob(osp.join(args.input_dir, \"*\"))\nfor inx, path in tqdm(enumerate(test_files)):\n    name = path.split(\"/\")[-1].split(\".\")[0]\n\n    img = cv2.imread(path)[:, :, [2, 1, 0]]\n    img = img.transpose(2, 0, 1)[None] / 255\n    img_t = torch.as_tensor(np.ascontiguousarray(img)).float()\n\n    model.test({\"src\": img_t}, crop_size=512)\n    outdict = model.get_current_visuals()\n\n    sr = outdict[\"sr\"]\n    sr_im = util.tensor2img(sr)\n\n    save_path = osp.join(args.output_dir, \"{}_x{}.png\".format(name, opt[\"scale\"]))\n    cv2.imwrite(save_path, sr_im)\n"
  },
  {
    "path": "codes/config/Bulat/models/__init__.py",
    "content": "import importlib\nimport logging\nimport os\nimport os.path as osp\n\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\nmodel_folder = osp.dirname(__file__)\nmodel_names = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(model_folder)\n    if v.endswith(\"_model.py\")\n]\n_model_modules = [\n    importlib.import_module(f\"models.{file_name}\") for file_name in model_names\n]\n\n\ndef create_model(opt, **kwarg):\n    model = opt[\"model\"]\n    m = MODEL_REGISTRY.get(model)(opt, **kwarg)\n    logger.info(\"Model [{:s}] is created.\".format(m.__class__.__name__))\n    return m\n"
  },
  {
    "path": "codes/config/Bulat/models/base_model.py",
    "content": "import logging\nimport os\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\n\nfrom archs import build_loss, build_network, build_scheduler\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass BaseModel:\n    def __init__(self, opt):\n\n        self.opt = opt\n\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n            self.world_size = torch.distributed.get_world_size()\n        else:\n            self.rank = 0  # non dist training\n\n        self.device = torch.device(\"cuda\" if opt[\"gpu_ids\"] is not None else \"cpu\")\n        self.is_train = opt[\"is_train\"]\n        self.log_dict = OrderedDict()\n\n        self.data_names = []\n        self.networks = {}\n\n        self.optimizers = {}\n        self.schedulers = {}\n\n    def setup_train(self, train_opt):\n        # define losses\n        loss_opt = train_opt[\"losses\"]\n        self.losses = self.build_losses(loss_opt)\n\n        # build optmizers\n        optimizer_opts = train_opt[\"optimizers\"]\n        self.optimizers = self.build_optimizers(optimizer_opts)\n\n        # set schedulers\n        scheduler_opts = train_opt[\"schedulers\"]\n        self.schedulers = self.build_schedulers(scheduler_opts)\n\n        # set to training state\n        self.set_network_state(self.networks.keys(), \"train\")\n\n    def feed_data(self, data):\n        pass\n\n    def optimize_parameters(self):\n        pass\n\n    def get_current_visuals(self):\n        pass\n\n    def get_current_losses(self):\n        pass\n\n    def print_network(self):\n        pass\n\n    def save(self, label):\n        pass\n\n    def load(self):\n        pass\n\n    def build_network(self, net_opt):\n\n        net = build_network(net_opt)\n\n        if isinstance(net, nn.Module):\n            net = self.model_to_device(net)\n\n            if net_opt.get(\"pretrain\"):\n                pretrain = net_opt.pop(\"pretrain\")\n                self.load_network(net, pretrain[\"path\"], pretrain[\"strict_load\"])\n\n            self.print_network(net)\n        return net\n\n    def build_losses(self, loss_opt):\n        losses = {}\n\n        defined_loss_names = list(loss_opt.keys())\n        assert set(defined_loss_names).issubset(set(self.loss_names))\n\n        for name in defined_loss_names:\n            loss_conf = loss_opt.get(name)\n            if loss_conf[\"weight\"] > 0:\n                self.loss_weights[name] = loss_conf.pop(\"weight\")\n                losses[name] = build_loss(loss_conf).to(self.device)\n\n        return losses\n\n    def build_optimizers(self, optim_opts):\n        optimizers = {}\n\n        if \"default\" in optim_opts.keys():\n            default_optim = optim_opts.pop(\"default\")\n\n        defined_optimizer_names = list(optim_opts.keys())\n        assert set(defined_optimizer_names).issubset(self.networks.keys())\n\n        for name in defined_optimizer_names:\n            optim_opt = optim_opts[name]\n            if optim_opt is None:\n                optim_opt = default_optim.copy()\n\n            params = []\n            for v in self.networks[name].parameters():\n                if v.requires_grad:\n                    params.append(v)\n\n            optim_type = optim_opt.pop(\"type\")\n            optimizer = getattr(torch.optim, optim_type)(params=params, **optim_opt)\n            optimizers[name] = optimizer\n\n  
      return optimizers\n\n    def build_schedulers(self, scheduler_opts):\n        \"\"\"Set up scheduler.\"\"\"\n        schedulers = {}\n        if \"default\" in scheduler_opts.keys():\n            default_opt = scheduler_opts.pop(\"default\")\n\n        for name in self.optimizers.keys():\n            scheduler_opt = scheduler_opts[name]\n            if scheduler_opt is None:\n                scheduler_opt = default_opt.copy()\n\n            schedulers[name] = build_scheduler(self.optimizers[name], scheduler_opt)\n\n        return schedulers\n\n    def model_to_device(self, net):\n        \"\"\"Model to device. It also warps models with DistributedDataParallel\n        or DataParallel.\n        Args:\n            net (nn.Module)\n        \"\"\"\n        net = net.to(self.device)\n        if self.opt[\"dist\"]:\n            net = DistributedDataParallel(net, device_ids=[torch.cuda.current_device()])\n        else:\n            net = DataParallel(net)\n        return net\n\n    def print_network(self, net):\n        # Generator\n        s, n = self.get_network_description(net)\n        if isinstance(net, nn.DataParallel) or isinstance(net, DistributedDataParallel):\n            net_struc_str = \"{} - {}\".format(\n                net.__class__.__name__, net.module.__class__.__name__\n            )\n        else:\n            net_struc_str = \"{}\".format(net.__class__.__name__)\n        if self.rank <= 0:\n            logger.info(\n                \"Network G structure: {}, with parameters: {:,d}\".format(\n                    net_struc_str, n\n                )\n            )\n            logger.info(s)\n\n    def set_optimizer(self, names, operation):\n        for name in names:\n            getattr(self.optimizers[name], operation)()\n\n    def set_requires_grad(self, names, requires_grad):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                for v in self.networks[name].parameters():\n                    v.requires_grad = requires_grad\n\n    def set_network_state(self, names, state):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                getattr(self.networks[name], state)()\n\n    def clip_grad_norm(self, names, norm):\n        for name in names:\n            nn.utils.clip_grad_norm_(self.networks[name].parameters(), max_norm=norm)\n\n    def _set_lr(self, lr_groups_l):\n        \"\"\"set learning rate for warmup,\n        lr_groups_l: list for lr_groups. 
one entry per optimizer.\"\"\"\n        for optimizer, lr_groups in zip(self.optimizers.values(), lr_groups_l):\n            for param_group, lr in zip(optimizer.param_groups, lr_groups):\n                param_group[\"lr\"] = lr\n\n    def _get_init_lr(self):\n        # get the initial lr, which is set by the scheduler\n        init_lr_groups_l = []\n        for optimizer in self.optimizers.values():\n            init_lr_groups_l.append([v[\"initial_lr\"] for v in optimizer.param_groups])\n        return init_lr_groups_l\n\n    def update_learning_rate(self, cur_iter, warmup_iter=-1):\n        for _, scheduler in self.schedulers.items():\n            scheduler.step()\n        #### set up warm up learning rate\n        if cur_iter < warmup_iter:\n            # get initial lr for each group\n            init_lr_g_l = self._get_init_lr()\n            # modify warming-up learning rates\n            warm_up_lr_l = []\n            for init_lr_g in init_lr_g_l:\n                warm_up_lr_l.append([v / warmup_iter * cur_iter for v in init_lr_g])\n            # set learning rate\n            self._set_lr(warm_up_lr_l)\n\n    def get_current_learning_rate(self):\n        # return self.schedulers[0].get_lr()[0]\n        return list(self.optimizers.values())[0].param_groups[0][\"lr\"]\n\n    def get_network_description(self, network):\n        \"\"\"Get the string and total parameters of the network\"\"\"\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        s = str(network)\n        n = sum(map(lambda x: x.numel(), network.parameters()))\n        return s, n\n\n    def save_network(self, network, network_label, iter_label):\n        save_filename = \"{}_{}.pth\".format(iter_label, network_label)\n        save_path = os.path.join(self.opt[\"path\"][\"models\"], save_filename)\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        state_dict = network.state_dict()\n        for key, param in state_dict.items():\n            state_dict[key] = param.cpu()\n        torch.save(state_dict, save_path)\n\n    def save(self, iter_label):\n        for name in self.optimizers.keys():\n            self.save_network(self.networks[name], name, iter_label)\n\n    def load_network(self, network, load_path, strict=True):\n        if load_path is not None:\n            if isinstance(network, nn.DataParallel) or isinstance(\n                network, DistributedDataParallel\n            ):\n                network = network.module\n            load_net = torch.load(load_path)\n            load_net_clean = OrderedDict()  # remove unnecessary 'module.'\n            for k, v in load_net.items():\n                if k.startswith(\"module.\"):\n                    load_net_clean[k[7:]] = v\n                else:\n                    load_net_clean[k] = v\n            network.load_state_dict(load_net_clean, strict=strict)\n\n    def save_training_state(self, epoch, iter_step):\n        \"\"\"Saves training state during training, which will be used for resuming\"\"\"\n        state = {\"epoch\": epoch, \"iter\": iter_step, \"schedulers\": {}, \"optimizers\": {}}\n        for k, s in self.schedulers.items():\n            state[\"schedulers\"][k] = s.state_dict()\n        for k, o in self.optimizers.items():\n            state[\"optimizers\"][k] = o.state_dict()\n        save_filename = \"{}.state\".format(iter_step)\n        
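# the .state bundle holds epoch/iter plus optimizer and scheduler state_dicts keyed by name, matching resume_training()\n        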
save_path = os.path.join(self.opt[\"path\"][\"training_state\"], save_filename)\n        torch.save(state, save_path)\n\n    def resume_training(self, resume_state):\n        \"\"\"Resume the optimizers and schedulers for training\"\"\"\n        resume_optimizers = resume_state[\"optimizers\"]\n        resume_schedulers = resume_state[\"schedulers\"]\n        assert len(resume_optimizers) == len(\n            self.optimizers\n        ), \"Wrong lengths of optimizers\"\n        assert len(resume_schedulers) == len(\n            self.schedulers\n        ), \"Wrong lengths of schedulers\"\n        for name, o in resume_optimizers.items():\n            self.optimizers[name].load_state_dict(o)\n        for name, s in resume_schedulers.items():\n            self.schedulers[name].load_state_dict(s)\n\n    def reduce_loss_dict(self, loss_dict):\n        \"\"\"reduce loss dict.\n        In distributed training, it averages the losses among different GPUs .\n        Args:\n            loss_dict (OrderedDict): Loss dict.\n        \"\"\"\n        with torch.no_grad():\n            if self.opt[\"dist\"]:\n                keys = []\n                losses = []\n                for name, value in loss_dict.items():\n                    keys.append(name)\n                    losses.append(value)\n                losses = torch.stack(losses, 0)\n                torch.distributed.reduce(losses, dst=0)\n                if self.rank == 0:\n                    losses /= self.world_size\n                loss_dict = {key: loss for key, loss in zip(keys, losses)}\n\n            log_dict = OrderedDict()\n            for name, value in loss_dict.items():\n                log_dict[name] = value.mean().item()\n\n            return log_dict\n\n    def get_current_log(self):\n        return self.log_dict\n"
  },
  {
    "path": "codes/config/Bulat/models/deg_sr_model.py",
    "content": "import logging\nfrom collections import OrderedDict\nimport random\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.registry import MODEL_REGISTRY\n\nfrom models.base_model import BaseModel\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass DegSRModel(BaseModel):\n    def __init__(self, opt):\n        super().__init__(opt)\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n        else:\n            self.rank = -1  # non dist training\n\n        self.data_names = [\"syn_lr\", \"syn_hr\", \"real_lr\"]\n\n        self.network_names = [\"netSR\", \"netDeg\", \"netD1\", \"netD2\"]\n        self.networks = {}\n\n        self.loss_names = [\n           \"lr_adv\",\n            \"lr_percep\",\n            \"lr_color\",\n            \"lr_tv\",\n            \"sr_tv\",\n            \"sr_pix\",\n            \"sr_adv\",\n            \"sr_percep\"\n        ]\n\n        self.loss_weights = {}\n        self.losses = {}\n        self.optimizers = {}\n\n        # define networks and load pretrained models\n        nets_opt = opt[\"networks\"]\n        defined_network_names = list(nets_opt.keys())\n        assert set(defined_network_names).issubset(set(self.network_names))\n        \n        for name in defined_network_names:\n            setattr(self, name, self.build_network(nets_opt[name]))\n            self.networks[name] = getattr(self, name)\n            \n        if self.is_train:\n            # setup loss, optimizers, schedulers\n            self.setup_train(opt[\"train\"])\n\n            self.max_grad_norm = train_opt[\"max_grad_norm\"]\n            self.D_ratio = train_opt[\"D_ratio\"]\n\n            ## buffer\n            self.fake_lr_buffer = ShuffleBuffer(train_opt[\"buffer_size\"])\n            self.fake_hr_buffer = ShuffleBuffer(train_opt[\"buffer_size\"])\n    \n    def feed_data(self, data):\n\n        self.syn_hr = data[\"tgt\"].to(self.device)\n        self.real_lr = data[\"src\"].to(self.device)\n\n    def forward(self):\n\n        self.fake_real_lr = self.netDeg(self.syn_hr)\n        self.fake_syn_hr = self.netSR(self.fake_real_lr)\n        # self.fake_real_hr = self.netSR(self.real_lr)\n\n    def optimize_parameters(self, step):\n        self.forward()\n        \n        loss_dict = OrderedDict()\n\n        loss_G = 0\n\n        if self.losses.get(\"lr_adv\"):\n            self.set_requires_grad([\"netD1\"], False)\n            g1_adv_loss = self.calculate_gan_loss_G(\n            self.netD1, self.losses[\"lr_adv\"], self.real_lr, self.fake_real_lr\n            )\n            loss_dict[\"g1_adv\"] = g1_adv_loss.item()\n            loss_G += self.loss_weights[\"lr_adv\"] * g1_adv_loss\n\n        if self.losses.get(\"lr_percep\"):\n            lr_percep, lr_style = self.losses[\"lr_percep\"](self.real_lr, self.fake_real_lr)\n            loss_dict[\"lr_percep\"] = lr_percep.item()\n            if lr_style is not None:\n                loss_dict[\"lr_style\"] = lr_style.item()\n                loss_G += self.loss_weights[\"sr_percep\"] * lr_style\n            loss_G += self.loss_weights[\"sr_percep\"] * lr_percep\n        \n        if self.losses.get(\"lr_color\"):\n            lr_color = self.losses[\"lr_color\"](self.fake_real_lr, self.syn_hr)\n            loss_dict[\"lr_color\"] = lr_color.item()\n            loss_G += self.loss_weights[\"lr_color\"] * lr_color\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD2\"], False)\n            sr_adv = self.calculate_gan_loss_G(\n  
              self.netD2, self.losses[\"sr_adv\"], self.syn_hr, self.fake_syn_hr\n            )\n            loss_dict[\"sr_adv\"] = sr_adv.item()\n            loss_G += self.loss_weights[\"sr_adv\"] * sr_adv\n\n        if self.losses.get(\"sr_pix\"):\n            sr_pix = self.losses[\"sr_pix\"](self.fake_syn_hr, self.syn_hr)\n            loss_dict[\"sr_pix\"] = sr_pix.item()\n            loss_G += self.loss_weights[\"sr_pix\"] * sr_pix\n\n        if self.losses.get(\"sr_percep\"):\n            sr_percep, sr_style = self.losses[\"sr_percep\"](self.syn_hr, self.fake_syn_hr)\n            loss_dict[\"sr_percep\"] = sr_percep.item()\n            if sr_style is not None:\n                loss_dict[\"sr_style\"] = sr_style.item()\n                loss_G += self.loss_weights[\"sr_percep\"] * sr_style\n            loss_G += self.loss_weights[\"sr_percep\"] * sr_percep\n        \n        if self.losses.get(\"sr_tv\"):\n            sr_tv = self.losses[\"sr_tv\"](self.fake_real_hr)\n            loss_dict[\"sr_tv\"] = sr_tv.item()\n            loss_G = self.loss_weights[\"sr_tv\"] * sr_tv\n\n        self.set_optimizer(names=[\"netDeg\", \"netSR\"], operation=\"zero_grad\")\n        loss_G.backward()\n        self.clip_grad_norm(names=[\"netDeg\", \"netSR\"], norm=self.max_grad_norm)\n        self.set_optimizer(names=[\"netDeg\", \"netSR\"], operation=\"step\")\n\n        ## update D1, D2\n        loss_D = 0\n\n        if self.losses.get(\"lr_adv\"):\n            if step % self.D_ratio == 0:\n                self.set_requires_grad([\"netD1\"], True)\n                loss_d1 = self.calculate_gan_loss_D(\n                    self.netD1, self.losses[\"lr_adv\"], self.real_lr,\n                    self.fake_lr_buffer.choose(self.fake_real_lr.detach())\n                )\n                loss_dict[\"d1_adv\"] = loss_d1.item()\n                loss_d1 = self.loss_weights[\"lr_adv\"] * loss_d1\n\n                self.set_optimizer(names=[\"netD1\"], operation=\"zero_grad\")\n                loss_d1.backward()\n                self.clip_grad_norm([\"netD1\"], norm=self.max_grad_norm)\n                self.set_optimizer(names=[\"netD1\"], operation=\"step\")\n\n        if self.losses.get(\"sr_adv\"):\n            if step % self.D_ratio == 0:\n                self.set_requires_grad([\"netD2\"], True)\n                loss_d2 = self.calculate_gan_loss_D(\n                    self.netD2, self.losses[\"sr_adv\"], self.syn_hr,\n                    self.fake_sr_buffer.choose(self.fake_syn_hr.detach())\n                )\n                loss_dict[\"d2_adv\"] = loss_d2.item()\n                loss_d2 = self.loss_weights[\"sr_adv\"] * loss_d2\n\n                self.set_optimizer(names=[\"netD2\"], operation=\"zero_grad\")\n                loss_d1.backward()\n                self.clip_grad_norm([\"netD2\"], norm=self.max_grad_norm)\n                self.set_optimizer(names=[\"netD2\"], operation=\"step\")\n\n        self.log_dict = loss_dict\n    \n    def calculate_gan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n\n        loss_real = criterion(d_pred_real, True, is_disc=True)\n        loss_fake = criterion(d_pred_fake, False, is_disc=True)\n\n        return (loss_real + loss_fake) / 2\n\n    def calculate_gan_loss_G(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake)\n        loss_real = criterion(d_pred_fake, True, is_disc=False)\n\n        return loss_real\n\n    def test(self, data):\n        self.real_lr = 
data[\"src\"].to(self.device)\n        self.set_network_state([\"netSR\"], \"eval\")\n        with torch.no_grad():\n            self.fake_real_hr = self.netSR(self.real_lr)\n        self.set_network_state([\"netSR\"], \"train\")\n\n    def get_current_visuals(self, need_GT=True):\n        out_dict = OrderedDict()\n        out_dict[\"lr\"] = self.real_lr.detach()[0].float().cpu()\n        out_dict[\"sr\"] = self.fake_real_hr.detach()[0].float().cpu()\n        return out_dict\n\n\nclass ShuffleBuffer():\n    \"\"\"Random choose previous generated images or ones produced by the latest generators.\n    :param buffer_size: the size of image buffer\n    :type buffer_size: int\n    \"\"\"\n\n    def __init__(self, buffer_size):\n        \"\"\"Initialize the ImagePool class.\n        :param buffer_size: the size of image buffer\n        :type buffer_size: int\n        \"\"\"\n        self.buffer_size = buffer_size\n        self.num_imgs = 0\n        self.images = []\n\n    def choose(self, images, prob=0.5):\n        \"\"\"Return an image from the pool.\n        :param images: the latest generated images from the generator\n        :type images: list\n        :param prob: probability (0~1) of return previous images from buffer\n        :type prob: float\n        :return: Return images from the buffer\n        :rtype: list\n        \"\"\"\n        if self.buffer_size == 0:\n            return  images\n        return_images = []\n        for image in images:\n            image = torch.unsqueeze(image.data, 0)\n            if self.num_imgs < self.buffer_size:\n                self.images.append(image)\n                return_images.append(image)\n                self.num_imgs += 1\n            else:\n                p = random.uniform(0, 1)\n                if p < prob:\n                    idx = random.randint(0, self.buffer_size - 1)\n                    stored_image = self.images[idx].clone()\n                    self.images[idx] = image\n                    return_images.append(stored_image)\n                else:\n                    return_images.append(image)\n        return_images = torch.cat(return_images, 0)\n        return return_images"
  },
  {
    "path": "codes/config/Bulat/options/test/2017Track2.yml",
    "content": "#### general settings\nname: 2017Track2_psnr\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [0]\n\nmetrics: [psnr, ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2017Track2\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nf: 64\n      nb: 8\n      zero_tail: true\n    pretrain: \n      path: log/2017Track2/models/latest_netG1.pth\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: log/2017Track2/models/latest_netSR.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/Bulat/options/test/2018Track2.yml",
    "content": "#### general settings\nname: 2018Track2_psnr\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [1]\n\nmetrics: [best_psnr, best_ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nf: 64\n      nb: 8\n      zero_tail: true\n    pretrain: \n      path: log/2018Track2/models/latest_netG1.pth\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: log/2018Track2/models/latest_netSR.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/Bulat/options/test/2018Track4.yml",
    "content": "#### general settings\nname: 2018Track4_psnr\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [best_psnr, best_ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2018Track4\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nf: 64\n      nb: 8\n      zero_tail: true\n    pretrain: \n      path: log/2018Track4/models/latest_netG1.pth\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: log/2018Track4/models/latest_netSR.pth\n      strict_load: true\n"
  },
  {
    "path": "codes/config/Bulat/options/test/2020Track1.yml",
    "content": "#### general settings\nname: 2020Track1_psnr\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [psnr, ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netG1:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nf: 64\n      nb: 8\n      zero_tail: true\n    pretrain: \n      path: log/2020Track1/models/latest_netG1.pth\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: log/2020Track1/models/latest_netSR.pth\n      strict_load: true\n"
  },
  {
    "path": "codes/config/Bulat/options/train/psnr/2017Track2.yml",
    "content": "#### general settings\nname: 2017Track2\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [0]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/train_LR/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 4  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2017Track1_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nf: 64\n      nb: 8\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain: \n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 16\n\n  losses:\n    lr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    lr_color:\n      type: ColorLoss\n      gauss_opt: ~\n      pool_opt:\n        ksize: 4\n      loss_type: mse\n      stride: 4\n      weight: 1.0\n    \n    sr_pix: \n      type: L1Loss\n      weight: 1.0\n\n  optimizers:\n    default:\n      type: Adam\n      lr: !!float 2e-4\n    netG1: ~\n    netSR: ~\n    netD1: ~\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n  \n"
  },
  {
    "path": "codes/config/Bulat/options/train/psnr/2018Track2.yml",
    "content": "#### general settings\nname: 2018Track2\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [3]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nf: 64\n      nb: 8\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain: \n      path: ~\n      strict_load: true\n  \n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 16\n\n  losses:\n    lr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    lr_color:\n      type: ColorLoss\n      gauss_opt: ~\n      pool_opt:\n        ksize: 4\n      loss_type: mse\n      stride: 4\n      weight: 1.0\n    \n    sr_pix: \n      type: L1Loss\n      weight: 1.0\n\n  optimizers:\n    default:\n      type: Adam\n      lr: !!float 2e-4\n    netG1: ~\n    netSR: ~\n    netD1: ~\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n  "
  },
  {
    "path": "codes/config/Bulat/options/train/psnr/2018Track4.yml",
    "content": "#### general settings\nname: 2018Track4\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [2]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/x4.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track4\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nf: 64\n      nb: 8\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain: \n      path: ~\n      strict_load: true\n  \n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 16\n\n  losses:\n    lr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    lr_color:\n      type: ColorLoss\n      gauss_opt: ~\n      pool_opt:\n        ksize: 4\n      loss_type: mse\n      stride: 4\n      weight: 1.0\n    \n    sr_pix: \n      type: L1Loss\n      weight: 1.0\n\n  optimizers:\n    default:\n      type: Adam\n      lr: !!float 2e-4\n    netG1: ~\n    netSR: ~\n    netD1: ~\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n  "
  },
  {
    "path": "codes/config/Bulat/options/train/psnr/2020Track1.yml",
    "content": "#### general settings\nname: 2020Track1\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [1]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/train_source.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nf: 64\n      nb: 8\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain: \n      path: ~\n      strict_load: true\n  \n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 16\n\n  losses:\n    lr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    lr_color:\n      type: ColorLoss\n      gauss_opt: ~\n      pool_opt:\n        ksize: 4\n      loss_type: mse\n      stride: 4\n      weight: 1.0\n    \n    sr_pix: \n      type: L1Loss\n      weight: 1.0\n\n  optimizers:\n    default:\n      type: Adam\n      lr: !!float 2e-4\n    netG1: ~\n    netSR: ~\n    netD1: ~\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n  "
  },
  {
    "path": "codes/config/Bulat/test.py",
    "content": "import argparse\nimport logging\nimport os.path\nimport sys\nimport time\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\nfrom utils import bgr2ycbcr, imresize\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=False)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    util.mkdirs(\n        (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n    )\n\n    os.system(\"rm ./result\")\n    os.symlink(os.path.join(opt[\"path\"][\"results_root\"], \"..\"), \"./result\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    torch.backends.cudnn.benchmark = True\n\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"test_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO,\n        screen=True,\n        tofile=True,\n    )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    logger = logging.getLogger(\"base\")\n    logger.info(option.dict2str(opt))\n\n    # Create test dataset and dataloader\n    test_datasets = []\n    test_loaders = []\n\n    for phase, dataset_opt in 
sorted(opt[\"datasets\"].items()):\n\n        test_set = create_dataset(dataset_opt)\n        test_loader = create_dataloader(test_set, dataset_opt, opt[\"dist\"])\n\n        if rank == 0:\n            logger.info(\n                \"Number of test images in [{:s}]: {:d}\".format(\n                    dataset_opt[\"name\"], len(test_set)\n                )\n            )\n        test_datasets.append(test_set)\n        test_loaders.append(test_loader)\n\n    # load pretrained model by default\n    model = create_model(opt)\n\n    for test_dataset, test_loader in zip(test_datasets, test_loaders):\n\n        test_set_name = test_dataset.opt[\"name\"]\n        dataset_dir = os.path.join(opt[\"path\"][\"results_root\"], test_set_name)\n\n        if rank == 0:\n            logger.info(\"\\nTesting [{:s}]...\".format(test_set_name))\n            util.mkdir(dataset_dir)\n\n        validate(\n            model,\n            test_dataset,\n            test_loader,\n            opt,\n            measure,\n            dataset_dir,\n            test_set_name,\n            logger,\n        )\n\n\ndef validate(\n    model, dataset, dist_loader, opt, measure, dataset_dir, test_set_name, logger\n):\n\n    test_results = {}\n    test_results_y = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n        test_results_y[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        test_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        img_path = test_data[\"src_path\"][0]\n        img_name = img_path.split(\"/\")[-1].split(\".\")[0]\n\n        model.test(test_data)\n        visuals = model.get_current_visuals()\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n\n        suffix = opt[\"suffix\"]\n        if suffix:\n            save_img_path = os.path.join(dataset_dir, img_name + suffix + \".png\")\n        else:\n            save_img_path = os.path.join(dataset_dir, img_name + \".png\")\n        util.save_img(sr_img, save_img_path)\n\n        message = \"img:{:15s}; \".format(img_name)\n\n        crop_border = opt[\"crop_border\"] if opt[\"crop_border\"] else opt[\"scale\"]\n\n        if crop_border == 0:\n            cropped_sr_img = sr_img\n        else:\n            cropped_sr_img = sr_img[\n                crop_border:-crop_border, crop_border:-crop_border, :\n            ]\n\n        if \"tgt\" in test_data.keys():\n            gt_img = util.tensor2img(test_data[\"tgt\"][0].double().cpu())\n\n            if crop_border == 0:\n                cropped_gt_img = gt_img\n            else:\n                cropped_gt_img = gt_img[\n                    crop_border:-crop_border, crop_border:-crop_border, :\n                ]\n        else:\n            cropped_gt_img = None\n\n        message += \"Scores - \"\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n            message += \"{}: {:.6f}; \".format(k, v)\n\n        if sr_img.shape[2] == 3:  # RGB image\n            sr_img_y = bgr2ycbcr(sr_img, only_y=True)\n            if crop_border == 0:\n                cropped_sr_img_y = sr_img_y * 255\n            else:\n                cropped_sr_img_y = (\n                    
sr_img_y[crop_border:-crop_border, crop_border:-crop_border] * 255\n                )\n            # the ground truth may be absent; guard on cropped_gt_img, which is\n            # always defined above, to avoid a NameError on gt_img\n            if cropped_gt_img is not None:\n                gt_img_y = bgr2ycbcr(gt_img, only_y=True)\n                if crop_border == 0:\n                    cropped_gt_img_y = gt_img_y * 255\n                else:\n                    cropped_gt_img_y = (\n                        gt_img_y[crop_border:-crop_border, crop_border:-crop_border]\n                        * 255\n                    )\n            else:\n                gt_img_y = cropped_gt_img_y = None\n\n            message += \"Y Scores - \"\n            scores = measure(\n                res=cropped_sr_img_y, ref=cropped_gt_img_y, metrics=opt[\"metrics\"]\n            )\n            for k, v in scores.items():\n                test_results_y[k][idx] = v\n                message += \"{}: {:.6f}; \".format(k, v)\n\n        logger.info(message)\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n        for k, v in test_results_y.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    # log\n    avg_results = {}\n    message = \"Average Results for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger.info(message)\n\n    avg_results_y = {}\n    message = \"Average Results on Y channel for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results_y.items():\n            avg_results_y[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results_y[k])\n\n        logger.info(message)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/config/Bulat/train.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport random\nimport sys\nimport time\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef setup_dataloaer(opt, logger):\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        rank = 0\n        world_size = 1\n\n    for phase, dataset_opt in opt[\"datasets\"].items():\n        if phase == \"train\":\n            train_set = create_dataset(dataset_opt)\n            train_loader = create_dataloader(train_set, dataset_opt, opt[\"dist\"])\n            total_iters = opt[\"train\"][\"niter\"]\n            total_epochs = total_iters // (len(train_loader) - 1) + 1\n            if rank == 0:\n                logger.info(\n                    \"Number of train images: {:,d}, iters: {:,d}\".format(\n                        len(train_set), len(train_loader)\n                    )\n                )\n                logger.info(\n                    \"Total epochs needed: {:d} for iters {:,d}\".format(\n                        total_epochs, opt[\"train\"][\"niter\"]\n                    )\n                )\n\n        elif phase == \"val\":\n            val_set = create_dataset(dataset_opt)\n            val_loader = create_dataloader(val_set, dataset_opt, opt[\"dist\"])\n            if rank == 0:\n                logger.info(\n                    \"Number of val images in [{:s}]: {:d}\".format(\n                        dataset_opt[\"name\"], len(val_set)\n                    )\n                )\n        else:\n            raise NotImplementedError(\"Phase [{:s}] is not recognized.\".format(phase))\n\n    assert train_loader is not None\n    assert val_loader is not None\n\n    return train_set, train_loader, val_set, val_loader, total_iters, total_epochs\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=True)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = 
torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    if opt[\"train\"].get(\"resume_state\", None) is None:\n        util.mkdir_and_rename(\n            opt[\"path\"][\"experiments_root\"]\n        )  # rename experiment folder if exists\n        util.mkdirs(\n            (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n        )\n        os.system(\"rm ./log\")\n        os.symlink(os.path.join(opt[\"path\"][\"experiments_root\"], \"..\"), \"./log\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        # read the node rank from the environment before deriving the process rank\n        if args.dist_url == \"env://\" and args.rank == -1:\n            args.rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, \"\n            f\"world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    # apply the configured manual seed; fall back to the process rank otherwise\n    seed = opt[\"train\"][\"manual_seed\"]\n    if seed is None:\n        seed = rank\n    util.set_random_seed(seed)\n\n    torch.backends.cudnn.benchmark = True\n    # torch.backends.cudnn.deterministic = True\n\n    # setup tensorboard and val logger\n    if rank == 0:\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger = SummaryWriter(log_dir=\"log/{}/tb_logger/\".format(opt[\"name\"]))\n\n        util.setup_logger(\n            \"val\",\n            opt[\"path\"][\"log\"],\n            \"val_\" + opt[\"name\"],\n            level=logging.INFO,\n            screen=True,\n            tofile=True,\n        )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    # config loggers. 
Logging does not work before this call.\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"train_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO if rank == 0 else logging.ERROR,\n        screen=True,\n        tofile=True,\n    )\n\n    logger = logging.getLogger(\"base\")\n    if rank == 0:\n        logger.info(option.dict2str(opt))\n\n    # create dataset\n    (\n        train_set,\n        train_loader,\n        val_set,\n        val_loader,\n        total_iters,\n        total_epochs,\n    ) = setup_dataloader(opt, logger)\n\n    # create model\n    model = create_model(opt)\n\n    # loading resume state if exists\n    if opt[\"train\"].get(\"resume_state\", None):\n        # distributed resuming: all load into default GPU\n        device_id = gpu\n        resume_state = torch.load(\n            opt[\"train\"][\"resume_state\"],\n            map_location=lambda storage, loc: storage.cuda(device_id),\n        )\n\n        logger.info(\n            \"Resuming training from epoch: {}, iter: {}.\".format(\n                resume_state[\"epoch\"], resume_state[\"iter\"]\n            )\n        )\n\n        start_epoch = resume_state[\"epoch\"]\n        current_step = resume_state[\"iter\"]\n        model.resume_training(resume_state)  # handle optimizers and schedulers\n\n    else:\n        current_step = 0\n        start_epoch = 0\n\n    logger.info(\n        \"Start training from epoch: {:d}, iter: {:d}\".format(start_epoch, current_step)\n    )\n    data_time, iter_time = time.time(), time.time()\n    avg_data_time = avg_iter_time = 0\n    count = 0\n    for epoch in range(start_epoch, total_epochs + 1):\n        for _, train_data in enumerate(train_loader):\n\n            current_step += 1\n            count += 1\n            if current_step > total_iters:\n                break\n\n            data_time = time.time() - data_time\n            avg_data_time = (avg_data_time * (count - 1) + data_time) / count\n\n            model.feed_data(train_data)\n            model.optimize_parameters(current_step)\n            model.update_learning_rate(\n                current_step, warmup_iter=opt[\"train\"][\"warmup_iter\"]\n            )\n\n            iter_time = time.time() - iter_time\n            avg_iter_time = (avg_iter_time * (count - 1) + iter_time) / count\n\n            # log\n            if current_step % opt[\"logger\"][\"print_freq\"] == 0:\n                logs = model.get_current_log()\n                message = (\n                    f\"<epoch:{epoch:3d}, iter:{current_step:8,d}, \"\n                    f\"lr:{model.get_current_learning_rate():.3e}> \"\n                )\n\n                message += f'[time (data): {avg_iter_time:.3f} ({avg_data_time:.3f})] '\n                for k, v in logs.items():\n                    message += \"{:s}: {:.4e}; \".format(k, v)\n                    # tensorboard logger\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        if rank == 0:\n                            tb_logger.add_scalar(k, v, current_step)\n                logger.info(message)\n\n            # validation\n            if current_step % opt[\"train\"][\"val_freq\"] == 0:\n\n                avg_results = validate(\n                    model, val_set, val_loader, opt, measure, epoch, current_step\n                )\n\n                # log validation scores to tensorboard; kept inside this branch\n                # so that `avg_results` is always defined when it is read\n                if rank == 0 and opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                    
for k, v in avg_results.items():\n                        tb_logger.add_scalar(k, v, current_step)\n\n            # save models and training states\n            if current_step % opt[\"logger\"][\"save_checkpoint_freq\"] == 0:\n                if rank == 0:\n                    logger.info(\"Saving models and training states.\")\n                    model.save(current_step)\n                    model.save_training_state(epoch, current_step)\n\n            data_time = time.time()\n            iter_time = time.time()\n\n    if rank == 0:\n        logger.info(\"Saving the final model.\")\n        model.save(\"latest\")\n        logger.info(\"End of training.\")\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger.close()\n\n\ndef validate(model, dataset, dist_loader, opt, measure, epoch, current_step):\n\n    test_results = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    if rank == 0:\n        pbar = tqdm(total=len(dataset), leave=False, dynamic_ncols=True)\n\n    indices = list(range(rank, len(dataset), world_size))\n    for idx, val_data in enumerate(dist_loader):\n        idx = indices[idx]\n\n        LR_img = val_data[\"src\"]\n        lr_img = util.tensor2img(LR_img)  # save LR image for reference\n\n        model.test(val_data)\n        visuals = model.get_current_visuals()\n\n        # Save images for reference\n        img_name = val_data[\"src_path\"][0].split(\"/\")[-1].split(\".\")[0]\n        img_dir = os.path.join(opt[\"path\"][\"val_images\"], img_name)\n\n        util.mkdir(img_dir)\n        save_lr_path = os.path.join(img_dir, \"{:s}_LR.png\".format(img_name))\n        util.save_img(lr_img, save_lr_path)\n\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n        save_img_path = os.path.join(\n            img_dir, \"{:s}_{:d}.png\".format(img_name, current_step)\n        )\n        util.save_img(sr_img, save_img_path)\n\n        if \"fake_lr\" in visuals.keys():\n            fake_lr_img = util.tensor2img(visuals[\"fake_lr\"])\n            save_img_path = os.path.join(\n                img_dir, f\"fake_lr_{current_step:d}.png\"\n            )\n            util.save_img(fake_lr_img, save_img_path)\n\n        # calculate scores\n        crop_size = opt[\"scale\"]\n        cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        if \"tgt\" in val_data.keys():\n            gt_img = util.tensor2img(val_data[\"tgt\"])\n            cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        else:\n            cropped_gt_img = gt_img = None\n\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n\n        if rank == 0:\n            for _ in range(world_size):\n                pbar.update(1)\n    if rank == 0:\n        pbar.close()\n\n    # log\n    avg_results = {}\n    message = \" <epoch:{:3d}, iter:{:8,d}> Average scores:\\t\".format(\n        epoch, current_step\n    )\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            
message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger_val = logging.getLogger(\"val\")  # validation logger\n        logger_val.info(message)\n    \n    del test_results\n    torch.cuda.empty_cache()\n    return avg_results\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/config/CinGAN/README.md",
    "content": "This repo supports the training and testing of CinGAN in the paper [Unsupervised Image Super-Resolution using Cycle-in-Cycle Generative Adversarial Networks](https://arxiv.org/abs/1809.00437)"
  },
  {
    "path": "codes/config/CinGAN/archs/__init__.py",
    "content": "import importlib\nimport os\nimport os.path as osp\n\nfrom utils.registry import ARCH_REGISTRY, LOSS_REGISTRY, LR_SCHEDULER_REGISTRY\n\narch_folder = osp.dirname(osp.abspath(__file__))\narch_filenames = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(arch_folder)\n    if v.endswith(\".py\")\n]\n# import all the arch modules\n_arch_modules = [\n    importlib.import_module(f\"archs.{file_name}\") for file_name in arch_filenames\n]\n\n\ndef build_network(net_opt):\n    which_network = net_opt[\"which_network\"]\n    net = ARCH_REGISTRY.get(which_network)(**net_opt[\"setting\"])\n    return net\n\n\ndef build_loss(loss_opt):\n    loss_type = loss_opt.pop(\"type\")\n    loss = LOSS_REGISTRY.get(loss_type)(**loss_opt)\n    return loss\n\ndef build_scheduler(optimizer, scheduler_opt):\n    scheduler_type = scheduler_opt.pop(\"type\")\n    scheduler = LR_SCHEDULER_REGISTRY.get(scheduler_type)(optimizer, **scheduler_opt)\n    return scheduler\n"
  },
  {
    "path": "codes/config/CinGAN/archs/discriminator.py",
    "content": "import torch\nimport torch.nn as nn\nimport torchvision\nimport functools\n\nfrom utils.registry import ARCH_REGISTRY\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG128(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512 * 4 * 4, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG32(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = 
nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass PatchGANDiscriminator(nn.Module):\n    \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n    def __init__(self, in_c, nf, nb, stride=1, norm_layer=nn.InstanceNorm2d):\n        \"\"\"Construct a PatchGAN discriminator\n\n        Parameters:\n            input_nc (int)  -- the number of channels in input images\n            ndf (int)       -- the number of filters in the last conv layer\n            n_layers (int)  -- the number of conv layers in the discriminator\n            norm_layer      -- normalization layer\n        \"\"\"\n        super().__init__()\n        if (\n            type(norm_layer) == functools.partial\n        ):  # no need to use bias as BatchNorm2d has affine parameters\n            use_bias = norm_layer.func == nn.InstanceNorm2d\n        else:\n            use_bias = norm_layer == nn.InstanceNorm2d\n\n        kw = 4\n        padw = 1\n        sequence = [\n            nn.Conv2d(in_c, nf, kernel_size=kw, stride=stride, padding=padw),\n            nn.LeakyReLU(0.2, True),\n        ]\n        nf_mult = 1\n        nf_mult_prev = 1\n        for n in range(1, nb):  # gradually increase the number of filters\n            nf_mult_prev = nf_mult\n            nf_mult = min(2 ** n, 8)\n            sequence += [\n                nn.Conv2d(\n                    nf * nf_mult_prev,\n                    nf * nf_mult,\n                    kernel_size=kw,\n                    stride=2,\n                    padding=padw,\n                    bias=use_bias,\n                ),\n                norm_layer(nf * nf_mult),\n                nn.LeakyReLU(0.2, True),\n            ]\n\n        nf_mult_prev = nf_mult\n        nf_mult = min(2 ** nb, 8)\n        sequence += [\n            nn.Conv2d(\n                nf * nf_mult_prev,\n                nf * nf_mult,\n                kernel_size=kw,\n                stride=1,\n                padding=padw,\n                bias=use_bias,\n            ),\n            norm_layer(nf * nf_mult),\n            nn.LeakyReLU(0.2, True),\n        ]\n\n        sequence += [\n            nn.Conv2d(nf * nf_mult, nf, kernel_size=kw, stride=1, padding=padw)\n        ]\n        
self.model = nn.Sequential(*sequence)\n\n    def forward(self, input):\n        \"\"\"Standard forward.\"\"\"\n        return self.model(input)\n
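\n\n# Illustrative usage: the discriminator scores patches rather than whole\n# images, so the output is a spatial score map, e.g.\n#   d = PatchGANDiscriminator(in_c=3, nf=64, nb=3, stride=1)\n#   score_map = d(torch.randn(1, 3, 32, 32))  # -> [1, 64, h', w']\n"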
  },
  {
    "path": "codes/config/CinGAN/archs/edsr.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(\n        self,\n        rgb_range,\n        rgb_mean=(0.4488, 0.4371, 0.4040),\n        rgb_std=(1.0, 1.0, 1.0),\n        sign=-1,\n    ):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\n\n\n@ARCH_REGISTRY.register()\nclass EDSR(nn.Module):\n    def __init__(self, nb, nf, res_scale=0.1, upscale=4, conv=default_conv):\n        super(EDSR, self).__init__()\n\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        scale = upscale\n       
        act = nn.ReLU(True)\n        self.sub_mean = MeanShift(255.0, sign=-1)\n        self.add_mean = MeanShift(255.0, sign=1)\n\n        # define head module\n        m_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, n_feats, kernel_size, act=act, res_scale=res_scale)\n            for _ in range(n_resblocks)\n        ]\n        m_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = nn.Sequential(*m_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x * 255.0)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = self.tail(res)\n        x = self.add_mean(x) / 255.0\n\n        return x\n
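\n\n# Illustrative: the `netSR` entries in the options/*.yml configs build this as\n#   EDSR(nf=64, nb=16, res_scale=1, upscale=4)\n# inputs are RGB tensors in [0, 1]; the output is upscaled by `upscale`.\n"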
  },
  {
    "path": "codes/config/CinGAN/archs/loss.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom utils.registry import LOSS_REGISTRY\n\nfrom .vgg import VGGFeatureExtractor\n\n\n@LOSS_REGISTRY.register()\nclass TVLoss(nn.Module):\n    def __init__(self, penealty=\"L1Loss\"):\n        super().__init__()\n        self.penealty = getattr(nn, penealty)()\n\n    def forward(self, pred):\n        y_diff = self.penealty(pred[:, :, :-1, :], pred[:, :, 1:, :])\n        x_diff = self.penealty(pred[:, :, :, :-1], pred[:, :, :, 1:])\n\n        loss = x_diff + y_diff\n\n        return loss\n\n\n@LOSS_REGISTRY.register()\nclass MSELoss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.mse_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass L1Loss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.l1_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass GANLoss(nn.Module):\n    \"\"\"Define GAN loss.\n    Args:\n        gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.\n        real_label_val (float): The value for real label. Default: 1.0.\n        fake_label_val (float): The value for fake label. Default: 0.0.\n    \"\"\"\n\n    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):\n        super(GANLoss, self).__init__()\n        self.gan_type = gan_type\n        self.real_label_val = real_label_val\n        self.fake_label_val = fake_label_val\n\n        if self.gan_type == \"vanilla\":\n            self.loss = nn.BCEWithLogitsLoss()\n        elif self.gan_type == \"lsgan\":\n            self.loss = nn.MSELoss()\n        elif self.gan_type == \"wgan\":\n            self.loss = self._wgan_loss\n        elif self.gan_type == \"wgan_softplus\":\n            self.loss = self._wgan_softplus_loss\n        elif self.gan_type == \"hinge\":\n            self.loss = nn.ReLU()\n        else:\n            raise NotImplementedError(f\"GAN type {self.gan_type} is not implemented.\")\n\n    def _wgan_loss(self, input, target):\n        \"\"\"wgan loss.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return -input.mean() if target else input.mean()\n\n    def _wgan_softplus_loss(self, input, target):\n        \"\"\"wgan loss with soft plus. softplus is a smooth approximation to the\n        ReLU function.\n        In StyleGAN2, it is called:\n            Logistic loss for discriminator;\n            Non-saturating loss for generator.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return F.softplus(-input).mean() if target else F.softplus(input).mean()\n\n    def get_target_label(self, input, target_is_real):\n        \"\"\"Get target label.\n        Args:\n            input (Tensor): Input tensor.\n            target_is_real (bool): Whether the target is real or fake.\n        Returns:\n            (bool | Tensor): Target tensor. 
Return bool for wgan, otherwise,\n                return Tensor.\n        \"\"\"\n\n        if self.gan_type in [\"wgan\", \"wgan_softplus\"]:\n            return target_is_real\n        target_val = self.real_label_val if target_is_real else self.fake_label_val\n        return input.new_ones(input.size()) * target_val\n\n    def forward(self, input, target_is_real, is_disc=False):\n        \"\"\"\n        Args:\n            input (Tensor): The input for the loss module, i.e., the network\n                prediction.\n            target_is_real (bool): Whether the target is real or fake.\n            is_disc (bool): Whether the loss is for discriminators or not.\n                Default: False.\n        Returns:\n            Tensor: GAN loss value.\n        \"\"\"\n        target_label = self.get_target_label(input, target_is_real)\n        if self.gan_type == \"hinge\":\n            if is_disc:  # for discriminators in hinge-gan\n                input = -input if target_is_real else input\n                loss = self.loss(1 + input).mean()\n            else:  # for generators in hinge-gan\n                loss = -input.mean()\n        else:  # other gan types\n            loss = self.loss(input, target_label)\n\n        return loss\n\n\n@LOSS_REGISTRY.register()\nclass PerceptualLoss(nn.Module):\n    \"\"\"Perceptual loss with commonly used style loss.\n    Args:\n        layer_weights (dict): The weight for each layer of vgg feature.\n            Here is an example: {'conv5_4': 1.}, which means the conv5_4\n            feature layer (before relu5_4) will be extracted with weight\n            1.0 in calculating losses.\n        vgg_type (str): The type of vgg network used as feature extractor.\n            Default: 'vgg19'.\n        use_input_norm (bool):  If True, normalize the input image in vgg.\n            Default: True.\n        range_norm (bool): If True, normalize images from range [-1, 1] to [0, 1].\n            Default: False.\n        perceptual_weight (float): If `perceptual_weight > 0`, the perceptual\n            loss will be calculated and the loss will be multiplied by the\n            weight. Default: 1.0.\n        style_weight (float): If `style_weight > 0`, the style loss will be\n            calculated and the loss will be multiplied by the weight.\n            Default: 0.\n        criterion (str): Criterion used for perceptual loss. 
Default: 'l1'.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_weights,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        perceptual_weight=1.0,\n        style_weight=0.0,\n        criterion=\"l1\",\n    ):\n        super(PerceptualLoss, self).__init__()\n        self.perceptual_weight = perceptual_weight\n        self.style_weight = style_weight\n        self.layer_weights = layer_weights\n        self.vgg = VGGFeatureExtractor(\n            layer_name_list=list(layer_weights.keys()),\n            vgg_type=vgg_type,\n            use_input_norm=use_input_norm,\n            range_norm=range_norm,\n        )\n\n        self.criterion_type = criterion\n        if self.criterion_type == \"l1\":\n            self.criterion = torch.nn.L1Loss()\n        elif self.criterion_type == \"l2\":\n            # torch has no `L2Loss`; MSELoss is the L2 criterion\n            self.criterion = torch.nn.MSELoss()\n        elif self.criterion_type == \"fro\":\n            self.criterion = None\n        else:\n            raise NotImplementedError(f\"{criterion} criterion is not supported.\")\n\n    def forward(self, x, gt):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, c, h, w).\n            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        # extract vgg features\n        x_features = self.vgg(x)\n        gt_features = self.vgg(gt.detach())\n\n        # calculate perceptual loss\n        if self.perceptual_weight > 0:\n            percep_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    percep_loss += (\n                        torch.norm(x_features[k] - gt_features[k], p=\"fro\")\n                        * self.layer_weights[k]\n                    )\n                else:\n                    percep_loss += (\n                        self.criterion(x_features[k], gt_features[k])\n                        * self.layer_weights[k]\n                    )\n            percep_loss *= self.perceptual_weight\n        else:\n            percep_loss = None\n\n        # calculate style loss\n        if self.style_weight > 0:\n            style_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    style_loss += (\n                        torch.norm(\n                            self._gram_mat(x_features[k])\n                            - self._gram_mat(gt_features[k]),\n                            p=\"fro\",\n                        )\n                        * self.layer_weights[k]\n                    )\n                else:\n                    style_loss += (\n                        self.criterion(\n                            self._gram_mat(x_features[k]),\n                            self._gram_mat(gt_features[k]),\n                        )\n                        * self.layer_weights[k]\n                    )\n            style_loss *= self.style_weight\n        else:\n            style_loss = None\n\n        return percep_loss, style_loss\n\n    def _gram_mat(self, x):\n        \"\"\"Calculate Gram matrix.\n        Args:\n            x (torch.Tensor): Tensor with shape of (n, c, h, w).\n        Returns:\n            torch.Tensor: Gram matrix.\n        \"\"\"\n        n, c, h, w = x.size()\n        features = x.view(n, c, w * h)\n        features_t = features.transpose(1, 2)\n        gram = features.bmm(features_t) / (c * h * 
w)\n        return gram\n\n\n@LOSS_REGISTRY.register()\nclass CharbonnierLoss(nn.Module):\n    \"\"\"Charbonnier Loss (L1)\"\"\"\n\n    def __init__(self, eps=1e-6):\n        super(CharbonnierLoss, self).__init__()\n        self.eps = eps\n\n    def forward(self, x, y):\n        diff = x - y\n        loss = torch.mean(torch.sqrt(diff * diff + self.eps))\n        return loss\n\n\nclass GradientPenaltyLoss(nn.Module):\n    def __init__(self, device=torch.device(\"cpu\")):\n        super(GradientPenaltyLoss, self).__init__()\n        self.register_buffer(\"grad_outputs\", torch.Tensor())\n        self.grad_outputs = self.grad_outputs.to(device)\n\n    def get_grad_outputs(self, input):\n        if self.grad_outputs.size() != input.size():\n            self.grad_outputs.resize_(input.size()).fill_(1.0)\n        return self.grad_outputs\n\n    def forward(self, interp, interp_crit):\n        grad_outputs = self.get_grad_outputs(interp_crit)\n        grad_interp = torch.autograd.grad(\n            outputs=interp_crit,\n            inputs=interp,\n            grad_outputs=grad_outputs,\n            create_graph=True,\n            retain_graph=True,\n            only_inputs=True,\n        )[0]\n        grad_interp = grad_interp.view(grad_interp.size(0), -1)\n        grad_interp_norm = grad_interp.norm(2, dim=1)\n\n        loss = ((grad_interp_norm - 1) ** 2).mean()\n        return loss\n
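\n\n# Illustrative GANLoss usage with the `lsgan` settings from the training\n# configs (`is_disc=True` when updating the discriminator):\n#   criterion = GANLoss(gan_type=\"lsgan\", real_label_val=1.0, fake_label_val=0.0)\n#   d_loss = criterion(d_real, True, is_disc=True) + criterion(d_fake, False, is_disc=True)\n#   g_loss = criterion(d_fake_for_g, True)  # generator tries to look real\n"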
  },
  {
    "path": "codes/config/CinGAN/archs/lr_scheduler.py",
    "content": "import math\nfrom collections import Counter, defaultdict\n\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\n\nfrom utils.registry import LR_SCHEDULER_REGISTRY\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass LinearDecayLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        decay_prop,\n        total_steps,\n        last_epoch=-1,\n    ):\n        self.decay_prop = decay_prop\n        self.total_steps = total_steps\n\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n\n        return [\n            group[\"initial_lr\"]\n            * (1 - (self.last_epoch + 1) * self.decay_prop / self.total_steps)\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass MultiStepRestartLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        milestones,\n        restarts=None,\n        weights=None,\n        gamma=0.1,\n        clear_state=False,\n        last_epoch=-1,\n    ):\n        self.milestones = Counter(milestones)\n        self.gamma = gamma\n        self.clear_state = clear_state\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch in self.restarts:\n            if self.clear_state:\n                self.optimizer.state = defaultdict(dict)\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        if self.last_epoch not in self.milestones:\n            return [group[\"lr\"] for group in self.optimizer.param_groups]\n        return [\n            group[\"lr\"] * self.gamma ** self.milestones[self.last_epoch]\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass CosineAnnealingRestartLR(_LRScheduler):\n    def __init__(\n        self, optimizer, T_period, restarts=None, weights=None, eta_min=0, last_epoch=-1\n    ):\n        self.T_period = T_period\n        self.T_max = self.T_period[0]  # current T period\n        self.eta_min = eta_min\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        self.last_restart = 0\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch == 0:\n            return self.base_lrs\n        elif self.last_epoch in self.restarts:\n            self.last_restart = self.last_epoch\n            self.T_max = self.T_period[self.restarts.index(self.last_epoch) + 1]\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        elif (self.last_epoch - self.last_restart - 1 - self.T_max) % (\n            2 * self.T_max\n        ) == 0:\n            return [\n                group[\"lr\"]\n                + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2\n                for base_lr, group in 
zip(self.base_lrs, self.optimizer.param_groups)\n            ]\n        return [\n            (1 + math.cos(math.pi * (self.last_epoch - self.last_restart) / self.T_max))\n            / (\n                1\n                + math.cos(\n                    math.pi * ((self.last_epoch - self.last_restart) - 1) / self.T_max\n                )\n            )\n            * (group[\"lr\"] - self.eta_min)\n            + self.eta_min\n            for group in self.optimizer.param_groups\n        ]\n"
  },
  {
    "path": "codes/config/CinGAN/archs/module_util.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\n\ndef initialize_weights(net_l, scale=1):\n    if not isinstance(net_l, list):\n        net_l = [net_l]\n    for net in net_l:\n        for m in net.modules():\n            if isinstance(m, nn.Conv2d):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale  # for residual block\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                init.constant_(m.weight, 1)\n                init.constant_(m.bias.data, 0.0)\n\n\ndef make_layer(block, n_layers):\n    layers = []\n    for _ in range(n_layers):\n        layers.append(block())\n    return nn.Sequential(*layers)\n\n\nclass ResidualBlock_noBN(nn.Module):\n    \"\"\"Residual block w/o BN\n    ---Conv-ReLU-Conv-+-\n     |________________|\n    \"\"\"\n\n    def __init__(self, nf=64):\n        super(ResidualBlock_noBN, self).__init__()\n        self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n\n        # initialization\n        initialize_weights([self.conv1, self.conv2], 0.1)\n\n    def forward(self, x):\n        identity = x\n        out = F.relu(self.conv1(x), inplace=True)\n        out = self.conv2(out)\n        return identity + out\n\n\ndef flow_warp(x, flow, interp_mode=\"bilinear\", padding_mode=\"zeros\"):\n    \"\"\"Warp an image or feature map with optical flow\n    Args:\n        x (Tensor): size (N, C, H, W)\n        flow (Tensor): size (N, H, W, 2), normal value\n        interp_mode (str): 'nearest' or 'bilinear'\n        padding_mode (str): 'zeros' or 'border' or 'reflection'\n\n    Returns:\n        Tensor: warped image or feature map\n    \"\"\"\n    assert x.size()[-2:] == flow.size()[1:3]\n    B, C, H, W = x.size()\n    # mesh grid\n    grid_y, grid_x = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))\n    grid = torch.stack((grid_x, grid_y), 2).float()  # W(x), H(y), 2\n    grid.requires_grad = False\n    grid = grid.type_as(x)\n    vgrid = grid + flow\n    # scale grid to [-1,1]\n    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(W - 1, 1) - 1.0\n    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(H - 1, 1) - 1.0\n    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)\n    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode)\n    return output\n"
  },
  {
    "path": "codes/config/CinGAN/archs/rcan.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\nclass CALayer(nn.Module):\n    def __init__(self, channel, reduction=16):\n        super(CALayer, self).__init__()\n        # global average pooling: feature --> point\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        # feature channel downscale and upscale --> channel weight\n        self.conv_du = nn.Sequential(\n            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),\n            
nn.ReLU(inplace=True),\n            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),\n            nn.Sigmoid(),\n        )\n\n    def forward(self, x):\n        y = self.avg_pool(x)\n        y = self.conv_du(y)\n        return x * y\n\n\n## Residual Channel Attention Block (RCAB)\nclass RCAB(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        reduction,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(RCAB, self).__init__()\n        modules_body = []\n        for i in range(2):\n            modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                modules_body.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                modules_body.append(act)\n        modules_body.append(CALayer(n_feat, reduction))\n        self.body = nn.Sequential(*modules_body)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x)\n        # res = self.body(x).mul(self.res_scale)\n        res += x\n        return res\n\n\n## Residual Group (RG)\nclass ResidualGroup(nn.Module):\n    def __init__(\n        self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks\n    ):\n        super(ResidualGroup, self).__init__()\n        modules_body = []\n        modules_body = [\n            RCAB(\n                conv,\n                n_feat,\n                kernel_size,\n                reduction,\n                bias=True,\n                bn=False,\n                act=nn.ReLU(True),\n                res_scale=1,\n            )\n            for _ in range(n_resblocks)\n        ]\n        modules_body.append(conv(n_feat, n_feat, kernel_size))\n        self.body = nn.Sequential(*modules_body)\n\n    def forward(self, x):\n        res = self.body(x)\n        res += x\n        return res\n\n\n## Residual Channel Attention Network (RCAN)\n@ARCH_REGISTRY.register()\nclass RCAN(nn.Module):\n    def __init__(self, ng, nb, nf, reduction=16, upscale=4, conv=default_conv):\n        super(RCAN, self).__init__()\n\n        n_resgroups = ng\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        reduction = reduction\n        scale = upscale\n\n        act = nn.ReLU(True)\n\n        # RGB mean for DIV2K\n        rgb_mean = (0.4488, 0.4371, 0.4040)\n        rgb_std = (1.0, 1.0, 1.0)\n        self.sub_mean = MeanShift(1.0, rgb_mean, rgb_std, -1)\n\n        # define head module\n        modules_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        modules_body = [\n            ResidualGroup(\n                conv,\n                n_feats,\n                kernel_size,\n                reduction,\n                act=act,\n                res_scale=1.0,\n                n_resblocks=nb,\n            )\n            for _ in range(ng)\n        ]\n\n        modules_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        modules_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.add_mean = MeanShift(1.0, rgb_mean, rgb_std, 1)\n\n        self.head = nn.Sequential(*modules_head)\n        self.body = nn.Sequential(*modules_body)\n        self.tail = nn.Sequential(*modules_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = 
self.tail(res)\n        x = self.add_mean(x)\n\n        return x\n\n    def load_state_dict(self, state_dict, strict=False):\n        own_state = self.state_dict()\n        for name, param in state_dict.items():\n            if name in own_state:\n                if isinstance(param, nn.Parameter):\n                    param = param.data\n                try:\n                    own_state[name].copy_(param)\n                except Exception:\n                    if name.find(\"tail\") >= 0:\n                        print(\"Replace pre-trained upsampler to new one...\")\n                    else:\n                        raise RuntimeError(\n                            \"While copying the parameter named {}, \"\n                            \"whose dimensions in the model are {} and \"\n                            \"whose dimensions in the checkpoint are {}.\".format(\n                                name, own_state[name].size(), param.size()\n                            )\n                        )\n            elif strict:\n                if name.find(\"tail\") == -1:\n                    raise KeyError('unexpected key \"{}\" in state_dict'.format(name))\n\n        if strict:\n            missing = set(own_state.keys()) - set(state_dict.keys())\n            if len(missing) > 0:\n                raise KeyError('missing keys in state_dict: \"{}\"'.format(missing))\n"
  },
  {
    "path": "codes/config/CinGAN/archs/rrdb.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\nclass ResidualDenseBlock_5C(nn.Module):\n    def __init__(self, nf=64, gc=32, bias=True):\n        super(ResidualDenseBlock_5C, self).__init__()\n        # gc: growth channel, i.e. intermediate channels\n        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)\n        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)\n        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1\n        )\n\n    def forward(self, x):\n        x1 = self.lrelu(self.conv1(x))\n        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))\n        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))\n        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))\n        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))\n        return x5 * 0.2 + x\n\n\nclass RRDB(nn.Module):\n    \"\"\"Residual in Residual Dense Block\"\"\"\n\n    def __init__(self, nf, gc=32):\n        super(RRDB, self).__init__()\n        self.RDB1 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB2 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB3 = ResidualDenseBlock_5C(nf, gc)\n\n    def forward(self, x):\n        out = self.RDB1(x)\n        out = self.RDB2(out)\n        out = self.RDB3(out)\n        return out * 0.2 + x\n\n\n@ARCH_REGISTRY.register()\nclass RRDBNet(nn.Module):\n    def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4):\n        super(RRDBNet, self).__init__()\n        self.upscale = upscale\n        RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.RRDB_trunk = make_layer(RRDB_block_f, nb)\n        self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        #### upsampling\n        self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        if upscale == 4:\n            self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.conv_first(x)\n        trunk = self.trunk_conv(self.RRDB_trunk(fea))\n        fea = fea + trunk\n\n        if self.upscale == 2 or self.upscale == 3:\n            fea = self.lrelu(\n                self.upconv1(\n                    F.interpolate(fea, scale_factor=self.upscale, mode=\"nearest\")\n                )\n            )\n        if self.upscale == 4:\n            fea = self.lrelu(\n                self.upconv1(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n            fea = self.lrelu(\n                self.upconv2(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n        out = self.conv_last(self.lrelu(self.HRconv(fea)))\n\n        return out\n"
  },
  {
    "path": "codes/config/CinGAN/archs/srresnet.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\n@ARCH_REGISTRY.register()\nclass MSRResNet(nn.Module):\n    \"\"\"modified SRResNet\"\"\"\n\n    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):\n        super(MSRResNet, self).__init__()\n        self.upscale = upscale\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        basic_block = functools.partial(ResidualBlock_noBN, nf=nf)\n        self.recon_trunk = make_layer(basic_block, nb)\n\n        # upsampling\n        if self.upscale == 2:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n        elif self.upscale == 3:\n            self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(3)\n        elif self.upscale == 4:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1\n        )\n        if self.upscale == 4:\n            initialize_weights(self.upconv2, 0.1)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv_first(x))\n        out = self.recon_trunk(fea)\n\n        if self.upscale == 4:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n        elif self.upscale == 3 or self.upscale == 2:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n\n        out = self.conv_last(self.lrelu(self.HRconv(out)))\n        base = F.interpolate(\n            x, scale_factor=self.upscale, mode=\"bilinear\", align_corners=False\n        )\n        out += base\n        return out\n"
  },
  {
    "path": "codes/config/CinGAN/archs/translator.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\n@ARCH_REGISTRY.register()\nclass Translator(nn.Module):\n    def __init__(self, nb, nf, scale=4, zero_tail=False, conv=default_conv):\n        super().__init__()\n\n        self.scale = scale\n        # define head module\n        if scale >= 1:\n            m_head = [conv(3, nf, 3)]\n        else:\n            s = int(1 / scale)\n            m_head = [nn.Conv2d(3, nf, kernel_size=2 * s + 1, stride=s, padding=s)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, nf, 3, act=nn.ReLU(True), res_scale=1) for _ in range(nb)\n        ]\n        m_body.append(conv(nf, nf, 3))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, nf, act=False) if scale > 1 else nn.Identity(),\n            conv(nf, 3, 3),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = 
nn.Sequential(*m_tail)\n\n        if zero_tail:\n            nn.init.constant_(self.tail[-1].weight, 0)\n            nn.init.constant_(self.tail[-1].bias, 0)\n\n    def forward(self, x):\n\n        f = self.head(x)\n        f = self.body(f)\n        f = self.tail(f)\n\n        if self.scale == 1:\n            x = f + x\n        else:\n            x = f + F.interpolate(x, scale_factor=self.scale)\n        \n        return x\n"
  },
  {
    "path": "codes/config/CinGAN/archs/vgg.py",
    "content": "import os\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn as nn\nfrom torchvision.models import vgg as vgg\n\nfrom utils.registry import ARCH_REGISTRY\n\nVGG_PRETRAIN_PATH = \"checkpoints/pretrained_models/vgg19-dcbb9e9d.pth\"\nNAMES = {\n    \"vgg11\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg13\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg16\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"pool5\",\n    ],\n    \"vgg19\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"conv3_4\",\n        \"relu3_4\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"conv4_4\",\n        \"relu4_4\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"conv5_4\",\n        \"relu5_4\",\n        \"pool5\",\n    ],\n}\n\n\ndef insert_bn(names):\n    \"\"\"Insert bn layer after each conv.\n    Args:\n        names (list): The list of layer names.\n    Returns:\n        list: The list of layer names with bn layers.\n    \"\"\"\n    names_bn = []\n    for name in names:\n        names_bn.append(name)\n        if \"conv\" in name:\n            position = name.replace(\"conv\", \"\")\n            names_bn.append(\"bn\" + position)\n    return names_bn\n\n\n@ARCH_REGISTRY.register()\nclass VGGFeatureExtractor(nn.Module):\n    \"\"\"VGG network for feature extraction.\n    In this implementation, we allow users to choose whether use normalization\n    in the input feature and the type of vgg network. 
Note that the pretrained\n    path must fit the vgg type.\n    Args:\n        layer_name_list (list[str]): Forward function returns the corresponding\n            features according to the layer_name_list.\n            Example: {'relu1_1', 'relu2_1', 'relu3_1'}.\n        vgg_type (str): Set the type of vgg network. Default: 'vgg19'.\n        use_input_norm (bool): If True, normalize the input image. Importantly,\n            the input feature must in the range [0, 1]. Default: True.\n        range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].\n            Default: False.\n        requires_grad (bool): If true, the parameters of VGG network will be\n            optimized. Default: False.\n        remove_pooling (bool): If true, the max pooling operations in VGG net\n            will be removed. Default: False.\n        pooling_stride (int): The stride of max pooling operation. Default: 2.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_name_list,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        requires_grad=False,\n        remove_pooling=False,\n        pooling_stride=2,\n    ):\n        super(VGGFeatureExtractor, self).__init__()\n\n        self.layer_name_list = layer_name_list\n        self.use_input_norm = use_input_norm\n        self.range_norm = range_norm\n\n        self.names = NAMES[vgg_type.replace(\"_bn\", \"\")]\n        if \"bn\" in vgg_type:\n            self.names = insert_bn(self.names)\n\n        # only borrow layers that will be used to avoid unused params\n        max_idx = 0\n        for v in layer_name_list:\n            idx = self.names.index(v)\n            if idx > max_idx:\n                max_idx = idx\n\n        if os.path.exists(VGG_PRETRAIN_PATH):\n            vgg_net = getattr(vgg, vgg_type)(pretrained=False)\n            state_dict = torch.load(\n                VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage\n            )\n            vgg_net.load_state_dict(state_dict)\n        else:\n            vgg_net = getattr(vgg, vgg_type)(pretrained=True)\n\n        features = vgg_net.features[: max_idx + 1]\n\n        modified_net = OrderedDict()\n        for k, v in zip(self.names, features):\n            if \"pool\" in k:\n                # if remove_pooling is true, pooling operation will be removed\n                if remove_pooling:\n                    continue\n                else:\n                    # in some cases, we may want to change the default stride\n                    modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride)\n            else:\n                modified_net[k] = v\n\n        self.vgg_net = nn.Sequential(modified_net)\n\n        if not requires_grad:\n            self.vgg_net.eval()\n            for param in self.parameters():\n                param.requires_grad = False\n        else:\n            self.vgg_net.train()\n            for param in self.parameters():\n                param.requires_grad = True\n\n        if self.use_input_norm:\n            # the mean is for image with range [0, 1]\n            self.register_buffer(\n                \"mean\", torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)\n            )\n            # the std is for image with range [0, 1]\n            self.register_buffer(\n                \"std\", torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)\n            )\n\n    def forward(self, x):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, 
c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        if self.range_norm:\n            x = (x + 1) / 2\n        if self.use_input_norm:\n            x = (x - self.mean) / self.std\n\n        output = {}\n        for key, layer in self.vgg_net._modules.items():\n            x = layer(x)\n            if key in self.layer_name_list:\n                output[key] = x.clone()\n\n        return output\n"
  },
  {
    "path": "codes/config/CinGAN/count_flops.py",
    "content": "import argparse\nimport sys\n\nimport torch\nfrom torchsummaryX import summary\n\nsys.path.append(\"../../\")\nimport utils.option as option\nfrom models import create_model\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"--opt\",\n    type=str,\n    default=\"options/setting1/test/test_setting1_x4.yml\",\n    help=\"Path to option YMAL file of Predictor.\",\n)\nargs = parser.parse_args()\nopt = option.parse(args.opt, root_path=\".\", is_train=True)\n\nopt = option.dict_to_nonedict(opt)\nmodel = create_model(opt)\n\ntest_tensor = torch.randn(1, 3, 270, 180).cuda()\nfor name, net in model.networks.items():\n    summary(net.cuda(), x=test_tensor)\n    print(\"Above are results for net {}\".format(name))\n    input()\n"
  },
  {
    "path": "codes/config/CinGAN/inference.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport os.path as osp\nimport random\nimport sys\nimport cv2\nfrom collections import defaultdict\nfrom glob import glob\nfrom tqdm import tqdm\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom data.data_sampler import DistIterSampler\nfrom metrics import IQA\nfrom models import create_model\n\n\n\n#### options\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"-opt\",\n    type=str,\n    default=\"options/test/2020Track2.yml\",\n    help=\"Path to options YMAL file.\",\n)\nparser.add_argument(\"-input_dir\", type=str, default=\"../../../data_samples/LR\")\nparser.add_argument(\"-output_dir\", type=str, default=\"../../../data_samples/BSRGAN\")\nargs = parser.parse_args()\nopt = option.parse(args.opt, is_train=False)\n\nopt = option.dict_to_nonedict(opt)\n\nmodel = create_model(opt)\n\nif not osp.exists(args.output_dir):\n    os.makedirs(args.output_dir)\n\ntest_files = glob(osp.join(args.input_dir, \"*\"))\nfor inx, path in tqdm(enumerate(test_files)):\n    name = path.split(\"/\")[-1].split(\".\")[0]\n\n    img = cv2.imread(path)[:, :, [2, 1, 0]]\n    img = img.transpose(2, 0, 1)[None] / 255\n    img_t = torch.as_tensor(np.ascontiguousarray(img)).float()\n\n    model.test({\"src\": img_t}, crop_size=512)\n    outdict = model.get_current_visuals()\n\n    sr = outdict[\"sr\"]\n    sr_im = util.tensor2img(sr)\n\n    save_path = osp.join(args.output_dir, \"{}_x{}.png\".format(name, opt[\"scale\"]))\n    cv2.imwrite(save_path, sr_im)\n"
  },
  {
    "path": "codes/config/CinGAN/models/__init__.py",
    "content": "import importlib\nimport logging\nimport os\nimport os.path as osp\n\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\nmodel_folder = osp.dirname(__file__)\nmodel_names = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(model_folder)\n    if v.endswith(\"_model.py\")\n]\n_model_modules = [\n    importlib.import_module(f\"models.{file_name}\") for file_name in model_names\n]\n\n\ndef create_model(opt, **kwarg):\n    model = opt[\"model\"]\n    m = MODEL_REGISTRY.get(model)(opt, **kwarg)\n    logger.info(\"Model [{:s}] is created.\".format(m.__class__.__name__))\n    return m\n"
  },
  {
    "path": "codes/config/CinGAN/models/base_model.py",
    "content": "import logging\nimport os\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\n\nfrom archs import build_loss, build_network, build_scheduler\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass BaseModel:\n    def __init__(self, opt):\n\n        self.opt = opt\n\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n            self.world_size = torch.distributed.get_world_size()\n        else:\n            self.rank = 0  # non dist training\n\n        self.device = torch.device(\"cuda\" if opt[\"gpu_ids\"] is not None else \"cpu\")\n        self.is_train = opt[\"is_train\"]\n        self.log_dict = OrderedDict()\n\n        self.data_names = []\n        self.networks = {}\n\n        self.optimizers = {}\n        self.schedulers = {}\n\n    def setup_train(self, train_opt):\n        # define losses\n        loss_opt = train_opt[\"losses\"]\n        self.losses = self.build_losses(loss_opt)\n\n        # build optmizers\n        optimizer_opts = train_opt[\"optimizers\"]\n        self.optimizers = self.build_optimizers(optimizer_opts)\n\n        # set schedulers\n        scheduler_opts = train_opt[\"schedulers\"]\n        self.schedulers = self.build_schedulers(scheduler_opts)\n\n        # set to training state\n        self.set_network_state(self.networks.keys(), \"train\")\n\n    def feed_data(self, data):\n        pass\n\n    def optimize_parameters(self):\n        pass\n\n    def get_current_visuals(self):\n        pass\n\n    def get_current_losses(self):\n        pass\n\n    def print_network(self):\n        pass\n\n    def save(self, label):\n        pass\n\n    def load(self):\n        pass\n\n    def build_network(self, net_opt):\n\n        net = build_network(net_opt)\n\n        if isinstance(net, nn.Module):\n            net = self.model_to_device(net)\n\n            if net_opt.get(\"pretrain\"):\n                pretrain = net_opt.pop(\"pretrain\")\n                self.load_network(net, pretrain[\"path\"], pretrain[\"strict_load\"])\n\n            self.print_network(net)\n        return net\n\n    def build_losses(self, loss_opt):\n        losses = {}\n\n        defined_loss_names = list(loss_opt.keys())\n        assert set(defined_loss_names).issubset(set(self.loss_names))\n\n        for name in defined_loss_names:\n            loss_conf = loss_opt.get(name)\n            if loss_conf[\"weight\"] > 0:\n                self.loss_weights[name] = loss_conf.pop(\"weight\")\n                losses[name] = build_loss(loss_conf).to(self.device)\n\n        return losses\n\n    def build_optimizers(self, optim_opts):\n        optimizers = {}\n\n        if \"default\" in optim_opts.keys():\n            default_optim = optim_opts.pop(\"default\")\n\n        defined_optimizer_names = list(optim_opts.keys())\n        assert set(defined_optimizer_names).issubset(self.networks.keys())\n\n        for name in defined_optimizer_names:\n            optim_opt = optim_opts[name]\n            if optim_opt is None:\n                optim_opt = default_optim.copy()\n\n            params = []\n            for v in self.networks[name].parameters():\n                if v.requires_grad:\n                    params.append(v)\n\n            optim_type = optim_opt.pop(\"type\")\n            optimizer = getattr(torch.optim, optim_type)(params=params, **optim_opt)\n            optimizers[name] = optimizer\n\n  
      return optimizers\n\n    def build_schedulers(self, scheduler_opts):\n        \"\"\"Set up scheduler.\"\"\"\n        schedulers = {}\n        if \"default\" in scheduler_opts.keys():\n            default_opt = scheduler_opts.pop(\"default\")\n\n        for name in self.optimizers.keys():\n            scheduler_opt = scheduler_opts[name]\n            if scheduler_opt is None:\n                scheduler_opt = default_opt.copy()\n\n            schedulers[name] = build_scheduler(self.optimizers[name], scheduler_opt)\n\n        return schedulers\n\n    def model_to_device(self, net):\n        \"\"\"Model to device. It also warps models with DistributedDataParallel\n        or DataParallel.\n        Args:\n            net (nn.Module)\n        \"\"\"\n        net = net.to(self.device)\n        if self.opt[\"dist\"]:\n            net = DistributedDataParallel(net, device_ids=[torch.cuda.current_device()])\n        else:\n            net = DataParallel(net)\n        return net\n\n    def print_network(self, net):\n        # Generator\n        s, n = self.get_network_description(net)\n        if isinstance(net, nn.DataParallel) or isinstance(net, DistributedDataParallel):\n            net_struc_str = \"{} - {}\".format(\n                net.__class__.__name__, net.module.__class__.__name__\n            )\n        else:\n            net_struc_str = \"{}\".format(net.__class__.__name__)\n        if self.rank <= 0:\n            logger.info(\n                \"Network G structure: {}, with parameters: {:,d}\".format(\n                    net_struc_str, n\n                )\n            )\n            logger.info(s)\n\n    def set_optimizer(self, names, operation):\n        for name in names:\n            getattr(self.optimizers[name], operation)()\n\n    def set_requires_grad(self, names, requires_grad):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                for v in self.networks[name].parameters():\n                    v.requires_grad = requires_grad\n\n    def set_network_state(self, names, state):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                getattr(self.networks[name], state)()\n\n    def clip_grad_norm(self, names, norm):\n        for name in names:\n            nn.utils.clip_grad_norm_(self.networks[name].parameters(), max_norm=norm)\n\n    def _set_lr(self, lr_groups_l):\n        \"\"\"set learning rate for warmup,\n        lr_groups_l: list for lr_groups. 
one per optimizer.\"\"\"\n        for optimizer, lr_groups in zip(self.optimizers.values(), lr_groups_l):\n            for param_group, lr in zip(optimizer.param_groups, lr_groups):\n                param_group[\"lr\"] = lr\n\n    def _get_init_lr(self):\n        # get the initial lr, which is set by the scheduler\n        init_lr_groups_l = []\n        for optimizer in self.optimizers.values():\n            init_lr_groups_l.append([v[\"initial_lr\"] for v in optimizer.param_groups])\n        return init_lr_groups_l\n\n    def update_learning_rate(self, cur_iter, warmup_iter=-1):\n        for _, scheduler in self.schedulers.items():\n            scheduler.step()\n        #### set up warm up learning rate\n        if cur_iter < warmup_iter:\n            # get initial lr for each group\n            init_lr_g_l = self._get_init_lr()\n            # modify warming-up learning rates\n            warm_up_lr_l = []\n            for init_lr_g in init_lr_g_l:\n                warm_up_lr_l.append([v / warmup_iter * cur_iter for v in init_lr_g])\n            # set learning rate\n            self._set_lr(warm_up_lr_l)\n\n    def get_current_learning_rate(self):\n        # return self.schedulers[0].get_lr()[0]\n        return list(self.optimizers.values())[0].param_groups[0][\"lr\"]\n\n    def get_network_description(self, network):\n        \"\"\"Get the string and total parameters of the network\"\"\"\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        s = str(network)\n        n = sum(map(lambda x: x.numel(), network.parameters()))\n        return s, n\n\n    def save_network(self, network, network_label, iter_label):\n        save_filename = \"{}_{}.pth\".format(iter_label, network_label)\n        save_path = os.path.join(self.opt[\"path\"][\"models\"], save_filename)\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        state_dict = network.state_dict()\n        for key, param in state_dict.items():\n            state_dict[key] = param.cpu()\n        torch.save(state_dict, save_path)\n\n    def save(self, iter_label):\n        for name in self.optimizers.keys():\n            self.save_network(self.networks[name], name, iter_label)\n\n    def load_network(self, network, load_path, strict=True):\n        if load_path is not None:\n            if isinstance(network, nn.DataParallel) or isinstance(\n                network, DistributedDataParallel\n            ):\n                network = network.module\n            load_net = torch.load(load_path)\n            load_net_clean = OrderedDict()  # remove unnecessary 'module.'\n            for k, v in load_net.items():\n                if k.startswith(\"module.\"):\n                    load_net_clean[k[7:]] = v\n                else:\n                    load_net_clean[k] = v\n            network.load_state_dict(load_net_clean, strict=strict)\n\n    def save_training_state(self, epoch, iter_step):\n        \"\"\"Saves training state during training, which will be used for resuming\"\"\"\n        state = {\"epoch\": epoch, \"iter\": iter_step, \"schedulers\": {}, \"optimizers\": {}}\n        for k, s in self.schedulers.items():\n            state[\"schedulers\"][k] = s.state_dict()\n        for k, o in self.optimizers.items():\n            state[\"optimizers\"][k] = o.state_dict()\n        save_filename = \"{}.state\".format(iter_step)\n        
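# the .state file bundles the epoch/iter counters with the optimizer and\n        # scheduler state_dicts, so a run can be resumed via resume_training()\n        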
save_path = os.path.join(self.opt[\"path\"][\"training_state\"], save_filename)\n        torch.save(state, save_path)\n\n    def resume_training(self, resume_state):\n        \"\"\"Resume the optimizers and schedulers for training\"\"\"\n        resume_optimizers = resume_state[\"optimizers\"]\n        resume_schedulers = resume_state[\"schedulers\"]\n        assert len(resume_optimizers) == len(\n            self.optimizers\n        ), \"Wrong lengths of optimizers\"\n        assert len(resume_schedulers) == len(\n            self.schedulers\n        ), \"Wrong lengths of schedulers\"\n        for name, o in resume_optimizers.items():\n            self.optimizers[name].load_state_dict(o)\n        for name, s in resume_schedulers.items():\n            self.schedulers[name].load_state_dict(s)\n\n    def reduce_loss_dict(self, loss_dict):\n        \"\"\"reduce loss dict.\n        In distributed training, it averages the losses among different GPUs .\n        Args:\n            loss_dict (OrderedDict): Loss dict.\n        \"\"\"\n        with torch.no_grad():\n            if self.opt[\"dist\"]:\n                keys = []\n                losses = []\n                for name, value in loss_dict.items():\n                    keys.append(name)\n                    losses.append(value)\n                losses = torch.stack(losses, 0)\n                torch.distributed.reduce(losses, dst=0)\n                if self.rank == 0:\n                    losses /= self.world_size\n                loss_dict = {key: loss for key, loss in zip(keys, losses)}\n\n            log_dict = OrderedDict()\n            for name, value in loss_dict.items():\n                log_dict[name] = value.mean().item()\n\n            return log_dict\n\n    def get_current_log(self):\n        return self.log_dict\n"
  },
  {
    "path": "codes/config/CinGAN/models/cingan_model.py",
    "content": "import logging\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.registry import MODEL_REGISTRY\n\nfrom .base_model import BaseModel\nfrom .trans_model import ShuffleBuffer\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass CinGANModel(BaseModel):\n    def __init__(self, opt):\n        super().__init__(opt)\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n        else:\n            self.rank = -1  # non dist training\n\n        self.data_names = [\"syn_lr\", \"syn_hr\", \"real_lr\"]\n\n        self.network_names = [\"netSR\", \"netG1\", \"netG2\", \"netG3\", \"netD1\", \"netD2\"]\n        self.networks = {}\n\n        self.loss_names = [\n            \"srd2_adv\",\n            \"sr_tv\",\n            \"srg3_cycle\",\n            \"g1d1_adv\",\n            \"g1g2_cycle\",\n            \"lr_tv\",\n        ]\n        self.loss_weights = {}\n        self.losses = {}\n        self.optimizers = {}\n\n        # define networks and load pretrained models\n        nets_opt = opt[\"networks\"]\n        defined_network_names = list(nets_opt.keys())\n        assert set(defined_network_names).issubset(set(self.network_names))\n        \n        for name in defined_network_names:\n            setattr(self, name, self.build_network(nets_opt[name]))\n            self.networks[name] = getattr(self, name)\n            \n        if self.is_train:\n            train_opt = opt[\"train\"]\n            # setup loss, optimizers, schedulers\n            self.setup_train(train_opt[\"train\"])\n            self.max_grad_norm = train_opt[\"max_grad_norm\"]\n\n            # buffer\n            self.fake_lr_buffer = ShuffleBuffer(train_opt[\"buffer_size\"])\n            self.fake_hr_buffer = ShuffleBuffer(train_opt[\"buffer_size\"])\n\n    def feed_data(self, data):\n\n        self.syn_lr = data[\"ref_src\"].to(self.device)\n        self.syn_hr = data[\"ref_tgt\"].to(self.device)\n        self.real_lr = data[\"src\"].to(self.device)\n    \n    def foward_trans(self):\n        self.fake_syn_lr = self.netG1(self.real_lr)\n        self.rec_real_lr = self.netG2(self.fake_syn_lr)\n    \n    def forward_sr(self):\n        \n        self.fake_syn_lr = self.netG1(self.real_lr)\n        self.fake_real_hr = self.netSR(self.fake_syn_lr)\n        self.rec_real_lr = self.netG3(self.fake_real_hr)\n\n    def optimize_parameters(self, step):\n        loss_dict = OrderedDict()\n        \n        # update trans\n        ## update generators\n        self.set_requires_grad([\"netD1\"], False)\n        self.foward_trans()\n\n        loss_G = 0\n\n        g1_adv_loss = self.calculate_gan_loss_G(\n            self.netD1, self.losses[\"g1d1_adv\"], self.syn_lr, self.fake_syn_lr\n        )\n        loss_dict[\"g1_adv\"] = g1_adv_loss.item()\n        loss_G += self.loss_weights[\"g1d1_adv\"] * g1_adv_loss\n\n        if self.losses.get(\"lr_tv\"):\n            lr_tv_loss = self.losses[\"lr_tv\"](self.fake_syn_lr)\n            loss_dict[\"lr_tv\"] = lr_tv_loss.item()\n            loss_G += self.loss_weights[\"lr_tv\"] * lr_tv_loss\n\n        g1g2_cycle = self.losses[\"g1g2_cycle\"](self.rec_real_lr, self.real_lr)\n        loss_dict[\"g1g2_cycle\"] = g1g2_cycle.item()\n        loss_G += self.loss_weights[\"g1g2_cycle\"] * g1g2_cycle\n\n        self.set_optimizer(names=[\"netG1\",\"netG2\"], operation=\"zero_grad\")\n        loss_G.backward()\n        self.clip_grad_norm([\"netG1\",\"netG2\"], norm=self.max_grad_norm)\n        
self.set_optimizer(names=[\"netG1\", \"netG2\"], operation=\"step\")\n\n        ## update D1\n        self.set_requires_grad([\"netD1\"], True)\n        loss_d1 = self.calculate_gan_loss_D(\n            self.netD1, self.losses[\"g1d1_adv\"], self.syn_lr,\n            self.fake_lr_buffer.choose(self.fake_syn_lr)\n        )\n        loss_dict[\"d1_adv\"] = loss_d1.item()\n        loss_D = self.loss_weights[\"g1d1_adv\"] * loss_d1\n\n        self.set_optimizer([\"netD1\"], \"zero_grad\")\n        loss_D.backward()\n        self.clip_grad_norm([\"netD1\"], self.max_grad_norm)\n        self.set_optimizer([\"netD1\"], \"step\")\n\n        # update sr\n        self.set_requires_grad([\"netD2\"], False)\n        self.forward_sr()\n\n        loss_G = 0\n\n        srd2_adv_g = self.calculate_gan_loss_G(\n            self.netD2, self.losses[\"srd2_adv\"], self.syn_hr, self.fake_real_hr\n        )\n        loss_dict[\"sr_adv\"] = srd2_adv_g.item()\n        loss_G += self.loss_weights[\"srd2_adv\"] * srd2_adv_g\n\n        if self.losses.get(\"sr_tv\"):\n            sr_tv_loss = self.losses[\"sr_tv\"](self.fake_real_hr)\n            loss_dict[\"sr_tv\"] = sr_tv_loss.item()\n            loss_G += self.loss_weights[\"sr_tv\"] * sr_tv_loss\n\n        srg3_cycle = self.losses[\"srg3_cycle\"](self.rec_real_lr, self.real_lr)\n        loss_dict[\"srg3_cycle\"] = srg3_cycle.item()\n        loss_G += self.loss_weights[\"srg3_cycle\"] * srg3_cycle\n\n\n        self.set_optimizer(names=[\"netG1\", \"netSR\", \"netG3\"], operation=\"zero_grad\")\n        loss_G.backward()\n        self.clip_grad_norm(names=[\"netG1\", \"netSR\", \"netG3\"], norm=self.max_grad_norm)\n        self.set_optimizer(names=[\"netG1\", \"netSR\", \"netG3\"], operation=\"step\")\n\n        ## update D2\n        self.set_requires_grad([\"netD2\"], True)\n\n        loss_d2 = self.calculate_gan_loss_D(\n            self.netD2, self.losses[\"srd2_adv\"], self.syn_hr,\n            self.fake_hr_buffer.choose(self.fake_real_hr.detach())\n        )\n        loss_dict[\"d2_adv\"] = loss_d2.item()\n        loss_D = self.loss_weights[\"srd2_adv\"] * loss_d2\n\n        self.set_optimizer(names=[\"netD2\"], operation=\"zero_grad\")\n        loss_D.backward()\n        self.clip_grad_norm([\"netD2\"], self.max_grad_norm)\n        self.set_optimizer(names=[\"netD2\"], operation=\"step\")\n\n        self.log_dict = loss_dict\n    \n    def calculate_gan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n\n        loss_real = criterion(d_pred_real, True, is_disc=True)\n        loss_fake = criterion(d_pred_fake, False, is_disc=True)\n\n        return (loss_real + loss_fake) / 2\n\n    def calculate_gan_loss_G(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake)\n        loss_real = criterion(d_pred_fake, True, is_disc=False)\n\n        return loss_real\n\n    def calculate_rgan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n        loss_real = criterion(\n            d_pred_real - d_pred_fake.detach().mean(), True, is_disc=False\n        )\n        loss_fake = criterion(\n            d_pred_fake - d_pred_real.detach().mean(), False, is_disc=False\n        )\n\n        loss = (loss_real + loss_fake) / 2\n\n        return loss\n\n    def calculate_rgan_loss_G(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake)\n        d_pred_real = netD(real).detach()\n        loss_real = 
criterion(d_pred_real - d_pred_fake.mean(), False, is_disc=False)\n        loss_fake = criterion(d_pred_fake - d_pred_real.mean(), True, is_disc=False)\n\n        loss = (loss_real + loss_fake) / 2\n\n        return loss\n\n    def test(self, data):\n        self.real_lr = data[\"src\"].to(self.device)\n        self.set_network_state([\"netSR\", \"netG1\"], \"eval\")\n        with torch.no_grad():\n            self.fake_syn_lr = self.netG1(self.real_lr)\n            self.fake_real_hr = self.netSR(self.fake_syn_lr)\n        self.set_network_state([\"netSR\", \"netG1\"], \"train\")\n\n    def get_current_visuals(self, need_GT=True):\n        out_dict = OrderedDict()\n        out_dict[\"lr\"] = self.real_lr.detach()[0].float().cpu()\n        out_dict[\"sr\"] = self.fake_real_hr.detach()[0].float().cpu()\n        return out_dict\n"
  },
  {
    "path": "codes/config/CinGAN/models/trans_model.py",
    "content": "import logging\nfrom collections import OrderedDict\nimport random\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.registry import MODEL_REGISTRY\n\nfrom .base_model import BaseModel\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass TransModel(BaseModel):\n    def __init__(self, opt):\n        super().__init__(opt)\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n        else:\n            self.rank = -1  # non dist training\n\n        self.data_names = [\"src\", \"tgt\"]\n\n        self.network_names = [\"netG1\", \"netG2\", \"netD1\"]\n        self.networks = {}\n\n        self.loss_names = [\n            \"g1d1_adv\",\n            \"g1_idt\",\n            \"g1g2_cycle\",\n            \"lr_tv\"\n        ]\n        self.loss_weights = {}\n        self.losses = {}\n        self.optimizers = {}\n\n        # define networks and load pretrained models\n        nets_opt = opt[\"networks\"]\n        defined_network_names = list(nets_opt.keys())\n        assert set(defined_network_names).issubset(set(self.network_names))\n        \n        for name in defined_network_names:\n            setattr(self, name, self.build_network(nets_opt[name]))\n            self.networks[name] = getattr(self, name)\n            \n        if self.is_train:\n            train_opt = opt[\"train\"]\n            # setup loss, optimizers, schedulers\n            self.setup_train(train_opt[\"train\"])\n\n            self.max_grad_norm = train_opt[\"max_grad_norm\"]\n            # buffer\n            self.fake_tgt_buffer = ShuffleBuffer(train_opt[\"buffer_size\"])\n\n    def feed_data(self, data):\n\n        self.src = data[\"src\"].to(self.device)\n        self.tgt = data[\"tgt\"].to(self.device)\n    \n    def forward(self):\n\n        self.fake_tgt = self.netG1(self.src)\n        self.rec_src = self.netG2(self.fake_tgt)\n\n    def optimize_parameters(self, step):\n        loss_dict = OrderedDict()\n\n        self.forward()\n\n        loss_G = 0\n        # set D fixed\n        self.set_requires_grad([\"netD1\"], False)\n\n        g1_adv_loss = self.calculate_gan_loss_G(\n            self.netD1, self.losses[\"g1d1_adv\"], self.tgt, self.fake_tgt\n        )\n        loss_dict[\"g1_adv\"] = g1_adv_loss.item()\n        loss_G += self.loss_weights[\"g1d1_adv\"] * g1_adv_loss\n\n        if self.losses.get(\"g1_idt\"):\n            self.tgt_idt = self.netG1(self.tgt)\n            g1_idt = self.losses[\"g1_idt\"](self.tgt, self.tgt_idt)\n            loss_dict[\"g1_idt\"] = g1_idt.item()\n            loss_G += self.loss_weights[\"g1_idt\"] * g1_idt\n        \n        if self.losses.get(\"lr_tv\"):\n            lr_tv = self.losses[\"lr_tv\"](self.fake_tgt)\n            loss_dict[\"lr_tv\"] = lr_tv.item()\n            loss_G += self.loss_weights[\"lr_tv\"] * lr_tv\n\n        g1g2_cycle = self.losses[\"g1g2_cycle\"](self.rec_src, self.src)\n        loss_dict[\"g1g2_cycle\"] = g1g2_cycle.item()\n        loss_G += self.loss_weights[\"g1g2_cycle\"] * g1g2_cycle\n\n        self.set_optimizer(names=[\"netG1\", \"netG2\"], operation=\"zero_grad\")\n        loss_G.backward()\n        self.clip_grad_norm(names=[\"netG1\", \"netG2\"], norm=self.max_grad_norm)\n        self.set_optimizer(names=[\"netG1\", \"netG2\"], operation=\"step\")\n\n        ## update D1, D2\n        self.set_requires_grad([\"netD1\"], True)\n\n        loss_D = 0\n        loss_d1 = self.calculate_gan_loss_D(\n            self.netD1, self.losses[\"g1d1_adv\"], self.tgt,\n            
self.fake_tgt_buffer.choose(self.fake_tgt.detach())\n        )\n        loss_dict[\"d1_adv\"] = loss_d1.item()\n        loss_D += loss_d1\n\n        self.set_optimizer(names=[\"netD1\"], operation=\"zero_grad\")\n        loss_D.backward()\n        self.clip_grad_norm(names=[\"netD1\"], norm=self.max_grad_norm)\n        self.set_optimizer(names=[\"netD1\"], operation=\"step\")\n\n        self.log_dict = loss_dict\n    \n    def calculate_gan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n\n        loss_real = criterion(d_pred_real, True, is_disc=True)\n        loss_fake = criterion(d_pred_fake, False, is_disc=True)\n\n        return (loss_real + loss_fake) / 2\n\n    def calculate_gan_loss_G(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake)\n        loss_real = criterion(d_pred_fake, True, is_disc=False)\n\n        return loss_real\n\n    def calculate_rgan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n        loss_real = criterion(\n            d_pred_real - d_pred_fake.detach().mean(), True, is_disc=False\n        )\n        loss_fake = criterion(\n            d_pred_fake - d_pred_real.detach().mean(), False, is_disc=False\n        )\n\n        loss = (loss_real + loss_fake) / 2\n\n        return loss\n\n    def calculate_rgan_loss_G(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake)\n        d_pred_real = netD(real).detach()\n        loss_real = criterion(d_pred_real - d_pred_fake.mean(), False, is_disc=False)\n        loss_fake = criterion(d_pred_fake - d_pred_real.mean(), True, is_disc=False)\n\n        loss = (loss_real + loss_fake) / 2\n\n        return loss\n\n    def test(self, data):\n        self.src = data[\"src\"].to(self.device)\n        self.netG1.eval()\n        with torch.no_grad():\n            self.fake_tgt = self.netG1(self.src)\n        self.netG1.train()\n\n    def get_current_visuals(self, need_GT=True):\n        out_dict = OrderedDict()\n        out_dict[\"lr\"] = self.src.detach()[0].float().cpu()\n        out_dict[\"sr\"] = self.fake_tgt.detach()[0].float().cpu()\n        return out_dict\n\nclass ShuffleBuffer():\n    \"\"\"Randomly choose between previously generated images and ones produced by the latest generator.\n    :param buffer_size: the size of image buffer\n    :type buffer_size: int\n    \"\"\"\n\n    def __init__(self, buffer_size):\n        \"\"\"Initialize the ShuffleBuffer class.\n        :param buffer_size: the size of image buffer\n        :type buffer_size: int\n        \"\"\"\n        self.buffer_size = buffer_size\n        self.num_imgs = 0\n        self.images = []\n\n    def choose(self, images, prob=0.5):\n        \"\"\"Return a batch of images from the pool.\n        :param images: the latest images generated by the generator\n        :type images: torch.Tensor\n        :param prob: probability (0~1) of returning previous images from the buffer\n        :type prob: float\n        :return: images chosen from the buffer\n        :rtype: torch.Tensor\n        \"\"\"\n        return_images = []\n        for image in images:\n            image = torch.unsqueeze(image.data, 0)\n            if self.num_imgs < self.buffer_size:\n                self.images.append(image)\n                return_images.append(image)\n                self.num_imgs += 1\n            else:\n                p = random.uniform(0, 1)\n                if p < prob:\n                    idx = random.randint(0, self.buffer_size - 
1)\n                    stored_image = self.images[idx].clone()\n                    self.images[idx] = image\n                    return_images.append(stored_image)\n                else:\n                    return_images.append(image)\n        return_images = torch.cat(return_images, 0)\n        return return_images\n"
  },
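  {
    "path": "codes/config/CinGAN/examples/shuffle_buffer_demo.py",
    "content": "# A minimal usage sketch of ShuffleBuffer: the discriminator is fed a mixture\n# of freshly generated fakes and fakes stored from earlier iterations, which\n# helps stabilise GAN training. The import path below is an assumption; point\n# it at the module that actually defines ShuffleBuffer.\nimport torch\n\nfrom models.trans_model import ShuffleBuffer  # assumed location\n\nbuffer = ShuffleBuffer(buffer_size=16)\n\nfor step in range(4):\n    # stand-in for a batch of generator outputs with shape (N, C, H, W)\n    fake_batch = torch.randn(8, 3, 32, 32)\n    # each returned image is either the fresh fake or, with probability prob,\n    # a previously stored fake (which is then replaced by the fresh one)\n    mixed_batch = buffer.choose(fake_batch, prob=0.5)\n    assert mixed_batch.shape == fake_batch.shape\n"
  },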
  {
    "path": "codes/config/CinGAN/options/test/sr/2017Track1.yml",
    "content": "#### general settings\nname: 2017Track1\nuse_tb_logger: false\nmodel: CinGANModel\nscale: 4\ngpu_ids: [0]\n\nmetrics: [psnr, ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2017Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test2:\n  #   name: 2018Track2\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_mild.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test3:\n  #   name: 2018Track3\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_difficult.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test4:\n  #   name: 2018Track4\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_wild.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test5:\n  #   name: 2020Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1_valid_input.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2020/track1_valid_gt.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: log/CinGAN2017Track1/models/latest_netSR.pth\n      strict_load: true\n  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: log/CinGAN2017Track1/models/latest_netG1.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/CinGAN/options/test/sr/2018Track2.yml",
    "content": "#### general settings\nname: 2018Track2\nuse_tb_logger: false\nmodel: CinGANModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [best_psnr, best_ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  # test1:\n  #   name: 2017Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  test2:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test3:\n  #   name: 2018Track3\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_difficult.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test4:\n  #   name: 2018Track4\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_wild.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test5:\n  #   name: 2020Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1_valid_input.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2020/track1_valid_gt.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: log/CinGAN2018Track2/models/latest_netSR.pth\n      strict_load: true\n  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: log/CinGAN2018Track2/models/latest_netG1.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/CinGAN/options/test/sr/2018Track4.yml",
    "content": "#### general settings\nname: 2018Track4\nuse_tb_logger: false\nmodel: CinGANModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [best_psnr, best_ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  # test1:\n  #   name: 2017Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test2:\n  #   name: 2018Track2\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test3:\n  #   name: 2018Track3\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_difficult.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  test4:\n    name: 2018Track4\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test5:\n  #   name: 2020Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1_valid_input.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2020/track1_valid_gt.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: log/CinGAN2018Track4/models/latest_netSR.pth\n      strict_load: true\n  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: log/CinGAN2018Track4/models/latest_netG1.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/CinGAN/options/test/sr/2020Track1.yml",
    "content": "#### general settings\nname: 2020Track1\nuse_tb_logger: false\nmodel: CinGANModel\nscale: 4\ngpu_ids: [1]\n\nmetrics: [psnr, ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  # test1:\n  #   name: 2017Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test2:\n  #   name: 2018Track2\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test3:\n  #   name: 2018Track3\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_difficult.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test4:\n  #   name: 2018Track4\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  test5:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: log/CinGAN2020Track1/models/latest_netSR.pth\n      strict_load: true\n  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: log/CinGAN2020Track1/models/latest_netG1.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/CinGAN/options/train/sr/2017Track2.yml",
    "content": "#### general settings\nname: CinGAN2017Track2\nuse_tb_logger: false\nmodel: CinGANModel\nscale: 4\ngpu_ids: [5]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: PairedRefDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_ref_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_ref_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/train_LR/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2017Track1_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: ~\n      strict_load: log/Trans2017Track1/models/latest_netD2.pth\n    \n  #### network structures  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: log/Trans2017Track1/models/latest_netG1.pth\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: log/Trans2017Track1/models/latest_netD1.pth\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: log/Trans2017Track1/models/latest_netG2.pth\n      strict_load: true\n\n  netG3:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 0.25\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  max_grad_norm: 50\n  buffer_size: 16\n\n  losses:\n    srd2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.5\n    \n    sr_tv:\n      type: TVLoss\n      penealty: MSELoss\n      weight: 2\n\n    srg3_cycle:\n      type: L1Loss\n      weight: 10\n\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    lr_tv:\n      type: TVLoss\n      penealty: MSELoss\n      weight: 0.5\n    \n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n      lr: !!float 1e-4\n      betas: [0.5, 0.999]\n    netSR: ~\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n    netG3: ~\n  \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/CinGAN/options/train/sr/2018Track2.yml",
    "content": "#### general settings\nname: CinGAN2018Track2\nuse_tb_logger: false\nmodel: CinGANModel\nscale: 4\ngpu_ids: [6]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: PairedRefDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_ref_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_ref_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track2_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: ~\n      strict_load: log/Trans2018Track2/models/latest_netD2.pth\n    \n  #### network structures  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: log/Trans2018Track2/models/latest_netG1.pth\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: log/Trans2018Track2/models/latest_netD1.pth\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: log/Trans2018Track2/models/latest_netG2.pth\n      strict_load: true\n\n  netG3:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 0.25\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  max_grad_norm: 50\n  buffer_size: 16\n\n  losses:\n    srd2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.5\n    \n    sr_tv:\n      type: TVLoss\n      penealty: MSELoss\n      weight: 2\n\n    srg3_cycle:\n      type: L1Loss\n      weight: 10\n\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    lr_tv:\n      type: TVLoss\n      penealty: MSELoss\n      weight: 0.5\n    \n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n      lr: !!float 1e-4\n      betas: [0.5, 0.999]\n    netSR: ~\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n    netG3: ~\n  \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3"
  },
  {
    "path": "codes/config/CinGAN/options/train/sr/2018Track4.yml",
    "content": "#### general settings\nname: CinGAN2018Track4\nuse_tb_logger: false\nmodel: CinGANModel\nscale: 4\ngpu_ids: [1]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: PairedRefDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 50]\n\n    dataroot_ref_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_ref_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/x4.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track4_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\n# netSR:\n#   which_network: RRDBNet\n#   setting:\n#     in_nc: 3\n#     out_nc: 3\n#     nf: 64\n#     nb: 23\n#     upscale: 4\n#   pretrain:\n#     path: ../../../checkpoints/ESRGAN/RRDB_PSNR_x4.pth\n#     strict_load: true\n\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: ~\n      strict_load: log/Trans2018Track4/models/latest_netD2.pth\n    \n  #### network structures  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: log/Trans2018Track4/models/latest_netG1.pth\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: log/Trans2018Track4/models/latest_netD1.pth\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: log/Trans2018Track4/models/latest_netG2.pth\n      strict_load: true\n\n  netG3:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 0.25\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  max_grad_norm: 50\n  buffer_size: 16\n\n  losses:\n    srd2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.5\n    \n    sr_tv:\n      type: TVLoss\n      penealty: MSELoss\n      weight: 2\n\n    srg3_cycle:\n      type: L1Loss\n      weight: 10\n\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    lr_tv:\n      type: TVLoss\n      penealty: MSELoss\n      weight: 0.5\n    \n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n      lr: !!float 1e-4\n      betas: [0.5, 0.999]\n    netSR: ~\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n    netG3: ~\n  \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n 
 manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3"
  },
  {
    "path": "codes/config/CinGAN/options/train/sr/2020Track1.yml",
    "content": "#### general settings\nname: CinGAN2020Track1\nuse_tb_logger: false\nmodel: CinGANModel\nscale: 4\ngpu_ids: [5]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: PairedRefDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 50]\n\n    dataroot_ref_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_ref_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/train_source.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2020Track1_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\n# netSR:\n#   which_network: RRDBNet\n#   setting:\n#     in_nc: 3\n#     out_nc: 3\n#     nf: 64\n#     nb: 23\n#     upscale: 4\n#   pretrain:\n#     path: ../../../checkpoints/ESRGAN/RRDB_PSNR_x4.pth\n#     strict_load: true\n\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: ~\n      strict_load: log/Trans2020Track1/models/100000_netD2.pth\n    \n  #### network structures  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: log/Trans2020Track1/models/100000_netG1.pth\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: log/Trans2020Track1/models/100000_netD1.pth\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: log/Trans2020Track1/models/100000_netG2.pth\n      strict_load: true\n\n  netG3:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 0.25\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  max_grad_norm: 50\n  buffer_size: 16\n\n  losses:\n    srd2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.5\n    \n    sr_tv:\n      type: TVLoss\n      penealty: MSELoss\n      weight: 2\n\n    srg3_cycle:\n      type: L1Loss\n      weight: 10\n\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    lr_tv:\n      type: TVLoss\n      penealty: MSELoss\n      weight: 0.5\n    \n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n      lr: !!float 1e-4\n      betas: [0.5, 0.999]\n    netSR: ~\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n    netG3: ~\n  \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      
gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3"
  },
  {
    "path": "codes/config/CinGAN/options/train/trans/2017Track2.yml",
    "content": "#### general settings\nname: Trans2017Track2\nuse_tb_logger: false\nmodel: TransModel\nscale: 1\ngpu_ids: [2]\nmetrics: [psnr, ssim]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n    \n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/train_LR/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 32\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: DIV2K\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/BicLR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n\n#### network structures\nnetworks:\n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: ~\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  buffer_size: 16\n  max_grad_norm: 50\n  \n  losses:\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    lr_tv:\n      type: TVLoss\n      penealty: MSELoss\n      weight: 0.5\n      \n    g1_idt:\n      type: L1Loss\n      weight: 5.0\n    \n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n        lr: !!float 2e-4\n        betas: [0.5, 0.999]\n    netG1: ~\n    netG2: ~\n    netD1: ~\n\n  niter: 100000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/CinGAN/options/train/trans/2018Track2.yml",
    "content": "#### general settings\nname: Trans2018Track2\nuse_tb_logger: false\nmodel: TransModel\nscale: 1\ngpu_ids: [3]\nmetrics: [psnr, ssim]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n    \n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 32\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: DIV2K\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/BicLR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n\n#### network structures\nnetworks:\n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: ~\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  buffer_size: 16\n  max_grad_norm: 50\n  \n  losses:\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    lr_tv:\n      type: TVLoss\n      penealty: MSELoss\n      weight: 0.5\n      \n    g1_idt:\n      type: L1Loss\n      weight: 5.0\n    \n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n        lr: !!float 2e-4\n        betas: [0.5, 0.999]\n    netG1: ~\n    netG2: ~\n    netD1: ~\n\n  niter: 100000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/CinGAN/options/train/trans/2018Track4.yml",
    "content": "#### general settings\nname: Trans2018Track4\nuse_tb_logger: false\nmodel: TransModel\nscale: 1\ngpu_ids: [4]\nmetrics: [psnr, ssim]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n    \n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/x4.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 32\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: DIV2K\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/BicLR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid.lmdb\n\n#### network structures\nnetworks:\n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: ~\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  buffer_size: 16\n  max_grad_norm: 50\n  \n  losses:\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    lr_tv:\n      type: TVLoss\n      penealty: MSELoss\n      weight: 0.5\n      \n    g1_idt:\n      type: L1Loss\n      weight: 5.0\n    \n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n        lr: !!float 2e-4\n        betas: [0.5, 0.999]\n    netG1: ~\n    netG2: ~\n    netD1: ~\n\n  niter: 100000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/CinGAN/options/train/trans/2020Track1.yml",
    "content": "#### general settings\nname: Trans2020Track1\nuse_tb_logger: false\nmodel: TransModel\nscale: 1\ngpu_ids: [0]\nmetrics: [psnr, ssim]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n    \n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/train_source.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 32\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: DIV2K\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/BicLR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid.lmdb\n\n#### network structures\nnetworks:\n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: ~\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  buffer_size: 16\n  max_grad_norm: 50\n  \n  losses:\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    lr_tv:\n      type: TVLoss\n      penealty: MSELoss\n      weight: 0.5\n      \n    g1_idt:\n      type: L1Loss\n      weight: 5.0\n    \n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n        lr: !!float 2e-4\n        betas: [0.5, 0.999]\n    netG1: ~\n    netG2: ~\n    netD1: ~\n\n  niter: 100000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
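  {
    "path": "codes/config/CinGAN/examples/load_options_demo.py",
    "content": "# A minimal sketch of how the optimizer section of the option files above is\n# laid out. The real code parses options through utils/option.py; plain PyYAML\n# is used here only to illustrate that `lr` and `betas` must sit at the same\n# indentation level as `type`, and that the !!float tag forces 2e-4 to parse\n# as a float rather than a string.\nimport yaml\n\nOPT = \"\"\"\ntrain:\n  optimizers:\n    default:\n      type: Adam\n      lr: !!float 2e-4\n      betas: [0.5, 0.999]\n\"\"\"\n\nadam_cfg = yaml.safe_load(OPT)[\"train\"][\"optimizers\"][\"default\"]\nassert adam_cfg[\"type\"] == \"Adam\"\nassert adam_cfg[\"lr\"] == 2e-4\nprint(adam_cfg)\n"
  },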
  {
    "path": "codes/config/CinGAN/test.py",
    "content": "import argparse\nimport logging\nimport os.path\nimport sys\nimport time\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\nfrom utils import bgr2ycbcr, imresize\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=False)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    util.mkdirs(\n        (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n    )\n\n    os.system(\"rm ./result\")\n    os.symlink(os.path.join(opt[\"path\"][\"results_root\"], \"..\"), \"./result\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    torch.backends.cudnn.benchmark = True\n\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"test_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO,\n        screen=True,\n        tofile=True,\n    )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    logger = logging.getLogger(\"base\")\n    logger.info(option.dict2str(opt))\n\n    # Create test dataset and dataloader\n    test_datasets = []\n    test_loaders = []\n\n    for phase, dataset_opt in 
sorted(opt[\"datasets\"].items()):\n\n        test_set = create_dataset(dataset_opt)\n        test_loader = create_dataloader(test_set, dataset_opt, opt[\"dist\"])\n\n        if rank == 0:\n            logger.info(\n                \"Number of test images in [{:s}]: {:d}\".format(\n                    dataset_opt[\"name\"], len(test_set)\n                )\n            )\n        test_datasets.append(test_set)\n        test_loaders.append(test_loader)\n\n    # load pretrained model by default\n    model = create_model(opt)\n\n    for test_dataset, test_loader in zip(test_datasets, test_loaders):\n\n        test_set_name = test_dataset.opt[\"name\"]\n        dataset_dir = os.path.join(opt[\"path\"][\"results_root\"], test_set_name)\n\n        if rank == 0:\n            logger.info(\"\\nTesting [{:s}]...\".format(test_set_name))\n            util.mkdir(dataset_dir)\n\n        validate(\n            model,\n            test_dataset,\n            test_loader,\n            opt,\n            measure,\n            dataset_dir,\n            test_set_name,\n            logger,\n        )\n\n\ndef validate(\n    model, dataset, dist_loader, opt, measure, dataset_dir, test_set_name, logger\n):\n\n    test_results = {}\n    test_results_y = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n        test_results_y[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        test_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        img_path = test_data[\"src_path\"][0]\n        img_name = img_path.split(\"/\")[-1].split(\".\")[0]\n\n        model.test(test_data)\n        visuals = model.get_current_visuals()\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n\n        suffix = opt[\"suffix\"]\n        if suffix:\n            save_img_path = os.path.join(dataset_dir, img_name + suffix + \".png\")\n        else:\n            save_img_path = os.path.join(dataset_dir, img_name + \".png\")\n        util.save_img(sr_img, save_img_path)\n\n        message = \"img:{:15s}; \".format(img_name)\n\n        crop_border = opt[\"crop_border\"] if opt[\"crop_border\"] else opt[\"scale\"]\n\n        if crop_border == 0:\n            cropped_sr_img = sr_img\n        else:\n            cropped_sr_img = sr_img[\n                crop_border:-crop_border, crop_border:-crop_border, :\n            ]\n\n        if \"tgt\" in test_data.keys():\n            gt_img = util.tensor2img(test_data[\"tgt\"][0].double().cpu())\n\n            if crop_border == 0:\n                cropped_gt_img = gt_img\n            else:\n                cropped_gt_img = gt_img[\n                    crop_border:-crop_border, crop_border:-crop_border, :\n                ]\n        else:\n            # keep both names defined so the Y-channel branch below cannot\n            # hit an undefined variable when no ground truth is provided\n            cropped_gt_img = gt_img = None\n\n        message += \"Scores - \"\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n            message += \"{}: {:.6f}; \".format(k, v)\n\n        if sr_img.shape[2] == 3:  # RGB image\n            sr_img_y = bgr2ycbcr(sr_img, only_y=True)\n            if crop_border == 0:\n                cropped_sr_img_y = sr_img_y * 255\n            else:\n                cropped_sr_img_y = (\n                    sr_img_y[crop_border:-crop_border, crop_border:-crop_border] * 255\n                )\n            if gt_img is not None:\n                gt_img_y = bgr2ycbcr(gt_img, only_y=True)\n                if crop_border == 0:\n                    cropped_gt_img_y = gt_img_y * 255\n                else:\n                    cropped_gt_img_y = (\n                        gt_img_y[crop_border:-crop_border, crop_border:-crop_border]\n                        * 255\n                    )\n            else:\n                cropped_gt_img_y = gt_img_y = None\n\n            message += \"Y Scores - \"\n            scores = measure(\n                res=cropped_sr_img_y, ref=cropped_gt_img_y, metrics=opt[\"metrics\"]\n            )\n            for k, v in scores.items():\n                test_results_y[k][idx] = v\n                message += \"{}: {:.6f}; \".format(k, v)\n\n        logger.info(message)\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n        for k, v in test_results_y.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    # log\n    avg_results = {}\n    message = \"Average Results for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger.info(message)\n\n    avg_results_y = {}\n    message = \"Average Results on Y channel for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results_y.items():\n            avg_results_y[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results_y[k])\n\n        logger.info(message)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
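  {
    "path": "codes/config/CinGAN/examples/crop_border_demo.py",
    "content": "# A minimal sketch of the border cropping performed in test.py before metrics\n# are computed: `crop_border` pixels (the scale factor by default) are removed\n# from every side so that boundary artefacts do not bias PSNR/SSIM. The helper\n# name below is illustrative, not a function from the repo.\nimport numpy as np\n\n\ndef crop_border(img, border):\n    \"\"\"Remove `border` pixels from each side of an HWC image.\"\"\"\n    if border == 0:\n        return img\n    return img[border:-border, border:-border, ...]\n\n\nsr_img = np.zeros((128, 128, 3), dtype=np.uint8)\nassert crop_border(sr_img, 4).shape == (120, 120, 3)\n"
  },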
  {
    "path": "codes/config/CinGAN/train.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport random\nimport sys\nimport time\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef setup_dataloaer(opt, logger):\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        rank = 0\n        world_size = 1\n\n    for phase, dataset_opt in opt[\"datasets\"].items():\n        if phase == \"train\":\n            train_set = create_dataset(dataset_opt)\n            train_loader = create_dataloader(train_set, dataset_opt, opt[\"dist\"])\n            total_iters = opt[\"train\"][\"niter\"]\n            total_epochs = total_iters // (len(train_loader) - 1) + 1\n            if rank == 0:\n                logger.info(\n                    \"Number of train images: {:,d}, iters: {:,d}\".format(\n                        len(train_set), len(train_loader)\n                    )\n                )\n                logger.info(\n                    \"Total epochs needed: {:d} for iters {:,d}\".format(\n                        total_epochs, opt[\"train\"][\"niter\"]\n                    )\n                )\n\n        elif phase == \"val\":\n            val_set = create_dataset(dataset_opt)\n            val_loader = create_dataloader(val_set, dataset_opt, opt[\"dist\"])\n            if rank == 0:\n                logger.info(\n                    \"Number of val images in [{:s}]: {:d}\".format(\n                        dataset_opt[\"name\"], len(val_set)\n                    )\n                )\n        else:\n            raise NotImplementedError(\"Phase [{:s}] is not recognized.\".format(phase))\n\n    assert train_loader is not None\n    assert val_loader is not None\n\n    return train_set, train_loader, val_set, val_loader, total_iters, total_epochs\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=True)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = 
torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    if opt[\"train\"].get(\"resume_state\", None) is None:\n        util.mkdir_and_rename(\n            opt[\"path\"][\"experiments_root\"]\n        )  # rename experiment folder if exists\n        util.mkdirs(\n            (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n        )\n        os.system(\"rm ./log\")\n        os.symlink(os.path.join(opt[\"path\"][\"experiments_root\"], \"..\"), \"./log\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    # fall back to a per-rank seed when no manual seed is given\n    seed = opt[\"train\"][\"manual_seed\"]\n    if seed is None:\n        seed = rank\n    util.set_random_seed(seed)\n\n    torch.backends.cudnn.benchmark = True\n    # torch.backends.cudnn.deterministic = True\n\n    # setup tensorboard and val logger\n    if rank == 0:\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger = SummaryWriter(log_dir=\"log/{}/tb_logger/\".format(opt[\"name\"]))\n\n        util.setup_logger(\n            \"val\",\n            opt[\"path\"][\"log\"],\n            \"val_\" + opt[\"name\"],\n            level=logging.INFO,\n            screen=True,\n            tofile=True,\n        )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    # config loggers; 
before this call, logging does not work\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"train_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO if rank == 0 else logging.ERROR,\n        screen=True,\n        tofile=True,\n    )\n\n    logger = logging.getLogger(\"base\")\n    if rank == 0:\n        logger.info(option.dict2str(opt))\n\n    # create dataset\n    (\n        train_set,\n        train_loader,\n        val_set,\n        val_loader,\n        total_iters,\n        total_epochs,\n    ) = setup_dataloaer(opt, logger)\n\n    # create model\n    model = create_model(opt)\n\n    # loading resume state if exists\n    if opt[\"train\"].get(\"resume_state\", None):\n        # distributed resuming: all load into default GPU\n        device_id = gpu\n        resume_state = torch.load(\n            opt[\"train\"][\"resume_state\"],\n            map_location=lambda storage, loc: storage.cuda(device_id),\n        )\n\n        logger.info(\n            \"Resuming training from epoch: {}, iter: {}.\".format(\n                resume_state[\"epoch\"], resume_state[\"iter\"]\n            )\n        )\n\n        start_epoch = resume_state[\"epoch\"]\n        current_step = resume_state[\"iter\"]\n        model.resume_training(resume_state)  # handle optimizers and schedulers\n\n    else:\n        current_step = 0\n        start_epoch = 0\n\n    logger.info(\n        \"Start training from epoch: {:d}, iter: {:d}\".format(start_epoch, current_step)\n    )\n    data_time, iter_time = time.time(), time.time()\n    avg_data_time = avg_iter_time = 0\n    count = 0\n    for epoch in range(start_epoch, total_epochs + 1):\n        for _, train_data in enumerate(train_loader):\n\n            current_step += 1\n            count += 1\n            if current_step > total_iters:\n                break\n\n            data_time = time.time() - data_time\n            avg_data_time = (avg_data_time * (count - 1) + data_time) / count\n\n            model.feed_data(train_data)\n            model.optimize_parameters(current_step)\n            model.update_learning_rate(\n                current_step, warmup_iter=opt[\"train\"][\"warmup_iter\"]\n            )\n\n            iter_time = time.time() - iter_time\n            avg_iter_time = (avg_iter_time * (count - 1) + iter_time) / count\n\n            # log\n            if current_step % opt[\"logger\"][\"print_freq\"] == 0:\n                logs = model.get_current_log()\n                message = (\n                    f\"<epoch:{epoch:3d}, iter:{current_step:8,d}, \"\n                    f\"lr:{model.get_current_learning_rate():.3e}> \"\n                )\n\n                message += f'[time (data): {avg_iter_time:.3f} ({avg_data_time:.3f})] '\n                for k, v in logs.items():\n                    message += \"{:s}: {:.4e}; \".format(k, v)\n                    # tensorboard logger\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        if rank == 0:\n                            tb_logger.add_scalar(k, v, current_step)\n                logger.info(message)\n\n            # validation\n            if current_step % opt[\"train\"][\"val_freq\"] == 0:\n\n                avg_results = validate(\n                    model, val_set, val_loader, opt, measure, epoch, current_step\n                )\n\n                # tensorboard logger\n                if rank == 0:\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        for k, v in avg_results.items():\n                            tb_logger.add_scalar(k, v, current_step)\n\n            # save models and training states\n            if current_step % opt[\"logger\"][\"save_checkpoint_freq\"] == 0:\n                if rank == 0:\n                    logger.info(\"Saving models and training states.\")\n                    model.save(current_step)\n                    model.save_training_state(epoch, current_step)\n\n            data_time = time.time()\n            iter_time = time.time()\n\n    if rank == 0:\n        logger.info(\"Saving the final model.\")\n        model.save(\"latest\")\n        logger.info(\"End of training.\")\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger.close()\n\n\ndef validate(model, dataset, dist_loader, opt, measure, epoch, current_step):\n\n    test_results = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    if rank == 0:\n        pbar = tqdm(total=len(dataset), leave=False, dynamic_ncols=True)\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        val_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        LR_img = val_data[\"src\"]\n        lr_img = util.tensor2img(LR_img)  # save LR image for reference\n\n        model.test(val_data)\n        visuals = model.get_current_visuals()\n\n        # Save images for reference\n        img_name = val_data[\"src_path\"][0].split(\"/\")[-1].split(\".\")[0]\n        img_dir = os.path.join(opt[\"path\"][\"val_images\"], img_name)\n\n        util.mkdir(img_dir)\n        save_lr_path = os.path.join(img_dir, \"{:s}_LR.png\".format(img_name))\n        util.save_img(lr_img, save_lr_path)\n\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n        save_img_path = os.path.join(\n            img_dir, \"{:s}_{:d}.png\".format(img_name, current_step)\n        )\n        util.save_img(sr_img, save_img_path)\n\n        if \"fake_lr\" in visuals.keys():\n            fake_lr_img = util.tensor2img(visuals[\"fake_lr\"])\n            save_img_path = os.path.join(\n                img_dir, f\"fake_lr_{current_step:d}.png\"\n            )\n            util.save_img(fake_lr_img, save_img_path)\n\n        # calculate scores\n        crop_size = opt[\"scale\"]\n        cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        if \"tgt\" in val_data.keys():\n            gt_img = util.tensor2img(val_data[\"tgt\"])\n            cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        else:\n            cropped_gt_img = gt_img = None\n\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n\n        if rank == 0:\n            for _ in range(world_size):\n                pbar.update(1)\n    if rank == 0:\n        pbar.close()\n\n    # log\n    avg_results = {}\n    message = \" <epoch:{:3d}, iter:{:8,d}> Average scores:\\t\".format(\n        epoch, current_step\n    )\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger_val = logging.getLogger(\"val\")  # validation logger\n        logger_val.info(message)\n\n    del test_results\n    torch.cuda.empty_cache()\n    return avg_results\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
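  {
    "path": "codes/config/CinGAN/examples/running_mean_demo.py",
    "content": "# A minimal sketch of the incremental mean used in train.py to track average\n# data-loading and iteration times: avg_n = (avg_{n-1} * (n - 1) + x_n) / n.\n# This updates the mean in O(1) memory instead of storing every sample.\n\n\ndef running_mean(avg, count, new_value):\n    # `count` is the number of samples seen so far, including `new_value`\n    return (avg * (count - 1) + new_value) / count\n\n\navg = 0.0\nfor count, x in enumerate([2.0, 4.0, 6.0], start=1):\n    avg = running_mean(avg, count, x)\nassert avg == 4.0\n"
  },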
  {
    "path": "codes/config/CycleSR/README.md",
    "content": "This repo supports the training and testing of CycleSR in the paper [Unsupervised Image Super-Resolution with an Indirect Supervised Path](https://arxiv.org/abs/1910.02593)"
  },
  {
    "path": "codes/config/CycleSR/archs/__init__.py",
    "content": "import importlib\nimport os\nimport os.path as osp\n\nfrom utils.registry import ARCH_REGISTRY, LOSS_REGISTRY, LR_SCHEDULER_REGISTRY\n\narch_folder = osp.dirname(osp.abspath(__file__))\narch_filenames = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(arch_folder)\n    if v.endswith(\".py\")\n]\n# import all the arch modules\n_arch_modules = [\n    importlib.import_module(f\"archs.{file_name}\") for file_name in arch_filenames\n]\n\n\ndef build_network(net_opt):\n    which_network = net_opt[\"which_network\"]\n    net = ARCH_REGISTRY.get(which_network)(**net_opt[\"setting\"])\n    return net\n\n\ndef build_loss(loss_opt):\n    loss_type = loss_opt.pop(\"type\")\n    loss = LOSS_REGISTRY.get(loss_type)(**loss_opt)\n    return loss\n\ndef build_scheduler(optimizer, scheduler_opt):\n    scheduler_type = scheduler_opt.pop(\"type\")\n    scheduler = LR_SCHEDULER_REGISTRY.get(scheduler_type)(optimizer, **scheduler_opt)\n    return scheduler\n"
  },
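  {
    "path": "codes/config/CycleSR/examples/build_network_demo.py",
    "content": "# A minimal sketch of how build_network in archs/__init__.py instantiates a\n# registered architecture from a config dict. The settings mirror the netD1\n# blocks in the option files; run this from codes/config/CycleSR so that the\n# `archs` package import resolves (the sys.path line mirrors test.py/train.py\n# and is an assumption about the working directory).\nimport sys\n\nsys.path.append(\"../../\")  # assumed, mirrors test.py/train.py\n\nfrom archs import build_network\n\nnet_opt = {\n    \"which_network\": \"PatchGANDiscriminator\",\n    \"setting\": {\"in_c\": 3, \"nf\": 64, \"nb\": 3, \"stride\": 1},\n}\nnetD = build_network(net_opt)\nprint(netD)\n"
  },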
  {
    "path": "codes/config/CycleSR/archs/discriminator.py",
    "content": "import torch\nimport torch.nn as nn\nimport torchvision\nimport functools\n\nfrom utils.registry import ARCH_REGISTRY\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG128(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512 * 4 * 4, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG32(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = 
nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass PatchGANDiscriminator(nn.Module):\n    \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n    def __init__(self, in_c, nf, nb, stride=1, norm_layer=nn.InstanceNorm2d):\n        \"\"\"Construct a PatchGAN discriminator\n\n        Parameters:\n            input_nc (int)  -- the number of channels in input images\n            ndf (int)       -- the number of filters in the last conv layer\n            n_layers (int)  -- the number of conv layers in the discriminator\n            norm_layer      -- normalization layer\n        \"\"\"\n        super().__init__()\n        if (\n            type(norm_layer) == functools.partial\n        ):  # no need to use bias as BatchNorm2d has affine parameters\n            use_bias = norm_layer.func == nn.InstanceNorm2d\n        else:\n            use_bias = norm_layer == nn.InstanceNorm2d\n\n        kw = 4\n        padw = 1\n        sequence = [\n            nn.Conv2d(in_c, nf, kernel_size=kw, stride=stride, padding=padw),\n            nn.LeakyReLU(0.2, True),\n        ]\n        nf_mult = 1\n        nf_mult_prev = 1\n        for n in range(1, nb):  # gradually increase the number of filters\n            nf_mult_prev = nf_mult\n            nf_mult = min(2 ** n, 8)\n            sequence += [\n                nn.Conv2d(\n                    nf * nf_mult_prev,\n                    nf * nf_mult,\n                    kernel_size=kw,\n                    stride=2,\n                    padding=padw,\n                    bias=use_bias,\n                ),\n                norm_layer(nf * nf_mult),\n                nn.LeakyReLU(0.2, True),\n            ]\n\n        nf_mult_prev = nf_mult\n        nf_mult = min(2 ** nb, 8)\n        sequence += [\n            nn.Conv2d(\n                nf * nf_mult_prev,\n                nf * nf_mult,\n                kernel_size=kw,\n                stride=1,\n                padding=padw,\n                bias=use_bias,\n            ),\n            norm_layer(nf * nf_mult),\n            nn.LeakyReLU(0.2, True),\n        ]\n\n        sequence += [\n            nn.Conv2d(nf * nf_mult, nf, kernel_size=kw, stride=1, padding=padw)\n        ]\n        
self.model = nn.Sequential(*sequence)\n\n    def forward(self, input):\n        \"\"\"Standard forward.\"\"\"\n        return self.model(input)\n"
  },
  {
    "path": "codes/config/CycleSR/archs/edsr.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(\n        self,\n        rgb_range,\n        rgb_mean=(0.4488, 0.4371, 0.4040),\n        rgb_std=(1.0, 1.0, 1.0),\n        sign=-1,\n    ):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\n\n\n@ARCH_REGISTRY.register()\nclass EDSR(nn.Module):\n    def __init__(self, nb, nf, res_scale=0.1, upscale=4, conv=default_conv):\n        super(EDSR, self).__init__()\n\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        scale = upscale\n       
 act = nn.ReLU(True)\n        # url_name = 'r{}f{}x{}'.format(nb, nf, upscale)\n        # if url_name in url:\n        #     self.url = url[url_name]\n        # else:\n        #     self.url = None\n        self.sub_mean = MeanShift(255.0, sign=-1)\n        self.add_mean = MeanShift(255.0, sign=1)\n\n        # define head module\n        m_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, n_feats, kernel_size, act=act, res_scale=res_scale)\n            for _ in range(n_resblocks)\n        ]\n        m_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = nn.Sequential(*m_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x * 255.0)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = self.tail(res)\n        x = self.add_mean(x) / 255.0\n\n        return x\n"
  },
  {
    "path": "codes/config/CycleSR/archs/loss.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport lpips as lp\n\nfrom utils.registry import LOSS_REGISTRY\n\nfrom .vgg import VGGFeatureExtractor\n\n\n@LOSS_REGISTRY.register()\nclass GaussGuided(nn.Module):\n    def __init__(self, ksize, sigma):\n        super().__init__()\n\n        ax = torch.arange(0, ksize) - ksize//2\n        xx, yy = torch.meshgrid(ax, ax)\n        dis = (xx ** 2 + yy ** 2)\n        dis = torch.exp(-dis / sigma ** 2)\n        dis = dis / dis.sum()\n\n        self.register_buffer(\"gauss\", dis.view(1, ksize**2, 1, 1))\n    \n    def forward(self, kernel):\n\n        return F.mse_loss(self.gauss, kernel)\n\n@LOSS_REGISTRY.register()\nclass PerceptualLossLPIPS(nn.Module):\n    def __init__(self, net=\"alex\", normalize=True):\n        super().__init__()\n        self.fn = lp.LPIPS(net=net, spatial=True)\n        for p in self.fn.parameters():\n            p.requires_grad = False\n        \n        self.normalize = normalize\n    \n    def forward(self, res, ref):\n        return self.fn(res, ref, normalize=self.normalize).mean(), None\n\n\n@LOSS_REGISTRY.register()\nclass MSELoss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.mse_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass L1Loss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.l1_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass GANLoss(nn.Module):\n    \"\"\"Define GAN loss.\n    Args:\n        gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.\n        real_label_val (float): The value for real label. Default: 1.0.\n        fake_label_val (float): The value for fake label. Default: 0.0.\n    \"\"\"\n\n    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):\n        super(GANLoss, self).__init__()\n        self.gan_type = gan_type\n        self.real_label_val = real_label_val\n        self.fake_label_val = fake_label_val\n\n        if self.gan_type == \"vanilla\":\n            self.loss = nn.BCEWithLogitsLoss()\n        elif self.gan_type == \"lsgan\":\n            self.loss = nn.MSELoss()\n        elif self.gan_type == \"wgan\":\n            self.loss = self._wgan_loss\n        elif self.gan_type == \"wgan_softplus\":\n            self.loss = self._wgan_softplus_loss\n        elif self.gan_type == \"hinge\":\n            self.loss = nn.ReLU()\n        else:\n            raise NotImplementedError(f\"GAN type {self.gan_type} is not implemented.\")\n\n    def _wgan_loss(self, input, target):\n        \"\"\"wgan loss.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return -input.mean() if target else input.mean()\n\n    def _wgan_softplus_loss(self, input, target):\n        \"\"\"wgan loss with soft plus. 
softplus is a smooth approximation to the\n        ReLU function.\n        In StyleGAN2, it is called:\n            Logistic loss for discriminator;\n            Non-saturating loss for generator.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return F.softplus(-input).mean() if target else F.softplus(input).mean()\n\n    def get_target_label(self, input, target_is_real):\n        \"\"\"Get target label.\n        Args:\n            input (Tensor): Input tensor.\n            target_is_real (bool): Whether the target is real or fake.\n        Returns:\n            (bool | Tensor): Target tensor. Return bool for wgan, otherwise,\n                return Tensor.\n        \"\"\"\n\n        if self.gan_type in [\"wgan\", \"wgan_softplus\"]:\n            return target_is_real\n        target_val = self.real_label_val if target_is_real else self.fake_label_val\n        return input.new_ones(input.size()) * target_val\n\n    def forward(self, input, target_is_real, is_disc=False):\n        \"\"\"\n        Args:\n            input (Tensor): The input for the loss module, i.e., the network\n                prediction.\n            target_is_real (bool): Whether the target is real or fake.\n            is_disc (bool): Whether the loss is for discriminators or not.\n                Default: False.\n        Returns:\n            Tensor: GAN loss value.\n        \"\"\"\n        target_label = self.get_target_label(input, target_is_real)\n        if self.gan_type == \"hinge\":\n            if is_disc:  # for discriminators in hinge-gan\n                input = -input if target_is_real else input\n                loss = self.loss(1 + input).mean()\n            else:  # for generators in hinge-gan\n                loss = -input.mean()\n        else:  # other gan types\n            loss = self.loss(input, target_label)\n\n        return loss\n\n\n
@LOSS_REGISTRY.register()\nclass PerceptualLoss(nn.Module):\n    \"\"\"Perceptual loss with commonly used style loss.\n    Args:\n        layer_weights (dict): The weight for each layer of vgg feature.\n            Here is an example: {'conv5_4': 1.}, which means the conv5_4\n            feature layer (before relu5_4) will be extracted with weight\n            1.0 in calculating losses.\n        vgg_type (str): The type of vgg network used as feature extractor.\n            Default: 'vgg19'.\n        use_input_norm (bool): If True, normalize the input image in vgg.\n            Default: True.\n        range_norm (bool): If True, normalize images from range [-1, 1] to [0, 1].\n            Default: False.\n        perceptual_weight (float): If `perceptual_weight > 0`, the perceptual\n            loss will be calculated and the loss will be multiplied by the\n            weight. Default: 1.0.\n        style_weight (float): If `style_weight > 0`, the style loss will be\n            calculated and the loss will be multiplied by the weight.\n            Default: 0.\n        criterion (str): Criterion used for perceptual loss. Default: 'l1'.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_weights,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        perceptual_weight=1.0,\n        style_weight=0.0,\n        criterion=\"l1\",\n    ):\n        super(PerceptualLoss, self).__init__()\n        self.perceptual_weight = perceptual_weight\n        self.style_weight = style_weight\n        self.layer_weights = layer_weights\n        self.vgg = VGGFeatureExtractor(\n            layer_name_list=list(layer_weights.keys()),\n            vgg_type=vgg_type,\n            use_input_norm=use_input_norm,\n            range_norm=range_norm,\n        )\n\n        self.criterion_type = criterion\n        if self.criterion_type == \"l1\":\n            self.criterion = torch.nn.L1Loss()\n        elif self.criterion_type == \"l2\":\n            self.criterion = torch.nn.MSELoss()\n        elif self.criterion_type == \"fro\":\n            self.criterion = None\n        else:\n            raise NotImplementedError(f\"{criterion} criterion is not supported.\")\n\n    def forward(self, x, gt):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, c, h, w).\n            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        # extract vgg features\n        x_features = self.vgg(x)\n        gt_features = self.vgg(gt.detach())\n\n        # calculate perceptual loss\n        if self.perceptual_weight > 0:\n            percep_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    percep_loss += (\n                        torch.norm(x_features[k] - gt_features[k], p=\"fro\")\n                        * self.layer_weights[k]\n                    )\n                else:\n                    percep_loss += (\n                        self.criterion(x_features[k], gt_features[k])\n                        * self.layer_weights[k]\n                    )\n            percep_loss *= self.perceptual_weight\n        else:\n            percep_loss = None\n\n        # calculate style loss\n        if self.style_weight > 0:\n            style_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    style_loss += (\n                        torch.norm(\n                            self._gram_mat(x_features[k])\n                            - self._gram_mat(gt_features[k]),\n                            p=\"fro\",\n                        )\n                        * self.layer_weights[k]\n                    )\n                else:\n                    style_loss += (\n                        self.criterion(\n                            self._gram_mat(x_features[k]),\n                            self._gram_mat(gt_features[k]),\n                        )\n                        * self.layer_weights[k]\n                    )\n            style_loss *= self.style_weight\n        else:\n            style_loss = None\n\n        return percep_loss, style_loss\n\n    def _gram_mat(self, x):\n        \"\"\"Calculate Gram matrix.\n        Args:\n            x (torch.Tensor): Tensor with shape of (n, c, h, w).\n        Returns:\n            torch.Tensor: Gram matrix.\n        \"\"\"\n        n, c, h, w = x.size()\n        features = x.view(n, c, w * h)\n        features_t = features.transpose(1, 2)\n        gram = features.bmm(features_t) / (c * h * w)\n        return gram\n\n\n
@LOSS_REGISTRY.register()\nclass CharbonnierLoss(nn.Module):\n    \"\"\"Charbonnier Loss (L1)\"\"\"\n\n    def __init__(self, eps=1e-6):\n        super(CharbonnierLoss, self).__init__()\n        self.eps = eps\n\n    def forward(self, x, y):\n        diff = x - y\n        loss = torch.mean(torch.sqrt(diff * diff + self.eps))\n        return loss\n\n\nclass GradientPenaltyLoss(nn.Module):\n    def __init__(self, device=torch.device(\"cpu\")):\n        super(GradientPenaltyLoss, self).__init__()\n        self.register_buffer(\"grad_outputs\", torch.Tensor())\n        self.grad_outputs = self.grad_outputs.to(device)\n\n    def get_grad_outputs(self, input):\n        if self.grad_outputs.size() != input.size():\n            self.grad_outputs.resize_(input.size()).fill_(1.0)\n        return self.grad_outputs\n\n    def forward(self, interp, interp_crit):\n        grad_outputs = self.get_grad_outputs(interp_crit)\n        grad_interp = torch.autograd.grad(\n            outputs=interp_crit,\n            inputs=interp,\n            grad_outputs=grad_outputs,\n            create_graph=True,\n            retain_graph=True,\n            only_inputs=True,\n        )[0]\n        grad_interp = grad_interp.view(grad_interp.size(0), -1)\n        grad_interp_norm = grad_interp.norm(2, dim=1)\n\n        loss = ((grad_interp_norm - 1) ** 2).mean()\n        return loss\n"
  },
  {
    "path": "codes/config/CycleSR/archs/lr_scheduler.py",
    "content": "import math\nfrom collections import Counter, defaultdict\n\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\n\nfrom utils.registry import LR_SCHEDULER_REGISTRY\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass LinearDecayLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        decay_prop,\n        total_steps,\n        last_epoch=-1,\n    ):\n        self.decay_prop = decay_prop\n        self.total_steps = total_steps\n\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n\n        return [\n            group[\"initial_lr\"]\n            * (1 - (self.last_epoch + 1) * self.decay_prop / self.total_steps)\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass MultiStepRestartLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        milestones,\n        restarts=None,\n        weights=None,\n        gamma=0.1,\n        clear_state=False,\n        last_epoch=-1,\n    ):\n        self.milestones = Counter(milestones)\n        self.gamma = gamma\n        self.clear_state = clear_state\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch in self.restarts:\n            if self.clear_state:\n                self.optimizer.state = defaultdict(dict)\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        if self.last_epoch not in self.milestones:\n            return [group[\"lr\"] for group in self.optimizer.param_groups]\n        return [\n            group[\"lr\"] * self.gamma ** self.milestones[self.last_epoch]\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass CosineAnnealingRestartLR(_LRScheduler):\n    def __init__(\n        self, optimizer, T_period, restarts=None, weights=None, eta_min=0, last_epoch=-1\n    ):\n        self.T_period = T_period\n        self.T_max = self.T_period[0]  # current T period\n        self.eta_min = eta_min\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        self.last_restart = 0\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch == 0:\n            return self.base_lrs\n        elif self.last_epoch in self.restarts:\n            self.last_restart = self.last_epoch\n            self.T_max = self.T_period[self.restarts.index(self.last_epoch) + 1]\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        elif (self.last_epoch - self.last_restart - 1 - self.T_max) % (\n            2 * self.T_max\n        ) == 0:\n            return [\n                group[\"lr\"]\n                + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2\n                for base_lr, group in 
zip(self.base_lrs, self.optimizer.param_groups)\n            ]\n        return [\n            (1 + math.cos(math.pi * (self.last_epoch - self.last_restart) / self.T_max))\n            / (\n                1\n                + math.cos(\n                    math.pi * ((self.last_epoch - self.last_restart) - 1) / self.T_max\n                )\n            )\n            * (group[\"lr\"] - self.eta_min)\n            + self.eta_min\n            for group in self.optimizer.param_groups\n        ]\n"
  },
  {
    "path": "codes/config/CycleSR/archs/module_util.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\n\ndef initialize_weights(net_l, scale=1):\n    if not isinstance(net_l, list):\n        net_l = [net_l]\n    for net in net_l:\n        for m in net.modules():\n            if isinstance(m, nn.Conv2d):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale  # for residual block\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                init.constant_(m.weight, 1)\n                init.constant_(m.bias.data, 0.0)\n\n\ndef make_layer(block, n_layers):\n    layers = []\n    for _ in range(n_layers):\n        layers.append(block())\n    return nn.Sequential(*layers)\n\n\nclass ResidualBlock_noBN(nn.Module):\n    \"\"\"Residual block w/o BN\n    ---Conv-ReLU-Conv-+-\n     |________________|\n    \"\"\"\n\n    def __init__(self, nf=64):\n        super(ResidualBlock_noBN, self).__init__()\n        self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n\n        # initialization\n        initialize_weights([self.conv1, self.conv2], 0.1)\n\n    def forward(self, x):\n        identity = x\n        out = F.relu(self.conv1(x), inplace=True)\n        out = self.conv2(out)\n        return identity + out\n\n\ndef flow_warp(x, flow, interp_mode=\"bilinear\", padding_mode=\"zeros\"):\n    \"\"\"Warp an image or feature map with optical flow\n    Args:\n        x (Tensor): size (N, C, H, W)\n        flow (Tensor): size (N, H, W, 2), normal value\n        interp_mode (str): 'nearest' or 'bilinear'\n        padding_mode (str): 'zeros' or 'border' or 'reflection'\n\n    Returns:\n        Tensor: warped image or feature map\n    \"\"\"\n    assert x.size()[-2:] == flow.size()[1:3]\n    B, C, H, W = x.size()\n    # mesh grid\n    grid_y, grid_x = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))\n    grid = torch.stack((grid_x, grid_y), 2).float()  # W(x), H(y), 2\n    grid.requires_grad = False\n    grid = grid.type_as(x)\n    vgrid = grid + flow\n    # scale grid to [-1,1]\n    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(W - 1, 1) - 1.0\n    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(H - 1, 1) - 1.0\n    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)\n    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode)\n    return output\n"
  },
  {
    "path": "codes/config/CycleSR/archs/rcan.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\nclass CALayer(nn.Module):\n    def __init__(self, channel, reduction=16):\n        super(CALayer, self).__init__()\n        # global average pooling: feature --> point\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        # feature channel downscale and upscale --> channel weight\n        self.conv_du = nn.Sequential(\n            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),\n            
nn.ReLU(inplace=True),\n            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),\n            nn.Sigmoid(),\n        )\n\n    def forward(self, x):\n        y = self.avg_pool(x)\n        y = self.conv_du(y)\n        return x * y\n\n\n## Residual Channel Attention Block (RCAB)\nclass RCAB(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        reduction,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(RCAB, self).__init__()\n        modules_body = []\n        for i in range(2):\n            modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                modules_body.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                modules_body.append(act)\n        modules_body.append(CALayer(n_feat, reduction))\n        self.body = nn.Sequential(*modules_body)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x)\n        # res = self.body(x).mul(self.res_scale)\n        res += x\n        return res\n\n\n## Residual Group (RG)\nclass ResidualGroup(nn.Module):\n    def __init__(\n        self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks\n    ):\n        super(ResidualGroup, self).__init__()\n        modules_body = []\n        modules_body = [\n            RCAB(\n                conv,\n                n_feat,\n                kernel_size,\n                reduction,\n                bias=True,\n                bn=False,\n                act=nn.ReLU(True),\n                res_scale=1,\n            )\n            for _ in range(n_resblocks)\n        ]\n        modules_body.append(conv(n_feat, n_feat, kernel_size))\n        self.body = nn.Sequential(*modules_body)\n\n    def forward(self, x):\n        res = self.body(x)\n        res += x\n        return res\n\n\n## Residual Channel Attention Network (RCAN)\n@ARCH_REGISTRY.register()\nclass RCAN(nn.Module):\n    def __init__(self, ng, nb, nf, reduction=16, upscale=4, conv=default_conv):\n        super(RCAN, self).__init__()\n\n        n_resgroups = ng\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        reduction = reduction\n        scale = upscale\n\n        act = nn.ReLU(True)\n\n        # RGB mean for DIV2K\n        rgb_mean = (0.4488, 0.4371, 0.4040)\n        rgb_std = (1.0, 1.0, 1.0)\n        self.sub_mean = MeanShift(1.0, rgb_mean, rgb_std, -1)\n\n        # define head module\n        modules_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        modules_body = [\n            ResidualGroup(\n                conv,\n                n_feats,\n                kernel_size,\n                reduction,\n                act=act,\n                res_scale=1.0,\n                n_resblocks=nb,\n            )\n            for _ in range(ng)\n        ]\n\n        modules_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        modules_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.add_mean = MeanShift(1.0, rgb_mean, rgb_std, 1)\n\n        self.head = nn.Sequential(*modules_head)\n        self.body = nn.Sequential(*modules_body)\n        self.tail = nn.Sequential(*modules_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = 
self.tail(res)\n        x = self.add_mean(x)\n\n        return x\n\n    def load_state_dict(self, state_dict, strict=False):\n        own_state = self.state_dict()\n        for name, param in state_dict.items():\n            if name in own_state:\n                if isinstance(param, nn.Parameter):\n                    param = param.data\n                try:\n                    own_state[name].copy_(param)\n                except Exception:\n                    if name.find(\"tail\") >= 0:\n                        print(\"Replacing the pre-trained upsampler with a new one...\")\n                    else:\n                        raise RuntimeError(\n                            \"While copying the parameter named {}, \"\n                            \"whose dimensions in the model are {} and \"\n                            \"whose dimensions in the checkpoint are {}.\".format(\n                                name, own_state[name].size(), param.size()\n                            )\n                        )\n            elif strict:\n                if name.find(\"tail\") == -1:\n                    raise KeyError('unexpected key \"{}\" in state_dict'.format(name))\n\n        if strict:\n            missing = set(own_state.keys()) - set(state_dict.keys())\n            if len(missing) > 0:\n                raise KeyError('missing keys in state_dict: \"{}\"'.format(missing))\n"
  },
  {
    "path": "codes/config/CycleSR/archs/rrdb.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\nclass ResidualDenseBlock_5C(nn.Module):\n    def __init__(self, nf=64, gc=32, bias=True):\n        super(ResidualDenseBlock_5C, self).__init__()\n        # gc: growth channel, i.e. intermediate channels\n        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)\n        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)\n        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1\n        )\n\n    def forward(self, x):\n        x1 = self.lrelu(self.conv1(x))\n        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))\n        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))\n        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))\n        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))\n        return x5 * 0.2 + x\n\n\nclass RRDB(nn.Module):\n    \"\"\"Residual in Residual Dense Block\"\"\"\n\n    def __init__(self, nf, gc=32):\n        super(RRDB, self).__init__()\n        self.RDB1 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB2 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB3 = ResidualDenseBlock_5C(nf, gc)\n\n    def forward(self, x):\n        out = self.RDB1(x)\n        out = self.RDB2(out)\n        out = self.RDB3(out)\n        return out * 0.2 + x\n\n\n@ARCH_REGISTRY.register()\nclass RRDBNet(nn.Module):\n    def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4):\n        super(RRDBNet, self).__init__()\n        self.upscale = upscale\n        RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.RRDB_trunk = make_layer(RRDB_block_f, nb)\n        self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        #### upsampling\n        self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        if upscale == 4:\n            self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.conv_first(x)\n        trunk = self.trunk_conv(self.RRDB_trunk(fea))\n        fea = fea + trunk\n\n        if self.upscale == 2 or self.upscale == 3:\n            fea = self.lrelu(\n                self.upconv1(\n                    F.interpolate(fea, scale_factor=self.upscale, mode=\"nearest\")\n                )\n            )\n        if self.upscale == 4:\n            fea = self.lrelu(\n                self.upconv1(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n            fea = self.lrelu(\n                self.upconv2(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n        out = self.conv_last(self.lrelu(self.HRconv(fea)))\n\n        return out\n"
  },
  {
    "path": "codes/config/CycleSR/archs/srresnet.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\n@ARCH_REGISTRY.register()\nclass MSRResNet(nn.Module):\n    \"\"\"modified SRResNet\"\"\"\n\n    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):\n        super(MSRResNet, self).__init__()\n        self.upscale = upscale\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        basic_block = functools.partial(ResidualBlock_noBN, nf=nf)\n        self.recon_trunk = make_layer(basic_block, nb)\n\n        # upsampling\n        if self.upscale == 2:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n        elif self.upscale == 3:\n            self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(3)\n        elif self.upscale == 4:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1\n        )\n        if self.upscale == 4:\n            initialize_weights(self.upconv2, 0.1)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv_first(x))\n        out = self.recon_trunk(fea)\n\n        if self.upscale == 4:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n        elif self.upscale == 3 or self.upscale == 2:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n\n        out = self.conv_last(self.lrelu(self.HRconv(out)))\n        base = F.interpolate(\n            x, scale_factor=self.upscale, mode=\"bilinear\", align_corners=False\n        )\n        out += base\n        return out\n"
  },
  {
    "path": "codes/config/CycleSR/archs/translator.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\nfrom .edsr import default_conv, BasicBlock, ResBlock\n\n\n@ARCH_REGISTRY.register()\nclass Translator(nn.Module):\n    def __init__(self, nb, nf, scale=4, zero_tail=False, conv=default_conv):\n        super().__init__()\n\n        self.scale = scale\n        # define head module\n        if scale >= 1:\n            m_head = [conv(3, nf, 3)]\n        else:\n            s = int(1 / scale)\n            m_head = [nn.Conv2d(3, nf, kernel_size=2 * s + 1, stride=s, padding=s)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, nf, 3, act=nn.ReLU(True), res_scale=1) for _ in range(nb)\n        ]\n        m_body.append(conv(nf, nf, 3))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, nf, act=False) if scale > 1 else nn.Identity(),\n            conv(nf, 3, 3),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = nn.Sequential(*m_tail)\n\n        if zero_tail:\n            nn.init.constant_(self.tail[-1].weight, 0)\n            nn.init.constant_(self.tail[-1].bias, 0)\n\n    def forward(self, x):\n\n        f = self.head(x)\n        f = self.body(f)\n        f = self.tail(f)\n\n        if self.scale == 1:\n            x = f + x\n        else:\n            x = f + F.interpolate(x, scale_factor=self.scale)\n        \n        return x\n"
  },
  {
    "path": "codes/config/CycleSR/archs/vgg.py",
    "content": "import os\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn as nn\nfrom torchvision.models import vgg as vgg\n\nfrom utils.registry import ARCH_REGISTRY\n\nVGG_PRETRAIN_PATH = \"checkpoints/pretrained_models/vgg19-dcbb9e9d.pth\"\nNAMES = {\n    \"vgg11\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg13\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg16\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"pool5\",\n    ],\n    \"vgg19\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"conv3_4\",\n        \"relu3_4\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"conv4_4\",\n        \"relu4_4\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"conv5_4\",\n        \"relu5_4\",\n        \"pool5\",\n    ],\n}\n\n\ndef insert_bn(names):\n    \"\"\"Insert bn layer after each conv.\n    Args:\n        names (list): The list of layer names.\n    Returns:\n        list: The list of layer names with bn layers.\n    \"\"\"\n    names_bn = []\n    for name in names:\n        names_bn.append(name)\n        if \"conv\" in name:\n            position = name.replace(\"conv\", \"\")\n            names_bn.append(\"bn\" + position)\n    return names_bn\n\n\n@ARCH_REGISTRY.register()\nclass VGGFeatureExtractor(nn.Module):\n    \"\"\"VGG network for feature extraction.\n    In this implementation, we allow users to choose whether use normalization\n    in the input feature and the type of vgg network. 
Note that the pretrained\n    path must fit the vgg type.\n    Args:\n        layer_name_list (list[str]): Forward function returns the corresponding\n            features according to the layer_name_list.\n            Example: ['relu1_1', 'relu2_1', 'relu3_1'].\n        vgg_type (str): Set the type of vgg network. Default: 'vgg19'.\n        use_input_norm (bool): If True, normalize the input image. Importantly,\n            the input image must be in the range [0, 1]. Default: True.\n        range_norm (bool): If True, normalize images from range [-1, 1] to [0, 1].\n            Default: False.\n        requires_grad (bool): If True, the parameters of the VGG network will be\n            optimized. Default: False.\n        remove_pooling (bool): If True, the max pooling operations in the VGG net\n            will be removed. Default: False.\n        pooling_stride (int): The stride of max pooling operation. Default: 2.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_name_list,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        requires_grad=False,\n        remove_pooling=False,\n        pooling_stride=2,\n    ):\n        super(VGGFeatureExtractor, self).__init__()\n\n        self.layer_name_list = layer_name_list\n        self.use_input_norm = use_input_norm\n        self.range_norm = range_norm\n\n        self.names = NAMES[vgg_type.replace(\"_bn\", \"\")]\n        if \"bn\" in vgg_type:\n            self.names = insert_bn(self.names)\n\n        # only borrow layers that will be used to avoid unused params\n        max_idx = 0\n        for v in layer_name_list:\n            idx = self.names.index(v)\n            if idx > max_idx:\n                max_idx = idx\n\n        if os.path.exists(VGG_PRETRAIN_PATH):\n            vgg_net = getattr(vgg, vgg_type)(pretrained=False)\n            state_dict = torch.load(\n                VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage\n            )\n            vgg_net.load_state_dict(state_dict)\n        else:\n            vgg_net = getattr(vgg, vgg_type)(pretrained=True)\n\n        features = vgg_net.features[: max_idx + 1]\n\n        modified_net = OrderedDict()\n        for k, v in zip(self.names, features):\n            if \"pool\" in k:\n                # if remove_pooling is true, pooling operation will be removed\n                if remove_pooling:\n                    continue\n                else:\n                    # in some cases, we may want to change the default stride\n                    modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride)\n            else:\n                modified_net[k] = v\n\n        self.vgg_net = nn.Sequential(modified_net)\n\n        if not requires_grad:\n            self.vgg_net.eval()\n            for param in self.parameters():\n                param.requires_grad = False\n        else:\n            self.vgg_net.train()\n            for param in self.parameters():\n                param.requires_grad = True\n\n        if self.use_input_norm:\n            # the mean is for image with range [0, 1]\n            self.register_buffer(\n                \"mean\", torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)\n            )\n            # the std is for image with range [0, 1]\n            self.register_buffer(\n                \"std\", torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)\n            )\n\n    def forward(self, x):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        if self.range_norm:\n            x = (x + 1) / 2\n        if self.use_input_norm:\n            x = (x - self.mean) / self.std\n\n        output = {}\n        for key, layer in self.vgg_net._modules.items():\n            x = layer(x)\n            if key in self.layer_name_list:\n                output[key] = x.clone()\n\n        return output\n"
  },
  {
    "path": "codes/config/CycleSR/count_flops.py",
    "content": "import argparse\nimport sys\n\nimport torch\nfrom torchsummaryX import summary\n\nsys.path.append(\"../../\")\nimport utils.option as option\nfrom models import create_model\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"--opt\",\n    type=str,\n    default=\"options/setting1/test/test_setting1_x4.yml\",\n    help=\"Path to option YMAL file of Predictor.\",\n)\nargs = parser.parse_args()\nopt = option.parse(args.opt, root_path=\".\", is_train=True)\n\nopt = option.dict_to_nonedict(opt)\nmodel = create_model(opt)\n\ntest_tensor = torch.randn(1, 3, 270, 180).cuda()\nfor name, net in model.networks.items():\n    summary(net.cuda(), x=test_tensor)\n    print(\"Above are results for net {}\".format(name))\n    input()\n"
  },
  {
    "path": "codes/config/CycleSR/inference.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport os.path as osp\nimport random\nimport sys\nimport cv2\nfrom collections import defaultdict\nfrom glob import glob\nfrom tqdm import tqdm\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom data.data_sampler import DistIterSampler\nfrom metrics import IQA\nfrom models import create_model\n\n\n\n#### options\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"-opt\",\n    type=str,\n    default=\"options/test/2020Track2.yml\",\n    help=\"Path to options YMAL file.\",\n)\nparser.add_argument(\"-input_dir\", type=str, default=\"../../../data_samples/LR\")\nparser.add_argument(\"-output_dir\", type=str, default=\"../../../data_samples/BSRGAN\")\nargs = parser.parse_args()\nopt = option.parse(args.opt, is_train=False)\n\nopt = option.dict_to_nonedict(opt)\n\nmodel = create_model(opt)\n\nif not osp.exists(args.output_dir):\n    os.makedirs(args.output_dir)\n\ntest_files = glob(osp.join(args.input_dir, \"*\"))\nfor inx, path in tqdm(enumerate(test_files)):\n    name = path.split(\"/\")[-1].split(\".\")[0]\n\n    img = cv2.imread(path)[:, :, [2, 1, 0]]\n    img = img.transpose(2, 0, 1)[None] / 255\n    img_t = torch.as_tensor(np.ascontiguousarray(img)).float()\n\n    model.test({\"src\": img_t}, crop_size=512)\n    outdict = model.get_current_visuals()\n\n    sr = outdict[\"sr\"]\n    sr_im = util.tensor2img(sr)\n\n    save_path = osp.join(args.output_dir, \"{}_x{}.png\".format(name, opt[\"scale\"]))\n    cv2.imwrite(save_path, sr_im)\n"
  },
  {
    "path": "codes/config/CycleSR/models/__init__.py",
    "content": "import importlib\nimport logging\nimport os\nimport os.path as osp\n\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\nmodel_folder = osp.dirname(__file__)\nmodel_names = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(model_folder)\n    if v.endswith(\"_model.py\")\n]\n_model_modules = [\n    importlib.import_module(f\"models.{file_name}\") for file_name in model_names\n]\n\n\ndef create_model(opt, **kwarg):\n    model = opt[\"model\"]\n    m = MODEL_REGISTRY.get(model)(opt, **kwarg)\n    logger.info(\"Model [{:s}] is created.\".format(m.__class__.__name__))\n    return m\n"
  },
  {
    "path": "codes/config/CycleSR/models/base_model.py",
    "content": "import logging\nimport os\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\n\nfrom archs import build_loss, build_network, build_scheduler\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass BaseModel:\n    def __init__(self, opt):\n\n        self.opt = opt\n\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n            self.world_size = torch.distributed.get_world_size()\n        else:\n            self.rank = 0  # non dist training\n\n        self.device = torch.device(\"cuda\" if opt[\"gpu_ids\"] is not None else \"cpu\")\n        self.is_train = opt[\"is_train\"]\n        self.log_dict = OrderedDict()\n\n        self.data_names = []\n        self.networks = {}\n\n        self.optimizers = {}\n        self.schedulers = {}\n\n    def setup_train(self, train_opt):\n        # define losses\n        loss_opt = train_opt[\"losses\"]\n        self.losses = self.build_losses(loss_opt)\n\n        # build optmizers\n        optimizer_opts = train_opt[\"optimizers\"]\n        self.optimizers = self.build_optimizers(optimizer_opts)\n\n        # set schedulers\n        scheduler_opts = train_opt[\"schedulers\"]\n        self.schedulers = self.build_schedulers(scheduler_opts)\n\n        # set to training state\n        self.set_network_state(self.networks.keys(), \"train\")\n\n    def feed_data(self, data):\n        pass\n\n    def optimize_parameters(self):\n        pass\n\n    def get_current_visuals(self):\n        pass\n\n    def get_current_losses(self):\n        pass\n\n    def print_network(self):\n        pass\n\n    def save(self, label):\n        pass\n\n    def load(self):\n        pass\n\n    def build_network(self, net_opt):\n\n        net = build_network(net_opt)\n\n        if isinstance(net, nn.Module):\n            net = self.model_to_device(net)\n\n            if net_opt.get(\"pretrain\"):\n                pretrain = net_opt.pop(\"pretrain\")\n                self.load_network(net, pretrain[\"path\"], pretrain[\"strict_load\"])\n\n            self.print_network(net)\n        return net\n\n    def build_losses(self, loss_opt):\n        losses = {}\n\n        defined_loss_names = list(loss_opt.keys())\n        assert set(defined_loss_names).issubset(set(self.loss_names))\n\n        for name in defined_loss_names:\n            loss_conf = loss_opt.get(name)\n            if loss_conf[\"weight\"] > 0:\n                self.loss_weights[name] = loss_conf.pop(\"weight\")\n                losses[name] = build_loss(loss_conf).to(self.device)\n\n        return losses\n\n    def build_optimizers(self, optim_opts):\n        optimizers = {}\n\n        if \"default\" in optim_opts.keys():\n            default_optim = optim_opts.pop(\"default\")\n\n        defined_optimizer_names = list(optim_opts.keys())\n        assert set(defined_optimizer_names).issubset(self.networks.keys())\n\n        for name in defined_optimizer_names:\n            optim_opt = optim_opts[name]\n            if optim_opt is None:\n                optim_opt = default_optim.copy()\n\n            params = []\n            for v in self.networks[name].parameters():\n                if v.requires_grad:\n                    params.append(v)\n\n            optim_type = optim_opt.pop(\"type\")\n            optimizer = getattr(torch.optim, optim_type)(params=params, **optim_opt)\n            optimizers[name] = optimizer\n\n  
      return optimizers\n\n    def build_schedulers(self, scheduler_opts):\n        \"\"\"Set up scheduler.\"\"\"\n        schedulers = {}\n        if \"default\" in scheduler_opts.keys():\n            default_opt = scheduler_opts.pop(\"default\")\n\n        for name in self.optimizers.keys():\n            scheduler_opt = scheduler_opts[name]\n            if scheduler_opt is None:\n                scheduler_opt = default_opt.copy()\n\n            schedulers[name] = build_scheduler(self.optimizers[name], scheduler_opt)\n\n        return schedulers\n\n    def model_to_device(self, net):\n        \"\"\"Model to device. It also warps models with DistributedDataParallel\n        or DataParallel.\n        Args:\n            net (nn.Module)\n        \"\"\"\n        net = net.to(self.device)\n        if self.opt[\"dist\"]:\n            net = DistributedDataParallel(net, device_ids=[torch.cuda.current_device()])\n        else:\n            net = DataParallel(net)\n        return net\n\n    def print_network(self, net):\n        # Generator\n        s, n = self.get_network_description(net)\n        if isinstance(net, nn.DataParallel) or isinstance(net, DistributedDataParallel):\n            net_struc_str = \"{} - {}\".format(\n                net.__class__.__name__, net.module.__class__.__name__\n            )\n        else:\n            net_struc_str = \"{}\".format(net.__class__.__name__)\n        if self.rank <= 0:\n            logger.info(\n                \"Network G structure: {}, with parameters: {:,d}\".format(\n                    net_struc_str, n\n                )\n            )\n            logger.info(s)\n\n    def set_optimizer(self, names, operation):\n        for name in names:\n            getattr(self.optimizers[name], operation)()\n\n    def set_requires_grad(self, names, requires_grad):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                for v in self.networks[name].parameters():\n                    v.requires_grad = requires_grad\n\n    def set_network_state(self, names, state):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                getattr(self.networks[name], state)()\n\n    def clip_grad_norm(self, names, norm):\n        for name in names:\n            nn.utils.clip_grad_norm_(self.networks[name].parameters(), max_norm=norm)\n\n    def _set_lr(self, lr_groups_l):\n        \"\"\"set learning rate for warmup,\n        lr_groups_l: list for lr_groups. 
one for each optimizer.\"\"\"\n        for optimizer, lr_groups in zip(self.optimizers.values(), lr_groups_l):\n            for param_group, lr in zip(optimizer.param_groups, lr_groups):\n                param_group[\"lr\"] = lr\n\n    def _get_init_lr(self):\n        # get the initial lr, which is set by the scheduler\n        init_lr_groups_l = []\n        for optimizer in self.optimizers.values():\n            init_lr_groups_l.append([v[\"initial_lr\"] for v in optimizer.param_groups])\n        return init_lr_groups_l\n\n    def update_learning_rate(self, cur_iter, warmup_iter=-1):\n        for _, scheduler in self.schedulers.items():\n            scheduler.step()\n        #### set up warm up learning rate\n        if cur_iter < warmup_iter:\n            # get initial lr for each group\n            init_lr_g_l = self._get_init_lr()\n            # modify warming-up learning rates\n            warm_up_lr_l = []\n            for init_lr_g in init_lr_g_l:\n                warm_up_lr_l.append([v / warmup_iter * cur_iter for v in init_lr_g])\n            # set learning rate\n            self._set_lr(warm_up_lr_l)\n\n    def get_current_learning_rate(self):\n        return list(self.optimizers.values())[0].param_groups[0][\"lr\"]\n\n    def get_network_description(self, network):\n        \"\"\"Get the string and total parameters of the network\"\"\"\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        s = str(network)\n        n = sum(map(lambda x: x.numel(), network.parameters()))\n        return s, n\n\n    def save_network(self, network, network_label, iter_label):\n        save_filename = \"{}_{}.pth\".format(iter_label, network_label)\n        save_path = os.path.join(self.opt[\"path\"][\"models\"], save_filename)\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        state_dict = network.state_dict()\n        for key, param in state_dict.items():\n            state_dict[key] = param.cpu()\n        torch.save(state_dict, save_path)\n\n    def save(self, iter_label):\n        for name in self.optimizers.keys():\n            self.save_network(self.networks[name], name, iter_label)\n\n    def load_network(self, network, load_path, strict=True):\n        if load_path is not None:\n            if isinstance(network, nn.DataParallel) or isinstance(\n                network, DistributedDataParallel\n            ):\n                network = network.module\n            load_net = torch.load(load_path)\n            load_net_clean = OrderedDict()  # remove unnecessary 'module.'\n            for k, v in load_net.items():\n                if k.startswith(\"module.\"):\n                    load_net_clean[k[7:]] = v\n                else:\n                    load_net_clean[k] = v\n            network.load_state_dict(load_net_clean, strict=strict)\n\n    def save_training_state(self, epoch, iter_step):\n        \"\"\"Saves training state during training, which will be used for resuming\"\"\"\n        state = {\"epoch\": epoch, \"iter\": iter_step, \"schedulers\": {}, \"optimizers\": {}}\n        for k, s in self.schedulers.items():\n            state[\"schedulers\"][k] = s.state_dict()\n        for k, o in self.optimizers.items():\n            state[\"optimizers\"][k] = o.state_dict()\n        save_filename = \"{}.state\".format(iter_step)\n        
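# the .state file bundles epoch, iteration and the optimizer/scheduler state_dicts consumed by resume_training\n        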
save_path = os.path.join(self.opt[\"path\"][\"training_state\"], save_filename)\n        torch.save(state, save_path)\n\n    def resume_training(self, resume_state):\n        \"\"\"Resume the optimizers and schedulers for training\"\"\"\n        resume_optimizers = resume_state[\"optimizers\"]\n        resume_schedulers = resume_state[\"schedulers\"]\n        assert len(resume_optimizers) == len(\n            self.optimizers\n        ), \"Wrong lengths of optimizers\"\n        assert len(resume_schedulers) == len(\n            self.schedulers\n        ), \"Wrong lengths of schedulers\"\n        for name, o in resume_optimizers.items():\n            self.optimizers[name].load_state_dict(o)\n        for name, s in resume_schedulers.items():\n            self.schedulers[name].load_state_dict(s)\n\n    def reduce_loss_dict(self, loss_dict):\n        \"\"\"Reduce loss dict.\n        In distributed training, it averages the losses among different GPUs.\n        Args:\n            loss_dict (OrderedDict): Loss dict.\n        \"\"\"\n        with torch.no_grad():\n            if self.opt[\"dist\"]:\n                keys = []\n                losses = []\n                for name, value in loss_dict.items():\n                    keys.append(name)\n                    losses.append(value)\n                losses = torch.stack(losses, 0)\n                torch.distributed.reduce(losses, dst=0)\n                if self.rank == 0:\n                    losses /= self.world_size\n                loss_dict = {key: loss for key, loss in zip(keys, losses)}\n\n            log_dict = OrderedDict()\n            for name, value in loss_dict.items():\n                log_dict[name] = value.mean().item()\n\n            return log_dict\n\n    def get_current_log(self):\n        return self.log_dict\n"
  },
  {
    "path": "codes/config/CycleSR/models/cyclegan_model.py",
    "content": "import logging\nfrom collections import OrderedDict\nimport random\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.registry import MODEL_REGISTRY\n\nfrom .base_model import BaseModel\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass CycleGANModel(BaseModel):\n    def __init__(self, opt):\n        super().__init__(opt)\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n        else:\n            self.rank = -1  # non dist training\n\n        self.data_names = [\"src\", \"tgt\"]\n\n        self.network_names = [\"netG1\", \"netG2\", \"netD1\", \"netD2\"]\n        self.networks = {}\n\n        self.loss_names = [\n            \"g1d1_adv\",\n            \"g2d2_adv\",\n            \"g1_idt\",\n            \"g2_idt\",\n            \"g1g2_cycle\",\n            \"g2g1_cycle\",\n        ]\n        self.loss_weights = {}\n        self.losses = {}\n        self.optimizers = {}\n\n        # define networks and load pretrained models\n        nets_opt = opt[\"networks\"]\n        defined_network_names = list(nets_opt.keys())\n        assert set(defined_network_names).issubset(set(self.network_names))\n        \n        for name in defined_network_names:\n            setattr(self, name, self.build_network(nets_opt[name]))\n            self.networks[name] = getattr(self, name)\n            \n        if self.is_train:\n            train_opt = opt[\"train\"]\n             # setup loss, optimizers, schedulers\n            self.setup_train(train_opt)\n        \n            self.max_grad_norm = train_opt[\"max_grad_norm\"]\n\n            # buffer\n            self.fake_src_buffer = ShuffleBuffer(train_opt[\"buffer_size\"])\n            self.fake_tgt_buffer = ShuffleBuffer(train_opt[\"buffer_size\"])\n\n    def feed_data(self, data):\n\n        self.src = data[\"src\"].to(self.device)\n        self.tgt = data[\"tgt\"].to(self.device)\n    \n    def forward(self):\n\n        self.fake_tgt = self.netG1(self.src)\n        self.rec_src = self.netG2(self.fake_tgt)\n        self.fake_src = self.netG2(self.tgt)\n        self.rec_tgt = self.netG1(self.fake_src)\n\n    def optimize_parameters(self, step):\n        loss_dict = OrderedDict()\n\n        self.forward()\n\n        loss_G = 0\n        # set D fixed\n        self.set_requires_grad([\"netD1\", \"netD2\"], False)\n\n        g1_adv_loss = self.calculate_gan_loss_G(\n            self.netD1, self.losses[\"g1d1_adv\"], self.tgt, self.fake_tgt\n        )\n        loss_dict[\"g1_adv\"] = g1_adv_loss.item()\n        loss_G += self.loss_weights[\"g1d1_adv\"] * g1_adv_loss\n\n        g2_adv_loss = self.calculate_gan_loss_G(\n            self.netD2, self.losses[\"g2d2_adv\"], self.src, self.fake_src\n        )\n        loss_dict[\"g2_adv\"] = g2_adv_loss.item()\n        loss_G += self.loss_weights[\"g2d2_adv\"] * g2_adv_loss\n\n        if self.losses.get(\"g1_idt\"):\n            self.tgt_idt = self.netG1(self.tgt)\n            g1_idt = self.losses[\"g1_idt\"](self.tgt, self.tgt_idt)\n            loss_dict[\"g1_idt\"] = g1_idt.item()\n            loss_G += self.loss_weights[\"g1_idt\"] * g1_idt\n        \n        if self.losses.get(\"g2_idt\"):\n            self.src_idt = self.netG2(self.src)\n            g2_idt = self.losses[\"g2_idt\"](self.src, self.src_idt)\n            loss_dict[\"g2_idt\"] = g2_idt.item()\n            loss_G += self.loss_weights[\"g2_idt\"] * g2_idt\n\n        g1g2_cycle = self.losses[\"g1g2_cycle\"](self.rec_src, self.src)\n        loss_dict[\"g1g2_cycle\"] = 
g1g2_cycle.item()\n        loss_G += self.loss_weights[\"g1g2_cycle\"] * g1g2_cycle\n\n        g2g1_cycle = self.losses[\"g2g1_cycle\"](self.rec_tgt, self.tgt)\n        loss_dict[\"g2g1_cycle\"] = g2g1_cycle.item()\n        loss_G += self.loss_weights[\"g2g1_cycle\"] * g2g1_cycle\n\n        self.set_optimizer(names=[\"netG1\", \"netG2\"], operation=\"zero_grad\")\n        loss_G.backward()\n        self.clip_grad_norm(names=[\"netG1\", \"netG2\"], norm=self.max_grad_norm)\n        self.set_optimizer(names=[\"netG1\", \"netG2\"], operation=\"step\")\n\n        ## update D1, D2\n        self.set_requires_grad([\"netD1\", \"netD2\"], True)\n\n        loss_D = 0\n        loss_d1 = self.calculate_gan_loss_D(\n            self.netD1, self.losses[\"g1d1_adv\"], self.tgt,\n            self.fake_tgt_buffer.choose(self.fake_tgt.detach())\n        )\n        loss_dict[\"d1_adv\"] = loss_d1.item()\n        loss_D += loss_d1\n\n        loss_d2 = self.calculate_gan_loss_D(\n            self.netD2, self.losses[\"g2d2_adv\"], self.src,\n            self.fake_src_buffer.choose(self.fake_src)\n        )\n        loss_dict[\"d2_adv\"] = loss_d2.item()\n        loss_D += loss_d2\n\n        self.set_optimizer(names=[\"netD1\", \"netD2\"], operation=\"zero_grad\")\n        loss_D.backward()\n        self.clip_grad_norm(names=[\"netD1\",\"netD2\"], norm=self.max_grad_norm)\n        self.set_optimizer(names=[\"netD1\", \"netD2\"], operation=\"step\")\n\n        self.log_dict = loss_dict\n    \n    def calculate_gan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n\n        loss_real = criterion(d_pred_real, True, is_disc=True)\n        loss_fake = criterion(d_pred_fake, False, is_disc=True)\n\n        return (loss_real + loss_fake) / 2\n\n    def calculate_gan_loss_G(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake)\n        loss_real = criterion(d_pred_fake, True, is_disc=False)\n\n        return loss_real\n\n    def test(self, data):\n        self.src = data[\"src\"].to(self.device)\n        self.netG1.eval()\n        with torch.no_grad():\n            self.fake_tgt = self.netG1(self.src)\n        self.netG1.train()\n\n    def get_current_visuals(self, need_GT=True):\n        out_dict = OrderedDict()\n        out_dict[\"lr\"] = self.src.detach()[0].float().cpu()\n        out_dict[\"sr\"] = self.fake_tgt.detach()[0].float().cpu()\n        return out_dict\n\nclass ShuffleBuffer():\n    \"\"\"Randomly choose previously generated images or ones produced by the latest generators.\n    :param buffer_size: the size of image buffer\n    :type buffer_size: int\n    \"\"\"\n\n    def __init__(self, buffer_size):\n        \"\"\"Initialize the ShuffleBuffer class.\n        :param buffer_size: the size of image buffer\n        :type buffer_size: int\n        \"\"\"\n        self.buffer_size = buffer_size\n        self.num_imgs = 0\n        self.images = []\n\n    def choose(self, images, prob=0.5):\n        \"\"\"Return an image from the pool.\n        :param images: the latest generated images from the generator\n        :type images: torch.Tensor\n        :param prob: probability (0~1) of returning previous images from the buffer\n        :type prob: float\n        :return: images chosen from the buffer\n        :rtype: torch.Tensor\n        \"\"\"\n        return_images = []\n        for image in images:\n            image = torch.unsqueeze(image.data, 0)\n            if self.num_imgs < self.buffer_size:\n                self.images.append(image)\n                
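# while the buffer is still filling, store the new image and return it directly\n          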
      return_images.append(image)\n                self.num_imgs += 1\n            else:\n                p = random.uniform(0, 1)\n                if p < prob:\n                    idx = random.randint(0, self.buffer_size - 1)\n                    stored_image = self.images[idx].clone()\n                    self.images[idx] = image\n                    return_images.append(stored_image)\n                else:\n                    return_images.append(image)\n        return_images = torch.cat(return_images, 0)\n        return return_images\n"
  },
  {
    "path": "codes/config/CycleSR/models/cyclesr_model.py",
    "content": "import logging\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.registry import MODEL_REGISTRY\n\nfrom .base_model import BaseModel\nfrom .cyclegan_model import ShuffleBuffer\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass CycleSRModel(BaseModel):\n    def __init__(self, opt):\n        super().__init__(opt)\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n        else:\n            self.rank = -1  # non dist training\n\n        self.data_names = [\"syn_lr\", \"syn_hr\", \"real_lr\"]\n\n        self.network_names = [\"netSR\", \"netG1\", \"netG2\", \"netD1\", \"netD2\", \"netD3\"]\n        self.networks = {}\n\n        self.loss_names = [\n            \"sr_adv\",\n            \"sr_pix\",\n            \"sr_pix_trans\",\n            \"sr_percep\",\n            \"g1_d1_adv\",\n            \"g2_d2_adv\",\n            \"g1_idt\",\n            \"g2_idt\",\n            \"g1g2_cycle\",\n            \"g2g1_cycle\",\n        ]\n        self.loss_weights = {}\n        self.losses = {}\n        self.optimizers = {}\n\n        # define networks and load pretrained models\n        nets_opt = opt[\"networks\"]\n        defined_network_names = list(nets_opt.keys())\n        assert set(defined_network_names).issubset(set(self.network_names))\n        \n        for name in defined_network_names:\n            setattr(self, name, self.build_network(nets_opt[name]))\n            self.networks[name] = getattr(self, name)\n            \n        if self.is_train:\n            train_opt = opt[\"train\"]\n             # setup loss, optimizers, schedulers\n            self.setup_train(train_opt)\n\n            self.max_grad_norm = train_opt[\"max_grad_norm\"]\n            # buffer\n            self.fake_src_buffer = ShuffleBuffer(train_opt[\"buffer_size\"])\n            self.fake_tgt_buffer = ShuffleBuffer(train_opt[\"buffer_size\"])\n\n    def feed_data(self, data):\n        \n        self.syn_lr = data[\"ref_src\"].to(self.device)\n        self.syn_hr = data[\"ref_tgt\"].to(self.device)\n        self.real_lr = data[\"src\"].to(self.device)\n\n    def forward_trans(self):\n        self.fake_real_lr = self.netG1(self.syn_lr)\n        self.fake_syn_hr = self.netSR(self.fake_real_lr)\n\n        self.rec_syn_lr = self.netG2(self.fake_real_lr)\n\n        self.fake_syn_lr = self.netG2(self.real_lr)\n        self.rec_real_lr = self.netG1(self.fake_syn_lr)\n    \n    def forward_sr(self):\n        self.fake_syn_hr = self.netSR(self.fake_real_lr.detach())\n        if self.losses.get(\"sr_adv\"):\n            self.fake_real_hr = self.netSR(self.real_lr)\n    \n    def optimize_trans_models(self, step, loss_dict):\n        # set D fixed\n        self.set_requires_grad([\"netD1\", \"netD2\", \"netSR\"], False)\n        self.forward_trans()\n\n        loss_trans = 0\n\n        g1_adv_loss = self.calculate_gan_loss_G(\n            self.netD1, self.losses[\"g1_d1_adv\"], self.real_lr, self.fake_real_lr\n        )\n        loss_dict[\"g1_adv\"] = g1_adv_loss.item()\n        loss_trans += self.loss_weights[\"g1_d1_adv\"] * g1_adv_loss\n\n        g2_adv_loss = self.calculate_gan_loss_G(\n            self.netD2, self.losses[\"g2_d2_adv\"], self.syn_lr, self.fake_syn_lr\n        )\n        loss_dict[\"g2_adv\"] = g2_adv_loss.item()\n        loss_trans += self.loss_weights[\"g2_d2_adv\"] * g2_adv_loss\n\n        g1g2_cycle = self.losses[\"g1g2_cycle\"](self.rec_syn_lr, self.syn_lr)\n        loss_dict[\"g1g2_cycle\"] = 
g1g2_cycle.item()\n        loss_trans += self.loss_weights[\"g1g2_cycle\"] * g1g2_cycle\n\n        if self.losses.get(\"g1_idt\"):\n            self.real_lr_idt = self.netG1(self.real_lr)\n            g1_idt = self.losses[\"g1_idt\"](self.real_lr, self.real_lr_idt)\n            loss_dict[\"g1_idt\"] = g1_idt.item()\n            loss_trans += self.loss_weights[\"g1_idt\"] * g1_idt\n        \n        if self.losses.get(\"g2_idt\"):\n            self.syn_lr_idt = self.netG2(self.syn_lr)\n            g2_idt = self.losses[\"g2_idt\"](self.syn_lr, self.syn_lr_idt)\n            loss_dict[\"g2_idt\"] = g2_idt.item()\n            loss_trans += self.loss_weights[\"g2_idt\"] * g2_idt\n\n        g2g1_cycle = self.losses[\"g2g1_cycle\"](self.rec_real_lr, self.real_lr)\n        loss_dict[\"g2g1_cycle\"] = g2g1_cycle.item()\n        loss_trans += self.loss_weights[\"g2g1_cycle\"] * g2g1_cycle\n\n        loss_sr_pix = self.losses[\"sr_pix_trans\"](self.fake_syn_hr, self.syn_hr)\n        loss_dict[\"sr_pix_trans\"] = loss_sr_pix.item()\n        loss_trans += self.loss_weights[\"sr_pix_trans\"] * loss_sr_pix\n\n        self.set_optimizer(names=[\"netG1\", \"netG2\"], operation=\"zero_grad\")\n        loss_trans.backward()\n        self.clip_grad_norm([\"netG1\", \"netG2\"], self.max_grad_norm)\n        self.set_optimizer(names=[\"netG1\", \"netG2\"], operation=\"step\")\n\n        ## update D1, D2\n        self.set_requires_grad([\"netD1\", \"netD2\"], True)\n\n        loss_d1d2 = 0\n        loss_d1 = self.calculate_gan_loss_D(\n            self.netD1, self.losses[\"g1_d1_adv\"], self.real_lr, self.fake_real_lr\n        )\n        loss_dict[\"d1_adv\"] = loss_d1.item()\n        loss_d1d2 += loss_d1\n\n        loss_d2 = self.calculate_gan_loss_D(\n            self.netD2, self.losses[\"g2_d2_adv\"], self.syn_lr, self.fake_syn_lr\n        )\n        loss_dict[\"d2_adv\"] = loss_d2.item()\n        loss_d1d2 += loss_d2\n\n        self.set_optimizer(names=[\"netD1\", \"netD2\"], operation=\"zero_grad\")\n        loss_d1d2.backward()\n        self.clip_grad_norm([\"netD1\", \"netD2\"], self.max_grad_norm)\n        self.set_optimizer(names=[\"netD1\", \"netD2\"], operation=\"step\")\n\n        return loss_dict\n    \n    def optimize_sr_models(self, step, loss_dict):\n\n        self.set_requires_grad([\"netSR\"], True)\n        self.forward_sr()\n\n        l_sr = 0\n\n        sr_pix = self.losses[\"sr_pix\"](self.syn_hr, self.fake_syn_hr)\n        loss_dict[\"sr_pix\"] = sr_pix.item()\n        l_sr += self.loss_weights[\"sr_pix\"] * sr_pix\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD3\"], False)\n            sr_adv_g = self.calculate_gan_loss_G(\n                self.netD3, self.losses[\"sr_adv\"], self.syn_hr, self.fake_syn_hr\n            )\n            loss_dict[\"sr_adv_g\"] = sr_adv_g.item()\n            l_sr += self.loss_weights[\"sr_adv\"] * sr_adv_g\n\n        if self.losses.get(\"sr_percep\"):\n            sr_percep, sr_style = self.losses[\"sr_percep\"](\n                self.syn_hr, self.fake_syn_hr\n            )\n            loss_dict[\"sr_percep\"] = sr_percep.item()\n            if sr_style is not None:\n                loss_dict[\"sr_style\"] = sr_style.item()\n                l_sr += self.loss_weights[\"sr_percep\"] * sr_style\n            l_sr += self.loss_weights[\"sr_percep\"] * sr_percep\n\n        self.set_optimizer(names=[\"netSR\"], operation=\"zero_grad\")\n        l_sr.backward()\n        self.clip_grad_norm([\"netSR\"], self.max_grad_norm)\n       
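 # update netSR first; its adversarial discriminator netD3 is trained below\n       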
 self.set_optimizer(names=[\"netSR\"], operation=\"step\")\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD3\"], True)\n            sr_adv_d = self.calculate_gan_loss_D(\n                self.netD3, self.losses[\"sr_adv\"], self.syn_hr, self.fake_syn_hr\n            )\n            loss_dict[\"sr_adv_d\"] = sr_adv_d.item()\n            loss_D = self.loss_weights[\"sr_adv\"] * sr_adv_d\n\n            self.optimizers[\"netD3\"].zero_grad()\n            loss_D.backward()\n            self.clip_grad_norm([\"netD3\"], self.max_grad_norm)\n            self.optimizers[\"netD3\"].step()\n        \n        return loss_dict\n\n    def optimize_parameters(self, step):\n        loss_dict = OrderedDict()\n\n        loss_dict = self.optimize_trans_models(step, loss_dict)\n        loss_dict = self.optimize_sr_models(step, loss_dict)\n\n        for k, v in loss_dict.items():\n            self.log_dict[k] = v\n    \n    def calculate_gan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n\n        loss_real = criterion(d_pred_real, True, is_disc=True)\n        loss_fake = criterion(d_pred_fake, False, is_disc=True)\n\n        return (loss_real + loss_fake) / 2\n\n    def calculate_gan_loss_G(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake)\n        loss_real = criterion(d_pred_fake, True, is_disc=False)\n\n        return loss_real\n\n    def test(self, data):\n        self.real_lr = data[\"src\"].to(self.device)\n        self.netSR.eval()\n        with torch.no_grad():\n            self.fake_real_hr = self.netSR(self.real_lr)\n        self.netSR.train()\n\n    def get_current_visuals(self, need_GT=True):\n        out_dict = OrderedDict()\n        out_dict[\"lr\"] = self.real_lr.detach()[0].float().cpu()\n        out_dict[\"sr\"] = self.fake_real_hr.detach()[0].float().cpu()\n        return out_dict\n"
  },
  {
    "path": "codes/config/CycleSR/options/test/sr/2017Track1.yml",
    "content": "#### general settings\nname: 2017Track1\nuse_tb_logger: false\nmodel: CycleSRModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [psnr, ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2017Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test2:\n  #   name: 2018Track2\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_mild.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test3:\n  #   name: 2018Track3\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_difficult.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test4:\n  #   name: 2018Track4\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_wild.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test5:\n  #   name: 2020Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1_valid_input.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2020/track1_valid_gt.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: log/CycleSR2017Track1/models/200000_netSR.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/CycleSR/options/test/sr/2018Track2.yml",
    "content": "#### general settings\nname: 2018Track2\nuse_tb_logger: false\nmodel: CycleSRModel\nscale: 4\ngpu_ids: [2]\n\nmetrics: [best_psnr, best_ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  # test1:\n  #   name: 2017Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2017/validx4.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  test2:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test3:\n  #   name: 2018Track3\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_difficult.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test4:\n  #   name: 2018Track4\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_wild.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test5:\n  #   name: 2020Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1_valid_input.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2020/track1_valid_gt.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: log/CycleSR2018Track2/models/latest_netSR.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/CycleSR/options/test/sr/2018Track4.yml",
    "content": "#### general settings\nname: 2018Track4\nuse_tb_logger: false\nmodel: CycleSRModel\nscale: 4\ngpu_ids: [3]\n\nmetrics: [best_psnr, best_ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  # test1:\n  #   name: 2017Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2017/validx4.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test2:\n  #   name: 2018Track2\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid_mild.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test3:\n  #   name: 2018Track3\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_difficult.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  test4:\n    name: 2018Track4\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test5:\n  #   name: 2020Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1_valid_input.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2020/track1_valid_gt.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: log/CycleSR2018Track4/models/latest_netSR.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/CycleSR/options/test/sr/2020Track1.yml",
    "content": "#### general settings\nname: 2020Track1\nuse_tb_logger: false\nmodel: CycleSRModel\nscale: 4\ngpu_ids: [0]\n\nmetrics: [psnr, ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  # test1:\n  #   name: 2017Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2017/validx4.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test2:\n  #   name: 2018Track2\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_mild.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test3:\n  #   name: 2018Track3\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_difficult.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test4:\n  #   name: 2018Track4\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_wild.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  test5:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: log/CycleSR2020Track1/models/200000_netSR.pth\n      strict_load: true\n"
  },
  {
    "path": "codes/config/CycleSR/options/test/sr/2020Track1_percep.yml",
    "content": "#### general settings\nname: 2020Track1_percep\nuse_tb_logger: false\nmodel: CycleSRModel\nscale: 4\ngpu_ids: [2]\n\nmetrics: [psnr, ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  # test1:\n  #   name: 2017Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2017/validx4.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test2:\n  #   name: 2018Track2\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_mild.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test3:\n  #   name: 2018Track3\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_difficult.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test4:\n  #   name: 2018Track4\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_wild.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  test5:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: log/CycleSR2020Track1_percep/models/200000_netSR.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/CycleSR/options/train/sr/psnr/2017Track2.yml",
    "content": "#### general settings\nname: CycleSR2017Track1\nuse_tb_logger: false\nmodel: CycleSRModel\nscale: 4\ngpu_ids: [3]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: PairedRefDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [1, 1]\n\n    dataroot_ref_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_ref_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/train_LR/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2017Track1_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  # netD3:\n  #   which_network: PatchGANDiscriminator\n  #   setting:\n  #     in_c: 3\n  #     nf: 64\n  #     nb: 3\n  #     stride: 2\n  #   pretrain:\n  #     path: ~\n  #     strict_load: true\n    \n  #### network structures  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n    pretrain:\n      path: log/Trans2017Track1/models/190000_netG1.pth\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain:\n      path: log/Trans2017Track1/models/190000_netD1.pth\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n    pretrain:\n      path: log/Trans2017Track1/models/190000_netG2.pth\n      strict_load: true\n\n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain:\n      path: log/Trans2017Track1/models/190000_netD2.pth\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  max_grad_norm: 50.0\n  buffer_size: 16\n\n  losses:\n    # sr_adv:\n    #   type: GANLoss\n    #   gan_type: lsgan\n    #   real_label_val: 1.0\n    #   fake_label_val: 0.0\n    #   weight: !!float 0.0\n\n    sr_pix_trans: \n      type: MSELoss\n      weight: 1000.0\n    \n    sr_pix:\n      type: MSELoss\n      weight: 1.0\n\n    # sr_percep:\n    #   type: PerceptualLoss\n    #   layer_weights:\n    #     'conv5_4': 1  # before relu\n    #   vgg_type: vgg19\n    #   use_input_norm: true\n    #   range_norm: false\n    #   perceptual_weight: 1.0\n    #   style_weight: 0\n    #   criterion: l1\n    #   weight: !!float 0.0\n\n    g1_d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n\n    g2_d2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    g1_idt:\n      type: L1Loss\n      weight: 5\n    \n    g2_idt:\n      type: L1Loss\n      weight: 5\n\n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n    g2g1_cycle: \n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n        lr: !!float 1e-4\n        betas: 
[0.5, 0.999]\n    netSR: ~\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n    # netD3: ~\n  \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/CycleSR/options/train/sr/psnr/2018Track2.yml",
    "content": "#### general settings\nname: CycleSR2017Track1\nuse_tb_logger: false\nmodel: CycleSRModel\nscale: 4\ngpu_ids: [0]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: PairedRefDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_ref_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_ref_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track1_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\n# netSR:\n#   which_network: RRDBNet\n#   setting:\n#     in_nc: 3\n#     out_nc: 3\n#     nf: 64\n#     nb: 23\n#     upscale: 4\n#   pretrain:\n#     path: ../../../checkpoints/ESRGAN/RRDB_PSNR_x4.pth\n#     strict_load: true\n\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  # netD3:\n  #   which_network: PatchGANDiscriminator\n  #   setting:\n  #     in_c: 3\n  #     nf: 64\n  #     nb: 3\n  #     stride: 2\n  #   pretrain:\n  #     path: ~\n  #     strict_load: true\n    \n  #### network structures  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n    pretrain:\n      path: log/Trans2018Track2/models/latest_netG1.pth\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain:\n      path: log/Trans2018Track2/models/latest_netD1.pth\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n    pretrain:\n      path: log/Trans2018Track2/models/latest_netG2.pth\n      strict_load: true\n\n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain:\n      path: log/Trans2018Track2/models/latest_netD2.pth\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  max_grad_norm: 50.0\n  buffer_size: 16\n\n  losses:\n    # sr_adv:\n    #   type: GANLoss\n    #   gan_type: lsgan\n    #   real_label_val: 1.0\n    #   fake_label_val: 0.0\n    #   weight: !!float 0.0\n\n    sr_pix_trans: \n      type: MSELoss\n      weight: 1000.0\n    \n    sr_pix:\n      type: MSELoss\n      weight: 1.0\n\n    # sr_percep:\n    #   type: PerceptualLoss\n    #   layer_weights:\n    #     'conv5_4': 1  # before relu\n    #   vgg_type: vgg19\n    #   use_input_norm: true\n    #   range_norm: false\n    #   perceptual_weight: 1.0\n    #   style_weight: 0\n    #   criterion: l1\n    #   weight: !!float 0.0\n\n    g1_d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n\n    g2_d2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    g1_idt:\n      type: L1Loss\n      weight: 5\n 
   \n    g2_idt:\n      type: L1Loss\n      weight: 5\n\n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n    g2g1_cycle: \n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n        lr: !!float 1e-4\n        betas: [0.5, 0.999]\n    netSR: ~\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n    # netD3: ~\n  \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/CycleSR/options/train/sr/psnr/2018Track4.yml",
    "content": "#### general settings\nname: CycleSR2018Track4\nuse_tb_logger: false\nmodel: CycleSRModel\nscale: 4\ngpu_ids: [0]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: PairedRefDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 50]\n\n    dataroot_ref_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_ref_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/x4.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track4_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\n# netSR:\n#   which_network: RRDBNet\n#   setting:\n#     in_nc: 3\n#     out_nc: 3\n#     nf: 64\n#     nb: 23\n#     upscale: 4\n#   pretrain:\n#     path: ../../../checkpoints/ESRGAN/RRDB_PSNR_x4.pth\n#     strict_load: true\n\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  # netD3:\n  #   which_network: PatchGANDiscriminator\n  #   setting:\n  #     in_c: 3\n  #     nf: 64\n  #     nb: 3\n  #     stride: 2\n  #   pretrain:\n  #     path: ~\n  #     strict_load: true\n    \n  #### network structures  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n    pretrain:\n      path: log/Trans2018Track4/models/latest_netG1.pth\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain:\n      path: log/Trans2018Track4/models/latest_netD1.pth\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n    pretrain:\n      path: log/Trans2018Track4/models/latest_netG2.pth\n      strict_load: true\n\n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain:\n      path: log/Trans2018Track4/models/latest_netD2.pth\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  max_grad_norm: 50.0\n  buffer_size: 16\n\n  losses:\n    # sr_adv:\n    #   type: GANLoss\n    #   gan_type: lsgan\n    #   real_label_val: 1.0\n    #   fake_label_val: 0.0\n    #   weight: !!float 0.0\n\n    sr_pix_trans: \n      type: MSELoss\n      weight: 1000.0\n    \n    sr_pix:\n      type: MSELoss\n      weight: 1.0\n\n    # sr_percep:\n    #   type: PerceptualLoss\n    #   layer_weights:\n    #     'conv5_4': 1  # before relu\n    #   vgg_type: vgg19\n    #   use_input_norm: true\n    #   range_norm: false\n    #   perceptual_weight: 1.0\n    #   style_weight: 0\n    #   criterion: l1\n    #   weight: !!float 0.0\n\n    g1_d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n\n    g2_d2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    g1_idt:\n      type: L1Loss\n      weight: 5\n    \n    
g2_idt:\n      type: L1Loss\n      weight: 5\n\n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n    g2g1_cycle: \n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n      lr: !!float 1e-4\n      betas: [0.5, 0.999]\n    netSR: ~\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n    # netD3: ~\n  \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/CycleSR/options/train/sr/psnr/2020Track1.yml",
    "content": "#### general settings\nname: CycleSR2020Track1\nuse_tb_logger: false\nmodel: CycleSRModel\nscale: 4\ngpu_ids: [4]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: PairedRefDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 50]\n\n    dataroot_ref_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_ref_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/train_source.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2020Track1_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\n# netSR:\n#   which_network: RRDBNet\n#   setting:\n#     in_nc: 3\n#     out_nc: 3\n#     nf: 64\n#     nb: 23\n#     upscale: 4\n#   pretrain:\n#     path: ../../../checkpoints/ESRGAN/RRDB_PSNR_x4.pth\n#     strict_load: true\n\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain:\n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  # netD3:\n  #   which_network: PatchGANDiscriminator\n  #   setting:\n  #     in_c: 3\n  #     nf: 64\n  #     nb: 3\n  #     stride: 2\n  #   pretrain:\n  #     path: ~\n  #     strict_load: true\n    \n  #### network structures  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n    pretrain:\n      path: log/Trans2020Track1/models/latest_netG1.pth\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain:\n      path: log/Trans2020Track1/models/latest_netD1.pth\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n    pretrain:\n      path: log/Trans2020Track1/models/latest_netG2.pth\n      strict_load: true\n\n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain:\n      path: log/Trans2020Track1/models/latest_netD2.pth\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  max_grad_norm: 50.0\n  buffer_size: 16\n\n  losses:\n    # sr_adv:\n    #   type: GANLoss\n    #   gan_type: lsgan\n    #   real_label_val: 1.0\n    #   fake_label_val: 0.0\n    #   weight: !!float 0.0\n\n    sr_pix_trans: \n      type: MSELoss\n      weight: 1000.0\n    \n    sr_pix:\n      type: MSELoss\n      weight: 1.0\n\n    # sr_percep:\n    #   type: PerceptualLoss\n    #   layer_weights:\n    #     'conv5_4': 1  # before relu\n    #   vgg_type: vgg19\n    #   use_input_norm: true\n    #   range_norm: false\n    #   perceptual_weight: 1.0\n    #   style_weight: 0\n    #   criterion: l1\n    #   weight: !!float 0.0\n\n    g1_d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n\n    g2_d2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    g1_idt:\n      type: L1Loss\n      weight: 5\n    \n    
g2_idt:\n      type: L1Loss\n      weight: 5\n\n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n    g2g1_cycle: \n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n      lr: !!float 1e-4\n      betas: [0.5, 0.999]\n    netSR: ~\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n    # netD3: ~\n  \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/CycleSR/options/train/trans/2017Track2.yml",
    "content": "#### general settings\nname: Trans2017Track1\nuse_tb_logger: false\nmodel: CycleGANModel\nscale: 1\ngpu_ids: [3]\nmetrics: [psnr, ssim]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [1, 1]\n    \n    dataroot_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4_half.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/NTIRE2017/train_LR/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 32\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: DIV2K\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/DIV2K_valid/BicLR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n\n#### network structures\nnetworks:\n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n    pretrain:\n      path: ~\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n      zero_tail: true\n    pretrain:\n      path: ~\n      strict_load: true\n\n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  buffer_size: 16\n  max_grad_norm: 50\n  \n  losses:\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n\n    g2d2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n      \n    g1_idt:\n      type: L1Loss\n      weight: 5.0\n    \n    g2_idt:\n      type: L1Loss\n      weight: 5.0\n\n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n    g2g1_cycle: \n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n        lr: !!float 2e-4\n        betas: [0.5, 0.999]\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n  \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/CycleSR/options/train/trans/2018Track2.yml",
    "content": "#### general settings\nname: Trans2018Track2\nuse_tb_logger: false\nmodel: CycleGANModel\nscale: 1\ngpu_ids: [0]\nmetrics: [psnr, ssim]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n    \n    dataroot_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4_half.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/track2/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 32\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: DIV2K\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/DIV2K_valid/BicLR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/track2/valid_mild.lmdb\n\n#### network structures\nnetworks:\n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n    pretrain:\n      path: ~\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n      zero_tail: true\n    pretrain:\n      path: ~\n      strict_load: true\n\n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  buffer_size: 16\n  max_grad_norm: 50\n  \n  losses:\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n\n    g2d2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n      \n    g1_idt:\n      type: L1Loss\n      weight: 5.0\n    \n    g2_idt:\n      type: L1Loss\n      weight: 5.0\n\n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n    g2g1_cycle: \n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n        lr: !!float 2e-4\n        betas: [0.5, 0.999]\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n  \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/CycleSR/options/train/trans/2018Track4.yml",
    "content": "#### general settings\nname: Trans2018Track4\nuse_tb_logger: false\nmodel: CycleGANModel\nscale: 1\ngpu_ids: [1]\nmetrics: [psnr, ssim]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n    \n    dataroot_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/track4/x4.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 32\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: DIV2K\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/DIV2K_valid/BicLR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/track4/valid_wild.lmdb\n\n#### network structures\nnetworks:\n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n    pretrain:\n      path: ~\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n      zero_tail: true\n    pretrain:\n      path: ~\n      strict_load: true\n\n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  buffer_size: 16\n  max_grad_norm: 50\n  \n  losses:\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n\n    g2d2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n      \n    g1_idt:\n      type: L1Loss\n      weight: 5.0\n    \n    g2_idt:\n      type: L1Loss\n      weight: 5.0\n\n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n    g2g1_cycle: \n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n        lr: !!float 2e-4\n        betas: [0.5, 0.999]\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n  \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/CycleSR/options/train/trans/2020Track1.yml",
    "content": "#### general settings\nname: Trans2020Track1\nuse_tb_logger: false\nmodel: CycleGANModel\nscale: 1\ngpu_ids: [1]\nmetrics: [psnr, ssim]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n    \n    dataroot_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/NTIRE2020/track1/train_source.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 32\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: DIV2K\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/DIV2K_valid/BicLR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/NTIRE2020/track1/valid.lmdb\n\n#### network structures\nnetworks:\n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n    pretrain:\n      path: ~\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      scale: 1\n      zero_tail: true\n    pretrain:\n      path: ~\n      strict_load: true\n\n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  buffer_size: 16\n  max_grad_norm: 50\n  \n  losses:\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n\n    g2d2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n      \n    g1_idt:\n      type: L1Loss\n      weight: 5.0\n    \n    g2_idt:\n      type: L1Loss\n      weight: 5.0\n\n    g1g2_cycle:\n      type: L1Loss\n      weight: 10.0\n\n    g2g1_cycle: \n      type: L1Loss\n      weight: 10.0\n\n  optimizers:\n    default:\n      type: Adam\n        lr: !!float 2e-4\n        betas: [0.5, 0.999]\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n  \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/CycleSR/test.py",
    "content": "import argparse\nimport logging\nimport os.path\nimport sys\nimport time\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\nfrom utils import bgr2ycbcr, imresize\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=False)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    util.mkdirs(\n        (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n    )\n\n    os.system(\"rm ./result\")\n    os.symlink(os.path.join(opt[\"path\"][\"results_root\"], \"..\"), \"./result\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    torch.backends.cudnn.benchmark = True\n\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"test_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO,\n        screen=True,\n        tofile=True,\n    )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    logger = logging.getLogger(\"base\")\n    logger.info(option.dict2str(opt))\n\n    # Create test dataset and dataloader\n    test_datasets = []\n    test_loaders = []\n\n    for phase, dataset_opt in 
sorted(opt[\"datasets\"].items()):\n\n        test_set = create_dataset(dataset_opt)\n        test_loader = create_dataloader(test_set, dataset_opt, opt[\"dist\"])\n\n        if rank == 0:\n            logger.info(\n                \"Number of test images in [{:s}]: {:d}\".format(\n                    dataset_opt[\"name\"], len(test_set)\n                )\n            )\n        test_datasets.append(test_set)\n        test_loaders.append(test_loader)\n\n    # load pretrained model by default\n    model = create_model(opt)\n\n    for test_dataset, test_loader in zip(test_datasets, test_loaders):\n\n        test_set_name = test_dataset.opt[\"name\"]\n        dataset_dir = os.path.join(opt[\"path\"][\"results_root\"], test_set_name)\n\n        if rank == 0:\n            logger.info(\"\\nTesting [{:s}]...\".format(test_set_name))\n            util.mkdir(dataset_dir)\n\n        validate(\n            model,\n            test_dataset,\n            test_loader,\n            opt,\n            measure,\n            dataset_dir,\n            test_set_name,\n            logger,\n        )\n\n\ndef validate(\n    model, dataset, dist_loader, opt, measure, dataset_dir, test_set_name, logger\n):\n\n    test_results = {}\n    test_results_y = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n        test_results_y[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        test_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        img_path = test_data[\"src_path\"][0]\n        img_name = img_path.split(\"/\")[-1].split(\".\")[0]\n\n        model.test(test_data)\n        visuals = model.get_current_visuals()\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n       \n        suffix = opt[\"suffix\"]\n        if suffix:\n            save_img_path = os.path.join(dataset_dir, img_name + suffix + \".png\")\n        else:\n            save_img_path = os.path.join(dataset_dir, img_name + \".png\")\n        util.save_img(sr_img, save_img_path)\n\n        message = \"img:{:15s}; \".format(img_name)\n\n        crop_border = opt[\"crop_border\"] if opt[\"crop_border\"] else opt[\"scale\"]\n\n        if crop_border == 0:\n            cropped_sr_img = sr_img\n        else:\n            cropped_sr_img = sr_img[\n                crop_border:-crop_border, crop_border:-crop_border, :\n            ]\n\n        if \"tgt\" in test_data.keys():\n            gt_img = util.tensor2img(test_data[\"tgt\"][0].double().cpu())\n\n            if crop_border == 0:\n                cropped_gt_img = gt_img\n            else:\n                cropped_gt_img = gt_img[\n                    crop_border:-crop_border, crop_border:-crop_border, :\n                ]\n        else:\n            cropped_gt_img = None\n        \n        message += \"Scores - \"\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n            message += \"{}: {:.6f}; \".format(k, v)\n\n        if sr_img.shape[2] == 3:  # RGB image\n            sr_img_y = bgr2ycbcr(sr_img, only_y=True)\n            if crop_border == 0:\n                cropped_sr_img_y = sr_img_y * 255\n            else:\n                cropped_sr_img_y = (\n      
              sr_img_y[crop_border:-crop_border, crop_border:-crop_border] * 255\n                )\n            if gt_img is not None:\n                gt_img_y = bgr2ycbcr(gt_img, only_y=True)\n                if crop_border == 0:\n                    cropped_gt_img_y = gt_img_y * 255\n                else:\n                    cropped_gt_img_y = (\n                        gt_img_y[crop_border:-crop_border, crop_border:-crop_border]\n                        * 255\n                    )\n            else:\n                gt_img_y = None\n\n            message += \"Y Scores - \"\n            scores = measure(\n                res=cropped_sr_img_y, ref=cropped_gt_img_y, metrics=opt[\"metrics\"]\n            )\n            for k, v in scores.items():\n                test_results_y[k][idx] = v\n                message += \"{}: {:.6f}; \".format(k, v)\n\n        logger.info(message)\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n        for k, v in test_results_y.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    # log\n    avg_results = {}\n    message = \"Average Results for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger.info(message)\n\n    avg_results_y = {}\n    message = \"Average Results on Y channel for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results_y.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger.info(message)\n\n\nif __name__ == \"__main__\":\n    main()\n"
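\n# Usage sketch (the option file below is hypothetical; point --opt at a real test yaml):\n#   python3 test.py --opt options/test/<config>.yml\n"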
  },
  {
    "path": "codes/config/CycleSR/train.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport random\nimport sys\nimport time\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef setup_dataloaer(opt, logger):\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        rank = 0\n        world_size = 1\n\n    for phase, dataset_opt in opt[\"datasets\"].items():\n        if phase == \"train\":\n            train_set = create_dataset(dataset_opt)\n            train_loader = create_dataloader(train_set, dataset_opt, opt[\"dist\"])\n            total_iters = opt[\"train\"][\"niter\"]\n            total_epochs = total_iters // (len(train_loader) - 1) + 1\n            if rank == 0:\n                logger.info(\n                    \"Number of train images: {:,d}, iters: {:,d}\".format(\n                        len(train_set), len(train_loader)\n                    )\n                )\n                logger.info(\n                    \"Total epochs needed: {:d} for iters {:,d}\".format(\n                        total_epochs, opt[\"train\"][\"niter\"]\n                    )\n                )\n\n        elif phase == \"val\":\n            val_set = create_dataset(dataset_opt)\n            val_loader = create_dataloader(val_set, dataset_opt, opt[\"dist\"])\n            if rank == 0:\n                logger.info(\n                    \"Number of val images in [{:s}]: {:d}\".format(\n                        dataset_opt[\"name\"], len(val_set)\n                    )\n                )\n        else:\n            raise NotImplementedError(\"Phase [{:s}] is not recognized.\".format(phase))\n\n    assert train_loader is not None\n    assert val_loader is not None\n\n    return train_set, train_loader, val_set, val_loader, total_iters, total_epochs\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=True)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = 
torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    if opt[\"train\"].get(\"resume_state\", None) is None:\n        util.mkdir_and_rename(\n            opt[\"path\"][\"experiments_root\"]\n        )  # rename experiment folder if exists\n        util.mkdirs(\n            (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n        )\n        os.system(\"rm ./log\")\n        os.symlink(os.path.join(opt[\"path\"][\"experiments_root\"], \"..\"), \"./log\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: \\\n            {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    # honor the manual seed if one is given; otherwise fall back to the process rank\n    seed = opt[\"train\"][\"manual_seed\"]\n    if seed is None:\n        seed = rank\n    util.set_random_seed(seed)\n\n    torch.backends.cudnn.benchmark = True\n    # torch.backends.cudnn.deterministic = True\n\n    # setup tensorboard and val logger\n    if rank == 0:\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger = SummaryWriter(log_dir=\"log/{}/tb_logger/\".format(opt[\"name\"]))\n\n        util.setup_logger(\n            \"val\",\n            opt[\"path\"][\"log\"],\n            \"val_\" + opt[\"name\"],\n            level=logging.INFO,\n            screen=True,\n            tofile=True,\n        )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    # configure loggers; logging does not work until this is done\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"train_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO if rank == 0 else logging.ERROR,\n        screen=True,\n        tofile=True,\n    )\n\n    logger = logging.getLogger(\"base\")\n    if rank == 0:\n        logger.info(option.dict2str(opt))\n\n    # create dataset\n    (\n        train_set,\n        train_loader,\n        val_set,\n        val_loader,\n        total_iters,\n        total_epochs,\n    ) = setup_dataloader(opt, logger)\n\n    # create model\n    model = create_model(opt)\n\n    # loading resume state if exists\n    if opt[\"train\"].get(\"resume_state\", None):\n        # distributed resuming: all load into default GPU\n        device_id = gpu\n        resume_state = torch.load(\n            opt[\"train\"][\"resume_state\"],\n            map_location=lambda storage, loc: storage.cuda(device_id),\n        )\n\n        logger.info(\n            \"Resuming training from epoch: {}, iter: {}.\".format(\n                resume_state[\"epoch\"], resume_state[\"iter\"]\n            )\n        )\n\n        start_epoch = resume_state[\"epoch\"]\n        current_step = resume_state[\"iter\"]\n        model.resume_training(resume_state)  # handle optimizers and schedulers\n\n    else:\n        current_step = 0\n        start_epoch = 0\n\n    logger.info(\n        \"Start training from epoch: {:d}, iter: {:d}\".format(start_epoch, current_step)\n    )\n    data_time, iter_time = time.time(), time.time()\n    avg_data_time = avg_iter_time = 0\n    count = 0\n    for epoch in range(start_epoch, total_epochs + 1):\n        for _, train_data in enumerate(train_loader):\n\n            current_step += 1\n            count += 1\n            if current_step > total_iters:\n                break\n\n            data_time = time.time() - data_time\n            avg_data_time = (avg_data_time * (count - 1) + data_time) / count\n\n            model.feed_data(train_data)\n            model.optimize_parameters(current_step)\n            model.update_learning_rate(\n                current_step, warmup_iter=opt[\"train\"][\"warmup_iter\"]\n            )\n\n            iter_time = time.time() - iter_time\n            avg_iter_time = (avg_iter_time * (count - 1) + iter_time) / count\n\n            # log\n            if current_step % opt[\"logger\"][\"print_freq\"] == 0:\n                logs = model.get_current_log()\n                message = (\n                    f\"<epoch:{epoch:3d}, iter:{current_step:8,d}, \"\n                    f\"lr:{model.get_current_learning_rate():.3e}> \"\n                )\n\n                message += f'[time (data): {avg_iter_time:.3f} ({avg_data_time:.3f})] '\n                for k, v in logs.items():\n                    message += \"{:s}: {:.4e}; \".format(k, v)\n                    # tensorboard logger\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        if rank == 0:\n                            tb_logger.add_scalar(k, v, current_step)\n                logger.info(message)\n\n            # validation\n            if current_step % opt[\"train\"][\"val_freq\"] == 0:\n\n                avg_results = validate(\n                    model, val_set, val_loader, opt, measure, epoch, current_step\n                )\n\n                # tensorboard logger; kept inside the validation branch so that\n                # avg_results is always defined when it is used here\n                if rank == 0:\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        for k, v in avg_results.items():\n                            tb_logger.add_scalar(k, v, current_step)\n\n            # save models and training states\n            if current_step % opt[\"logger\"][\"save_checkpoint_freq\"] == 0:\n                if rank == 0:\n                    logger.info(\"Saving models and training states.\")\n                    model.save(current_step)\n                    model.save_training_state(epoch, current_step)\n\n            data_time = time.time()\n            iter_time = time.time()\n\n    if rank == 0:\n        logger.info(\"Saving the final model.\")\n        model.save(\"latest\")\n        logger.info(\"End of training.\")\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger.close()\n\n\ndef validate(model, dataset, dist_loader, opt, measure, epoch, current_step):\n\n    test_results = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    if rank == 0:\n        pbar = tqdm(total=len(dataset), leave=False, dynamic_ncols=True)\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        val_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        LR_img = val_data[\"src\"]\n        lr_img = util.tensor2img(LR_img)  # save LR image for reference\n\n        model.test(val_data)\n        visuals = model.get_current_visuals()\n\n        # Save images for reference\n        img_name = val_data[\"src_path\"][0].split(\"/\")[-1].split(\".\")[0]\n        img_dir = os.path.join(opt[\"path\"][\"val_images\"], img_name)\n\n        util.mkdir(img_dir)\n        save_lr_path = os.path.join(img_dir, \"{:s}_LR.png\".format(img_name))\n        util.save_img(lr_img, save_lr_path)\n\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n        save_img_path = os.path.join(\n            img_dir, \"{:s}_{:d}.png\".format(img_name, current_step)\n        )\n        util.save_img(sr_img, save_img_path)\n\n        if \"fake_lr\" in visuals.keys():\n            fake_lr_img = util.tensor2img(visuals[\"fake_lr\"])\n            save_img_path = os.path.join(\n                img_dir, f\"fake_lr_{current_step:d}.png\"\n            )\n            util.save_img(fake_lr_img, save_img_path)\n\n        # calculate scores\n        crop_size = opt[\"scale\"]\n        cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        if \"tgt\" in val_data.keys():\n            gt_img = util.tensor2img(val_data[\"tgt\"])\n            cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        else:\n            cropped_gt_img = gt_img = None\n\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n\n        if rank == 0:\n            for _ in range(world_size):\n                pbar.update(1)\n    if rank == 0:\n        pbar.close()\n\n    # log\n    avg_results = {}\n    message = \" <epoch:{:3d}, iter:{:8,d}> Average scores:\\t\".format(\n        epoch, current_step\n    )\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger_val = logging.getLogger(\"val\")  # validation logger\n        logger_val.info(message)\n\n    del test_results\n    torch.cuda.empty_cache()\n    return avg_results\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/config/DSGANSR/README.md",
    "content": "This repo supports the training of the degradation model DSGAN proposed in [Frequency Separation for Real-World Super-Resolution](https://arxiv.org/abs/1911.07850)"
  },
  {
    "path": "codes/config/DSGANSR/archs/__init__.py",
    "content": "import importlib\nimport os\nimport os.path as osp\n\nfrom utils.registry import ARCH_REGISTRY, LOSS_REGISTRY, LR_SCHEDULER_REGISTRY\n\narch_folder = osp.dirname(osp.abspath(__file__))\narch_filenames = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(arch_folder)\n    if v.endswith(\".py\")\n]\n# import all the arch modules\n_arch_modules = [\n    importlib.import_module(f\"archs.{file_name}\") for file_name in arch_filenames\n]\n\n\ndef build_network(net_opt):\n    which_network = net_opt[\"which_network\"]\n    net = ARCH_REGISTRY.get(which_network)(**net_opt[\"setting\"])\n    return net\n\n\ndef build_loss(loss_opt):\n    loss_type = loss_opt.pop(\"type\")\n    loss = LOSS_REGISTRY.get(loss_type)(**loss_opt)\n    return loss\n\ndef build_scheduler(optimizer, scheduler_opt):\n    scheduler_type = scheduler_opt.pop(\"type\")\n    scheduler = LR_SCHEDULER_REGISTRY.get(scheduler_type)(optimizer, **scheduler_opt)\n    return scheduler\n"
  },
  {
    "path": "codes/config/DSGANSR/archs/deg_arch.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom utils.registry import ARCH_REGISTRY\nfrom kornia.color import yuv\n\n\nclass ResBlock(nn.Module):\n    def __init__(self, nf, ksize, norm=nn.BatchNorm2d, act=nn.ReLU):\n        super().__init__()\n        \n        self.nf = nf\n        self.body = nn.Sequential(\n            nn.Conv2d(nf, nf, ksize, 1, ksize//2),\n            norm(nf), act(),\n            nn.Conv2d(nf, nf, ksize, 1, ksize//2)\n        )\n    \n    def forward(self, x):\n        return torch.add(x, self.body(x))\n\n\n@ARCH_REGISTRY.register()\nclass DegModel(nn.Module):\n    def __init__(\n        self,  scale=4, nc_img=3, kernel_opt=None, noise_opt=None\n    ):\n        super().__init__()\n\n        self.scale = scale\n\n        self.head = nn.Conv2d()\n\n        self.kernel_opt = kernel_opt\n        self.noise_opt = noise_opt\n\n        if kernel_opt is not None:\n            nc, nf, nb = kernel_opt[\"nc\"], kernel_opt[\"nf\"], kernel_opt[\"nb\"]\n            ksize = kernel_opt[\"ksize\"]\n            mix = kernel_opt[\"mix\"]\n            in_nc = nc + nc_img if mix else nc\n\n            spatial = kernel_opt[\"spatial\"]\n            if spatial:\n                head_k = kernel_opt[\"head_k\"]\n                body_k = kernel_opt[\"body_k\"]\n            else:\n                head_k = body_k = 1\n\n            deg_kernel = [\n                nn.Conv2d(in_nc, nf, head_k, 1, head_k//2),\n                nn.BatchNorm2d(nf), nn.ReLU(),\n                *[\n                    ResBlock(nf=nf, ksize=body_k)\n                    for _ in range(nb)\n                    ],\n                nn.Conv2d(nf, ksize ** 2, 1, 1, 0),\n                nn.Softmax(1)\n            ]\n            self.deg_kernel = nn.Sequential(*deg_kernel)\n\n            if kernel_opt[\"zero_init\"]:\n                nn.init.constant_(self.deg_kernel[-2].weight, 0)\n                nn.init.constant_(self.deg_kernel[-2].bias, 0)\n                self.deg_kernel[-2].bias.data[ksize**2//2] = 1\n\n            self.pad = nn.ReflectionPad2d(ksize//2)\n\n        if noise_opt is not None:\n            nc, nf, nb = noise_opt[\"nc\"], noise_opt[\"nf\"], noise_opt[\"nb\"]\n            mix = noise_opt[\"mix\"]\n            in_nc = nc + nc_img if mix else nc\n\n            head_k = noise_opt[\"head_k\"]\n            body_k = noise_opt[\"body_k\"]\n\n            deg_noise = [\n                nn.Conv2d(in_nc, nf, head_k, 1, head_k//2),\n                nn.BatchNorm2d(nf), nn.ReLU(),\n                *[\n                    ResBlock(nf=nf, ksize=body_k)\n                    for _ in range(nb)\n                    ],\n                nn.Conv2d(nf, noise_opt[\"dim\"], head_k, 1, head_k//2),\n                # nn.Sigmoid()\n            ]\n            self.deg_noise = nn.Sequential(*deg_noise)\n            if noise_opt[\"zero_init\"]:\n                nn.init.constant_(self.deg_noise[-1].weight, 0)\n                nn.init.constant_(self.deg_noise[-1].bias, 0)\n        \n    def forward(self, inp):\n        B, C, H, W = inp.shape\n        h = H // self.scale\n        w = W // self.scale\n\n        # kernel\n        if self.kernel_opt is not None:\n            if self.kernel_opt[\"mix\"]:\n                inp_k = F.interpolate(inp, scale_factor=1/self.scale, mode=\"bicubic\", align_corners=False)\n                if self.kernel_opt[\"nc\"] > 0:\n                    nc = self.kernel_opt[\"nc\"]\n                    if self.kernel_opt[\"spatial\"]:\n                        zk = 
torch.randn(B, nc, h, w).to(inp.device)\n                    else:\n                        zk = torch.randn(B, nc, 1, 1).to(inp.device)\n                    inp_k = torch.cat([inp_k, zk], 1)\n            else:\n                nc = self.kernel_opt[\"nc\"]\n                if self.kernel_opt[\"spatial\"]:\n                    inp_k = torch.randn(B, nc, h, w).to(inp.device)\n                else:\n                    inp_k = torch.randn(B, nc, 1, 1).to(inp.device)\n            \n            ksize = self.kernel_opt[\"ksize\"]\n            kernel = self.deg_kernel(inp_k).view(B, 1, ksize**2, *inp_k.shape[2:])\n\n            x = inp.view(B*C, 1, H, W)\n            x = F.unfold(\n                self.pad(x), kernel_size=ksize, stride=self.scale, padding=0\n            ).view(B, C, ksize**2, h, w)\n\n            x = torch.mul(x, kernel).sum(2).view(B, C, h, w)\n            kernel = kernel.view(B, ksize**2, *inp_k.shape[2:])\n        else:\n            x = F.interpolate(inp, scale_factor=1/self.scale, mode=\"bicubic\", align_corners=False)\n            kernel = None\n\n        # noise\n        if self.noise_opt is not None:\n            if self.noise_opt[\"mix\"]:\n                # inp_n = x.detach()\n                inp_n = F.interpolate(inp, scale_factor=1/self.scale, mode=\"bicubic\", align_corners=False)\n                if self.noise_opt[\"nc\"] > 0:\n                    nc = self.noise_opt[\"nc\"]\n                    zn = torch.randn(B, nc, h, w).to(inp.device)\n                    inp_n = torch.cat([inp_n, zn], 1)\n            else:\n                nc = self.noise_opt[\"nc\"]\n                inp_n = torch.randn(B, nc, h, w).to(inp.device)\n\n            noise = self.deg_noise(inp_n)\n\n            x = x + noise\n\n        else:\n\n            noise = None\n        \n        return x, kernel, noise\n\n"
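\n# Usage sketch (the argument values are assumptions for illustration; the real\n# kernel_opt/noise_opt settings live in the training yaml files):\n#   deg = DegModel(scale=4, kernel_opt=..., noise_opt=...)\n#   lr, kernel, noise = deg(hr)  # hr: (B, 3, H, W) -> lr: (B, 3, H//4, W//4)\n"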
  },
  {
    "path": "codes/config/DSGANSR/archs/discriminator.py",
    "content": "import torch\nimport torch.nn as nn\nimport torchvision\nimport functools\n\nfrom utils.registry import ARCH_REGISTRY\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG128(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512 * 4 * 4, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG32(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = 
nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass PatchGANDiscriminator(nn.Module):\n    \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n    def __init__(self, in_c, nf, nb, stride=1, norm_layer=nn.InstanceNorm2d):\n        \"\"\"Construct a PatchGAN discriminator\n\n        Parameters:\n            input_nc (int)  -- the number of channels in input images\n            ndf (int)       -- the number of filters in the last conv layer\n            n_layers (int)  -- the number of conv layers in the discriminator\n            norm_layer      -- normalization layer\n        \"\"\"\n        super().__init__()\n        if (\n            type(norm_layer) == functools.partial\n        ):  # no need to use bias as BatchNorm2d has affine parameters\n            use_bias = norm_layer.func == nn.InstanceNorm2d\n        else:\n            use_bias = norm_layer == nn.InstanceNorm2d\n\n        kw = 3\n        padw = 1\n        sequence = [\n            nn.Conv2d(in_c, nf, kernel_size=kw, stride=1, padding=padw),\n            nn.LeakyReLU(0.2, True),\n        ]\n        nf_mult = 1\n        nf_mult_prev = 1\n        for n in range(1, nb):  # gradually increase the number of filters\n            nf_mult_prev = nf_mult\n            nf_mult = min(2 ** n, 8)\n            sequence += [\n                nn.Conv2d(\n                    nf * nf_mult_prev,\n                    nf * nf_mult,\n                    kernel_size=kw,\n                    stride=stride,\n                    padding=padw,\n                    bias=use_bias,\n                ),\n                norm_layer(nf * nf_mult),\n                nn.LeakyReLU(0.2, True),\n            ]\n\n        nf_mult_prev = nf_mult\n        nf_mult = min(2 ** nb, 8)\n        sequence += [\n            nn.Conv2d(\n                nf * nf_mult_prev,\n                nf * nf_mult,\n                kernel_size=kw,\n                stride=1,\n                padding=padw,\n                bias=use_bias,\n            ),\n            norm_layer(nf * nf_mult),\n            nn.LeakyReLU(0.2, True),\n        ]\n\n        sequence += [\n            nn.Conv2d(nf * nf_mult, nf, kernel_size=kw, stride=1, padding=padw)\n        ]  # output 1 
channel prediction map\n        self.model = nn.Sequential(*sequence)\n\n    def forward(self, input):\n        \"\"\"Standard forward.\"\"\"\n        return self.model(input)\n"
  },
  {
    "path": "codes/config/DSGANSR/archs/edsr.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(\n        self,\n        rgb_range,\n        rgb_mean=(0.4488, 0.4371, 0.4040),\n        rgb_std=(1.0, 1.0, 1.0),\n        sign=-1,\n    ):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\n\n\n@ARCH_REGISTRY.register()\nclass EDSR(nn.Module):\n    def __init__(self, nb, nf, res_scale=0.1, upscale=4, conv=default_conv):\n        super(EDSR, self).__init__()\n\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        scale = upscale\n       
 act = nn.ReLU(True)\n        # url_name = 'r{}f{}x{}'.format(nb, nf, upscale)\n        # if url_name in url:\n        #     self.url = url[url_name]\n        # else:\n        #     self.url = None\n        self.sub_mean = MeanShift(255.0, sign=-1)\n        self.add_mean = MeanShift(255.0, sign=1)\n\n        # define head module\n        m_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, n_feats, kernel_size, act=act, res_scale=res_scale)\n            for _ in range(n_resblocks)\n        ]\n        m_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = nn.Sequential(*m_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x * 255.0)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = self.tail(res)\n        x = self.add_mean(x) / 255.0\n\n        return x\n"
  },
  {
    "path": "codes/config/DSGANSR/archs/loss.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport lpips as lp\n\nfrom utils.registry import LOSS_REGISTRY\n\nfrom .vgg import VGGFeatureExtractor\n\n@LOSS_REGISTRY.register()\nclass ColorLoss(nn.Module):\n    def __init__(self, ksize=5, sigma=None, stride=1, recursion=1, loss_type=\"l1\"):\n        super().__init__()\n        \n        self.stride = stride\n        self.ksize = ksize\n        self.recursion = recursion\n        self.loss_type = loss_type\n\n        if sigma is None:\n            sigma = ksize / 6\n        ax = torch.arange(0, ksize) - (ksize - 1) / 2\n        xx, yy = torch.meshgrid(ax, ax)\n        dis = (xx ** 2 + yy ** 2)\n        dis = torch.exp(-dis / 2 / sigma ** 2)\n        dis = dis / dis.sum()\n\n        weight = dis.view(1, 1, ksize, ksize).repeat(3, 1, 1, 1)\n        self.register_buffer(\"weight\", weight)\n    \n    def forward(self, src, tgt):\n        for i in range(self.recursion):\n            tgt = F.conv2d(tgt, self.weight, stride=self.stride, padding=self.ksize//2, groups=3)\n        if self.loss_type == \"l1\":\n            loss = F.l1_loss(src, tgt)\n        elif self.loss_type == \"mse\":\n            loss = F.mse_loss(src, tgt)\n        return loss\n\n@LOSS_REGISTRY.register()\nclass GaussGuided(nn.Module):\n    def __init__(self, ksize, sigma):\n        super().__init__()\n\n        ax = torch.arange(0, ksize) - ksize//2\n        xx, yy = torch.meshgrid(ax, ax)\n        dis = (xx ** 2 + yy ** 2)\n        dis = torch.exp(-dis / sigma ** 2)\n        dis = dis / dis.sum()\n\n        self.register_buffer(\"gauss\", dis.view(1, ksize**2, 1, 1))\n    \n    def forward(self, kernel):\n\n        return F.mse_loss(self.gauss, kernel)\n\n@LOSS_REGISTRY.register()\nclass PerceptualLossLPIPS(nn.Module):\n    def __init__(self, net=\"alex\", normalize=True):\n        super().__init__()\n        self.fn = lp.LPIPS(net=net, spatial=True)\n        for p in self.fn.parameters():\n            p.requires_grad = False\n        \n        self.normalize = normalize\n    \n    def forward(self, res, ref):\n        return self.fn(res, ref, normalize=self.normalize).mean(), None\n\n\n@LOSS_REGISTRY.register()\nclass MSELoss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.mse_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass L1Loss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.l1_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass GANLoss(nn.Module):\n    \"\"\"Define GAN loss.\n    Args:\n        gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.\n        real_label_val (float): The value for real label. Default: 1.0.\n        fake_label_val (float): The value for fake label. 
Default: 0.0.\n    \"\"\"\n\n    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):\n        super(GANLoss, self).__init__()\n        self.gan_type = gan_type\n        self.real_label_val = real_label_val\n        self.fake_label_val = fake_label_val\n\n        if self.gan_type == \"vanilla\":\n            self.loss = nn.BCEWithLogitsLoss()\n        elif self.gan_type == \"lsgan\":\n            self.loss = nn.MSELoss()\n        elif self.gan_type == \"wgan\":\n            self.loss = self._wgan_loss\n        elif self.gan_type == \"wgan_softplus\":\n            self.loss = self._wgan_softplus_loss\n        elif self.gan_type == \"hinge\":\n            self.loss = nn.ReLU()\n        else:\n            raise NotImplementedError(f\"GAN type {self.gan_type} is not implemented.\")\n\n    def _wgan_loss(self, input, target):\n        \"\"\"wgan loss.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return -input.mean() if target else input.mean()\n\n    def _wgan_softplus_loss(self, input, target):\n        \"\"\"wgan loss with soft plus. softplus is a smooth approximation to the\n        ReLU function.\n        In StyleGAN2, it is called:\n            Logistic loss for discriminator;\n            Non-saturating loss for generator.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return F.softplus(-input).mean() if target else F.softplus(input).mean()\n\n    def get_target_label(self, input, target_is_real):\n        \"\"\"Get target label.\n        Args:\n            input (Tensor): Input tensor.\n            target_is_real (bool): Whether the target is real or fake.\n        Returns:\n            (bool | Tensor): Target tensor. 
Return bool for wgan, otherwise,\n                return Tensor.\n        \"\"\"\n\n        if self.gan_type in [\"wgan\", \"wgan_softplus\"]:\n            return target_is_real\n        target_val = self.real_label_val if target_is_real else self.fake_label_val\n        return input.new_ones(input.size()) * target_val\n\n    def forward(self, input, target_is_real, is_disc=False):\n        \"\"\"\n        Args:\n            input (Tensor): The input for the loss module, i.e., the network\n                prediction.\n            target_is_real (bool): Whether the target is real or fake.\n            is_disc (bool): Whether the loss is for discriminators or not.\n                Default: False.\n        Returns:\n            Tensor: GAN loss value.\n        \"\"\"\n        target_label = self.get_target_label(input, target_is_real)\n        if self.gan_type == \"hinge\":\n            if is_disc:  # for discriminators in hinge-gan\n                input = -input if target_is_real else input\n                loss = self.loss(1 + input).mean()\n            else:  # for generators in hinge-gan\n                loss = -input.mean()\n        else:  # other gan types\n            loss = self.loss(input, target_label)\n\n        return loss\n\n\n@LOSS_REGISTRY.register()\nclass PerceptualLoss(nn.Module):\n    \"\"\"Perceptual loss with commonly used style loss.\n    Args:\n        layer_weights (dict): The weight for each layer of vgg feature.\n            Here is an example: {'conv5_4': 1.}, which means the conv5_4\n            feature layer (before relu5_4) will be extracted with weight\n            1.0 in calculating losses.\n        vgg_type (str): The type of vgg network used as feature extractor.\n            Default: 'vgg19'.\n        use_input_norm (bool):  If True, normalize the input image in vgg.\n            Default: True.\n        range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].\n            Default: False.\n        perceptual_weight (float): If `perceptual_weight > 0`, the perceptual\n            loss will be calculated and multiplied by the weight.\n            Default: 1.0.\n        style_weight (float): If `style_weight > 0`, the style loss will be\n            calculated and multiplied by the weight.\n            Default: 0.\n        criterion (str): Criterion used for perceptual loss.
Default: 'l1'.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_weights,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        perceptual_weight=1.0,\n        style_weight=0.0,\n        criterion=\"l1\",\n    ):\n        super(PerceptualLoss, self).__init__()\n        self.perceptual_weight = perceptual_weight\n        self.style_weight = style_weight\n        self.layer_weights = layer_weights\n        self.vgg = VGGFeatureExtractor(\n            layer_name_list=list(layer_weights.keys()),\n            vgg_type=vgg_type,\n            use_input_norm=use_input_norm,\n            range_norm=range_norm,\n        )\n\n        self.criterion_type = criterion\n        if self.criterion_type == \"l1\":\n            self.criterion = torch.nn.L1Loss()\n        elif self.criterion_type == \"l2\":\n            self.criterion = torch.nn.MSELoss()\n        elif self.criterion_type == \"fro\":\n            self.criterion = None\n        else:\n            raise NotImplementedError(f\"{criterion} criterion has not been supported.\")\n\n    def forward(self, x, gt):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, c, h, w).\n            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        # extract vgg features\n        x_features = self.vgg(x)\n        gt_features = self.vgg(gt.detach())\n\n        # calculate perceptual loss\n        if self.perceptual_weight > 0:\n            percep_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    percep_loss += (\n                        torch.norm(x_features[k] - gt_features[k], p=\"fro\")\n                        * self.layer_weights[k]\n                    )\n                else:\n                    percep_loss += (\n                        self.criterion(x_features[k], gt_features[k])\n                        * self.layer_weights[k]\n                    )\n            percep_loss *= self.perceptual_weight\n        else:\n            percep_loss = None\n\n        # calculate style loss\n        if self.style_weight > 0:\n            style_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    style_loss += (\n                        torch.norm(\n                            self._gram_mat(x_features[k])\n                            - self._gram_mat(gt_features[k]),\n                            p=\"fro\",\n                        )\n                        * self.layer_weights[k]\n                    )\n                else:\n                    style_loss += (\n                        self.criterion(\n                            self._gram_mat(x_features[k]),\n                            self._gram_mat(gt_features[k]),\n                        )\n                        * self.layer_weights[k]\n                    )\n            style_loss *= self.style_weight\n        else:\n            style_loss = None\n\n        return percep_loss, style_loss\n\n    def _gram_mat(self, x):\n        \"\"\"Calculate Gram matrix.\n        Args:\n            x (torch.Tensor): Tensor with shape of (n, c, h, w).\n        Returns:\n            torch.Tensor: Gram matrix.\n        \"\"\"\n        n, c, h, w = x.size()\n        features = x.view(n, c, w * h)\n        features_t = features.transpose(1, 2)\n        gram = features.bmm(features_t) / (c * h *
w)\n        return gram\n\n\n@LOSS_REGISTRY.register()\nclass CharbonnierLoss(nn.Module):\n    \"\"\"Charbonnier Loss (L1)\"\"\"\n\n    def __init__(self, eps=1e-6):\n        super(CharbonnierLoss, self).__init__()\n        self.eps = eps\n\n    def forward(self, x, y):\n        diff = x - y\n        loss = torch.mean(torch.sqrt(diff * diff + self.eps))\n        return loss\n\n\nclass GradientPenaltyLoss(nn.Module):\n    def __init__(self, device=torch.device(\"cpu\")):\n        super(GradientPenaltyLoss, self).__init__()\n        self.register_buffer(\"grad_outputs\", torch.Tensor())\n        self.grad_outputs = self.grad_outputs.to(device)\n\n    def get_grad_outputs(self, input):\n        if self.grad_outputs.size() != input.size():\n            self.grad_outputs.resize_(input.size()).fill_(1.0)\n        return self.grad_outputs\n\n    def forward(self, interp, interp_crit):\n        grad_outputs = self.get_grad_outputs(interp_crit)\n        grad_interp = torch.autograd.grad(\n            outputs=interp_crit,\n            inputs=interp,\n            grad_outputs=grad_outputs,\n            create_graph=True,\n            retain_graph=True,\n            only_inputs=True,\n        )[0]\n        grad_interp = grad_interp.view(grad_interp.size(0), -1)\n        grad_interp_norm = grad_interp.norm(2, dim=1)\n\n        loss = ((grad_interp_norm - 1) ** 2).mean()\n        return loss\n"
  },
  {
    "path": "codes/config/DSGANSR/archs/lr_scheduler.py",
    "content": "import math\nfrom collections import Counter, defaultdict\n\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\n\nfrom utils.registry import LR_SCHEDULER_REGISTRY\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass LinearDecayLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        decay_prop,\n        total_steps,\n        last_epoch=-1,\n    ):\n        self.decay_prop = decay_prop\n        self.total_steps = total_steps\n\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n\n        return [\n            group[\"initial_lr\"]\n            * (1 - (self.last_epoch + 1) * self.decay_prop / self.total_steps)\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass MultiStepRestartLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        milestones,\n        restarts=None,\n        weights=None,\n        gamma=0.1,\n        clear_state=False,\n        last_epoch=-1,\n    ):\n        self.milestones = Counter(milestones)\n        self.gamma = gamma\n        self.clear_state = clear_state\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch in self.restarts:\n            if self.clear_state:\n                self.optimizer.state = defaultdict(dict)\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        if self.last_epoch not in self.milestones:\n            return [group[\"lr\"] for group in self.optimizer.param_groups]\n        return [\n            group[\"lr\"] * self.gamma ** self.milestones[self.last_epoch]\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass CosineAnnealingRestartLR(_LRScheduler):\n    def __init__(\n        self, optimizer, T_period, restarts=None, weights=None, eta_min=0, last_epoch=-1\n    ):\n        self.T_period = T_period\n        self.T_max = self.T_period[0]  # current T period\n        self.eta_min = eta_min\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        self.last_restart = 0\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch == 0:\n            return self.base_lrs\n        elif self.last_epoch in self.restarts:\n            self.last_restart = self.last_epoch\n            self.T_max = self.T_period[self.restarts.index(self.last_epoch) + 1]\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        elif (self.last_epoch - self.last_restart - 1 - self.T_max) % (\n            2 * self.T_max\n        ) == 0:\n            return [\n                group[\"lr\"]\n                + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2\n                for base_lr, group in 
zip(self.base_lrs, self.optimizer.param_groups)\n            ]\n        return [\n            (1 + math.cos(math.pi * (self.last_epoch - self.last_restart) / self.T_max))\n            / (\n                1\n                + math.cos(\n                    math.pi * ((self.last_epoch - self.last_restart) - 1) / self.T_max\n                )\n            )\n            * (group[\"lr\"] - self.eta_min)\n            + self.eta_min\n            for group in self.optimizer.param_groups\n        ]\n"
  },
  {
    "path": "codes/config/DSGANSR/archs/module_util.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\n\ndef initialize_weights(net_l, scale=1):\n    if not isinstance(net_l, list):\n        net_l = [net_l]\n    for net in net_l:\n        for m in net.modules():\n            if isinstance(m, nn.Conv2d):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale  # for residual block\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                init.constant_(m.weight, 1)\n                init.constant_(m.bias.data, 0.0)\n\n\ndef make_layer(block, n_layers):\n    layers = []\n    for _ in range(n_layers):\n        layers.append(block())\n    return nn.Sequential(*layers)\n\n\nclass ResidualBlock_noBN(nn.Module):\n    \"\"\"Residual block w/o BN\n    ---Conv-ReLU-Conv-+-\n     |________________|\n    \"\"\"\n\n    def __init__(self, nf=64):\n        super(ResidualBlock_noBN, self).__init__()\n        self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n\n        # initialization\n        initialize_weights([self.conv1, self.conv2], 0.1)\n\n    def forward(self, x):\n        identity = x\n        out = F.relu(self.conv1(x), inplace=True)\n        out = self.conv2(out)\n        return identity + out\n\n\ndef flow_warp(x, flow, interp_mode=\"bilinear\", padding_mode=\"zeros\"):\n    \"\"\"Warp an image or feature map with optical flow\n    Args:\n        x (Tensor): size (N, C, H, W)\n        flow (Tensor): size (N, H, W, 2), normal value\n        interp_mode (str): 'nearest' or 'bilinear'\n        padding_mode (str): 'zeros' or 'border' or 'reflection'\n\n    Returns:\n        Tensor: warped image or feature map\n    \"\"\"\n    assert x.size()[-2:] == flow.size()[1:3]\n    B, C, H, W = x.size()\n    # mesh grid\n    grid_y, grid_x = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))\n    grid = torch.stack((grid_x, grid_y), 2).float()  # W(x), H(y), 2\n    grid.requires_grad = False\n    grid = grid.type_as(x)\n    vgrid = grid + flow\n    # scale grid to [-1,1]\n    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(W - 1, 1) - 1.0\n    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(H - 1, 1) - 1.0\n    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)\n    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode)\n    return output\n"
  },
  {
    "path": "codes/config/DSGANSR/archs/rcan.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\nclass CALayer(nn.Module):\n    def __init__(self, channel, reduction=16):\n        super(CALayer, self).__init__()\n        # global average pooling: feature --> point\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        # feature channel downscale and upscale --> channel weight\n        self.conv_du = nn.Sequential(\n            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),\n            
nn.ReLU(inplace=True),\n            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),\n            nn.Sigmoid(),\n        )\n\n    def forward(self, x):\n        y = self.avg_pool(x)\n        y = self.conv_du(y)\n        return x * y\n\n\n## Residual Channel Attention Block (RCAB)\nclass RCAB(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        reduction,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(RCAB, self).__init__()\n        modules_body = []\n        for i in range(2):\n            modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                modules_body.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                modules_body.append(act)\n        modules_body.append(CALayer(n_feat, reduction))\n        self.body = nn.Sequential(*modules_body)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x)\n        # res = self.body(x).mul(self.res_scale)\n        res += x\n        return res\n\n\n## Residual Group (RG)\nclass ResidualGroup(nn.Module):\n    def __init__(\n        self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks\n    ):\n        super(ResidualGroup, self).__init__()\n        modules_body = []\n        modules_body = [\n            RCAB(\n                conv,\n                n_feat,\n                kernel_size,\n                reduction,\n                bias=True,\n                bn=False,\n                act=nn.ReLU(True),\n                res_scale=1,\n            )\n            for _ in range(n_resblocks)\n        ]\n        modules_body.append(conv(n_feat, n_feat, kernel_size))\n        self.body = nn.Sequential(*modules_body)\n\n    def forward(self, x):\n        res = self.body(x)\n        res += x\n        return res\n\n\n## Residual Channel Attention Network (RCAN)\n@ARCH_REGISTRY.register()\nclass RCAN(nn.Module):\n    def __init__(self, ng, nb, nf, reduction=16, upscale=4, conv=default_conv):\n        super(RCAN, self).__init__()\n\n        n_resgroups = ng\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        reduction = reduction\n        scale = upscale\n\n        act = nn.ReLU(True)\n\n        # RGB mean for DIV2K\n        rgb_mean = (0.4488, 0.4371, 0.4040)\n        rgb_std = (1.0, 1.0, 1.0)\n        self.sub_mean = MeanShift(1.0, rgb_mean, rgb_std, -1)\n\n        # define head module\n        modules_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        modules_body = [\n            ResidualGroup(\n                conv,\n                n_feats,\n                kernel_size,\n                reduction,\n                act=act,\n                res_scale=1.0,\n                n_resblocks=nb,\n            )\n            for _ in range(ng)\n        ]\n\n        modules_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        modules_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.add_mean = MeanShift(1.0, rgb_mean, rgb_std, 1)\n\n        self.head = nn.Sequential(*modules_head)\n        self.body = nn.Sequential(*modules_body)\n        self.tail = nn.Sequential(*modules_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = 
self.tail(res)\n        x = self.add_mean(x)\n\n        return x\n\n    def load_state_dict(self, state_dict, strict=False):\n        own_state = self.state_dict()\n        for name, param in state_dict.items():\n            if name in own_state:\n                if isinstance(param, nn.Parameter):\n                    param = param.data\n                try:\n                    own_state[name].copy_(param)\n                except Exception:\n                    if name.find(\"tail\") >= 0:\n                        print(\"Replace pre-trained upsampler to new one...\")\n                    else:\n                        raise RuntimeError(\n                            \"While copying the parameter named {}, \"\n                            \"whose dimensions in the model are {} and \"\n                            \"whose dimensions in the checkpoint are {}.\".format(\n                                name, own_state[name].size(), param.size()\n                            )\n                        )\n            elif strict:\n                if name.find(\"tail\") == -1:\n                    raise KeyError('unexpected key \"{}\" in state_dict'.format(name))\n\n        if strict:\n            missing = set(own_state.keys()) - set(state_dict.keys())\n            if len(missing) > 0:\n                raise KeyError('missing keys in state_dict: \"{}\"'.format(missing))\n"
  },
  {
    "path": "codes/config/DSGANSR/archs/rrdb.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\nclass ResidualDenseBlock_5C(nn.Module):\n    def __init__(self, nf=64, gc=32, bias=True):\n        super(ResidualDenseBlock_5C, self).__init__()\n        # gc: growth channel, i.e. intermediate channels\n        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)\n        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)\n        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1\n        )\n\n    def forward(self, x):\n        x1 = self.lrelu(self.conv1(x))\n        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))\n        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))\n        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))\n        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))\n        return x5 * 0.2 + x\n\n\nclass RRDB(nn.Module):\n    \"\"\"Residual in Residual Dense Block\"\"\"\n\n    def __init__(self, nf, gc=32):\n        super(RRDB, self).__init__()\n        self.RDB1 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB2 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB3 = ResidualDenseBlock_5C(nf, gc)\n\n    def forward(self, x):\n        out = self.RDB1(x)\n        out = self.RDB2(out)\n        out = self.RDB3(out)\n        return out * 0.2 + x\n\n\n@ARCH_REGISTRY.register()\nclass RRDBNet(nn.Module):\n    def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4):\n        super(RRDBNet, self).__init__()\n        self.upscale = upscale\n        RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.RRDB_trunk = make_layer(RRDB_block_f, nb)\n        self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        #### upsampling\n        self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        if upscale == 4:\n            self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.conv_first(x)\n        trunk = self.trunk_conv(self.RRDB_trunk(fea))\n        fea = fea + trunk\n\n        if self.upscale == 2 or self.upscale == 3:\n            fea = self.lrelu(\n                self.upconv1(\n                    F.interpolate(fea, scale_factor=self.upscale, mode=\"nearest\")\n                )\n            )\n        if self.upscale == 4:\n            fea = self.lrelu(\n                self.upconv1(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n            fea = self.lrelu(\n                self.upconv2(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n        out = self.conv_last(self.lrelu(self.HRconv(fea)))\n\n        return out\n"
  },
  {
    "path": "codes/config/DSGANSR/archs/srresnet.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\n@ARCH_REGISTRY.register()\nclass MSRResNet(nn.Module):\n    \"\"\"modified SRResNet\"\"\"\n\n    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):\n        super(MSRResNet, self).__init__()\n        self.upscale = upscale\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        basic_block = functools.partial(ResidualBlock_noBN, nf=nf)\n        self.recon_trunk = make_layer(basic_block, nb)\n\n        # upsampling\n        if self.upscale == 2:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n        elif self.upscale == 3:\n            self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(3)\n        elif self.upscale == 4:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1\n        )\n        if self.upscale == 4:\n            initialize_weights(self.upconv2, 0.1)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv_first(x))\n        out = self.recon_trunk(fea)\n\n        if self.upscale == 4:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n        elif self.upscale == 3 or self.upscale == 2:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n\n        out = self.conv_last(self.lrelu(self.HRconv(out)))\n        base = F.interpolate(\n            x, scale_factor=self.upscale, mode=\"bilinear\", align_corners=False\n        )\n        out += base\n        return out\n"
  },
  {
    "path": "codes/config/DSGANSR/archs/translator.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\nfrom .edsr import default_conv, BasicBlock, ResBlock, Upsampler\n\n\n@ARCH_REGISTRY.register()\nclass Translator(nn.Module):\n    def __init__(self, nb, nf, scale=4, zero_tail=False, conv=default_conv):\n        super().__init__()\n\n        self.scale = scale\n        # define head module\n        if scale >= 1:\n            m_head = [conv(3, nf, 3)]\n        else:\n            s = int(1 / scale)\n            m_head = [nn.Conv2d(3, nf, kernel_size=2 * s + 1, stride=s, padding=s)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, nf, 3, act=nn.ReLU(True), res_scale=1) for _ in range(nb)\n        ]\n        m_body.append(conv(nf, nf, 3))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, nf, act=False) if scale > 1 else nn.Identity(),\n            conv(nf, 3, 3),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = nn.Sequential(*m_tail)\n\n        if zero_tail:\n            nn.init.constant_(self.tail[-1].weight, 0)\n            nn.init.constant_(self.tail[-1].bias, 0)\n\n    def forward(self, x):\n\n        f = self.head(x)\n        f = self.body(f)\n        f = self.tail(f)\n\n        if self.scale == 1:\n            x = f + x\n        else:\n            x = f + F.interpolate(x, scale_factor=self.scale)\n        \n        return x\n"
  },
  {
    "path": "codes/config/DSGANSR/archs/vgg.py",
    "content": "import os\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn as nn\nfrom torchvision.models import vgg as vgg\n\nfrom utils.registry import ARCH_REGISTRY\n\nVGG_PRETRAIN_PATH = \"checkpoints/pretrained_models/vgg19-dcbb9e9d.pth\"\nNAMES = {\n    \"vgg11\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg13\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg16\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"pool5\",\n    ],\n    \"vgg19\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"conv3_4\",\n        \"relu3_4\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"conv4_4\",\n        \"relu4_4\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"conv5_4\",\n        \"relu5_4\",\n        \"pool5\",\n    ],\n}\n\n\ndef insert_bn(names):\n    \"\"\"Insert bn layer after each conv.\n    Args:\n        names (list): The list of layer names.\n    Returns:\n        list: The list of layer names with bn layers.\n    \"\"\"\n    names_bn = []\n    for name in names:\n        names_bn.append(name)\n        if \"conv\" in name:\n            position = name.replace(\"conv\", \"\")\n            names_bn.append(\"bn\" + position)\n    return names_bn\n\n\n@ARCH_REGISTRY.register()\nclass VGGFeatureExtractor(nn.Module):\n    \"\"\"VGG network for feature extraction.\n    In this implementation, we allow users to choose whether use normalization\n    in the input feature and the type of vgg network. 
Note that the pretrained\n    path must fit the vgg type.\n    Args:\n        layer_name_list (list[str]): Forward function returns the corresponding\n            features according to the layer_name_list.\n            Example: {'relu1_1', 'relu2_1', 'relu3_1'}.\n        vgg_type (str): Set the type of vgg network. Default: 'vgg19'.\n        use_input_norm (bool): If True, normalize the input image. Importantly,\n            the input feature must in the range [0, 1]. Default: True.\n        range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].\n            Default: False.\n        requires_grad (bool): If true, the parameters of VGG network will be\n            optimized. Default: False.\n        remove_pooling (bool): If true, the max pooling operations in VGG net\n            will be removed. Default: False.\n        pooling_stride (int): The stride of max pooling operation. Default: 2.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_name_list,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        requires_grad=False,\n        remove_pooling=False,\n        pooling_stride=2,\n    ):\n        super(VGGFeatureExtractor, self).__init__()\n\n        self.layer_name_list = layer_name_list\n        self.use_input_norm = use_input_norm\n        self.range_norm = range_norm\n\n        self.names = NAMES[vgg_type.replace(\"_bn\", \"\")]\n        if \"bn\" in vgg_type:\n            self.names = insert_bn(self.names)\n\n        # only borrow layers that will be used to avoid unused params\n        max_idx = 0\n        for v in layer_name_list:\n            idx = self.names.index(v)\n            if idx > max_idx:\n                max_idx = idx\n\n        if os.path.exists(VGG_PRETRAIN_PATH):\n            vgg_net = getattr(vgg, vgg_type)(pretrained=False)\n            state_dict = torch.load(\n                VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage\n            )\n            vgg_net.load_state_dict(state_dict)\n        else:\n            vgg_net = getattr(vgg, vgg_type)(pretrained=True)\n\n        features = vgg_net.features[: max_idx + 1]\n\n        modified_net = OrderedDict()\n        for k, v in zip(self.names, features):\n            if \"pool\" in k:\n                # if remove_pooling is true, pooling operation will be removed\n                if remove_pooling:\n                    continue\n                else:\n                    # in some cases, we may want to change the default stride\n                    modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride)\n            else:\n                modified_net[k] = v\n\n        self.vgg_net = nn.Sequential(modified_net)\n\n        if not requires_grad:\n            self.vgg_net.eval()\n            for param in self.parameters():\n                param.requires_grad = False\n        else:\n            self.vgg_net.train()\n            for param in self.parameters():\n                param.requires_grad = True\n\n        if self.use_input_norm:\n            # the mean is for image with range [0, 1]\n            self.register_buffer(\n                \"mean\", torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)\n            )\n            # the std is for image with range [0, 1]\n            self.register_buffer(\n                \"std\", torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)\n            )\n\n    def forward(self, x):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, 
c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        if self.range_norm:\n            x = (x + 1) / 2\n        if self.use_input_norm:\n            x = (x - self.mean) / self.std\n\n        output = {}\n        for key, layer in self.vgg_net._modules.items():\n            x = layer(x)\n            if key in self.layer_name_list:\n                output[key] = x.clone()\n\n        return output\n"
  },
  {
    "path": "codes/config/DSGANSR/count_flops.py",
    "content": "import argparse\nimport sys\n\nimport torch\nfrom torchsummaryX import summary\n\nsys.path.append(\"../../\")\nimport utils.option as option\nfrom models import create_model\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"--opt\",\n    type=str,\n    default=\"options/setting1/test/test_setting1_x4.yml\",\n    help=\"Path to option YMAL file of Predictor.\",\n)\nargs = parser.parse_args()\nopt = option.parse(args.opt, root_path=\".\", is_train=True)\n\nopt = option.dict_to_nonedict(opt)\nmodel = create_model(opt)\n\ntest_tensor = torch.randn(1, 3, 270, 180).cuda()\nfor name, net in model.networks.items():\n    summary(net.cuda(), x=test_tensor)\n    print(\"Above are results for net {}\".format(name))\n    input()\n"
  },
  {
    "path": "codes/config/DSGANSR/inference.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport os.path as osp\nimport random\nimport sys\nimport cv2\nfrom collections import defaultdict\nfrom glob import glob\nfrom tqdm import tqdm\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom data.data_sampler import DistIterSampler\nfrom metrics import IQA\nfrom models import create_model\n\n\n\n#### options\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"-opt\",\n    type=str,\n    default=\"options/test/2020Track2.yml\",\n    help=\"Path to options YMAL file.\",\n)\nparser.add_argument(\"-input_dir\", type=str, default=\"../../../data_samples/LR\")\nparser.add_argument(\"-output_dir\", type=str, default=\"../../../data_samples/BSRGAN\")\nargs = parser.parse_args()\nopt = option.parse(args.opt, is_train=False)\n\nopt = option.dict_to_nonedict(opt)\n\nmodel = create_model(opt)\n\nif not osp.exists(args.output_dir):\n    os.makedirs(args.output_dir)\n\ntest_files = glob(osp.join(args.input_dir, \"*\"))\nfor inx, path in tqdm(enumerate(test_files)):\n    name = path.split(\"/\")[-1].split(\".\")[0]\n\n    img = cv2.imread(path)[:, :, [2, 1, 0]]\n    img = img.transpose(2, 0, 1)[None] / 255\n    img_t = torch.as_tensor(np.ascontiguousarray(img)).float()\n\n    model.test({\"src\": img_t}, crop_size=512)\n    outdict = model.get_current_visuals()\n\n    sr = outdict[\"sr\"]\n    sr_im = util.tensor2img(sr)\n\n    save_path = osp.join(args.output_dir, \"{}_x{}.png\".format(name, opt[\"scale\"]))\n    cv2.imwrite(save_path, sr_im)\n"
  },
  {
    "path": "codes/config/DSGANSR/models/__init__.py",
    "content": "import importlib\nimport logging\nimport os\nimport os.path as osp\n\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\nmodel_folder = osp.dirname(__file__)\nmodel_names = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(model_folder)\n    if v.endswith(\"_model.py\")\n]\n_model_modules = [\n    importlib.import_module(f\"models.{file_name}\") for file_name in model_names\n]\n\n\ndef create_model(opt, **kwarg):\n    model = opt[\"model\"]\n    m = MODEL_REGISTRY.get(model)(opt, **kwarg)\n    logger.info(\"Model [{:s}] is created.\".format(m.__class__.__name__))\n    return m\n"
  },
  {
    "path": "codes/config/DSGANSR/models/base_model.py",
    "content": "import logging\nimport os\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\n\nfrom archs import build_loss, build_network, build_scheduler\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass BaseModel:\n    def __init__(self, opt):\n\n        self.opt = opt\n\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n            self.world_size = torch.distributed.get_world_size()\n        else:\n            self.rank = 0  # non dist training\n\n        self.device = torch.device(\"cuda\" if opt[\"gpu_ids\"] is not None else \"cpu\")\n        self.is_train = opt[\"is_train\"]\n        self.log_dict = OrderedDict()\n\n        self.data_names = []\n        self.networks = {}\n\n        self.optimizers = {}\n        self.schedulers = {}\n\n    def setup_train(self, train_opt):\n        # define losses\n        loss_opt = train_opt[\"losses\"]\n        self.losses = self.build_losses(loss_opt)\n\n        # build optmizers\n        optimizer_opts = train_opt[\"optimizers\"]\n        self.optimizers = self.build_optimizers(optimizer_opts)\n\n        # set schedulers\n        scheduler_opts = train_opt[\"schedulers\"]\n        self.schedulers = self.build_schedulers(scheduler_opts)\n\n        # set to training state\n        self.set_network_state(self.networks.keys(), \"train\")\n\n    def feed_data(self, data):\n        pass\n\n    def optimize_parameters(self):\n        pass\n\n    def get_current_visuals(self):\n        pass\n\n    def get_current_losses(self):\n        pass\n\n    def print_network(self):\n        pass\n\n    def save(self, label):\n        pass\n\n    def load(self):\n        pass\n\n    def build_network(self, net_opt):\n\n        net = build_network(net_opt)\n\n        if isinstance(net, nn.Module):\n            net = self.model_to_device(net)\n\n            if net_opt.get(\"pretrain\"):\n                pretrain = net_opt.pop(\"pretrain\")\n                self.load_network(net, pretrain[\"path\"], pretrain[\"strict_load\"])\n\n            self.print_network(net)\n        return net\n\n    def build_losses(self, loss_opt):\n        losses = {}\n\n        defined_loss_names = list(loss_opt.keys())\n        assert set(defined_loss_names).issubset(set(self.loss_names))\n\n        for name in defined_loss_names:\n            loss_conf = loss_opt.get(name)\n            if loss_conf[\"weight\"] > 0:\n                self.loss_weights[name] = loss_conf.pop(\"weight\")\n                losses[name] = build_loss(loss_conf).to(self.device)\n\n        return losses\n\n    def build_optimizers(self, optim_opts):\n        optimizers = {}\n\n        if \"default\" in optim_opts.keys():\n            default_optim = optim_opts.pop(\"default\")\n\n        defined_optimizer_names = list(optim_opts.keys())\n        assert set(defined_optimizer_names).issubset(self.networks.keys())\n\n        for name in defined_optimizer_names:\n            optim_opt = optim_opts[name]\n            if optim_opt is None:\n                optim_opt = default_optim.copy()\n\n            params = []\n            for v in self.networks[name].parameters():\n                if v.requires_grad:\n                    params.append(v)\n\n            optim_type = optim_opt.pop(\"type\")\n            optimizer = getattr(torch.optim, optim_type)(params=params, **optim_opt)\n            optimizers[name] = optimizer\n\n  
      return optimizers\n\n    def build_schedulers(self, scheduler_opts):\n        \"\"\"Set up scheduler.\"\"\"\n        schedulers = {}\n        if \"default\" in scheduler_opts.keys():\n            default_opt = scheduler_opts.pop(\"default\")\n\n        for name in self.optimizers.keys():\n            scheduler_opt = scheduler_opts[name]\n            if scheduler_opt is None:\n                scheduler_opt = default_opt.copy()\n\n            schedulers[name] = build_scheduler(self.optimizers[name], scheduler_opt)\n\n        return schedulers\n\n    def model_to_device(self, net):\n        \"\"\"Model to device. It also warps models with DistributedDataParallel\n        or DataParallel.\n        Args:\n            net (nn.Module)\n        \"\"\"\n        net = net.to(self.device)\n        if self.opt[\"dist\"]:\n            net = DistributedDataParallel(net, device_ids=[torch.cuda.current_device()])\n        else:\n            net = DataParallel(net)\n        return net\n\n    def print_network(self, net):\n        # Generator\n        s, n = self.get_network_description(net)\n        if isinstance(net, nn.DataParallel) or isinstance(net, DistributedDataParallel):\n            net_struc_str = \"{} - {}\".format(\n                net.__class__.__name__, net.module.__class__.__name__\n            )\n        else:\n            net_struc_str = \"{}\".format(net.__class__.__name__)\n        if self.rank <= 0:\n            logger.info(\n                \"Network G structure: {}, with parameters: {:,d}\".format(\n                    net_struc_str, n\n                )\n            )\n            logger.info(s)\n\n    def set_optimizer(self, names, operation):\n        for name in names:\n            getattr(self.optimizers[name], operation)()\n\n    def set_requires_grad(self, names, requires_grad):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                for v in self.networks[name].parameters():\n                    v.requires_grad = requires_grad\n\n    def set_network_state(self, names, state):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                getattr(self.networks[name], state)()\n\n    def clip_grad_norm(self, names, norm):\n        for name in names:\n            nn.utils.clip_grad_norm_(self.networks[name].parameters(), max_norm=norm)\n\n    def _set_lr(self, lr_groups_l):\n        \"\"\"set learning rate for warmup,\n        lr_groups_l: list for lr_groups. 
each for an optimizer\"\"\"\n        # self.optimizers is a dict, so iterate over its values\n        for optimizer, lr_groups in zip(self.optimizers.values(), lr_groups_l):\n            for param_group, lr in zip(optimizer.param_groups, lr_groups):\n                param_group[\"lr\"] = lr\n\n    def _get_init_lr(self):\n        # get the initial lr, which is set by the scheduler\n        init_lr_groups_l = []\n        for optimizer in self.optimizers.values():\n            init_lr_groups_l.append([v[\"initial_lr\"] for v in optimizer.param_groups])\n        return init_lr_groups_l\n\n    def update_learning_rate(self, names, cur_iter, warmup_iter=-1):\n        # step only the schedulers of the named networks; call sites pass a name list\n        for name in names:\n            self.schedulers[name].step()\n        #### set up warm up learning rate\n        if cur_iter < warmup_iter:\n            # get initial lr for each group\n            init_lr_g_l = self._get_init_lr()\n            # modify warming-up learning rates\n            warm_up_lr_l = []\n            for init_lr_g in init_lr_g_l:\n                warm_up_lr_l.append([v / warmup_iter * cur_iter for v in init_lr_g])\n            # set learning rate\n            self._set_lr(warm_up_lr_l)\n\n    def get_current_learning_rate(self):\n        # return self.schedulers[0].get_lr()[0]\n        return list(self.optimizers.values())[0].param_groups[0][\"lr\"]\n\n    def get_network_description(self, network):\n        \"\"\"Get the string and total parameters of the network\"\"\"\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        s = str(network)\n        n = sum(map(lambda x: x.numel(), network.parameters()))\n        return s, n\n\n    def save_network(self, network, network_label, iter_label):\n        save_filename = \"{}_{}.pth\".format(iter_label, network_label)\n        save_path = os.path.join(self.opt[\"path\"][\"models\"], save_filename)\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        state_dict = network.state_dict()\n        for key, param in state_dict.items():\n            state_dict[key] = param.cpu()\n        torch.save(state_dict, save_path)\n\n    def save(self, iter_label):\n        for name in self.optimizers.keys():\n            self.save_network(self.networks[name], name, iter_label)\n\n    def load_network(self, network, load_path, strict=True):\n        if load_path is not None:\n            if isinstance(network, nn.DataParallel) or isinstance(\n                network, DistributedDataParallel\n            ):\n                network = network.module\n            load_net = torch.load(load_path)\n            load_net_clean = OrderedDict()  # remove unnecessary 'module.'\n            for k, v in load_net.items():\n                if k.startswith(\"module.\"):\n                    load_net_clean[k[7:]] = v\n                else:\n                    load_net_clean[k] = v\n            network.load_state_dict(load_net_clean, strict=strict)\n\n    def save_training_state(self, epoch, iter_step):\n        \"\"\"Saves training state during training, which will be used for resuming\"\"\"\n        state = {\"epoch\": epoch, \"iter\": iter_step, \"schedulers\": {}, \"optimizers\": {}}\n        for k, s in self.schedulers.items():\n            state[\"schedulers\"][k] = s.state_dict()\n        for k, o in self.optimizers.items():\n            state[\"optimizers\"][k] = o.state_dict()\n        save_filename = \"{}.state\".format(iter_step)\n        save_path = os.path.join(self.opt[\"path\"][\"training_state\"], save_filename)\n        torch.save(state, save_path)\n\n    def resume_training(self, resume_state):\n        \"\"\"Resume the optimizers and schedulers for training\"\"\"\n        resume_optimizers = resume_state[\"optimizers\"]\n        resume_schedulers = resume_state[\"schedulers\"]\n        assert len(resume_optimizers) == len(\n            self.optimizers\n        ), \"Wrong lengths of optimizers\"\n        assert len(resume_schedulers) == len(\n            self.schedulers\n        ), \"Wrong lengths of schedulers\"\n        for name, o in resume_optimizers.items():\n            self.optimizers[name].load_state_dict(o)\n        for name, s in resume_schedulers.items():\n            self.schedulers[name].load_state_dict(s)\n\n    def reduce_loss_dict(self, loss_dict):\n        \"\"\"reduce loss dict.\n        In distributed training, it averages the losses among different GPUs.\n        Args:\n            loss_dict (OrderedDict): Loss dict.\n        \"\"\"\n        with torch.no_grad():\n            if self.opt[\"dist\"]:\n                keys = []\n                losses = []\n                for name, value in loss_dict.items():\n                    keys.append(name)\n                    losses.append(value)\n                losses = torch.stack(losses, 0)\n                torch.distributed.reduce(losses, dst=0)\n                if self.rank == 0:\n                    losses /= self.world_size\n                loss_dict = {key: loss for key, loss in zip(keys, losses)}\n\n            log_dict = OrderedDict()\n            for name, value in loss_dict.items():\n                log_dict[name] = value.mean().item()\n\n            return log_dict\n\n    def get_current_log(self):\n        return self.log_dict\n"
  },
  {
    "path": "codes/config/DSGANSR/models/deg_sr_model.py",
    "content": "import logging\nfrom collections import OrderedDict\nimport random\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.registry import MODEL_REGISTRY\n\nfrom .base_model import BaseModel\n\nlogger = logging.getLogger(\"base\")\n\nclass Quant(torch.autograd.Function):\n\n    @staticmethod\n    def forward(ctx, input):\n        output = torch.clamp(input, 0, 1)\n        output = (output * 255.).round() / 255.\n        return output\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        return grad_output\n\nclass Quantization(nn.Module):\n    def __init__(self):\n        super(Quantization, self).__init__()\n\n    def forward(self, input):\n        return Quant.apply(input)\n\n\n@MODEL_REGISTRY.register()\nclass DegSRModel(BaseModel):\n    def __init__(self, opt):\n        super().__init__(opt)\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n        else:\n            self.rank = -1  # non dist training\n\n        self.data_names = [\"src\", \"tgt\"]\n\n        self.network_names = [\"netSR\", \"netDeg\", \"netD1\", \"netD2\"]\n        self.networks = {}\n\n        self.loss_names = [\n            \"lr_adv\",\n            \"lr_percep\",\n            \"sr_adv\",\n            \"sr_pix_trans\",\n            \"sr_pix_sr\",\n            \"sr_percep\",\n            \"color\"\n        ]\n        self.loss_weights = {}\n        self.losses = {}\n        self.optimizers = {}\n\n        # define networks and load pretrained models\n        nets_opt = opt[\"networks\"]\n        defined_network_names = list(nets_opt.keys())\n        assert set(defined_network_names).issubset(set(self.network_names))\n        \n        for name in defined_network_names:\n            setattr(self, name, self.build_network(nets_opt[name]))\n            self.networks[name] = getattr(self, name)\n            \n        if self.is_train:\n            train_opt = opt[\"train\"]\n             # setup loss, optimizers, schedulers\n            self.setup_train(train_opt)\n\n            self.max_grad_norm = train_opt[\"max_grad_norm\"]\n            self.quant = Quantization()\n            self.D_ratio = train_opt[\"D_ratio\"]\n\n            self.optim_deg = train_opt[\"optim_deg\"]\n            self.optim_sr = train_opt[\"optim_sr\"]\n\n            ## buffer\n            self.fake_lr_buffer = ShuffleBuffer(train_opt[\"buffer_size\"])\n            self.fake_hr_buffer = ShuffleBuffer(train_opt[\"buffer_size\"])\n\n    def feed_data(self, data):\n\n        self.syn_hr = data[\"tgt\"].to(self.device)\n        self.real_lr = data[\"src\"].to(self.device)\n\n    def encoder_forward(self):\n        self.fake_real_lr = self.netDeg(self.syn_hr)\n    \n    def decoder_forward(self):\n        if not self.optim_deg:\n            self.fake_real_lr = self.netDeg(self.syn_hr)\n        self.fake_real_lr_quant = self.quant(self.fake_real_lr)\n        self.syn_sr = self.netSR(self.fake_real_lr_quant.detach())\n\n    def optimize_trans_models(self, loss_dict, step):\n\n        self.set_requires_grad([\"netSR\"], False)\n        self.encoder_forward()\n        loss_G = 0\n\n        if self.losses.get(\"lr_adv\"):\n            self.set_requires_grad([\"netD1\"], False)\n            g1_adv_loss = self.calculate_gan_loss_G(\n                self.netD1, self.losses[\"lr_adv\"],\n                self.real_lr, self.fake_real_lr\n            )\n            loss_dict[\"g1_adv\"] = g1_adv_loss.item()\n            loss_G += self.loss_weights[\"lr_adv\"] * g1_adv_loss\n            \n        if 
self.losses.get(\"lr_percep\"):\n            lr_percep, lr_style = self.losses[\"lr_percep\"](\n                self.real_lr, self.fake_real_lr\n            )\n            loss_dict[\"lr_percep\"] = lr_percep.item()\n            if lr_style is not None:\n                loss_dict[\"lr_style\"] = lr_style.item()\n                loss_G += self.loss_weights[\"lr_percep\"] * lr_style\n            loss_G += self.loss_weights[\"lr_percep\"] * lr_percep\n       \n        if self.losses.get(\"color\"):\n            color = self.losses[\"color\"](\n                self.fake_real_lr, self.syn_hr\n                )\n            loss_dict[\"color\"] = color.item()\n            loss_G += self.loss_weights[\"color\"] * color\n\n        self.set_optimizer(names=[\"netDeg\"], operation=\"zero_grad\")\n        loss_G.backward()\n        self.clip_grad_norm([\"netDeg\"], self.max_grad_norm)\n        self.set_optimizer(names=[\"netDeg\"], operation=\"step\")\n\n        self.update_learning_rate([\"netDeg\"], step)\n\n        ## update D\n        if self.losses.get(\"lr_adv\"):\n            if step % self.D_ratio == 0:\n                self.set_requires_grad([\"netD1\"], True)\n                loss_d1 = self.calculate_gan_loss_D(\n                    self.netD1, self.losses[\"lr_adv\"],\n                    self.real_lr, self.fake_lr_buffer.choose(self.fake_real_lr)\n                )\n                loss_dict[\"d1_adv\"] = loss_d1.item()\n                loss_D = self.loss_weights[\"lr_adv\"] * loss_d1\n                self.optimizers[\"netD1\"].zero_grad()\n                loss_D.backward()\n                self.clip_grad_norm([\"netD1\"], self.max_grad_norm)\n                self.optimizers[\"netD1\"].step()\n        \n            self.update_learning_rate([\"netD1\"], step)\n\n        return loss_dict\n    \n    def optimize_sr_models(self, loss_dict, step):\n        self.set_requires_grad([\"netSR\"], True)\n        self.set_requires_grad([\"netDeg\"], False)\n        self.decoder_forward()\n        loss_G = 0\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD2\"], False)\n            sr_adv_loss = self.calculate_gan_loss_G(\n                self.netD2, self.losses[\"sr_adv\"],\n                self.syn_hr, self.syn_sr\n            )\n            loss_dict[\"sr_adv\"] = sr_adv_loss.item()\n            loss_G += self.loss_weights[\"sr_adv\"] * sr_adv_loss\n        \n        if self.losses.get(\"sr_percep\"):\n            sr_percep, sr_style = self.losses[\"sr_percep\"](\n                self.syn_hr, self.syn_sr\n            )\n            loss_dict[\"sr_percep\"] = sr_percep.item()\n            if sr_style is not None:\n                loss_dict[\"sr_style\"] = sr_style.item()\n                loss_G += self.loss_weights[\"sr_percep\"] * sr_style\n            loss_G += self.loss_weights[\"sr_percep\"] * sr_percep\n        \n        if self.losses.get(\"sr_pix_sr\"):\n            sr_pix = self.losses[\"sr_pix_sr\"](self.syn_hr, self.syn_sr)\n            loss_dict[\"sr_pix_sr\"] = sr_pix.item()\n            loss_G += self.loss_weights[\"sr_pix_sr\"] * sr_pix\n\n        self.set_optimizer(names=[\"netSR\"], operation=\"zero_grad\")\n        loss_G.backward()\n        self.clip_grad_norm([\"netSR\"], self.max_grad_norm)\n        self.set_optimizer(names=[\"netSR\"], operation=\"step\")\n\n        self.update_learning_rate([\"netSR\"], step)\n\n        ## update D2\n        if self.losses.get(\"sr_adv\"):\n            if step % self.D_ratio == 0:\n                
self.set_requires_grad([\"netD2\"], True)\n                loss_d2 = self.calculate_gan_loss_D(\n                    self.netD2, self.losses[\"sr_adv\"],\n                    self.syn_hr, self.fake_hr_buffer.choose(self.syn_sr)\n                )\n                loss_dict[\"d2_adv\"] = loss_d2.item()\n                loss_D = self.loss_weights[\"sr_adv\"] * loss_d2\n                self.optimizers[\"netD2\"].zero_grad()\n                loss_D.backward()\n                self.clip_grad_norm([\"netD2\"], self.max_grad_norm)\n                self.optimizers[\"netD2\"].step()\n\n            self.update_learning_rate([\"netD2\"], step)\n\n        return loss_dict\n\n    def optimize_parameters(self, step):\n        loss_dict = OrderedDict()\n\n        # optimize trans\n        if self.optim_deg:\n            loss_dict = self.optimize_trans_models(loss_dict, step)\n\n        # optimize SR\n        if self.optim_sr:\n            loss_dict = self.optimize_sr_models(loss_dict, step)\n\n        self.log_dict = loss_dict\n\n    def calculate_gan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n\n        loss_real = criterion(d_pred_real, True, is_disc=True)\n        loss_fake = criterion(d_pred_fake, False, is_disc=True)\n\n        return (loss_real + loss_fake) / 2\n\n    def calculate_gan_loss_G(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake)\n        loss_g = criterion(d_pred_fake, True, is_disc=False)\n\n        return loss_g\n\n    def test(self, test_data):\n        self.src = test_data[\"src\"].to(self.device)\n        if test_data.get(\"tgt\") is not None:\n            tgt = test_data[\"tgt\"].to(self.device)\n            b, c, h, w = tgt.shape\n            # crop to a multiple of 8 so the degradation network's strides divide evenly\n            crop_h = h // 8 * 8\n            crop_w = w // 8 * 8\n            self.tgt = tgt[:, :, :crop_h, :crop_w]\n        self.set_network_state([\"netDeg\", \"netSR\"], \"eval\")\n        with torch.no_grad():\n            self.fake_tgt = self.netSR(self.src)\n            if test_data.get(\"tgt\") is not None:\n                self.fake_lr = self.netDeg(self.tgt)\n        self.set_network_state([\"netDeg\", \"netSR\"], \"train\")\n\n    def get_current_visuals(self, need_GT=True):\n        out_dict = OrderedDict()\n        out_dict[\"lr\"] = self.src.detach()[0].float().cpu()\n        out_dict[\"sr\"] = self.fake_tgt.detach()[0].float().cpu()\n        if hasattr(self, \"fake_lr\"):\n            out_dict[\"fake_lr\"] = self.fake_lr.detach()[0].float().cpu()\n        return out_dict\n\n\nclass ShuffleBuffer():\n    \"\"\"Randomly choose previously generated images or ones produced by the latest generator.\n    :param buffer_size: the size of the image buffer\n    :type buffer_size: int\n    \"\"\"\n\n    def __init__(self, buffer_size):\n        \"\"\"Initialize the ImagePool class.\n        :param buffer_size: the size of the image buffer\n        :type buffer_size: int\n        \"\"\"\n        self.buffer_size = buffer_size\n        self.num_imgs = 0\n        self.images = []\n\n    def choose(self, images, prob=0.5):\n        \"\"\"Return images from the pool.\n        :param images: the latest generated images from the generator\n        :type images: Tensor\n        :param prob: probability (0~1) of returning a previous image from the buffer\n        :type prob: float\n        :return: images from the buffer\n        :rtype: Tensor\n        \"\"\"\n        if self.buffer_size == 0:\n            return images\n        return_images = []\n        for image in images:\n            image = torch.unsqueeze(image.data, 0)\n            if self.num_imgs < self.buffer_size:\n                self.images.append(image)\n                return_images.append(image)\n                self.num_imgs += 1\n            else:\n                p = random.uniform(0, 1)\n                if p < prob:\n                    idx = random.randint(0, self.buffer_size - 1)\n                    stored_image = self.images[idx].clone()\n                    self.images[idx] = image\n                    return_images.append(stored_image)\n                else:\n                    return_images.append(image)\n        return_images = torch.cat(return_images, 0)\n        return return_images\n"
  },
  {
    "path": "codes/config/DSGANSR/options/test/2017Track1.yml",
    "content": "#### general settings\nname: 2017Track1\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [0]\n\nmetrics: [psnr, ssim, lpips] \n\ndatasets:\n  test1:\n    name: 2017Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test2:\n  #   name: 2018Track2\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /mnt/hdd/lzx/SRDatasets/NTIRE2018/valid_mild.lmdb\n  #   dataroot_tgt: /mnt/hdd/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test3:\n  #   name: 2018Track3\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /mnt/hdd/lzx/SRDatasets/NTIRE2018/valid_difficult.lmdb\n  #   dataroot_tgt: /mnt/hdd/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test4:\n  #   name: 2018Track4\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /mnt/hdd/lzx/SRDatasets/NTIRE2018/valid_wild.lmdb\n  #   dataroot_tgt: /mnt/hdd/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test5:\n  #   name: 2020Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /mnt/hdd/lzx/SRDatasets/NTIRE2020/track1_valid_input.lmdb\n  #   dataroot_tgt: /mnt/hdd/lzx/SRDatasets/NTIRE2020/track1_valid_gt.lmdb\n\n#### network structures\nnetworks:\n  Encoder:\n    which_network: Translator\n    setting:\n      nb: 16\n      nf: 64\n      scale: 0.25\n      zero_tail: true\n    pretrain: \n      path: log/2017Track1_deg/models/200000_Encoder.pth\n      strict_load: true\n\n  Decoder:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      # path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      path: log/2017Track1/models/latest_Decoder.pth\n      strict_load: true\n  "
  },
  {
    "path": "codes/config/DSGANSR/options/test/2018Track2.yml",
    "content": "#### general settings\nname: 2018Track2\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [0]\n\nmetrics: [best_psnr, best_ssim, lpips] \n\ndatasets:\n  test1:\n    name: 2017Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\nnetworks:\n  Encoder:\n    which_network: Translator\n    setting:\n      nb: 16\n      nf: 64\n      scale: 0.25\n      zero_tail: true\n    pretrain: \n      path: log/2018Track2_deg/models/200000_Encoder.pth\n      strict_load: true\n\n  Decoder:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      # path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      path: log/2018Track2/models/latest_Decoder.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/DSGANSR/options/test/2018Track4.yml",
    "content": "#### general settings\nname: 2018Track4\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [6]\n\nmetrics: [best_psnr, best_ssim, lpips] \n\ndatasets:\n  test1:\n    name: 2017Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\nnetworks:\n  Encoder:\n    which_network: Translator\n    setting:\n      nb: 16\n      nf: 64\n      scale: 0.25\n      zero_tail: true\n    pretrain: \n      path: log/2018Track4_deg/models/200000_Encoder.pth\n      strict_load: true\n\n  Decoder:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      # path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      path: log/2018Track4/models/latest_Decoder.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/DSGANSR/options/test/2020Track1.yml",
    "content": "#### general settings\nname: 2020Track1\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [0]\n\nmetrics: [psnr, ssim, lpips] \n\ndatasets:\n  test1:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\nnetworks:\n  Encoder:\n    which_network: Translator\n    setting:\n      nb: 16\n      nf: 64\n      scale: 0.25\n      zero_tail: true\n    pretrain: \n      path: log/2020Track1_deg/models/70000_Encoder.pth\n      strict_load: true\n\n  Decoder:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      # path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      path: log/2020Track1/models/170000_Decoder.pth\n      strict_load: true\n"
  },
  {
    "path": "codes/config/DSGANSR/options/train/deg/2017Track2.yml",
    "content": "#### general settings\nname: 2017Track2_deg\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [5]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/train_LR/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 4  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2017Track2_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: Translator\n    setting:\n      nb: 16\n      nf: 64\n      scale: 0.25\n      zero_tail: true\n    pretrain: \n      path: log/2017Track1/models/195000_netDeg.pth\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain: \n      path: ~\n      strict_load: true\n  \n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n\n  optim_deg: true\n  optim_deg: false\n\n  losses:\n    color: \n      type: ColorLoss\n      ksize: 5\n      stride: 4\n      recursion: 1\n      loss_type: mse\n      weight: 1.0\n    \n    lr_percep:\n      type: PerceptualLoss\n      layer_weights:\n        'conv5_4': 1  # before relu\n      vgg_type: vgg19\n      use_input_norm: true\n      range_norm: false\n      perceptual_weight: 1.0\n      style_weight: 0\n      criterion: l1\n      weight: !!float 0.01\n    \n    lr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.005\n    \n    sr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.0\n    \n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n\n  optimizers:\n    deafault:\n      type: Adam\n      lr: !!float 2e-4  \n    netDeg: ~\n    netSR: ~\n    netD1: ~\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n  \n"
  },
  {
    "path": "codes/config/DSGANSR/options/train/deg/2018Track2.yml",
    "content": "#### general settings\nname: 2018Track2_deg\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [0]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: Translator\n    setting:\n      nb: 16\n      nf: 64\n      scale: 0.25\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain: \n      path: ~\n      strict_load: true\n  \n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n\n  optim_deg: true\n  optim_deg: false\n\n  losses:\n    color: \n      type: ColorLoss\n      ksize: 5\n      stride: 4\n      recursion: 1\n      loss_type: mse\n      weight: 1.0\n    \n    lr_percep:\n      type: PerceptualLoss\n      layer_weights:\n        'conv5_4': 1  # before relu\n      vgg_type: vgg19\n      use_input_norm: true\n      range_norm: false\n      perceptual_weight: 1.0\n      style_weight: 0\n      criterion: l1\n      weight: !!float 0.01\n    \n    lr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.005\n    \n    sr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.0\n    \n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n\n  optimizers:\n    deafault:\n      type: Adam\n      lr: !!float 2e-4  \n    netDeg: ~\n    netSR: ~\n    netD1: ~\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n  \n"
  },
  {
    "path": "codes/config/DSGANSR/options/train/deg/2018Track4.yml",
    "content": "#### general settings\nname: 2018Track4_deg\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [4]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/x4.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: Translator\n    setting:\n      nb: 16\n      nf: 64\n      scale: 0.25\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain: \n      # path: ~\n      path: ~\n      strict_load: true\n  \n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n\n  optim_deg: true\n  optim_deg: false\n\n  losses:\n    color: \n      type: ColorLoss\n      ksize: 5\n      stride: 4\n      recursion: 1\n      loss_type: mse\n      weight: 1.0\n    \n    lr_percep:\n      type: PerceptualLoss\n      layer_weights:\n        'conv5_4': 1  # before relu\n      vgg_type: vgg19\n      use_input_norm: true\n      range_norm: false\n      perceptual_weight: 1.0\n      style_weight: 0\n      criterion: l1\n      weight: !!float 0.01\n    \n    lr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.005\n    \n    sr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.0\n    \n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n\n  optimizers:\n    deafault:\n      type: Adam\n      lr: !!float 2e-4  \n    netDeg: ~\n    netSR: ~\n    netD1: ~\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/DSGANSR/options/train/deg/2020Track1.yml",
    "content": "#### general settings\nname: 2020Track1_deg\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [2]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/train_source.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: Translator\n    setting:\n      nb: 16\n      nf: 64\n      scale: 0.25\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain: \n      path: ~\n      strict_load: true\n  \n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n\n  optim_deg: true\n  optim_deg: false\n\n  losses:\n    color: \n      type: ColorLoss\n      ksize: 5\n      stride: 4\n      recursion: 1\n      loss_type: mse\n      weight: 1.0\n    \n    lr_percep:\n      type: PerceptualLoss\n      layer_weights:\n        'conv5_4': 1  # before relu\n      vgg_type: vgg19\n      use_input_norm: true\n      range_norm: false\n      perceptual_weight: 1.0\n      style_weight: 0\n      criterion: l1\n      weight: !!float 0.01\n    \n    lr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.005\n    \n    sr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.0\n    \n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n\n  optimizers:\n    deafault:\n      type: Adam\n      lr: !!float 2e-4  \n    netDeg: ~\n    netSR: ~\n    netD1: ~\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/DSGANSR/options/train/sr/2017Track2.yml",
    "content": "#### general settings\nname: 2017Track2\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [0]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/train_LR/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 4  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2017Track2_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: Translator\n    setting:\n      nb: 16\n      nf: 64\n      scale: 0.25\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n  \n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n\n  optim_deg: false\n  optim_sr: true\n\n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n  losses:\n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n\n  optimizers:\n    netSR:\n      type: Adam\n      lr: !!float 2e-4\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n  \n"
  },
  {
    "path": "codes/config/DSGANSR/options/train/sr/2018Track2.yml",
    "content": "#### general settings\nname: 2018Track2\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [0]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: Translator\n    setting:\n      nb: 16\n      nf: 64\n      scale: 0.25\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n  \n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n\n  optim_deg: false\n  optim_sr: true\n\n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n  losses:\n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n\n  optimizers:\n    netSR:\n      type: Adam\n      lr: !!float 2e-4\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n  "
  },
  {
    "path": "codes/config/DSGANSR/options/train/sr/2018Track4.yml",
    "content": "#### general settings\nname: 2018Track4\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [6]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/x4.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: Translator\n    setting:\n      nb: 16\n      nf: 64\n      scale: 0.25\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n\n  optim_deg: false\n  optim_sr: true\n\n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n  losses:\n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n\n  optimizers:\n    netSR:\n      type: Adam\n      lr: !!float 2e-4\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/DSGANSR/options/train/sr/2020Track1.yml",
    "content": "#### general settings\nname: 2020Track1\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [7]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/train_source.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  Encoder:\n    which_network: Translator\n    setting:\n      nb: 16\n      nf: 64\n      scale: 0.25\n      zero_tail: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  Decoder:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n  \n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n\n  optim_deg: false\n  optim_sr: true\n\n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n  losses:\n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n\n  optimizers:\n    netSR:\n      type: Adam\n      lr: !!float 2e-4\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/DSGANSR/test.py",
    "content": "import argparse\nimport logging\nimport os.path\nimport sys\nimport time\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\nfrom utils import bgr2ycbcr, imresize\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=False)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    util.mkdirs(\n        (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n    )\n\n    os.system(\"rm ./result\")\n    os.symlink(os.path.join(opt[\"path\"][\"results_root\"], \"..\"), \"./result\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    torch.backends.cudnn.benchmark = True\n\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"test_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO,\n        screen=True,\n        tofile=True,\n    )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    logger = logging.getLogger(\"base\")\n    logger.info(option.dict2str(opt))\n\n    # Create test dataset and dataloader\n    test_datasets = []\n    test_loaders = []\n\n    for phase, dataset_opt in 
sorted(opt[\"datasets\"].items()):\n\n        test_set = create_dataset(dataset_opt)\n        test_loader = create_dataloader(test_set, dataset_opt, opt[\"dist\"])\n\n        if rank == 0:\n            logger.info(\n                \"Number of test images in [{:s}]: {:d}\".format(\n                    dataset_opt[\"name\"], len(test_set)\n                )\n            )\n        test_datasets.append(test_set)\n        test_loaders.append(test_loader)\n\n    # load pretrained model by default\n    model = create_model(opt)\n\n    for test_dataset, test_loader in zip(test_datasets, test_loaders):\n\n        test_set_name = test_dataset.opt[\"name\"]\n        dataset_dir = os.path.join(opt[\"path\"][\"results_root\"], test_set_name)\n\n        if rank == 0:\n            logger.info(\"\\nTesting [{:s}]...\".format(test_set_name))\n            util.mkdir(dataset_dir)\n\n        validate(\n            model,\n            test_dataset,\n            test_loader,\n            opt,\n            measure,\n            dataset_dir,\n            test_set_name,\n            logger,\n        )\n\n\ndef validate(\n    model, dataset, dist_loader, opt, measure, dataset_dir, test_set_name, logger\n):\n\n    test_results = {}\n    test_results_y = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n        test_results_y[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        test_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        img_path = test_data[\"src_path\"][0]\n        img_name = img_path.split(\"/\")[-1].split(\".\")[0]\n\n        model.test(test_data)\n        visuals = model.get_current_visuals()\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n\n        suffix = opt[\"suffix\"]\n        if suffix:\n            save_img_path = os.path.join(dataset_dir, img_name + suffix + \".png\")\n        else:\n            save_img_path = os.path.join(dataset_dir, img_name + \".png\")\n        util.save_img(sr_img, save_img_path)\n\n        message = \"img:{:15s}; \".format(img_name)\n\n        crop_border = opt[\"crop_border\"] if opt[\"crop_border\"] else opt[\"scale\"]\n\n        if crop_border == 0:\n            cropped_sr_img = sr_img\n        else:\n            cropped_sr_img = sr_img[\n                crop_border:-crop_border, crop_border:-crop_border, :\n            ]\n\n        if \"tgt\" in test_data.keys():\n            gt_img = util.tensor2img(test_data[\"tgt\"][0].double().cpu())\n\n            if crop_border == 0:\n                cropped_gt_img = gt_img\n            else:\n                cropped_gt_img = gt_img[\n                    crop_border:-crop_border, crop_border:-crop_border, :\n                ]\n        else:\n            cropped_gt_img = None\n\n        message += \"Scores - \"\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n            message += \"{}: {:.6f}; \".format(k, v)\n\n        if sr_img.shape[2] == 3:  # RGB image\n            sr_img_y = bgr2ycbcr(sr_img, only_y=True)\n            if crop_border == 0:\n                cropped_sr_img_y = sr_img_y * 255\n            else:\n                cropped_sr_img_y = (\n                    
sr_img_y[crop_border:-crop_border, crop_border:-crop_border] * 255\n                )\n            if gt_img is not None:\n                gt_img_y = bgr2ycbcr(gt_img, only_y=True)\n                if crop_border == 0:\n                    cropped_gt_img_y = gt_img_y * 255\n                else:\n                    cropped_gt_img_y = (\n                        gt_img_y[crop_border:-crop_border, crop_border:-crop_border]\n                        * 255\n                    )\n            else:\n                gt_img_y = None\n\n            message += \"Y Scores - \"\n            scores = measure(\n                res=cropped_sr_img_y, ref=cropped_gt_img_y, metrics=opt[\"metrics\"]\n            )\n            for k, v in scores.items():\n                test_results_y[k][idx] = v\n                message += \"{}: {:.6f}; \".format(k, v)\n\n        logger.info(message)\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n        for k, v in test_results_y.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    # log\n    avg_results = {}\n    message = \"Average Results for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger.info(message)\n\n    avg_results_y = {}\n    message = \"Average Results on Y channel for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results_y.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger.info(message)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/config/DSGANSR/train.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport random\nimport sys\nimport time\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef setup_dataloaer(opt, logger):\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        rank = 0\n        world_size = 1\n\n    for phase, dataset_opt in opt[\"datasets\"].items():\n        if phase == \"train\":\n            train_set = create_dataset(dataset_opt)\n            train_loader = create_dataloader(train_set, dataset_opt, opt[\"dist\"])\n            total_iters = opt[\"train\"][\"niter\"]\n            total_epochs = total_iters // (len(train_loader) - 1) + 1\n            if rank == 0:\n                logger.info(\n                    \"Number of train images: {:,d}, iters: {:,d}\".format(\n                        len(train_set), len(train_loader)\n                    )\n                )\n                logger.info(\n                    \"Total epochs needed: {:d} for iters {:,d}\".format(\n                        total_epochs, opt[\"train\"][\"niter\"]\n                    )\n                )\n\n        elif phase == \"val\":\n            val_set = create_dataset(dataset_opt)\n            val_loader = create_dataloader(val_set, dataset_opt, opt[\"dist\"])\n            if rank == 0:\n                logger.info(\n                    \"Number of val images in [{:s}]: {:d}\".format(\n                        dataset_opt[\"name\"], len(val_set)\n                    )\n                )\n        else:\n            raise NotImplementedError(\"Phase [{:s}] is not recognized.\".format(phase))\n\n    assert train_loader is not None\n    assert val_loader is not None\n\n    return train_set, train_loader, val_set, val_loader, total_iters, total_epochs\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=True)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = 
torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    if opt[\"train\"].get(\"resume_state\", None) is None:\n        util.mkdir_and_rename(\n            opt[\"path\"][\"experiments_root\"]\n        )  # rename experiment folder if exists\n        util.mkdirs(\n            (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n        )\n        os.system(\"rm ./log\")\n        os.symlink(os.path.join(opt[\"path\"][\"experiments_root\"], \"..\"), \"./log\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    # fall back to the process rank as seed if no manual seed is given\n    seed = opt[\"train\"][\"manual_seed\"]\n    if seed is None:\n        seed = rank\n    util.set_random_seed(seed)\n\n    torch.backends.cudnn.benchmark = True\n    # torch.backends.cudnn.deterministic = True\n\n    # setup tensorboard and val logger\n    if rank == 0:\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger = SummaryWriter(log_dir=\"log/{}/tb_logger/\".format(opt[\"name\"]))\n\n        util.setup_logger(\n            \"val\",\n            opt[\"path\"][\"log\"],\n            \"val_\" + opt[\"name\"],\n            level=logging.INFO,\n            screen=True,\n            tofile=True,\n        )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    # config loggers. Logging does not work before this point\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"train_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO if rank == 0 else logging.ERROR,\n        screen=True,\n        tofile=True,\n    )\n\n    logger = logging.getLogger(\"base\")\n    if rank == 0:\n        logger.info(option.dict2str(opt))\n\n    # create dataset\n    (\n        train_set,\n        train_loader,\n        val_set,\n        val_loader,\n        total_iters,\n        total_epochs,\n    ) = setup_dataloaer(opt, logger)\n\n    # create model\n    model = create_model(opt)\n\n    # loading resume state if exists\n    if opt[\"train\"].get(\"resume_state\", None):\n        # distributed resuming: all load into default GPU\n        device_id = gpu\n        resume_state = torch.load(\n            opt[\"train\"][\"resume_state\"],\n            map_location=lambda storage, loc: storage.cuda(device_id),\n        )\n\n        logger.info(\n            \"Resuming training from epoch: {}, iter: {}.\".format(\n                resume_state[\"epoch\"], resume_state[\"iter\"]\n            )\n        )\n\n        start_epoch = resume_state[\"epoch\"]\n        current_step = resume_state[\"iter\"]\n        model.resume_training(resume_state)  # handle optimizers and schedulers\n\n    else:\n        current_step = 0\n        start_epoch = 0\n\n    logger.info(\n        \"Start training from epoch: {:d}, iter: {:d}\".format(start_epoch, current_step)\n    )\n    data_time, iter_time = time.time(), time.time()\n    avg_data_time = avg_iter_time = 0\n    count = 0\n    for epoch in range(start_epoch, total_epochs + 1):\n        for _, train_data in enumerate(train_loader):\n\n            current_step += 1\n            count += 1\n            if current_step > total_iters:\n                break\n\n            data_time = time.time() - data_time\n            avg_data_time = (avg_data_time * (count - 1) + data_time) / count\n\n            model.feed_data(train_data)\n            model.optimize_parameters(current_step)\n            model.update_learning_rate(\n                current_step, warmup_iter=opt[\"train\"][\"warmup_iter\"]\n            )\n\n            iter_time = time.time() - iter_time\n            avg_iter_time = (avg_iter_time * (count - 1) + iter_time) / count\n\n            # log\n            if current_step % opt[\"logger\"][\"print_freq\"] == 0:\n                logs = model.get_current_log()\n                message = (\n                    f\"<epoch:{epoch:3d}, iter:{current_step:8,d}, \"\n                    f\"lr:{model.get_current_learning_rate():.3e}> \"\n                )\n\n                message += f'[time (data): {avg_iter_time:.3f} ({avg_data_time:.3f})] '\n                for k, v in logs.items():\n                    message += \"{:s}: {:.4e}; \".format(k, v)\n                    # tensorboard logger\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        if rank == 0:\n                            tb_logger.add_scalar(k, v, current_step)\n                logger.info(message)\n\n            # validation\n            if current_step % opt[\"train\"][\"val_freq\"] == 0:\n\n                avg_results = validate(\n                    model, val_set, val_loader, opt, measure, epoch, current_step\n                )\n\n                # tensorboard logger\n                if rank == 0:\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        for k, v in avg_results.items():\n                            tb_logger.add_scalar(k, v, current_step)\n\n            # save models and training states\n            if current_step % opt[\"logger\"][\"save_checkpoint_freq\"] == 0:\n                if rank == 0:\n                    logger.info(\"Saving models and training states.\")\n                    model.save(current_step)\n                    model.save_training_state(epoch, current_step)\n\n            data_time = time.time()\n            iter_time = time.time()\n\n    if rank == 0:\n        logger.info(\"Saving the final model.\")\n        model.save(\"latest\")\n        logger.info(\"End of training.\")\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger.close()\n\n\ndef validate(model, dataset, dist_loader, opt, measure, epoch, current_step):\n\n    test_results = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    if rank == 0:\n        pbar = tqdm(total=len(dataset), leave=False, dynamic_ncols=True)\n\n    # each rank evaluates an interleaved subset of the dataset\n    indices = list(range(rank, len(dataset), world_size))\n    for idx, val_data in enumerate(dist_loader):\n        idx = indices[idx]\n\n        LR_img = val_data[\"src\"]\n        lr_img = util.tensor2img(LR_img)  # save LR image for reference\n\n        model.test(val_data)\n        visuals = model.get_current_visuals()\n\n        # Save images for reference\n        img_name = val_data[\"src_path\"][0].split(\"/\")[-1].split(\".\")[0]\n        img_dir = os.path.join(opt[\"path\"][\"val_images\"], img_name)\n\n        util.mkdir(img_dir)\n        save_lr_path = os.path.join(img_dir, \"{:s}_LR.png\".format(img_name))\n        util.save_img(lr_img, save_lr_path)\n\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n        save_img_path = os.path.join(\n            img_dir, \"{:s}_{:d}.png\".format(img_name, current_step)\n        )\n        util.save_img(sr_img, save_img_path)\n\n        if \"fake_lr\" in visuals.keys():\n            fake_lr_img = util.tensor2img(visuals[\"fake_lr\"])\n            save_img_path = os.path.join(\n                img_dir, f\"fake_lr_{current_step:d}.png\"\n            )\n            util.save_img(fake_lr_img, save_img_path)\n\n        # calculate scores\n        crop_size = opt[\"scale\"]\n        cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        if \"tgt\" in val_data.keys():\n            gt_img = util.tensor2img(val_data[\"tgt\"])\n            cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        else:\n            cropped_gt_img = gt_img = None\n\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n\n        if rank == 0:\n            for _ in range(world_size):\n                pbar.update(1)\n    if rank == 0:\n        pbar.close()\n\n    # log\n    avg_results = {}\n    message = \" <epoch:{:3d}, iter:{:8,d}, Average scores:\\t\".format(\n        epoch, current_step\n    )\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger_val = logging.getLogger(\"val\")  # validation logger\n        logger_val.info(message)\n\n    del test_results\n    torch.cuda.empty_cache()\n    return avg_results\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
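  {
    "path": "codes/config/DSGANSR/README.md",
    "content": "This directory contains the DSGAN-SR implementation used for the comparisons in this repo. The commands below are a minimal sketch of the two-stage workflow inferred from the option files (2020Track1 is used as the example; adapt the dataset paths in the options to your machine first): the degradation network is trained first, then the SR network, and the test option files load the checkpoints produced by both stages.\n\n```bash\ncd codes/config/DSGANSR\n# stage 1: learn the degradation model (checkpoints are saved under log/2020Track1_deg/)\npython3 train.py --opt options/train/deg/2020Track1.yml\n# stage 2: train the SR network (checkpoints are saved under log/2020Track1/)\npython3 train.py --opt options/train/sr/2020Track1.yml\n# test with the checkpoints referenced in the test option file\npython3 test.py --opt options/test/2020Track1.yml\n```"
  },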
  {
    "path": "codes/config/EDSR/archs/__init__.py",
    "content": "import importlib\nimport os\nimport os.path as osp\n\nfrom utils.registry import ARCH_REGISTRY, LOSS_REGISTRY, LR_SCHEDULER_REGISTRY\n\narch_folder = osp.dirname(osp.abspath(__file__))\narch_filenames = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(arch_folder)\n    if v.endswith(\".py\")\n]\n# import all the arch modules\n_arch_modules = [\n    importlib.import_module(f\"archs.{file_name}\") for file_name in arch_filenames\n]\n\n\ndef build_network(net_opt):\n    which_network = net_opt[\"which_network\"]\n    net = ARCH_REGISTRY.get(which_network)(**net_opt[\"setting\"])\n    return net\n\n\ndef build_loss(loss_opt):\n    loss_type = loss_opt.pop(\"type\")\n    loss = LOSS_REGISTRY.get(loss_type)(**loss_opt)\n    return loss\n\ndef build_scheduler(optimizer, scheduler_opt):\n    scheduler_type = scheduler_opt.pop(\"type\")\n    scheduler = LR_SCHEDULER_REGISTRY.get(scheduler_type)(optimizer, **scheduler_opt)\n    return scheduler\n"
  },
  {
    "path": "codes/config/EDSR/archs/bicubic.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\nfrom utils.resize_utils import imresize\n\n\n@ARCH_REGISTRY.register()\nclass BicuBic(nn.Module):\n    def __init__(self, upscale=4):\n        super().__init__()\n\n        self.empty = nn.Parameter(torch.FlaotTensor([0.0]))\n        self.upscale = upscale\n\n    def forward(self, x):\n        y  = imresize(x, self.upscale)\n        return y\n"
  },
  {
    "path": "codes/config/EDSR/archs/discriminator.py",
    "content": "import torch\nimport torch.nn as nn\nimport torchvision\nimport functools\n\nfrom utils.registry import ARCH_REGISTRY\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG128(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512 * 4 * 4, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG32(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = 
nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass PatchGANDiscriminator(nn.Module):\n    \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n    def __init__(self, in_c, nf, nb, stride=1, norm_layer=nn.InstanceNorm2d):\n        \"\"\"Construct a PatchGAN discriminator\n\n        Parameters:\n            input_nc (int)  -- the number of channels in input images\n            ndf (int)       -- the number of filters in the last conv layer\n            n_layers (int)  -- the number of conv layers in the discriminator\n            norm_layer      -- normalization layer\n        \"\"\"\n        super().__init__()\n        if (\n            type(norm_layer) == functools.partial\n        ):  # no need to use bias as BatchNorm2d has affine parameters\n            use_bias = norm_layer.func == nn.InstanceNorm2d\n        else:\n            use_bias = norm_layer == nn.InstanceNorm2d\n\n        kw = 3\n        padw = 1\n        sequence = [\n            nn.Conv2d(in_c, nf, kernel_size=kw, stride=1, padding=padw),\n            nn.LeakyReLU(0.2, True),\n        ]\n        nf_mult = 1\n        nf_mult_prev = 1\n        for n in range(1, nb):  # gradually increase the number of filters\n            nf_mult_prev = nf_mult\n            nf_mult = min(2 ** n, 8)\n            sequence += [\n                nn.Conv2d(\n                    nf * nf_mult_prev,\n                    nf * nf_mult,\n                    kernel_size=kw,\n                    stride=stride,\n                    padding=padw,\n                    bias=use_bias,\n                ),\n                norm_layer(nf * nf_mult),\n                nn.LeakyReLU(0.2, True),\n            ]\n\n        nf_mult_prev = nf_mult\n        nf_mult = min(2 ** nb, 8)\n        sequence += [\n            nn.Conv2d(\n                nf * nf_mult_prev,\n                nf * nf_mult,\n                kernel_size=kw,\n                stride=1,\n                padding=padw,\n                bias=use_bias,\n            ),\n            norm_layer(nf * nf_mult),\n            nn.LeakyReLU(0.2, True),\n        ]\n\n        sequence += [\n            nn.Conv2d(nf * nf_mult, nf, kernel_size=kw, stride=1, padding=padw)\n        ]  # output 1 
channel prediction map\n        self.model = nn.Sequential(*sequence)\n\n    def forward(self, input):\n        \"\"\"Standard forward.\"\"\"\n        return self.model(input)\n"
  },
  {
    "path": "codes/config/EDSR/archs/edsr.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(\n        self,\n        rgb_range,\n        rgb_mean=(0.4488, 0.4371, 0.4040),\n        rgb_std=(1.0, 1.0, 1.0),\n        sign=-1,\n    ):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\n\n\n@ARCH_REGISTRY.register()\nclass EDSR(nn.Module):\n    def __init__(self, nb, nf, res_scale=0.1, upscale=4, conv=default_conv):\n        super(EDSR, self).__init__()\n\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        scale = upscale\n       
 act = nn.ReLU(True)\n        # url_name = 'r{}f{}x{}'.format(nb, nf, upscale)\n        # if url_name in url:\n        #     self.url = url[url_name]\n        # else:\n        #     self.url = None\n        self.sub_mean = MeanShift(255.0, sign=-1)\n        self.add_mean = MeanShift(255.0, sign=1)\n\n        # define head module\n        m_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, n_feats, kernel_size, act=act, res_scale=res_scale)\n            for _ in range(n_resblocks)\n        ]\n        m_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = nn.Sequential(*m_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x * 255.0)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = self.tail(res)\n        x = self.add_mean(x) / 255.0\n\n        return x\n"
  },
  {
    "path": "codes/config/EDSR/archs/loss.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport lpips as lp\n\nfrom utils.registry import LOSS_REGISTRY\n\nfrom .vgg import VGGFeatureExtractor\n\n\n@LOSS_REGISTRY.register()\nclass GaussGuided(nn.Module):\n    def __init__(self, ksize, sigma):\n        super().__init__()\n\n        ax = torch.arange(0, ksize) - ksize//2\n        xx, yy = torch.meshgrid(ax, ax)\n        dis = (xx ** 2 + yy ** 2)\n        dis = torch.exp(-dis / sigma ** 2)\n        dis = dis / dis.sum()\n\n        self.register_buffer(\"gauss\", dis.view(1, ksize**2, 1, 1))\n    \n    def forward(self, kernel):\n\n        return F.mse_loss(self.gauss, kernel)\n\n@LOSS_REGISTRY.register()\nclass PerceptualLossLPIPS(nn.Module):\n    def __init__(self, net=\"alex\", normalize=True):\n        super().__init__()\n        self.fn = lp.LPIPS(net=net, spatial=True)\n        for p in self.fn.parameters():\n            p.requires_grad = False\n        \n        self.normalize = normalize\n    \n    def forward(self, res, ref):\n        return self.fn(res, ref, normalize=self.normalize).mean(), None\n\n\n@LOSS_REGISTRY.register()\nclass MSELoss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.mse_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass L1Loss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.l1_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass GANLoss(nn.Module):\n    \"\"\"Define GAN loss.\n    Args:\n        gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.\n        real_label_val (float): The value for real label. Default: 1.0.\n        fake_label_val (float): The value for fake label. Default: 0.0.\n    \"\"\"\n\n    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):\n        super(GANLoss, self).__init__()\n        self.gan_type = gan_type\n        self.real_label_val = real_label_val\n        self.fake_label_val = fake_label_val\n\n        if self.gan_type == \"vanilla\":\n            self.loss = nn.BCEWithLogitsLoss()\n        elif self.gan_type == \"lsgan\":\n            self.loss = nn.MSELoss()\n        elif self.gan_type == \"wgan\":\n            self.loss = self._wgan_loss\n        elif self.gan_type == \"wgan_softplus\":\n            self.loss = self._wgan_softplus_loss\n        elif self.gan_type == \"hinge\":\n            self.loss = nn.ReLU()\n        else:\n            raise NotImplementedError(f\"GAN type {self.gan_type} is not implemented.\")\n\n    def _wgan_loss(self, input, target):\n        \"\"\"wgan loss.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return -input.mean() if target else input.mean()\n\n    def _wgan_softplus_loss(self, input, target):\n        \"\"\"wgan loss with soft plus. 
softplus is a smooth approximation to the\n        ReLU function.\n        In StyleGAN2, it is called:\n            Logistic loss for discriminator;\n            Non-saturating loss for generator.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return F.softplus(-input).mean() if target else F.softplus(input).mean()\n\n    def get_target_label(self, input, target_is_real):\n        \"\"\"Get target label.\n        Args:\n            input (Tensor): Input tensor.\n            target_is_real (bool): Whether the target is real or fake.\n        Returns:\n            (bool | Tensor): Target tensor. Return bool for wgan, otherwise,\n                return Tensor.\n        \"\"\"\n\n        if self.gan_type in [\"wgan\", \"wgan_softplus\"]:\n            return target_is_real\n        target_val = self.real_label_val if target_is_real else self.fake_label_val\n        return input.new_ones(input.size()) * target_val\n\n    def forward(self, input, target_is_real, is_disc=False):\n        \"\"\"\n        Args:\n            input (Tensor): The input for the loss module, i.e., the network\n                prediction.\n            target_is_real (bool): Whether the target is real or fake.\n            is_disc (bool): Whether the loss is for discriminators or not.\n                Default: False.\n        Returns:\n            Tensor: GAN loss value.\n        \"\"\"\n        target_label = self.get_target_label(input, target_is_real)\n        if self.gan_type == \"hinge\":\n            if is_disc:  # for discriminators in hinge-gan\n                input = -input if target_is_real else input\n                loss = self.loss(1 + input).mean()\n            else:  # for generators in hinge-gan\n                loss = -input.mean()\n        else:  # other gan types\n            loss = self.loss(input, target_label)\n\n        return loss\n\n\n@LOSS_REGISTRY.register()\nclass PerceptualLoss(nn.Module):\n    \"\"\"Perceptual loss with commonly used style loss.\n    Args:\n        layer_weights (dict): The weight for each layer of vgg feature.\n            Here is an example: {'conv5_4': 1.}, which means the conv5_4\n            feature layer (before relu5_4) will be extracted with weight\n            1.0 in calculating losses.\n        vgg_type (str): The type of vgg network used as feature extractor.\n            Default: 'vgg19'.\n        use_input_norm (bool): If True, normalize the input image in vgg.\n            Default: True.\n        range_norm (bool): If True, normalize images with range [-1, 1] to [0, 1].\n            Default: False.\n        perceptual_weight (float): If `perceptual_weight > 0`, the perceptual\n            loss will be calculated and the loss will be multiplied by the\n            weight. Default: 1.0.\n        style_weight (float): If `style_weight > 0`, the style loss will be\n            calculated and the loss will be multiplied by the weight.\n            Default: 0.\n        criterion (str): Criterion used for perceptual loss. Default: 'l1'.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_weights,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        perceptual_weight=1.0,\n        style_weight=0.0,\n        criterion=\"l1\",\n    ):\n        super(PerceptualLoss, self).__init__()\n        self.perceptual_weight = perceptual_weight\n        self.style_weight = style_weight\n        self.layer_weights = layer_weights\n        self.vgg = VGGFeatureExtractor(\n            layer_name_list=list(layer_weights.keys()),\n            vgg_type=vgg_type,\n            use_input_norm=use_input_norm,\n            range_norm=range_norm,\n        )\n\n        self.criterion_type = criterion\n        if self.criterion_type == \"l1\":\n            self.criterion = torch.nn.L1Loss()\n        elif self.criterion_type == \"l2\":\n            # torch.nn has no L2loss; MSELoss is the l2 criterion\n            self.criterion = torch.nn.MSELoss()\n        elif self.criterion_type == \"fro\":\n            self.criterion = None\n        else:\n            raise NotImplementedError(f\"{criterion} criterion is not supported.\")\n\n    def forward(self, x, gt):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, c, h, w).\n            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        # extract vgg features\n        x_features = self.vgg(x)\n        gt_features = self.vgg(gt.detach())\n\n        # calculate perceptual loss\n        if self.perceptual_weight > 0:\n            percep_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    percep_loss += (\n                        torch.norm(x_features[k] - gt_features[k], p=\"fro\")\n                        * self.layer_weights[k]\n                    )\n                else:\n                    percep_loss += (\n                        self.criterion(x_features[k], gt_features[k])\n                        * self.layer_weights[k]\n                    )\n            percep_loss *= self.perceptual_weight\n        else:\n            percep_loss = None\n\n        # calculate style loss\n        if self.style_weight > 0:\n            style_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    style_loss += (\n                        torch.norm(\n                            self._gram_mat(x_features[k])\n                            - self._gram_mat(gt_features[k]),\n                            p=\"fro\",\n                        )\n                        * self.layer_weights[k]\n                    )\n                else:\n                    style_loss += (\n                        self.criterion(\n                            self._gram_mat(x_features[k]),\n                            self._gram_mat(gt_features[k]),\n                        )\n                        * self.layer_weights[k]\n                    )\n            style_loss *= self.style_weight\n        else:\n            style_loss = None\n\n        return percep_loss, style_loss\n\n    def _gram_mat(self, x):\n        \"\"\"Calculate Gram matrix.\n        Args:\n            x (torch.Tensor): Tensor with shape of (n, c, h, w).\n        Returns:\n            torch.Tensor: Gram matrix.\n        \"\"\"\n        n, c, h, w = x.size()\n        features = x.view(n, c, w * h)\n        features_t = features.transpose(1, 2)\n        gram = features.bmm(features_t) / (c * h * 
w)\n        return gram\n\n\n@LOSS_REGISTRY.register()\nclass CharbonnierLoss(nn.Module):\n    \"\"\"Charbonnier Loss (L1)\"\"\"\n\n    def __init__(self, eps=1e-6):\n        super(CharbonnierLoss, self).__init__()\n        self.eps = eps\n\n    def forward(self, x, y):\n        diff = x - y\n        loss = torch.mean(torch.sqrt(diff * diff + self.eps))\n        return loss\n\n\nclass GradientPenaltyLoss(nn.Module):\n    def __init__(self, device=torch.device(\"cpu\")):\n        super(GradientPenaltyLoss, self).__init__()\n        self.register_buffer(\"grad_outputs\", torch.Tensor())\n        self.grad_outputs = self.grad_outputs.to(device)\n\n    def get_grad_outputs(self, input):\n        if self.grad_outputs.size() != input.size():\n            self.grad_outputs.resize_(input.size()).fill_(1.0)\n        return self.grad_outputs\n\n    def forward(self, interp, interp_crit):\n        grad_outputs = self.get_grad_outputs(interp_crit)\n        grad_interp = torch.autograd.grad(\n            outputs=interp_crit,\n            inputs=interp,\n            grad_outputs=grad_outputs,\n            create_graph=True,\n            retain_graph=True,\n            only_inputs=True,\n        )[0]\n        grad_interp = grad_interp.view(grad_interp.size(0), -1)\n        grad_interp_norm = grad_interp.norm(2, dim=1)\n\n        loss = ((grad_interp_norm - 1) ** 2).mean()\n        return loss\n"
  },
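  {
    "path": "codes/config/EDSR/examples/gan_loss_demo.py",
    "content": "\"\"\"Minimal usage sketch for GANLoss in archs/loss.py.\n\nIllustrative only: the 1x1-conv critic, the tensor sizes and this file's\nlocation are assumptions, not part of the original training code. Run from\ncodes/config/EDSR with `python3 -m examples.gan_loss_demo`.\n\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\nimport torch.nn as nn\n\nfrom archs.loss import GANLoss\n\ncriterion = GANLoss(gan_type=\"lsgan\", real_label_val=1.0, fake_label_val=0.0)\n\nnetD = nn.Conv2d(3, 1, 1)  # toy critic standing in for DiscriminatorVGG128\nreal = torch.rand(2, 3, 32, 32)\nfake = torch.rand(2, 3, 32, 32)\n\n# generator step: push scores of fake samples towards the real label\nloss_g = criterion(netD(fake), target_is_real=True, is_disc=False)\n\n# discriminator step: real towards the real label, fake towards the fake label\nloss_d = criterion(netD(real), True, is_disc=True) + criterion(\n    netD(fake.detach()), False, is_disc=True\n)\nprint(loss_g.item(), loss_d.item())\n"
  },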
  {
    "path": "codes/config/EDSR/archs/lr_scheduler.py",
    "content": "import math\nfrom collections import Counter, defaultdict\n\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\n\nfrom utils.registry import LR_SCHEDULER_REGISTRY\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass LinearDecayLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        decay_prop,\n        total_steps,\n        last_epoch=-1,\n    ):\n        self.decay_prop = decay_prop\n        self.total_steps = total_steps\n\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n\n        return [\n            group[\"initial_lr\"]\n            * (1 - (self.last_epoch + 1) * self.decay_prop / self.total_steps)\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass MultiStepRestartLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        milestones,\n        restarts=None,\n        weights=None,\n        gamma=0.1,\n        clear_state=False,\n        last_epoch=-1,\n    ):\n        self.milestones = Counter(milestones)\n        self.gamma = gamma\n        self.clear_state = clear_state\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch in self.restarts:\n            if self.clear_state:\n                self.optimizer.state = defaultdict(dict)\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        if self.last_epoch not in self.milestones:\n            return [group[\"lr\"] for group in self.optimizer.param_groups]\n        return [\n            group[\"lr\"] * self.gamma ** self.milestones[self.last_epoch]\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass CosineAnnealingRestartLR(_LRScheduler):\n    def __init__(\n        self, optimizer, T_period, restarts=None, weights=None, eta_min=0, last_epoch=-1\n    ):\n        self.T_period = T_period\n        self.T_max = self.T_period[0]  # current T period\n        self.eta_min = eta_min\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        self.last_restart = 0\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch == 0:\n            return self.base_lrs\n        elif self.last_epoch in self.restarts:\n            self.last_restart = self.last_epoch\n            self.T_max = self.T_period[self.restarts.index(self.last_epoch) + 1]\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        elif (self.last_epoch - self.last_restart - 1 - self.T_max) % (\n            2 * self.T_max\n        ) == 0:\n            return [\n                group[\"lr\"]\n                + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2\n                for base_lr, group in 
zip(self.base_lrs, self.optimizer.param_groups)\n            ]\n        return [\n            (1 + math.cos(math.pi * (self.last_epoch - self.last_restart) / self.T_max))\n            / (\n                1\n                + math.cos(\n                    math.pi * ((self.last_epoch - self.last_restart) - 1) / self.T_max\n                )\n            )\n            * (group[\"lr\"] - self.eta_min)\n            + self.eta_min\n            for group in self.optimizer.param_groups\n        ]\n"
  },
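  {
    "path": "codes/config/EDSR/examples/lr_scheduler_demo.py",
    "content": "\"\"\"Minimal usage sketch for CosineAnnealingRestartLR in archs/lr_scheduler.py.\n\nIllustrative only: the step counts, the restart weight and this file's\nlocation are assumptions. Run from codes/config/EDSR with\n`python3 -m examples.lr_scheduler_demo`.\n\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.lr_scheduler import CosineAnnealingRestartLR\n\noptimizer = torch.optim.Adam([torch.nn.Parameter(torch.zeros(1))], lr=2e-4)\nscheduler = CosineAnnealingRestartLR(\n    optimizer,\n    T_period=[250, 250],  # cosine period before and after the restart\n    restarts=[250],  # iteration at which the lr is reset\n    weights=[0.5],  # initial_lr multiplier applied at the restart\n    eta_min=1e-7,\n)\n\nfor step in range(500):\n    optimizer.step()\n    scheduler.step()\n    if step % 100 == 0:\n        print(step, optimizer.param_groups[0][\"lr\"])\n"
  },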
  {
    "path": "codes/config/EDSR/archs/module_util.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\n\ndef initialize_weights(net_l, scale=1):\n    if not isinstance(net_l, list):\n        net_l = [net_l]\n    for net in net_l:\n        for m in net.modules():\n            if isinstance(m, nn.Conv2d):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale  # for residual block\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                init.constant_(m.weight, 1)\n                init.constant_(m.bias.data, 0.0)\n\n\ndef make_layer(block, n_layers):\n    layers = []\n    for _ in range(n_layers):\n        layers.append(block())\n    return nn.Sequential(*layers)\n\n\nclass ResidualBlock_noBN(nn.Module):\n    \"\"\"Residual block w/o BN\n    ---Conv-ReLU-Conv-+-\n     |________________|\n    \"\"\"\n\n    def __init__(self, nf=64):\n        super(ResidualBlock_noBN, self).__init__()\n        self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n\n        # initialization\n        initialize_weights([self.conv1, self.conv2], 0.1)\n\n    def forward(self, x):\n        identity = x\n        out = F.relu(self.conv1(x), inplace=True)\n        out = self.conv2(out)\n        return identity + out\n\n\ndef flow_warp(x, flow, interp_mode=\"bilinear\", padding_mode=\"zeros\"):\n    \"\"\"Warp an image or feature map with optical flow\n    Args:\n        x (Tensor): size (N, C, H, W)\n        flow (Tensor): size (N, H, W, 2), normal value\n        interp_mode (str): 'nearest' or 'bilinear'\n        padding_mode (str): 'zeros' or 'border' or 'reflection'\n\n    Returns:\n        Tensor: warped image or feature map\n    \"\"\"\n    assert x.size()[-2:] == flow.size()[1:3]\n    B, C, H, W = x.size()\n    # mesh grid\n    grid_y, grid_x = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))\n    grid = torch.stack((grid_x, grid_y), 2).float()  # W(x), H(y), 2\n    grid.requires_grad = False\n    grid = grid.type_as(x)\n    vgrid = grid + flow\n    # scale grid to [-1,1]\n    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(W - 1, 1) - 1.0\n    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(H - 1, 1) - 1.0\n    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)\n    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode)\n    return output\n"
  },
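  {
    "path": "codes/config/EDSR/examples/flow_warp_demo.py",
    "content": "\"\"\"Minimal usage sketch for flow_warp in archs/module_util.py.\n\nIllustrative only: the sizes and this file's location are assumptions. A zero\nflow approximately reproduces the input (exactly so only under an\nalign_corners=True sampling convention). Run from codes/config/EDSR with\n`python3 -m examples.flow_warp_demo`.\n\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.module_util import flow_warp\n\nx = torch.rand(1, 3, 8, 8)  # (N, C, H, W)\nflow = torch.zeros(1, 8, 8, 2)  # (N, H, W, 2): per-pixel (dx, dy) offsets\nout = flow_warp(x, flow)  # near-identity warp\nprint(out.shape, (out - x).abs().max().item())\n"
  },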
  {
    "path": "codes/config/EDSR/archs/rcan.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        # freeze the shift parameters; assigning self.requires_grad on the module is a no-op\n        for p in self.parameters():\n            p.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\nclass CALayer(nn.Module):\n    def __init__(self, channel, reduction=16):\n        super(CALayer, self).__init__()\n        # global average pooling: feature --> point\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        # feature channel downscale and upscale --> channel weight\n        self.conv_du = nn.Sequential(\n            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),\n            
nn.ReLU(inplace=True),\n            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),\n            nn.Sigmoid(),\n        )\n\n    def forward(self, x):\n        y = self.avg_pool(x)\n        y = self.conv_du(y)\n        return x * y\n\n\n## Residual Channel Attention Block (RCAB)\nclass RCAB(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        reduction,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(RCAB, self).__init__()\n        modules_body = []\n        for i in range(2):\n            modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                modules_body.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                modules_body.append(act)\n        modules_body.append(CALayer(n_feat, reduction))\n        self.body = nn.Sequential(*modules_body)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x)\n        # res = self.body(x).mul(self.res_scale)\n        res += x\n        return res\n\n\n## Residual Group (RG)\nclass ResidualGroup(nn.Module):\n    def __init__(\n        self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks\n    ):\n        super(ResidualGroup, self).__init__()\n        modules_body = []\n        modules_body = [\n            RCAB(\n                conv,\n                n_feat,\n                kernel_size,\n                reduction,\n                bias=True,\n                bn=False,\n                act=nn.ReLU(True),\n                res_scale=1,\n            )\n            for _ in range(n_resblocks)\n        ]\n        modules_body.append(conv(n_feat, n_feat, kernel_size))\n        self.body = nn.Sequential(*modules_body)\n\n    def forward(self, x):\n        res = self.body(x)\n        res += x\n        return res\n\n\n## Residual Channel Attention Network (RCAN)\n@ARCH_REGISTRY.register()\nclass RCAN(nn.Module):\n    def __init__(self, ng, nb, nf, reduction=16, upscale=4, conv=default_conv):\n        super(RCAN, self).__init__()\n\n        n_resgroups = ng\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        reduction = reduction\n        scale = upscale\n\n        act = nn.ReLU(True)\n\n        # RGB mean for DIV2K\n        rgb_mean = (0.4488, 0.4371, 0.4040)\n        rgb_std = (1.0, 1.0, 1.0)\n        self.sub_mean = MeanShift(1.0, rgb_mean, rgb_std, -1)\n\n        # define head module\n        modules_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        modules_body = [\n            ResidualGroup(\n                conv,\n                n_feats,\n                kernel_size,\n                reduction,\n                act=act,\n                res_scale=1.0,\n                n_resblocks=nb,\n            )\n            for _ in range(ng)\n        ]\n\n        modules_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        modules_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.add_mean = MeanShift(1.0, rgb_mean, rgb_std, 1)\n\n        self.head = nn.Sequential(*modules_head)\n        self.body = nn.Sequential(*modules_body)\n        self.tail = nn.Sequential(*modules_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = 
self.tail(res)\n        x = self.add_mean(x)\n\n        return x\n\n    def load_state_dict(self, state_dict, strict=False):\n        own_state = self.state_dict()\n        for name, param in state_dict.items():\n            if name in own_state:\n                if isinstance(param, nn.Parameter):\n                    param = param.data\n                try:\n                    own_state[name].copy_(param)\n                except Exception:\n                    if name.find(\"tail\") >= 0:\n                        print(\"Replace pre-trained upsampler to new one...\")\n                    else:\n                        raise RuntimeError(\n                            \"While copying the parameter named {}, \"\n                            \"whose dimensions in the model are {} and \"\n                            \"whose dimensions in the checkpoint are {}.\".format(\n                                name, own_state[name].size(), param.size()\n                            )\n                        )\n            elif strict:\n                if name.find(\"tail\") == -1:\n                    raise KeyError('unexpected key \"{}\" in state_dict'.format(name))\n\n        if strict:\n            missing = set(own_state.keys()) - set(state_dict.keys())\n            if len(missing) > 0:\n                raise KeyError('missing keys in state_dict: \"{}\"'.format(missing))\n"
  },
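  {
    "path": "codes/config/EDSR/examples/rcan_demo.py",
    "content": "\"\"\"Minimal usage sketch for the RCAN arch in archs/rcan.py.\n\nIllustrative only: the tiny ng/nb/nf values and this file's location are\nassumptions, far smaller than a real training config. Run from\ncodes/config/EDSR with `python3 -m examples.rcan_demo`.\n\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.rcan import RCAN\n\n# ng residual groups, each with nb channel-attention blocks on nf features\nnet = RCAN(ng=2, nb=2, nf=16, reduction=4, upscale=4)\nlr = torch.rand(1, 3, 24, 24)\nwith torch.no_grad():\n    sr = net(lr)\nprint(sr.shape)  # expected: torch.Size([1, 3, 96, 96])\n"
  },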
  {
    "path": "codes/config/EDSR/archs/rrdb.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\nclass ResidualDenseBlock_5C(nn.Module):\n    def __init__(self, nf=64, gc=32, bias=True):\n        super(ResidualDenseBlock_5C, self).__init__()\n        # gc: growth channel, i.e. intermediate channels\n        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)\n        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)\n        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1\n        )\n\n    def forward(self, x):\n        x1 = self.lrelu(self.conv1(x))\n        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))\n        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))\n        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))\n        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))\n        return x5 * 0.2 + x\n\n\nclass RRDB(nn.Module):\n    \"\"\"Residual in Residual Dense Block\"\"\"\n\n    def __init__(self, nf, gc=32):\n        super(RRDB, self).__init__()\n        self.RDB1 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB2 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB3 = ResidualDenseBlock_5C(nf, gc)\n\n    def forward(self, x):\n        out = self.RDB1(x)\n        out = self.RDB2(out)\n        out = self.RDB3(out)\n        return out * 0.2 + x\n\n\n@ARCH_REGISTRY.register()\nclass RRDBNet(nn.Module):\n    def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4):\n        super(RRDBNet, self).__init__()\n        self.upscale = upscale\n        RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.RRDB_trunk = make_layer(RRDB_block_f, nb)\n        self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        #### upsampling\n        self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        if upscale == 4:\n            self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.conv_first(x)\n        trunk = self.trunk_conv(self.RRDB_trunk(fea))\n        fea = fea + trunk\n\n        if self.upscale == 2 or self.upscale == 3:\n            fea = self.lrelu(\n                self.upconv1(\n                    F.interpolate(fea, scale_factor=self.upscale, mode=\"nearest\")\n                )\n            )\n        if self.upscale == 4:\n            fea = self.lrelu(\n                self.upconv1(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n            fea = self.lrelu(\n                self.upconv2(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n        out = self.conv_last(self.lrelu(self.HRconv(fea)))\n\n        return out\n"
  },
  {
    "path": "codes/config/EDSR/archs/srresnet.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\n@ARCH_REGISTRY.register()\nclass MSRResNet(nn.Module):\n    \"\"\"modified SRResNet\"\"\"\n\n    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):\n        super(MSRResNet, self).__init__()\n        self.upscale = upscale\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        basic_block = functools.partial(ResidualBlock_noBN, nf=nf)\n        self.recon_trunk = make_layer(basic_block, nb)\n\n        # upsampling\n        if self.upscale == 2:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n        elif self.upscale == 3:\n            self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(3)\n        elif self.upscale == 4:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1\n        )\n        if self.upscale == 4:\n            initialize_weights(self.upconv2, 0.1)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv_first(x))\n        out = self.recon_trunk(fea)\n\n        if self.upscale == 4:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n        elif self.upscale == 3 or self.upscale == 2:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n\n        out = self.conv_last(self.lrelu(self.HRconv(out)))\n        base = F.interpolate(\n            x, scale_factor=self.upscale, mode=\"bilinear\", align_corners=False\n        )\n        out += base\n        return out\n"
  },
  {
    "path": "codes/config/EDSR/archs/translator.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\n@ARCH_REGISTRY.register()\nclass Translator(nn.Module):\n    def __init__(self, in_nc, out_nc, nf, nb, scale=4, conv=default_conv):\n        super().__init__()\n\n        self.scale = scale\n\n        # define head module\n        if scale >= 1:\n            m_head = [conv(in_nc, nf, 3)]\n        else:\n            s = int(1 / scale)\n            m_head = [nn.Conv2d(in_nc, nf, kernel_size=2 * s + 1, stride=s, padding=s)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, nf, 3, act=nn.ReLU(True), res_scale=1) for _ in range(nb)\n        ]\n        m_body.append(conv(nf, nf, 3))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, nf, act=False) if scale > 1 else nn.Identity(),\n            conv(nf, out_nc, 3),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = 
nn.Sequential(*m_tail)\n\n    def forward(self, x):\n\n        x = self.head(x)\n        f = self.body(x)\n        x = f + x\n        x = self.tail(x)\n\n        return x\n"
  },
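  {
    "path": "codes/config/EDSR/examples/translator_demo.py",
    "content": "\"\"\"Minimal usage sketch for the Translator arch in archs/translator.py.\n\nIllustrative only: the sizes and this file's location are assumptions. A scale\nbelow 1 routes the input through a strided-conv head (learned downsampling),\nwhile a scale above 1 uses the PixelShuffle tail. Run from codes/config/EDSR\nwith `python3 -m examples.translator_demo`.\n\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.translator import Translator\n\nx = torch.rand(1, 3, 32, 32)\n\ndown = Translator(in_nc=3, out_nc=3, nf=16, nb=2, scale=0.25)  # x4 downsample\nup = Translator(in_nc=3, out_nc=3, nf=16, nb=2, scale=4)  # x4 upsample\nwith torch.no_grad():\n    print(down(x).shape)  # expected: torch.Size([1, 3, 8, 8])\n    print(up(x).shape)  # expected: torch.Size([1, 3, 128, 128])\n"
  },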
  {
    "path": "codes/config/EDSR/archs/vgg.py",
    "content": "import os\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn as nn\nfrom torchvision.models import vgg as vgg\n\nfrom utils.registry import ARCH_REGISTRY\n\nVGG_PRETRAIN_PATH = \"checkpoints/pretrained_models/vgg19-dcbb9e9d.pth\"\nNAMES = {\n    \"vgg11\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg13\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg16\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"pool5\",\n    ],\n    \"vgg19\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"conv3_4\",\n        \"relu3_4\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"conv4_4\",\n        \"relu4_4\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"conv5_4\",\n        \"relu5_4\",\n        \"pool5\",\n    ],\n}\n\n\ndef insert_bn(names):\n    \"\"\"Insert bn layer after each conv.\n    Args:\n        names (list): The list of layer names.\n    Returns:\n        list: The list of layer names with bn layers.\n    \"\"\"\n    names_bn = []\n    for name in names:\n        names_bn.append(name)\n        if \"conv\" in name:\n            position = name.replace(\"conv\", \"\")\n            names_bn.append(\"bn\" + position)\n    return names_bn\n\n\n@ARCH_REGISTRY.register()\nclass VGGFeatureExtractor(nn.Module):\n    \"\"\"VGG network for feature extraction.\n    In this implementation, we allow users to choose whether use normalization\n    in the input feature and the type of vgg network. 
Note that the pretrained\n    path must fit the vgg type.\n    Args:\n        layer_name_list (list[str]): Forward function returns the corresponding\n            features according to the layer_name_list.\n            Example: ['relu1_1', 'relu2_1', 'relu3_1'].\n        vgg_type (str): Set the type of vgg network. Default: 'vgg19'.\n        use_input_norm (bool): If True, normalize the input image. Importantly,\n            the input feature must be in the range [0, 1]. Default: True.\n        range_norm (bool): If True, normalize images with range [-1, 1] to [0, 1].\n            Default: False.\n        requires_grad (bool): If True, the parameters of VGG network will be\n            optimized. Default: False.\n        remove_pooling (bool): If True, the max pooling operations in VGG net\n            will be removed. Default: False.\n        pooling_stride (int): The stride of max pooling operation. Default: 2.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_name_list,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        requires_grad=False,\n        remove_pooling=False,\n        pooling_stride=2,\n    ):\n        super(VGGFeatureExtractor, self).__init__()\n\n        self.layer_name_list = layer_name_list\n        self.use_input_norm = use_input_norm\n        self.range_norm = range_norm\n\n        self.names = NAMES[vgg_type.replace(\"_bn\", \"\")]\n        if \"bn\" in vgg_type:\n            self.names = insert_bn(self.names)\n\n        # only borrow layers that will be used to avoid unused params\n        max_idx = 0\n        for v in layer_name_list:\n            idx = self.names.index(v)\n            if idx > max_idx:\n                max_idx = idx\n\n        if os.path.exists(VGG_PRETRAIN_PATH):\n            vgg_net = getattr(vgg, vgg_type)(pretrained=False)\n            state_dict = torch.load(\n                VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage\n            )\n            vgg_net.load_state_dict(state_dict)\n        else:\n            vgg_net = getattr(vgg, vgg_type)(pretrained=True)\n\n        features = vgg_net.features[: max_idx + 1]\n\n        modified_net = OrderedDict()\n        for k, v in zip(self.names, features):\n            if \"pool\" in k:\n                # if remove_pooling is true, pooling operation will be removed\n                if remove_pooling:\n                    continue\n                else:\n                    # in some cases, we may want to change the default stride\n                    modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride)\n            else:\n                modified_net[k] = v\n\n        self.vgg_net = nn.Sequential(modified_net)\n\n        if not requires_grad:\n            self.vgg_net.eval()\n            for param in self.parameters():\n                param.requires_grad = False\n        else:\n            self.vgg_net.train()\n            for param in self.parameters():\n                param.requires_grad = True\n\n        if self.use_input_norm:\n            # the mean is for image with range [0, 1]\n            self.register_buffer(\n                \"mean\", torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)\n            )\n            # the std is for image with range [0, 1]\n            self.register_buffer(\n                \"std\", torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)\n            )\n\n    def forward(self, x):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, 
c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        if self.range_norm:\n            x = (x + 1) / 2\n        if self.use_input_norm:\n            x = (x - self.mean) / self.std\n\n        output = {}\n        for key, layer in self.vgg_net._modules.items():\n            x = layer(x)\n            if key in self.layer_name_list:\n                output[key] = x.clone()\n\n        return output\n"
  },
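  {
    "path": "codes/config/EDSR/examples/vgg_features_demo.py",
    "content": "\"\"\"Minimal usage sketch for VGGFeatureExtractor in archs/vgg.py.\n\nIllustrative only: the chosen layers, the input size and this file's location\nare assumptions. Falls back to downloading torchvision's vgg19 weights when\nthe local VGG_PRETRAIN_PATH checkpoint is missing. Run from codes/config/EDSR\nwith `python3 -m examples.vgg_features_demo`.\n\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.vgg import VGGFeatureExtractor\n\nextractor = VGGFeatureExtractor(\n    layer_name_list=[\"relu2_2\", \"relu3_4\"],\n    vgg_type=\"vgg19\",\n    use_input_norm=True,  # inputs are expected in [0, 1]\n)\nwith torch.no_grad():\n    feats = extractor(torch.rand(1, 3, 64, 64))\nfor name, f in feats.items():\n    print(name, tuple(f.shape))\n"
  },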
  {
    "path": "codes/config/EDSR/count_flops.py",
    "content": "import argparse\nimport sys\n\nimport torch\nfrom torchsummaryX import summary\n\nsys.path.append(\"../../\")\nimport utils.option as option\nfrom models import create_model\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"--opt\",\n    type=str,\n    default=\"options/setting1/test/test_setting1_x4.yml\",\n    help=\"Path to the option YAML file.\",\n)\nargs = parser.parse_args()\nopt = option.parse(args.opt, root_path=\".\", is_train=True)\n\nopt = option.dict_to_nonedict(opt)\nmodel = create_model(opt)\n\ntest_tensor = torch.randn(1, 3, 270, 180).cuda()\nfor name, net in model.networks.items():\n    summary(net.cuda(), x=test_tensor)\n    print(\"Above are results for net {}\".format(name))\n    input()\n"
  },
  {
    "path": "codes/config/EDSR/inference.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport os.path as osp\nimport random\nimport sys\nimport cv2\nfrom collections import defaultdict\nfrom glob import glob\nfrom tqdm import tqdm\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom data.data_sampler import DistIterSampler\nfrom metrics import IQA\nfrom models import create_model\n\n\n#### options\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"-opt\",\n    type=str,\n    default=\"options/test/2020Track2.yml\",\n    help=\"Path to the options YAML file.\",\n)\nparser.add_argument(\"-input_dir\", type=str, default=\"../../../data_samples/LR\")\nparser.add_argument(\"-output_dir\", type=str, default=\"../../../data_samples/BSRGAN\")\nargs = parser.parse_args()\nopt = option.parse(args.opt, is_train=False)\n\nopt = option.dict_to_nonedict(opt)\n\nmodel = create_model(opt)\n\nif not osp.exists(args.output_dir):\n    os.makedirs(args.output_dir)\n\ntest_files = glob(osp.join(args.input_dir, \"*\"))\nfor inx, path in tqdm(enumerate(test_files)):\n    name = path.split(\"/\")[-1].split(\".\")[0]\n\n    img = cv2.imread(path)[:, :, [2, 1, 0]]\n    img = img.transpose(2, 0, 1)[None] / 255\n    img_t = torch.as_tensor(np.ascontiguousarray(img)).float()\n\n    model.test({\"src\": img_t})\n    outdict = model.get_current_visuals()\n\n    sr = outdict[\"sr\"]\n    sr_im = util.tensor2img(sr)\n\n    save_path = osp.join(args.output_dir, \"{}_x{}.png\".format(name, opt[\"scale\"]))\n    cv2.imwrite(save_path, sr_im)\n"
  },
  {
    "path": "codes/config/EDSR/models/__init__.py",
    "content": "import importlib\nimport logging\nimport os\nimport os.path as osp\n\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\nmodel_folder = osp.dirname(__file__)\nmodel_names = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(model_folder)\n    if v.endswith(\"_model.py\")\n]\n_model_modules = [\n    importlib.import_module(f\"models.{file_name}\") for file_name in model_names\n]\n\n\ndef create_model(opt, **kwarg):\n    model = opt[\"model\"]\n    m = MODEL_REGISTRY.get(model)(opt, **kwarg)\n    logger.info(\"Model [{:s}] is created.\".format(m.__class__.__name__))\n    return m\n"
  },
  {
    "path": "codes/config/EDSR/models/base_model.py",
    "content": "import logging\nimport os\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\n\nfrom archs import build_loss, build_network, build_scheduler\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass BaseModel:\n    def __init__(self, opt):\n\n        self.opt = opt\n\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n            self.world_size = torch.distributed.get_world_size()\n        else:\n            self.rank = 0  # non dist training\n\n        self.device = torch.device(\"cuda\" if opt[\"gpu_ids\"] is not None else \"cpu\")\n        self.is_train = opt[\"is_train\"]\n        self.log_dict = OrderedDict()\n\n        self.data_names = []\n        self.networks = {}\n\n        self.optimizers = {}\n        self.schedulers = {}\n\n    def setup_train(self, train_opt):\n        # define losses\n        loss_opt = train_opt[\"losses\"]\n        self.losses = self.build_losses(loss_opt)\n\n        # build optmizers\n        optimizer_opts = train_opt[\"optimizers\"]\n        self.optimizers = self.build_optimizers(optimizer_opts)\n\n        # set schedulers\n        scheduler_opts = train_opt[\"schedulers\"]\n        self.schedulers = self.build_schedulers(scheduler_opts)\n\n        # set to training state\n        self.set_network_state(self.networks.keys(), \"train\")\n\n    def feed_data(self, data):\n        pass\n\n    def optimize_parameters(self):\n        pass\n\n    def get_current_visuals(self):\n        pass\n\n    def get_current_losses(self):\n        pass\n\n    def print_network(self):\n        pass\n\n    def save(self, label):\n        pass\n\n    def load(self):\n        pass\n\n    def build_network(self, net_opt):\n\n        net = build_network(net_opt)\n\n        if isinstance(net, nn.Module):\n            net = self.model_to_device(net)\n\n            if net_opt.get(\"pretrain\"):\n                pretrain = net_opt.pop(\"pretrain\")\n                self.load_network(net, pretrain[\"path\"], pretrain[\"strict_load\"])\n\n            self.print_network(net)\n        return net\n\n    def build_losses(self, loss_opt):\n        losses = {}\n\n        defined_loss_names = list(loss_opt.keys())\n        assert set(defined_loss_names).issubset(set(self.loss_names))\n\n        for name in defined_loss_names:\n            loss_conf = loss_opt.get(name)\n            if loss_conf[\"weight\"] > 0:\n                self.loss_weights[name] = loss_conf.pop(\"weight\")\n                losses[name] = build_loss(loss_conf).to(self.device)\n\n        return losses\n\n    def build_optimizers(self, optim_opts):\n        optimizers = {}\n\n        if \"default\" in optim_opts.keys():\n            default_optim = optim_opts.pop(\"default\")\n\n        defined_optimizer_names = list(optim_opts.keys())\n        assert set(defined_optimizer_names).issubset(self.networks.keys())\n\n        for name in defined_optimizer_names:\n            optim_opt = optim_opts[name]\n            if optim_opt is None:\n                optim_opt = default_optim.copy()\n\n            params = []\n            for v in self.networks[name].parameters():\n                if v.requires_grad:\n                    params.append(v)\n\n            optim_type = optim_opt.pop(\"type\")\n            optimizer = getattr(torch.optim, optim_type)(params=params, **optim_opt)\n            optimizers[name] = optimizer\n\n  
      return optimizers\n\n    def build_schedulers(self, scheduler_opts):\n        \"\"\"Set up scheduler.\"\"\"\n        schedulers = {}\n        if \"default\" in scheduler_opts.keys():\n            default_opt = scheduler_opts.pop(\"default\")\n\n        for name in self.optimizers.keys():\n            scheduler_opt = scheduler_opts[name]\n            if scheduler_opt is None:\n                scheduler_opt = default_opt.copy()\n\n            schedulers[name] = build_scheduler(self.optimizers[name], scheduler_opt)\n\n        return schedulers\n\n    def model_to_device(self, net):\n        \"\"\"Model to device. It also warps models with DistributedDataParallel\n        or DataParallel.\n        Args:\n            net (nn.Module)\n        \"\"\"\n        net = net.to(self.device)\n        if self.opt[\"dist\"]:\n            net = DistributedDataParallel(net, device_ids=[torch.cuda.current_device()])\n        else:\n            net = DataParallel(net)\n        return net\n\n    def print_network(self, net):\n        # Generator\n        s, n = self.get_network_description(net)\n        if isinstance(net, nn.DataParallel) or isinstance(net, DistributedDataParallel):\n            net_struc_str = \"{} - {}\".format(\n                net.__class__.__name__, net.module.__class__.__name__\n            )\n        else:\n            net_struc_str = \"{}\".format(net.__class__.__name__)\n        if self.rank <= 0:\n            logger.info(\n                \"Network G structure: {}, with parameters: {:,d}\".format(\n                    net_struc_str, n\n                )\n            )\n            logger.info(s)\n\n    def set_optimizer(self, names, operation):\n        for name in names:\n            getattr(self.optimizers[name], operation)()\n\n    def set_requires_grad(self, names, requires_grad):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                for v in self.networks[name].parameters():\n                    v.requires_grad = requires_grad\n\n    def set_network_state(self, names, state):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                getattr(self.networks[name], state)()\n\n    def clip_grad_norm(self, names, norm):\n        for name in names:\n            nn.utils.clip_grad_norm_(self.networks[name].parameters(), max_norm=norm)\n\n    def _set_lr(self, lr_groups_l):\n        \"\"\"set learning rate for warmup,\n        lr_groups_l: list for lr_groups. 
each for an optimizer.\"\"\"\n        # self.optimizers is a dict keyed by network name; iterate its values\n        for optimizer, lr_groups in zip(self.optimizers.values(), lr_groups_l):\n            for param_group, lr in zip(optimizer.param_groups, lr_groups):\n                param_group[\"lr\"] = lr\n\n    def _get_init_lr(self):\n        # get the initial lr, which is set by the scheduler\n        init_lr_groups_l = []\n        for optimizer in self.optimizers.values():\n            init_lr_groups_l.append([v[\"initial_lr\"] for v in optimizer.param_groups])\n        return init_lr_groups_l\n\n    def update_learning_rate(self, cur_iter, warmup_iter=-1):\n        for _, scheduler in self.schedulers.items():\n            scheduler.step()\n        #### set up warm up learning rate\n        if cur_iter < warmup_iter:\n            # get initial lr for each group\n            init_lr_g_l = self._get_init_lr()\n            # modify warming-up learning rates\n            warm_up_lr_l = []\n            for init_lr_g in init_lr_g_l:\n                warm_up_lr_l.append([v / warmup_iter * cur_iter for v in init_lr_g])\n            # set learning rate\n            self._set_lr(warm_up_lr_l)\n\n    def get_current_learning_rate(self):\n        # return self.schedulers[0].get_lr()[0]\n        return list(self.optimizers.values())[0].param_groups[0][\"lr\"]\n\n    def get_network_description(self, network):\n        \"\"\"Get the string and total parameters of the network\"\"\"\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        s = str(network)\n        n = sum(map(lambda x: x.numel(), network.parameters()))\n        return s, n\n\n    def save_network(self, network, network_label, iter_label):\n        save_filename = \"{}_{}.pth\".format(iter_label, network_label)\n        save_path = os.path.join(self.opt[\"path\"][\"models\"], save_filename)\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        state_dict = network.state_dict()\n        for key, param in state_dict.items():\n            state_dict[key] = param.cpu()\n        torch.save(state_dict, save_path)\n\n    def save(self, iter_label):\n        for name in self.optimizers.keys():\n            self.save_network(self.networks[name], name, iter_label)\n\n    def load_network(self, network, load_path, strict=True):\n        if load_path is not None:\n            if isinstance(network, nn.DataParallel) or isinstance(\n                network, DistributedDataParallel\n            ):\n                network = network.module\n            load_net = torch.load(load_path)\n            load_net_clean = OrderedDict()  # remove unnecessary 'module.'\n            for k, v in load_net.items():\n                if k.startswith(\"module.\"):\n                    load_net_clean[k[7:]] = v\n                else:\n                    load_net_clean[k] = v\n            network.load_state_dict(load_net_clean, strict=strict)\n\n    def save_training_state(self, epoch, iter_step):\n        \"\"\"Saves training state during training, which will be used for resuming\"\"\"\n        state = {\"epoch\": epoch, \"iter\": iter_step, \"schedulers\": {}, \"optimizers\": {}}\n        for k, s in self.schedulers.items():\n            state[\"schedulers\"][k] = s.state_dict()\n        for k, o in self.optimizers.items():\n            state[\"optimizers\"][k] = o.state_dict()\n        save_filename = \"{}.state\".format(iter_step)\n        save_path = os.path.join(self.opt[\"path\"][\"training_state\"], save_filename)\n        torch.save(state, save_path)\n\n    def resume_training(self, resume_state):\n        \"\"\"Resume the optimizers and schedulers for training\"\"\"\n        resume_optimizers = resume_state[\"optimizers\"]\n        resume_schedulers = resume_state[\"schedulers\"]\n        assert len(resume_optimizers) == len(\n            self.optimizers\n        ), \"Wrong lengths of optimizers\"\n        assert len(resume_schedulers) == len(\n            self.schedulers\n        ), \"Wrong lengths of schedulers\"\n        for name, o in resume_optimizers.items():\n            self.optimizers[name].load_state_dict(o)\n        for name, s in resume_schedulers.items():\n            self.schedulers[name].load_state_dict(s)\n\n    def reduce_loss_dict(self, loss_dict):\n        \"\"\"Reduce loss dict.\n        In distributed training, it averages the losses among different GPUs.\n        Args:\n            loss_dict (OrderedDict): Loss dict.\n        \"\"\"\n        with torch.no_grad():\n            if self.opt[\"dist\"]:\n                keys = []\n                losses = []\n                for name, value in loss_dict.items():\n                    keys.append(name)\n                    losses.append(value)\n                losses = torch.stack(losses, 0)\n                torch.distributed.reduce(losses, dst=0)\n                if self.rank == 0:\n                    losses /= self.world_size\n                loss_dict = {key: loss for key, loss in zip(keys, losses)}\n\n            log_dict = OrderedDict()\n            for name, value in loss_dict.items():\n                log_dict[name] = value.mean().item()\n\n            return log_dict\n\n    def get_current_log(self):\n        return self.log_dict\n"
  },
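  {
    "path": "codes/config/EDSR/examples/warmup_lr_demo.py",
    "content": "\"\"\"Minimal sketch of the warmup path in models/base_model.py.\n\nIllustrative only: the bare opt dict, the names and the numbers are\nassumptions. For cur_iter < warmup_iter, update_learning_rate rescales every\nparam group to initial_lr * cur_iter / warmup_iter. Run from codes/config/EDSR\nwith `python3 -m examples.warmup_lr_demo`.\n\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.lr_scheduler import LinearDecayLR\nfrom models.base_model import BaseModel\n\nmodel = BaseModel({\"dist\": False, \"gpu_ids\": None, \"is_train\": False})\noptimizer = torch.optim.Adam([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)\nmodel.optimizers = {\"netSR\": optimizer}\nmodel.schedulers = {\"netSR\": LinearDecayLR(optimizer, decay_prop=1.0, total_steps=1000)}\n\nmodel.update_learning_rate(cur_iter=10, warmup_iter=100)\nprint(optimizer.param_groups[0][\"lr\"])  # 1e-3 * 10 / 100 = 1e-4\n"
  },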
  {
    "path": "codes/config/EDSR/models/sr_model.py",
    "content": "import logging\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.registry import MODEL_REGISTRY\n\nfrom .base_model import BaseModel\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass SRModel(BaseModel):\n    def __init__(self, opt):\n        super().__init__(opt)\n\n        self.data_names = [\"lr\", \"hr\"]\n\n        self.network_names = [\"netSR\"]\n        self.networks = {}\n\n        self.loss_names = [\"sr_adv\", \"sr_pix\", \"sr_percep\"]\n        self.loss_weights = {}\n        self.losses = {}\n        self.optimizers = {}\n\n        # define networks and load pretrained models\n        nets_opt = opt[\"networks\"]\n        defined_network_names = list(nets_opt.keys())\n        assert set(defined_network_names).issubset(set(self.network_names))\n\n        for name in defined_network_names:\n            setattr(self, name, self.build_network(nets_opt[name]))\n            self.networks[name] = getattr(self, name)\n\n        if self.is_train:\n            # setup loss, optimizers, schedulers\n            self.setup_train(opt[\"train\"])\n\n    def feed_data(self, data):\n\n        self.lr = data[\"src\"].to(self.device)\n        self.hr = data[\"tgt\"].to(self.device)\n\n    def forward(self):\n\n        self.sr = self.netSR(self.lr)\n\n    def optimize_parameters(self, step):\n\n        self.forward()\n\n        loss_dict = OrderedDict()\n\n        l_sr = 0\n\n        sr_pix = self.losses[\"sr_pix\"](self.hr, self.sr)\n        loss_dict[\"sr_pix\"] = sr_pix\n        l_sr += self.loss_weights[\"sr_pix\"] * sr_pix\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD\"], False)\n            sr_adv_g = self.calculate_rgan_loss_G(\n                self.netD, self.losses[\"sr_adv\"], self.hr, self.sr\n            )\n            loss_dict[\"sr_adv_g\"] = sr_adv_g\n            l_sr += self.loss_weights[\"sr_adv\"] * sr_adv_g\n\n        if self.losses.get(\"sr_percep\"):\n            sr_percep, sr_style = self.losses[\"sr_percep\"](self.hr, self.sr)\n            loss_dict[\"sr_percep\"] = sr_percep\n            if sr_style is not None:\n                loss_dict[\"sr_style\"] = sr_style\n                l_sr += self.loss_weights[\"sr_percep\"] * sr_style\n            l_sr += self.loss_weights[\"sr_percep\"] * sr_percep\n\n        self.set_optimizer(names=[\"netSR\"], operation=\"zero_grad\")\n        l_sr.backward()\n        self.set_optimizer(names=[\"netSR\"], operation=\"step\")\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD\"], True)\n            sr_adv_d = self.calculate_rgan_loss_D(\n                self.netD, self.losses[\"sr_adv\"], self.hr, self.sr\n            )\n            loss_dict[\"sr_adv_d\"] = sr_adv_d\n\n            self.optimizers[\"netD\"].zero_grad()\n            sr_adv_d.backward()\n            self.optimizers[\"netD\"].step()\n\n        self.log_dict = self.reduce_loss_dict(loss_dict)\n\n    def calculate_rgan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n        loss_real = criterion(\n            d_pred_real - d_pred_fake.detach().mean(), True, is_disc=False\n        )\n        loss_fake = criterion(\n            d_pred_fake - d_pred_real.detach().mean(), False, is_disc=False\n        )\n\n        loss = (loss_real + loss_fake) / 2\n\n        return loss\n\n    def calculate_rgan_loss_G(self, netD, criterion, real, fake):\n\n        
d_pred_fake = netD(fake)\n        d_pred_real = netD(real).detach()\n        loss_real = criterion(d_pred_real - d_pred_fake.mean(), False, is_disc=False)\n        loss_fake = criterion(d_pred_fake - d_pred_real.mean(), True, is_disc=False)\n\n        loss = (loss_real + loss_fake) / 2\n\n        return loss\n\n    def test(self, data, crop_size=None):\n        self.real_lr = data[\"src\"].to(self.device)\n        self.netSR.eval()\n        with torch.no_grad():\n            if crop_size is None:\n                self.fake_real_hr = self.netSR(self.real_lr)\n            else:\n                self.fake_real_hr = self.crop_test(self.real_lr, crop_size)\n        self.netSR.train()\n    \n    def crop_test(self, lr, crop_size):\n        b, c, h, w = lr.shape\n        scale = self.opt[\"scale\"]\n\n        # first pass: tile crop_size patches from the top-left corner; the\n        # trailing partial row/column is covered by the second pass below\n        h_start = list(range(0, h-crop_size, crop_size))\n        w_start = list(range(0, w-crop_size, crop_size))\n\n        # -1 marks output pixels that no patch has written yet\n        sr1 = torch.zeros(b, c, int(h*scale), int(w* scale), device=self.device) - 1\n        for hs in h_start:\n            for ws in w_start:\n                lr_patch = lr[:, :, hs: hs+crop_size, ws: ws+crop_size]\n                sr_patch = self.netSR(lr_patch)\n\n                sr1[:, :, \n                    int(hs*scale):int((hs+crop_size)*scale),\n                    int(ws*scale):int((ws+crop_size)*scale)\n                ] = sr_patch\n        \n        # second pass: tile patches anchored at the bottom-right corner\n        h_end = list(range(h, crop_size, -crop_size))\n        w_end = list(range(w, crop_size, -crop_size))\n\n        sr2 = torch.zeros(b, c, int(h*scale), int(w* scale), device=self.device) - 1\n        for hd in h_end:\n            for wd in w_end:\n                lr_patch = lr[:, :, hd-crop_size:hd, wd-crop_size:wd]\n                sr_patch = self.netSR(lr_patch)\n\n                sr2[:, :, \n                    int((hd-crop_size)*scale):int(hd*scale),\n                    int((wd-crop_size)*scale):int(wd*scale)\n                ] = sr_patch\n\n        # fuse the two passes: take whichever pass covered a pixel, and\n        # average where both did (assumes valid SR outputs are positive)\n        mask1 = (\n            (sr1 == -1).float() * 0 + \n            (sr2 == -1).float() * 1 + \n            ((sr1 > 0) * (sr2 > 0)).float() * 0.5\n        )\n\n        mask2 = (\n            (sr1 == -1).float() * 1 + \n            (sr2 == -1).float() * 0 + \n            ((sr1 > 0) * (sr2 > 0)).float() * 0.5\n        )\n\n        sr = mask1 * sr1 + mask2 * sr2\n\n        return sr\n            \n    def get_current_visuals(self, need_GT=True):\n        out_dict = OrderedDict()\n        out_dict[\"lr\"] = self.real_lr.detach()[0].float().cpu()\n        out_dict[\"sr\"] = self.fake_real_hr.detach()[0].float().cpu()\n        return out_dict\n"
  },
  {
    "path": "codes/config/EDSR/options/test/2017Track2_2020Track1.yml",
    "content": "#### general settings\nname: Bicubic_2017Track2_2020Track1\nuse_tb_logger: false\nmodel: SRModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [psnr, ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2017Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  test5:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nb: 16\n      nf: 64\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n"
  },
  {
    "path": "codes/config/EDSR/options/test/2018Track2_2020Track4.yml",
    "content": "#### general settings\nname: Bicubic_2018Track2_2018Track4\nuse_tb_logger: false\nmodel: SRModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [best_psnr, best_ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  test2:\n    name: 2018Track4\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nb: 16\n      nf: 64\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n"
  },
  {
    "path": "codes/config/EDSR/options/test/2020Track2.yml",
    "content": "#### general settings\nname: 2020Track2\nuse_tb_logger: false\nmodel: SRModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2020Track2\n    mode: SingleDataset\n    data_type: lmdb\n    dataroot: /home/lzx/SRDatasets/NTIRE2020/track2/test.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nb: 16\n      nf: 64\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true"
  },
  {
    "path": "codes/config/EDSR/test.py",
    "content": "import argparse\nimport logging\nimport os.path\nimport sys\nimport time\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\nfrom utils import bgr2ycbcr, imresize\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=False)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    util.mkdirs(\n        (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n    )\n\n    os.system(\"rm ./result\")\n    os.symlink(os.path.join(opt[\"path\"][\"results_root\"], \"..\"), \"./result\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    torch.backends.cudnn.benchmark = True\n\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"test_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO,\n        screen=True,\n        tofile=True,\n    )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    logger = logging.getLogger(\"base\")\n    logger.info(option.dict2str(opt))\n\n    # Create test dataset and dataloader\n    test_datasets = []\n    test_loaders = []\n\n    for phase, dataset_opt in 
sorted(opt[\"datasets\"].items()):\n\n        test_set = create_dataset(dataset_opt)\n        test_loader = create_dataloader(test_set, dataset_opt, opt[\"dist\"])\n\n        if rank == 0:\n            logger.info(\n                \"Number of test images in [{:s}]: {:d}\".format(\n                    dataset_opt[\"name\"], len(test_set)\n                )\n            )\n        test_datasets.append(test_set)\n        test_loaders.append(test_loader)\n\n    # load pretrained model by default\n    model = create_model(opt)\n\n    for test_dataset, test_loader in zip(test_datasets, test_loaders):\n\n        test_set_name = test_dataset.opt[\"name\"]\n        dataset_dir = os.path.join(opt[\"path\"][\"results_root\"], test_set_name)\n\n        if rank == 0:\n            logger.info(\"\\nTesting [{:s}]...\".format(test_set_name))\n            util.mkdir(dataset_dir)\n\n        validate(\n            model,\n            test_dataset,\n            test_loader,\n            opt,\n            measure,\n            dataset_dir,\n            test_set_name,\n            logger,\n        )\n\n\ndef validate(\n    model, dataset, dist_loader, opt, measure, dataset_dir, test_set_name, logger\n):\n\n    test_results = {}\n    test_results_y = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n        test_results_y[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        test_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        img_path = test_data[\"src_path\"][0]\n        img_name = img_path.split(\"/\")[-1].split(\".\")[0]\n\n        model.test(test_data)\n        visuals = model.get_current_visuals()\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n\n        suffix = opt[\"suffix\"]\n        if suffix:\n            save_img_path = os.path.join(dataset_dir, img_name + suffix + \".png\")\n        else:\n            save_img_path = os.path.join(dataset_dir, img_name + \".png\")\n        util.save_img(sr_img, save_img_path)\n\n        message = \"img:{:15s}; \".format(img_name)\n\n        crop_border = opt[\"crop_border\"] if opt[\"crop_border\"] else opt[\"scale\"]\n\n        if crop_border == 0:\n            cropped_sr_img = sr_img\n        else:\n            cropped_sr_img = sr_img[\n                crop_border:-crop_border, crop_border:-crop_border, :\n            ]\n\n        if \"tgt\" in test_data.keys():\n            gt_img = util.tensor2img(test_data[\"tgt\"][0].double().cpu())\n\n            if crop_border == 0:\n                cropped_gt_img = gt_img\n            else:\n                cropped_gt_img = gt_img[\n                    crop_border:-crop_border, crop_border:-crop_border, :\n                ]\n        else:\n            gt_img = None\n            cropped_gt_img = None\n\n        message += \"Scores - \"\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n            message += \"{}: {:.6f}; \".format(k, v)\n\n        if sr_img.shape[2] == 3:  # RGB image\n            sr_img_y = bgr2ycbcr(sr_img, only_y=True)\n            if crop_border == 0:\n                cropped_sr_img_y = sr_img_y * 255\n            else:\n                
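# remove crop_border pixels from each side of the Y channel and\n                # rescale it to the 0-255 range before scoring\n                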
cropped_sr_img_y = (\n                    sr_img_y[crop_border:-crop_border, crop_border:-crop_border] * 255\n                )\n            if gt_img is not None:\n                gt_img_y = bgr2ycbcr(gt_img, only_y=True)\n                if crop_border == 0:\n                    cropped_gt_img_y = gt_img_y * 255\n                else:\n                    cropped_gt_img_y = (\n                        gt_img_y[crop_border:-crop_border, crop_border:-crop_border]\n                        * 255\n                    )\n            else:\n                gt_img_y = None\n                cropped_gt_img_y = None\n\n            message += \"Y Scores - \"\n            scores = measure(\n                res=cropped_sr_img_y, ref=cropped_gt_img_y, metrics=opt[\"metrics\"]\n            )\n            for k, v in scores.items():\n                test_results_y[k][idx] = v\n                message += \"{}: {:.6f}; \".format(k, v)\n\n        logger.info(message)\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n        for k, v in test_results_y.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    # log\n    avg_results = {}\n    message = \"Average Results for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger.info(message)\n\n    avg_results_y = {}\n    message = \"Average Results on Y channel for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results_y.items():\n            avg_results_y[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results_y[k])\n\n        logger.info(message)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/config/EDSR/train.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport random\nimport sys\nimport time\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef setup_dataloaer(opt, logger):\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        rank = 0\n        world_size = 1\n\n    for phase, dataset_opt in opt[\"datasets\"].items():\n        if phase == \"train\":\n            train_set = create_dataset(dataset_opt)\n            train_loader = create_dataloader(train_set, dataset_opt, opt[\"dist\"])\n            total_iters = opt[\"train\"][\"niter\"]\n            total_epochs = total_iters // (len(train_loader) - 1) + 1\n            if rank == 0:\n                logger.info(\n                    \"Number of train images: {:,d}, iters: {:,d}\".format(\n                        len(train_set), len(train_loader)\n                    )\n                )\n                logger.info(\n                    \"Total epochs needed: {:d} for iters {:,d}\".format(\n                        total_epochs, opt[\"train\"][\"niter\"]\n                    )\n                )\n\n        elif phase == \"val\":\n            val_set = create_dataset(dataset_opt)\n            val_loader = create_dataloader(val_set, dataset_opt, opt[\"dist\"])\n            if rank == 0:\n                logger.info(\n                    \"Number of val images in [{:s}]: {:d}\".format(\n                        dataset_opt[\"name\"], len(val_set)\n                    )\n                )\n        else:\n            raise NotImplementedError(\"Phase [{:s}] is not recognized.\".format(phase))\n\n    assert train_loader is not None\n    assert val_loader is not None\n\n    return train_set, train_loader, val_set, val_loader, total_iters, total_epochs\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=True)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = 
torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    if opt[\"train\"].get(\"resume_state\", None) is None:\n        util.mkdir_and_rename(\n            opt[\"path\"][\"experiments_root\"]\n        )  # rename experiment folder if exists\n        util.mkdirs(\n            (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n        )\n        os.system(\"rm ./log\")\n        os.symlink(os.path.join(opt[\"path\"][\"experiments_root\"], \"..\"), \"./log\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: \\\n            {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    seed = opt[\"train\"][\"manual_seed\"]\n    if seed is None:\n        util.set_random_seed(rank)\n\n    torch.backends.cudnn.benchmark = True\n    # torch.backends.cudnn.deterministic = True\n\n    # setup tensorboard and val logger\n    if rank == 0:\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger = SummaryWriter(log_dir=\"log/{}/tb_logger/\".format(opt[\"name\"]))\n\n        util.setup_logger(\n            \"val\",\n            opt[\"path\"][\"log\"],\n            \"val_\" + opt[\"name\"],\n            level=logging.INFO,\n            screen=True,\n            tofile=True,\n        )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    # config loggers. 
Before it, the log will not work\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"train_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO if rank == 0 else logging.ERROR,\n        screen=True,\n        tofile=True,\n    )\n\n    logger = logging.getLogger(\"base\")\n    if rank == 0:\n        logger.info(option.dict2str(opt))\n\n    # create dataset\n    (\n        train_set,\n        train_loader,\n        val_set,\n        val_loader,\n        total_iters,\n        total_epochs,\n    ) = setup_dataloaer(opt, logger)\n\n    # create model\n    model = create_model(opt)\n\n    # loading resume state if exists\n    if opt[\"train\"].get(\"resume_state\", None):\n        # distributed resuming: all load into default GPU\n        device_id = gpu\n        resume_state = torch.load(\n            opt[\"train\"][\"resume_state\"],\n            map_location=lambda storage, loc: storage.cuda(device_id),\n        )\n\n        logger.info(\n            \"Resuming training from epoch: {}, iter: {}.\".format(\n                resume_state[\"epoch\"], resume_state[\"iter\"]\n            )\n        )\n\n        start_epoch = resume_state[\"epoch\"]\n        current_step = resume_state[\"iter\"]\n        model.resume_training(resume_state)  # handle optimizers and schedulers\n\n    else:\n        current_step = 0\n        start_epoch = 0\n\n    logger.info(\n        \"Start training from epoch: {:d}, iter: {:d}\".format(start_epoch, current_step)\n    )\n    data_time, iter_time = time.time(), time.time()\n    avg_data_time = avg_iter_time = 0\n    count = 0\n    for epoch in range(start_epoch, total_epochs + 1):\n        for _, train_data in enumerate(train_loader):\n\n            current_step += 1\n            count += 1\n            if current_step > total_iters:\n                break\n\n            data_time = time.time() - data_time\n            avg_data_time = (avg_data_time * (count - 1) + data_time) / count\n\n            model.feed_data(train_data)\n            model.optimize_parameters(current_step)\n            model.update_learning_rate(\n                current_step, warmup_iter=opt[\"train\"][\"warmup_iter\"]\n            )\n\n            iter_time = time.time() - iter_time\n            avg_iter_time = (avg_iter_time * (count - 1) + iter_time) / count\n\n            # log\n            if current_step % opt[\"logger\"][\"print_freq\"] == 0:\n                logs = model.get_current_log()\n                message = (\n                    f\"<epoch:{epoch:3d}, iter:{current_step:8,d}, \"\n                    f\"lr:{model.get_current_learning_rate():.3e}> \"\n                )\n\n                message += f'[time (data): {avg_iter_time:.3f} ({avg_data_time:.3f})] '\n                for k, v in logs.items():\n                    message += \"{:s}: {:.4e}; \".format(k, v)\n                    # tensorboard logger\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        if rank == 0:\n                            tb_logger.add_scalar(k, v, current_step)\n                logger.info(message)\n\n            # validation\n            if current_step % opt[\"train\"][\"val_freq\"] == 0:\n\n                avg_results = validate(\n                    model, val_set, val_loader, opt, measure, epoch, current_step\n                )\n\n            # tensorboard logger\n            if rank == 0:\n                if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                    
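# push validation metrics to tensorboard only on iterations where\n                    # validate() has just produced avg_results\n                    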
if current_step % opt[\"train\"][\"val_freq\"] == 0:\n                        for k, v in avg_results.items():\n                            tb_logger.add_scalar(k, v, current_step)\n\n            # save models and training states\n            if current_step % opt[\"logger\"][\"save_checkpoint_freq\"] == 0:\n                if rank == 0:\n                    logger.info(\"Saving models and training states.\")\n                    model.save(current_step)\n                    model.save_training_state(epoch, current_step)\n            \n            data_time = time.time()\n            iter_time = time.time()\n\n    if rank == 0:\n        logger.info(\"Saving the final model.\")\n        model.save(\"latest\")\n        logger.info(\"End of training.\")\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger.close()\n\n\ndef validate(model, dataset, dist_loader, opt, measure, epoch, current_step):\n\n    test_results = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    if rank == 0:\n        pbar = tqdm(total=len(dataset), leave=False, dynamic_ncols=True)\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        val_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        LR_img = val_data[\"src\"]\n        lr_img = util.tensor2img(LR_img)  # save LR image for reference\n\n        model.test(val_data)\n        visuals = model.get_current_visuals()\n\n        # Save images for reference\n        img_name = val_data[\"src_path\"][0].split(\"/\")[-1].split(\".\")[0]\n        img_dir = os.path.join(opt[\"path\"][\"val_images\"], img_name)\n\n        util.mkdir(img_dir)\n        save_lr_path = os.path.join(img_dir, \"{:s}_LR.png\".format(img_name))\n        util.save_img(lr_img, save_lr_path)\n\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n        save_img_path = os.path.join(\n            img_dir, \"{:s}_{:d}.png\".format(img_name, current_step)\n        )\n        util.save_img(sr_img, save_img_path)\n\n        if \"fake_lr\" in visuals.keys():\n            fake_lr_img = util.tensor2img(visuals[\"fake_lr\"])\n            save_img_path = os.path.join(\n                img_dir, f\"fake_lr_{current_step:d}.png\"\n            )\n            util.save_img(fake_lr_img, save_img_path)\n\n        # calculate scores\n        crop_size = opt[\"scale\"]\n        cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        if \"tgt\" in val_data.keys():\n            gt_img = util.tensor2img(val_data[\"tgt\"])\n            cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        else:\n            cropped_gt_img = gt_img = None\n\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n\n        if rank == 0:\n            for _ in range(world_size):\n                pbar.update(1)\n    if rank == 0:\n        pbar.close()\n\n    # log\n    avg_results = {}\n    message = \" <epoch:{:3d}, iter:{:8,d}> Average scores:\\t\".format(\n        epoch, current_step\n    )\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger_val = logging.getLogger(\"val\")  # validation logger\n        logger_val.info(message)\n    \n    del test_results\n    torch.cuda.empty_cache()\n    return avg_results\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/config/Maeda/README.md",
    "content": "This repo supports the training and testing of paper [Unpaired Image Super-Resolution using Pseudo-Supervision](https://arxiv.org/abs/2002.11397)"
  },
  {
    "path": "codes/config/Maeda/archs/__init__.py",
    "content": "import importlib\nimport os\nimport os.path as osp\n\nfrom utils.registry import ARCH_REGISTRY, LOSS_REGISTRY, LR_SCHEDULER_REGISTRY\n\narch_folder = osp.dirname(osp.abspath(__file__))\narch_filenames = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(arch_folder)\n    if v.endswith(\".py\")\n]\n# import all the arch modules\n_arch_modules = [\n    importlib.import_module(f\"archs.{file_name}\") for file_name in arch_filenames\n]\n\n\ndef build_network(net_opt):\n    which_network = net_opt[\"which_network\"]\n    net = ARCH_REGISTRY.get(which_network)(**net_opt[\"setting\"])\n    return net\n\n\ndef build_loss(loss_opt):\n    loss_type = loss_opt.pop(\"type\")\n    loss = LOSS_REGISTRY.get(loss_type)(**loss_opt)\n    return loss\n\ndef build_scheduler(optimizer, scheduler_opt):\n    scheduler_type = scheduler_opt.pop(\"type\")\n    scheduler = LR_SCHEDULER_REGISTRY.get(scheduler_type)(optimizer, **scheduler_opt)\n    return scheduler\n"
  },
  {
    "path": "codes/config/Maeda/archs/discriminator.py",
    "content": "import torch\nimport torch.nn as nn\nimport torchvision\nimport functools\n\nfrom utils.registry import ARCH_REGISTRY\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG128(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512 * 4 * 4, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG32(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = 
nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass PatchGANDiscriminator(nn.Module):\n    \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n    def __init__(self, in_c, nf, nb, stride=1, norm_layer=nn.InstanceNorm2d):\n        \"\"\"Construct a PatchGAN discriminator\n\n        Parameters:\n            input_nc (int)  -- the number of channels in input images\n            ndf (int)       -- the number of filters in the last conv layer\n            n_layers (int)  -- the number of conv layers in the discriminator\n            norm_layer      -- normalization layer\n        \"\"\"\n        super().__init__()\n        if (\n            type(norm_layer) == functools.partial\n        ):  # no need to use bias as BatchNorm2d has affine parameters\n            use_bias = norm_layer.func == nn.InstanceNorm2d\n        else:\n            use_bias = norm_layer == nn.InstanceNorm2d\n\n        kw = 4\n        padw = 1\n        sequence = [\n            nn.Conv2d(in_c, nf, kernel_size=kw, stride=stride, padding=padw),\n            nn.LeakyReLU(0.2, True),\n        ]\n        nf_mult = 1\n        nf_mult_prev = 1\n        for n in range(1, nb):  # gradually increase the number of filters\n            nf_mult_prev = nf_mult\n            nf_mult = min(2 ** n, 8)\n            sequence += [\n                nn.Conv2d(\n                    nf * nf_mult_prev,\n                    nf * nf_mult,\n                    kernel_size=kw,\n                    stride=2,\n                    padding=padw,\n                    bias=use_bias,\n                ),\n                norm_layer(nf * nf_mult),\n                nn.LeakyReLU(0.2, True),\n            ]\n\n        nf_mult_prev = nf_mult\n        nf_mult = min(2 ** nb, 8)\n        sequence += [\n            nn.Conv2d(\n                nf * nf_mult_prev,\n                nf * nf_mult,\n                kernel_size=kw,\n                stride=1,\n                padding=padw,\n                bias=use_bias,\n            ),\n            norm_layer(nf * nf_mult),\n            nn.LeakyReLU(0.2, True),\n        ]\n\n        sequence += [\n            nn.Conv2d(nf * nf_mult, nf, kernel_size=kw, stride=1, padding=padw)\n        ]\n        
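# NOTE: unlike the classic pix2pix PatchGAN, whose final conv maps to a\n        # single channel, this variant keeps nf output channels; each spatial\n        # position of the output is a logit scoring one receptive-field patch\n        # rather than the whole image.\n        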
self.model = nn.Sequential(*sequence)\n\n    def forward(self, input):\n        \"\"\"Standard forward.\"\"\"\n        return self.model(input)\n"
  },
  {
    "path": "codes/config/Maeda/archs/edsr.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(\n        self,\n        rgb_range,\n        rgb_mean=(0.4488, 0.4371, 0.4040),\n        rgb_std=(1.0, 1.0, 1.0),\n        sign=-1,\n    ):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\n\n\n@ARCH_REGISTRY.register()\nclass EDSR(nn.Module):\n    def __init__(self, nb, nf, res_scale=0.1, upscale=4, conv=default_conv):\n        super(EDSR, self).__init__()\n\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        scale = upscale\n       
 act = nn.ReLU(True)\n        # url_name = 'r{}f{}x{}'.format(nb, nf, upscale)\n        # if url_name in url:\n        #     self.url = url[url_name]\n        # else:\n        #     self.url = None\n        self.sub_mean = MeanShift(255.0, sign=-1)\n        self.add_mean = MeanShift(255.0, sign=1)\n\n        # define head module\n        m_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, n_feats, kernel_size, act=act, res_scale=res_scale)\n            for _ in range(n_resblocks)\n        ]\n        m_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = nn.Sequential(*m_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x * 255.0)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = self.tail(res)\n        x = self.add_mean(x) / 255.0\n\n        return x\n"
  },
  {
    "path": "codes/config/Maeda/archs/loss.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom utils.registry import LOSS_REGISTRY\n\nfrom .vgg import VGGFeatureExtractor\n\n\n@LOSS_REGISTRY.register()\nclass TVLoss(nn.Module):\n    def __init__(self, penealty=\"L1Loss\"):\n        super().__init__()\n        self.penealty = getattr(nn, penealty)()\n\n    def forward(self, pred):\n        y_diff = self.penealty(pred[:, :, :-1, :], pred[:, :, 1:, :])\n        x_diff = self.penealty(pred[:, :, :, :-1], pred[:, :, :, 1:])\n\n        loss = x_diff + y_diff\n\n        return loss\n\n\n@LOSS_REGISTRY.register()\nclass MSELoss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.mse_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass L1Loss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.l1_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass GANLoss(nn.Module):\n    \"\"\"Define GAN loss.\n    Args:\n        gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.\n        real_label_val (float): The value for real label. Default: 1.0.\n        fake_label_val (float): The value for fake label. Default: 0.0.\n    \"\"\"\n\n    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):\n        super(GANLoss, self).__init__()\n        self.gan_type = gan_type\n        self.real_label_val = real_label_val\n        self.fake_label_val = fake_label_val\n\n        if self.gan_type == \"vanilla\":\n            self.loss = nn.BCEWithLogitsLoss()\n        elif self.gan_type == \"lsgan\":\n            self.loss = nn.MSELoss()\n        elif self.gan_type == \"wgan\":\n            self.loss = self._wgan_loss\n        elif self.gan_type == \"wgan_softplus\":\n            self.loss = self._wgan_softplus_loss\n        elif self.gan_type == \"hinge\":\n            self.loss = nn.ReLU()\n        else:\n            raise NotImplementedError(f\"GAN type {self.gan_type} is not implemented.\")\n\n    def _wgan_loss(self, input, target):\n        \"\"\"wgan loss.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return -input.mean() if target else input.mean()\n\n    def _wgan_softplus_loss(self, input, target):\n        \"\"\"wgan loss with soft plus. softplus is a smooth approximation to the\n        ReLU function.\n        In StyleGAN2, it is called:\n            Logistic loss for discriminator;\n            Non-saturating loss for generator.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return F.softplus(-input).mean() if target else F.softplus(input).mean()\n\n    def get_target_label(self, input, target_is_real):\n        \"\"\"Get target label.\n        Args:\n            input (Tensor): Input tensor.\n            target_is_real (bool): Whether the target is real or fake.\n        Returns:\n            (bool | Tensor): Target tensor. 
Return bool for wgan, otherwise,\n                return Tensor.\n        \"\"\"\n\n        if self.gan_type in [\"wgan\", \"wgan_softplus\"]:\n            return target_is_real\n        target_val = self.real_label_val if target_is_real else self.fake_label_val\n        return input.new_ones(input.size()) * target_val\n\n    def forward(self, input, target_is_real, is_disc=False):\n        \"\"\"\n        Args:\n            input (Tensor): The input for the loss module, i.e., the network\n                prediction.\n            target_is_real (bool): Whether the target is real or fake.\n            is_disc (bool): Whether the loss is for a discriminator or not.\n                Default: False.\n        Returns:\n            Tensor: GAN loss value.\n        \"\"\"\n        target_label = self.get_target_label(input, target_is_real)\n        if self.gan_type == \"hinge\":\n            if is_disc:  # for discriminators in hinge-gan\n                input = -input if target_is_real else input\n                loss = self.loss(1 + input).mean()\n            else:  # for generators in hinge-gan\n                loss = -input.mean()\n        else:  # other gan types\n            loss = self.loss(input, target_label)\n\n        return loss\n\n\n@LOSS_REGISTRY.register()\nclass PerceptualLoss(nn.Module):\n    \"\"\"Perceptual loss with commonly used style loss.\n    Args:\n        layer_weights (dict): The weight for each layer of vgg feature.\n            Here is an example: {'conv5_4': 1.}, which means the conv5_4\n            feature layer (before relu5_4) will be extracted with weight\n            1.0 in calculating losses.\n        vgg_type (str): The type of vgg network used as feature extractor.\n            Default: 'vgg19'.\n        use_input_norm (bool):  If True, normalize the input image in vgg.\n            Default: True.\n        range_norm (bool): If True, normalize images from range [-1, 1] to [0, 1].\n            Default: False.\n        perceptual_weight (float): If `perceptual_weight > 0`, the perceptual\n            loss will be calculated and the loss will be multiplied by the\n            weight. Default: 1.0.\n        style_weight (float): If `style_weight > 0`, the style loss will be\n            calculated and the loss will be multiplied by the weight.\n            Default: 0.\n        criterion (str): Criterion used for perceptual loss. Default: 'l1'.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_weights,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        perceptual_weight=1.0,\n        style_weight=0.0,\n        criterion=\"l1\",\n    ):\n        super(PerceptualLoss, self).__init__()\n        self.perceptual_weight = perceptual_weight\n        self.style_weight = style_weight\n        self.layer_weights = layer_weights\n        self.vgg = VGGFeatureExtractor(\n            layer_name_list=list(layer_weights.keys()),\n            vgg_type=vgg_type,\n            use_input_norm=use_input_norm,\n            range_norm=range_norm,\n        )\n\n        self.criterion_type = criterion\n        if self.criterion_type == \"l1\":\n            self.criterion = torch.nn.L1Loss()\n        elif self.criterion_type == \"l2\":\n            self.criterion = torch.nn.MSELoss()\n        elif self.criterion_type == \"fro\":\n            self.criterion = None\n        else:\n            raise NotImplementedError(f\"{criterion} criterion is not supported.\")\n\n    def forward(self, x, gt):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, c, h, w).\n            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        # extract vgg features\n        x_features = self.vgg(x)\n        gt_features = self.vgg(gt.detach())\n\n        # calculate perceptual loss\n        if self.perceptual_weight > 0:\n            percep_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    percep_loss += (\n                        torch.norm(x_features[k] - gt_features[k], p=\"fro\")\n                        * self.layer_weights[k]\n                    )\n                else:\n                    percep_loss += (\n                        self.criterion(x_features[k], gt_features[k])\n                        * self.layer_weights[k]\n                    )\n            percep_loss *= self.perceptual_weight\n        else:\n            percep_loss = None\n\n        # calculate style loss\n        if self.style_weight > 0:\n            style_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    style_loss += (\n                        torch.norm(\n                            self._gram_mat(x_features[k])\n                            - self._gram_mat(gt_features[k]),\n                            p=\"fro\",\n                        )\n                        * self.layer_weights[k]\n                    )\n                else:\n                    style_loss += (\n                        self.criterion(\n                            self._gram_mat(x_features[k]),\n                            self._gram_mat(gt_features[k]),\n                        )\n                        * self.layer_weights[k]\n                    )\n            style_loss *= self.style_weight\n        else:\n            style_loss = None\n\n        return percep_loss, style_loss\n\n    def _gram_mat(self, x):\n        \"\"\"Calculate Gram matrix.\n        Args:\n            x (torch.Tensor): Tensor with shape of (n, c, h, w).\n        Returns:\n            torch.Tensor: Gram matrix.\n        \"\"\"\n        n, c, h, w = x.size()\n        features = x.view(n, c, w * h)\n        features_t = features.transpose(1, 2)\n        gram = features.bmm(features_t) / (c * h * 
w)\n        return gram\n\n\n@LOSS_REGISTRY.register()\nclass CharbonnierLoss(nn.Module):\n    \"\"\"Charbonnier Loss (L1)\"\"\"\n\n    def __init__(self, eps=1e-6):\n        super(CharbonnierLoss, self).__init__()\n        self.eps = eps\n\n    def forward(self, x, y):\n        diff = x - y\n        loss = torch.mean(torch.sqrt(diff * diff + self.eps))\n        return loss\n\n\nclass GradientPenaltyLoss(nn.Module):\n    def __init__(self, device=torch.device(\"cpu\")):\n        super(GradientPenaltyLoss, self).__init__()\n        self.register_buffer(\"grad_outputs\", torch.Tensor())\n        self.grad_outputs = self.grad_outputs.to(device)\n\n    def get_grad_outputs(self, input):\n        if self.grad_outputs.size() != input.size():\n            self.grad_outputs.resize_(input.size()).fill_(1.0)\n        return self.grad_outputs\n\n    def forward(self, interp, interp_crit):\n        grad_outputs = self.get_grad_outputs(interp_crit)\n        grad_interp = torch.autograd.grad(\n            outputs=interp_crit,\n            inputs=interp,\n            grad_outputs=grad_outputs,\n            create_graph=True,\n            retain_graph=True,\n            only_inputs=True,\n        )[0]\n        grad_interp = grad_interp.view(grad_interp.size(0), -1)\n        grad_interp_norm = grad_interp.norm(2, dim=1)\n\n        loss = ((grad_interp_norm - 1) ** 2).mean()\n        return loss\n"
  },
  {
    "path": "codes/config/Maeda/archs/lr_scheduler.py",
    "content": "import math\nfrom collections import Counter, defaultdict\n\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\n\nfrom utils.registry import LR_SCHEDULER_REGISTRY\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass LinearDecayLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        decay_prop,\n        total_steps,\n        last_epoch=-1,\n    ):\n        self.decay_prop = decay_prop\n        self.total_steps = total_steps\n\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n\n        return [\n            group[\"initial_lr\"]\n            * (1 - (self.last_epoch + 1) * self.decay_prop / self.total_steps)\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass MultiStepRestartLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        milestones,\n        restarts=None,\n        weights=None,\n        gamma=0.1,\n        clear_state=False,\n        last_epoch=-1,\n    ):\n        self.milestones = Counter(milestones)\n        self.gamma = gamma\n        self.clear_state = clear_state\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch in self.restarts:\n            if self.clear_state:\n                self.optimizer.state = defaultdict(dict)\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        if self.last_epoch not in self.milestones:\n            return [group[\"lr\"] for group in self.optimizer.param_groups]\n        return [\n            group[\"lr\"] * self.gamma ** self.milestones[self.last_epoch]\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass CosineAnnealingRestartLR(_LRScheduler):\n    def __init__(\n        self, optimizer, T_period, restarts=None, weights=None, eta_min=0, last_epoch=-1\n    ):\n        self.T_period = T_period\n        self.T_max = self.T_period[0]  # current T period\n        self.eta_min = eta_min\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        self.last_restart = 0\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch == 0:\n            return self.base_lrs\n        elif self.last_epoch in self.restarts:\n            self.last_restart = self.last_epoch\n            self.T_max = self.T_period[self.restarts.index(self.last_epoch) + 1]\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        elif (self.last_epoch - self.last_restart - 1 - self.T_max) % (\n            2 * self.T_max\n        ) == 0:\n            return [\n                group[\"lr\"]\n                + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2\n                for base_lr, group in 
zip(self.base_lrs, self.optimizer.param_groups)\n            ]\n        return [\n            (1 + math.cos(math.pi * (self.last_epoch - self.last_restart) / self.T_max))\n            / (\n                1\n                + math.cos(\n                    math.pi * ((self.last_epoch - self.last_restart) - 1) / self.T_max\n                )\n            )\n            * (group[\"lr\"] - self.eta_min)\n            + self.eta_min\n            for group in self.optimizer.param_groups\n        ]\n"
  },
  {
    "path": "codes/config/Maeda/archs/module_util.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\n\ndef initialize_weights(net_l, scale=1):\n    if not isinstance(net_l, list):\n        net_l = [net_l]\n    for net in net_l:\n        for m in net.modules():\n            if isinstance(m, nn.Conv2d):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale  # for residual block\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                init.constant_(m.weight, 1)\n                init.constant_(m.bias.data, 0.0)\n\n\ndef make_layer(block, n_layers):\n    layers = []\n    for _ in range(n_layers):\n        layers.append(block())\n    return nn.Sequential(*layers)\n\n\nclass ResidualBlock_noBN(nn.Module):\n    \"\"\"Residual block w/o BN\n    ---Conv-ReLU-Conv-+-\n     |________________|\n    \"\"\"\n\n    def __init__(self, nf=64):\n        super(ResidualBlock_noBN, self).__init__()\n        self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n\n        # initialization\n        initialize_weights([self.conv1, self.conv2], 0.1)\n\n    def forward(self, x):\n        identity = x\n        out = F.relu(self.conv1(x), inplace=True)\n        out = self.conv2(out)\n        return identity + out\n\n\ndef flow_warp(x, flow, interp_mode=\"bilinear\", padding_mode=\"zeros\"):\n    \"\"\"Warp an image or feature map with optical flow\n    Args:\n        x (Tensor): size (N, C, H, W)\n        flow (Tensor): size (N, H, W, 2), normal value\n        interp_mode (str): 'nearest' or 'bilinear'\n        padding_mode (str): 'zeros' or 'border' or 'reflection'\n\n    Returns:\n        Tensor: warped image or feature map\n    \"\"\"\n    assert x.size()[-2:] == flow.size()[1:3]\n    B, C, H, W = x.size()\n    # mesh grid\n    grid_y, grid_x = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))\n    grid = torch.stack((grid_x, grid_y), 2).float()  # W(x), H(y), 2\n    grid.requires_grad = False\n    grid = grid.type_as(x)\n    vgrid = grid + flow\n    # scale grid to [-1,1]\n    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(W - 1, 1) - 1.0\n    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(H - 1, 1) - 1.0\n    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)\n    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode)\n    return output\n"
  },
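  {
    "path": "codes/config/Maeda/module_util_demo.py",
    "content": "\"\"\"Usage sketch, not part of the original repo: with align_corners=True inside\nflow_warp, a zero flow field reproduces the input (every pixel samples its own\nlocation), and make_layer stacks block constructors into a residual trunk. Run from\ncodes/config/Maeda/.\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.module_util import ResidualBlock_noBN, flow_warp, make_layer\n\nx = torch.arange(16.0).view(1, 1, 4, 4)\nzero_flow = torch.zeros(1, 4, 4, 2)  # (N, H, W, 2), offsets in pixels\nprint(torch.allclose(flow_warp(x, zero_flow), x))  # True\n\n# residual blocks preserve the feature shape\ntrunk = make_layer(lambda: ResidualBlock_noBN(nf=8), n_layers=3)\nprint(trunk(torch.randn(2, 8, 16, 16)).shape)  # torch.Size([2, 8, 16, 16])\n"
  },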
  {
    "path": "codes/config/Maeda/archs/rcan.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\nclass CALayer(nn.Module):\n    def __init__(self, channel, reduction=16):\n        super(CALayer, self).__init__()\n        # global average pooling: feature --> point\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        # feature channel downscale and upscale --> channel weight\n        self.conv_du = nn.Sequential(\n            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),\n            
nn.ReLU(inplace=True),\n            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),\n            nn.Sigmoid(),\n        )\n\n    def forward(self, x):\n        y = self.avg_pool(x)\n        y = self.conv_du(y)\n        return x * y\n\n\n## Residual Channel Attention Block (RCAB)\nclass RCAB(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        reduction,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(RCAB, self).__init__()\n        modules_body = []\n        for i in range(2):\n            modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                modules_body.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                modules_body.append(act)\n        modules_body.append(CALayer(n_feat, reduction))\n        self.body = nn.Sequential(*modules_body)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x)\n        # res = self.body(x).mul(self.res_scale)\n        res += x\n        return res\n\n\n## Residual Group (RG)\nclass ResidualGroup(nn.Module):\n    def __init__(\n        self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks\n    ):\n        super(ResidualGroup, self).__init__()\n        modules_body = []\n        modules_body = [\n            RCAB(\n                conv,\n                n_feat,\n                kernel_size,\n                reduction,\n                bias=True,\n                bn=False,\n                act=nn.ReLU(True),\n                res_scale=1,\n            )\n            for _ in range(n_resblocks)\n        ]\n        modules_body.append(conv(n_feat, n_feat, kernel_size))\n        self.body = nn.Sequential(*modules_body)\n\n    def forward(self, x):\n        res = self.body(x)\n        res += x\n        return res\n\n\n## Residual Channel Attention Network (RCAN)\n@ARCH_REGISTRY.register()\nclass RCAN(nn.Module):\n    def __init__(self, ng, nb, nf, reduction=16, upscale=4, conv=default_conv):\n        super(RCAN, self).__init__()\n\n        n_resgroups = ng\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        reduction = reduction\n        scale = upscale\n\n        act = nn.ReLU(True)\n\n        # RGB mean for DIV2K\n        rgb_mean = (0.4488, 0.4371, 0.4040)\n        rgb_std = (1.0, 1.0, 1.0)\n        self.sub_mean = MeanShift(1.0, rgb_mean, rgb_std, -1)\n\n        # define head module\n        modules_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        modules_body = [\n            ResidualGroup(\n                conv,\n                n_feats,\n                kernel_size,\n                reduction,\n                act=act,\n                res_scale=1.0,\n                n_resblocks=nb,\n            )\n            for _ in range(ng)\n        ]\n\n        modules_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        modules_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.add_mean = MeanShift(1.0, rgb_mean, rgb_std, 1)\n\n        self.head = nn.Sequential(*modules_head)\n        self.body = nn.Sequential(*modules_body)\n        self.tail = nn.Sequential(*modules_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = 
self.tail(res)\n        x = self.add_mean(x)\n\n        return x\n\n    def load_state_dict(self, state_dict, strict=False):\n        own_state = self.state_dict()\n        for name, param in state_dict.items():\n            if name in own_state:\n                if isinstance(param, nn.Parameter):\n                    param = param.data\n                try:\n                    own_state[name].copy_(param)\n                except Exception:\n                    if name.find(\"tail\") >= 0:\n                        print(\"Replace pre-trained upsampler to new one...\")\n                    else:\n                        raise RuntimeError(\n                            \"While copying the parameter named {}, \"\n                            \"whose dimensions in the model are {} and \"\n                            \"whose dimensions in the checkpoint are {}.\".format(\n                                name, own_state[name].size(), param.size()\n                            )\n                        )\n            elif strict:\n                if name.find(\"tail\") == -1:\n                    raise KeyError('unexpected key \"{}\" in state_dict'.format(name))\n\n        if strict:\n            missing = set(own_state.keys()) - set(state_dict.keys())\n            if len(missing) > 0:\n                raise KeyError('missing keys in state_dict: \"{}\"'.format(missing))\n"
  },
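  {
    "path": "codes/config/Maeda/rcan_demo.py",
    "content": "\"\"\"Usage sketch, not part of the original repo: instantiating the registered RCAN\narch directly with a tiny hypothetical config and checking the x4 output size. Inputs\nare expected in [0, 1] since the mean shift uses rgb_range=1.0. Run from\ncodes/config/Maeda/.\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.rcan import RCAN\n\n# 1 residual group of 2 RCABs with 8 features, just to keep the check fast\nnet = RCAN(ng=1, nb=2, nf=8, reduction=4, upscale=4)\nwith torch.no_grad():\n    out = net(torch.rand(1, 3, 24, 24))\nprint(out.shape)  # torch.Size([1, 3, 96, 96])\n"
  },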
  {
    "path": "codes/config/Maeda/archs/rrdb.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\nclass ResidualDenseBlock_5C(nn.Module):\n    def __init__(self, nf=64, gc=32, bias=True):\n        super(ResidualDenseBlock_5C, self).__init__()\n        # gc: growth channel, i.e. intermediate channels\n        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)\n        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)\n        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1\n        )\n\n    def forward(self, x):\n        x1 = self.lrelu(self.conv1(x))\n        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))\n        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))\n        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))\n        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))\n        return x5 * 0.2 + x\n\n\nclass RRDB(nn.Module):\n    \"\"\"Residual in Residual Dense Block\"\"\"\n\n    def __init__(self, nf, gc=32):\n        super(RRDB, self).__init__()\n        self.RDB1 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB2 = ResidualDenseBlock_5C(nf, gc)\n        self.RDB3 = ResidualDenseBlock_5C(nf, gc)\n\n    def forward(self, x):\n        out = self.RDB1(x)\n        out = self.RDB2(out)\n        out = self.RDB3(out)\n        return out * 0.2 + x\n\n\n@ARCH_REGISTRY.register()\nclass RRDBNet(nn.Module):\n    def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4):\n        super(RRDBNet, self).__init__()\n        self.upscale = upscale\n        RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.RRDB_trunk = make_layer(RRDB_block_f, nb)\n        self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        #### upsampling\n        self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        if upscale == 4:\n            self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.conv_first(x)\n        trunk = self.trunk_conv(self.RRDB_trunk(fea))\n        fea = fea + trunk\n\n        if self.upscale == 2 or self.upscale == 3:\n            fea = self.lrelu(\n                self.upconv1(\n                    F.interpolate(fea, scale_factor=self.upscale, mode=\"nearest\")\n                )\n            )\n        if self.upscale == 4:\n            fea = self.lrelu(\n                self.upconv1(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n            fea = self.lrelu(\n                self.upconv2(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n        out = self.conv_last(self.lrelu(self.HRconv(fea)))\n\n        return out\n"
  },
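  {
    "path": "codes/config/Maeda/rrdb_demo.py",
    "content": "\"\"\"Usage sketch, not part of the original repo: RRDBNet reaches x4 via two nearest\nx2 interpolations before the final convolutions (a single interpolation for x2/x3).\nRun from codes/config/Maeda/.\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.rrdb import RRDBNet\n\nnet = RRDBNet(in_nc=3, out_nc=3, nf=8, nb=1, upscale=4)  # tiny config for a fast check\nwith torch.no_grad():\n    print(net(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 3, 128, 128])\n"
  },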
  {
    "path": "codes/config/Maeda/archs/srresnet.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\n@ARCH_REGISTRY.register()\nclass MSRResNet(nn.Module):\n    \"\"\"modified SRResNet\"\"\"\n\n    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):\n        super(MSRResNet, self).__init__()\n        self.upscale = upscale\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        basic_block = functools.partial(ResidualBlock_noBN, nf=nf)\n        self.recon_trunk = make_layer(basic_block, nb)\n\n        # upsampling\n        if self.upscale == 2:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n        elif self.upscale == 3:\n            self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(3)\n        elif self.upscale == 4:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1\n        )\n        if self.upscale == 4:\n            initialize_weights(self.upconv2, 0.1)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv_first(x))\n        out = self.recon_trunk(fea)\n\n        if self.upscale == 4:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n        elif self.upscale == 3 or self.upscale == 2:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n\n        out = self.conv_last(self.lrelu(self.HRconv(out)))\n        base = F.interpolate(\n            x, scale_factor=self.upscale, mode=\"bilinear\", align_corners=False\n        )\n        out += base\n        return out\n"
  },
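  {
    "path": "codes/config/Maeda/srresnet_demo.py",
    "content": "\"\"\"Usage sketch, not part of the original repo: MSRResNet adds a bilinear x4 upsample\nof the input as a global residual, so the trunk only has to learn the high-frequency\ncorrection. Run from codes/config/Maeda/.\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.srresnet import MSRResNet\n\nnet = MSRResNet(nf=8, nb=2, upscale=4)  # tiny config for a fast check\nwith torch.no_grad():\n    print(net(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 3, 128, 128])\n"
  },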
  {
    "path": "codes/config/Maeda/archs/translator.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\n@ARCH_REGISTRY.register()\nclass Translator(nn.Module):\n    def __init__(self, nb, nf, noise_nf=0, scale=4, zero_tail=False, conv=default_conv):\n        super().__init__()\n\n        self.scale = scale\n        self.noise_nf = noise_nf\n        # define head module\n        if scale >= 1:\n            m_head = [conv(3 + noise_nf, nf, 3)]\n        else:\n            s = int(1 / scale)\n            m_head = [nn.Conv2d(3 + noise_nf, nf, kernel_size=2 * s + 1, stride=s, padding=s)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, nf, 3, act=nn.ReLU(True), res_scale=1) for _ in range(nb)\n        ]\n        m_body.append(conv(nf, nf, 3))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, nf, act=False) if scale > 1 else nn.Identity(),\n            conv(nf, 3, 3),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        
self.body = nn.Sequential(*m_body)\n        self.tail = nn.Sequential(*m_tail)\n\n        if zero_tail:\n            nn.init.constant_(self.tail[-1].weight, 0)\n            nn.init.constant_(self.tail[-1].bias, 0)\n\n    def forward(self, x):\n        \n        if self.noise_nf > 0:\n            b, c, h, w = x.shape\n            noise = torch.randn(b, self.noise_nf, h, w).to(x.device)\n            inp = torch.cat([x, noise], 1)\n        else:\n            inp = x\n\n        f = self.head(inp)\n        f = self.body(f)\n        f = self.tail(f)\n\n        if self.scale == 1:\n            x = f + x\n        else:\n            x = f + F.interpolate(x, scale_factor=self.scale)\n        \n        return x\n"
  },
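  {
    "path": "codes/config/Maeda/translator_demo.py",
    "content": "\"\"\"Usage sketch, not part of the original repo: with noise_nf > 0 the Translator\nconcatenates a fresh Gaussian noise map to its input on every call, so two forward\npasses over the same image differ (zero_tail=False here so the tail is not\nzero-initialized). Run from codes/config/Maeda/.\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.translator import Translator\n\ng = Translator(nb=2, nf=8, noise_nf=1, scale=1, zero_tail=False)\nx = torch.rand(1, 3, 32, 32)\nwith torch.no_grad():\n    y1, y2 = g(x), g(x)\nprint(y1.shape, (y1 - y2).abs().max().item() > 0)  # same size, stochastic: True\n"
  },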
  {
    "path": "codes/config/Maeda/archs/vgg.py",
    "content": "import os\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn as nn\nfrom torchvision.models import vgg as vgg\n\nfrom utils.registry import ARCH_REGISTRY\n\nVGG_PRETRAIN_PATH = \"checkpoints/pretrained_models/vgg19-dcbb9e9d.pth\"\nNAMES = {\n    \"vgg11\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg13\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg16\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"pool5\",\n    ],\n    \"vgg19\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"conv3_4\",\n        \"relu3_4\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"conv4_4\",\n        \"relu4_4\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"conv5_4\",\n        \"relu5_4\",\n        \"pool5\",\n    ],\n}\n\n\ndef insert_bn(names):\n    \"\"\"Insert bn layer after each conv.\n    Args:\n        names (list): The list of layer names.\n    Returns:\n        list: The list of layer names with bn layers.\n    \"\"\"\n    names_bn = []\n    for name in names:\n        names_bn.append(name)\n        if \"conv\" in name:\n            position = name.replace(\"conv\", \"\")\n            names_bn.append(\"bn\" + position)\n    return names_bn\n\n\n@ARCH_REGISTRY.register()\nclass VGGFeatureExtractor(nn.Module):\n    \"\"\"VGG network for feature extraction.\n    In this implementation, we allow users to choose whether use normalization\n    in the input feature and the type of vgg network. 
Note that the pretrained\n    path must fit the vgg type.\n    Args:\n        layer_name_list (list[str]): Forward function returns the corresponding\n            features according to the layer_name_list.\n            Example: {'relu1_1', 'relu2_1', 'relu3_1'}.\n        vgg_type (str): Set the type of vgg network. Default: 'vgg19'.\n        use_input_norm (bool): If True, normalize the input image. Importantly,\n            the input feature must in the range [0, 1]. Default: True.\n        range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].\n            Default: False.\n        requires_grad (bool): If true, the parameters of VGG network will be\n            optimized. Default: False.\n        remove_pooling (bool): If true, the max pooling operations in VGG net\n            will be removed. Default: False.\n        pooling_stride (int): The stride of max pooling operation. Default: 2.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_name_list,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        requires_grad=False,\n        remove_pooling=False,\n        pooling_stride=2,\n    ):\n        super(VGGFeatureExtractor, self).__init__()\n\n        self.layer_name_list = layer_name_list\n        self.use_input_norm = use_input_norm\n        self.range_norm = range_norm\n\n        self.names = NAMES[vgg_type.replace(\"_bn\", \"\")]\n        if \"bn\" in vgg_type:\n            self.names = insert_bn(self.names)\n\n        # only borrow layers that will be used to avoid unused params\n        max_idx = 0\n        for v in layer_name_list:\n            idx = self.names.index(v)\n            if idx > max_idx:\n                max_idx = idx\n\n        if os.path.exists(VGG_PRETRAIN_PATH):\n            vgg_net = getattr(vgg, vgg_type)(pretrained=False)\n            state_dict = torch.load(\n                VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage\n            )\n            vgg_net.load_state_dict(state_dict)\n        else:\n            vgg_net = getattr(vgg, vgg_type)(pretrained=True)\n\n        features = vgg_net.features[: max_idx + 1]\n\n        modified_net = OrderedDict()\n        for k, v in zip(self.names, features):\n            if \"pool\" in k:\n                # if remove_pooling is true, pooling operation will be removed\n                if remove_pooling:\n                    continue\n                else:\n                    # in some cases, we may want to change the default stride\n                    modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride)\n            else:\n                modified_net[k] = v\n\n        self.vgg_net = nn.Sequential(modified_net)\n\n        if not requires_grad:\n            self.vgg_net.eval()\n            for param in self.parameters():\n                param.requires_grad = False\n        else:\n            self.vgg_net.train()\n            for param in self.parameters():\n                param.requires_grad = True\n\n        if self.use_input_norm:\n            # the mean is for image with range [0, 1]\n            self.register_buffer(\n                \"mean\", torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)\n            )\n            # the std is for image with range [0, 1]\n            self.register_buffer(\n                \"std\", torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)\n            )\n\n    def forward(self, x):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, 
c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        if self.range_norm:\n            x = (x + 1) / 2\n        if self.use_input_norm:\n            x = (x - self.mean) / self.std\n\n        output = {}\n        for key, layer in self.vgg_net._modules.items():\n            x = layer(x)\n            if key in self.layer_name_list:\n                output[key] = x.clone()\n\n        return output\n"
  },
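  {
    "path": "codes/config/Maeda/vgg_feature_demo.py",
    "content": "\"\"\"Usage sketch, not part of the original repo: extracting perceptual-loss features.\nThis builds torchvision's vgg19 (downloading its weights on first use unless\nVGG_PRETRAIN_PATH exists), so it needs network access or the local checkpoint. Run\nfrom codes/config/Maeda/.\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.vgg import VGGFeatureExtractor\n\nextractor = VGGFeatureExtractor(layer_name_list=[\"relu1_1\", \"relu3_1\"])\nx = torch.rand(1, 3, 64, 64)  # use_input_norm=True expects inputs in [0, 1]\nfor name, feat in extractor(x).items():\n    print(name, feat.shape)  # relu1_1: [1, 64, 64, 64]; relu3_1: [1, 256, 16, 16]\n"
  },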
  {
    "path": "codes/config/Maeda/count_flops.py",
    "content": "import argparse\nimport sys\n\nimport torch\nfrom torchsummaryX import summary\n\nsys.path.append(\"../../\")\nimport utils.option as option\nfrom models import create_model\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"--opt\",\n    type=str,\n    default=\"options/setting1/test/test_setting1_x4.yml\",\n    help=\"Path to option YMAL file of Predictor.\",\n)\nargs = parser.parse_args()\nopt = option.parse(args.opt, root_path=\".\", is_train=True)\n\nopt = option.dict_to_nonedict(opt)\nmodel = create_model(opt)\n\ntest_tensor = torch.randn(1, 3, 270, 180).cuda()\nfor name, net in model.networks.items():\n    summary(net.cuda(), x=test_tensor)\n    print(\"Above are results for net {}\".format(name))\n    input()\n"
  },
  {
    "path": "codes/config/Maeda/inference.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport os.path as osp\nimport random\nimport sys\nimport cv2\nfrom collections import defaultdict\nfrom glob import glob\nfrom tqdm import tqdm\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom data.data_sampler import DistIterSampler\nfrom metrics import IQA\nfrom models import create_model\n\n\n\n#### options\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"-opt\",\n    type=str,\n    default=\"options/test/2020Track2.yml\",\n    help=\"Path to options YMAL file.\",\n)\nparser.add_argument(\"-input_dir\", type=str, default=\"../../../data_samples/LR\")\nparser.add_argument(\"-output_dir\", type=str, default=\"../../../data_samples/BSRGAN\")\nargs = parser.parse_args()\nopt = option.parse(args.opt, is_train=False)\n\nopt = option.dict_to_nonedict(opt)\n\nmodel = create_model(opt)\n\nif not osp.exists(args.output_dir):\n    os.makedirs(args.output_dir)\n\ntest_files = glob(osp.join(args.input_dir, \"*\"))\nfor inx, path in tqdm(enumerate(test_files)):\n    name = path.split(\"/\")[-1].split(\".\")[0]\n\n    img = cv2.imread(path)[:, :, [2, 1, 0]]\n    img = img.transpose(2, 0, 1)[None] / 255\n    img_t = torch.as_tensor(np.ascontiguousarray(img)).float()\n\n    model.test({\"src\": img_t}, crop_size=512)\n    outdict = model.get_current_visuals()\n\n    sr = outdict[\"sr\"]\n    sr_im = util.tensor2img(sr)\n\n    save_path = osp.join(args.output_dir, \"{}_x{}.png\".format(name, opt[\"scale\"]))\n    cv2.imwrite(save_path, sr_im)\n"
  },
  {
    "path": "codes/config/Maeda/models/__init__.py",
    "content": "import importlib\nimport logging\nimport os\nimport os.path as osp\n\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\nmodel_folder = osp.dirname(__file__)\nmodel_names = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(model_folder)\n    if v.endswith(\"_model.py\")\n]\n_model_modules = [\n    importlib.import_module(f\"models.{file_name}\") for file_name in model_names\n]\n\n\ndef create_model(opt, **kwarg):\n    model = opt[\"model\"]\n    m = MODEL_REGISTRY.get(model)(opt, **kwarg)\n    logger.info(\"Model [{:s}] is created.\".format(m.__class__.__name__))\n    return m\n"
  },
  {
    "path": "codes/config/Maeda/models/base_model.py",
    "content": "import logging\nimport os\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\n\nfrom archs import build_loss, build_network, build_scheduler\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass BaseModel:\n    def __init__(self, opt):\n\n        self.opt = opt\n\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n            self.world_size = torch.distributed.get_world_size()\n        else:\n            self.rank = 0  # non dist training\n\n        self.device = torch.device(\"cuda\" if opt[\"gpu_ids\"] is not None else \"cpu\")\n        self.is_train = opt[\"is_train\"]\n        self.log_dict = OrderedDict()\n\n        self.data_names = []\n        self.networks = {}\n\n        self.optimizers = {}\n        self.schedulers = {}\n\n    def setup_train(self, train_opt):\n        # define losses\n        loss_opt = train_opt[\"losses\"]\n        self.losses = self.build_losses(loss_opt)\n\n        # build optmizers\n        optimizer_opts = train_opt[\"optimizers\"]\n        self.optimizers = self.build_optimizers(optimizer_opts)\n\n        # set schedulers\n        scheduler_opts = train_opt[\"schedulers\"]\n        self.schedulers = self.build_schedulers(scheduler_opts)\n\n        # set to training state\n        self.set_network_state(self.networks.keys(), \"train\")\n\n    def feed_data(self, data):\n        pass\n\n    def optimize_parameters(self):\n        pass\n\n    def get_current_visuals(self):\n        pass\n\n    def get_current_losses(self):\n        pass\n\n    def print_network(self):\n        pass\n\n    def save(self, label):\n        pass\n\n    def load(self):\n        pass\n\n    def build_network(self, net_opt):\n\n        net = build_network(net_opt)\n\n        if isinstance(net, nn.Module):\n            net = self.model_to_device(net)\n\n            if net_opt.get(\"pretrain\"):\n                pretrain = net_opt.pop(\"pretrain\")\n                self.load_network(net, pretrain[\"path\"], pretrain[\"strict_load\"])\n\n            self.print_network(net)\n        return net\n\n    def build_losses(self, loss_opt):\n        losses = {}\n\n        defined_loss_names = list(loss_opt.keys())\n        assert set(defined_loss_names).issubset(set(self.loss_names))\n\n        for name in defined_loss_names:\n            loss_conf = loss_opt.get(name)\n            if loss_conf[\"weight\"] > 0:\n                self.loss_weights[name] = loss_conf.pop(\"weight\")\n                losses[name] = build_loss(loss_conf).to(self.device)\n\n        return losses\n\n    def build_optimizers(self, optim_opts):\n        optimizers = {}\n\n        if \"default\" in optim_opts.keys():\n            default_optim = optim_opts.pop(\"default\")\n\n        defined_optimizer_names = list(optim_opts.keys())\n        assert set(defined_optimizer_names).issubset(self.networks.keys())\n\n        for name in defined_optimizer_names:\n            optim_opt = optim_opts[name]\n            if optim_opt is None:\n                optim_opt = default_optim.copy()\n\n            params = []\n            for v in self.networks[name].parameters():\n                if v.requires_grad:\n                    params.append(v)\n\n            optim_type = optim_opt.pop(\"type\")\n            optimizer = getattr(torch.optim, optim_type)(params=params, **optim_opt)\n            optimizers[name] = optimizer\n\n  
      return optimizers\n\n    def build_schedulers(self, scheduler_opts):\n        \"\"\"Set up scheduler.\"\"\"\n        schedulers = {}\n        if \"default\" in scheduler_opts.keys():\n            default_opt = scheduler_opts.pop(\"default\")\n\n        for name in self.optimizers.keys():\n            scheduler_opt = scheduler_opts[name]\n            if scheduler_opt is None:\n                scheduler_opt = default_opt.copy()\n\n            schedulers[name] = build_scheduler(self.optimizers[name], scheduler_opt)\n\n        return schedulers\n\n    def model_to_device(self, net):\n        \"\"\"Model to device. It also warps models with DistributedDataParallel\n        or DataParallel.\n        Args:\n            net (nn.Module)\n        \"\"\"\n        net = net.to(self.device)\n        if self.opt[\"dist\"]:\n            net = DistributedDataParallel(net, device_ids=[torch.cuda.current_device()])\n        else:\n            net = DataParallel(net)\n        return net\n\n    def print_network(self, net):\n        # Generator\n        s, n = self.get_network_description(net)\n        if isinstance(net, nn.DataParallel) or isinstance(net, DistributedDataParallel):\n            net_struc_str = \"{} - {}\".format(\n                net.__class__.__name__, net.module.__class__.__name__\n            )\n        else:\n            net_struc_str = \"{}\".format(net.__class__.__name__)\n        if self.rank <= 0:\n            logger.info(\n                \"Network G structure: {}, with parameters: {:,d}\".format(\n                    net_struc_str, n\n                )\n            )\n            logger.info(s)\n\n    def set_optimizer(self, names, operation):\n        for name in names:\n            getattr(self.optimizers[name], operation)()\n\n    def set_requires_grad(self, names, requires_grad):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                for v in self.networks[name].parameters():\n                    v.requires_grad = requires_grad\n\n    def set_network_state(self, names, state):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                getattr(self.networks[name], state)()\n\n    def clip_grad_norm(self, names, norm):\n        for name in names:\n            nn.utils.clip_grad_norm_(self.networks[name].parameters(), max_norm=norm)\n\n    def _set_lr(self, lr_groups_l):\n        \"\"\"set learning rate for warmup,\n        lr_groups_l: list for lr_groups. 
one for each optimizer\"\"\"\n        # self.optimizers is a dict keyed by network name, so iterate its values\n        for optimizer, lr_groups in zip(self.optimizers.values(), lr_groups_l):\n            for param_group, lr in zip(optimizer.param_groups, lr_groups):\n                param_group[\"lr\"] = lr\n\n    def _get_init_lr(self):\n        # get the initial lr, which is set by the scheduler\n        init_lr_groups_l = []\n        for optimizer in self.optimizers.values():\n            init_lr_groups_l.append([v[\"initial_lr\"] for v in optimizer.param_groups])\n        return init_lr_groups_l\n\n    def update_learning_rate(self, cur_iter, warmup_iter=-1):\n        for _, scheduler in self.schedulers.items():\n            scheduler.step()\n        #### set up warm up learning rate\n        if cur_iter < warmup_iter:\n            # get initial lr for each group\n            init_lr_g_l = self._get_init_lr()\n            # modify warming-up learning rates\n            warm_up_lr_l = []\n            for init_lr_g in init_lr_g_l:\n                warm_up_lr_l.append([v / warmup_iter * cur_iter for v in init_lr_g])\n            # set learning rate\n            self._set_lr(warm_up_lr_l)\n\n    def get_current_learning_rate(self):\n        # return self.schedulers[0].get_lr()[0]\n        return list(self.optimizers.values())[0].param_groups[0][\"lr\"]\n\n    def get_network_description(self, network):\n        \"\"\"Get the string and total parameters of the network\"\"\"\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        s = str(network)\n        n = sum(map(lambda x: x.numel(), network.parameters()))\n        return s, n\n\n    def save_network(self, network, network_label, iter_label):\n        save_filename = \"{}_{}.pth\".format(iter_label, network_label)\n        save_path = os.path.join(self.opt[\"path\"][\"models\"], save_filename)\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        state_dict = network.state_dict()\n        for key, param in state_dict.items():\n            state_dict[key] = param.cpu()\n        torch.save(state_dict, save_path)\n\n    def save(self, iter_label):\n        for name in self.optimizers.keys():\n            self.save_network(self.networks[name], name, iter_label)\n\n    def load_network(self, network, load_path, strict=True):\n        if load_path is not None:\n            if isinstance(network, nn.DataParallel) or isinstance(\n                network, DistributedDataParallel\n            ):\n                network = network.module\n            load_net = torch.load(load_path)\n            load_net_clean = OrderedDict()  # remove unnecessary 'module.'\n            for k, v in load_net.items():\n                if k.startswith(\"module.\"):\n                    load_net_clean[k[7:]] = v\n                else:\n                    load_net_clean[k] = v\n            network.load_state_dict(load_net_clean, strict=strict)\n\n    def save_training_state(self, epoch, iter_step):\n        \"\"\"Saves training state during training, which will be used for resuming\"\"\"\n        state = {\"epoch\": epoch, \"iter\": iter_step, \"schedulers\": {}, \"optimizers\": {}}\n        for k, s in self.schedulers.items():\n            state[\"schedulers\"][k] = s.state_dict()\n        for k, o in self.optimizers.items():\n            state[\"optimizers\"][k] = o.state_dict()\n        save_filename = \"{}.state\".format(iter_step)\n        
save_path = os.path.join(self.opt[\"path\"][\"training_state\"], save_filename)\n        torch.save(state, save_path)\n\n    def resume_training(self, resume_state):\n        \"\"\"Resume the optimizers and schedulers for training\"\"\"\n        resume_optimizers = resume_state[\"optimizers\"]\n        resume_schedulers = resume_state[\"schedulers\"]\n        assert len(resume_optimizers) == len(\n            self.optimizers\n        ), \"Wrong lengths of optimizers\"\n        assert len(resume_schedulers) == len(\n            self.schedulers\n        ), \"Wrong lengths of schedulers\"\n        for name, o in resume_optimizers.items():\n            self.optimizers[name].load_state_dict(o)\n        for name, s in resume_schedulers.items():\n            self.schedulers[name].load_state_dict(s)\n\n    def reduce_loss_dict(self, loss_dict):\n        \"\"\"reduce loss dict.\n        In distributed training, it averages the losses among different GPUs .\n        Args:\n            loss_dict (OrderedDict): Loss dict.\n        \"\"\"\n        with torch.no_grad():\n            if self.opt[\"dist\"]:\n                keys = []\n                losses = []\n                for name, value in loss_dict.items():\n                    keys.append(name)\n                    losses.append(value)\n                losses = torch.stack(losses, 0)\n                torch.distributed.reduce(losses, dst=0)\n                if self.rank == 0:\n                    losses /= self.world_size\n                loss_dict = {key: loss for key, loss in zip(keys, losses)}\n\n            log_dict = OrderedDict()\n            for name, value in loss_dict.items():\n                log_dict[name] = value.mean().item()\n\n            return log_dict\n\n    def get_current_log(self):\n        return self.log_dict\n"
  },
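  {
    "path": "codes/config/Maeda/warmup_demo.py",
    "content": "\"\"\"Illustrative sketch, not part of the original repo: the linear warm-up rule in\nBaseModel.update_learning_rate. While cur_iter < warmup_iter, every group's LR is\noverridden with initial_lr * cur_iter / warmup_iter; afterwards the schedulers take\nover (the provided configs disable warm-up via warmup_iter: -1).\"\"\"\ninitial_lr = 1e-4\nwarmup_iter = 1000\nfor cur_iter in (1, 250, 500, 1000):\n    lr = initial_lr / warmup_iter * cur_iter if cur_iter < warmup_iter else initial_lr\n    print(cur_iter, lr)  # ramps from 1e-7 up to 1e-4\n"
  },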
  {
    "path": "codes/config/Maeda/models/pseudo_supervision_model.py",
    "content": "import logging\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.registry import MODEL_REGISTRY\n\nfrom .base_model import BaseModel\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass PseudoSupModel(BaseModel):\n    def __init__(self, opt):\n        super().__init__(opt)\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n        else:\n            self.rank = -1  # non dist training\n\n        self.data_names = [\"syn_lr\", \"syn_hr\", \"real_lr\"]\n\n        self.network_names = [\"netSR\", \"netG1\", \"netG2\", \"netD1\", \"netD2\", \"netD3\"]\n        self.networks = {}\n\n        self.loss_names = [\n            \"sr_pix\",\n            \"srd3_adv\",\n            \"g1d1_adv\",\n            \"g2d2_adv\",\n            \"g1g2_cycle\",\n            \"g1_idt\",\n            \"g2g1_cycle\",\n            \"g2_idt\"\n        ]\n        self.loss_weights = {}\n        self.losses = {}\n        self.optimizers = {}\n\n       # define networks and load pretrained models\n        nets_opt = opt[\"networks\"]\n        defined_network_names = list(nets_opt.keys())\n        assert set(defined_network_names).issubset(set(self.network_names))\n        \n        for name in defined_network_names:\n            setattr(self, name, self.build_network(nets_opt[name]))\n            self.networks[name] = getattr(self, name)\n            \n        if self.is_train:\n            train_opt = opt[\"train\"]\n            # setup loss, optimizers, schedulers\n            self.setup_train(opt[\"train\"])\n\n            self.max_grad_norm = train_opt[\"max_grad_norm\"]\n    \n    def feed_data(self, data):\n        self.syn_lr = data[\"ref_src\"].to(self.device)\n        self.syn_hr = data[\"ref_tgt\"].to(self.device)\n        self.real_lr = data[\"src\"].to(self.device)\n\n    def forward(self):\n\n        self.fake_syn_lr = self.netG1(self.real_lr)\n        self.rec_real_lr = self.netG2(self.fake_syn_lr)\n\n        self.fake_real_lr = self.netG2(self.syn_lr)\n        self.rec_syn_lr = self.netG1(self.fake_real_lr)\n\n        self.fake_real_hr = self.netSR(self.fake_syn_lr)\n        self.fake_syn_hr = self.netSR(self.rec_syn_lr)\n\n    def optimize_parameters(self, step):\n        loss_dict = OrderedDict()\n        \n        self.forward()\n        loss_G = 0\n        self.set_requires_grad([\"netD1\", \"netD2\", \"netD3\"], False)\n\n        g1_adv_loss = self.calculate_gan_loss_G(\n            self.netD1, self.losses[\"g1d1_adv\"], self.syn_lr, self.fake_syn_lr\n        )\n        loss_dict[\"g1_adv\"] = g1_adv_loss.item()\n        loss_G += self.loss_weights[\"g1d1_adv\"] * g1_adv_loss\n\n        g2_adv_loss = self.calculate_gan_loss_G(\n            self.netD2, self.losses[\"g2d2_adv\"], self.real_lr, self.fake_real_lr\n        )\n        loss_dict[\"g2_adv\"] = g2_adv_loss.item()\n        loss_G += self.loss_weights[\"g2d2_adv\"] * g2_adv_loss\n\n        g1g2_cycle = self.losses[\"g1g2_cycle\"](self.rec_real_lr, self.real_lr)\n        loss_dict[\"g1g2_cycle\"] = g1g2_cycle.item()\n        loss_G += self.loss_weights[\"g1g2_cycle\"] * g1g2_cycle\n\n        g2g1_cycle = self.losses[\"g2g1_cycle\"](self.rec_syn_lr, self.syn_lr)\n        loss_dict[\"g2g1_cycle\"] = g2g1_cycle.item()\n        loss_G += self.loss_weights[\"g2g1_cycle\"] * g2g1_cycle\n\n        if self.losses.get(\"g1_idt\"):\n            self.idt_syn_lr = self.netG1(self.syn_lr)\n            g1_idt = 
self.losses[\"g1_idt\"](self.idt_syn_lr, self.syn_lr)\n            loss_dict[\"g1_idt\"] = g1_idt.item()\n            loss_G += self.loss_weights[\"g1_idt\"] * g1_idt\n        \n        if self.losses.get(\"g2_idt\"):\n            self.idt_real_lr = self.netG2(self.real_lr)\n            g2_idt = self.losses[\"g2_idt\"](self.idt_real_lr, self.real_lr)\n            loss_dict[\"g2_idt\"] = g2_idt.item()\n            loss_G += self.loss_weights[\"g2_idt\"] * g2_idt\n\n        sr_pix = self.losses[\"sr_pix\"](self.fake_syn_hr, self.syn_hr)\n        loss_dict[\"sr_pix\"] = sr_pix.item()\n        loss_G += self.loss_weights[\"sr_pix\"] * sr_pix\n\n        sr_adv = self.calculate_gan_loss_G(\n            self.netD3, self.losses[\"srd3_adv\"], self.syn_hr, self.fake_real_hr\n        )\n        loss_dict[\"sr_adv\"] = sr_adv.item()\n        loss_G += self.loss_weights[\"srd3_adv\"] * sr_adv\n\n        self.set_optimizer(\n            names=[\"netG1\", \"netG2\", \"netSR\"], operation=\"zero_grad\"\n        )\n        loss_G.backward()\n        self.set_optimizer(names=[\"netG1\", \"netG2\", \"netSR\"], operation=\"step\")\n\n        ## update D1, D2, D3\n        self.set_requires_grad([\"netD1\", \"netD2\", \"netD3\"], True)\n\n        loss_D = 0\n        loss_d1 = self.calculate_gan_loss_D(\n            self.netD1, self.losses[\"g1d1_adv\"], self.syn_lr, self.fake_syn_lr\n        )\n        loss_dict[\"d1_adv\"] = loss_d1.item()\n        loss_D += self.loss_weights[\"g1d1_adv\"] * loss_d1\n\n        loss_d2 = self.calculate_gan_loss_D(\n            self.netD2, self.losses[\"g2d2_adv\"], self.real_lr, self.fake_real_lr\n        )\n        loss_dict[\"d2_adv\"] = loss_d2.item()\n        loss_D += self.loss_weights[\"g2d2_adv\"] * loss_d2\n\n        loss_d3 = self.calculate_gan_loss_D(\n            self.netD3, self.losses[\"srd3_adv\"], self.syn_hr, self.fake_real_hr\n        )\n        loss_dict[\"d3_adv\"] = loss_d3.item()\n        loss_D += self.loss_weights[\"srd3_adv\"] * loss_d3\n\n        self.set_optimizer(\n            names=[\"netD1\", \"netD2\", \"netD3\"], operation=\"zero_grad\"\n        )\n        loss_D.backward()\n        self.set_optimizer(names=[\"netD1\", \"netD2\", \"netD3\"], operation=\"step\")\n\n        self.log_dict = loss_dict\n       \n    def calculate_gan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n\n        loss_real = criterion(d_pred_real, True, is_disc=True)\n        loss_fake = criterion(d_pred_fake, False, is_disc=True)\n\n        return (loss_real + loss_fake) / 2\n\n    def calculate_gan_loss_G(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake)\n        loss_real = criterion(d_pred_fake, True, is_disc=False)\n\n        return loss_real\n\n    def test(self, data):\n        self.real_lr = data[\"src\"].to(self.device)\n        self.set_network_state([\"netSR\", \"netG1\"], \"eval\")\n        with torch.no_grad():\n            self.fake_syn_lr = self.netG1(self.real_lr)\n            self.fake_real_hr = self.netSR(self.fake_syn_lr)\n        self.set_network_state([\"netSR\", \"netG1\"], \"train\")\n\n    def get_current_visuals(self, need_GT=True):\n        out_dict = OrderedDict()\n        out_dict[\"lr\"] = self.real_lr.detach()[0].float().cpu()\n        out_dict[\"sr\"] = self.fake_real_hr.detach()[0].float().cpu()\n        return out_dict\n"
  },
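  {
    "path": "codes/config/Maeda/pseudo_sup_flow_demo.py",
    "content": "\"\"\"Illustrative sketch, not part of the original repo: the tensor flow optimized by\nPseudoSupModel, with identity/upsample stand-ins for the actual networks. netG1 maps\nreal-domain LR into the synthetic (bicubic) domain, netG2 maps synthetic LR back to\nthe real domain, and netSR only ever super-resolves synthetic-domain inputs.\"\"\"\nimport torch\nimport torch.nn as nn\n\nnetG1 = nn.Identity()                # stand-in for the real-to-synthetic Translator\nnetG2 = nn.Identity()                # stand-in for the synthetic-to-real Translator\nnetSR = nn.Upsample(scale_factor=4)  # stand-in for the EDSR super-resolver\n\nreal_lr = torch.rand(1, 3, 32, 32)\nsyn_lr = torch.rand(1, 3, 32, 32)\nsyn_hr = torch.rand(1, 3, 128, 128)\n\nfake_syn_lr = netG1(real_lr)      # cleaned real LR; netSR(fake_syn_lr) is the test path\nrec_real_lr = netG2(fake_syn_lr)  # cycle: real -> synthetic -> real\nfake_real_lr = netG2(syn_lr)\nrec_syn_lr = netG1(fake_real_lr)  # cycle: synthetic -> real -> synthetic\nfake_syn_hr = netSR(rec_syn_lr)   # supervised against syn_hr by sr_pix\n\nl1 = nn.L1Loss()\nloss = (\n    l1(rec_real_lr, real_lr)   # g1g2_cycle\n    + l1(rec_syn_lr, syn_lr)   # g2g1_cycle\n    + l1(fake_syn_hr, syn_hr)  # sr_pix\n)\nprint(loss.item())\n"
  },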
  {
    "path": "codes/config/Maeda/options/test/2017Track2.yml",
    "content": "#### general settings\nname: 2017Track2\nuse_tb_logger: false\nmodel: PseudoSupModel\nscale: 4\ngpu_ids: [0]\n\nmetrics: [psnr, ssim, lpips] \n\ndatasets:\n  test1:\n    name: 2017Track2\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test2:\n  #   name: 2018Track2\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_mild.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test3:\n  #   name: 2018Track3\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_difficult.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test4:\n  #   name: 2018Track4\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_wild.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test5:\n  #   name: 2020Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/track1_valid_input.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: log/2017Track2/models/latest_netSR.pth\n      strict_load: true\n\n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: log/2017Track2/models/latest_netG1.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/Maeda/options/test/2018Track2.yml",
    "content": "#### general settings\nname: 2018Track2\nuse_tb_logger: false\nmodel: PseudoSupModel\nscale: 4\ngpu_ids: [1]\n\nmetrics: [best_psnr, best_ssim, lpips] \n\ndatasets:\n  # test1:\n  #   name: 2017Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  test2:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test3:\n  #   name: 2018Track3\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_difficult.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test4:\n  #   name: 2018Track4\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_wild.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test5:\n  #   name: 2020Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/track1_valid_input.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: log/2018Track2/models/latest_netSR.pth\n      strict_load: true\n\n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: log/2018Track2/models/latest_netG1.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/Maeda/options/test/2018Track4.yml",
    "content": "#### general settings\nname: 2018Track4\nuse_tb_logger: false\nmodel: PseudoSupModel\nscale: 4\ngpu_ids: [2]\n\nmetrics: [best_psnr, best_ssim, lpips] \n\ndatasets:\n  # test1:\n  #   name: 2017Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test2:\n  #   name: 2018Track2\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test3:\n  #   name: 2018Track3\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_difficult.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  test4:\n    name: 2018Track4\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test5:\n  #   name: 2020Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/track1_valid_input.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: log/2018Track4/models/latest_netSR.pth\n      strict_load: true\n\n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: log/2018Track4/models/latest_netG1.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/Maeda/options/test/2020Track1.yml",
    "content": "#### general settings\nname: 2020Track1\nuse_tb_logger: false\nmodel: PseudoSupModel\nscale: 4\ngpu_ids: [4]\n\nmetrics: [psnr, ssim, lpips] \n\ndatasets:\n  # test1:\n  #   name: 2017Track1\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test2:\n  #   name: 2018Track2\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  # test3:\n  #   name: 2018Track3\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/valid_difficult.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/NTIRE2018/valid_HR.lmdb\n  # test4:\n  #   name: 2018Track4\n  #   mode: PairedDataset\n  #   data_type: lmdb\n  #   dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid.lmdb\n  #   dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  test5:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: log/2020Track1/models/285000_netSR.pth\n      strict_load: true\n\n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: log/2020Track1/models/285000_netG1.pth\n      strict_load: true\n"
  },
  {
    "path": "codes/config/Maeda/options/train/2017Track2.yml",
    "content": "#### general settings\nname: 2017Track2\nuse_tb_logger: false\nmodel: PseudoSupModel\nscale: 4\ngpu_ids: [1]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: PairedRefDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_ref_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_ref_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/train_LR/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2017Track2_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n  netD3:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain:\n      path: ~\n      strict_load: true\n    \n  #### network structures  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 16\n      noise_nf: 1\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: ~\n      strict_load: true\n    \n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain: \n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  max_grad_norm: 50\n\n  losses:\n    sr_pix:\n      type: L1Loss\n      weight: 1\n\n    srd3_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.1\n\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n      \n    g2d2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    g1g2_cycle:\n      type: L1Loss\n      weight: 1.0\n    \n    g2g1_cycle:\n      type: L1Loss\n      weight: 1.0\n    \n    g1_idt:\n      type: L1Loss\n      weight: 1\n    \n    g2_idt:\n      type: L1Loss\n      weight: 1\n\n  optimizers:\n    default:\n      type: Adam\n        lr: !!float 1e-4\n        betas: [0.9, 0.999]\n    netSR: ~\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n    netD3: ~\n  \n  niter: 300000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [100000, 180000, 240000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/Maeda/options/train/2018Track2.yml",
    "content": "#### general settings\nname: 2018Track2\nuse_tb_logger: false\nmodel: PseudoSupModel\nscale: 4\ngpu_ids: [2]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: PairedRefDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_ref_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_ref_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2017Track1_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      # path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      path: log/2018Track2/models/210000_netSR.pth\n      strict_load: true\n\n  netD3:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain:\n      path: log/2018Track2/models/210000_netD3.pth\n      strict_load: true\n    \n  #### network structures  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: log/2018Track2/models/210000_netG1.pth\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain: \n      path: log/2018Track2/models/210000_netD1.pth\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 16\n      noise_nf: 1\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: log/2018Track2/models/210000_netG2.pth\n      strict_load: true\n    \n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain: \n      path: log/2018Track2/models/210000_netD2.pth\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  max_grad_norm: 50\n\n  losses:\n    sr_pix:\n      type: L1Loss\n      weight: 1\n\n    srd3_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.1\n\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n      \n    g2d2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    g1g2_cycle:\n      type: L1Loss\n      weight: 1.0\n    \n    g2g1_cycle:\n      type: L1Loss\n      weight: 1.0\n    \n    g1_idt:\n      type: L1Loss\n      weight: 1\n    \n    g2_idt:\n      type: L1Loss\n      weight: 1\n\n  optimizers:\n    default:\n      type: Adam\n        lr: !!float 1e-4\n        betas: [0.9, 0.999]\n    netSR: ~\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n    netD3: ~\n  \n  niter: 300000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [100000, 
180000, 240000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/Maeda/options/train/2018Track4.yml",
    "content": "#### general settings\nname: 2018Track4\nuse_tb_logger: false\nmodel: PseudoSupModel\nscale: 4\ngpu_ids: [4]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: PairedRefDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 50]\n\n    dataroot_ref_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_ref_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/x4.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track4_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      # path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      path: log/2018Track4/models/210000_netSR.pth\n      strict_load: true\n\n  netD3:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain:\n      path: log/2018Track4/models/210000_netD3.pth\n      strict_load: true\n    \n  #### network structures  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: log/2018Track4/models/210000_netG1.pth\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain: \n      path: log/2018Track4/models/210000_netD1.pth\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 16\n      noise_nf: 1\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: log/2018Track4/models/210000_netG2.pth\n      strict_load: true\n    \n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain: \n      path: log/2018Track4/models/210000_netD2.pth\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  max_grad_norm: 50\n\n  losses:\n    sr_pix:\n      type: L1Loss\n      weight: 1\n\n    srd3_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.1\n\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n      \n    g2d2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    g1g2_cycle:\n      type: L1Loss\n      weight: 1.0\n    \n    g2g1_cycle:\n      type: L1Loss\n      weight: 1.0\n    \n    g1_idt:\n      type: L1Loss\n      weight: 1\n    \n    g2_idt:\n      type: L1Loss\n      weight: 1\n\n  optimizers:\n    default:\n      type: Adam\n        lr: !!float 1e-4\n        betas: [0.9, 0.999]\n    netSR: ~\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n    netD3: ~\n  \n  niter: 300000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [100000, 180000, 
240000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/Maeda/options/train/2020Track1.yml",
    "content": "#### general settings\nname: 2020Track1\nuse_tb_logger: false\nmodel: PseudoSupModel\nscale: 4\ngpu_ids: [3]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: PairedRefDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 50]\n\n    dataroot_ref_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_ref_src: /home/lzx/SRDatasets/DIV2K_train/BicLR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/train_source.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 8  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2020Track1_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      # path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      path: log/2020Track1/models/180000_netSR.pth\n      strict_load: true\n\n  netD3:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain:\n      path: log/2020Track1/models/180000_netD3.pth\n      strict_load: true\n    \n  #### network structures  \n  netG1:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 8\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: log/2020Track1/models/180000_netG1.pth\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain: \n      path: log/2020Track1/models/180000_netD1.pth\n      strict_load: true\n\n  netG2:\n    which_network: Translator\n    setting:\n      nf: 64\n      nb: 16\n      noise_nf: 1\n      zero_tail: true\n      scale: 1\n    pretrain: \n      path: log/2020Track1/models/180000_netG2.pth\n      strict_load: true\n    \n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain: \n      path: log/2020Track1/models/180000_netD2.pth\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  max_grad_norm: 50\n\n  losses:\n    sr_pix:\n      type: L1Loss\n      weight: 1\n\n    srd3_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.1\n\n    g1d1_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n      \n    g2d2_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    g1g2_cycle:\n      type: L1Loss\n      weight: 1.0\n    \n    g2g1_cycle:\n      type: L1Loss\n      weight: 1.0\n    \n    g1_idt:\n      type: L1Loss\n      weight: 1\n    \n    g2_idt:\n      type: L1Loss\n      weight: 1\n\n  optimizers:\n    default:\n      type: Adam\n        lr: !!float 1e-4\n        betas: [0.9, 0.999]\n    netSR: ~\n    netG1: ~\n    netG2: ~\n    netD1: ~\n    netD2: ~\n    netD3: ~\n  \n  niter: 300000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [100000, 
180000, 240000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
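  {
    "path": "codes/config/Maeda/options/train/README.md",
    "content": "These YAML files configure `PseudoSupModel` training for the NTIRE 2017/2018/2020 tracks. Before launching, edit the machine-specific parts of the chosen config: every `dataroot_*` entry points to a local LMDB, `gpu_ids` selects the device, and each `pretrain.path` either points to a checkpoint to load or is `~` to train from scratch. Then, from `codes/config/Maeda/`, run `python3 train.py --opt options/train/2017Track2.yml` (and `python3 test.py --opt <test config>` for evaluation).\n\nAs a minimal sketch of how these files are consumed (mirroring the top of train.py):\n\n```python\nimport sys\n\nsys.path.append(\"../../\")  # run from codes/config/Maeda/\n\nimport utils.option as option\n\nopt = option.parse(\"options/train/2017Track2.yml\", \"../../../\", is_train=True)\nopt = option.dict_to_nonedict(opt)  # missing keys read as None\nprint(opt[\"train\"][\"niter\"])  # -> 300000\n```\n\nEach config builds one SR network (`netSR`), two translators (`netG1`, `netG2`) and three PatchGAN discriminators (`netD1`-`netD3`), trained with the adversarial, cycle and identity losses under `train.losses`; all networks share the `default` Adam optimizer and the `MultiStepRestartLR` schedule.\n"
  },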
  {
    "path": "codes/config/Maeda/test.py",
    "content": "import argparse\nimport logging\nimport os.path\nimport sys\nimport time\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\nfrom utils import bgr2ycbcr, imresize\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=False)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    util.mkdirs(\n        (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n    )\n\n    os.system(\"rm ./result\")\n    os.symlink(os.path.join(opt[\"path\"][\"results_root\"], \"..\"), \"./result\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    torch.backends.cudnn.benchmark = True\n\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"test_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO,\n        screen=True,\n        tofile=True,\n    )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    logger = logging.getLogger(\"base\")\n    logger.info(option.dict2str(opt))\n\n    # Create test dataset and dataloader\n    test_datasets = []\n    test_loaders = []\n\n    for phase, dataset_opt in 
sorted(opt[\"datasets\"].items()):\n\n        test_set = create_dataset(dataset_opt)\n        test_loader = create_dataloader(test_set, dataset_opt, opt[\"dist\"])\n\n        if rank == 0:\n            logger.info(\n                \"Number of test images in [{:s}]: {:d}\".format(\n                    dataset_opt[\"name\"], len(test_set)\n                )\n            )\n        test_datasets.append(test_set)\n        test_loaders.append(test_loader)\n\n    # load pretrained model by default\n    model = create_model(opt)\n\n    for test_dataset, test_loader in zip(test_datasets, test_loaders):\n\n        test_set_name = test_dataset.opt[\"name\"]\n        dataset_dir = os.path.join(opt[\"path\"][\"results_root\"], test_set_name)\n\n        if rank == 0:\n            logger.info(\"\\nTesting [{:s}]...\".format(test_set_name))\n            util.mkdir(dataset_dir)\n\n        validate(\n            model,\n            test_dataset,\n            test_loader,\n            opt,\n            measure,\n            dataset_dir,\n            test_set_name,\n            logger,\n        )\n\n\ndef validate(\n    model, dataset, dist_loader, opt, measure, dataset_dir, test_set_name, logger\n):\n\n    test_results = {}\n    test_results_y = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n        test_results_y[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        test_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        img_path = test_data[\"src_path\"][0]\n        img_name = img_path.split(\"/\")[-1].split(\".\")[0]\n\n        model.test(test_data)\n        visuals = model.get_current_visuals()\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n       \n        suffix = opt[\"suffix\"]\n        if suffix:\n            save_img_path = os.path.join(dataset_dir, img_name + suffix + \".png\")\n        else:\n            save_img_path = os.path.join(dataset_dir, img_name + \".png\")\n        util.save_img(sr_img, save_img_path)\n\n        message = \"img:{:15s}; \".format(img_name)\n\n        crop_border = opt[\"crop_border\"] if opt[\"crop_border\"] else opt[\"scale\"]\n\n        if crop_border == 0:\n            cropped_sr_img = sr_img\n        else:\n            cropped_sr_img = sr_img[\n                crop_border:-crop_border, crop_border:-crop_border, :\n            ]\n\n        if \"tgt\" in test_data.keys():\n            gt_img = util.tensor2img(test_data[\"tgt\"][0].double().cpu())\n\n            if crop_border == 0:\n                cropped_gt_img = gt_img\n            else:\n                cropped_gt_img = gt_img[\n                    crop_border:-crop_border, crop_border:-crop_border, :\n                ]\n        else:\n            cropped_gt_img = None\n        \n        message += \"Scores - \"\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n            message += \"{}: {:.6f}; \".format(k, v)\n\n        if sr_img.shape[2] == 3:  # RGB image\n            sr_img_y = bgr2ycbcr(sr_img, only_y=True)\n            if crop_border == 0:\n                cropped_sr_img_y = sr_img_y * 255\n            else:\n                cropped_sr_img_y = (\n      
              sr_img_y[crop_border:-crop_border, crop_border:-crop_border] * 255\n                )\n            if gt_img is not None:\n                gt_img_y = bgr2ycbcr(gt_img, only_y=True)\n                if crop_border == 0:\n                    cropped_gt_img_y = gt_img_y * 255\n                else:\n                    cropped_gt_img_y = (\n                        gt_img_y[crop_border:-crop_border, crop_border:-crop_border]\n                        * 255\n                    )\n            else:\n                gt_img_y = None\n\n            message += \"Y Scores - \"\n            scores = measure(\n                res=cropped_sr_img_y, ref=cropped_gt_img_y, metrics=opt[\"metrics\"]\n            )\n            for k, v in scores.items():\n                test_results_y[k][idx] = v\n                message += \"{}: {:.6f}; \".format(k, v)\n\n        logger.info(message)\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n        for k, v in test_results_y.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    # log\n    avg_results = {}\n    message = \"Average Results for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger.info(message)\n\n    avg_results_y = {}\n    message = \"Average Results on Y channel for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results_y.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger.info(message)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/config/Maeda/train.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport random\nimport sys\nimport time\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef setup_dataloaer(opt, logger):\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        rank = 0\n        world_size = 1\n\n    for phase, dataset_opt in opt[\"datasets\"].items():\n        if phase == \"train\":\n            train_set = create_dataset(dataset_opt)\n            train_loader = create_dataloader(train_set, dataset_opt, opt[\"dist\"])\n            total_iters = opt[\"train\"][\"niter\"]\n            total_epochs = total_iters // (len(train_loader) - 1) + 1\n            if rank == 0:\n                logger.info(\n                    \"Number of train images: {:,d}, iters: {:,d}\".format(\n                        len(train_set), len(train_loader)\n                    )\n                )\n                logger.info(\n                    \"Total epochs needed: {:d} for iters {:,d}\".format(\n                        total_epochs, opt[\"train\"][\"niter\"]\n                    )\n                )\n\n        elif phase == \"val\":\n            val_set = create_dataset(dataset_opt)\n            val_loader = create_dataloader(val_set, dataset_opt, opt[\"dist\"])\n            if rank == 0:\n                logger.info(\n                    \"Number of val images in [{:s}]: {:d}\".format(\n                        dataset_opt[\"name\"], len(val_set)\n                    )\n                )\n        else:\n            raise NotImplementedError(\"Phase [{:s}] is not recognized.\".format(phase))\n\n    assert train_loader is not None\n    assert val_loader is not None\n\n    return train_set, train_loader, val_set, val_loader, total_iters, total_epochs\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=True)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = 
torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    if opt[\"train\"].get(\"resume_state\", None) is None:\n        util.mkdir_and_rename(\n            opt[\"path\"][\"experiments_root\"]\n        )  # rename experiment folder if exists\n        util.mkdirs(\n            (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n        )\n        os.system(\"rm ./log\")\n        os.symlink(os.path.join(opt[\"path\"][\"experiments_root\"], \"..\"), \"./log\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: \\\n            {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    seed = opt[\"train\"][\"manual_seed\"]\n    if seed is None:\n        # no manual seed given: fall back to a per-rank seed\n        seed = rank\n    util.set_random_seed(seed)\n\n    torch.backends.cudnn.benchmark = True\n    # torch.backends.cudnn.deterministic = True\n\n    # setup tensorboard and val logger\n    if rank == 0:\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger = SummaryWriter(log_dir=\"log/{}/tb_logger/\".format(opt[\"name\"]))\n\n        util.setup_logger(\n            \"val\",\n            opt[\"path\"][\"log\"],\n            \"val_\" + opt[\"name\"],\n            level=logging.INFO,\n            screen=True,\n            tofile=True,\n        )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    # config loggers; logging does not work before this is set up\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"train_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO if rank == 0 else logging.ERROR,\n        screen=True,\n        tofile=True,\n    )\n\n    logger = logging.getLogger(\"base\")\n    if rank == 0:\n        logger.info(option.dict2str(opt))\n\n    # create dataset\n    (\n        train_set,\n        train_loader,\n        val_set,\n        val_loader,\n        total_iters,\n        total_epochs,\n    ) = setup_dataloaer(opt, logger)\n\n    # create model\n    model = create_model(opt)\n\n    # loading resume state if exists\n    if opt[\"train\"].get(\"resume_state\", None):\n        # distributed resuming: all load into default GPU\n        device_id = gpu\n        resume_state = torch.load(\n            opt[\"train\"][\"resume_state\"],\n            map_location=lambda storage, loc: storage.cuda(device_id),\n        )\n\n        logger.info(\n            \"Resuming training from epoch: {}, iter: {}.\".format(\n                resume_state[\"epoch\"], resume_state[\"iter\"]\n            )\n        )\n\n        start_epoch = resume_state[\"epoch\"]\n        current_step = resume_state[\"iter\"]\n        model.resume_training(resume_state)  # handle optimizers and schedulers\n\n    else:\n        current_step = 0\n        start_epoch = 0\n\n    logger.info(\n        \"Start training from epoch: {:d}, iter: {:d}\".format(start_epoch, current_step)\n    )\n    data_time, iter_time = time.time(), time.time()\n    avg_data_time = avg_iter_time = 0\n    count = 0\n    for epoch in range(start_epoch, total_epochs + 1):\n        for _, train_data in enumerate(train_loader):\n\n            current_step += 1\n            count += 1\n            if current_step > total_iters:\n                break\n\n            data_time = time.time() - data_time\n            avg_data_time = (avg_data_time * (count - 1) + data_time) / count\n\n            model.feed_data(train_data)\n            model.optimize_parameters(current_step)\n            model.update_learning_rate(\n                current_step, warmup_iter=opt[\"train\"][\"warmup_iter\"]\n            )\n\n            iter_time = time.time() - iter_time\n            avg_iter_time = (avg_iter_time * (count - 1) + iter_time) / count\n\n            # log\n            if current_step % opt[\"logger\"][\"print_freq\"] == 0:\n                logs = model.get_current_log()\n                message = (\n                    f\"<epoch:{epoch:3d}, iter:{current_step:8,d}, \"\n                    f\"lr:{model.get_current_learning_rate():.3e}> \"\n                )\n\n                message += f'[time (data): {avg_iter_time:.3f} ({avg_data_time:.3f})] '\n                for k, v in logs.items():\n                    message += \"{:s}: {:.4e}; \".format(k, v)\n                    # tensorboard logger\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        if rank == 0:\n                            tb_logger.add_scalar(k, v, current_step)\n                logger.info(message)\n\n            # validation\n            if current_step % opt[\"train\"][\"val_freq\"] == 0:\n\n                avg_results = validate(\n                    model, val_set, val_loader, opt, measure, epoch, current_step\n                )\n\n                # tensorboard logger (nested here so that avg_results is\n                # always defined when it is read)\n                if rank == 0:\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        for k, v in avg_results.items():\n                            tb_logger.add_scalar(k, v, current_step)\n\n            # save models and training states\n            if current_step % opt[\"logger\"][\"save_checkpoint_freq\"] == 0:\n                if rank == 0:\n                    logger.info(\"Saving models and training states.\")\n                    model.save(current_step)\n                    model.save_training_state(epoch, current_step)\n\n            data_time = time.time()\n            iter_time = time.time()\n\n    if rank == 0:\n        logger.info(\"Saving the final model.\")\n        model.save(\"latest\")\n        logger.info(\"End of training.\")\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger.close()\n\n\ndef validate(model, dataset, dist_loader, opt, measure, epoch, current_step):\n\n    test_results = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    if rank == 0:\n        pbar = tqdm(total=len(dataset), leave=False, dynamic_ncols=True)\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        val_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        LR_img = val_data[\"src\"]\n        lr_img = util.tensor2img(LR_img)  # save LR image for reference\n\n        model.test(val_data)\n        visuals = model.get_current_visuals()\n\n        # Save images for reference\n        img_name = val_data[\"src_path\"][0].split(\"/\")[-1].split(\".\")[0]\n        img_dir = os.path.join(opt[\"path\"][\"val_images\"], img_name)\n\n        util.mkdir(img_dir)\n        save_lr_path = os.path.join(img_dir, \"{:s}_LR.png\".format(img_name))\n        util.save_img(lr_img, save_lr_path)\n\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n        save_img_path = os.path.join(\n            img_dir, \"{:s}_{:d}.png\".format(img_name, current_step)\n        )\n        util.save_img(sr_img, save_img_path)\n\n        if \"fake_lr\" in visuals.keys():\n            fake_lr_img = util.tensor2img(visuals[\"fake_lr\"])\n            save_img_path = os.path.join(\n                img_dir, f\"fake_lr_{current_step:d}.png\"\n            )\n            util.save_img(fake_lr_img, save_img_path)\n\n        # calculate scores\n        crop_size = opt[\"scale\"]\n        cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        if \"tgt\" in val_data.keys():\n            gt_img = util.tensor2img(val_data[\"tgt\"])\n            cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        else:\n            cropped_gt_img = gt_img = None\n\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n\n        if rank == 0:\n            for _ in range(world_size):\n                pbar.update(1)\n    if rank == 0:\n        pbar.close()\n\n    # log\n    avg_results = {}\n    message = \" <epoch:{:3d}, iter:{:8,d}, Average scores:\\t\".format(\n        epoch, current_step\n    )\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger_val = logging.getLogger(\"val\")  # validation logger\n        logger_val.info(message)\n\n    del test_results\n    torch.cuda.empty_cache()\n    return avg_results\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/config/PDM-SR/archs/__init__.py",
    "content": "import importlib\nimport os\nimport os.path as osp\n\nfrom utils.registry import ARCH_REGISTRY, LOSS_REGISTRY, LR_SCHEDULER_REGISTRY\n\narch_folder = osp.dirname(osp.abspath(__file__))\narch_filenames = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(arch_folder)\n    if v.endswith(\".py\")\n]\n# import all the arch modules\n_arch_modules = [\n    importlib.import_module(f\"archs.{file_name}\") for file_name in arch_filenames\n]\n\n\ndef build_network(net_opt):\n    which_network = net_opt[\"which_network\"]\n    net = ARCH_REGISTRY.get(which_network)(**net_opt[\"setting\"])\n    return net\n\n\ndef build_loss(loss_opt):\n    loss_type = loss_opt.pop(\"type\")\n    loss = LOSS_REGISTRY.get(loss_type)(**loss_opt)\n    return loss\n\ndef build_scheduler(optimizer, scheduler_opt):\n    scheduler_type = scheduler_opt.pop(\"type\")\n    scheduler = LR_SCHEDULER_REGISTRY.get(scheduler_type)(optimizer, **scheduler_opt)\n    return scheduler\n"
  },
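  {
    "path": "codes/config/PDM-SR/archs/build_network_demo.md",
    "content": "A minimal usage sketch, not part of the original codebase: it shows how a `networks` entry from the YAML configs maps onto `build_network` in this package. `which_network` selects a class registered in `ARCH_REGISTRY`, and `setting` becomes the constructor kwargs. It assumes the repo layout and that the snippet is run from `codes/config/PDM-SR/`, which is how train.py and test.py arrange their imports.\n\n```python\nimport sys\n\nsys.path.append(\"../../\")  # make codes/utils importable, as the scripts do\n\nfrom archs import build_network\n\n# mirrors the `netSR` block of the configs\nnet_opt = {\n    \"which_network\": \"EDSR\",\n    \"setting\": {\"nf\": 64, \"nb\": 16, \"res_scale\": 1, \"upscale\": 4},\n}\nnetSR = build_network(net_opt)\nprint(type(netSR).__name__)  # -> EDSR\n```\n\n`build_loss` and `build_scheduler` follow the same pattern, except that they `pop` the `type` key and pass the remaining options as kwargs.\n"
  },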
  {
    "path": "codes/config/PDM-SR/archs/deg_arch.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom utils.registry import ARCH_REGISTRY\n\n\nclass ResBlock(nn.Module):\n    def __init__(self, nf, ksize, norm=nn.BatchNorm2d, act=nn.ReLU):\n        super().__init__()\n        \n        self.nf = nf\n        self.body = nn.Sequential(\n            nn.Conv2d(nf, nf, ksize, 1, ksize//2),\n            norm(nf), act(),\n            nn.Conv2d(nf, nf, ksize, 1, ksize//2)\n        )\n    \n    def forward(self, x):\n        return torch.add(x, self.body(x))\n\nclass Quantization(nn.Module):\n    def __init__(self, n=5):\n        super().__init__()\n        self.n = n\n\n    def forward(self, inp):\n        out = inp * 255.0\n        flag = -1\n        for i in range(1, self.n + 1):\n            out = out + flag / np.pi / i * torch.sin(2 * i * np.pi * inp * 255.0)\n            flag = flag * (-1)\n        return out / 255.0\n\nclass KernelModel(nn.Module):\n    def __init__(self, opt, scale):\n        super().__init__()\n\n        self.opt = opt\n        self.scale = scale\n\n        nc, nf, nb = opt[\"nc\"], opt[\"nf\"], opt[\"nb\"]\n        ksize = opt[\"ksize\"]\n\n        if opt[\"spatial\"]:\n            head_k = opt[\"head_k\"]\n            body_k = opt[\"body_k\"]\n        else:\n            head_k = body_k = 1\n        \n        if opt[\"mix\"]:\n            in_nc = 3 + nc\n        else:\n            in_nc = nc\n\n        deg_kernel = [\n            nn.Conv2d(in_nc, nf, head_k, 1, head_k//2),\n            nn.BatchNorm2d(nf), nn.ReLU(True),\n            *[\n                ResBlock(nf=nf, ksize=body_k)\n                for _ in range(nb)\n                ],\n            nn.Conv2d(nf, ksize ** 2, 1, 1, 0),\n            nn.Softmax(1)\n        ]\n        self.deg_kernel = nn.Sequential(*deg_kernel)\n\n        if opt[\"zero_init\"]:\n            nn.init.constant_(self.deg_kernel[-2].weight, 0)\n            nn.init.constant_(self.deg_kernel[-2].bias, 0)\n            self.deg_kernel[-2].bias.data[ksize**2//2] = 1\n\n        self.pad = nn.ReflectionPad2d(ksize//2)\n    \n    def forward(self, x):\n        B, C, H, W = x.shape\n        h = H // self.scale\n        w = W // self.scale\n\n        if self.opt[\"nc\"] > 0:\n            if self.opt[\"spatial\"]:\n                zk = torch.randn(B, self.opt[\"nc\"], H, W).to(x.device)\n            else:\n                zk = torch.randn(B, self.opt[\"nc\"], 1, 1).to(x.device)\n                if self.opt[\"mix\"]:\n                    zk = zk.repeat(1, 1, H, W)\n        \n        if self.opt[\"mix\"]:\n            if self.opt[\"nc\"] > 0:\n                inp = torch.cat([x, zk], 1)\n            else:\n                inp = x\n        else:\n            inp = zk          \n        \n        ksize = self.opt[\"ksize\"]\n        kernel = self.deg_kernel(inp).view(B, 1, ksize**2, *inp.shape[2:])\n\n        x = x.view(B*C, 1, H, W)\n        x = F.unfold(\n            self.pad(x), kernel_size=ksize, stride=self.scale, padding=0\n        ).view(B, C, ksize**2, h, w)\n\n        x = torch.mul(x, kernel).sum(2).view(B, C, h, w)\n        kernel = kernel.view(B, ksize, ksize, *inp.shape[2:]).squeeze()\n\n        return x, kernel\n\nclass NoiseModel(nn.Module):\n    def __init__(self, opt, scale):\n        super().__init__()\n\n        self.scale = scale\n        self.opt = opt\n\n        nc, nf, nb = opt[\"nc\"], opt[\"nf\"], opt[\"nb\"]\n\n        if opt[\"spatial\"]:\n            head_k = opt[\"head_k\"]\n            body_k = opt[\"body_k\"]\n        
else:\n            head_k = body_k = 1\n        \n        if opt[\"mix\"]:\n            in_nc = 3 + nc\n        else:\n            in_nc = nc\n\n        deg_noise = [\n            nn.Conv2d(in_nc, nf, head_k, 1, head_k//2),\n            nn.BatchNorm2d(nf), nn.ReLU(True),\n            *[\n                ResBlock(nf=nf, ksize=body_k)\n                for _ in range(nb)\n                ],\n            nn.Conv2d(nf, opt[\"dim\"], 1, 1, 0),\n        ]\n        self.deg_noise = nn.Sequential(*deg_noise)\n\n        if opt[\"zero_init\"]:\n            nn.init.constant_(self.deg_noise[-1].weight, 0)\n            nn.init.constant_(self.deg_noise[-1].bias, 0)\n        else:\n            nn.init.normal_(self.deg_noise[-1].weight, 0.001)\n            nn.init.constant_(self.deg_noise[-1].bias, 0)\n    \n    def forward(self, x):\n        B, C, H, W = x.shape\n\n        if self.opt[\"nc\"] > 0:     \n            if self.opt[\"spatial\"]:\n                zn = torch.randn(x.shape[0], self.opt[\"nc\"], H, W).to(x.device)\n            else:\n                zn = torch.randn(x.shape[0], self.opt[\"nc\"], 1, 1).to(x.device)\n                if self.opt[\"mix\"]:\n                    zn = zn.repeat(1, 1, H, W)\n        \n        if self.opt[\"mix\"]:\n            if self.opt[\"nc\"] > 0:\n                inp = torch.cat([x, zn], 1)\n            else:\n                inp = x\n        else:\n            inp = zn\n            \n        noise = self.deg_noise(inp)\n\n        return noise\n\n@ARCH_REGISTRY.register()\nclass DegModel(nn.Module):\n    def __init__(\n        self,  scale=4, nc_img=3, kernel_opt=None, noise_opt=None\n    ):\n        super().__init__()\n\n        self.scale = scale\n\n        self.kernel_opt = kernel_opt\n        self.noise_opt = noise_opt\n\n        if kernel_opt is not None:\n            self.deg_kernel = KernelModel(kernel_opt, scale)\n        \n        if noise_opt is not None:\n           self.deg_noise = NoiseModel(noise_opt, scale)\n\n        else:\n            self.quant = Quantization()\n        \n    def forward(self, inp):\n        B, C, H, W = inp.shape\n        h = H // self.scale\n        w = W // self.scale\n\n        # kernel\n        if self.kernel_opt is not None:\n            x, kernel = self.deg_kernel(inp)\n        else:\n            x = F.interpolate(inp, scale_factor=1/self.scale, mode=\"bicubic\", align_corners=False)\n            kernel = None\n\n        # noise\n        if self.noise_opt is not None:\n            noise = self.deg_noise(x.detach())\n            x = x + noise\n        else:\n            noise = None\n            x = self.quant(x)\n        return x, kernel, noise\n\n"
  },
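  {
    "path": "codes/config/PDM-SR/archs/deg_arch_demo.md",
    "content": "An illustrative smoke test, not part of the original codebase: it instantiates `DegModel` with assumed option values (the dict keys mirror the ones read by `KernelModel` and `NoiseModel` in `deg_arch.py`; the values in the released configs may differ) and prints the output shapes of a x4 degradation. It assumes the snippet is run from `codes/config/PDM-SR/`.\n\n```python\nimport sys\n\nsys.path.append(\"../../\")  # make codes/utils importable, as the scripts do\n\nimport torch\n\nfrom archs.deg_arch import DegModel\n\n# assumed settings; keys follow KernelModel/NoiseModel in deg_arch.py\nkernel_opt = {\"nc\": 8, \"nf\": 64, \"nb\": 8, \"ksize\": 21, \"spatial\": False,\n              \"head_k\": 1, \"body_k\": 1, \"mix\": False, \"zero_init\": True}\nnoise_opt = {\"nc\": 8, \"nf\": 64, \"nb\": 8, \"dim\": 3, \"spatial\": False,\n             \"head_k\": 1, \"body_k\": 1, \"mix\": True, \"zero_init\": True}\n\nmodel = DegModel(scale=4, kernel_opt=kernel_opt, noise_opt=noise_opt).eval()\n\nwith torch.no_grad():\n    hr = torch.rand(2, 3, 128, 128)\n    lr, kernel, noise = model(hr)\n\nprint(lr.shape)      # torch.Size([2, 3, 32, 32])\nprint(kernel.shape)  # torch.Size([2, 21, 21]) for a non-spatial kernel\nprint(noise.shape)   # torch.Size([2, 3, 32, 32])\n```\n\nWith `mix: False` the kernel is predicted from random noise alone (an image-independent degradation); `mix: True` concatenates the image, making the prediction content-dependent.\n"
  },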
  {
    "path": "codes/config/PDM-SR/archs/discriminator.py",
    "content": "import torch\nimport torchvision\nimport functools\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils import spectral_norm\n\nfrom utils.registry import ARCH_REGISTRY\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG128(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512 * 4 * 4, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG32(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        
self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass PatchGANDiscriminator(nn.Module):\n    \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n    def __init__(self, in_c, nf, nb, stride=1, norm_layer=nn.InstanceNorm2d):\n        \"\"\"Construct a PatchGAN discriminator\n\n        Parameters:\n            input_nc (int)  -- the number of channels in input images\n            ndf (int)       -- the number of filters in the last conv layer\n            n_layers (int)  -- the number of conv layers in the discriminator\n            norm_layer      -- normalization layer\n        \"\"\"\n        super().__init__()\n        if (\n            type(norm_layer) == functools.partial\n        ):  # no need to use bias as BatchNorm2d has affine parameters\n            use_bias = norm_layer.func == nn.InstanceNorm2d\n        else:\n            use_bias = norm_layer == nn.InstanceNorm2d\n\n        kw = 3\n        padw = 1\n        sequence = [\n            nn.Conv2d(in_c, nf, kernel_size=kw, stride=1, padding=padw),\n            nn.LeakyReLU(0.2, True),\n        ]\n        nf_mult = 1\n        nf_mult_prev = 1\n        for n in range(1, nb):  # gradually increase the number of filters\n            nf_mult_prev = nf_mult\n            nf_mult = min(2 ** n, 8)\n            sequence += [\n                nn.Conv2d(\n                    nf * nf_mult_prev,\n                    nf * nf_mult,\n                    kernel_size=kw,\n                    stride=stride,\n                    padding=padw,\n                    bias=use_bias,\n                ),\n                norm_layer(nf * nf_mult),\n                nn.LeakyReLU(0.2, True),\n            ]\n\n        nf_mult_prev = nf_mult\n        nf_mult = min(2 ** nb, 8)\n        sequence += [\n            nn.Conv2d(\n                nf * nf_mult_prev,\n                nf * nf_mult,\n                kernel_size=kw,\n                stride=1,\n                padding=padw,\n                bias=use_bias,\n            ),\n            norm_layer(nf * nf_mult),\n            nn.LeakyReLU(0.2, True),\n        ]\n\n        sequence += [\n            nn.Conv2d(nf 
* nf_mult, nf, kernel_size=kw, stride=1, padding=padw)\n        ]  # output an nf-channel prediction map\n        self.model = nn.Sequential(*sequence)\n\n    def forward(self, input):\n        \"\"\"Standard forward.\"\"\"\n        return self.model(input)\n\n\n@ARCH_REGISTRY.register()\nclass UNetDiscriminatorSN(nn.Module):\n    \"\"\"Defines a U-Net discriminator with spectral normalization (SN)\"\"\"\n\n    def __init__(self, nc, nf=64, skip_connection=True):\n        super(UNetDiscriminatorSN, self).__init__()\n        self.skip_connection = skip_connection\n        norm = spectral_norm\n\n        self.conv0 = nn.Conv2d(nc, nf, kernel_size=3, stride=1, padding=1)\n\n        self.conv1 = norm(nn.Conv2d(nf, nf * 2, 4, 2, 1, bias=False))\n        self.conv2 = norm(nn.Conv2d(nf * 2, nf * 4, 4, 2, 1, bias=False))\n        self.conv3 = norm(nn.Conv2d(nf * 4, nf * 8, 4, 2, 1, bias=False))\n        # upsample\n        self.conv4 = norm(nn.Conv2d(nf * 8, nf * 4, 3, 1, 1, bias=False))\n        self.conv5 = norm(nn.Conv2d(nf * 4, nf * 2, 3, 1, 1, bias=False))\n        self.conv6 = norm(nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=False))\n\n        # extra\n        self.conv7 = norm(nn.Conv2d(nf, nf, 3, 1, 1, bias=False))\n        self.conv8 = norm(nn.Conv2d(nf, nf, 3, 1, 1, bias=False))\n\n        self.conv9 = nn.Conv2d(nf, 1, 3, 1, 1)\n\n    def forward(self, x):\n        x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True)\n        x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True)\n        x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True)\n        x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True)\n\n        # upsample\n        x3 = F.interpolate(x3, scale_factor=2, mode='bilinear', align_corners=False)\n        x4 = F.leaky_relu(self.conv4(x3), negative_slope=0.2, inplace=True)\n\n        if self.skip_connection:\n            x4 = x4 + x2\n        x4 = F.interpolate(x4, scale_factor=2, mode='bilinear', align_corners=False)\n        x5 = F.leaky_relu(self.conv5(x4), negative_slope=0.2, inplace=True)\n\n        if self.skip_connection:\n            x5 = x5 + x1\n        x5 = F.interpolate(x5, scale_factor=2, mode='bilinear', align_corners=False)\n        x6 = F.leaky_relu(self.conv6(x5), negative_slope=0.2, inplace=True)\n\n        if self.skip_connection:\n            x6 = x6 + x0\n\n        # extra\n        out = F.leaky_relu(self.conv7(x6), negative_slope=0.2, inplace=True)\n        out = F.leaky_relu(self.conv8(out), negative_slope=0.2, inplace=True)\n        out = self.conv9(out)\n\n        return out"
  },
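  {
    "path": "codes/config/PDM-SR/archs/discriminator_demo.md",
    "content": "An illustrative sketch, not part of the original codebase: it runs `PatchGANDiscriminator` with the `setting` used for the discriminators in the configs (`in_c: 3, nf: 64, nb: 3, stride: 2`) and prints the prediction-map shape. Note that the final convolution emits `nf` channels rather than the single channel of the classic PatchGAN, so each spatial position carries an `nf`-dimensional patch score. It assumes the snippet is run from `codes/config/PDM-SR/`.\n\n```python\nimport sys\n\nsys.path.append(\"../../\")  # make codes/utils importable, as the scripts do\n\nimport torch\n\nfrom archs.discriminator import PatchGANDiscriminator\n\nnetD = PatchGANDiscriminator(in_c=3, nf=64, nb=3, stride=2).eval()\n\nwith torch.no_grad():\n    pred = netD(torch.rand(1, 3, 32, 32))\n\n# two stride-2 stages: 32 -> 16 -> 8 spatial positions\nprint(pred.shape)  # torch.Size([1, 64, 8, 8])\n```\n"
  },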
  {
    "path": "codes/config/PDM-SR/archs/edsr.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(\n        self,\n        rgb_range,\n        rgb_mean=(0.4488, 0.4371, 0.4040),\n        rgb_std=(1.0, 1.0, 1.0),\n        sign=-1,\n    ):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\n\n\n@ARCH_REGISTRY.register()\nclass EDSR(nn.Module):\n    def __init__(self, nb, nf, res_scale=0.1, upscale=4, conv=default_conv):\n        super(EDSR, self).__init__()\n\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        scale = upscale\n       
 act = nn.ReLU(True)\n        # url_name = 'r{}f{}x{}'.format(nb, nf, upscale)\n        # if url_name in url:\n        #     self.url = url[url_name]\n        # else:\n        #     self.url = None\n        self.sub_mean = MeanShift(255.0, sign=-1)\n        self.add_mean = MeanShift(255.0, sign=1)\n\n        # define head module\n        m_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, n_feats, kernel_size, act=act, res_scale=res_scale)\n            for _ in range(n_resblocks)\n        ]\n        m_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = nn.Sequential(*m_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x * 255.0)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = self.tail(res)\n        x = self.add_mean(x) / 255.0\n\n        return x\n"
  },
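  {
    "path": "codes/config/PDM-SR/archs/edsr_demo.md",
    "content": "An illustrative sketch, not part of the original codebase: it instantiates `EDSR` with the `netSR` settings from the configs and upscales a `[0, 1]`-range batch by x4 (the forward pass rescales to 0-255 internally around the `MeanShift` layers). It assumes the snippet is run from `codes/config/PDM-SR/`.\n\n```python\nimport sys\n\nsys.path.append(\"../../\")  # make codes/utils importable, as the scripts do\n\nimport torch\n\nfrom archs.edsr import EDSR\n\nnetSR = EDSR(nb=16, nf=64, res_scale=1, upscale=4).eval()\n\nwith torch.no_grad():\n    sr = netSR(torch.rand(1, 3, 32, 32))  # inputs expected in [0, 1]\n\nprint(sr.shape)  # torch.Size([1, 3, 128, 128])\n```\n"
  },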
  {
    "path": "codes/config/PDM-SR/archs/loss.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport lpips as lp\n\nfrom utils.registry import LOSS_REGISTRY\n\nfrom .vgg import VGGFeatureExtractor\n\n\n@LOSS_REGISTRY.register()\nclass TVLoss(nn.Module):\n    def __init__(self, penealty=\"L1Loss\"):\n        super().__init__()\n        self.penealty = getattr(nn, penealty)()\n\n    def forward(self, pred):\n        y_diff = self.penealty(pred[:, :, :-1, :], pred[:, :, 1:, :])\n        x_diff = self.penealty(pred[:, :, :, :-1], pred[:, :, :, 1:])\n\n        loss = x_diff + y_diff\n\n        return loss\n\n@LOSS_REGISTRY.register()\nclass GaussGuided(nn.Module):\n    def __init__(self, ksize, sigma):\n        super().__init__()\n\n        ax = torch.arange(0, ksize) - ksize//2\n        xx, yy = torch.meshgrid(ax, ax)\n        dis = (xx ** 2 + yy ** 2)\n        dis = torch.exp(-dis / sigma ** 2)\n        dis = dis / dis.sum()\n\n        self.register_buffer(\"gauss\", dis.view(1, ksize**2, 1, 1))\n    \n    def forward(self, kernel):\n\n        return F.mse_loss(self.gauss, kernel)\n\n@LOSS_REGISTRY.register()\nclass PerceptualLossLPIPS(nn.Module):\n    def __init__(self, net=\"alex\", normalize=True):\n        super().__init__()\n        self.fn = lp.LPIPS(net=net, spatial=True)\n        for p in self.fn.parameters():\n            p.requires_grad = False\n        \n        self.normalize = normalize\n    \n    def forward(self, res, ref):\n        return self.fn(res, ref, normalize=self.normalize).mean(), None\n\n\n@LOSS_REGISTRY.register()\nclass MSELoss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.mse_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass L1Loss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.l1_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass GANLoss(nn.Module):\n    \"\"\"Define GAN loss.\n    Args:\n        gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.\n        real_label_val (float): The value for real label. Default: 1.0.\n        fake_label_val (float): The value for fake label. Default: 0.0.\n    \"\"\"\n\n    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):\n        super(GANLoss, self).__init__()\n        self.gan_type = gan_type\n        self.real_label_val = real_label_val\n        self.fake_label_val = fake_label_val\n\n        if self.gan_type == \"vanilla\":\n            self.loss = nn.BCEWithLogitsLoss()\n        elif self.gan_type == \"lsgan\":\n            self.loss = nn.MSELoss()\n        elif self.gan_type == \"wgan\":\n            self.loss = self._wgan_loss\n        elif self.gan_type == \"wgan_softplus\":\n            self.loss = self._wgan_softplus_loss\n        elif self.gan_type == \"hinge\":\n            self.loss = nn.ReLU()\n        else:\n            raise NotImplementedError(f\"GAN type {self.gan_type} is not implemented.\")\n\n    def _wgan_loss(self, input, target):\n        \"\"\"wgan loss.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return -input.mean() if target else input.mean()\n\n    def _wgan_softplus_loss(self, input, target):\n        \"\"\"wgan loss with soft plus. 
softplus is a smooth approximation to the\n        ReLU function.\n        In StyleGAN2, it is called:\n            Logistic loss for discriminator;\n            Non-saturating loss for generator.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return F.softplus(-input).mean() if target else F.softplus(input).mean()\n\n    def get_target_label(self, input, target_is_real):\n        \"\"\"Get target label.\n        Args:\n            input (Tensor): Input tensor.\n            target_is_real (bool): Whether the target is real or fake.\n        Returns:\n            (bool | Tensor): Target tensor. Return bool for wgan, otherwise,\n                return Tensor.\n        \"\"\"\n\n        if self.gan_type in [\"wgan\", \"wgan_softplus\"]:\n            return target_is_real\n        target_val = self.real_label_val if target_is_real else self.fake_label_val\n        return input.new_ones(input.size()) * target_val\n\n    def forward(self, input, target_is_real, is_disc=False):\n        \"\"\"\n        Args:\n            input (Tensor): The input for the loss module, i.e., the network\n                prediction.\n            target_is_real (bool): Whether the targe is real or fake.\n            is_disc (bool): Whether the loss for discriminators or not.\n                Default: False.\n        Returns:\n            Tensor: GAN loss value.\n        \"\"\"\n        target_label = self.get_target_label(input, target_is_real)\n        if self.gan_type == \"hinge\":\n            if is_disc:  # for discriminators in hinge-gan\n                input = -input if target_is_real else input\n                loss = self.loss(1 + input).mean()\n            else:  # for generators in hinge-gan\n                loss = -input.mean()\n        else:  # other gan types\n            loss = self.loss(input, target_label)\n\n        return loss\n\n\n@LOSS_REGISTRY.register()\nclass PerceptualLoss(nn.Module):\n    \"\"\"Perceptual loss with commonly used style loss.\n    Args:\n        layer_weights (dict): The weight for each layer of vgg feature.\n            Here is an example: {'conv5_4': 1.}, which means the conv5_4\n            feature layer (before relu5_4) will be extracted with weight\n            1.0 in calculting losses.\n        vgg_type (str): The type of vgg network used as feature extractor.\n            Default: 'vgg19'.\n        use_input_norm (bool):  If True, normalize the input image in vgg.\n            Default: True.\n        range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].\n            Default: False.\n        perceptual_weight (float): If `perceptual_weight > 0`, the perceptual\n            loss will be calculated and the loss will multiplied by the\n            weight. Default: 1.0.\n        style_weight (float): If `style_weight > 0`, the style loss will be\n            calculated and the loss will multiplied by the weight.\n            Default: 0.\n        criterion (str): Criterion used for perceptual loss. 
Default: 'l1'.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_weights,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        perceptual_weight=1.0,\n        style_weight=0.0,\n        criterion=\"l1\",\n    ):\n        super(PerceptualLoss, self).__init__()\n        self.perceptual_weight = perceptual_weight\n        self.style_weight = style_weight\n        self.layer_weights = layer_weights\n        self.vgg = VGGFeatureExtractor(\n            layer_name_list=list(layer_weights.keys()),\n            vgg_type=vgg_type,\n            use_input_norm=use_input_norm,\n            range_norm=range_norm,\n        )\n\n        self.criterion_type = criterion\n        if self.criterion_type == \"l1\":\n            self.criterion = torch.nn.L1Loss()\n        elif self.criterion_type == \"l2\":\n            self.criterion = torch.nn.L2loss()\n        elif self.criterion_type == \"fro\":\n            self.criterion = None\n        else:\n            raise NotImplementedError(f\"{criterion} criterion has not been supported.\")\n\n    def forward(self, x, gt):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, c, h, w).\n            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        # extract vgg features\n        x_features = self.vgg(x)\n        gt_features = self.vgg(gt.detach())\n\n        # calculate perceptual loss\n        if self.perceptual_weight > 0:\n            percep_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    percep_loss += (\n                        torch.norm(x_features[k] - gt_features[k], p=\"fro\")\n                        * self.layer_weights[k]\n                    )\n                else:\n                    percep_loss += (\n                        self.criterion(x_features[k], gt_features[k])\n                        * self.layer_weights[k]\n                    )\n            percep_loss *= self.perceptual_weight\n        else:\n            percep_loss = None\n\n        # calculate style loss\n        if self.style_weight > 0:\n            style_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    style_loss += (\n                        torch.norm(\n                            self._gram_mat(x_features[k])\n                            - self._gram_mat(gt_features[k]),\n                            p=\"fro\",\n                        )\n                        * self.layer_weights[k]\n                    )\n                else:\n                    style_loss += (\n                        self.criterion(\n                            self._gram_mat(x_features[k]),\n                            self._gram_mat(gt_features[k]),\n                        )\n                        * self.layer_weights[k]\n                    )\n            style_loss *= self.style_weight\n        else:\n            style_loss = None\n\n        return percep_loss, style_loss\n\n    def _gram_mat(self, x):\n        \"\"\"Calculate Gram matrix.\n        Args:\n            x (torch.Tensor): Tensor with shape of (n, c, h, w).\n        Returns:\n            torch.Tensor: Gram matrix.\n        \"\"\"\n        n, c, h, w = x.size()\n        features = x.view(n, c, w * h)\n        features_t = features.transpose(1, 2)\n        gram = features.bmm(features_t) / (c * h * 
w)\n        return gram\n\n\n@LOSS_REGISTRY.register()\nclass CharbonnierLoss(nn.Module):\n    \"\"\"Charbonnier Loss (L1)\"\"\"\n\n    def __init__(self, eps=1e-6):\n        super(CharbonnierLoss, self).__init__()\n        self.eps = eps\n\n    def forward(self, x, y):\n        diff = x - y\n        loss = torch.mean(torch.sqrt(diff * diff + self.eps))\n        return loss\n\n\nclass GradientPenaltyLoss(nn.Module):\n    def __init__(self, device=torch.device(\"cpu\")):\n        super(GradientPenaltyLoss, self).__init__()\n        self.register_buffer(\"grad_outputs\", torch.Tensor())\n        self.grad_outputs = self.grad_outputs.to(device)\n\n    def get_grad_outputs(self, input):\n        if self.grad_outputs.size() != input.size():\n            self.grad_outputs.resize_(input.size()).fill_(1.0)\n        return self.grad_outputs\n\n    def forward(self, interp, interp_crit):\n        grad_outputs = self.get_grad_outputs(interp_crit)\n        grad_interp = torch.autograd.grad(\n            outputs=interp_crit,\n            inputs=interp,\n            grad_outputs=grad_outputs,\n            create_graph=True,\n            retain_graph=True,\n            only_inputs=True,\n        )[0]\n        grad_interp = grad_interp.view(grad_interp.size(0), -1)\n        grad_interp_norm = grad_interp.norm(2, dim=1)\n\n        loss = ((grad_interp_norm - 1) ** 2).mean()\n        return loss\n"
  },
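  {
    "path": "codes/config/PDM-SR/example_losses.py",
    "content": "\"\"\"A minimal, hypothetical usage sketch (this file is not part of the\noriginal release): it combines the registered GANLoss and TVLoss from\narchs/loss.py the way the models do. Run from codes/config/PDM-SR, like\ncount_flops.py; requires torch and lpips to be installed.\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.loss import GANLoss, TVLoss\n\ngan_loss = GANLoss(gan_type=\"lsgan\")\ntv_loss = TVLoss()\n\nfake_logits = torch.randn(4, 1)\nreal_logits = torch.randn(4, 1)\n\n# generator side: push the discriminator output on fakes toward the real label\nloss_g = gan_loss(fake_logits, True, is_disc=False)\n\n# discriminator side: average of real/fake terms, as in calculate_gan_loss_D\nloss_d = 0.5 * (\n    gan_loss(real_logits, True, is_disc=True)\n    + gan_loss(fake_logits, False, is_disc=True)\n)\n\n# total-variation smoothness term on an image-like tensor\nloss_tv = tv_loss(torch.rand(1, 3, 32, 32))\nprint(loss_g.item(), loss_d.item(), loss_tv.item())\n"
  },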
  {
    "path": "codes/config/PDM-SR/archs/lr_scheduler.py",
    "content": "import math\nfrom collections import Counter, defaultdict\n\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\n\nfrom utils.registry import LR_SCHEDULER_REGISTRY\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass LinearDecayLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        decay_prop,\n        total_steps,\n        last_epoch=-1,\n    ):\n        self.decay_prop = decay_prop\n        self.total_steps = total_steps\n\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n       \n        return [\n            group[\"initial_lr\"] * (1 - (self.last_epoch + 1) * self.decay_prop/ self.total_steps)\n            for group in self.optimizer.param_groups\n        ]\n\n@LR_SCHEDULER_REGISTRY.register()\nclass MultiStepRestartLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        milestones,\n        restarts=None,\n        weights=None,\n        gamma=0.1,\n        clear_state=False,\n        last_epoch=-1,\n    ):\n        self.milestones = Counter(milestones)\n        self.gamma = gamma\n        self.clear_state = clear_state\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch in self.restarts:\n            if self.clear_state:\n                self.optimizer.state = defaultdict(dict)\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        if self.last_epoch not in self.milestones:\n            return [group[\"lr\"] for group in self.optimizer.param_groups]\n        return [\n            group[\"lr\"] * self.gamma ** self.milestones[self.last_epoch]\n            for group in self.optimizer.param_groups\n        ]\n\n@LR_SCHEDULER_REGISTRY.register()\nclass CosineAnnealingRestartLR(_LRScheduler):\n    def __init__(\n        self, optimizer, T_period, restarts=None, weights=None, eta_min=0, last_epoch=-1\n    ):\n        self.T_period = T_period\n        self.T_max = self.T_period[0]  # current T period\n        self.eta_min = eta_min\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        self.last_restart = 0\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch == 0:\n            return self.base_lrs\n        elif self.last_epoch in self.restarts:\n            self.last_restart = self.last_epoch\n            self.T_max = self.T_period[self.restarts.index(self.last_epoch) + 1]\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        elif (self.last_epoch - self.last_restart - 1 - self.T_max) % (\n            2 * self.T_max\n        ) == 0:\n            return [\n                group[\"lr\"]\n                + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2\n                for base_lr, group in zip(self.base_lrs, 
self.optimizer.param_groups)\n            ]\n        return [\n            (1 + math.cos(math.pi * (self.last_epoch - self.last_restart) / self.T_max))\n            / (\n                1\n                + math.cos(\n                    math.pi * ((self.last_epoch - self.last_restart) - 1) / self.T_max\n                )\n            )\n            * (group[\"lr\"] - self.eta_min)\n            + self.eta_min\n            for group in self.optimizer.param_groups\n        ]\n"
  },
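  {
    "path": "codes/config/PDM-SR/example_lr_scheduler.py",
    "content": "\"\"\"A minimal, hypothetical usage sketch (this file is not part of the\noriginal release): it steps LinearDecayLR from archs/lr_scheduler.py to show\nthe linear schedule. Run from codes/config/PDM-SR, like count_flops.py.\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.lr_scheduler import LinearDecayLR\n\noptimizer = torch.optim.Adam([torch.nn.Parameter(torch.zeros(1))], lr=1e-4)\n\n# with decay_prop=1.0 the lr reaches zero after total_steps\nscheduler = LinearDecayLR(optimizer, decay_prop=1.0, total_steps=5)\n\nfor step in range(5):\n    optimizer.step()\n    scheduler.step()\n    print(step, optimizer.param_groups[0][\"lr\"])\n"
  },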
  {
    "path": "codes/config/PDM-SR/archs/module_util.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\n\ndef initialize_weights(net_l, scale=1):\n    if not isinstance(net_l, list):\n        net_l = [net_l]\n    for net in net_l:\n        for m in net.modules():\n            if isinstance(m, nn.Conv2d):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale  # for residual block\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                init.constant_(m.weight, 1)\n                init.constant_(m.bias.data, 0.0)\n\n\ndef make_layer(block, n_layers):\n    layers = []\n    for _ in range(n_layers):\n        layers.append(block())\n    return nn.Sequential(*layers)\n\n\nclass ResidualBlock_noBN(nn.Module):\n    \"\"\"Residual block w/o BN\n    ---Conv-ReLU-Conv-+-\n     |________________|\n    \"\"\"\n\n    def __init__(self, nf=64):\n        super(ResidualBlock_noBN, self).__init__()\n        self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n\n        # initialization\n        initialize_weights([self.conv1, self.conv2], 0.1)\n\n    def forward(self, x):\n        identity = x\n        out = F.relu(self.conv1(x), inplace=True)\n        out = self.conv2(out)\n        return identity + out\n\n\ndef flow_warp(x, flow, interp_mode=\"bilinear\", padding_mode=\"zeros\"):\n    \"\"\"Warp an image or feature map with optical flow\n    Args:\n        x (Tensor): size (N, C, H, W)\n        flow (Tensor): size (N, H, W, 2), normal value\n        interp_mode (str): 'nearest' or 'bilinear'\n        padding_mode (str): 'zeros' or 'border' or 'reflection'\n\n    Returns:\n        Tensor: warped image or feature map\n    \"\"\"\n    assert x.size()[-2:] == flow.size()[1:3]\n    B, C, H, W = x.size()\n    # mesh grid\n    grid_y, grid_x = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))\n    grid = torch.stack((grid_x, grid_y), 2).float()  # W(x), H(y), 2\n    grid.requires_grad = False\n    grid = grid.type_as(x)\n    vgrid = grid + flow\n    # scale grid to [-1,1]\n    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(W - 1, 1) - 1.0\n    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(H - 1, 1) - 1.0\n    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)\n    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode)\n    return output\n"
  },
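  {
    "path": "codes/config/PDM-SR/example_flow_warp.py",
    "content": "\"\"\"A minimal, hypothetical usage sketch (this file is not part of the\noriginal release): it warps a tensor with zero optical flow via flow_warp\nfrom archs/module_util.py; zero flow should reproduce the input.\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.module_util import flow_warp\n\nx = torch.rand(1, 3, 16, 16)\nflow = torch.zeros(1, 16, 16, 2)  # (N, H, W, 2): zero displacement\n\nwarped = flow_warp(x, flow)\nprint(torch.allclose(warped, x, atol=1e-5))  # expected: True\n"
  },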
  {
    "path": "codes/config/PDM-SR/archs/rcan.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\nclass CALayer(nn.Module):\n    def __init__(self, channel, reduction=16):\n        super(CALayer, self).__init__()\n        # global average pooling: feature --> point\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        # feature channel downscale and upscale --> channel weight\n        self.conv_du = nn.Sequential(\n            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),\n            
nn.ReLU(inplace=True),\n            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),\n            nn.Sigmoid(),\n        )\n\n    def forward(self, x):\n        y = self.avg_pool(x)\n        y = self.conv_du(y)\n        return x * y\n\n\n## Residual Channel Attention Block (RCAB)\nclass RCAB(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        reduction,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(RCAB, self).__init__()\n        modules_body = []\n        for i in range(2):\n            modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                modules_body.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                modules_body.append(act)\n        modules_body.append(CALayer(n_feat, reduction))\n        self.body = nn.Sequential(*modules_body)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x)\n        # res = self.body(x).mul(self.res_scale)\n        res += x\n        return res\n\n\n## Residual Group (RG)\nclass ResidualGroup(nn.Module):\n    def __init__(\n        self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks\n    ):\n        super(ResidualGroup, self).__init__()\n        modules_body = []\n        modules_body = [\n            RCAB(\n                conv,\n                n_feat,\n                kernel_size,\n                reduction,\n                bias=True,\n                bn=False,\n                act=nn.ReLU(True),\n                res_scale=1,\n            )\n            for _ in range(n_resblocks)\n        ]\n        modules_body.append(conv(n_feat, n_feat, kernel_size))\n        self.body = nn.Sequential(*modules_body)\n\n    def forward(self, x):\n        res = self.body(x)\n        res += x\n        return res\n\n\n## Residual Channel Attention Network (RCAN)\n@ARCH_REGISTRY.register()\nclass RCAN(nn.Module):\n    def __init__(self, ng, nb, nf, reduction=16, upscale=4, conv=default_conv):\n        super(RCAN, self).__init__()\n\n        n_resgroups = ng\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        reduction = reduction\n        scale = upscale\n\n        act = nn.ReLU(True)\n\n        # RGB mean for DIV2K\n        rgb_mean = (0.4488, 0.4371, 0.4040)\n        rgb_std = (1.0, 1.0, 1.0)\n        self.sub_mean = MeanShift(1.0, rgb_mean, rgb_std, -1)\n\n        # define head module\n        modules_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        modules_body = [\n            ResidualGroup(\n                conv,\n                n_feats,\n                kernel_size,\n                reduction,\n                act=act,\n                res_scale=1.0,\n                n_resblocks=nb,\n            )\n            for _ in range(ng)\n        ]\n\n        modules_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        modules_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.add_mean = MeanShift(1.0, rgb_mean, rgb_std, 1)\n\n        self.head = nn.Sequential(*modules_head)\n        self.body = nn.Sequential(*modules_body)\n        self.tail = nn.Sequential(*modules_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = 
self.tail(res)\n        x = self.add_mean(x)\n\n        return x\n\n    def load_state_dict(self, state_dict, strict=False):\n        own_state = self.state_dict()\n        for name, param in state_dict.items():\n            if name in own_state:\n                if isinstance(param, nn.Parameter):\n                    param = param.data\n                try:\n                    own_state[name].copy_(param)\n                except Exception:\n                    if name.find(\"tail\") >= 0:\n                        print(\"Replace pre-trained upsampler to new one...\")\n                    else:\n                        raise RuntimeError(\n                            \"While copying the parameter named {}, \"\n                            \"whose dimensions in the model are {} and \"\n                            \"whose dimensions in the checkpoint are {}.\".format(\n                                name, own_state[name].size(), param.size()\n                            )\n                        )\n            elif strict:\n                if name.find(\"tail\") == -1:\n                    raise KeyError('unexpected key \"{}\" in state_dict'.format(name))\n\n        if strict:\n            missing = set(own_state.keys()) - set(state_dict.keys())\n            if len(missing) > 0:\n                raise KeyError('missing keys in state_dict: \"{}\"'.format(missing))\n"
  },
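  {
    "path": "codes/config/PDM-SR/example_rcan.py",
    "content": "\"\"\"A minimal, hypothetical usage sketch (this file is not part of the\noriginal release): a tiny RCAN from archs/rcan.py and its x4 output shape;\nthe real ng/nb/nf values come from the YAML options.\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.rcan import RCAN\n\nnet = RCAN(ng=2, nb=2, nf=16, reduction=4, upscale=4)\nwith torch.no_grad():\n    sr = net(torch.rand(1, 3, 24, 24))\nprint(sr.shape)  # torch.Size([1, 3, 96, 96])\n"
  },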
  {
    "path": "codes/config/PDM-SR/archs/rrdb.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\nclass ResidualDenseBlock_5C(nn.Module):\n    def __init__(self, nf=64, gc=32, bias=True):\n        super(ResidualDenseBlock_5C, self).__init__()\n        # gc: growth channel, i.e. intermediate channels\n        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)\n        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)\n        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1\n        )\n\n    def forward(self, x):\n        x1 = self.lrelu(self.conv1(x))\n        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))\n        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))\n        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))\n        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))\n        return x5 * 0.2 + x\n\n\nclass RRDB(nn.Module):\n    \"\"\"Residual in Residual Dense Block\"\"\"\n\n    def __init__(self, nf, gc=32):\n        super(RRDB, self).__init__()\n        self.rdb1 = ResidualDenseBlock_5C(nf, gc)\n        self.rdb2 = ResidualDenseBlock_5C(nf, gc)\n        self.rdb3 = ResidualDenseBlock_5C(nf, gc)\n\n    def forward(self, x):\n        out = self.rdb1(x)\n        out = self.rdb2(out)\n        out = self.rdb3(out)\n        return out * 0.2 + x\n\n\n@ARCH_REGISTRY.register()\nclass RRDBNet(nn.Module):\n    def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4):\n        super(RRDBNet, self).__init__()\n        self.upscale = upscale\n        RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.body = make_layer(RRDB_block_f, nb)\n        self.conv_body = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        #### upsampling\n        self.conv_up1 = nn.Conv2d(nf, nf, 3, 1, 1)\n        if upscale == 4:\n            self.conv_up2 = nn.Conv2d(nf, nf, 3, 1, 1)\n        self.conv_hr = nn.Conv2d(nf, nf, 3, 1, 1)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.conv_first(x)\n        trunk = self.conv_body(self.body(fea))\n        fea = fea + trunk\n\n        if self.upscale == 2 or self.upscale == 3:\n            fea = self.lrelu(\n                self.conv_up1(\n                    F.interpolate(fea, scale_factor=self.upscale, mode=\"nearest\")\n                )\n            )\n        if self.upscale == 4:\n            fea = self.lrelu(\n                self.conv_up1(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n            fea = self.lrelu(\n                self.conv_up2(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n        out = self.conv_last(self.lrelu(self.conv_hr(fea)))\n\n        return out\n"
  },
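  {
    "path": "codes/config/PDM-SR/example_rrdb.py",
    "content": "\"\"\"A minimal, hypothetical usage sketch (this file is not part of the\noriginal release): RRDBNet from archs/rrdb.py upsamples x4 with two\nnearest-neighbour + conv stages (x2/x3 use a single stage).\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.rrdb import RRDBNet\n\nnet = RRDBNet(in_nc=3, out_nc=3, nf=16, nb=2, gc=8, upscale=4)\nwith torch.no_grad():\n    out = net(torch.rand(1, 3, 24, 24))\nprint(out.shape)  # torch.Size([1, 3, 96, 96])\n"
  },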
  {
    "path": "codes/config/PDM-SR/archs/srresnet.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\n@ARCH_REGISTRY.register()\nclass MSRResNet(nn.Module):\n    \"\"\"modified SRResNet\"\"\"\n\n    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):\n        super(MSRResNet, self).__init__()\n        self.upscale = upscale\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        basic_block = functools.partial(ResidualBlock_noBN, nf=nf)\n        self.recon_trunk = make_layer(basic_block, nb)\n\n        # upsampling\n        if self.upscale == 2:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n        elif self.upscale == 3:\n            self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(3)\n        elif self.upscale == 4:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1\n        )\n        if self.upscale == 4:\n            initialize_weights(self.upconv2, 0.1)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv_first(x))\n        out = self.recon_trunk(fea)\n\n        if self.upscale == 4:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n        elif self.upscale == 3 or self.upscale == 2:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n\n        out = self.conv_last(self.lrelu(self.HRconv(out)))\n        base = F.interpolate(\n            x, scale_factor=self.upscale, mode=\"bilinear\", align_corners=False\n        )\n        out += base\n        return out\n"
  },
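  {
    "path": "codes/config/PDM-SR/example_srresnet.py",
    "content": "\"\"\"A minimal, hypothetical usage sketch (this file is not part of the\noriginal release): MSRResNet from archs/srresnet.py adds a bilinearly\nupsampled copy of the input, so the trunk only learns the residual detail.\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\nimport torch.nn.functional as F\n\nfrom archs.srresnet import MSRResNet\n\nnet = MSRResNet(in_nc=3, out_nc=3, nf=16, nb=2, upscale=4)\nlr = torch.rand(1, 3, 24, 24)\nwith torch.no_grad():\n    sr = net(lr)\n\nbase = F.interpolate(lr, scale_factor=4, mode=\"bilinear\", align_corners=False)\nprint(sr.shape, (sr - base).abs().mean().item())  # learned residual magnitude\n"
  },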
  {
    "path": "codes/config/PDM-SR/archs/vgg.py",
    "content": "import os\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn as nn\nfrom torchvision.models import vgg as vgg\n\nfrom utils.registry import ARCH_REGISTRY\n\nVGG_PRETRAIN_PATH = \"checkpoints/pretrained_models/vgg19-dcbb9e9d.pth\"\nNAMES = {\n    \"vgg11\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg13\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg16\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"pool5\",\n    ],\n    \"vgg19\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"conv3_4\",\n        \"relu3_4\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"conv4_4\",\n        \"relu4_4\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"conv5_4\",\n        \"relu5_4\",\n        \"pool5\",\n    ],\n}\n\n\ndef insert_bn(names):\n    \"\"\"Insert bn layer after each conv.\n    Args:\n        names (list): The list of layer names.\n    Returns:\n        list: The list of layer names with bn layers.\n    \"\"\"\n    names_bn = []\n    for name in names:\n        names_bn.append(name)\n        if \"conv\" in name:\n            position = name.replace(\"conv\", \"\")\n            names_bn.append(\"bn\" + position)\n    return names_bn\n\n\n@ARCH_REGISTRY.register()\nclass VGGFeatureExtractor(nn.Module):\n    \"\"\"VGG network for feature extraction.\n    In this implementation, we allow users to choose whether use normalization\n    in the input feature and the type of vgg network. 
Note that the pretrained\n    path must fit the vgg type.\n    Args:\n        layer_name_list (list[str]): Forward function returns the corresponding\n            features according to the layer_name_list.\n            Example: {'relu1_1', 'relu2_1', 'relu3_1'}.\n        vgg_type (str): Set the type of vgg network. Default: 'vgg19'.\n        use_input_norm (bool): If True, normalize the input image. Importantly,\n            the input feature must in the range [0, 1]. Default: True.\n        range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].\n            Default: False.\n        requires_grad (bool): If true, the parameters of VGG network will be\n            optimized. Default: False.\n        remove_pooling (bool): If true, the max pooling operations in VGG net\n            will be removed. Default: False.\n        pooling_stride (int): The stride of max pooling operation. Default: 2.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_name_list,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        requires_grad=False,\n        remove_pooling=False,\n        pooling_stride=2,\n    ):\n        super(VGGFeatureExtractor, self).__init__()\n\n        self.layer_name_list = layer_name_list\n        self.use_input_norm = use_input_norm\n        self.range_norm = range_norm\n\n        self.names = NAMES[vgg_type.replace(\"_bn\", \"\")]\n        if \"bn\" in vgg_type:\n            self.names = insert_bn(self.names)\n\n        # only borrow layers that will be used to avoid unused params\n        max_idx = 0\n        for v in layer_name_list:\n            idx = self.names.index(v)\n            if idx > max_idx:\n                max_idx = idx\n\n        if os.path.exists(VGG_PRETRAIN_PATH):\n            vgg_net = getattr(vgg, vgg_type)(pretrained=False)\n            state_dict = torch.load(\n                VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage\n            )\n            vgg_net.load_state_dict(state_dict)\n        else:\n            vgg_net = getattr(vgg, vgg_type)(pretrained=True)\n\n        features = vgg_net.features[: max_idx + 1]\n\n        modified_net = OrderedDict()\n        for k, v in zip(self.names, features):\n            if \"pool\" in k:\n                # if remove_pooling is true, pooling operation will be removed\n                if remove_pooling:\n                    continue\n                else:\n                    # in some cases, we may want to change the default stride\n                    modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride)\n            else:\n                modified_net[k] = v\n\n        self.vgg_net = nn.Sequential(modified_net)\n\n        if not requires_grad:\n            self.vgg_net.eval()\n            for param in self.parameters():\n                param.requires_grad = False\n        else:\n            self.vgg_net.train()\n            for param in self.parameters():\n                param.requires_grad = True\n\n        if self.use_input_norm:\n            # the mean is for image with range [0, 1]\n            self.register_buffer(\n                \"mean\", torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)\n            )\n            # the std is for image with range [0, 1]\n            self.register_buffer(\n                \"std\", torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)\n            )\n\n    def forward(self, x):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, 
c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        if self.range_norm:\n            x = (x + 1) / 2\n        if self.use_input_norm:\n            x = (x - self.mean) / self.std\n\n        output = {}\n        for key, layer in self.vgg_net._modules.items():\n            x = layer(x)\n            if key in self.layer_name_list:\n                output[key] = x.clone()\n\n        return output\n"
  },
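  {
    "path": "codes/config/PDM-SR/example_vgg_features.py",
    "content": "\"\"\"A minimal, hypothetical usage sketch (this file is not part of the\noriginal release): VGGFeatureExtractor from archs/vgg.py returns a dict of\nfeature maps; torchvision downloads the vgg19 weights unless the file at\nVGG_PRETRAIN_PATH exists.\"\"\"\nimport sys\n\nsys.path.append(\"../../\")\n\nimport torch\n\nfrom archs.vgg import VGGFeatureExtractor\n\nextractor = VGGFeatureExtractor(layer_name_list=[\"relu1_1\", \"relu2_1\"])\nwith torch.no_grad():\n    feats = extractor(torch.rand(1, 3, 64, 64))  # input in [0, 1]\nfor name, f in feats.items():\n    print(name, tuple(f.shape))  # relu1_1: 64 channels, relu2_1: 128 channels\n"
  },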
  {
    "path": "codes/config/PDM-SR/count_flops.py",
    "content": "import argparse\nimport sys\n\nimport torch\nfrom torchsummaryX import summary\n\nsys.path.append(\"../../\")\nimport utils.option as option\nfrom models import create_model\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"--opt\",\n    type=str,\n    default=\"options/setting1/test/test_setting1_x4.yml\",\n    help=\"Path to option YMAL file of Predictor.\",\n)\nargs = parser.parse_args()\nopt = option.parse(args.opt, root_path=\".\", is_train=True)\n\nopt = option.dict_to_nonedict(opt)\nmodel = create_model(opt)\n\ntest_tensor = torch.randn(1, 3, 270, 180).cuda()\nfor name, net in model.networks.items():\n    summary(net.cuda(), x=test_tensor)\n    print(\"Above are results for net {}\".format(name))\n    input()\n"
  },
  {
    "path": "codes/config/PDM-SR/inference.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport os.path as osp\nimport random\nimport sys\nimport cv2\nfrom collections import defaultdict\nfrom glob import glob\nfrom tqdm import tqdm\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom data.data_sampler import DistIterSampler\nfrom metrics import IQA\nfrom models import create_model\n\n\n\n#### options\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"-opt\",\n    type=str,\n    default=\"options/test/2020Track2.yml\",\n    help=\"Path to options YMAL file.\",\n)\nparser.add_argument(\"-input_dir\", type=str, default=\"../../../data_samples/LR\")\nparser.add_argument(\"-output_dir\", type=str, default=\"../../../data_samples/PDM-SR/\")\nargs = parser.parse_args()\nopt = option.parse(args.opt, is_train=False)\n\nopt = option.dict_to_nonedict(opt)\n\nmodel = create_model(opt)\n\nif not osp.exists(args.output_dir):\n    os.makedirs(args.output_dir)\n\ntest_files = glob(osp.join(args.input_dir, \"*\"))\nfor inx, path in tqdm(enumerate(test_files)):\n    name = path.split(\"/\")[-1].split(\".\")[0]\n\n    img = cv2.imread(path)[:, :, [2, 1, 0]]\n    img = img.transpose(2, 0, 1)[None] / 255\n    img_t = torch.as_tensor(np.ascontiguousarray(img)).float()\n\n    model.test({\"src\": img_t})\n    outdict = model.get_current_visuals()\n\n    sr = outdict[\"sr\"]\n    sr_im = util.tensor2img(sr)\n\n    save_path = osp.join(args.output_dir, \"{}_x{}.png\".format(name, opt[\"scale\"]))\n    cv2.imwrite(save_path, sr_im)\n"
  },
  {
    "path": "codes/config/PDM-SR/models/__init__.py",
    "content": "import importlib\nimport logging\nimport os\nimport os.path as osp\n\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\nmodel_folder = osp.dirname(__file__)\nmodel_names = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(model_folder)\n    if v.endswith(\"_model.py\")\n]\n_model_modules = [\n    importlib.import_module(f\"models.{file_name}\") for file_name in model_names\n]\n\n\ndef create_model(opt, **kwarg):\n    model = opt[\"model\"]\n    m = MODEL_REGISTRY.get(model)(opt, **kwarg)\n    logger.info(\"Model [{:s}] is created.\".format(m.__class__.__name__))\n    return m\n"
  },
  {
    "path": "codes/config/PDM-SR/models/base_model.py",
    "content": "import logging\nimport os\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\n\nfrom archs import build_loss, build_network, build_scheduler\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass BaseModel:\n    def __init__(self, opt):\n\n        self.opt = opt\n\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n            self.world_size = torch.distributed.get_world_size()\n        else:\n            self.rank = 0  # non dist training\n\n        self.device = torch.device(\"cuda\" if opt[\"gpu_ids\"] is not None else \"cpu\")\n        self.is_train = opt[\"is_train\"]\n        self.log_dict = OrderedDict()\n\n        self.data_names = []\n        self.networks = {}\n\n        self.optimizers = {}\n        self.schedulers = {}\n\n    def setup_train(self, train_opt):\n        # define losses\n        loss_opt = train_opt[\"losses\"]\n        self.losses = self.build_losses(loss_opt)\n\n        # build optmizers\n        optimizer_opts = train_opt[\"optimizers\"]\n        self.optimizers = self.build_optimizers(optimizer_opts)\n\n        # set schedulers\n        scheduler_opts = train_opt[\"schedulers\"]\n        self.schedulers = self.build_schedulers(scheduler_opts)\n\n        # set to training state\n        self.set_network_state(self.networks.keys(), \"train\")\n\n    def feed_data(self, data):\n        pass\n\n    def optimize_parameters(self):\n        pass\n\n    def get_current_visuals(self):\n        pass\n\n    def get_current_losses(self):\n        pass\n\n    def print_network(self):\n        pass\n\n    def save(self, label):\n        pass\n\n    def load(self):\n        pass\n\n    def build_network(self, net_opt):\n\n        net = build_network(net_opt)\n\n        if isinstance(net, nn.Module):\n            net = self.model_to_device(net)\n\n            if net_opt.get(\"pretrain\"):\n                pretrain = net_opt.pop(\"pretrain\")\n                self.load_network(net, pretrain[\"path\"], pretrain[\"strict_load\"])\n\n            self.print_network(net)\n        return net\n\n    def build_losses(self, loss_opt):\n        losses = {}\n\n        defined_loss_names = list(loss_opt.keys())\n        assert set(defined_loss_names).issubset(set(self.loss_names))\n\n        for name in defined_loss_names:\n            loss_conf = loss_opt.get(name)\n            if loss_conf[\"weight\"] > 0:\n                self.loss_weights[name] = loss_conf.pop(\"weight\")\n                losses[name] = build_loss(loss_conf).to(self.device)\n\n        return losses\n\n    def build_optimizers(self, optim_opts):\n        optimizers = {}\n\n        if \"default\" in optim_opts.keys():\n            default_optim = optim_opts.pop(\"default\")\n\n        defined_optimizer_names = list(optim_opts.keys())\n        assert set(defined_optimizer_names).issubset(self.networks.keys())\n\n        for name in defined_optimizer_names:\n            optim_opt = optim_opts[name]\n            if optim_opt is None:\n                optim_opt = default_optim.copy()\n\n            params = []\n            for v in self.networks[name].parameters():\n                if v.requires_grad:\n                    params.append(v)\n\n            optim_type = optim_opt.pop(\"type\")\n            optimizer = getattr(torch.optim, optim_type)(params=params, **optim_opt)\n            optimizers[name] = optimizer\n\n  
      return optimizers\n\n    def build_schedulers(self, scheduler_opts):\n        \"\"\"Set up scheduler.\"\"\"\n        schedulers = {}\n        if \"default\" in scheduler_opts.keys():\n            default_opt = scheduler_opts.pop(\"default\")\n\n        for name in self.optimizers.keys():\n            scheduler_opt = scheduler_opts[name]\n            if scheduler_opt is None:\n                scheduler_opt = default_opt.copy()\n\n            schedulers[name] = build_scheduler(self.optimizers[name], scheduler_opt)\n\n        return schedulers\n\n    def model_to_device(self, net):\n        \"\"\"Model to device. It also warps models with DistributedDataParallel\n        or DataParallel.\n        Args:\n            net (nn.Module)\n        \"\"\"\n        net = net.to(self.device)\n        if self.opt[\"dist\"]:\n            net = DistributedDataParallel(net, device_ids=[torch.cuda.current_device()])\n        else:\n            net = DataParallel(net)\n        return net\n\n    def print_network(self, net):\n        # Generator\n        s, n = self.get_network_description(net)\n        if isinstance(net, nn.DataParallel) or isinstance(net, DistributedDataParallel):\n            net_struc_str = \"{} - {}\".format(\n                net.__class__.__name__, net.module.__class__.__name__\n            )\n        else:\n            net_struc_str = \"{}\".format(net.__class__.__name__)\n        if self.rank <= 0:\n            logger.info(\n                \"Network G structure: {}, with parameters: {:,d}\".format(\n                    net_struc_str, n\n                )\n            )\n            logger.info(s)\n\n    def set_optimizer(self, names, operation):\n        for name in names:\n            getattr(self.optimizers[name], operation)()\n\n    def set_requires_grad(self, names, requires_grad):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                for v in self.networks[name].parameters():\n                    v.requires_grad = requires_grad\n\n    def set_network_state(self, names, state):\n        for name in names:\n            if isinstance(self.networks[name], nn.Module):\n                getattr(self.networks[name], state)()\n\n    def clip_grad_norm(self, names, norm):\n        for name in names:\n            nn.utils.clip_grad_norm_(self.networks[name].parameters(), max_norm=norm)\n\n    def _set_lr(self, lr_groups_l):\n        \"\"\"set learning rate for warmup,\n        lr_groups_l: list for lr_groups. 
each for a optimizer\"\"\"\n        for optimizer, lr_groups in zip(self.optimizers, lr_groups_l):\n            for param_group, lr in zip(optimizer.param_groups, lr_groups):\n                param_group[\"lr\"] = lr\n\n    def _get_init_lr(self):\n        # get the initial lr, which is set by the scheduler\n        init_lr_groups_l = []\n        for optimizer in self.optimizers:\n            init_lr_groups_l.append([v[\"initial_lr\"] for v in optimizer.param_groups])\n        return init_lr_groups_l\n\n    def update_learning_rate(self, cur_iter, warmup_iter=-1):\n        for _, scheduler in self.schedulers.items():\n            scheduler.step()\n        #### set up warm up learning rate\n        if cur_iter < warmup_iter:\n            # get initial lr for each group\n            init_lr_g_l = self._get_init_lr()\n            # modify warming-up learning rates\n            warm_up_lr_l = []\n            for init_lr_g in init_lr_g_l:\n                warm_up_lr_l.append([v / warmup_iter * cur_iter for v in init_lr_g])\n            # set learning rate\n            self._set_lr(warm_up_lr_l)\n\n    def get_current_learning_rate(self):\n        # return self.schedulers[0].get_lr()[0]\n        return list(self.optimizers.values())[0].param_groups[0][\"lr\"]\n\n    def get_network_description(self, network):\n        \"\"\"Get the string and total parameters of the network\"\"\"\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        s = str(network)\n        n = sum(map(lambda x: x.numel(), network.parameters()))\n        return s, n\n\n    def save_network(self, network, network_label, iter_label):\n        save_filename = \"{}_{}.pth\".format(iter_label, network_label)\n        save_path = os.path.join(self.opt[\"path\"][\"models\"], save_filename)\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        state_dict = network.state_dict()\n        for key, param in state_dict.items():\n            state_dict[key] = param.cpu()\n        torch.save(state_dict, save_path)\n\n    def save(self, iter_label):\n        for name in self.optimizers.keys():\n            self.save_network(self.networks[name], name, iter_label)\n\n    def load_network(self, network, load_path, strict=True):\n        if load_path is not None:\n            if isinstance(network, nn.DataParallel) or isinstance(\n                network, DistributedDataParallel\n            ):\n                network = network.module\n            load_net = torch.load(load_path)\n            load_net_clean = OrderedDict()  # remove unnecessary 'module.'\n            for k, v in load_net.items():\n                if k.startswith(\"module.\"):\n                    load_net_clean[k[7:]] = v\n                else:\n                    load_net_clean[k] = v\n            network.load_state_dict(load_net_clean, strict=strict)\n\n    def save_training_state(self, epoch, iter_step):\n        \"\"\"Saves training state during training, which will be used for resuming\"\"\"\n        state = {\"epoch\": epoch, \"iter\": iter_step, \"schedulers\": {}, \"optimizers\": {}}\n        for k, s in self.schedulers.items():\n            state[\"schedulers\"][k] = s.state_dict()\n        for k, o in self.optimizers.items():\n            state[\"optimizers\"][k] = o.state_dict()\n        save_filename = \"{}.state\".format(iter_step)\n        
save_path = os.path.join(self.opt[\"path\"][\"training_state\"], save_filename)\n        torch.save(state, save_path)\n\n    def resume_training(self, resume_state):\n        \"\"\"Resume the optimizers and schedulers for training\"\"\"\n        resume_optimizers = resume_state[\"optimizers\"]\n        resume_schedulers = resume_state[\"schedulers\"]\n        assert len(resume_optimizers) == len(\n            self.optimizers\n        ), \"Wrong lengths of optimizers\"\n        assert len(resume_schedulers) == len(\n            self.schedulers\n        ), \"Wrong lengths of schedulers\"\n        for name, o in resume_optimizers.items():\n            self.optimizers[name].load_state_dict(o)\n        for name, s in resume_schedulers.items():\n            self.schedulers[name].load_state_dict(s)\n\n    def reduce_loss_dict(self, loss_dict):\n        \"\"\"reduce loss dict.\n        In distributed training, it averages the losses among different GPUs .\n        Args:\n            loss_dict (OrderedDict): Loss dict.\n        \"\"\"\n        with torch.no_grad():\n            if self.opt[\"dist\"]:\n                keys = []\n                losses = []\n                for name, value in loss_dict.items():\n                    keys.append(name)\n                    losses.append(value)\n                losses = torch.stack(losses, 0)\n                torch.distributed.reduce(losses, dst=0)\n                if self.rank == 0:\n                    losses /= self.world_size\n                loss_dict = {key: loss for key, loss in zip(keys, losses)}\n\n            log_dict = OrderedDict()\n            for name, value in loss_dict.items():\n                log_dict[name] = value.mean().item()\n\n            return log_dict\n\n    def get_current_log(self):\n        return self.log_dict\n"
  },
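  {
    "path": "codes/config/PDM-SR/example_train_options.py",
    "content": "\"\"\"A hypothetical sketch (this file is not part of the original release) of\nthe dict structure BaseModel.setup_train consumes, mirroring the train\nsection of the YAML options: the 'default' entry under optimizers/schedulers\nis popped and copied into any network whose own entry is left null.\"\"\"\ntrain_opt = {\n    \"losses\": {\n        # 'weight' is popped into loss_weights; the rest goes to build_loss\n        \"sr_pix_sr\": {\"type\": \"L1Loss\", \"weight\": 1.0},\n    },\n    \"optimizers\": {\n        \"default\": {\"type\": \"Adam\", \"lr\": 1e-4, \"betas\": [0.9, 0.999]},\n        \"netSR\": None,  # falls back to a copy of 'default'\n    },\n    \"schedulers\": {\n        \"default\": {\"type\": \"MultiStepRestartLR\", \"milestones\": [200000], \"gamma\": 0.5},\n        \"netSR\": None,\n    },\n}\n# model.setup_train(train_opt) would then build losses, optimizers and\n# schedulers keyed by network name.\n"
  },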
  {
    "path": "codes/config/PDM-SR/models/deg_sr_model.py",
    "content": "import logging\nfrom collections import OrderedDict\nimport random\n\nimport torch\nimport torch.nn as nn\nfrom kornia.color import rgb_to_grayscale\n\nfrom utils.registry import MODEL_REGISTRY\n\nfrom .base_model import BaseModel\n\nlogger = logging.getLogger(\"base\")\n\nclass Quant(torch.autograd.Function):\n\n    @staticmethod\n    def forward(ctx, input):\n        output = torch.clamp(input, 0, 1)\n        output = (output * 255.).round() / 255.\n        return output\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        return grad_output\n\nclass Quantization(nn.Module):\n    def __init__(self):\n        super(Quantization, self).__init__()\n\n    def forward(self, input):\n        return Quant.apply(input)\n\n\n@MODEL_REGISTRY.register()\nclass DegSRModel(BaseModel):\n    def __init__(self, opt):\n        super().__init__(opt)\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n        else:\n            self.rank = -1  # non dist training\n\n        self.data_names = [\"src\", \"tgt\"]\n\n        self.network_names = [\"netDeg\", \"netSR\", \"netD1\", \"netD2\"]\n        self.networks = {}\n\n        self.loss_names = [\n            \"lr_adv\",\n            \"sr_adv\",\n            \"sr_pix_trans\",\n            \"sr_pix_sr\",\n            \"sr_percep\",\n            \"lr_quant\",\n            \"lr_gauss\",\n            \"noise_mean\",\n            \"color\"\n        ]\n        self.loss_weights = {}\n        self.losses = {}\n        self.optimizers = {}\n\n        # define networks and load pretrained models\n        nets_opt = opt[\"networks\"]\n        defined_network_names = list(nets_opt.keys())\n        assert set(defined_network_names).issubset(set(self.network_names))\n        \n        for name in defined_network_names:\n            setattr(self, name, self.build_network(nets_opt[name]))\n            self.networks[name] = getattr(self, name)\n            \n        if self.is_train:\n            train_opt = opt[\"train\"]\n            # setup loss, optimizers, schedulers\n            self.setup_train(opt[\"train\"])\n\n            self.max_grad_norm = train_opt[\"max_grad_norm\"]\n            self.quant = Quantization()\n            self.D_ratio = train_opt[\"D_ratio\"]\n            self.optim_sr = train_opt[\"optim_sr\"]\n            self.optim_deg = train_opt[\"optim_deg\"]\n            self.gray_dis = train_opt[\"gray_dis\"]\n\n            ## buffer\n            self.fake_lr_buffer = ShuffleBuffer(train_opt[\"buffer_size\"])\n            self.fake_hr_buffer = ShuffleBuffer(train_opt[\"buffer_size\"])\n\n    def feed_data(self, data):\n\n        self.syn_hr = data[\"tgt\"].to(self.device)\n        self.real_lr = data[\"src\"].to(self.device)\n\n    def deg_forward(self):\n        (\n            self.fake_real_lr,\n            self.predicted_kernel,\n            self.predicted_noise,\n         ) = self.netDeg(self.syn_hr)\n        if self.losses.get(\"sr_pix_trans\"):\n            self.fake_real_lr_quant = self.quant(self.fake_real_lr)\n            self.syn_sr = self.netSR(self.fake_real_lr_quant)\n    \n    def sr_forward(self):\n        if not self.optim_deg:\n            (\n                self.fake_real_lr,\n                self.predicted_kernel,\n                self.predicted_noise,\n            ) = self.netDeg(self.syn_hr)\n\n        self.fake_real_lr_quant = self.quant(self.fake_real_lr)\n        self.syn_sr = self.netSR(self.fake_real_lr_quant.detach())\n    \n    def optimize_trans_models(self, step, 
loss_dict):\n\n        self.set_requires_grad([\"netDeg\"], True)\n        self.deg_forward()\n        loss_G = 0\n\n        if self.losses.get(\"lr_adv\"):\n            self.set_requires_grad([\"netD1\"], False)\n            if self.gray_dis:\n                real = rgb_to_grayscale(self.real_lr)\n                fake = rgb_to_grayscale(self.fake_real_lr)\n            else:\n                real = self.real_lr\n                fake = self.fake_real_lr\n            g1_adv_loss = self.calculate_gan_loss_G(\n                self.netD1, self.losses[\"lr_adv\"], real, fake\n            )\n            loss_dict[\"g1_adv\"] = g1_adv_loss.item()\n            loss_G += self.loss_weights[\"lr_adv\"] * g1_adv_loss\n        \n        if self.losses.get(\"sr_pix_trans\"):\n            self.set_requires_grad([\"netSR\"], False)\n            sr_pix = self.losses[\"sr_pix_trans\"](self.syn_hr, self.syn_sr)\n            loss_dict[\"sr_pix_trans\"] = sr_pix.item()\n            loss_G += self.loss_weights[\"sr_pix_trans\"] * sr_pix\n        \n        if self.losses.get(\"noise_mean\"):\n            noise = self.predicted_noise\n            noise_mean = (\n                self.losses[\"noise_mean\"](noise, torch.zeros_like(noise))\n            )\n            loss_dict[\"noise_mean\"] = noise_mean.item()\n            loss_G += self.loss_weights[\"noise_mean\"] * noise_mean\n\n        self.set_optimizer(names=[\"netDeg\"], operation=\"zero_grad\")\n        loss_G.backward()\n        self.clip_grad_norm([\"netDeg\"], self.max_grad_norm)\n        self.set_optimizer(names=[\"netDeg\"], operation=\"step\")\n\n        ## update D\n        if step % self.D_ratio == 0:\n            self.set_requires_grad([\"netD1\"], True)\n            if self.gray_dis:\n                real = rgb_to_grayscale(self.real_lr)\n                fake = rgb_to_grayscale(self.fake_real_lr)\n            else:\n                real = self.real_lr\n                fake = self.fake_real_lr\n            loss_d1 = self.calculate_gan_loss_D(\n                self.netD1, self.losses[\"lr_adv\"],\n                real, self.fake_lr_buffer.choose(fake)\n            )\n            loss_dict[\"d1_adv\"] = loss_d1.item()\n            loss_D = self.loss_weights[\"lr_adv\"] * loss_d1\n            self.optimizers[\"netD1\"].zero_grad()\n            loss_D.backward()\n            self.clip_grad_norm([\"netD1\"], self.max_grad_norm)\n            self.optimizers[\"netD1\"].step()\n        \n        return loss_dict\n    \n    def optimize_sr_models(self, step, loss_dict):\n\n        self.set_requires_grad([\"netSR\"], True)\n        self.set_requires_grad([\"netDeg\"], False)\n        self.sr_forward()\n        loss_G = 0\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD2\"], False)\n            sr_adv_loss = self.calculate_gan_loss_G(\n                self.netD2, self.losses[\"sr_adv\"],\n                self.syn_hr, self.syn_sr\n            )\n            loss_dict[\"sr_adv\"] = sr_adv_loss.item()\n            loss_G += self.loss_weights[\"sr_adv\"] * sr_adv_loss\n        \n        if self.losses.get(\"sr_percep\"):\n            sr_percep, sr_style = self.losses[\"sr_percep\"](\n                self.syn_hr, self.syn_sr\n            )\n            loss_dict[\"sr_percep\"] = sr_percep.item()\n            if sr_style is not None:\n                loss_dict[\"sr_style\"] = sr_style.item()\n                loss_G += self.loss_weights[\"sr_percep\"] * sr_style\n            loss_G += self.loss_weights[\"sr_percep\"] * 
sr_percep\n\n        if self.losses.get(\"sr_pix_sr\"):\n            sr_pix = self.losses[\"sr_pix_sr\"](self.syn_hr, self.syn_sr)\n            loss_dict[\"sr_pix_sr\"] = sr_pix.item()\n            loss_G += self.loss_weights[\"sr_pix_sr\"] * sr_pix\n\n        self.set_optimizer(names=[\"netSR\"], operation=\"zero_grad\")\n        loss_G.backward()\n        self.clip_grad_norm([\"netSR\"], self.max_grad_norm)\n        self.set_optimizer(names=[\"netSR\"], operation=\"step\")\n\n        ## update D2\n        if step % self.D_ratio == 0:\n            if self.losses.get(\"sr_adv\"):\n                self.set_requires_grad([\"netD2\"], True)\n                loss_d2 = self.calculate_gan_loss_D(\n                    self.netD2, self.losses[\"sr_adv\"],\n                    self.syn_hr, self.fake_hr_buffer.choose(self.syn_sr)\n                )\n                loss_dict[\"d2_adv\"] = loss_d2.item()\n                loss_D = self.loss_weights[\"sr_adv\"] * loss_d2\n                self.optimizers[\"netD2\"].zero_grad()\n                loss_D.backward()\n                self.clip_grad_norm([\"netD2\"], self.max_grad_norm)\n                self.optimizers[\"netD2\"].step()\n        \n        return loss_dict\n\n\n    def optimize_parameters(self, step):\n        loss_dict = OrderedDict()\n\n        # optimize trans\n        if self.optim_deg:\n            loss_dict = self.optimize_trans_models(step, loss_dict)\n\n        # optimize SR\n        if self.optim_sr:\n            loss_dict = self.optimize_sr_models(step, loss_dict)\n\n        self.log_dict = loss_dict\n    \n    def calculate_gan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n\n        loss_real = criterion(d_pred_real, True, is_disc=True)\n        loss_fake = criterion(d_pred_fake, False, is_disc=True)\n\n        return (loss_real + loss_fake) / 2\n\n    def calculate_gan_loss_G(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake)\n        loss_real = criterion(d_pred_fake, True, is_disc=False)\n\n        return loss_real\n\n    def test(self, test_data, crop_size=None):\n        self.src = test_data[\"src\"].to(self.device)\n        if test_data.get(\"tgt\") is not None:\n            self.tgt = test_data[\"tgt\"].to(self.device)\n            \n        self.set_network_state([\"netSR\"], \"eval\")\n        with torch.no_grad():\n            if crop_size is None:\n                self.fake_tgt = self.netSR(self.src)\n            else:\n                self.fake_tgt = self.crop_test(self.src, crop_size)\n        self.set_network_state([\"netSR\"], \"train\")\n\n        if hasattr(self, \"netDeg\"):\n            self.set_network_state([\"netDeg\"], \"eval\")\n            if hasattr(self, \"tgt\"):\n                with torch.no_grad():\n                    self.fake_lr = self.netDeg(self.tgt)[0]\n            self.set_network_state([\"netDeg\"], \"train\")\n\n    def get_current_visuals(self, need_GT=True):\n        out_dict = OrderedDict()\n        out_dict[\"lr\"] = self.src.detach()[0].float().cpu()\n        out_dict[\"sr\"] = self.fake_tgt.detach()[0].float().cpu()\n        if hasattr(self, \"fake_lr\"):\n            out_dict[\"fake_lr\"] = self.fake_lr.detach()[0].float().cpu()\n        return out_dict\n    \n    def crop_test(self, lr, crop_size):\n        b, c, h, w = lr.shape\n        scale = self.opt[\"scale\"]\n\n        h_start = list(range(0, h-crop_size, crop_size))\n        w_start = list(range(0, w-crop_size, crop_size))\n\n       
 sr1 = torch.zeros(b, c, int(h*scale), int(w* scale), device=self.device) - 1\n        for hs in h_start:\n            for ws in w_start:\n                lr_patch = lr[:, :, hs: hs+crop_size, ws: ws+crop_size]\n                sr_patch = self.netSR(lr_patch)\n\n                sr1[:, :, \n                    int(hs*scale):int((hs+crop_size)*scale),\n                    int(ws*scale):int((ws+crop_size)*scale)\n                ] = sr_patch\n        \n        h_end = list(range(h, crop_size, -crop_size))\n        w_end = list(range(w, crop_size, -crop_size))\n\n        sr2 = torch.zeros(b, c, int(h*scale), int(w* scale), device=self.device) - 1\n        for hd in h_end:\n            for wd in w_end:\n                lr_patch = lr[:, :, hd-crop_size:hd, wd-crop_size:wd]\n                sr_patch = self.netSR(lr_patch)\n\n                sr2[:, :, \n                    int((hd-crop_size)*scale):int(hd*scale),\n                    int((wd-crop_size)*scale):int(wd*scale)\n                ] = sr_patch\n\n        mask1 = (\n            (sr1 == -1).float() * 0 + \n            (sr2 == -1).float() * 1 + \n            ((sr1 > 0) * (sr2 > 0)).float() * 0.5\n        )\n\n        mask2 = (\n            (sr1 == -1).float() * 1 + \n            (sr2 == -1).float() * 0 + \n            ((sr1 > 0) * (sr2 > 0)).float() * 0.5\n        )\n\n        sr = mask1 * sr1 + mask2 * sr2\n\n        return sr\n\n\nclass ShuffleBuffer():\n    \"\"\"Random choose previous generated images or ones produced by the latest generators.\n    :param buffer_size: the size of image buffer\n    :type buffer_size: int\n    \"\"\"\n\n    def __init__(self, buffer_size):\n        \"\"\"Initialize the ImagePool class.\n        :param buffer_size: the size of image buffer\n        :type buffer_size: int\n        \"\"\"\n        self.buffer_size = buffer_size\n        self.num_imgs = 0\n        self.images = []\n\n    def choose(self, images, prob=0.5):\n        \"\"\"Return an image from the pool.\n        :param images: the latest generated images from the generator\n        :type images: list\n        :param prob: probability (0~1) of return previous images from buffer\n        :type prob: float\n        :return: Return images from the buffer\n        :rtype: list\n        \"\"\"\n        if self.buffer_size == 0:\n            return  images\n        return_images = []\n        for image in images:\n            image = torch.unsqueeze(image.data, 0)\n            if self.num_imgs < self.buffer_size:\n                self.images.append(image)\n                return_images.append(image)\n                self.num_imgs += 1\n            else:\n                p = random.uniform(0, 1)\n                if p < prob:\n                    idx = random.randint(0, self.buffer_size - 1)\n                    stored_image = self.images[idx].clone()\n                    self.images[idx] = image\n                    return_images.append(stored_image)\n                else:\n                    return_images.append(image)\n        return_images = torch.cat(return_images, 0)\n        return return_images"
  },
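  {
    "path": "docs/examples/straight_through_quant.py",
    "content": "\"\"\"Illustrative sketch (not a file from the original repo): a minimal demo of\nthe straight-through quantization used by Quant/Quantization in DegSRModel\nabove. The forward pass simulates saving an LR image as 8-bit (clamp to\n[0, 1], round to 255 levels); the backward pass is an identity, so gradients\nstill reach the degradation generator upstream.\"\"\"\nimport torch\n\n\nclass Quant(torch.autograd.Function):\n    @staticmethod\n    def forward(ctx, x):\n        # clamp to [0, 1] and round to 255 discrete levels\n        return (torch.clamp(x, 0, 1) * 255.0).round() / 255.0\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        # straight-through estimator: identity gradient\n        return grad_output\n\n\nif __name__ == \"__main__\":\n    x = torch.rand(2, 3, 4, 4, requires_grad=True)\n    Quant.apply(x).sum().backward()\n    # despite the non-differentiable round(), x receives all-ones gradients\n    print(torch.all(x.grad == 1).item())  # True\n"
  },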
  {
    "path": "codes/config/PDM-SR/options/test/2017Track1.yml",
    "content": "#### general settings\nname: 2017Track1_percep\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [1]\n\nmetrics: [psnr, ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2017Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt: \n        mix: false\n        spatial: false\n        nc: 64\n        nf: 64\n        nb: 16\n        body_k: 1\n        head_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: ~\n    pretrain: \n      path: log/2017Track1/2017Track1_deg_best/models/latest_netDeg.pth\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: log/2017Track1/2017Track1_percep_best/models/latest_netSR.pth\n      strict_load: true\n"
  },
  {
    "path": "codes/config/PDM-SR/options/test/2018Track2.yml",
    "content": "#### general settings\nname: 2018Track2_psnr\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [best_psnr, best_ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test0:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n    \n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt: \n        mix: false\n        spatial: false\n        nc: 64\n        nf: 64\n        nb: 16\n        head_k: 1\n        body_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: \n        mix: true\n        nc: 3\n        nf: 64\n        nb: 16\n        head_k: 3\n        body_k: 3\n        dim: 3\n        zero_init: true\n    pretrain: \n      path: log/2018Track2/2018Track2_deg/models/195000_netDeg.pth\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: log/2018Track2/2018Track2_psnr/models/latest_netSR.pth\n      strict_load: true\n"
  },
  {
    "path": "codes/config/PDM-SR/options/test/2018Track4.yml",
    "content": "#### general settings\nname: 2018Track4_psnr\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [best_psnr, best_ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test0:\n    name: 2018Track4\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt: \n        mix: false\n        spatial: false\n        nc: 3\n        nf: 64\n        nb: 16\n        head_k: 1\n        body_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: \n        mix: true\n        nc: 3\n        nf: 64\n        nb: 16\n        head_k: 3\n        body_k: 3\n        dim: 3\n        zero_init: true\n    pretrain: \n      path: log/2018Track4_deg/models/latest_netDeg.pth\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: log/2018Track4_psnr/models/latest_netSR.pth\n      strict_load: true\n"
  },
  {
    "path": "codes/config/PDM-SR/options/test/2020Track1.yml",
    "content": "#### general settings\nname: 2020Track1_percep_bsrgan\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [psnr, ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test0:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt:\n        spatial: false\n        nc: 3\n        nf: 64\n        nb: 8\n        head_k: 1\n        body_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: \n        spatial: false\n        nc: 3\n        nf: 32\n        nb: 8\n        head_k: 3\n        body_k: 3\n        dim: 1\n        zero_init: false\n    pretrain: \n      path: log/2020Track1_deg/models/latest_netDeg.pth\n      strict_load: true\n\n  netSR:\n    which_network: RRDBNet\n    setting:\n      in_nc: 3\n      out_nc: 3\n      nf: 64\n      nb: 23\n      gc: 32\n      upscale: 4\n    pretrain: \n      path: ~\n      strict_load: true\n"
  },
  {
    "path": "codes/config/PDM-SR/options/test/2020Track2.yml",
    "content": "## general settings\nname: 2020Track2_percep\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [7]\n\nmetrics: [niqe] \n\ndatasets:\n  test0:\n    name: 2020Track2\n    mode: SingleDataset\n    data_type: lmdb\n    dataroot: /home/lzx/SRDatasets/NTIRE2020/track2/test.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: RRDBNet\n    setting:\n      in_nc: 3\n      out_nc: 3\n      nf: 64\n      nb: 23\n      gc: 32\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/PDM_Real_ESRGAN.pth\n      strict_load: true\n"
  },
  {
    "path": "codes/config/PDM-SR/options/train/deg/2017Track1.yml",
    "content": "#### general settings\nname: 2017Track1_deg\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [2]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/train_LR/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 4  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 192\n    src_size: 48\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2017Track1_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt: \n        mix: false\n        spatial: false\n        nc: 64\n        nf: 64\n        nb: 16\n        body_k: 1\n        head_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: ~\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain: \n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n  optim_deg: true\n  optim_sr: false\n\n  losses:\n    lr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n\n  optimizers:\n    netDeg:\n      type: Adam\n      lr: !!float 2e-4\n    netD1:\n      type: Adam\n      lr: !!float 2e-4\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 2e5\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n  \n"
  },
  {
    "path": "codes/config/PDM-SR/options/train/deg/2018Track2.yml",
    "content": "#### general settings\nname: 2018Track2_deg_mse10_mixfale\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [0]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 192\n    src_size: 48\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt: \n        mix: false\n        spatial: false\n        nc: 64\n        nf: 64\n        nb: 16\n        head_k: 1\n        body_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: \n        mix: false\n        nc: 3\n        nf: 64\n        nb: 16\n        head_k: 3\n        body_k: 3\n        dim: 3\n        zero_init: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n   netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain: \n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n  optim_deg: true\n  optim_sr: false\n\n  losses:\n    lr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    noise_mean:\n      type: MSELoss\n      weight: 10.0\n\n  optimizers:\n    netDeg:\n      type: Adam\n      lr: !!float 2e-4\n    netD1:\n      type: Adam\n      lr: !!float 2e-4\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 2e5\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n  \n"
  },
  {
    "path": "codes/config/PDM-SR/options/train/deg/2018Track4.yml",
    "content": "#### general settings\nname: 2018Track4_deg\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [2]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/x4.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt: \n        mix: false\n        spatial: false\n        nc: 3\n        nf: 64\n        nb: 16\n        head_k: 1\n        body_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: \n        mix: true\n        nc: 3\n        nf: 64\n        nb: 16\n        head_k: 3\n        body_k: 3\n        dim: 3\n        zero_init: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n   netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain: \n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n  optim_deg: true\n  optim_sr: false\n\n  losses:\n    lr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    noise_mean:\n      type: MSELoss\n      weight: 100.0\n\n  optimizers:\n    netDeg:\n      type: Adam\n      lr: !!float 2e-4\n    netD1:\n      type: Adam\n      lr: !!float 2e-4\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 2e5\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n  \n"
  },
  {
    "path": "codes/config/PDM-SR/options/train/deg/2020Track1.yml",
    "content": "#### general settings\nname: 2020Track1_deg_dim1\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [5]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/train_source.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt:\n        spatial: false\n        nc: 3\n        nf: 64\n        nb: 8\n        head_k: 1\n        body_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: \n        spatial: false\n        nc: 3\n        nf: 32\n        nb: 8\n        head_k: 3\n        body_k: 3\n        dim: 1\n        zero_init: false\n    pretrain: \n      path: ~\n      strict_load: true\n\n   netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain: \n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n  optim_deg: true\n  optim_sr: false\n\n  losses:\n    lr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n    \n    noise_mean:\n      type: MSELoss\n      weight: 100.0\n\n  optimizers:\n    netDeg:\n      type: Adam\n      lr: !!float 2e-4\n    netD1:\n      type: Adam\n      lr: !!float 2e-4\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 2e5\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/PDM-SR/options/train/deg/2020Track2.yml",
    "content": "#### general settings\nname: 2020Track2_deg_dim1\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [2]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track2/train_source.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2020Track2\n    mode: SingleImageDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot: /home/lzx/SRDatasets/NTIRE2020/track2/test_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt:\n        spatial: false\n        nc: 3\n        nf: 64\n        nb: 8\n        head_k: 1\n        body_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: \n        spatial: false\n        nc: 3\n        nf: 32\n        nb: 8\n        head_k: 3\n        body_k: 3\n        dim: 1\n        zero_init: false\n    pretrain: \n      path: ~\n      strict_load: true\n\n   netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 2\n    pretrain: \n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n  optim_deg: true\n  optim_sr: false\n\n  losses:\n    lr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 1.0\n\n    noise_mean:\n      type: MSELoss\n      weight: 1.0\n\n  optimizers:\n    netDeg:\n      type: Adam\n      lr: !!float 2e-4\n    netD1:\n      type: Adam\n      lr: !!float 2e-4\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 2e5\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n  \n"
  },
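  {
    "path": "docs/examples/yaml_float_tag.py",
    "content": "\"\"\"Illustrative sketch (not a file from the original repo): why the option\nfiles above write learning rates as '!!float 2e-4'. PyYAML implements the\nYAML 1.1 resolver, whose float pattern requires a decimal point, so a bare\n'2e-4' is loaded as the string '2e-4'; the explicit !!float tag forces a\nnumeric value.\"\"\"\nimport yaml\n\nbare = yaml.safe_load(\"lr: 2e-4\")\ntagged = yaml.safe_load(\"lr: !!float 2e-4\")\nprint(type(bare[\"lr\"]).__name__, bare[\"lr\"])      # str 2e-4\nprint(type(tagged[\"lr\"]).__name__, tagged[\"lr\"])  # float 0.0002\n"
  },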
  {
    "path": "codes/config/PDM-SR/options/train/percep/2017Track1.yml",
    "content": "#### general settings\nname: 2017Track1_percep_best\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [2]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/train_LR/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 4  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2017Track1_mini\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt: \n        mix: false\n        spatial: false\n        nc: 64\n        nf: 64\n        nb: 16\n        body_k: 1\n        head_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: ~\n    pretrain: \n      path: log/2017Track1_deg_best/models/latest_netDeg.pth\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n  \n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: ~\n      strict_load: true\n      \n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n  optim_sr: true\n  optim_deg: false\n\n  losses:\n\n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n    \n    sr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.05\n    \n    sr_percep:\n      type: PerceptualLoss\n      layer_weights:\n        'conv5_4': 1  # before relu\n      vgg_type: vgg19\n      use_input_norm: true\n      range_norm: false\n      perceptual_weight: 1.0\n      style_weight: 0\n      criterion: l1\n      weight: !!float 0.05\n\n  optimizers:\n    netSR:\n      type: Adam\n      lr: !!float 2e-4\n    netD2:\n      type: Adam\n      lr: !!float 2e-4\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/PDM-SR/options/train/percep/2018Track2.yml",
    "content": "#### general settings\nname: 2018Track2_percep\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [3]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 4  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt: \n        mix: false\n        spatial: false\n        nc: 64\n        nf: 64\n        nb: 16\n        head_k: 1\n        body_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: \n        mix: true\n        nc: 3\n        nf: 64\n        nb: 16\n        head_k: 3\n        body_k: 3\n        dim: 3\n        zero_init: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n  \n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n  optim_deg: false\n  optim_sr: true\n  \n  losses:\n\n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n    \n    sr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.05\n    \n    sr_percep:\n      type: PerceptualLoss\n      layer_weights:\n        'conv5_4': 1  # before relu\n      vgg_type: vgg19\n      use_input_norm: true\n      range_norm: false\n      perceptual_weight: 1.0\n      style_weight: 0\n      criterion: l1\n      weight: !!float 0.05\n\n  optimizers:\n    netSR:\n      type: Adam\n      lr: !!float 2e-4\n    netD2:\n      type: Adam\n      lr: !!float 2e-4\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/PDM-SR/options/train/percep/2018Track4.yml",
    "content": "#### general settings\nname: 2018Track4_percep\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [2]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/x4.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track4\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt: \n        mix: false\n        spatial: false\n        nc: 3\n        nf: 64\n        nb: 16\n        head_k: 1\n        body_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: \n        mix: true\n        nc: 3\n        nf: 64\n        nb: 16\n        head_k: 3\n        body_k: 3\n        dim: 3\n        zero_init: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n  \n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n  \n  optim_sr: true\n  optim_deg: false\n  \n  losses:\n\n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n    \n    sr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.05\n    \n    sr_percep:\n      type: PerceptualLoss\n      layer_weights:\n        'conv5_4': 1  # before relu\n      vgg_type: vgg19\n      use_input_norm: true\n      range_norm: false\n      perceptual_weight: 1.0\n      style_weight: 0\n      criterion: l1\n      weight: !!float 0.05\n\n  optimizers:\n    netSR:\n      type: Adam\n      lr: !!float 2e-4\n    netD2:\n      type: Adam\n      lr: !!float 2e-4\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/PDM-SR/options/train/percep/2020Track1.yml",
    "content": "#### general settings\nname: 2020Track1_percep_bsrgan\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [2]\nmetrics: [psnr, ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/train_source.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 4  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt:\n        spatial: false\n        nc: 3\n        nf: 64\n        nb: 8\n        head_k: 1\n        body_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: \n        spatial: false\n        nc: 3\n        nf: 32\n        nb: 8\n        head_k: 3\n        body_k: 3\n        dim: 1\n        zero_init: false\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: ~\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n\n  optim_deg: false\n  optim_sr: true\n  \n  losses:\n\n    sr_pix_sr: \n      type: L1Loss\n      weight: 0.01\n    \n    sr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.005\n    \n    sr_percep:\n      type: PerceptualLoss\n      layer_weights:\n        'conv5_4': 1  # before relu\n      vgg_type: vgg19\n      use_input_norm: true\n      range_norm: false\n      perceptual_weight: 1.0\n      style_weight: 0\n      criterion: l1\n      weight: !!float 0.05\n\n  optimizers:\n    netSR:\n      type: Adam\n      lr: !!float 2e-4\n    netD2:\n      type: Adam\n      lr: !!float 2e-4\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/PDM-SR/options/train/percep/2020Track2.yml",
    "content": "#### general settings\nname: 2020Track2_percep_bsrgan\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [5]\nmetrics: [niqe]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track2/train_source.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2020Track2\n    mode: SingleDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot: /home/lzx/SRDatasets/NTIRE2020/track2/test_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt:\n        spatial: false\n        nc: 3\n        nf: 64\n        nb: 8\n        head_k: 1\n        body_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: \n        spatial: false\n        nc: 3\n        nf: 32\n        nb: 8\n        head_k: 3\n        body_k: 3\n        dim: 1\n        zero_init: false\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n  \n  netD1:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netD2:\n    which_network: PatchGANDiscriminator\n    setting:\n      in_c: 3\n      nf: 64\n      nb: 3\n      stride: 1\n    pretrain:\n      path: ~\n      strict_load: true\n      \n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n\n  optim_deg: true\n  optim_sr: true\n  \n  losses:\n\n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n    \n    noise_mean:\n      type: MSELoss\n      weight: 1.0\n    \n    sr_adv:\n      type: GANLoss\n      gan_type: lsgan\n      real_label_val: 1.0\n      fake_label_val: 0.0\n      weight: !!float 0.05\n    \n    sr_percep:\n      type: PerceptualLoss\n      layer_weights:\n        'conv5_4': 1  # before relu\n      vgg_type: vgg19\n      use_input_norm: true\n      range_norm: false\n      perceptual_weight: 1.0\n      style_weight: 0\n      criterion: l1\n      weight: !!float 0.05\n\n  optimizers:\n    default:\n      type: Adam\n      lr: !!float 2e-4\n    netSR: ~\n    netD2: ~\n    netD1: ~\n\n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
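  {
    "path": "docs/examples/lsgan_loss.py",
    "content": "\"\"\"Illustrative sketch (not a file from the original repo): what 'gan_type:\nlsgan' with real_label_val 1.0 / fake_label_val 0.0 amounts to in the configs\nabove. A least-squares GAN regresses discriminator outputs toward the labels\nwith an MSE criterion; the D loss averages the real and fake terms and the G\nloss pushes fake predictions toward the real label, mirroring the shape of\ncalculate_gan_loss_D / calculate_gan_loss_G in DegSRModel.\"\"\"\nimport torch\nimport torch.nn.functional as F\n\n\ndef d_loss(d_real, d_fake):\n    # discriminator: real -> 1.0, fake -> 0.0, averaged as in the model code\n    loss_real = F.mse_loss(d_real, torch.ones_like(d_real))\n    loss_fake = F.mse_loss(d_fake, torch.zeros_like(d_fake))\n    return (loss_real + loss_fake) / 2\n\n\ndef g_loss(d_fake):\n    # generator: fake predictions should look real\n    return F.mse_loss(d_fake, torch.ones_like(d_fake))\n\n\nif __name__ == \"__main__\":\n    d_real, d_fake = torch.rand(4, 1), torch.rand(4, 1)\n    print(d_loss(d_real, d_fake).item(), g_loss(d_fake).item())\n"
  },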
  {
    "path": "codes/config/PDM-SR/options/train/psnr/2017Track2.yml",
    "content": "  #### general settings\n  name: 2017Track2_psnr\n  use_tb_logger: false\n  model: DegSRModel\n  scale: 4\n  gpu_ids: [0]\n  metrics: [psnr, ssim, lpips]\n\n  #### datasets\n  datasets:\n    train:\n      name: DIV2K\n      mode: UnPairedDataset\n      data_type: lmdb\n      color: RGB\n      ratios: [200, 200]\n\n      dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n      dataroot_src: /home/lzx/SRDatasets/NTIRE2017/train_LR/x4_half.lmdb\n\n      use_shuffle: true\n      workers_per_gpu: 4  # per GPU\n      imgs_per_gpu: 32\n      tgt_size: 128\n      src_size: 32\n      use_flip: true\n      use_rot: true\n\n    val:\n      name: 2017Track2_mini\n      mode: PairedDataset\n      data_type: lmdb\n      color: RGB\n\n      dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4_mini.lmdb\n      dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n  #### network structures\n  networks:\n    netDeg:\n      which_network: DegModel\n      setting:\n        scale: 4\n        nc_img: 3\n        kernel_opt: \n          mix: false\n          spatial: false\n          nc: 64\n          nf: 64\n          nb: 16\n          body_k: 1\n          head_k: 1\n          ksize: 21\n          zero_init: true\n        noise_opt: ~\n      pretrain: \n        path: ~\n        strict_load: true\n      \n    netD1:\n      which_network: PatchGANDiscriminator\n      setting:\n        in_c: 3\n        nf: 64\n        nb: 3\n        stride: 1\n      pretrain:\n        path: ~\n        strict_load: true\n\n    netSR:\n      which_network: EDSR\n      setting:\n        nf: 64\n        nb: 16\n        res_scale: 1\n        upscale: 4\n      pretrain: \n        path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n        strict_load: true\n\n  #### training settings: learning rate scheme, loss\n  train:\n    resume_state: ~\n    D_ratio: 1\n    max_grad_norm: 50\n    buffer_size: 0\n\n    optim_sr: true\n    optim_deg: true\n\n    losses:\n\n      sr_pix_sr: \n        type: L1Loss\n        weight: 1.0\n      \n      lr_adv:\n        type: GANLoss\n        gan_type: lsgan\n        real_label_val: 1.0\n        fake_label_val: 0.0\n        weight: !!float 1.0\n\n    optimizers:\n      default:\n        type: Adam\n        lr: !!float 2e-4\n      netDeg: ~\n      netSR: ~\n      netD1: ~\n    \n    niter: 200000\n    warmup_iter: -1  # no warm up\n\n    schedulers:\n      default:\n        type: MultiStepRestartLR\n        milestones: [50000, 100000, 150000]\n        gamma: 0.5\n    \n    manual_seed: 0\n    val_freq: !!float 5e3\n\n  #### logger\n  logger:\n    print_freq: 100\n    save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/PDM-SR/options/train/psnr/2018Track2.yml",
    "content": "#### general settings\nname: 2018Track2_psnr_v2\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [0]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [200, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4_half.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/x4_half.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 4  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 192\n    src_size: 48\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt: \n        mix: false\n        spatial: false\n        nc: 64\n        nf: 64\n        nb: 16\n        head_k: 1\n        body_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: \n        mix: true\n        nc: 3\n        nf: 64\n        nb: 16\n        head_k: 3\n        body_k: 3\n        dim: 3\n        zero_init: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n  \n  optim_deg: false\n  optim_sr: true\n  \n  losses:\n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n    \n  optimizers:\n    netSR:\n      type: Adam\n      lr: !!float 2e-4\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    netSR:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/PDM-SR/options/train/psnr/2018Track4.yml",
    "content": "#### general settings\nname: 2018Track4_psnr\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [1]\nmetrics: [best_psnr, best_ssim, lpips]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/x4.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 128\n    src_size: 32\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2018Track4\n    mode: PairedDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid_mini.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt: \n        mix: false\n        spatial: false\n        nc: 3\n        nf: 64\n        nb: 16\n        head_k: 1\n        body_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: \n        mix: true\n        nc: 3\n        nf: 64\n        nb: 16\n        head_k: 3\n        body_k: 3\n        dim: 3\n        zero_init: true\n    pretrain: \n      path: ~\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n  optim_deg: false\n  optim_sr: true\n  \n  losses:\n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n    \n  optimizers:\n    netSR:\n      type: Adam\n      lr: !!float 2e-4\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    netSR:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/PDM-SR/options/train/psnr/2020Track1.yml",
    "content": "  #### general settings\n  name: 2020Track1_psnr\n  use_tb_logger: false\n  model: DegSRModel\n  scale: 4\n  gpu_ids: [1]\n  metrics: [psnr, ssim, lpips]\n\n  #### datasets\n  datasets:\n    train:\n      name: DIV2K\n      mode: UnPairedDataset\n      data_type: lmdb\n      color: RGB\n      ratios: [50, 200]\n\n      dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n      dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/train_source.lmdb\n\n      use_shuffle: true\n      workers_per_gpu: 4  # per GPU\n      imgs_per_gpu: 32\n      tgt_size: 128\n      src_size: 32\n      use_flip: true\n      use_rot: true\n\n    val:\n      name: 2020Track2_mini\n      mode: PairedDataset\n      data_type: lmdb\n      color: RGB\n\n      dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid_mini.lmdb\n      dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4_mini.lmdb\n\n  #### network structures\n  networks:\n    netDeg:\n      which_network: DegModel\n      setting:\n        scale: 4\n        nc_img: 3\n        kernel_opt: \n          mix: false\n          spatial: false\n          nc: 3\n          nf: 64\n          nb: 8\n          body_k: 1\n          head_k: 1\n          ksize: 11\n          zero_init: true\n        noise_opt:\n          mix: true\n          spatial: true\n          nc: 3\n          nf: 64\n          nb: 8\n          body_k: 3\n          head_k: 3\n          dim: 3\n          zero_init: true\n      pretrain: \n        path: ~\n        strict_load: true\n      \n    netD1:\n      which_network: PatchGANDiscriminator\n      setting:\n        in_c: 3\n        nf: 64\n        nb: 3\n        stride: 1\n      pretrain:\n        path: ~\n        strict_load: true\n\n    netSR:\n      which_network: EDSR\n      setting:\n        nf: 64\n        nb: 16\n        res_scale: 1\n        upscale: 4\n      pretrain: \n        path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n        strict_load: true\n\n  #### training settings: learning rate scheme, loss\n  train:\n    resume_state: ~\n    D_ratio: 1\n    max_grad_norm: 50\n    buffer_size: 0\n\n    optim_sr: true\n    optim_deg: true\n\n    losses:\n\n      sr_pix_sr: \n        type: L1Loss\n        weight: 1.0\n      \n      lr_adv:\n        type: GANLoss\n        gan_type: lsgan\n        real_label_val: 1.0\n        fake_label_val: 0.0\n        weight: !!float 1.0\n      \n      noise_mean: \n        type: MSELoss\n        weight: !!float 100\n\n    optimizers:\n      default:\n        type: Adam\n        lr: !!float 2e-4\n      netDeg: ~\n      netSR: ~\n      netD1: ~\n    \n    niter: 200000\n    warmup_iter: -1  # no warm up\n\n    schedulers:\n      default:\n        type: MultiStepRestartLR\n        milestones: [50000, 100000, 150000]\n        gamma: 0.5\n     \n    manual_seed: 0\n    val_freq: !!float 5e3\n\n  #### logger\n  logger:\n    print_freq: 100\n    save_checkpoint_freq: !!float 5e3\n"
  },
  {
    "path": "codes/config/PDM-SR/options/train/psnr/2020Track2.yml",
    "content": "#### general settings\nname: 2020Track2_psnr\nuse_tb_logger: false\nmodel: DegSRModel\nscale: 4\ngpu_ids: [2]\nmetrics: [niqe]\n\n#### datasets\ndatasets:\n  train:\n    name: DIV2K\n    mode: UnPairedDataset\n    data_type: lmdb\n    color: RGB\n    ratios: [50, 200]\n\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_train/HR/x4.lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track2/train_source.lmdb\n\n    use_shuffle: true\n    workers_per_gpu: 6  # per GPU\n    imgs_per_gpu: 32\n    tgt_size: 192\n    src_size: 48\n    use_flip: true\n    use_rot: true\n\n  val:\n    name: 2020Track2\n    mode: SingleDataset\n    data_type: lmdb\n    color: RGB\n\n    dataroot: /home/lzx/SRDatasets/NTIRE2020/track2/test_mini.lmdb\n\n#### network structures\nnetworks:\n  netDeg:\n    which_network: DegModel\n    setting:\n      scale: 4\n      nc_img: 3\n      kernel_opt:\n        mix: false\n        spatial: false\n        nc: 64\n        nf: 64\n        nb: 8\n        head_k: 1\n        body_k: 1\n        ksize: 21\n        zero_init: true\n      noise_opt: \n        mix: true\n        nc: 3\n        nf: 64\n        nb: 8\n        head_k: 3\n        body_k: 3\n        dim: 3\n        zero_init: true\n    pretrain: \n      path: log/2020Track2_deg_mse10/models/195000_netDeg.pth\n      strict_load: true\n\n  netSR:\n    which_network: EDSR\n    setting:\n      nf: 64\n      nb: 16\n      res_scale: 1\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/EDSR/edsr_baseline_x4-new.pt\n      strict_load: true\n\n#### training settings: learning rate scheme, loss\ntrain:\n  resume_state: ~\n  D_ratio: 1\n  max_grad_norm: 50\n  buffer_size: 0\n\n  optim_deg: false\n  optim_sr: true\n  \n  losses:\n    sr_pix_sr: \n      type: L1Loss\n      weight: 1.0\n    \n  optimizers:\n    netSR:\n      type: Adam\n      lr: !!float 2e-4\n   \n  niter: 200000\n  warmup_iter: -1  # no warm up\n\n  schedulers:\n    default:\n      type: MultiStepRestartLR\n      milestones: [50000, 100000, 150000]\n      gamma: 0.5\n\n  manual_seed: 0\n  val_freq: !!float 5e3\n\n#### logger\nlogger:\n  print_freq: 100\n  save_checkpoint_freq: !!float 5e3\n  \n"
  },
  {
    "path": "codes/config/PDM-SR/test.py",
    "content": "import argparse\nimport logging\nimport os.path\nimport sys\nimport time\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\nfrom utils import bgr2ycbcr, imresize\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=False)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    util.mkdirs(\n        (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n    )\n\n    os.system(\"rm ./result\")\n    os.symlink(os.path.join(opt[\"path\"][\"results_root\"], \"..\"), \"./result\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    torch.backends.cudnn.benchmark = True\n\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"test_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO,\n        screen=True,\n        tofile=True,\n    )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    logger = logging.getLogger(\"base\")\n    logger.info(option.dict2str(opt))\n\n    # Create test dataset and dataloader\n    test_datasets = []\n    test_loaders = []\n\n    for phase, dataset_opt in 
sorted(opt[\"datasets\"].items()):\n\n        test_set = create_dataset(dataset_opt)\n        test_loader = create_dataloader(test_set, dataset_opt, opt[\"dist\"])\n\n        if rank == 0:\n            logger.info(\n                \"Number of test images in [{:s}]: {:d}\".format(\n                    dataset_opt[\"name\"], len(test_set)\n                )\n            )\n        test_datasets.append(test_set)\n        test_loaders.append(test_loader)\n\n    # load pretrained model by default\n    model = create_model(opt)\n\n    for test_dataset, test_loader in zip(test_datasets, test_loaders):\n\n        test_set_name = test_dataset.opt[\"name\"]\n        dataset_dir = os.path.join(opt[\"path\"][\"results_root\"], test_set_name)\n\n        if rank == 0:\n            logger.info(\"\\nTesting [{:s}]...\".format(test_set_name))\n            util.mkdir(dataset_dir)\n\n        validate(\n            model,\n            test_dataset,\n            test_loader,\n            opt,\n            measure,\n            dataset_dir,\n            test_set_name,\n            logger,\n        )\n\n\ndef validate(\n    model, dataset, dist_loader, opt, measure, dataset_dir, test_set_name, logger\n):\n\n    test_results = {}\n    test_results_y = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n        test_results_y[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        test_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        img_path = test_data[\"src_path\"][0]\n        img_name = img_path.split(\"/\")[-1].split(\".\")[0]\n\n        model.test(test_data)\n        visuals = model.get_current_visuals()\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n\n        suffix = opt[\"suffix\"]\n        if suffix:\n            save_img_path = os.path.join(dataset_dir, img_name + suffix + \".png\")\n        else:\n            save_img_path = os.path.join(dataset_dir, img_name + \".png\")\n        util.save_img(sr_img, save_img_path)\n\n        message = \"img:{:15s}; \".format(img_name)\n\n        crop_border = opt[\"crop_border\"] if opt[\"crop_border\"] else opt[\"scale\"]\n\n        if crop_border == 0:\n            cropped_sr_img = sr_img\n        else:\n            cropped_sr_img = sr_img[\n                crop_border:-crop_border, crop_border:-crop_border, :\n            ]\n\n        if \"tgt\" in test_data.keys():\n            gt_img = util.tensor2img(test_data[\"tgt\"][0].double().cpu())\n\n            if crop_border == 0:\n                cropped_gt_img = gt_img\n            else:\n                cropped_gt_img = gt_img[\n                    crop_border:-crop_border, crop_border:-crop_border, :\n                ]\n        else:\n            gt_img = None\n            cropped_gt_img = None\n\n        message += \"Scores - \"\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n            message += \"{}: {:.6f}; \".format(k, v)\n\n        if sr_img.shape[2] == 3:  # RGB image\n            sr_img_y = bgr2ycbcr(sr_img, only_y=True)\n            if crop_border == 0:\n                cropped_sr_img_y = sr_img_y * 255\n            else:\n                
cropped_sr_img_y = (\n                    sr_img_y[crop_border:-crop_border, crop_border:-crop_border] * 255\n                )\n            if gt_img is not None:\n                gt_img_y = bgr2ycbcr(gt_img, only_y=True)\n                if crop_border == 0:\n                    cropped_gt_img_y = gt_img_y * 255\n                else:\n                    cropped_gt_img_y = (\n                        gt_img_y[crop_border:-crop_border, crop_border:-crop_border]\n                        * 255\n                    )\n            else:\n                gt_img_y = None\n                cropped_gt_img_y = None\n\n            message += \"Y Scores - \"\n            scores = measure(\n                res=cropped_sr_img_y, ref=cropped_gt_img_y, metrics=opt[\"metrics\"]\n            )\n            for k, v in scores.items():\n                test_results_y[k][idx] = v\n                message += \"{}: {:.6f}; \".format(k, v)\n\n        logger.info(message)\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n        for k, v in test_results_y.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    # log\n    avg_results = {}\n    message = \"Average Results for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger.info(message)\n\n    avg_results_y = {}\n    message = \"Average Results on Y channel for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results_y.items():\n            avg_results_y[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results_y[k])\n\n        logger.info(message)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
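A note on the distributed bookkeeping in `validate` above: each rank only fills the dataset indices congruent to its own rank, so the zero-initialized metric tensors can simply be sum-reduced onto rank 0. A toy, process-group-free sketch of that invariant:

```python
import torch

# Toy reconstruction of the reduce-based gather in validate(): rank r of
# world_size w scores images r, r + w, r + 2w, ... into a zero-initialized
# vector, so summing the per-rank vectors (what dist.reduce does on rank 0)
# recovers every image's score exactly once.
world_size, n = 3, 7
full = torch.arange(1.0, n + 1)          # pretend per-image scores
per_rank = []
for rank in range(world_size):
    t = torch.zeros(n)
    idx = list(range(rank, n, world_size))
    t[idx] = full[idx]                   # this rank's share of the dataset
    per_rank.append(t)

reduced = torch.stack(per_rank).sum(0)   # stand-in for dist.reduce(v, dst=0)
assert torch.equal(reduced, full)
print(reduced.sum() / n)                 # the average the script reports
```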
  {
    "path": "codes/config/PDM-SR/train.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport random\nimport sys\nimport time\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef setup_dataloaer(opt, logger):\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        rank = 0\n        world_size = 1\n\n    for phase, dataset_opt in opt[\"datasets\"].items():\n        if phase == \"train\":\n            train_set = create_dataset(dataset_opt)\n            train_loader = create_dataloader(train_set, dataset_opt, opt[\"dist\"])\n            total_iters = opt[\"train\"][\"niter\"]\n            total_epochs = total_iters // (len(train_loader) - 1) + 1\n            if rank == 0:\n                logger.info(\n                    \"Number of train images: {:,d}, iters: {:,d}\".format(\n                        len(train_set), len(train_loader)\n                    )\n                )\n                logger.info(\n                    \"Total epochs needed: {:d} for iters {:,d}\".format(\n                        total_epochs, opt[\"train\"][\"niter\"]\n                    )\n                )\n\n        elif phase == \"val\":\n            val_set = create_dataset(dataset_opt)\n            val_loader = create_dataloader(val_set, dataset_opt, opt[\"dist\"])\n            if rank == 0:\n                logger.info(\n                    \"Number of val images in [{:s}]: {:d}\".format(\n                        dataset_opt[\"name\"], len(val_set)\n                    )\n                )\n        else:\n            raise NotImplementedError(\"Phase [{:s}] is not recognized.\".format(phase))\n\n    assert train_loader is not None\n    assert val_loader is not None\n\n    return train_set, train_loader, val_set, val_loader, total_iters, total_epochs\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=True)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = 
torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    if opt[\"train\"].get(\"resume_state\", None) is None:\n        util.mkdir_and_rename(\n            opt[\"path\"][\"experiments_root\"]\n        )  # rename experiment folder if exists\n        util.mkdirs(\n            (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n        )\n        os.system(\"rm ./log\")\n        os.symlink(os.path.join(opt[\"path\"][\"experiments_root\"], \"..\"), \"./log\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            args.rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    seed = opt[\"train\"][\"manual_seed\"]\n    if seed is None:\n        seed = 0\n    # offset by rank so each process draws a different random stream\n    util.set_random_seed(seed + rank)\n\n    torch.backends.cudnn.benchmark = True\n    # torch.backends.cudnn.deterministic = True\n\n    # setup tensorboard and val logger\n    if rank == 0:\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger = SummaryWriter(log_dir=\"log/{}/tb_logger/\".format(opt[\"name\"]))\n\n        util.setup_logger(\n            \"val\",\n            opt[\"path\"][\"log\"],\n            \"val_\" + opt[\"name\"],\n            level=logging.INFO,\n            screen=True,\n            tofile=True,\n        )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    # configure loggers; 
logging will not work before this setup\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"train_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO if rank == 0 else logging.ERROR,\n        screen=True,\n        tofile=True,\n    )\n\n    logger = logging.getLogger(\"base\")\n    if rank == 0:\n        logger.info(option.dict2str(opt))\n\n    # create dataset\n    (\n        train_set,\n        train_loader,\n        val_set,\n        val_loader,\n        total_iters,\n        total_epochs,\n    ) = setup_dataloader(opt, logger)\n\n    # create model\n    model = create_model(opt)\n\n    # load the resume state if one exists\n    if opt[\"train\"].get(\"resume_state\", None):\n        # distributed resuming: all load into default GPU\n        device_id = gpu\n        resume_state = torch.load(\n            opt[\"train\"][\"resume_state\"],\n            map_location=lambda storage, loc: storage.cuda(device_id),\n        )\n\n        logger.info(\n            \"Resuming training from epoch: {}, iter: {}.\".format(\n                resume_state[\"epoch\"], resume_state[\"iter\"]\n            )\n        )\n\n        start_epoch = resume_state[\"epoch\"]\n        current_step = resume_state[\"iter\"]\n        model.resume_training(resume_state)  # handle optimizers and schedulers\n\n    else:\n        current_step = 0\n        start_epoch = 0\n\n    logger.info(\n        \"Start training from epoch: {:d}, iter: {:d}\".format(start_epoch, current_step)\n    )\n    data_time, iter_time = time.time(), time.time()\n    avg_data_time = avg_iter_time = 0\n    count = 0\n    for epoch in range(start_epoch, total_epochs + 1):\n        for _, train_data in enumerate(train_loader):\n\n            current_step += 1\n            count += 1\n            if current_step > total_iters:\n                break\n\n            data_time = time.time() - data_time\n            avg_data_time = (avg_data_time * (count - 1) + data_time) / count\n\n            model.feed_data(train_data)\n            model.optimize_parameters(current_step)\n            model.update_learning_rate(\n                current_step, warmup_iter=opt[\"train\"][\"warmup_iter\"]\n            )\n\n            iter_time = time.time() - iter_time\n            avg_iter_time = (avg_iter_time * (count - 1) + iter_time) / count\n\n            # log\n            if current_step % opt[\"logger\"][\"print_freq\"] == 0:\n                logs = model.get_current_log()\n                message = (\n                    f\"<epoch:{epoch:3d}, iter:{current_step:8,d}, \"\n                    f\"lr:{model.get_current_learning_rate():.3e}> \"\n                )\n\n                message += f'[time (data): {avg_iter_time:.3f} ({avg_data_time:.3f})] '\n                for k, v in logs.items():\n                    message += \"{:s}: {:.4e}; \".format(k, v)\n                    # tensorboard logger\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        if rank == 0:\n                            tb_logger.add_scalar(k, v, current_step)\n                logger.info(message)\n\n            # validation\n            if current_step % opt[\"train\"][\"val_freq\"] == 0:\n\n                avg_results = validate(\n                    model, val_set, val_loader, opt, measure, epoch, current_step\n                )\n\n            # tensorboard logger\n            if rank == 0:\n                if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                    
# avg_results is only defined right after a validation step\n                    if current_step % opt[\"train\"][\"val_freq\"] == 0:\n                        for k, v in avg_results.items():\n                            tb_logger.add_scalar(k, v, current_step)\n\n            # save models and training states\n            if current_step % opt[\"logger\"][\"save_checkpoint_freq\"] == 0:\n                if rank == 0:\n                    logger.info(\"Saving models and training states.\")\n                    model.save(current_step)\n                    model.save_training_state(epoch, current_step)\n\n            data_time = time.time()\n            iter_time = time.time()\n\n    if rank == 0:\n        logger.info(\"Saving the final model.\")\n        model.save(\"latest\")\n        logger.info(\"End of training.\")\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger.close()\n\n\ndef validate(model, dataset, dist_loader, opt, measure, epoch, current_step):\n\n    test_results = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    if rank == 0:\n        pbar = tqdm(total=len(dataset), leave=False, dynamic_ncols=True)\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        val_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        LR_img = val_data[\"src\"]\n        lr_img = util.tensor2img(LR_img)  # save LR image for reference\n\n        model.test(val_data)\n        visuals = model.get_current_visuals()\n\n        # Save images for reference\n        img_name = val_data[\"src_path\"][0].split(\"/\")[-1].split(\".\")[0]\n        img_dir = os.path.join(opt[\"path\"][\"val_images\"], img_name)\n\n        util.mkdir(img_dir)\n        save_lr_path = os.path.join(img_dir, \"{:s}_LR.png\".format(img_name))\n        util.save_img(lr_img, save_lr_path)\n\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n        save_img_path = os.path.join(\n            img_dir, \"{:s}_{:d}.png\".format(img_name, current_step)\n        )\n        util.save_img(sr_img, save_img_path)\n\n        if \"fake_lr\" in visuals.keys():\n            fake_lr_img = util.tensor2img(visuals[\"fake_lr\"])\n            save_img_path = os.path.join(\n                img_dir, f\"fake_lr_{current_step:d}.png\"\n            )\n            util.save_img(fake_lr_img, save_img_path)\n\n        # calculate scores\n        crop_size = opt[\"scale\"]\n        cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        if \"tgt\" in val_data.keys():\n            gt_img = util.tensor2img(val_data[\"tgt\"])\n            cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        else:\n            cropped_gt_img = gt_img = None\n\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n\n        if rank == 0:\n            for _ in range(world_size):\n                pbar.update(1)\n    if rank == 0:\n        pbar.close()\n\n    # log\n    avg_results = {}\n    message = \" <epoch:{:3d}, iter:{:8,d}, Average scores:\\t\".format(\n        epoch, current_step\n    )\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            
message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger_val = logging.getLogger(\"val\")  # validation logger\n        logger_val.info(message)\n    \n    del test_results\n    torch.cuda.empty_cache()\n    return avg_results\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
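The `avg_data_time` / `avg_iter_time` bookkeeping in the training loop above is an incremental mean, `m_k = (m_{k-1} * (k - 1) + x_k) / k`, which matches the batch mean without storing the history. A quick check with made-up timings:

```python
# Incremental mean as used for avg_data_time / avg_iter_time in train.py
# (the timing values below are illustrative, not measured).
xs = [0.12, 0.08, 0.15, 0.11]
avg, count = 0.0, 0
for x in xs:
    count += 1
    avg = (avg * (count - 1) + x) / count
assert abs(avg - sum(xs) / len(xs)) < 1e-12
print(f"{avg:.3f} s")
```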
  {
    "path": "codes/config/RealESRGAN/README.md",
    "content": "This repo currently only supports the test of [Real-ESRGAN](https://arxiv.org/abs/2107.10833). The training related codes may be added in the future. "
  },
  {
    "path": "codes/config/RealESRGAN/archs/__init__.py",
    "content": "import importlib\nimport os\nimport os.path as osp\n\nfrom utils.registry import ARCH_REGISTRY, LOSS_REGISTRY, LR_SCHEDULER_REGISTRY\n\narch_folder = osp.dirname(osp.abspath(__file__))\narch_filenames = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(arch_folder)\n    if v.endswith(\".py\")\n]\n# import all the arch modules\n_arch_modules = [\n    importlib.import_module(f\"archs.{file_name}\") for file_name in arch_filenames\n]\n\n\ndef build_network(net_opt):\n    which_network = net_opt[\"which_network\"]\n    net = ARCH_REGISTRY.get(which_network)(**net_opt[\"setting\"])\n    return net\n\n\ndef build_loss(loss_opt):\n    loss_type = loss_opt.pop(\"type\")\n    loss = LOSS_REGISTRY.get(loss_type)(**loss_opt)\n    return loss\n\ndef build_scheduler(optimizer, scheduler_opt):\n    scheduler_type = scheduler_opt.pop(\"type\")\n    scheduler = LR_SCHEDULER_REGISTRY.get(scheduler_type)(optimizer, **scheduler_opt)\n    return scheduler\n"
  },
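`build_network` above resolves a class by name through the registry that the `@ARCH_REGISTRY.register()` decorators populate at import time. A minimal stand-in of that lookup (the `Registry` class below is illustrative, not the actual `utils.registry` implementation):

```python
# Minimal sketch of the registry pattern used by build_network; ToyNet and
# this Registry class are stand-ins for demonstration only.
class Registry:
    def __init__(self):
        self._obj_map = {}

    def register(self):
        def deco(cls):
            self._obj_map[cls.__name__] = cls  # keyed by class name
            return cls
        return deco

    def get(self, name):
        return self._obj_map[name]

ARCH_REGISTRY = Registry()

@ARCH_REGISTRY.register()
class ToyNet:
    def __init__(self, nf):
        self.nf = nf

# Mirrors build_network(net_opt): look the class up by name, then call it
# with the keyword settings from the config.
net_opt = {"which_network": "ToyNet", "setting": {"nf": 64}}
net = ARCH_REGISTRY.get(net_opt["which_network"])(**net_opt["setting"])
print(type(net).__name__, net.nf)  # ToyNet 64
```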
  {
    "path": "codes/config/RealESRGAN/archs/discriminator.py",
    "content": "import torch\nimport torch.nn as nn\nimport torchvision\nimport functools\n\nfrom utils.registry import ARCH_REGISTRY\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG128(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512 * 4 * 4, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass DiscriminatorVGG32(nn.Module):\n    def __init__(self, in_nc, nf):\n        super().__init__()\n        # [64, 128, 128]\n        self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n        self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n        # [64, 64, 64]\n        self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n        self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n        self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n        self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n        # [128, 32, 32]\n        self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n        self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n        self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n        self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n        # [256, 16, 16]\n        self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n        self.bn3_0 = 
nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n        # [512, 8, 8]\n        self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n        self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n        self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n        self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n        self.linear1 = nn.Linear(512, 100)\n        self.linear2 = nn.Linear(100, 1)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv0_0(x))\n        fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n        fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))\n        fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n        fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n        fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n        fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n        fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n        fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n        fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n        fea = fea.view(fea.size(0), -1)\n        fea = self.lrelu(self.linear1(fea))\n        out = self.linear2(fea)\n        return out\n\n\n@ARCH_REGISTRY.register()\nclass PatchGANDiscriminator(nn.Module):\n    \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n    def __init__(self, in_c, nf, nb, stride=1, norm_layer=nn.InstanceNorm2d):\n        \"\"\"Construct a PatchGAN discriminator\n\n        Parameters:\n            input_nc (int)  -- the number of channels in input images\n            ndf (int)       -- the number of filters in the last conv layer\n            n_layers (int)  -- the number of conv layers in the discriminator\n            norm_layer      -- normalization layer\n        \"\"\"\n        super().__init__()\n        if (\n            type(norm_layer) == functools.partial\n        ):  # no need to use bias as BatchNorm2d has affine parameters\n            use_bias = norm_layer.func == nn.InstanceNorm2d\n        else:\n            use_bias = norm_layer == nn.InstanceNorm2d\n\n        kw = 3\n        padw = 1\n        sequence = [\n            nn.Conv2d(in_c, nf, kernel_size=kw, stride=1, padding=padw),\n            nn.LeakyReLU(0.2, True),\n        ]\n        nf_mult = 1\n        nf_mult_prev = 1\n        for n in range(1, nb):  # gradually increase the number of filters\n            nf_mult_prev = nf_mult\n            nf_mult = min(2 ** n, 8)\n            sequence += [\n                nn.Conv2d(\n                    nf * nf_mult_prev,\n                    nf * nf_mult,\n                    kernel_size=kw,\n                    stride=stride,\n                    padding=padw,\n                    bias=use_bias,\n                ),\n                norm_layer(nf * nf_mult),\n                nn.LeakyReLU(0.2, True),\n            ]\n\n        nf_mult_prev = nf_mult\n        nf_mult = min(2 ** nb, 8)\n        sequence += [\n            nn.Conv2d(\n                nf * nf_mult_prev,\n                nf * nf_mult,\n                kernel_size=kw,\n                stride=1,\n                padding=padw,\n                bias=use_bias,\n            ),\n            norm_layer(nf * nf_mult),\n            nn.LeakyReLU(0.2, True),\n        ]\n\n        sequence += [\n            nn.Conv2d(nf * nf_mult, nf, kernel_size=kw, stride=1, padding=padw)\n        ]  # output 1 
channel prediction map\n        self.model = nn.Sequential(*sequence)\n\n    def forward(self, input):\n        \"\"\"Standard forward.\"\"\"\n        return self.model(input)\n"
  },
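`DiscriminatorVGG128` is hard-wired to 128×128 inputs when `nf=64`: the five stride-2 convolutions reduce the input to 4×4 with 512 channels, which is exactly what `linear1 = nn.Linear(512 * 4 * 4, 100)` expects. `DiscriminatorVGG32` follows the same pattern for 32×32 inputs, ending at 1×1 (hence its `nn.Linear(512, 100)`). The arithmetic, spelled out:

```python
# Spatial-size bookkeeping for DiscriminatorVGG128 (nf=64 assumed).
size = 128                                # input side length
channels = [64, 128, 256, 512, 512]       # outputs of conv0_1 .. conv4_1
for _ in channels:
    size //= 2                            # kernel 4, stride 2, padding 1 halves H/W
print(size, channels[-1] * size * size)   # 4 8192, i.e. 512 * 4 * 4 for linear1
```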
  {
    "path": "codes/config/RealESRGAN/archs/edsr.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(\n        self,\n        rgb_range,\n        rgb_mean=(0.4488, 0.4371, 0.4040),\n        rgb_std=(1.0, 1.0, 1.0),\n        sign=-1,\n    ):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\n\n\n@ARCH_REGISTRY.register()\nclass EDSR(nn.Module):\n    def __init__(self, nb, nf, res_scale=0.1, upscale=4, conv=default_conv):\n        super(EDSR, self).__init__()\n\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        scale = upscale\n       
 act = nn.ReLU(True)\n        # url_name = 'r{}f{}x{}'.format(nb, nf, upscale)\n        # if url_name in url:\n        #     self.url = url[url_name]\n        # else:\n        #     self.url = None\n        self.sub_mean = MeanShift(255.0, sign=-1)\n        self.add_mean = MeanShift(255.0, sign=1)\n\n        # define head module\n        m_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, n_feats, kernel_size, act=act, res_scale=res_scale)\n            for _ in range(n_resblocks)\n        ]\n        m_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = nn.Sequential(*m_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x * 255.0)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = self.tail(res)\n        x = self.add_mean(x) / 255.0\n\n        return x\n"
  },
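The `MeanShift` layers wrap EDSR in DIV2K mean normalization at the 255 range, which is why `forward` multiplies by 255 on the way in and divides on the way out. Spelled out as a frozen 1×1 convolution (the values below reproduce the defaults above):

```python
import torch
import torch.nn as nn

# MeanShift(255.0, sign=-1) written out: an identity-weight 1x1 conv whose
# bias subtracts the DIV2K RGB mean scaled to the 255 range (the paired
# sign=+1 layer adds it back after the tail).
rgb_mean = torch.tensor([0.4488, 0.4371, 0.4040])
shift = nn.Conv2d(3, 3, kernel_size=1)
shift.weight.data = torch.eye(3).view(3, 3, 1, 1)
shift.bias.data = -255.0 * rgb_mean
x = torch.full((1, 3, 2, 2), 0.5) * 255.0   # a flat gray image in [0, 255]
with torch.no_grad():
    print(shift(x)[0, :, 0, 0])             # 255 * (0.5 - mean) per channel
```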
  {
    "path": "codes/config/RealESRGAN/archs/loss.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport lpips as lp\n\nfrom utils.registry import LOSS_REGISTRY\n\nfrom .vgg import VGGFeatureExtractor\n\n\n@LOSS_REGISTRY.register()\nclass GaussGuided(nn.Module):\n    def __init__(self, ksize, sigma):\n        super().__init__()\n\n        ax = torch.arange(0, ksize) - ksize//2\n        xx, yy = torch.meshgrid(ax, ax)\n        dis = (xx ** 2 + yy ** 2)\n        dis = torch.exp(-dis / sigma ** 2)\n        dis = dis / dis.sum()\n\n        self.register_buffer(\"gauss\", dis.view(1, ksize**2, 1, 1))\n    \n    def forward(self, kernel):\n\n        return F.mse_loss(self.gauss, kernel)\n\n@LOSS_REGISTRY.register()\nclass PerceptualLossLPIPS(nn.Module):\n    def __init__(self, net=\"alex\", normalize=True):\n        super().__init__()\n        self.fn = lp.LPIPS(net=net, spatial=True)\n        for p in self.fn.parameters():\n            p.requires_grad = False\n        \n        self.normalize = normalize\n    \n    def forward(self, res, ref):\n        return self.fn(res, ref, normalize=self.normalize).mean(), None\n\n\n@LOSS_REGISTRY.register()\nclass MSELoss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.mse_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass L1Loss(nn.Module):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, res, ref):\n        return F.l1_loss(res, ref)\n\n\n@LOSS_REGISTRY.register()\nclass GANLoss(nn.Module):\n    \"\"\"Define GAN loss.\n    Args:\n        gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.\n        real_label_val (float): The value for real label. Default: 1.0.\n        fake_label_val (float): The value for fake label. Default: 0.0.\n    \"\"\"\n\n    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):\n        super(GANLoss, self).__init__()\n        self.gan_type = gan_type\n        self.real_label_val = real_label_val\n        self.fake_label_val = fake_label_val\n\n        if self.gan_type == \"vanilla\":\n            self.loss = nn.BCEWithLogitsLoss()\n        elif self.gan_type == \"lsgan\":\n            self.loss = nn.MSELoss()\n        elif self.gan_type == \"wgan\":\n            self.loss = self._wgan_loss\n        elif self.gan_type == \"wgan_softplus\":\n            self.loss = self._wgan_softplus_loss\n        elif self.gan_type == \"hinge\":\n            self.loss = nn.ReLU()\n        else:\n            raise NotImplementedError(f\"GAN type {self.gan_type} is not implemented.\")\n\n    def _wgan_loss(self, input, target):\n        \"\"\"wgan loss.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return -input.mean() if target else input.mean()\n\n    def _wgan_softplus_loss(self, input, target):\n        \"\"\"wgan loss with soft plus. 
softplus is a smooth approximation to the\n        ReLU function.\n        In StyleGAN2, it is called:\n            Logistic loss for discriminator;\n            Non-saturating loss for generator.\n        Args:\n            input (Tensor): Input tensor.\n            target (bool): Target label.\n        Returns:\n            Tensor: wgan loss.\n        \"\"\"\n        return F.softplus(-input).mean() if target else F.softplus(input).mean()\n\n    def get_target_label(self, input, target_is_real):\n        \"\"\"Get target label.\n        Args:\n            input (Tensor): Input tensor.\n            target_is_real (bool): Whether the target is real or fake.\n        Returns:\n            (bool | Tensor): Target tensor. Return bool for wgan, otherwise,\n                return Tensor.\n        \"\"\"\n\n        if self.gan_type in [\"wgan\", \"wgan_softplus\"]:\n            return target_is_real\n        target_val = self.real_label_val if target_is_real else self.fake_label_val\n        return input.new_ones(input.size()) * target_val\n\n    def forward(self, input, target_is_real, is_disc=False):\n        \"\"\"\n        Args:\n            input (Tensor): The input for the loss module, i.e., the network\n                prediction.\n            target_is_real (bool): Whether the target is real or fake.\n            is_disc (bool): Whether the loss is for the discriminator or not.\n                Default: False.\n        Returns:\n            Tensor: GAN loss value.\n        \"\"\"\n        target_label = self.get_target_label(input, target_is_real)\n        if self.gan_type == \"hinge\":\n            if is_disc:  # for discriminators in hinge-gan\n                input = -input if target_is_real else input\n                loss = self.loss(1 + input).mean()\n            else:  # for generators in hinge-gan\n                loss = -input.mean()\n        else:  # other gan types\n            loss = self.loss(input, target_label)\n\n        return loss\n\n\n@LOSS_REGISTRY.register()\nclass PerceptualLoss(nn.Module):\n    \"\"\"Perceptual loss with commonly used style loss.\n    Args:\n        layer_weights (dict): The weight for each layer of vgg feature.\n            Here is an example: {'conv5_4': 1.}, which means the conv5_4\n            feature layer (before relu5_4) will be extracted with weight\n            1.0 in calculating losses.\n        vgg_type (str): The type of vgg network used as feature extractor.\n            Default: 'vgg19'.\n        use_input_norm (bool): If True, normalize the input image in vgg.\n            Default: True.\n        range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].\n            Default: False.\n        perceptual_weight (float): If `perceptual_weight > 0`, the perceptual\n            loss will be calculated and the loss will be multiplied by the\n            weight. Default: 1.0.\n        style_weight (float): If `style_weight > 0`, the style loss will be\n            calculated and the loss will be multiplied by the weight.\n            Default: 0.\n        criterion (str): Criterion used for perceptual loss. 
Default: 'l1'.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_weights,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        perceptual_weight=1.0,\n        style_weight=0.0,\n        criterion=\"l1\",\n    ):\n        super(PerceptualLoss, self).__init__()\n        self.perceptual_weight = perceptual_weight\n        self.style_weight = style_weight\n        self.layer_weights = layer_weights\n        self.vgg = VGGFeatureExtractor(\n            layer_name_list=list(layer_weights.keys()),\n            vgg_type=vgg_type,\n            use_input_norm=use_input_norm,\n            range_norm=range_norm,\n        )\n\n        self.criterion_type = criterion\n        if self.criterion_type == \"l1\":\n            self.criterion = torch.nn.L1Loss()\n        elif self.criterion_type == \"l2\":\n            self.criterion = torch.nn.MSELoss()\n        elif self.criterion_type == \"fro\":\n            self.criterion = None\n        else:\n            raise NotImplementedError(f\"{criterion} criterion has not been supported.\")\n\n    def forward(self, x, gt):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, c, h, w).\n            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        # extract vgg features\n        x_features = self.vgg(x)\n        gt_features = self.vgg(gt.detach())\n\n        # calculate perceptual loss\n        if self.perceptual_weight > 0:\n            percep_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    percep_loss += (\n                        torch.norm(x_features[k] - gt_features[k], p=\"fro\")\n                        * self.layer_weights[k]\n                    )\n                else:\n                    percep_loss += (\n                        self.criterion(x_features[k], gt_features[k])\n                        * self.layer_weights[k]\n                    )\n            percep_loss *= self.perceptual_weight\n        else:\n            percep_loss = None\n\n        # calculate style loss\n        if self.style_weight > 0:\n            style_loss = 0\n            for k in x_features.keys():\n                if self.criterion_type == \"fro\":\n                    style_loss += (\n                        torch.norm(\n                            self._gram_mat(x_features[k])\n                            - self._gram_mat(gt_features[k]),\n                            p=\"fro\",\n                        )\n                        * self.layer_weights[k]\n                    )\n                else:\n                    style_loss += (\n                        self.criterion(\n                            self._gram_mat(x_features[k]),\n                            self._gram_mat(gt_features[k]),\n                        )\n                        * self.layer_weights[k]\n                    )\n            style_loss *= self.style_weight\n        else:\n            style_loss = None\n\n        return percep_loss, style_loss\n\n    def _gram_mat(self, x):\n        \"\"\"Calculate Gram matrix.\n        Args:\n            x (torch.Tensor): Tensor with shape of (n, c, h, w).\n        Returns:\n            torch.Tensor: Gram matrix.\n        \"\"\"\n        n, c, h, w = x.size()\n        features = x.view(n, c, w * h)\n        features_t = features.transpose(1, 2)\n        gram = features.bmm(features_t) / (c * h * 
w)\n        return gram\n\n\n@LOSS_REGISTRY.register()\nclass CharbonnierLoss(nn.Module):\n    \"\"\"Charbonnier Loss (L1)\"\"\"\n\n    def __init__(self, eps=1e-6):\n        super(CharbonnierLoss, self).__init__()\n        self.eps = eps\n\n    def forward(self, x, y):\n        diff = x - y\n        loss = torch.mean(torch.sqrt(diff * diff + self.eps))\n        return loss\n\n\nclass GradientPenaltyLoss(nn.Module):\n    def __init__(self, device=torch.device(\"cpu\")):\n        super(GradientPenaltyLoss, self).__init__()\n        self.register_buffer(\"grad_outputs\", torch.Tensor())\n        self.grad_outputs = self.grad_outputs.to(device)\n\n    def get_grad_outputs(self, input):\n        if self.grad_outputs.size() != input.size():\n            self.grad_outputs.resize_(input.size()).fill_(1.0)\n        return self.grad_outputs\n\n    def forward(self, interp, interp_crit):\n        grad_outputs = self.get_grad_outputs(interp_crit)\n        grad_interp = torch.autograd.grad(\n            outputs=interp_crit,\n            inputs=interp,\n            grad_outputs=grad_outputs,\n            create_graph=True,\n            retain_graph=True,\n            only_inputs=True,\n        )[0]\n        grad_interp = grad_interp.view(grad_interp.size(0), -1)\n        grad_interp_norm = grad_interp.norm(2, dim=1)\n\n        loss = ((grad_interp_norm - 1) ** 2).mean()\n        return loss\n"
  },
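For reference, here is what `GANLoss("lsgan")` evaluates for the two standard call patterns, written out with plain `F.mse_loss`: the discriminator passes `is_disc=True` with the true real/fake labels, while the generator requests the "real" label on fake predictions to get non-saturating gradients.

```python
import torch
import torch.nn.functional as F

# The lsgan branch of GANLoss, spelled out: targets are broadcast to the
# prediction shape and compared with MSE (predictions here are random
# placeholders for discriminator outputs).
real_pred = torch.randn(4, 1)
fake_pred = torch.randn(4, 1)

d_loss = F.mse_loss(real_pred, torch.ones_like(real_pred)) + F.mse_loss(
    fake_pred, torch.zeros_like(fake_pred)
)  # equivalent to criterion(pred, True/False, is_disc=True)
g_loss = F.mse_loss(fake_pred, torch.ones_like(fake_pred))  # criterion(pred, True)
print(f"d: {d_loss.item():.4f}, g: {g_loss.item():.4f}")
```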
  {
    "path": "codes/config/RealESRGAN/archs/lr_scheduler.py",
    "content": "import math\nfrom collections import Counter, defaultdict\n\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\n\nfrom utils.registry import LR_SCHEDULER_REGISTRY\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass LinearDecayLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        decay_prop,\n        total_steps,\n        last_epoch=-1,\n    ):\n        self.decay_prop = decay_prop\n        self.total_steps = total_steps\n\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n\n        return [\n            group[\"initial_lr\"]\n            * (1 - (self.last_epoch + 1) * self.decay_prop / self.total_steps)\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass MultiStepRestartLR(_LRScheduler):\n    def __init__(\n        self,\n        optimizer,\n        milestones,\n        restarts=None,\n        weights=None,\n        gamma=0.1,\n        clear_state=False,\n        last_epoch=-1,\n    ):\n        self.milestones = Counter(milestones)\n        self.gamma = gamma\n        self.clear_state = clear_state\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch in self.restarts:\n            if self.clear_state:\n                self.optimizer.state = defaultdict(dict)\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        if self.last_epoch not in self.milestones:\n            return [group[\"lr\"] for group in self.optimizer.param_groups]\n        return [\n            group[\"lr\"] * self.gamma ** self.milestones[self.last_epoch]\n            for group in self.optimizer.param_groups\n        ]\n\n\n@LR_SCHEDULER_REGISTRY.register()\nclass CosineAnnealingRestartLR(_LRScheduler):\n    def __init__(\n        self, optimizer, T_period, restarts=None, weights=None, eta_min=0, last_epoch=-1\n    ):\n        self.T_period = T_period\n        self.T_max = self.T_period[0]  # current T period\n        self.eta_min = eta_min\n        self.restarts = restarts if restarts else [0]\n        self.restart_weights = weights if weights else [1]\n        self.last_restart = 0\n        assert len(self.restarts) == len(\n            self.restart_weights\n        ), \"restarts and their weights do not match.\"\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch == 0:\n            return self.base_lrs\n        elif self.last_epoch in self.restarts:\n            self.last_restart = self.last_epoch\n            self.T_max = self.T_period[self.restarts.index(self.last_epoch) + 1]\n            weight = self.restart_weights[self.restarts.index(self.last_epoch)]\n            return [\n                group[\"initial_lr\"] * weight for group in self.optimizer.param_groups\n            ]\n        elif (self.last_epoch - self.last_restart - 1 - self.T_max) % (\n            2 * self.T_max\n        ) == 0:\n            return [\n                group[\"lr\"]\n                + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2\n                for base_lr, group in 
zip(self.base_lrs, self.optimizer.param_groups)\n            ]\n        return [\n            (1 + math.cos(math.pi * (self.last_epoch - self.last_restart) / self.T_max))\n            / (\n                1\n                + math.cos(\n                    math.pi * ((self.last_epoch - self.last_restart) - 1) / self.T_max\n                )\n            )\n            * (group[\"lr\"] - self.eta_min)\n            + self.eta_min\n            for group in self.optimizer.param_groups\n        ]\n"
  },
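Within one period, `CosineAnnealingRestartLR` follows the usual cosine annealing curve, and each restart rescales the starting LR by the matching restart weight. A closed-form rendering of the intended schedule, with illustrative numbers not taken from any config in this repo:

```python
import math

# Closed form of the schedule CosineAnnealingRestartLR tracks: within a
# period of length T, lr(t) = eta_min + (w * base_lr - eta_min) *
# (1 + cos(pi * t / T)) / 2, where w is the current period's restart weight.
base_lr, eta_min, T = 2e-4, 1e-7, 4
weights = [1.0, 0.5]                       # second period restarts at half LR
for period, w in enumerate(weights):
    for t in range(T):
        lr = eta_min + (w * base_lr - eta_min) * (1 + math.cos(math.pi * t / T)) / 2
        print(period * T + t, f"{lr:.2e}")
```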
  {
    "path": "codes/config/RealESRGAN/archs/module_util.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\n\ndef initialize_weights(net_l, scale=1):\n    if not isinstance(net_l, list):\n        net_l = [net_l]\n    for net in net_l:\n        for m in net.modules():\n            if isinstance(m, nn.Conv2d):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale  # for residual block\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                init.kaiming_normal_(m.weight, a=0, mode=\"fan_in\")\n                m.weight.data *= scale\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                init.constant_(m.weight, 1)\n                init.constant_(m.bias.data, 0.0)\n\n\ndef make_layer(block, n_layers):\n    layers = []\n    for _ in range(n_layers):\n        layers.append(block())\n    return nn.Sequential(*layers)\n\n\nclass ResidualBlock_noBN(nn.Module):\n    \"\"\"Residual block w/o BN\n    ---Conv-ReLU-Conv-+-\n     |________________|\n    \"\"\"\n\n    def __init__(self, nf=64):\n        super(ResidualBlock_noBN, self).__init__()\n        self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n\n        # initialization\n        initialize_weights([self.conv1, self.conv2], 0.1)\n\n    def forward(self, x):\n        identity = x\n        out = F.relu(self.conv1(x), inplace=True)\n        out = self.conv2(out)\n        return identity + out\n\n\ndef flow_warp(x, flow, interp_mode=\"bilinear\", padding_mode=\"zeros\"):\n    \"\"\"Warp an image or feature map with optical flow\n    Args:\n        x (Tensor): size (N, C, H, W)\n        flow (Tensor): size (N, H, W, 2), normal value\n        interp_mode (str): 'nearest' or 'bilinear'\n        padding_mode (str): 'zeros' or 'border' or 'reflection'\n\n    Returns:\n        Tensor: warped image or feature map\n    \"\"\"\n    assert x.size()[-2:] == flow.size()[1:3]\n    B, C, H, W = x.size()\n    # mesh grid\n    grid_y, grid_x = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))\n    grid = torch.stack((grid_x, grid_y), 2).float()  # W(x), H(y), 2\n    grid.requires_grad = False\n    grid = grid.type_as(x)\n    vgrid = grid + flow\n    # scale grid to [-1,1]\n    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(W - 1, 1) - 1.0\n    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(H - 1, 1) - 1.0\n    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)\n    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode)\n    return output\n"
  },
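`initialize_weights(..., scale=0.1)` is Kaiming fan-in initialization followed by damping, so residual branches such as `ResidualBlock_noBN` start close to the identity mapping. Its effect on a single conv, isolated:

```python
import torch.nn as nn
import torch.nn.init as init

# What initialize_weights(..., scale=0.1) does to one conv layer: Kaiming
# fan-in init, then the weights are scaled down so the residual branch
# contributes little at the start of training.
conv = nn.Conv2d(64, 64, 3, 1, 1)
init.kaiming_normal_(conv.weight, a=0, mode="fan_in")
before = conv.weight.data.std().item()
conv.weight.data *= 0.1
conv.bias.data.zero_()
print(f"weight std: {before:.4f} -> {conv.weight.data.std().item():.4f}")
```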
  {
    "path": "codes/config/RealESRGAN/archs/rcan.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):\n        super(MeanShift, self).__init__(3, 3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1)\n        self.weight.data.div_(std.view(3, 1, 1, 1))\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\n        self.bias.data.div_(std)\n        self.requires_grad = False\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\ndef make_model(args, parent=False):\n    return RCAN(args)\n\n\n## Channel Attention (CA) Layer\nclass CALayer(nn.Module):\n    def __init__(self, channel, reduction=16):\n        super(CALayer, self).__init__()\n        # global average pooling: feature --> point\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        # feature channel downscale and upscale --> channel weight\n        self.conv_du = nn.Sequential(\n            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),\n            
nn.ReLU(inplace=True),\n            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),\n            nn.Sigmoid(),\n        )\n\n    def forward(self, x):\n        y = self.avg_pool(x)\n        y = self.conv_du(y)\n        return x * y\n\n\n## Residual Channel Attention Block (RCAB)\nclass RCAB(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        reduction,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(RCAB, self).__init__()\n        modules_body = []\n        for i in range(2):\n            modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                modules_body.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                modules_body.append(act)\n        modules_body.append(CALayer(n_feat, reduction))\n        self.body = nn.Sequential(*modules_body)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x)\n        # res = self.body(x).mul(self.res_scale)\n        res += x\n        return res\n\n\n## Residual Group (RG)\nclass ResidualGroup(nn.Module):\n    def __init__(\n        self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks\n    ):\n        super(ResidualGroup, self).__init__()\n        modules_body = []\n        modules_body = [\n            RCAB(\n                conv,\n                n_feat,\n                kernel_size,\n                reduction,\n                bias=True,\n                bn=False,\n                act=nn.ReLU(True),\n                res_scale=1,\n            )\n            for _ in range(n_resblocks)\n        ]\n        modules_body.append(conv(n_feat, n_feat, kernel_size))\n        self.body = nn.Sequential(*modules_body)\n\n    def forward(self, x):\n        res = self.body(x)\n        res += x\n        return res\n\n\n## Residual Channel Attention Network (RCAN)\n@ARCH_REGISTRY.register()\nclass RCAN(nn.Module):\n    def __init__(self, ng, nb, nf, reduction=16, upscale=4, conv=default_conv):\n        super(RCAN, self).__init__()\n\n        n_resgroups = ng\n        n_resblocks = nb\n        n_feats = nf\n        kernel_size = 3\n        reduction = reduction\n        scale = upscale\n\n        act = nn.ReLU(True)\n\n        # RGB mean for DIV2K\n        rgb_mean = (0.4488, 0.4371, 0.4040)\n        rgb_std = (1.0, 1.0, 1.0)\n        self.sub_mean = MeanShift(1.0, rgb_mean, rgb_std, -1)\n\n        # define head module\n        modules_head = [conv(3, n_feats, kernel_size)]\n\n        # define body module\n        modules_body = [\n            ResidualGroup(\n                conv,\n                n_feats,\n                kernel_size,\n                reduction,\n                act=act,\n                res_scale=1.0,\n                n_resblocks=nb,\n            )\n            for _ in range(ng)\n        ]\n\n        modules_body.append(conv(n_feats, n_feats, kernel_size))\n\n        # define tail module\n        modules_tail = [\n            Upsampler(conv, scale, n_feats, act=False),\n            conv(n_feats, 3, kernel_size),\n        ]\n\n        self.add_mean = MeanShift(1.0, rgb_mean, rgb_std, 1)\n\n        self.head = nn.Sequential(*modules_head)\n        self.body = nn.Sequential(*modules_body)\n        self.tail = nn.Sequential(*modules_tail)\n\n    def forward(self, x):\n        x = self.sub_mean(x)\n        x = self.head(x)\n\n        res = self.body(x)\n        res += x\n\n        x = 
self.tail(res)\n        x = self.add_mean(x)\n\n        return x\n\n    def load_state_dict(self, state_dict, strict=False):\n        own_state = self.state_dict()\n        for name, param in state_dict.items():\n            if name in own_state:\n                if isinstance(param, nn.Parameter):\n                    param = param.data\n                try:\n                    own_state[name].copy_(param)\n                except Exception:\n                    if name.find(\"tail\") >= 0:\n                        print(\"Replace pre-trained upsampler to new one...\")\n                    else:\n                        raise RuntimeError(\n                            \"While copying the parameter named {}, \"\n                            \"whose dimensions in the model are {} and \"\n                            \"whose dimensions in the checkpoint are {}.\".format(\n                                name, own_state[name].size(), param.size()\n                            )\n                        )\n            elif strict:\n                if name.find(\"tail\") == -1:\n                    raise KeyError('unexpected key \"{}\" in state_dict'.format(name))\n\n        if strict:\n            missing = set(own_state.keys()) - set(state_dict.keys())\n            if len(missing) > 0:\n                raise KeyError('missing keys in state_dict: \"{}\"'.format(missing))\n"
  },
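`CALayer` is a squeeze-and-excitation style gate: global average pooling squeezes each channel to a scalar, a 1×1-conv bottleneck produces per-channel weights in (0, 1), and the feature map is rescaled. Rebuilt inline for clarity, with channel=64 and reduction=16 as in the defaults:

```python
import torch
import torch.nn as nn

# CALayer's gate as a plain Sequential: squeeze (pool), excite (bottleneck),
# then sigmoid weights multiplied back onto the feature map.
channel, reduction = 64, 16
gate = nn.Sequential(
    nn.AdaptiveAvgPool2d(1),
    nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
    nn.ReLU(inplace=True),
    nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
    nn.Sigmoid(),
)
x = torch.rand(2, channel, 24, 24)
with torch.no_grad():
    y = x * gate(x)          # same shape, channels reweighted into (0, 1)
print(y.shape)               # torch.Size([2, 64, 24, 24])
```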
  {
    "path": "codes/config/RealESRGAN/archs/rrdb.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\nclass ResidualDenseBlock_5C(nn.Module):\n    def __init__(self, nf=64, gc=32, bias=True):\n        super(ResidualDenseBlock_5C, self).__init__()\n        # gc: growth channel, i.e. intermediate channels\n        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)\n        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)\n        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)\n        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1\n        )\n\n    def forward(self, x):\n        x1 = self.lrelu(self.conv1(x))\n        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))\n        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))\n        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))\n        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))\n        return x5 * 0.2 + x\n\n\nclass RRDB(nn.Module):\n    \"\"\"Residual in Residual Dense Block\"\"\"\n\n    def __init__(self, nf, gc=32):\n        super(RRDB, self).__init__()\n        self.rdb1 = ResidualDenseBlock_5C(nf, gc)\n        self.rdb2 = ResidualDenseBlock_5C(nf, gc)\n        self.rdb3 = ResidualDenseBlock_5C(nf, gc)\n\n    def forward(self, x):\n        out = self.rdb1(x)\n        out = self.rdb2(out)\n        out = self.rdb3(out)\n        return out * 0.2 + x\n\n\n@ARCH_REGISTRY.register()\nclass RRDBNet(nn.Module):\n    def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4):\n        super(RRDBNet, self).__init__()\n        self.upscale = upscale\n        RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        self.body = make_layer(RRDB_block_f, nb)\n        self.conv_body = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        #### upsampling\n        self.conv_up1 = nn.Conv2d(nf, nf, 3, 1, 1)\n        if upscale == 4:\n            self.conv_up2 = nn.Conv2d(nf, nf, 3, 1, 1)\n        self.conv_hr = nn.Conv2d(nf, nf, 3, 1, 1)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n    def forward(self, x):\n        fea = self.conv_first(x)\n        trunk = self.conv_body(self.body(fea))\n        fea = fea + trunk\n\n        if self.upscale == 2 or self.upscale == 3:\n            fea = self.lrelu(\n                self.conv_up1(\n                    F.interpolate(fea, scale_factor=self.upscale, mode=\"nearest\")\n                )\n            )\n        if self.upscale == 4:\n            fea = self.lrelu(\n                self.conv_up1(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n            fea = self.lrelu(\n                self.conv_up2(F.interpolate(fea, scale_factor=2, mode=\"nearest\"))\n            )\n        out = self.conv_last(self.lrelu(self.conv_hr(fea)))\n\n        return out\n"
  },
  {
    "path": "codes/config/RealESRGAN/archs/srresnet.py",
    "content": "import functools\n\nfrom utils.registry import ARCH_REGISTRY\n\nfrom .module_util import *\n\n\n@ARCH_REGISTRY.register()\nclass MSRResNet(nn.Module):\n    \"\"\"modified SRResNet\"\"\"\n\n    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):\n        super(MSRResNet, self).__init__()\n        self.upscale = upscale\n\n        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n        basic_block = functools.partial(ResidualBlock_noBN, nf=nf)\n        self.recon_trunk = make_layer(basic_block, nb)\n\n        # upsampling\n        if self.upscale == 2:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n        elif self.upscale == 3:\n            self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(3)\n        elif self.upscale == 4:\n            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n            self.pixel_shuffle = nn.PixelShuffle(2)\n\n        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n        # activation function\n        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n        # initialization\n        initialize_weights(\n            [self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1\n        )\n        if self.upscale == 4:\n            initialize_weights(self.upconv2, 0.1)\n\n    def forward(self, x):\n        fea = self.lrelu(self.conv_first(x))\n        out = self.recon_trunk(fea)\n\n        if self.upscale == 4:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n        elif self.upscale == 3 or self.upscale == 2:\n            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n\n        out = self.conv_last(self.lrelu(self.HRconv(out)))\n        base = F.interpolate(\n            x, scale_factor=self.upscale, mode=\"bilinear\", align_corners=False\n        )\n        out += base\n        return out\n"
  },
  {
    "path": "codes/config/RealESRGAN/archs/translator.py",
    "content": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.registry import ARCH_REGISTRY\n\n\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\n    return nn.Conv2d(\n        in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias\n    )\n\n\nclass BasicBlock(nn.Sequential):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        kernel_size,\n        stride=1,\n        bias=False,\n        bn=True,\n        act=nn.ReLU(True),\n    ):\n\n        m = [\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=(kernel_size // 2),\n                stride=stride,\n                bias=bias,\n            )\n        ]\n        if bn:\n            m.append(nn.BatchNorm2d(out_channels))\n        if act is not None:\n            m.append(act)\n        super(BasicBlock, self).__init__(*m)\n\n\nclass ResBlock(nn.Module):\n    def __init__(\n        self,\n        conv,\n        n_feat,\n        kernel_size,\n        bias=True,\n        bn=False,\n        act=nn.ReLU(True),\n        res_scale=1,\n    ):\n\n        super(ResBlock, self).__init__()\n        m = []\n        for i in range(2):\n            m.append(conv(n_feat, n_feat, kernel_size, bias=bias))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if i == 0:\n                m.append(act)\n\n        self.body = nn.Sequential(*m)\n        self.res_scale = res_scale\n\n    def forward(self, x):\n        res = self.body(x).mul(self.res_scale)\n        res += x\n\n        return res\n\n\nclass Upsampler(nn.Sequential):\n    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):\n\n        m = []\n        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?\n            for _ in range(int(math.log(scale, 2))):\n                m.append(conv(n_feat, 4 * n_feat, 3, bias))\n                m.append(nn.PixelShuffle(2))\n                if bn:\n                    m.append(nn.BatchNorm2d(n_feat))\n                if act:\n                    m.append(act())\n        elif scale == 3:\n            m.append(conv(n_feat, 9 * n_feat, 3, bias))\n            m.append(nn.PixelShuffle(3))\n            if bn:\n                m.append(nn.BatchNorm2d(n_feat))\n            if act:\n                m.append(act())\n        elif scale == 1:\n            m.append(nn.Identity())\n        else:\n            raise NotImplementedError\n\n        super(Upsampler, self).__init__(*m)\n\n\n@ARCH_REGISTRY.register()\nclass Translator(nn.Module):\n    def __init__(self, in_nc, out_nc, nf, nb, scale=4, conv=default_conv):\n        super().__init__()\n\n        self.scale = scale\n\n        # define head module\n        if scale >= 1:\n            m_head = [conv(in_nc, nf, 3)]\n        else:\n            s = int(1 / scale)\n            m_head = [nn.Conv2d(in_nc, nf, kernel_size=2 * s + 1, stride=s, padding=s)]\n\n        # define body module\n        m_body = [\n            ResBlock(conv, nf, 3, act=nn.ReLU(True), res_scale=1) for _ in range(nb)\n        ]\n        m_body.append(conv(nf, nf, 3))\n\n        # define tail module\n        m_tail = [\n            Upsampler(conv, scale, nf, act=False) if scale > 1 else nn.Identity(),\n            conv(nf, out_nc, 3),\n        ]\n\n        self.head = nn.Sequential(*m_head)\n        self.body = nn.Sequential(*m_body)\n        self.tail = 
nn.Sequential(*m_tail)\n\n    def forward(self, x):\n\n        x = self.head(x)\n        f = self.body(x)\n        x = f + x\n        x = self.tail(x)\n\n        return x\n"
  },
  {
    "path": "codes/config/RealESRGAN/archs/vgg.py",
    "content": "import os\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn as nn\nfrom torchvision.models import vgg as vgg\n\nfrom utils.registry import ARCH_REGISTRY\n\nVGG_PRETRAIN_PATH = \"checkpoints/pretrained_models/vgg19-dcbb9e9d.pth\"\nNAMES = {\n    \"vgg11\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg13\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"pool5\",\n    ],\n    \"vgg16\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"pool5\",\n    ],\n    \"vgg19\": [\n        \"conv1_1\",\n        \"relu1_1\",\n        \"conv1_2\",\n        \"relu1_2\",\n        \"pool1\",\n        \"conv2_1\",\n        \"relu2_1\",\n        \"conv2_2\",\n        \"relu2_2\",\n        \"pool2\",\n        \"conv3_1\",\n        \"relu3_1\",\n        \"conv3_2\",\n        \"relu3_2\",\n        \"conv3_3\",\n        \"relu3_3\",\n        \"conv3_4\",\n        \"relu3_4\",\n        \"pool3\",\n        \"conv4_1\",\n        \"relu4_1\",\n        \"conv4_2\",\n        \"relu4_2\",\n        \"conv4_3\",\n        \"relu4_3\",\n        \"conv4_4\",\n        \"relu4_4\",\n        \"pool4\",\n        \"conv5_1\",\n        \"relu5_1\",\n        \"conv5_2\",\n        \"relu5_2\",\n        \"conv5_3\",\n        \"relu5_3\",\n        \"conv5_4\",\n        \"relu5_4\",\n        \"pool5\",\n    ],\n}\n\n\ndef insert_bn(names):\n    \"\"\"Insert bn layer after each conv.\n    Args:\n        names (list): The list of layer names.\n    Returns:\n        list: The list of layer names with bn layers.\n    \"\"\"\n    names_bn = []\n    for name in names:\n        names_bn.append(name)\n        if \"conv\" in name:\n            position = name.replace(\"conv\", \"\")\n            names_bn.append(\"bn\" + position)\n    return names_bn\n\n\n@ARCH_REGISTRY.register()\nclass VGGFeatureExtractor(nn.Module):\n    \"\"\"VGG network for feature extraction.\n    In this implementation, we allow users to choose whether use normalization\n    in the input feature and the type of vgg network. 
Note that the pretrained\n    path must fit the vgg type.\n    Args:\n        layer_name_list (list[str]): Forward function returns the corresponding\n            features according to the layer_name_list.\n            Example: {'relu1_1', 'relu2_1', 'relu3_1'}.\n        vgg_type (str): Set the type of vgg network. Default: 'vgg19'.\n        use_input_norm (bool): If True, normalize the input image. Importantly,\n            the input feature must in the range [0, 1]. Default: True.\n        range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].\n            Default: False.\n        requires_grad (bool): If true, the parameters of VGG network will be\n            optimized. Default: False.\n        remove_pooling (bool): If true, the max pooling operations in VGG net\n            will be removed. Default: False.\n        pooling_stride (int): The stride of max pooling operation. Default: 2.\n    \"\"\"\n\n    def __init__(\n        self,\n        layer_name_list,\n        vgg_type=\"vgg19\",\n        use_input_norm=True,\n        range_norm=False,\n        requires_grad=False,\n        remove_pooling=False,\n        pooling_stride=2,\n    ):\n        super(VGGFeatureExtractor, self).__init__()\n\n        self.layer_name_list = layer_name_list\n        self.use_input_norm = use_input_norm\n        self.range_norm = range_norm\n\n        self.names = NAMES[vgg_type.replace(\"_bn\", \"\")]\n        if \"bn\" in vgg_type:\n            self.names = insert_bn(self.names)\n\n        # only borrow layers that will be used to avoid unused params\n        max_idx = 0\n        for v in layer_name_list:\n            idx = self.names.index(v)\n            if idx > max_idx:\n                max_idx = idx\n\n        if os.path.exists(VGG_PRETRAIN_PATH):\n            vgg_net = getattr(vgg, vgg_type)(pretrained=False)\n            state_dict = torch.load(\n                VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage\n            )\n            vgg_net.load_state_dict(state_dict)\n        else:\n            vgg_net = getattr(vgg, vgg_type)(pretrained=True)\n\n        features = vgg_net.features[: max_idx + 1]\n\n        modified_net = OrderedDict()\n        for k, v in zip(self.names, features):\n            if \"pool\" in k:\n                # if remove_pooling is true, pooling operation will be removed\n                if remove_pooling:\n                    continue\n                else:\n                    # in some cases, we may want to change the default stride\n                    modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride)\n            else:\n                modified_net[k] = v\n\n        self.vgg_net = nn.Sequential(modified_net)\n\n        if not requires_grad:\n            self.vgg_net.eval()\n            for param in self.parameters():\n                param.requires_grad = False\n        else:\n            self.vgg_net.train()\n            for param in self.parameters():\n                param.requires_grad = True\n\n        if self.use_input_norm:\n            # the mean is for image with range [0, 1]\n            self.register_buffer(\n                \"mean\", torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)\n            )\n            # the std is for image with range [0, 1]\n            self.register_buffer(\n                \"std\", torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)\n            )\n\n    def forward(self, x):\n        \"\"\"Forward function.\n        Args:\n            x (Tensor): Input tensor with shape (n, 
c, h, w).\n        Returns:\n            Tensor: Forward results.\n        \"\"\"\n        if self.range_norm:\n            x = (x + 1) / 2\n        if self.use_input_norm:\n            x = (x - self.mean) / self.std\n\n        output = {}\n        for key, layer in self.vgg_net._modules.items():\n            x = layer(x)\n            if key in self.layer_name_list:\n                output[key] = x.clone()\n\n        return output\n"
  },
  {
    "path": "codes/config/RealESRGAN/count_flops.py",
    "content": "import argparse\nimport sys\n\nimport torch\nfrom torchsummaryX import summary\n\nsys.path.append(\"../../\")\nimport utils.option as option\nfrom models import create_model\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"--opt\",\n    type=str,\n    default=\"options/setting1/test/test_setting1_x4.yml\",\n    help=\"Path to option YMAL file of Predictor.\",\n)\nargs = parser.parse_args()\nopt = option.parse(args.opt, root_path=\".\", is_train=True)\n\nopt = option.dict_to_nonedict(opt)\nmodel = create_model(opt)\n\ntest_tensor = torch.randn(1, 3, 270, 180).cuda()\nfor name, net in model.networks.items():\n    summary(net.cuda(), x=test_tensor)\n    print(\"Above are results for net {}\".format(name))\n    input()\n"
  },
  {
    "path": "codes/config/RealESRGAN/inference.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport os.path as osp\nimport random\nimport sys\nimport cv2\nfrom collections import defaultdict\nfrom glob import glob\nfrom tqdm import tqdm\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom data.data_sampler import DistIterSampler\nfrom metrics import IQA\nfrom models import create_model\n\n\n\n#### options\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"-opt\",\n    type=str,\n    default=\"options/test/2020Track2.yml\",\n    help=\"Path to options YMAL file.\",\n)\nparser.add_argument(\"-input_dir\", type=str, default=\"../../../data_samples/LR\")\nparser.add_argument(\"-output_dir\", type=str, default=\"../../../data_samples/BSRGAN\")\nargs = parser.parse_args()\nopt = option.parse(args.opt, is_train=False)\n\nopt = option.dict_to_nonedict(opt)\n\nmodel = create_model(opt)\n\nif not osp.exists(args.output_dir):\n    os.makedirs(args.output_dir)\n\ntest_files = glob(osp.join(args.input_dir, \"*\"))\nfor inx, path in tqdm(enumerate(test_files)):\n    name = path.split(\"/\")[-1].split(\".\")[0]\n\n    img = cv2.imread(path)[:, :, [2, 1, 0]]\n    img = img.transpose(2, 0, 1)[None] / 255\n    img_t = torch.as_tensor(np.ascontiguousarray(img)).float()\n\n    model.test({\"src\": img_t})\n    outdict = model.get_current_visuals()\n\n    sr = outdict[\"sr\"]\n    sr_im = util.tensor2img(sr)\n\n    save_path = osp.join(args.output_dir, \"{}_x{}.png\".format(name, opt[\"scale\"]))\n    cv2.imwrite(save_path, sr_im)\n"
  },
  {
    "path": "codes/config/RealESRGAN/models/__init__.py",
    "content": "import importlib\nimport logging\nimport os\nimport os.path as osp\n\nfrom utils.registry import MODEL_REGISTRY\n\nlogger = logging.getLogger(\"base\")\n\nmodel_folder = osp.dirname(__file__)\nmodel_names = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(model_folder)\n    if v.endswith(\"_model.py\")\n]\n_model_modules = [\n    importlib.import_module(f\"models.{file_name}\") for file_name in model_names\n]\n\n\ndef create_model(opt, **kwarg):\n    model = opt[\"model\"]\n    m = MODEL_REGISTRY.get(model)(opt, **kwarg)\n    logger.info(\"Model [{:s}] is created.\".format(m.__class__.__name__))\n    return m\n"
  },
  {
    "path": "codes/config/RealESRGAN/models/base_model.py",
    "content": "import logging\nimport os\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\n\nfrom archs import build_loss, build_network\nfrom utils.registry import MODEL_REGISTRY\n\nfrom .lr_scheduler import CosineAnnealingRestartLR, MultiStepRestartLR\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass BaseModel:\n    def __init__(self, opt):\n        self.opt = opt\n\n        if opt[\"dist\"]:\n            self.rank = torch.distributed.get_rank()\n            self.world_size = torch.distributed.get_world_size()\n        else:\n            self.rank = 0  # non dist training\n\n        self.device = torch.device(\"cuda\" if opt[\"gpu_ids\"] is not None else \"cpu\")\n        self.is_train = opt[\"is_train\"]\n        self.log_dict = OrderedDict()\n\n        self.data_names = []\n        self.network_names = []\n        self.networks = {}\n\n        self.optimizers = {}\n        self.schedulers = {}\n\n    def feed_data(self, data):\n        pass\n\n    def optimize_parameters(self):\n        pass\n\n    def get_current_visuals(self):\n        pass\n\n    def get_current_losses(self):\n        pass\n\n    def print_network(self):\n        pass\n\n    def save(self, label):\n        pass\n\n    def load(self):\n        pass\n\n    def build_network(self, net_opt):\n\n        net = build_network(net_opt)\n        net = self.model_to_device(net)\n\n        if net_opt.get(\"pretrain\"):\n            pretrain = net_opt.pop(\"pretrain\")\n            self.load_network(net, pretrain[\"path\"], pretrain[\"strict_load\"])\n\n        self.print_network(net)\n        return net\n\n    def build_loss(self, loss_config):\n        loss = build_loss(loss_config)\n        loss = loss.to(self.device)\n        return loss\n\n    @staticmethod\n    def build_optimizer(net, optim_config):\n        optim_params = []\n        for v in net.parameters():\n            if v.requires_grad:\n                optim_params.append(v)\n        optim_type = optim_config.pop(\"type\")\n        optimizer = getattr(torch.optim, optim_type)(\n            params=optim_params, **optim_config\n        )\n        return optimizer\n\n    def setup_schedulers(self, scheduler_opt):\n        \"\"\"Set up schedulers.\"\"\"\n        scheduler_type = scheduler_opt.pop(\"type\")\n\n        if scheduler_type in [\"MultiStepLR\", \"MultiStepRestartLR\"]:\n            for name, optimizer in self.optimizers.items():\n                self.schedulers[name] = MultiStepRestartLR(optimizer, **scheduler_opt)\n\n        elif scheduler_type == \"CosineAnnealingRestartLR\":\n            for name, optimizer in self.ptimizers.items():\n                self.schedulers[name] = CosineAnnealingRestartLR(\n                    optimizer, **scheduler_opt\n                )\n        else:\n            raise NotImplementedError(\n                f\"Scheduler {scheduler_type} is not implemented yet.\"\n            )\n\n    def model_to_device(self, net):\n        \"\"\"Model to device. 
It also warps models with DistributedDataParallel\n        or DataParallel.\n        Args:\n            net (nn.Module)\n        \"\"\"\n        net = net.to(self.device)\n        if self.opt[\"dist\"]:\n            net = DistributedDataParallel(net, device_ids=[torch.cuda.current_device()])\n        else:\n            net = DataParallel(net)\n        return net\n\n    def print_network(self, net):\n        # Generator\n        s, n = self.get_network_description(net)\n        if isinstance(net, nn.DataParallel) or isinstance(net, DistributedDataParallel):\n            net_struc_str = \"{} - {}\".format(\n                net.__class__.__name__, net.module.__class__.__name__\n            )\n        else:\n            net_struc_str = \"{}\".format(net.__class__.__name__)\n        if self.rank <= 0:\n            logger.info(\n                \"Network G structure: {}, with parameters: {:,d}\".format(\n                    net_struc_str, n\n                )\n            )\n            logger.info(s)\n\n    def set_optimizer(self, names, operation):\n        for name in names:\n            getattr(self.optimizers[name], operation)()\n\n    def set_requires_grad(self, names, requires_grad):\n        for name in names:\n            for v in self.networks[name].parameters():\n                v.requires_grad = requires_grad\n\n    def set_network_state(self, names, state):\n        for name in names:\n            getattr(self.networks[name], state)()\n\n    def clip_grad_norm(self, names, norm):\n        for name in names:\n            nn.utils.clip_grad_norm_(self.networks[name].parameters(), max_norm=norm)\n\n    def _set_lr(self, lr_groups_l):\n        \"\"\"set learning rate for warmup,\n        lr_groups_l: list for lr_groups. each for a optimizer\"\"\"\n        for optimizer, lr_groups in zip(self.optimizers, lr_groups_l):\n            for param_group, lr in zip(optimizer.param_groups, lr_groups):\n                param_group[\"lr\"] = lr\n\n    def _get_init_lr(self):\n        # get the initial lr, which is set by the scheduler\n        init_lr_groups_l = []\n        for optimizer in self.optimizers:\n            init_lr_groups_l.append([v[\"initial_lr\"] for v in optimizer.param_groups])\n        return init_lr_groups_l\n\n    def update_learning_rate(self, cur_iter, warmup_iter=-1):\n        for _, scheduler in self.schedulers.items():\n            scheduler.step()\n        #### set up warm up learning rate\n        if cur_iter < warmup_iter:\n            # get initial lr for each group\n            init_lr_g_l = self._get_init_lr()\n            # modify warming-up learning rates\n            warm_up_lr_l = []\n            for init_lr_g in init_lr_g_l:\n                warm_up_lr_l.append([v / warmup_iter * cur_iter for v in init_lr_g])\n            # set learning rate\n            self._set_lr(warm_up_lr_l)\n\n    def get_current_learning_rate(self):\n        # return self.schedulers[0].get_lr()[0]\n        return list(self.optimizers.values())[0].param_groups[0][\"lr\"]\n\n    def get_network_description(self, network):\n        \"\"\"Get the string and total parameters of the network\"\"\"\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        s = str(network)\n        n = sum(map(lambda x: x.numel(), network.parameters()))\n        return s, n\n\n    def save_network(self, network, network_label, iter_label):\n        save_filename = \"{}_{}.pth\".format(iter_label, 
network_label)\n        save_path = os.path.join(self.opt[\"path\"][\"models\"], save_filename)\n        if isinstance(network, nn.DataParallel) or isinstance(\n            network, DistributedDataParallel\n        ):\n            network = network.module\n        state_dict = network.state_dict()\n        for key, param in state_dict.items():\n            state_dict[key] = param.cpu()\n        torch.save(state_dict, save_path)\n\n    def save(self, iter_label):\n        for name in self.optimizers.keys():\n            self.save_network(self.networks[name], name, iter_label)\n\n    def load_network(self, network, load_path, strict=True):\n        if load_path is not None:\n            if isinstance(network, nn.DataParallel) or isinstance(\n                network, DistributedDataParallel\n            ):\n                network = network.module\n            load_net = torch.load(load_path)\n            load_net_clean = OrderedDict()  # remove unnecessary 'module.'\n            for k, v in load_net.items():\n                if k.startswith(\"module.\"):\n                    load_net_clean[k[7:]] = v\n                else:\n                    load_net_clean[k] = v\n            network.load_state_dict(load_net_clean, strict=strict)\n\n    def save_training_state(self, epoch, iter_step):\n        \"\"\"Saves training state during training, which will be used for resuming\"\"\"\n        state = {\"epoch\": epoch, \"iter\": iter_step, \"schedulers\": {}, \"optimizers\": {}}\n        for k, s in self.schedulers.items():\n            state[\"schedulers\"][k] = s.state_dict()\n        for k, o in self.optimizers.items():\n            state[\"optimizers\"][k] = o.state_dict()\n        save_filename = \"{}.state\".format(iter_step)\n        save_path = os.path.join(self.opt[\"path\"][\"training_state\"], save_filename)\n        torch.save(state, save_path)\n\n    def resume_training(self, resume_state):\n        \"\"\"Resume the optimizers and schedulers for training\"\"\"\n        resume_optimizers = resume_state[\"optimizers\"]\n        resume_schedulers = resume_state[\"schedulers\"]\n        assert len(resume_optimizers) == len(\n            self.optimizers\n        ), \"Wrong lengths of optimizers\"\n        assert len(resume_schedulers) == len(\n            self.schedulers\n        ), \"Wrong lengths of schedulers\"\n        for name, o in resume_optimizers.items():\n            self.optimizers[name].load_state_dict(o)\n        for name, s in resume_schedulers.items():\n            self.schedulers[name].load_state_dict(s)\n\n    def reduce_loss_dict(self, loss_dict):\n        \"\"\"reduce loss dict.\n        In distributed training, it averages the losses among different GPUs .\n        Args:\n            loss_dict (OrderedDict): Loss dict.\n        \"\"\"\n        with torch.no_grad():\n            if self.opt[\"dist\"]:\n                keys = []\n                losses = []\n                for name, value in loss_dict.items():\n                    keys.append(name)\n                    losses.append(value)\n                losses = torch.stack(losses, 0)\n                torch.distributed.reduce(losses, dst=0)\n                if self.rank == 0:\n                    losses /= self.world_size\n                loss_dict = {key: loss for key, loss in zip(keys, losses)}\n\n            log_dict = OrderedDict()\n            for name, value in loss_dict.items():\n                log_dict[name] = value.mean().item()\n\n            return log_dict\n\n    def get_current_log(self):\n        return 
self.log_dict\n"
  },
  {
    "path": "codes/config/RealESRGAN/models/sr_model.py",
    "content": "import logging\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.registry import MODEL_REGISTRY\n\nfrom .base_model import BaseModel\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass SRModel(BaseModel):\n    def __init__(self, opt):\n        super().__init__(opt)\n\n        self.data_names = [\"lr\", \"hr\"]\n\n        self.network_names = [\"netSR\"]\n        self.networks = {}\n\n        self.loss_names = [\"sr_adv\", \"sr_pix\", \"sr_percep\"]\n        self.loss_weights = {}\n        self.losses = {}import logging\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.registry import MODEL_REGISTRY\n\nfrom .base_model import BaseModel\n\nlogger = logging.getLogger(\"base\")\n\n\n@MODEL_REGISTRY.register()\nclass SRModel(BaseModel):\n    def __init__(self, opt):\n        super().__init__(opt)\n\n        self.data_names = [\"lr\", \"hr\"]\n\n        self.network_names = [\"netSR\"]\n        self.networks = {}\n\n        self.loss_names = [\"sr_adv\", \"sr_pix\", \"sr_percep\"]\n        self.loss_weights = {}\n        self.losses = {}\n        self.optimizers = {}\n\n        # define networks and load pretrained models\n        nets_opt = opt[\"networks\"]\n        defined_network_names = list(nets_opt.keys())\n        assert set(defined_network_names).issubset(set(self.network_names))\n\n        for name in defined_network_names:\n            setattr(self, name, self.build_network(nets_opt[name]))\n            self.networks[name] = getattr(self, name)\n\n        if self.is_train:\n            # setup loss, optimizers, schedulers\n            self.setup_train(opt[\"train\"])\n\n    def feed_data(self, data):\n\n        self.lr = data[\"src\"].to(self.device)\n        self.hr = data[\"tgt\"].to(self.device)\n\n    def forward(self):\n\n        self.sr = self.netSR(self.lr)\n\n    def optimize_parameters(self, step):\n\n        self.forward()\n\n        loss_dict = OrderedDict()\n\n        l_sr = 0\n\n        sr_pix = self.losses[\"sr_pix\"](self.hr, self.sr)\n        loss_dict[\"sr_pix\"] = sr_pix\n        l_sr += self.loss_weights[\"sr_pix\"] * sr_pix\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD\"], False)\n            sr_adv_g = self.calculate_rgan_loss_G(\n                self.netD, self.losses[\"sr_adv\"], self.hr, self.sr\n            )\n            loss_dict[\"sr_adv_g\"] = sr_adv_g\n            l_sr += self.loss_weights[\"sr_adv\"] * sr_adv_g\n\n        if self.losses.get(\"sr_percep\"):\n            sr_percep, sr_style = self.losses[\"sr_percep\"](self.hr, self.sr)\n            loss_dict[\"sr_percep\"] = sr_percep\n            if sr_style is not None:\n                loss_dict[\"sr_style\"] = sr_style\n                l_sr += self.loss_weights[\"sr_percep\"] * sr_style\n            l_sr += self.loss_weights[\"sr_percep\"] * sr_percep\n\n        self.set_optimizer(names=[\"netSR\"], operation=\"zero_grad\")\n        l_sr.backward()\n        self.set_optimizer(names=[\"netSR\"], operation=\"step\")\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD\"], True)\n            sr_adv_d = self.calculate_rgan_loss_D(\n                self.netD, self.losses[\"sr_adv\"], self.hr, self.sr\n            )\n            loss_dict[\"sr_adv_d\"] = sr_adv_d\n\n            self.optimizers[\"netD\"].zero_grad()\n            sr_adv_d.backward()\n            self.optimizers[\"netD\"].step()\n\n        
self.log_dict = self.reduce_loss_dict(loss_dict)\n\n    def calculate_rgan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n        loss_real = criterion(\n            d_pred_real - d_pred_fake.detach().mean(), True, is_disc=False\n        )\n        loss_fake = criterion(\n            d_pred_fake - d_pred_real.detach().mean(), False, is_disc=False\n        )\n\n        loss = (loss_real + loss_fake) / 2\n\n        return loss\n\n    def calculate_rgan_loss_G(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake)\n        d_pred_real = netD(real).detach()\n        loss_real = criterion(d_pred_real - d_pred_fake.mean(), False, is_disc=False)\n        loss_fake = criterion(d_pred_fake - d_pred_real.mean(), True, is_disc=False)\n\n        loss = (loss_real + loss_fake) / 2\n\n        return loss\n\n    def test(self, data, crop_size=None):\n        self.real_lr = data[\"src\"].to(self.device)\n        self.netSR.eval()\n        with torch.no_grad():\n            if crop_size is None:\n                self.fake_real_hr = self.netSR(self.real_lr)\n            else:\n                self.fake_real_hr = self.crop_test(self.real_lr, crop_size)\n        self.netSR.train()\n    \n    def crop_test(self, lr, crop_size):\n        b, c, h, w = lr.shape\n        scale = self.opt[\"scale\"]\n\n        h_start = list(range(0, h-crop_size, crop_size))\n        w_start = list(range(0, w-crop_size, crop_size))\n\n        sr1 = torch.zeros(b, c, int(h*scale), int(w* scale), device=self.device) - 1\n        for hs in h_start:\n            for ws in w_start:\n                lr_patch = lr[:, :, hs: hs+crop_size, ws: ws+crop_size]\n                sr_patch = self.netSR(lr_patch)\n\n                sr1[:, :, \n                    int(hs*scale):int((hs+crop_size)*scale),\n                    int(ws*scale):int((ws+crop_size)*scale)\n                ] = sr_patch\n        \n        h_end = list(range(h, crop_size, -crop_size))\n        w_end = list(range(w, crop_size, -crop_size))\n\n        sr2 = torch.zeros(b, c, int(h*scale), int(w* scale), device=self.device) - 1\n        for hd in h_end:\n            for wd in w_end:\n                lr_patch = lr[:, :, hd-crop_size:hd, wd-crop_size:wd]\n                sr_patch = self.netSR(lr_patch)\n\n                sr2[:, :, \n                    int((hd-crop_size)*scale):int(hd*scale),\n                    int((wd-crop_size)*scale):int(wd*scale)\n                ] = sr_patch\n\n        mask1 = (\n            (sr1 == -1).float() * 0 + \n            (sr2 == -1).float() * 1 + \n            ((sr1 > 0) * (sr2 > 0)).float() * 0.5\n        )\n\n        mask2 = (\n            (sr1 == -1).float() * 1 + \n            (sr2 == -1).float() * 0 + \n            ((sr1 > 0) * (sr2 > 0)).float() * 0.5\n        )\n\n        sr = mask1 * sr1 + mask2 * sr2\n\n        return sr\n            \n    def get_current_visuals(self, need_GT=True):\n        out_dict = OrderedDict()\n        out_dict[\"lr\"] = self.real_lr.detach()[0].float().cpu()\n        out_dict[\"sr\"] = self.fake_real_hr.detach()[0].float().cpu()\n        return out_dict\n\n        self.optimizers = {}\n\n        # define networks and load pretrained models\n        nets_opt = opt[\"networks\"]\n        defined_network_names = list(nets_opt.keys())\n        assert set(defined_network_names).issubset(set(self.network_names))\n\n        for name in defined_network_names:\n            setattr(self, name, self.build_network(nets_opt[name]))\n    
        self.networks[name] = getattr(self, name)\n\n        if self.is_train:\n            train_opt = opt[\"train\"]\n\n            # define losses\n            loss_opt = train_opt[\"losses\"]\n            defined_loss_names = list(loss_opt.keys())\n            assert set(defined_loss_names).issubset(set(self.loss_names))\n\n            for name in defined_loss_names:\n                loss_conf = loss_opt.get(name)\n                if loss_conf[\"weight\"] > 0:\n                    self.loss_weights[name] = loss_conf.pop(\"weight\")\n                    self.losses[name] = self.build_loss(loss_conf)\n\n            # build optmizers\n            optimizer_opt = train_opt[\"optimizers\"]\n            defined_optimizer_names = list(optimizer_opt.keys())\n            assert set(defined_optimizer_names).issubset(self.networks.keys())\n\n            for name in defined_optimizer_names:\n                optim_config = optimizer_opt[name]\n                self.optimizers[name] = self.build_optimizer(\n                    getattr(self, name), optim_config\n                )\n\n            # set schedulers\n            scheduler_opt = train_opt[\"scheduler\"]\n            self.setup_schedulers(scheduler_opt)\n\n            # set to training state\n            self.set_network_state(self.networks.keys(), \"train\")\n\n    def feed_data(self, data):\n\n        self.lr = data[\"src\"].to(self.device)\n        self.hr = data[\"tgt\"].to(self.device)\n\n    def forward(self):\n\n        self.sr = self.netSR(self.lr)\n\n    def optimize_parameters(self, step):\n\n        self.forward()\n\n        loss_dict = OrderedDict()\n\n        l_sr = 0\n\n        sr_pix = self.losses[\"sr_pix\"](self.hr, self.sr)\n        loss_dict[\"sr_pix\"] = sr_pix\n        l_sr += self.loss_weights[\"sr_pix\"] * sr_pix\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD\"], False)\n            sr_adv_g = self.calculate_rgan_loss_G(\n                self.netD, self.losses[\"sr_adv\"], self.hr, self.sr\n            )\n            loss_dict[\"sr_adv_g\"] = sr_adv_g\n            l_sr += self.loss_weights[\"sr_adv\"] * sr_adv_g\n\n        if self.losses.get(\"sr_percep\"):\n            sr_percep, sr_style = self.losses[\"sr_percep\"](self.hr, self.sr)\n            loss_dict[\"sr_percep\"] = sr_percep\n            if sr_style is not None:\n                loss_dict[\"sr_style\"] = sr_style\n                l_sr += self.loss_weights[\"sr_percep\"] * sr_style\n            l_sr += self.loss_weights[\"sr_percep\"] * sr_percep\n\n        self.set_optimizer(names=[\"netSR\"], operation=\"zero_grad\")\n        l_sr.backward()\n        self.set_optimizer(names=[\"netSR\"], operation=\"step\")\n\n        if self.losses.get(\"sr_adv\"):\n            self.set_requires_grad([\"netD\"], True)\n            sr_adv_d = self.calculate_rgan_loss_D(\n                self.netD, self.losses[\"sr_adv\"], self.hr, self.sr\n            )\n            loss_dict[\"sr_adv_d\"] = sr_adv_d\n\n            self.optimizers[\"netD\"].zero_grad()\n            sr_adv_d.backward()\n            self.optimizers[\"netD\"].step()\n\n        self.log_dict = self.reduce_loss_dict(loss_dict)\n\n    def calculate_rgan_loss_D(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake.detach())\n        d_pred_real = netD(real)\n        loss_real = criterion(\n            d_pred_real - d_pred_fake.detach().mean(), True, is_disc=False\n        )\n        loss_fake = criterion(\n            d_pred_fake - 
d_pred_real.detach().mean(), False, is_disc=False\n        )\n\n        loss = (loss_real + loss_fake) / 2\n\n        return loss\n\n    def calculate_rgan_loss_G(self, netD, criterion, real, fake):\n\n        d_pred_fake = netD(fake)\n        d_pred_real = netD(real).detach()\n        loss_real = criterion(d_pred_real - d_pred_fake.mean(), False, is_disc=False)\n        loss_fake = criterion(d_pred_fake - d_pred_real.mean(), True, is_disc=False)\n\n        loss = (loss_real + loss_fake) / 2\n\n        return loss\n\n    def test(self, data, crop_size=None):\n        self.real_lr = data[\"src\"].to(self.device)\n        self.netSR.eval()\n        with torch.no_grad():\n            if crop_size is None:\n                self.fake_real_hr = self.netSR(self.real_lr)\n            else:\n                self.fake_real_hr = self.crop_test(self.real_lr, crop_size)\n        self.netSR.train()\n    \n    def crop_test(self, lr, crop_size):\n        b, c, h, w = lr.shape\n        scale = self.opt[\"scale\"]\n\n        h_start = list(range(0, h-crop_size, crop_size))\n        w_start = list(range(0, w-crop_size, crop_size))\n\n        sr1 = torch.zeros(b, c, int(h*scale), int(w* scale), device=self.device) - 1\n        for hs in h_start:\n            for ws in w_start:\n                lr_patch = lr[:, :, hs: hs+crop_size, ws: ws+crop_size]\n                sr_patch = self.netSR(lr_patch)\n\n                sr1[:, :, \n                    int(hs*scale):int((hs+crop_size)*scale),\n                    int(ws*scale):int((ws+crop_size)*scale)\n                ] = sr_patch\n        \n        h_end = list(range(h, crop_size, -crop_size))\n        w_end = list(range(w, crop_size, -crop_size))\n\n        sr2 = torch.zeros(b, c, int(h*scale), int(w* scale), device=self.device) - 1\n        for hd in h_end:\n            for wd in w_end:\n                lr_patch = lr[:, :, hd-crop_size:hd, wd-crop_size:wd]\n                sr_patch = self.netSR(lr_patch)\n\n                sr2[:, :, \n                    int((hd-crop_size)*scale):int(hd*scale),\n                    int((wd-crop_size)*scale):int(wd*scale)\n                ] = sr_patch\n\n        mask1 = (\n            (sr1 == -1).float() * 0 + \n            (sr2 == -1).float() * 1 + \n            ((sr1 > 0) * (sr2 > 0)).float() * 0.5\n        )\n\n        mask2 = (\n            (sr1 == -1).float() * 1 + \n            (sr2 == -1).float() * 0 + \n            ((sr1 > 0) * (sr2 > 0)).float() * 0.5\n        )\n\n        sr = mask1 * sr1 + mask2 * sr2\n\n        return sr\n            \n    def get_current_visuals(self, need_GT=True):\n        out_dict = OrderedDict()\n        out_dict[\"lr\"] = self.real_lr.detach()[0].float().cpu()\n        out_dict[\"sr\"] = self.fake_real_hr.detach()[0].float().cpu()\n        return out_dict\n"
  },
  {
    "path": "codes/config/RealESRGAN/options/test/2017Track2_2020Track1.yml",
    "content": "#### general settings\nname: 2017Track2_2020Track1\nuse_tb_logger: false\nmodel: SRModel\nscale: 4\ngpu_ids: [6]\n\nmetrics: [psnr, ssim, lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2017Track2\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2017/valid_LR/x4.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  test2:\n    name: 2020Track1\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2020/track1/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: RRDBNet\n    setting:\n      in_nc: 3\n      out_nc: 3\n      nf: 64\n      nb: 23\n      gc: 32\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/RealESRGAN/RealESRGANx4.pth\n      strict_load: true\n"
  },
  {
    "path": "codes/config/RealESRGAN/options/test/2018Track2_2018Track4.yml",
    "content": "#### general settings\nname: 2018Track2_2018Track4\nuse_tb_logger: false\nmodel: SRModel\nscale: 4\ngpu_ids: [6]\n\nmetrics: [best_psnr, best_ssim, best_lpips, niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2018Track2\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track2/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n  test2:\n    name: 2018Track4\n    mode: PairedDataset\n    data_type: lmdb\n    dataroot_src: /home/lzx/SRDatasets/NTIRE2018/track4/valid.lmdb\n    dataroot_tgt: /home/lzx/SRDatasets/DIV2K_valid/HR/x4.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: RRDBNet\n    setting:\n      in_nc: 3\n      out_nc: 3\n      nf: 64\n      nb: 23\n      gc: 32\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/RealESRGAN/RealESRGANx4.pth\n      strict_load: true\n"
  },
  {
    "path": "codes/config/RealESRGAN/options/test/2020Track2.yml",
    "content": "#### general settings\nname: 2020Track2\nuse_tb_logger: false\nmodel: SRModel\nscale: 4\ngpu_ids: [5]\n\nmetrics: [niqe, piqe, brisque] \n\ndatasets:\n  test1:\n    name: 2020Track2\n    mode: SingleDataset\n    data_type: lmdb\n    dataroot: /home/lzx/SRDatasets/NTIRE2020/track2/test.lmdb\n\n#### network structures\nnetworks:\n  netSR:\n    which_network: RRDBNet\n    setting:\n      in_nc: 3\n      out_nc: 3\n      nf: 64\n      nb: 23\n      gc: 32\n      upscale: 4\n    pretrain: \n      path: ../../../checkpoints/RealESRGAN/RealESRGANx4.pth\n      strict_load: true"
  },
  {
    "path": "codes/config/RealESRGAN/test.py",
    "content": "import argparse\nimport logging\nimport os.path\nimport sys\nimport time\nfrom collections import OrderedDict, defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\nfrom utils import bgr2ycbcr, imresize\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=False)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    util.mkdirs(\n        (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n    )\n\n    os.system(\"rm ./result\")\n    os.symlink(os.path.join(opt[\"path\"][\"results_root\"], \"..\"), \"./result\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    torch.backends.cudnn.benchmark = True\n\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"test_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO,\n        screen=True,\n        tofile=True,\n    )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    logger = logging.getLogger(\"base\")\n    logger.info(option.dict2str(opt))\n\n    # Create test dataset and dataloader\n    test_datasets = []\n    test_loaders = []\n\n    for phase, dataset_opt in 
sorted(opt[\"datasets\"].items()):\n\n        test_set = create_dataset(dataset_opt)\n        test_loader = create_dataloader(test_set, dataset_opt, opt[\"dist\"])\n\n        if rank == 0:\n            logger.info(\n                \"Number of test images in [{:s}]: {:d}\".format(\n                    dataset_opt[\"name\"], len(test_set)\n                )\n            )\n        test_datasets.append(test_set)\n        test_loaders.append(test_loader)\n\n    # load pretrained model by default\n    model = create_model(opt)\n\n    for test_dataset, test_loader in zip(test_datasets, test_loaders):\n\n        test_set_name = test_dataset.opt[\"name\"]\n        dataset_dir = os.path.join(opt[\"path\"][\"results_root\"], test_set_name)\n\n        if rank == 0:\n            logger.info(\"\\nTesting [{:s}]...\".format(test_set_name))\n            util.mkdir(dataset_dir)\n\n        validate(\n            model,\n            test_dataset,\n            test_loader,\n            opt,\n            measure,\n            dataset_dir,\n            test_set_name,\n            logger,\n        )\n\n\ndef validate(\n    model, dataset, dist_loader, opt, measure, dataset_dir, test_set_name, logger\n):\n\n    test_results = {}\n    test_results_y = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n        test_results_y[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    indices = list(range(rank, len(dataset), world_size))\n    for (\n        idx,\n        test_data,\n    ) in enumerate(dist_loader):\n        idx = indices[idx]\n\n        img_path = test_data[\"src_path\"][0]\n        img_name = img_path.split(\"/\")[-1].split(\".\")[0]\n\n        model.test(test_data)\n        visuals = model.get_current_visuals()\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n\n        suffix = opt[\"suffix\"]\n        if suffix:\n            save_img_path = os.path.join(dataset_dir, img_name + suffix + \".png\")\n        else:\n            save_img_path = os.path.join(dataset_dir, img_name + \".png\")\n        util.save_img(sr_img, save_img_path)\n\n        message = \"img:{:15s}; \".format(img_name)\n\n        crop_border = opt[\"crop_border\"] if opt[\"crop_border\"] else opt[\"scale\"]\n\n        if crop_border == 0:\n            cropped_sr_img = sr_img\n        else:\n            cropped_sr_img = sr_img[\n                crop_border:-crop_border, crop_border:-crop_border, :\n            ]\n\n        if \"tgt\" in test_data.keys():\n            gt_img = util.tensor2img(test_data[\"tgt\"][0].double().cpu())\n\n            if crop_border == 0:\n                cropped_gt_img = gt_img\n            else:\n                cropped_gt_img = gt_img[\n                    crop_border:-crop_border, crop_border:-crop_border, :\n                ]\n        else:\n            gt_img = None\n            cropped_gt_img = None\n\n        message += \"Scores - \"\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n            message += \"{}: {:.6f}; \".format(k, v)\n\n        if sr_img.shape[2] == 3:  # RGB image\n            sr_img_y = bgr2ycbcr(sr_img, only_y=True)\n            if crop_border == 0:\n                cropped_sr_img_y = sr_img_y * 255\n            else:\n                
cropped_sr_img_y = (\n                    sr_img_y[crop_border:-crop_border, crop_border:-crop_border] * 255\n                )\n            if gt_img is not None:\n                gt_img_y = bgr2ycbcr(gt_img, only_y=True)\n                if crop_border == 0:\n                    cropped_gt_img_y = gt_img_y * 255\n                else:\n                    cropped_gt_img_y = (\n                        gt_img_y[crop_border:-crop_border, crop_border:-crop_border]\n                        * 255\n                    )\n            else:\n                gt_img_y = None\n                cropped_gt_img_y = None\n\n            message += \"Y Scores - \"\n            scores = measure(\n                res=cropped_sr_img_y, ref=cropped_gt_img_y, metrics=opt[\"metrics\"]\n            )\n            for k, v in scores.items():\n                test_results_y[k][idx] = v\n                message += \"{}: {:.6f}; \".format(k, v)\n\n        logger.info(message)\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n        for k, v in test_results_y.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    # log\n    avg_results = {}\n    message = \"Average Results for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger.info(message)\n\n    avg_results_y = {}\n    message = \"Average Results on Y channel for {}\\n\".format(test_set_name)\n\n    if rank == 0:\n        for k, v in test_results_y.items():\n            avg_results_y[k] = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, avg_results_y[k])\n\n        logger.info(message)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/config/RealESRGAN/train.py",
    "content": "import argparse\nimport logging\nimport math\nimport os\nimport random\nimport sys\nimport time\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\nsys.path.append(\"../../\")\nimport utils as util\nimport utils.option as option\nfrom data import create_dataloader, create_dataset\nfrom metrics import IQA\nfrom models import create_model\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Train keypoints network\")\n    # general\n    parser.add_argument(\n        \"--opt\", help=\"experiment configure file name\", required=True, type=str\n    )\n    parser.add_argument(\n        \"--root_path\",\n        help=\"experiment configure file name\",\n        default=\"../../../\",\n        type=str,\n    )\n    # distributed training\n    parser.add_argument(\"--gpu\", help=\"gpu id for multiprocessing training\", type=str)\n    parser.add_argument(\n        \"--world-size\",\n        default=1,\n        type=int,\n        help=\"number of nodes for distributed training\",\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=\"tcp://127.0.0.1:23456\",\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\n        \"--rank\", default=0, type=int, help=\"node rank for distributed training\"\n    )\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef setup_dataloaer(opt, logger):\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        rank = 0\n        world_size = 1\n\n    for phase, dataset_opt in opt[\"datasets\"].items():\n        if phase == \"train\":\n            train_set = create_dataset(dataset_opt)\n            train_loader = create_dataloader(train_set, dataset_opt, opt[\"dist\"])\n            total_iters = opt[\"train\"][\"niter\"]\n            total_epochs = total_iters // (len(train_loader) - 1) + 1\n            if rank == 0:\n                logger.info(\n                    \"Number of train images: {:,d}, iters: {:,d}\".format(\n                        len(train_set), len(train_loader)\n                    )\n                )\n                logger.info(\n                    \"Total epochs needed: {:d} for iters {:,d}\".format(\n                        total_epochs, opt[\"train\"][\"niter\"]\n                    )\n                )\n\n        elif phase == \"val\":\n            val_set = create_dataset(dataset_opt)\n            val_loader = create_dataloader(val_set, dataset_opt, opt[\"dist\"])\n            if rank == 0:\n                logger.info(\n                    \"Number of val images in [{:s}]: {:d}\".format(\n                        dataset_opt[\"name\"], len(val_set)\n                    )\n                )\n        else:\n            raise NotImplementedError(\"Phase [{:s}] is not recognized.\".format(phase))\n\n    assert train_loader is not None\n    assert val_loader is not None\n\n    return train_set, train_loader, val_set, val_loader, total_iters, total_epochs\n\n\ndef main():\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=True)\n\n    # convert to NoneDict, which returns None for missing keys\n    opt = option.dict_to_nonedict(opt)\n\n    if args.dist_url == \"env://\" and args.world_size == -1:\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n    ngpus_per_node = 
torch.cuda.device_count()\n    args.world_size = ngpus_per_node * args.world_size\n\n    opt[\"dist\"] = args.world_size > 1\n\n    if opt[\"train\"].get(\"resume_state\", None) is None:\n        util.mkdir_and_rename(\n            opt[\"path\"][\"experiments_root\"]\n        )  # rename experiment folder if exists\n        util.mkdirs(\n            (path for key, path in opt[\"path\"].items() if not key == \"experiments_root\")\n        )\n        os.system(\"rm ./log\")\n        os.symlink(os.path.join(opt[\"path\"][\"experiments_root\"], \"..\"), \"./log\")\n\n    if opt[\"dist\"]:\n        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, opt, args))\n    else:\n        main_worker(0, 1, opt, args)\n\n\ndef main_worker(gpu, ngpus_per_node, opt, args):\n\n    if opt[\"dist\"]:\n        if args.dist_url == \"env://\" and args.rank == -1:\n            rank = int(os.environ[\"RANK\"])\n\n        rank = args.rank * ngpus_per_node + gpu\n        print(\n            f\"Init process group: dist_url: \\\n            {args.dist_url}, world_size: {args.world_size}, rank: {rank}\"\n        )\n\n        dist.init_process_group(\n            backend=\"nccl\",\n            init_method=args.dist_url,\n            world_size=args.world_size,\n            rank=rank,\n        )\n\n        torch.cuda.set_device(gpu)\n\n    else:\n        rank = 0\n\n    seed = opt[\"train\"][\"manual_seed\"]\n    if seed is None:\n        util.set_random_seed(rank)\n\n    torch.backends.cudnn.benchmark = True\n    # torch.backends.cudnn.deterministic = True\n\n    # setup tensorboard and val logger\n    if rank == 0:\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger = SummaryWriter(log_dir=\"log/{}/tb_logger/\".format(opt[\"name\"]))\n\n        util.setup_logger(\n            \"val\",\n            opt[\"path\"][\"log\"],\n            \"val_\" + opt[\"name\"],\n            level=logging.INFO,\n            screen=True,\n            tofile=True,\n        )\n\n    measure = IQA(metrics=opt[\"metrics\"], cuda=True)\n\n    # config loggers. 
Logging will not work until it is set up\n    util.setup_logger(\n        \"base\",\n        opt[\"path\"][\"log\"],\n        \"train_\" + opt[\"name\"] + \"_rank{}\".format(rank),\n        level=logging.INFO if rank == 0 else logging.ERROR,\n        screen=True,\n        tofile=True,\n    )\n\n    logger = logging.getLogger(\"base\")\n    if rank == 0:\n        logger.info(option.dict2str(opt))\n\n    # create dataset\n    (\n        train_set,\n        train_loader,\n        val_set,\n        val_loader,\n        total_iters,\n        total_epochs,\n    ) = setup_dataloader(opt, logger)\n\n    # create model\n    model = create_model(opt)\n\n    # load the resume state if one exists\n    if opt[\"train\"].get(\"resume_state\", None):\n        # distributed resuming: all load into default GPU\n        device_id = gpu\n        resume_state = torch.load(\n            opt[\"train\"][\"resume_state\"],\n            map_location=lambda storage, loc: storage.cuda(device_id),\n        )\n\n        logger.info(\n            \"Resuming training from epoch: {}, iter: {}.\".format(\n                resume_state[\"epoch\"], resume_state[\"iter\"]\n            )\n        )\n\n        start_epoch = resume_state[\"epoch\"]\n        current_step = resume_state[\"iter\"]\n        model.resume_training(resume_state)  # handle optimizers and schedulers\n\n    else:\n        current_step = 0\n        start_epoch = 0\n\n    logger.info(\n        \"Start training from epoch: {:d}, iter: {:d}\".format(start_epoch, current_step)\n    )\n    data_time, iter_time = time.time(), time.time()\n    avg_data_time = avg_iter_time = 0\n    count = 0\n    for epoch in range(start_epoch, total_epochs + 1):\n        for _, train_data in enumerate(train_loader):\n\n            current_step += 1\n            count += 1\n            if current_step > total_iters:\n                break\n\n            data_time = time.time() - data_time\n            avg_data_time = (avg_data_time * (count - 1) + data_time) / count\n\n            model.feed_data(train_data)\n            model.optimize_parameters(current_step)\n            model.update_learning_rate(\n                current_step, warmup_iter=opt[\"train\"][\"warmup_iter\"]\n            )\n\n            iter_time = time.time() - iter_time\n            avg_iter_time = (avg_iter_time * (count - 1) + iter_time) / count\n\n            # log\n            if current_step % opt[\"logger\"][\"print_freq\"] == 0:\n                logs = model.get_current_log()\n                message = (\n                    f\"<epoch:{epoch:3d}, iter:{current_step:8,d}, \"\n                    f\"lr:{model.get_current_learning_rate():.3e}> \"\n                )\n\n                message += f'[time (data): {avg_iter_time:.3f} ({avg_data_time:.3f})] '\n                for k, v in logs.items():\n                    message += \"{:s}: {:.4e}; \".format(k, v)\n                    # tensorboard logger\n                    if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                        if rank == 0:\n                            tb_logger.add_scalar(k, v, current_step)\n                logger.info(message)\n\n            # validation\n            if current_step % opt[\"train\"][\"val_freq\"] == 0:\n\n                avg_results = validate(\n                    model, val_set, val_loader, opt, measure, epoch, current_step\n                )\n\n                # log validation scores to tensorboard\n                if rank == 0 and opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n                    
for k, v in avg_results.items():\n                        tb_logger.add_scalar(k, v, current_step)\n\n            # save models and training states\n            if current_step % opt[\"logger\"][\"save_checkpoint_freq\"] == 0:\n                if rank == 0:\n                    logger.info(\"Saving models and training states.\")\n                    model.save(current_step)\n                    model.save_training_state(epoch, current_step)\n\n            data_time = time.time()\n            iter_time = time.time()\n\n    if rank == 0:\n        logger.info(\"Saving the final model.\")\n        model.save(\"latest\")\n        logger.info(\"End of training.\")\n        if opt[\"use_tb_logger\"] and \"debug\" not in opt[\"name\"]:\n            tb_logger.close()\n\n\ndef validate(model, dataset, dist_loader, opt, measure, epoch, current_step):\n\n    test_results = {}\n    for metric in opt[\"metrics\"]:\n        test_results[metric] = torch.zeros((len(dataset))).cuda()\n\n    if opt[\"dist\"]:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        world_size = 1\n        rank = 0\n\n    if rank == 0:\n        pbar = tqdm(total=len(dataset), leave=False, dynamic_ncols=True)\n\n    indices = list(range(rank, len(dataset), world_size))\n    for idx, val_data in enumerate(dist_loader):\n        idx = indices[idx]\n\n        LR_img = val_data[\"src\"]\n        lr_img = util.tensor2img(LR_img)  # save LR image for reference\n\n        model.test(val_data)\n        visuals = model.get_current_visuals()\n\n        # Save images for reference\n        img_name = val_data[\"src_path\"][0].split(\"/\")[-1].split(\".\")[0]\n        img_dir = os.path.join(opt[\"path\"][\"val_images\"], img_name)\n\n        util.mkdir(img_dir)\n        save_lr_path = os.path.join(img_dir, \"{:s}_LR.png\".format(img_name))\n        util.save_img(lr_img, save_lr_path)\n\n        sr_img = util.tensor2img(visuals[\"sr\"])  # uint8\n        save_img_path = os.path.join(\n            img_dir, \"{:s}_{:d}.png\".format(img_name, current_step)\n        )\n        util.save_img(sr_img, save_img_path)\n\n        if \"fake_lr\" in visuals.keys():\n            fake_lr_img = util.tensor2img(visuals[\"fake_lr\"])\n            save_img_path = os.path.join(\n                img_dir, f\"fake_lr_{current_step:d}.png\"\n            )\n            util.save_img(fake_lr_img, save_img_path)\n\n        # calculate scores\n        crop_size = opt[\"scale\"]\n        cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        if \"tgt\" in val_data.keys():\n            gt_img = util.tensor2img(val_data[\"tgt\"])\n            cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]\n        else:\n            cropped_gt_img = gt_img = None\n\n        scores = measure(res=cropped_sr_img, ref=cropped_gt_img, metrics=opt[\"metrics\"])\n        for k, v in scores.items():\n            test_results[k][idx] = v\n\n        if rank == 0:\n            for _ in range(world_size):\n                pbar.update(1)\n    if rank == 0:\n        pbar.close()\n\n    # log\n    avg_results = {}\n    message = \" <epoch:{:3d}, iter:{:8,d}> Average scores:\\t\".format(\n        epoch, current_step\n    )\n\n    if opt[\"dist\"]:\n        for k, v in test_results.items():\n            dist.reduce(v, dst=0)\n        dist.barrier()\n\n    if rank == 0:\n        for k, v in test_results.items():\n            avg_results[k] = sum(v) / len(v)\n            
message += \"{}: {:.6f}; \".format(k, avg_results[k])\n\n        logger_val = logging.getLogger(\"val\")  # validation logger\n        logger_val.info(message)\n    \n    del test_results\n    torch.cuda.empty_cache()\n    return avg_results\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
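  {
    "path": "codes/config/PDM-SR/TRAINING.md",
    "content": "A short note on the training entry point. `train.py` parses `--opt` (a YAML config), converts it to a NoneDict, and either spawns one `main_worker` per GPU (distributed) or calls `main_worker(0, 1, opt, args)` directly. The snippet below is a hypothetical single-process debug launcher, assuming it sits next to `train.py` (the config directory name here is only an example); it reuses the functions defined in `train.py` and skips the experiment-folder setup that `main()` performs.\n\n```python\nimport sys\n\nsys.path.append(\"../../\")\nimport utils.option as option\nfrom train import main_worker, parse_args\n\nif __name__ == \"__main__\":\n    # expects e.g. --opt options/train/some_config.yml on the command line\n    args = parse_args()\n    opt = option.parse(args.opt, args.root_path, is_train=True)\n    opt = option.dict_to_nonedict(opt)\n    opt[\"dist\"] = False  # force the non-distributed code path\n    main_worker(0, 1, opt, args)\n```\n"
  },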
  {
    "path": "codes/data/__init__.py",
    "content": "\"\"\"create dataset and dataloader\"\"\"\nimport importlib\nimport logging\nimport os\nimport os.path as osp\nimport numpy as np\nimport random\nfrom functools import partial\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom prefetch_generator import BackgroundGenerator\n\nfrom utils.registry import DATASET_REGISTRY\n\ndata_folder = osp.dirname(osp.abspath(__file__))\ndataset_filenames = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(data_folder)\n    if v.endswith(\"_dataset.py\")\n]\n# import all the dataset modules\n_dataset_modules = [\n    importlib.import_module(f\"data.{file_name}\") for file_name in dataset_filenames\n]\n\n\nclass DataLoaderX(DataLoader):\n\n    def __iter__(self):\n        return BackgroundGenerator(super().__iter__())\n\ndef create_dataloader(dataset, dataset_opt, dist=False):\n    phase = dataset_opt[\"phase\"]\n    if phase == \"train\":\n        num_workers = dataset_opt[\"workers_per_gpu\"]\n        batch_size = dataset_opt[\"imgs_per_gpu\"]\n        if dist:\n            rank = torch.distributed.get_rank()\n            world_size = torch.distributed.get_world_size()\n            sampler = torch.utils.data.DistributedSampler(\n                dataset, shuffle=True, drop_last=True, rank=rank, world_size=world_size\n            )\n        else:\n            rank = 0\n            world_size = 1\n            sampler = None\n        return DataLoaderX(\n            dataset,\n            batch_size=batch_size,\n            shuffle=(sampler is None),\n            num_workers=num_workers,\n            worker_init_fn=partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=rank),\n            sampler=sampler,\n            drop_last=True,\n            pin_memory=False,\n#            prefetch_factor=4\n        )\n    else:\n        if dist:\n            rank = torch.distributed.get_rank()\n            world_size = torch.distributed.get_world_size()\n            indices = list(range(rank, len(dataset), world_size))\n            dataset = torch.utils.data.Subset(dataset, indices)\n\n        return DataLoaderX(\n            dataset,\n            batch_size=1,\n            shuffle=False,\n            num_workers=0,\n            sampler=None,\n            drop_last=True,\n            pin_memory=True,\n        )\n\n\ndef create_dataset(dataset_opt, **kwarg):\n    mode = dataset_opt[\"mode\"]\n    dataset = DATASET_REGISTRY.get(mode)(dataset_opt, **kwarg)\n    logger = logging.getLogger(\"base\")\n    logger.info(\n        \"Dataset [{:s} - {:s}] is created.\".format(\n            dataset.__class__.__name__, dataset_opt[\"name\"]\n        )\n    )\n    return dataset\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n    # Set the worker seed to num_workers * rank + worker_id + seed\n    worker_seed = num_workers * rank + worker_id + seed\n    np.random.seed(worker_seed)\n    random.seed(worker_seed)\n"
  },
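  {
    "path": "codes/data/README.md",
    "content": "`create_dataset` looks the `mode` key up in `DATASET_REGISTRY` (the registered dataset class name) and `create_dataloader` wraps the result in the prefetching `DataLoaderX`. The real option keys live in the YAML files under `codes/config/*/options/`; the dict below is a hypothetical minimal example whose keys are taken from `unpaired_dataset.py` and the loader code, with made-up paths.\n\n```python\nfrom data import create_dataloader, create_dataset\n\ndataset_opt = {\n    \"name\": \"toy\",\n    \"mode\": \"UnPairedDataset\",  # registry key == dataset class name\n    \"phase\": \"train\",\n    \"data_type\": \"img\",  # anything other than \"lmdb\" reads images from disk\n    \"dataroot_src\": \"/path/to/LR\",\n    \"dataroot_tgt\": \"/path/to/HR\",\n    \"scale\": 4,\n    \"src_size\": 32,  # must equal tgt_size // scale\n    \"tgt_size\": 128,\n    \"use_flip\": True,\n    \"use_rot\": True,\n    \"color\": None,\n    \"workers_per_gpu\": 2,\n    \"imgs_per_gpu\": 8,\n}\n\ntrain_set = create_dataset(dataset_opt)\ntrain_loader = create_dataloader(train_set, dataset_opt, dist=False)\nbatch = next(iter(train_loader))  # dict with \"src\", \"tgt\" and the path keys\n```\n"
  },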
  {
    "path": "codes/data/data_sampler.py",
    "content": "\"\"\"\nModified from torch.utils.data.distributed.DistributedSampler\nSupport enlarging the dataset for *iter-oriented* training, for saving time when restart the\ndataloader after each epoch\n\"\"\"\nimport math\n\nimport torch\nimport torch.distributed as dist\nfrom torch.utils.data.sampler import Sampler\n\n\nclass DistIterSampler(Sampler):\n    \"\"\"Sampler that restricts data loading to a subset of the dataset.\n\n    It is especially useful in conjunction with\n    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each\n    process can pass a DistributedSampler instance as a DataLoader sampler,\n    and load a subset of the original dataset that is exclusive to it.\n\n    .. note::\n        Dataset is assumed to be of constant size.\n\n    Arguments:\n        dataset: Dataset used for sampling.\n        num_replicas (optional): Number of processes participating in\n            distributed training.\n        rank (optional): Rank of the current process within num_replicas.\n    \"\"\"\n\n    def __init__(self, dataset, num_replicas=None, rank=None):\n        if num_replicas is None:\n            if not dist.is_available():\n                raise RuntimeError(\"Requires distributed package to be available\")\n            num_replicas = dist.get_world_size()\n        if rank is None:\n            if not dist.is_available():\n                raise RuntimeError(\"Requires distributed package to be available\")\n            rank = dist.get_rank()\n        self.dataset = dataset\n        self.num_replicas = num_replicas\n        self.rank = rank\n        self.epoch = 0\n        self.num_samples = int(math.ceil(len(self.dataset) / self.num_replicas))\n        self.total_size = self.num_samples * self.num_replicas\n\n    def __iter__(self):\n        # deterministically shuffle based on epoch\n        g = torch.Generator()\n        g.manual_seed(self.epoch)\n        indices = torch.randperm(\n            self.total_size, generator=g\n        ).tolist()  # Returns a random permutation of integers from 0 to n - 1\n\n        dsize = len(self.dataset)\n        indices = [v % dsize for v in indices]\n\n        # subsample\n        indices = indices[self.rank : self.total_size : self.num_replicas]\n        assert len(indices) == self.num_samples\n\n        return iter(indices)\n\n    def __len__(self):\n        return self.num_samples\n\n    def set_epoch(self, epoch):\n        self.epoch = epoch\n"
  },
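  {
    "path": "codes/data/SAMPLER.md",
    "content": "Note: `create_dataloader` in `__init__.py` uses torch's own `DistributedSampler`; `DistIterSampler` is kept as an iteration-oriented alternative. Since it accepts explicit `num_replicas`/`rank`, it can be exercised without a process group. A toy sketch (assuming `codes/` is on `sys.path`, as in `train.py`):\n\n```python\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset\n\nfrom data.data_sampler import DistIterSampler\n\n# stand-in dataset; in practice one of the datasets in this folder\ndataset = TensorDataset(torch.arange(100))\n\nsampler = DistIterSampler(dataset, num_replicas=2, rank=0)\nloader = DataLoader(dataset, batch_size=10, sampler=sampler)\n\nfor epoch in range(2):\n    sampler.set_epoch(epoch)  # re-seeds the permutation deterministically\n    for (batch,) in loader:\n        pass\n```\n"
  },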
  {
    "path": "codes/data/debug_dataset.py",
    "content": "import os\nimport random\nimport sys\n\nimport cv2\nimport lmdb\nimport numpy as np\nimport torch\nimport torch.utils.data as data\n\nimport utils as util\nfrom utils.registry import DATASET_REGISTRY\n\n\n@DATASET_REGISTRY.register()\nclass DebugDataset(data.Dataset):\n    \"\"\"\n    Read unpaired reference images, i.e., source (src) and target (tgt),\n    \"\"\"\n\n    def __init__(self, opt):\n        super().__init__()\n        self.opt = opt\n\n        self.src_paths, self.src_sizes = util.get_image_paths(\n            opt[\"data_type\"], opt[\"dataroot_src\"]\n        )\n        self.tgt_paths, self.tgt_sizes = util.get_image_paths(\n            opt[\"data_type\"], opt[\"dataroot_tgt\"]\n        )\n\n        if opt.get(\"ratios\"):\n            ratio_src, ratio_tgt = opt[\"ratios\"]\n            self.src_paths *= ratio_src; self.src_sizes *= ratio_src\n            self.tgt_paths *= ratio_tgt; self.tgt_sizes *= ratio_tgt\n\n        merged_src = list(zip(self.src_paths, self.src_sizes))\n        random.shuffle(merged_src)\n        self.src_paths[:], self.src_sizes[:] = zip(*merged_src)\n\n        if opt[\"data_type\"] == \"lmdb\":\n            self.lmdb_envs = False\n\n    def _init_lmdb(self, dataroots):\n        envs = []\n        for dataroot in dataroots:\n            envs.append(\n                lmdb.open(\n                    dataroot, readonly=True, lock=False, readahead=False, meminit=False\n                )\n            )\n        self.lmdb_envs = True\n        return envs\n\n    def __getitem__(self, index):\n        if self.opt[\"data_type\"] == \"lmdb\" and (not self.lmdb_envs):\n            self.src_env, self.tgt_env = self._init_lmdb(\n                [\n                    self.opt[\"dataroot_src\"],\n                    self.opt[\"dataroot_tgt\"],\n                ]\n            )\n\n        scale = self.opt[\"scale\"]\n        cropped_src_size, cropped_tgt_size = self.opt[\"src_size\"], self.opt[\"tgt_size\"]\n\n        # get tgt image\n        tgt_path = self.tgt_paths[index]\n        if self.opt[\"data_type\"] == \"lmdb\":\n            resolution = [int(s) for s in self.tgt_sizes[index].split(\"_\")]\n        else:\n            resolution = None\n        img_tgt =  np.zeros([*resolution[1:], 3])\n        # img_tgt = util.read_img(\n        #     self.tgt_env, tgt_path, resolution\n        # )  # return: Numpy float32, HWC, BGR, [0,1]\n\n        # modcrop in the validation / test phase\n        if self.opt[\"phase\"] != \"train\":\n            img_tgt = util.modcrop(img_tgt, scale)\n\n        # get src image\n        src_path = self.src_paths[index]\n        if self.opt[\"data_type\"] == \"lmdb\":\n            resolution = [int(s) for s in self.src_sizes[index].split(\"_\")]\n        else:\n            resolution = None\n        img_src = np.zeros([*resolution[1:], 3])\n        # img_src = util.read_img(self.src_env, src_path, resolution)\n\n        if self.opt[\"phase\"] == \"train\":\n            assert (\n                cropped_src_size == cropped_tgt_size // scale\n            ), \"tgt size does not match src size\"\n\n            # randomly crop\n            H, W, C = img_src.shape\n            rnd_h = random.randint(0, max(0, H - cropped_src_size))\n            rnd_w = random.randint(0, max(0, W - cropped_src_size))\n            img_src = img_src[\n                rnd_h : rnd_h + cropped_src_size, rnd_w : rnd_w + cropped_src_size\n            ]\n\n            H, W, C = img_tgt.shape\n            rnd_h = random.randint(0, max(0, H - 
cropped_tgt_size))\n            rnd_w = random.randint(0, max(0, W - cropped_tgt_size))\n            img_tgt = img_tgt[\n                rnd_h : rnd_h + cropped_tgt_size, rnd_w : rnd_w + cropped_tgt_size\n            ]\n\n            # augmentation - flip, rotate\n            img_tgt = util.augment(\n                [img_tgt],\n                self.opt[\"use_flip\"],\n                self.opt[\"use_rot\"],\n                self.opt[\"mode\"],\n            )\n\n            img_src = util.augment(\n                [img_src],\n                self.opt[\"use_flip\"],\n                self.opt[\"use_rot\"],\n                self.opt[\"mode\"],\n            )\n\n        # change color space if necessary\n        if self.opt[\"color\"]:\n            # TODO during val no definition\n            img_src, img_tgt = util.channel_convert(self.opt[\"color\"], [img_src, img_tgt])\n\n        # BGR to RGB, HWC to CHW, numpy to tensor\n        if img_src.shape[2] == 3:\n            img_src = img_src[:, :, [2, 1, 0]]\n            img_tgt = img_tgt[:, :, [2, 1, 0]]\n\n        img_src = torch.from_numpy(\n            np.ascontiguousarray(np.transpose(img_src, (2, 0, 1)))\n        ).float()\n        img_tgt = torch.from_numpy(\n            np.ascontiguousarray(np.transpose(img_tgt, (2, 0, 1)))\n        ).float()\n\n        data_dict = {\n            \"src\": img_src,\n            \"tgt\": img_tgt,\n            \"src_path\": src_path,\n            \"tgt_path\": tgt_path,\n        }\n        return data_dict\n\n    def __len__(self):\n        return len(self.src_paths)\n"
  },
  {
    "path": "codes/data/fixed_image_dataset.py",
    "content": "import os\nimport random\nimport sys\n\nimport cv2\nimport lmdb\nimport numpy as np\nimport torch\nimport torch.utils.data as data\n\nimport utils as util\nfrom utils.registry import DATASET_REGISTRY\n\n\n@DATASET_REGISTRY.register()\nclass FixedImageDataset(data.Dataset):\n    \"\"\"\n    Read unpaired reference images, i.e., source (src) and target (tgt),\n    \"\"\"\n\n    def __init__(self, opt, img_path):\n        super().__init__()\n        self.opt = opt\n\n        self.img_src = util.read_img(None, img_path)\n        self.tgt_paths, self.tgt_sizes = util.get_image_paths(\n            opt[\"data_type\"], opt[\"dataroot_tgt\"]\n        )\n\n        if opt.get(\"ratios\"):\n            ratio_src, ratio_tgt = opt[\"ratios\"]\n            self.tgt_paths *= ratio_tgt; self.tgt_sizes *= ratio_tgt\n\n        if opt[\"data_type\"] == \"lmdb\":\n            self.lmdb_envs = False\n\n    def _init_lmdb(self, dataroots):\n        envs = []\n        for dataroot in dataroots:\n            envs.append(\n                lmdb.open(\n                    dataroot, readonly=True, lock=False, readahead=False, meminit=False\n                )\n            )\n        self.lmdb_envs = True\n        return envs\n\n    def __getitem__(self, index):\n        if self.opt[\"data_type\"] == \"lmdb\" and (not self.lmdb_envs):\n            self.tgt_env = self._init_lmdb(\n                [self.opt[\"dataroot_tgt\"]]\n            )[0]\n\n        scale = self.opt[\"scale\"]\n        cropped_src_size, cropped_tgt_size = self.opt[\"src_size\"], self.opt[\"tgt_size\"]\n\n        # get tgt image\n        tgt_path = self.tgt_paths[index]\n        if self.opt[\"data_type\"] == \"lmdb\":\n            resolution = [int(s) for s in self.tgt_sizes[index].split(\"_\")]\n        else:\n            resolution = None\n        img_tgt = util.read_img(\n            self.tgt_env, tgt_path, resolution\n        )  # return: Numpy float32, HWC, BGR, [0,1]\n\n        # modcrop in the validation / test phase\n        if self.opt[\"phase\"] != \"train\":\n            img_tgt = util.modcrop(img_tgt, scale)\n\n        # get src image\n        img_src = self.img_src\n\n        if self.opt[\"phase\"] == \"train\":\n            assert (\n                cropped_src_size == cropped_tgt_size // scale\n            ), \"tgt size does not match src size\"\n\n            # randomly crop\n            H, W, C = img_src.shape\n            rnd_h = random.randint(0, max(0, H - cropped_src_size))\n            rnd_w = random.randint(0, max(0, W - cropped_src_size))\n            img_src = img_src[\n                rnd_h : rnd_h + cropped_src_size, rnd_w : rnd_w + cropped_src_size\n            ]\n\n            H, W, C = img_tgt.shape\n            rnd_h = random.randint(0, max(0, H - cropped_tgt_size))\n            rnd_w = random.randint(0, max(0, W - cropped_tgt_size))\n            img_tgt = img_tgt[\n                rnd_h : rnd_h + cropped_tgt_size, rnd_w : rnd_w + cropped_tgt_size\n            ]\n\n            # augmentation - flip, rotate\n            img_tgt = util.augment(\n                [img_tgt],\n                self.opt[\"use_flip\"],\n                self.opt[\"use_rot\"],\n                self.opt[\"mode\"],\n            )\n\n            img_src = util.augment(\n                [img_src],\n                self.opt[\"use_flip\"],\n                self.opt[\"use_rot\"],\n                self.opt[\"mode\"],\n            )\n\n        # change color space if necessary\n        if self.opt[\"color\"]:\n            # TODO during val 
no definition\n            img_src, img_tgt = util.channel_convert(self.opt[\"color\"], [img_src, img_tgt])\n\n        # BGR to RGB, HWC to CHW, numpy to tensor\n        if img_src.shape[2] == 3:\n            img_src = img_src[:, :, [2, 1, 0]]\n            img_tgt = img_tgt[:, :, [2, 1, 0]]\n\n        img_src = torch.from_numpy(\n            np.ascontiguousarray(np.transpose(img_src, (2, 0, 1)))\n        ).float()\n        img_tgt = torch.from_numpy(\n            np.ascontiguousarray(np.transpose(img_tgt, (2, 0, 1)))\n        ).float()\n\n        data_dict = {\n            \"src\": img_src,\n            \"tgt\": img_tgt,\n        }\n        return data_dict\n\n    def __len__(self):\n        return len(self.tgt_paths)\n"
  },
  {
    "path": "codes/data/paired_ref_dataset.py",
    "content": "import os\nimport random\nimport sys\n\nimport cv2\nimport lmdb\nimport numpy as np\nimport torch\nimport torch.utils.data as data\n\nimport utils as util\nfrom utils.registry import DATASET_REGISTRY\n\n\n@DATASET_REGISTRY.register()\nclass PairedRefDataset(data.Dataset):\n    \"\"\"\n    Read paired reference images, i.e., source (src) and target (tgt), and unparied source images.\n    The pair is ensured by 'sorted' function, so please check the name convention.\n    \"\"\"\n\n    def __init__(self, opt):\n        super().__init__()\n        self.opt = opt\n\n        self.ref_src_paths, self.ref_src_sizes = util.get_image_paths(\n            opt[\"data_type\"], opt[\"dataroot_ref_src\"]\n        )\n        self.ref_tgt_paths, self.ref_tgt_sizes = util.get_image_paths(\n            opt[\"data_type\"], opt[\"dataroot_ref_tgt\"]\n        )\n        self.src_paths, self.src_sizes = util.get_image_paths(\n            opt[\"data_type\"], opt[\"dataroot_src\"]\n        )\n\n        if not len(self.ref_src_paths) == len(self.ref_tgt_paths):\n            raise ValueError(\n                \"Reference source and Reference target datasets have different number of images - {}. {}.\".format(\n                    len(self.ref_src_paths), len(self.ref_tgt_paths)\n                )\n            )\n\n        if opt.get(\"ratios\"):\n            ratio_ref, ratio_src = opt[\"ratios\"]\n            self.ref_src_paths *= ratio_ref\n            self.ref_src_sizes *= ratio_ref\n            self.ref_tgt_paths *= ratio_ref\n            self.ref_tgt_sizes *= ratio_ref\n            self.src_paths *= ratio_src\n            self.src_sizes *= ratio_src\n\n        merged_src = list(zip(self.src_paths, self.src_sizes))\n        random.shuffle(merged_src)\n        self.src_paths[:], self.src_sizes[:] = zip(*merged_src)\n\n        if opt[\"data_type\"] == \"lmdb\":\n            self.lmdb_envs = False\n\n    def _init_lmdb(self, dataroots):\n        envs = []\n        for dataroot in dataroots:\n            envs.append(\n                lmdb.open(\n                    dataroot, readonly=True, lock=False, readahead=False, meminit=False\n                )\n            )\n        self.lmdb_envs = True\n        return envs\n\n    def __getitem__(self, index):\n        if self.opt[\"data_type\"] == \"lmdb\" and (not self.lmdb_envs):\n            self.ref_src_env, self.ref_tgt_env, self.src_env = self._init_lmdb(\n                [\n                    self.opt[\"dataroot_ref_src\"],\n                    self.opt[\"dataroot_ref_tgt\"],\n                    self.opt[\"dataroot_src\"],\n                ]\n            )\n\n        scale = self.opt[\"scale\"]\n        cropped_src_size, cropped_tgt_size = self.opt[\"src_size\"], self.opt[\"tgt_size\"]\n\n        # get ref target image\n        ref_tgt_path = self.ref_tgt_paths[index]\n        if self.opt[\"data_type\"] == \"lmdb\":\n            resolution = [int(s) for s in self.ref_tgt_sizes[index].split(\"_\")]\n        else:\n            resolution = None\n        img_ref_tgt = util.read_img(\n            self.ref_tgt_env, ref_tgt_path, resolution\n        )  # return: Numpy float32, HWC, BGR, [0,1]\n\n        # modcrop in the validation / test phase\n        if self.opt[\"phase\"] != \"train\":\n            img_ref_tgt = util.modcrop(img_ref_tgt, scale)\n\n        # get ref source image\n        ref_src_path = self.ref_src_paths[index]\n        if self.opt[\"data_type\"] == \"lmdb\":\n            resolution = [int(s) for s in 
self.ref_src_sizes[index].split(\"_\")]\n        else:\n            resolution = None\n        img_ref_src = util.read_img(self.ref_src_env, ref_src_path, resolution)\n\n        # get source image\n        src_path = self.src_paths[index]\n        if self.opt[\"data_type\"] == \"lmdb\":\n            resolution = [int(s) for s in self.src_sizes[index].split(\"_\")]\n        else:\n            resolution = None\n        img_src = util.read_img(self.src_env, src_path, resolution)\n\n        if self.opt[\"phase\"] == \"train\":\n            H, W, C = img_ref_src.shape\n            assert (\n                cropped_src_size == cropped_tgt_size // scale\n            ), \"GT size does not match LR size\"\n\n            # randomly crop\n            rnd_h = random.randint(0, max(0, H - cropped_src_size))\n            rnd_w = random.randint(0, max(0, W - cropped_src_size))\n            img_ref_src = img_ref_src[\n                rnd_h : rnd_h + cropped_src_size, rnd_w : rnd_w + cropped_src_size\n            ]\n            rnd_h_tgt, rnd_w_tgt = int(rnd_h * scale), int(rnd_w * scale)\n            img_ref_tgt = img_ref_tgt[\n                rnd_h_tgt : rnd_h_tgt + cropped_tgt_size,\n                rnd_w_tgt : rnd_w_tgt + cropped_tgt_size,\n                :,\n            ]\n\n            src_h, src_w, _ = img_src.shape\n            rnd_h = random.randint(0, max(0, src_h - cropped_src_size))\n            rnd_w = random.randint(0, max(0, src_w - cropped_src_size))\n            img_src = img_src[\n                rnd_h : rnd_h + cropped_src_size, rnd_w : rnd_w + cropped_src_size\n            ]\n\n            # augmentation - flip, rotate\n            img_ref_tgt, img_ref_src = util.augment(\n                [img_ref_tgt, img_ref_src],\n                self.opt[\"use_flip\"],\n                self.opt[\"use_rot\"],\n                self.opt[\"mode\"],\n            )\n            img_src = util.augment(\n                [img_src],\n                self.opt[\"use_flip\"],\n                self.opt[\"use_rot\"],\n                self.opt[\"mode\"],\n            )\n\n        # change color space if necessary\n        if self.opt[\"color\"]:\n            # TODO during val no definition\n            img_ref_src, img_ref_tgt, img_src = util.channel_convert(\n                self.opt[\"color\"], [img_ref_src, img_ref_tgt, img_src]\n            )\n\n        # BGR to RGB, HWC to CHW, numpy to tensor\n        if img_ref_src.shape[2] == 3:\n            img_ref_src = img_ref_src[:, :, [2, 1, 0]]\n            img_ref_tgt = img_ref_tgt[:, :, [2, 1, 0]]\n            img_src = img_src[:, :, [2, 1, 0]]\n\n        img_ref_src = torch.from_numpy(\n            np.ascontiguousarray(np.transpose(img_ref_src, (2, 0, 1)))\n        ).float()\n        img_ref_tgt = torch.from_numpy(\n            np.ascontiguousarray(np.transpose(img_ref_tgt, (2, 0, 1)))\n        ).float()\n        img_src = torch.from_numpy(\n            np.ascontiguousarray(np.transpose(img_src, (2, 0, 1)))\n        ).float()\n\n        data_dict = {\n            \"ref_src\": img_ref_src,\n            \"ref_tgt\": img_ref_tgt,\n            \"src\": img_src,\n            \"ref_src_path\": ref_src_path,\n            \"ref_tgt_path\": ref_tgt_path,\n            \"src_path\": src_path,\n        }\n\n        return data_dict\n\n    def __len__(self):\n        return len(self.src_paths)\n"
  },
  {
    "path": "codes/data/paried_dataset.py",
    "content": "import os\nimport random\nimport sys\n\nimport cv2\nimport lmdb\nimport numpy as np\nimport torch\nimport torch.utils.data as data\n\nimport utils as util\nfrom utils.registry import DATASET_REGISTRY\n\n\n@DATASET_REGISTRY.register()\nclass PairedDataset(data.Dataset):\n    \"\"\"\n    Read paired reference images, i.e., source (src) and target (tgt),\n    The pair is ensured by 'sorted' function, so please check the name convention.\n    \"\"\"\n\n    def __init__(self, opt):\n        super().__init__()\n        self.opt = opt\n\n        self.src_paths, self.src_sizes = util.get_image_paths(\n            opt[\"data_type\"], opt[\"dataroot_src\"]\n        )\n        self.tgt_paths, self.tgt_sizes = util.get_image_paths(\n            opt[\"data_type\"], opt[\"dataroot_tgt\"]\n        )\n\n        if not len(self.src_paths) == len(self.tgt_paths):\n            raise ValueError(\n                \"src and tgt datasets have different number of images - {}. {}.\".format(\n                    len(self.src_paths), len(self.tgt_paths)\n                )\n            )\n\n        if opt[\"data_type\"] == \"lmdb\":\n            self.lmdb_envs = False\n\n    def _init_lmdb(self, dataroots):\n        envs = []\n        for dataroot in dataroots:\n            envs.append(\n                lmdb.open(\n                    dataroot, readonly=True, lock=False, readahead=False, meminit=False\n                )\n            )\n        self.lmdb_envs = True\n        return envs\n\n    def __getitem__(self, index):\n        if self.opt[\"data_type\"] == \"lmdb\" and (not self.lmdb_envs):\n            self.src_env, self.tgt_env = self._init_lmdb(\n                [\n                    self.opt[\"dataroot_src\"],\n                    self.opt[\"dataroot_tgt\"],\n                ]\n            )\n\n        scale = self.opt[\"scale\"]\n        cropped_src_size, cropped_tgt_size = self.opt[\"src_size\"], self.opt[\"tgt_size\"]\n\n        # get tgt image\n        tgt_path = self.tgt_paths[index]\n        if self.opt[\"data_type\"] == \"lmdb\":\n            resolution = [int(s) for s in self.tgt_sizes[index].split(\"_\")]\n        else:\n            resolution = None\n        img_tgt = util.read_img(\n            self.tgt_env, tgt_path, resolution\n        )  # return: Numpy float32, HWC, BGR, [0,1]\n\n        # modcrop in the validation / test phase\n        if self.opt[\"phase\"] != \"train\":\n            img_tgt = util.modcrop(img_tgt, scale)\n\n        # get src image\n        src_path = self.src_paths[index]\n        if self.opt[\"data_type\"] == \"lmdb\":\n            resolution = [int(s) for s in self.src_sizes[index].split(\"_\")]\n        else:\n            resolution = None\n        img_src = util.read_img(self.src_env, src_path, resolution)\n\n        if self.opt[\"phase\"] == \"train\":\n            H, W, C = img_src.shape\n            assert (\n                cropped_src_size == cropped_tgt_size // scale\n            ), \"tgt size does not match src size\"\n\n            # randomly crop\n            rnd_h = random.randint(0, max(0, H - cropped_src_size))\n            rnd_w = random.randint(0, max(0, W - cropped_src_size))\n            img_src = img_src[\n                rnd_h : rnd_h + cropped_src_size, rnd_w : rnd_w + cropped_src_size\n            ]\n            rnd_h_tgt, rnd_w_tgt = int(rnd_h * scale), int(rnd_w * scale)\n            img_tgt = img_tgt[\n                rnd_h_tgt : rnd_h_tgt + cropped_tgt_size,\n                rnd_w_tgt : rnd_w_tgt + cropped_tgt_size,\n            
]\n            # augmentation - flip, rotate\n            img_tgt, img_src = util.augment(\n                [img_tgt, img_src],\n                self.opt[\"use_flip\"],\n                self.opt[\"use_rot\"],\n                self.opt[\"mode\"],\n            )\n\n        # change color space if necessary\n        if self.opt[\"color\"]:\n            # TODO during val no definition\n            img_src, img_tgt = util.channel_convert(\n                self.opt[\"color\"], [img_src, img_tgt]\n            )\n\n        # BGR to RGB, HWC to CHW, numpy to tensor\n        if img_src.shape[2] == 3:\n            img_src = img_src[:, :, [2, 1, 0]]\n            img_tgt = img_tgt[:, :, [2, 1, 0]]\n\n        img_src = torch.from_numpy(\n            np.ascontiguousarray(np.transpose(img_src, (2, 0, 1)))\n        ).float()\n        img_tgt = torch.from_numpy(\n            np.ascontiguousarray(np.transpose(img_tgt, (2, 0, 1)))\n        ).float()\n\n        data_dict = {\n            \"src\": img_src,\n            \"tgt\": img_tgt,\n            \"src_path\": src_path,\n            \"tgt_path\": tgt_path,\n        }\n\n        return data_dict\n\n    def __len__(self):\n        return len(self.src_paths)\n"
  },
  {
    "path": "codes/data/single_dataset.py",
    "content": "import os\nimport random\nimport sys\n\nimport cv2\nimport lmdb\nimport numpy as np\nimport torch\nimport torch.utils.data as data\n\nimport utils as util\nfrom utils.registry import DATASET_REGISTRY\n\n\n@DATASET_REGISTRY.register()\nclass SingleImageDataset(data.Dataset):\n    \"\"\"\n    Read Single Image.\n    The pair is ensured by 'sorted' function, so please check the name convention.\n    \"\"\"\n\n    def __init__(self, opt):\n        super().__init__()\n        self.opt = opt\n\n        self.img_paths, self.img_sizes = util.get_image_paths(\n            opt[\"data_type\"], opt[\"dataroot\"]\n        )\n\n        if opt[\"data_type\"] == \"lmdb\":\n            self.lmdb_envs = False\n\n    def _init_lmdb(self, dataroots):\n        envs = []\n        for dataroot in dataroots:\n            envs.append(\n                lmdb.open(\n                    dataroot, readonly=True, lock=False, readahead=False, meminit=False\n                )\n            )\n        self.lmdb_envs = True\n        return envs[0] if len(envs) == 1 else envs\n\n    def __getitem__(self, index):\n        if self.opt[\"data_type\"] == \"lmdb\" and (not self.lmdb_envs):\n            self.env = self._init_lmdb([self.opt[\"dataroot\"]])\n\n        scale = self.opt[\"scale\"]\n\n        # get image\n        img_path = self.img_paths[index]\n        if self.opt[\"data_type\"] == \"lmdb\":\n            resolution = [int(s) for s in self.img_sizes[index].split(\"_\")]\n        else:\n            resolution = None\n        img = util.read_img(self.env, img_path, resolution)\n\n        if self.opt[\"phase\"] != \"train\" and self.opt.get(\"scale\"):\n            img = util.modcrop(img, self.opt[\"scale\"])\n\n        if self.opt[\"phase\"] == \"train\":\n            H, W, C = img.shape\n            cropped_size = self.opt[\"img_size\"]\n\n            # randomly crop\n            rnd_h = random.randint(0, max(0, H - cropped_size))\n            rnd_w = random.randint(0, max(0, W - cropped_size))\n            img = img[rnd_h : rnd_h + cropped_size, rnd_w : rnd_w + cropped_size]\n            # augmentation - flip, rotate\n            img = util.augment(\n                [img],\n                self.opt[\"use_flip\"],\n                self.opt[\"use_rot\"],\n                self.opt[\"mode\"],\n            )\n\n        # change color space if necessary\n        if self.opt[\"color\"]:\n            # TODO during val no definition\n            img = util.channel_convert(self.opt[\"color\"], [img])[0]\n\n        # BGR to RGB, HWC to CHW, numpy to tensor\n        if img.shape[2] == 3:\n            img = img[:, :, [2, 1, 0]]\n\n        img = torch.from_numpy(\n            np.ascontiguousarray(np.transpose(img, (2, 0, 1)))\n        ).float()\n\n        data_dict = {\n            \"img\": img,\n            \"img_path\": img_path,\n        }\n\n        return data_dict\n\n    def __len__(self):\n        return len(self.img_paths)\n"
  },
  {
    "path": "codes/data/single_image_dataset.py",
    "content": "import os\nimport random\nimport sys\n\nimport cv2\nimport lmdb\nimport numpy as np\nimport torch\nimport torch.utils.data as data\n\nimport utils as util\nfrom utils.registry import DATASET_REGISTRY\n\n\n@DATASET_REGISTRY.register()\nclass SingleDataset(data.Dataset):\n    \"\"\"\n    Read Single Image.\n    The pair is ensured by 'sorted' function, so please check the name convention.\n    \"\"\"\n\n    def __init__(self, opt):\n        super().__init__()\n        self.opt = opt\n\n        self.img_paths, self.img_sizes = util.get_image_paths(\n            opt[\"data_type\"], opt[\"dataroot\"]\n        )\n\n        if opt[\"data_type\"] == \"lmdb\":\n            self.lmdb_envs = False\n\n    def _init_lmdb(self, dataroots):\n        envs = []\n        for dataroot in dataroots:\n            envs.append(\n                lmdb.open(\n                    dataroot, readonly=True, lock=False, readahead=False, meminit=False\n                )\n            )\n        self.lmdb_envs = True\n        return envs[0] if len(envs) == 1 else envs\n\n    def __getitem__(self, index):\n        if self.opt[\"data_type\"] == \"lmdb\" and (not self.lmdb_envs):\n            self.env = self._init_lmdb([self.opt[\"dataroot\"]])\n\n        scale = self.opt[\"scale\"]\n\n        # get image\n        img_path = self.img_paths[index]\n        if self.opt[\"data_type\"] == \"lmdb\":\n            resolution = [int(s) for s in self.img_sizes[index].split(\"_\")]\n        else:\n            resolution = None\n        img = util.read_img(self.env, img_path, resolution)\n\n        if self.opt[\"phase\"] != \"train\" and self.opt.get(\"scale\"):\n            img = util.modcrop(img, self.opt[\"scale\"])\n\n        if self.opt[\"phase\"] == \"train\":\n            H, W, C = img.shape\n            cropped_size = self.opt[\"img_size\"]\n\n            # randomly crop\n            rnd_h = random.randint(0, max(0, H - cropped_size))\n            rnd_w = random.randint(0, max(0, W - cropped_size))\n            img = img[rnd_h : rnd_h + cropped_size, rnd_w : rnd_w + cropped_size]\n            # augmentation - flip, rotate\n            img = util.augment(\n                [img],\n                self.opt[\"use_flip\"],\n                self.opt[\"use_rot\"],\n                self.opt[\"mode\"],\n            )\n\n        # change color space if necessary\n        if self.opt[\"color\"]:\n            # TODO during val no definition\n            img = util.channel_convert(self.opt[\"color\"], [img])[0]\n\n        # BGR to RGB, HWC to CHW, numpy to tensor\n        if img.shape[2] == 3:\n            img = img[:, :, [2, 1, 0]]\n\n        img = torch.from_numpy(\n            np.ascontiguousarray(np.transpose(img, (2, 0, 1)))\n        ).float()\n\n        data_dict = {\n            \"src\": img,\n            \"src_path\": img_path,\n        }\n\n        return data_dict\n\n    def __len__(self):\n        return len(self.img_paths)\n"
  },
  {
    "path": "codes/data/unpaired_dataset.py",
    "content": "import os\nimport random\nimport sys\n\nimport cv2\nimport lmdb\nimport numpy as np\nimport torch\nimport torch.utils.data as data\n\nimport utils as util\nfrom utils.registry import DATASET_REGISTRY\n\n\n@DATASET_REGISTRY.register()\nclass UnPairedDataset(data.Dataset):\n    \"\"\"\n    Read unpaired reference images, i.e., source (src) and target (tgt),\n    \"\"\"\n\n    def __init__(self, opt):\n        super().__init__()\n        self.opt = opt\n\n        self.src_paths, self.src_sizes = util.get_image_paths(\n            opt[\"data_type\"], opt[\"dataroot_src\"]\n        )\n        self.tgt_paths, self.tgt_sizes = util.get_image_paths(\n            opt[\"data_type\"], opt[\"dataroot_tgt\"]\n        )\n\n        if opt.get(\"ratios\"):\n            ratio_src, ratio_tgt = opt[\"ratios\"]\n            self.src_paths *= ratio_src; self.src_sizes *= ratio_src\n            self.tgt_paths *= ratio_tgt; self.tgt_sizes *= ratio_tgt\n\n        merged_src = list(zip(self.src_paths, self.src_sizes))\n        random.shuffle(merged_src)\n        self.src_paths[:], self.src_sizes[:] = zip(*merged_src)\n\n        if opt[\"data_type\"] == \"lmdb\":\n            self.lmdb_envs = False\n\n    def _init_lmdb(self, dataroots):\n        envs = []\n        for dataroot in dataroots:\n            envs.append(\n                lmdb.open(\n                    dataroot, readonly=True, lock=False, readahead=False, meminit=False\n                )\n            )\n        self.lmdb_envs = True\n        return envs\n\n    def __getitem__(self, index):\n        if self.opt[\"data_type\"] == \"lmdb\" and (not self.lmdb_envs):\n            self.src_env, self.tgt_env = self._init_lmdb(\n                [\n                    self.opt[\"dataroot_src\"],\n                    self.opt[\"dataroot_tgt\"],\n                ]\n            )\n\n        scale = self.opt[\"scale\"]\n        cropped_src_size, cropped_tgt_size = self.opt[\"src_size\"], self.opt[\"tgt_size\"]\n\n        # get tgt image\n        tgt_path = self.tgt_paths[index]\n        if self.opt[\"data_type\"] == \"lmdb\":\n            resolution = [int(s) for s in self.tgt_sizes[index].split(\"_\")]\n        else:\n            resolution = None\n        img_tgt = util.read_img(\n            self.tgt_env, tgt_path, resolution\n        )  # return: Numpy float32, HWC, BGR, [0,1]\n\n        # modcrop in the validation / test phase\n        if self.opt[\"phase\"] != \"train\":\n            img_tgt = util.modcrop(img_tgt, scale)\n\n        # get src image\n        src_path = self.src_paths[index]\n        if self.opt[\"data_type\"] == \"lmdb\":\n            resolution = [int(s) for s in self.src_sizes[index].split(\"_\")]\n        else:\n            resolution = None\n        img_src = util.read_img(self.src_env, src_path, resolution)\n\n        if self.opt[\"phase\"] == \"train\":\n            assert (\n                cropped_src_size == cropped_tgt_size // scale\n            ), \"tgt size does not match src size\"\n\n            # randomly crop\n            H, W, C = img_src.shape\n            rnd_h = random.randint(0, max(0, H - cropped_src_size))\n            rnd_w = random.randint(0, max(0, W - cropped_src_size))\n            img_src = img_src[\n                rnd_h : rnd_h + cropped_src_size, rnd_w : rnd_w + cropped_src_size\n            ]\n\n            H, W, C = img_tgt.shape\n            rnd_h = random.randint(0, max(0, H - cropped_tgt_size))\n            rnd_w = random.randint(0, max(0, W - cropped_tgt_size))\n            img_tgt = 
img_tgt[\n                rnd_h : rnd_h + cropped_tgt_size, rnd_w : rnd_w + cropped_tgt_size\n            ]\n\n            # augmentation - flip, rotate\n            img_tgt = util.augment(\n                [img_tgt],\n                self.opt[\"use_flip\"],\n                self.opt[\"use_rot\"],\n                self.opt[\"mode\"],\n            )\n\n            img_src = util.augment(\n                [img_src],\n                self.opt[\"use_flip\"],\n                self.opt[\"use_rot\"],\n                self.opt[\"mode\"],\n            )\n\n        # change color space if necessary\n        if self.opt[\"color\"]:\n            # TODO during val no definition\n            img_src, img_tgt = util.channel_convert(self.opt[\"color\"], [img_src, img_tgt])\n\n        # BGR to RGB, HWC to CHW, numpy to tensor\n        if img_src.shape[2] == 3:\n            img_src = img_src[:, :, [2, 1, 0]]\n            img_tgt = img_tgt[:, :, [2, 1, 0]]\n\n        img_src = torch.from_numpy(\n            np.ascontiguousarray(np.transpose(img_src, (2, 0, 1)))\n        ).float()\n        img_tgt = torch.from_numpy(\n            np.ascontiguousarray(np.transpose(img_tgt, (2, 0, 1)))\n        ).float()\n\n        data_dict = {\n            \"src\": img_src,\n            \"tgt\": img_tgt,\n            \"src_path\": src_path,\n            \"tgt_path\": tgt_path,\n        }\n        return data_dict\n\n    def __len__(self):\n        return len(self.src_paths)\n"
  },
  {
    "path": "codes/metrics/__init__.py",
    "content": "from .measure import IQA\nfrom .psnr import psnr\nfrom .ssim import calculate_ssim as ssim\n"
  },
  {
    "path": "codes/metrics/best_psnr.py",
    "content": "import numpy as np\n\nfrom .ssim import ssim\n\n\ndef ignore_boundary(img, SCALE):\n    h, w = img.shape[:2]\n    boundarypixels = 6 + SCALE \n    img = img[:h-h%SCALE, :w-w%SCALE]\n    img = img[boundarypixels:-boundarypixels,boundarypixels:-boundarypixels]\n    return img\n\ndef best_psnr(img_orig, img_out):\n\n    SCALE = 4\n    SHIFT = 40\n    SIZE = 30\n\n    img_orig = ignore_boundary(img_orig, SCALE)\n    img_out = ignore_boundary(img_out, SCALE)\n\n    h, w = img_orig.shape[:2]\n    c = img_orig.shape[2] if len(img_orig.shape) == 3 else 1\n    h_cen, w_cen = int(h / 2), int(w / 2)\n    h_left = h_cen - SIZE\n    h_right = h_cen + SIZE\n    w_left = w_cen - SIZE\n    w_right = w_cen + SIZE\n\n    im_h = img_orig[None, h_left:h_right, w_left:w_right]\n    ssim_h = img_orig[h_left:h_right, w_left:w_right]\n\n\n    im_shifts = np.zeros([(2 * SHIFT + 1) * (2 * SHIFT + 1), *ssim_h.shape])\n    ssim_shifts = np.zeros([(2 * SHIFT + 1) * (2 * SHIFT + 1), c])\n    for hei in range(-SHIFT, SHIFT + 1):\n        for wid in range(-SHIFT, SHIFT + 1):\n            tmp_l = img_out[h_left + hei:h_right + hei, w_left + wid:w_right + wid]\n            im_shifts[(hei + SHIFT) * (SHIFT + 1) + wid + SHIFT, :, :] = tmp_l\n\n\t        #ssim_h = np.squeeze(im_h)\n            # ssim_h = ssim_h.astype('uint8')\n            # ssim_l = tmp_l.astype('uint8')\n            if abs(hei) % 2 == 0 and abs(wid) % 2 == 0:\n                if c == 1:\n                    ssim_shifts[(hei + SHIFT) * (SHIFT + 1) + wid + SHIFT, 0] \\\n                        = ssim(tmp_l[:, :], ssim_h[:, :])\n                else:\n                    for i in range(c):\n                        ssim_shifts[(hei + SHIFT) * (SHIFT + 1) + wid + SHIFT, i] \\\n                            = ssim(tmp_l[:, :, i], ssim_h[:, :, i])\n\n    squared_error = np.square(im_shifts / 255. - im_h / 255.)\n    mean_aixs = (1, 2, 3) if c == 3 else (1, 2)\n    mse = np.mean(squared_error, axis=mean_aixs)\n    psnr = 10 * np.log10(1.0 / mse)\n    return max(psnr), max(np.mean(ssim_shifts, axis=1))"
  },
  {
    "path": "codes/metrics/measure.py",
    "content": "from collections import OrderedDict\n\nimport numpy as np\n\nimport lpips as lp\n\nfrom .psnr import psnr\nfrom .ssim import calculate_ssim as ssim\nfrom .best_psnr import best_psnr\n\n\nclass IQA:\n\n    referecnce_metrics = [\"psnr\", \"ssim\", \"best_psnr\", \"best_ssim\", \"lpips\"]\n    nonreference_metrics = [\"niqe\", \"piqe\", \"brisque\"]\n    supported_metrics = referecnce_metrics + nonreference_metrics\n\n    def __init__(self, metrics, lpips_type=\"alex\", cuda=True):\n        for metric in self.supported_metrics:\n            if not (metric in self.supported_metrics):\n                raise KeyError(\n                    \"{} is not Supported metric. (Support only {})\".format(\n                        metric, self.supported_metrics\n                    )\n                )\n\n        if \"lpips\" in metrics:\n            self.lpips_fn = lp.LPIPS(net=lpips_type)\n            self.cuda = cuda\n            if cuda:\n                self.lpips_fn = self.lpips_fn.cuda()\n        if (\"niqe\" in metrics) or (\"piqe\" in metrics) or (\"brisque\" in metrics):\n            import matlab.engine\n\n            print(\"Starting matlab engine ...\")\n            self.eng = matlab.engine.start_matlab()\n\n    def __call__(self, res, ref=None, metrics=[\"niqe\"]):\n        \"\"\"\n        res, ref: [0, 255]\n        \"\"\"\n        if hasattr(self, \"eng\"):\n            import matlab\n\n            self.matlab_res = matlab.uint8(res.tolist())\n\n        scores = OrderedDict()\n        for metric in metrics:\n            if metric in self.referecnce_metrics:\n                if ref is None:\n                    raise ValueError(\n                        \"Ground-truth refernce is needed for {}\".format(metric)\n                    )\n                scores[metric] = getattr(self, \"calculate_{}\".format(metric))(res, ref)\n\n            elif metric in self.nonreference_metrics:\n                scores[metric] = getattr(self, \"calculate_{}\".format(metric))(res)\n\n            else:\n                raise KeyError(\n                    \"{} is not Supported metric. (Support only {})\".format(\n                        metric, self.supported_metrics\n                    )\n                )\n        return scores\n\n    def calculate_lpips(self, res, ref):\n        if res.ndim < 3:\n            return 0\n        res = lp.im2tensor(res)\n        ref = lp.im2tensor(ref)\n        if self.cuda:\n            res = res.cuda()\n            ref = ref.cuda()\n        score = self.lpips_fn(res, ref)\n        return score.item()\n\n    def calculate_niqe(self, res):\n        return self.eng.niqe(self.matlab_res)\n\n    def calculate_brisque(self, res):\n        return self.eng.brisque(self.matlab_res)\n\n    def calculate_piqe(self, piqe):\n        return self.eng.piqe(self.matlab_res)\n    \n    def calculate_best_psnr(self, res, ref):\n        best_psnr_, best_ssim_ = best_psnr(res, ref)\n        self.best_ssim = best_ssim_\n        return best_psnr_\n    \n    def calculate_best_ssim(self, res, ref):\n        assert hasattr(self, \"best_ssim\")\n        return self.best_ssim\n\n    @staticmethod\n    def calculate_psnr(res, ref):\n        return psnr(res, ref)\n\n    @staticmethod\n    def calculate_ssim(res, ref):\n        return ssim(res, ref)\n"
  },
  {
    "path": "codes/metrics/psnr.py",
    "content": "import math\n\nimport numpy as np\n\n\ndef psnr(img1, img2):\n    # img1 and img2 have range [0, 255]\n    img1 = img1.astype(np.float64)\n    img2 = img2.astype(np.float64)\n    mse = np.mean((img1 - img2) ** 2)\n    if mse == 0:\n        return float(\"inf\")\n    return 20 * math.log10(255.0 / math.sqrt(mse))\n"
  },
  {
    "path": "codes/metrics/ssim.py",
    "content": "import math\n\nimport cv2\nimport numpy as np\n\n\ndef ssim(img1, img2):\n    C1 = (0.01 * 255) ** 2\n    C2 = (0.03 * 255) ** 2\n\n    img1 = img1.astype(np.float64)\n    img2 = img2.astype(np.float64)\n    kernel = cv2.getGaussianKernel(11, 1.5)\n    window = np.outer(kernel, kernel.transpose())\n\n    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid\n    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]\n    mu1_sq = mu1 ** 2\n    mu2_sq = mu2 ** 2\n    mu1_mu2 = mu1 * mu2\n    sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq\n    sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq\n    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2\n\n    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (\n        (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)\n    )\n    return ssim_map.mean()\n\n\ndef calculate_ssim(img1, img2):\n    \"\"\"calculate SSIM\n    the same outputs as MATLAB's\n    img1, img2: [0, 255]\n    \"\"\"\n    if not img1.shape == img2.shape:\n        raise ValueError(\"Input images must have the same dimensions.\")\n    if img1.ndim == 2:\n        return ssim(img1, img2)\n    elif img1.ndim == 3:\n        if img1.shape[2] == 3:\n            ssims = []\n            for i in range(3):\n                ssims.append(ssim(img1, img2))\n            return np.array(ssims).mean()\n        elif img1.shape[2] == 1:\n            return ssim(np.squeeze(img1), np.squeeze(img2))\n    else:\n        raise ValueError(\"Wrong input image dimensions.\")\n"
  },
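  {
    "path": "codes/metrics/README.md",
    "content": "`IQA` dispatches to the metric functions in this folder: full-reference metrics (`psnr`, `ssim`, `best_psnr`, `best_ssim`, `lpips`) need a ground-truth `ref`, while the no-reference ones (`niqe`, `piqe`, `brisque`) start a matlab engine. A minimal sketch with the pure-numpy metrics on random uint8 images (assuming `codes/` is on `sys.path`; the LPIPS network and the matlab engine are never loaded for these two metrics):\n\n```python\nimport numpy as np\n\nfrom metrics import IQA\n\nmeasure = IQA(metrics=[\"psnr\", \"ssim\"], cuda=False)\n\nres = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # restored image\nref = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # ground truth\n\nscores = measure(res=res, ref=ref, metrics=[\"psnr\", \"ssim\"])\nprint(scores)  # OrderedDict([('psnr', ...), ('ssim', ...)])\n```\n"
  },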
  {
    "path": "codes/scripts/create_lmdb.py",
    "content": "import glob\nimport os\nimport os.path as osp\nimport pickle\nimport sys\n\nimport cv2\nimport lmdb\n\nsys.path.append(\"../\")\nfrom utils import ProgressBar\n\nsys.path.append(\"../\")\n\n# configurations\nimg_folder = \"/home/lzx/SRDatasets/DIV2K_train/HR/x4/*\"\nlmdb_save_path = \"/home/lzx/SRDatasets/DIV2K_train/HR/x4_new.lmdb\"\nmeta_info = {\"name\": \"x4\"}\n\nmode = (\n    2  # 1 for reading all the images to memory and then writing to lmdb (more memory);\n)\n# 2 for reading several images and then writing to lmdb, loop over (less memory)\nbatch = 1000  # Used in mode 2. After batch images, lmdb commits.\n###########################################\nif not lmdb_save_path.endswith(\".lmdb\"):\n    raise ValueError(\"lmdb_save_path must end with 'lmdb'.\")\n#### whether the lmdb file exist\nif osp.exists(lmdb_save_path):\n    print(\"Folder [{:s}] already exists. Exit...\".format(lmdb_save_path))\n    sys.exit(1)\nimg_list = sorted(glob.glob(img_folder))\nif mode == 1:\n    print(\"Read images...\")\n    dataset = [cv2.imread(v, cv2.IMREAD_UNCHANGED) for v in img_list]\n    data_size = sum([img.nbytes for img in dataset])\nelif mode == 2:\n    print(\"Calculating the total size of images...\")\n    data_size = sum(os.stat(v).st_size for v in img_list)\nelse:\n    raise ValueError(\"mode should be 1 or 2\")\n\nkey_l = []\nresolution_l = []\npbar = ProgressBar(len(img_list))\nenv = lmdb.open(lmdb_save_path, map_size=data_size * 10)\ntxn = env.begin(write=True)  # txn is a Transaction object\nfor i, v in enumerate(img_list):\n    pbar.update(\"Write {}\".format(v))\n    base_name = osp.splitext(osp.basename(v))[0]\n    key = base_name.encode(\"ascii\")\n    data = dataset[i] if mode == 1 else cv2.imread(v, cv2.IMREAD_UNCHANGED)\n    if data.ndim == 2:\n        H, W = data.shape\n        C = 1\n    else:\n        H, W, C = data.shape\n    txn.put(key, data)\n    key_l.append(base_name)\n    resolution_l.append(\"{:d}_{:d}_{:d}\".format(C, H, W))\n    # commit in mode 2\n    if mode == 2 and i % batch == 1:\n        txn.commit()\n        txn = env.begin(write=True)\n\ntxn.commit()\nenv.close()\n\nprint(\"Finish writing lmdb.\")\n\n#### create meta information\n# check whether all the images are the same size\nsame_resolution = len(set(resolution_l)) <= 1\nif same_resolution:\n    meta_info[\"resolution\"] = [resolution_l[0]]\n    meta_info[\"keys\"] = key_l\n    print(\"All images have the same resolution. Simplify the meta info...\")\nelse:\n    meta_info[\"resolution\"] = resolution_l\n    meta_info[\"keys\"] = key_l\n    print(\"Not all images have the same resolution. Save meta info for each image...\")\n\n#### pickle dump\npickle.dump(meta_info, open(osp.join(lmdb_save_path, \"meta_info.pkl\"), \"wb\"))\nprint(\"Finish creating lmdb meta info.\")\n"
  },
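  {
    "path": "codes/scripts/README.md",
    "content": "`create_lmdb.py` stores each image's raw pixel buffer under its basename and records the shapes as \"C_H_W\" strings in `meta_info.pkl` (a single entry when all images share one resolution, otherwise one per image). A hypothetical read-back check, assuming 8-bit images and a path of your own:\n\n```python\nimport os.path as osp\nimport pickle\n\nimport lmdb\nimport numpy as np\n\nlmdb_path = \"/path/to/x4_new.lmdb\"  # the lmdb_save_path used when writing\nmeta = pickle.load(open(osp.join(lmdb_path, \"meta_info.pkl\"), \"rb\"))\n\nkey = meta[\"keys\"][0]\nC, H, W = [int(s) for s in meta[\"resolution\"][0].split(\"_\")]\n\nenv = lmdb.open(lmdb_path, readonly=True, lock=False, readahead=False, meminit=False)\nwith env.begin(write=False) as txn:\n    buf = txn.get(key.encode(\"ascii\"))\nimg = np.frombuffer(buf, dtype=np.uint8).reshape(H, W, C)\n```\n"
  },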
  {
    "path": "codes/scripts/extract_subimgs_single.py",
    "content": "import os\nimport os.path as osp\nimport sys\nfrom multiprocessing import Pool\n\nimport cv2\nimport numpy as np\n\ntry:\n    sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))\n    from utils.util import ProgressBar\nexcept ImportError:\n    pass\n\n\ndef main():\n    \"\"\"A multi-thread tool to crop sub imags.\"\"\"\n    input_folder = \"/mnt/SSD/xtwang/BasicSR_datasets/DIV2K800/DIV2K800\"\n    save_folder = \"/mnt/SSD/xtwang/BasicSR_datasets/DIV2K800/DIV2K800_sub\"\n    n_thread = 20\n    crop_sz = 480\n    step = 240\n    thres_sz = 48\n    compression_level = 3  # 3 is the default value in cv2\n    # CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size and longer\n    # compression time. If read raw images during training, use 0 for faster IO speed.\n\n    if not os.path.exists(save_folder):\n        os.makedirs(save_folder)\n        print(\"mkdir [{:s}] ...\".format(save_folder))\n    else:\n        print(\"Folder [{:s}] already exists. Exit...\".format(save_folder))\n        sys.exit(1)\n\n    img_list = []\n    for root, _, file_list in sorted(os.walk(input_folder)):\n        path = [\n            os.path.join(root, x) for x in file_list\n        ]  # assume only images in the input_folder\n        img_list.extend(path)\n\n    def update(arg):\n        pbar.update(arg)\n\n    pbar = ProgressBar(len(img_list))\n\n    pool = Pool(n_thread)\n    for path in img_list:\n        pool.apply_async(\n            worker,\n            args=(path, save_folder, crop_sz, step, thres_sz, compression_level),\n            callback=update,\n        )\n    pool.close()\n    pool.join()\n    print(\"All subprocesses done.\")\n\n\ndef worker(path, save_folder, crop_sz, step, thres_sz, compression_level):\n    img_name = os.path.basename(path)\n    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)\n\n    n_channels = len(img.shape)\n    if n_channels == 2:\n        h, w = img.shape\n    elif n_channels == 3:\n        h, w, c = img.shape\n    else:\n        raise ValueError(\"Wrong image shape - {}\".format(n_channels))\n\n    h_space = np.arange(0, h - crop_sz + 1, step)\n    if h - (h_space[-1] + crop_sz) > thres_sz:\n        h_space = np.append(h_space, h - crop_sz)\n    w_space = np.arange(0, w - crop_sz + 1, step)\n    if w - (w_space[-1] + crop_sz) > thres_sz:\n        w_space = np.append(w_space, w - crop_sz)\n\n    index = 0\n    for x in h_space:\n        for y in w_space:\n            index += 1\n            if n_channels == 2:\n                crop_img = img[x : x + crop_sz, y : y + crop_sz]\n            else:\n                crop_img = img[x : x + crop_sz, y : y + crop_sz, :]\n            crop_img = np.ascontiguousarray(crop_img)\n            # var = np.var(crop_img / 255)\n            # if var > 0.008:\n            #     print(img_name, index_str, var)\n            cv2.imwrite(\n                os.path.join(\n                    save_folder, img_name.replace(\".png\", \"_s{:03d}.png\".format(index))\n                ),\n                crop_img,\n                [cv2.IMWRITE_PNG_COMPRESSION, compression_level],\n            )\n    return \"Processing {:s} ...\".format(img_name)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/scripts/generate_mod_LR_bic.m",
    "content": "function generate_mod_LR_bic()\n%% matlab code to genetate mod images, bicubic-downsampled LR, bicubic_upsampled images.\n\n%% set parameters\n% comment the unnecessary line\ninput_folder = '/mnt/SSD/xtwang/BasicSR_datasets/DIV2K800/DIV2K800_sub';\n% save_mod_folder = '';\nsave_LR_folder = '/mnt/SSD/xtwang/BasicSR_datasets/DIV2K800/DIV2K800_sub_bicLRx4';\n% save_bic_folder = '';\n\nup_scale = 4;\nmod_scale = 4;\n\nif exist('save_mod_folder', 'var')\n    if exist(save_mod_folder, 'dir')\n        disp(['It will cover ', save_mod_folder]);\n    else\n        mkdir(save_mod_folder);\n    end\nend\nif exist('save_LR_folder', 'var')\n    if exist(save_LR_folder, 'dir')\n        disp(['It will cover ', save_LR_folder]);\n    else\n        mkdir(save_LR_folder);\n    end\nend\nif exist('save_bic_folder', 'var')\n    if exist(save_bic_folder, 'dir')\n        disp(['It will cover ', save_bic_folder]);\n    else\n        mkdir(save_bic_folder);\n    end\nend\n\nidx = 0;\nfilepaths = dir(fullfile(input_folder,'*.*'));\nfor i = 1 : length(filepaths)\n    [paths,imname,ext] = fileparts(filepaths(i).name);\n    if isempty(imname)\n        disp('Ignore . folder.');\n    elseif strcmp(imname, '.')\n        disp('Ignore .. folder.');\n    else\n        idx = idx + 1;\n        str_rlt = sprintf('%d\\t%s.\\n', idx, imname);\n        fprintf(str_rlt);\n        % read image\n        img = imread(fullfile(input_folder, [imname, ext]));\n        img = im2double(img);\n        % modcrop\n        img = modcrop(img, mod_scale);\n        if exist('save_mod_folder', 'var')\n            imwrite(img, fullfile(save_mod_folder, [imname, '.png']));\n        end\n        % LR\n        im_LR = imresize(img, 1/up_scale, 'bicubic');\n        if exist('save_LR_folder', 'var')\n            imwrite(im_LR, fullfile(save_LR_folder, [imname, '_bicLRx4.png']));\n        end\n        % Bicubic\n        if exist('save_bic_folder', 'var')\n            im_B = imresize(im_LR, up_scale, 'bicubic');\n            imwrite(im_B, fullfile(save_bic_folder, [imname, '_bicx4.png']));\n        end\n    end\nend\nend\n\n%% modcrop\nfunction img = modcrop(img, modulo)\nif size(img,3) == 1\n    sz = size(img);\n    sz = sz - mod(sz, modulo);\n    img = img(1:sz(1), 1:sz(2));\nelse\n    tmpsz = size(img);\n    sz = tmpsz(1:2);\n    sz = sz - mod(sz, modulo);\n    img = img(1:sz(1), 1:sz(2),:);\nend\nend\n"
  },
  {
    "path": "codes/scripts/generate_mod_LR_bic.py",
    "content": "import os\nimport sys\n\nimport cv2\nimport numpy as np\n\ntry:\n    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n    from utils import imresize\nexcept ImportError:\n    pass\n\n\ndef generate_mod_LR_bic():\n    # set parameters\n    up_scale = 2\n    mod_scale = 2\n    # set data dir\n    sourcedir = \"/mnt/hdd/lzx/SRDatasets/NTIRE2018/DIV2K_valid_HR/\"\n    savedir = \"/mnt/hdd/lzx/SRDatasets/DIV2K_valid/\"\n\n    saveHRpath = os.path.join(savedir, \"HR\", \"x\" + str(mod_scale))\n    saveLRpath = os.path.join(savedir, \"BicLR\", \"x\" + str(up_scale))\n    # saveBicpath = os.path.join(savedir, \"Bic\", \"x\" + str(up_scale))\n\n    if not os.path.isdir(sourcedir):\n        print(\"Error: No source data found\")\n        exit(0)\n    if not os.path.isdir(savedir):\n        os.mkdir(savedir)\n\n    if not os.path.isdir(os.path.join(savedir, \"HR\")):\n        os.mkdir(os.path.join(savedir, \"HR\"))\n    if not os.path.isdir(os.path.join(savedir, \"BicLR\")):\n        os.mkdir(os.path.join(savedir, \"BicLR\"))\n    # if not os.path.isdir(os.path.join(savedir, \"Bic\")):\n    #     os.mkdir(os.path.join(savedir, \"Bic\"))\n\n    if not os.path.isdir(saveHRpath):\n        os.mkdir(saveHRpath)\n    else:\n        print(\"It will cover \" + str(saveHRpath))\n\n    if not os.path.isdir(saveLRpath):\n        os.mkdir(saveLRpath)\n    else:\n        print(\"It will cover \" + str(saveLRpath))\n\n    # if not os.path.isdir(saveBicpath):\n    #     os.mkdir(saveBicpath)\n    # else:\n    #     print(\"It will cover \" + str(saveBicpath))\n\n    filepaths = [f for f in os.listdir(sourcedir) if f.endswith(\".png\")]\n    num_files = len(filepaths)\n\n    # prepare data with augementation\n    for i in range(num_files):\n        filename = filepaths[i]\n        print(\"No.{} -- Processing {}\".format(i, filename))\n        # read image\n        image = cv2.imread(os.path.join(sourcedir, filename))\n\n        width = int(np.floor(image.shape[1] / mod_scale))\n        height = int(np.floor(image.shape[0] / mod_scale))\n        # modcrop\n        if len(image.shape) == 3:\n            image_HR = image[0 : mod_scale * height, 0 : mod_scale * width, :]\n        else:\n            image_HR = image[0 : mod_scale * height, 0 : mod_scale * width]\n        # LR\n        image_LR = imresize(image_HR, 1 / up_scale, True)\n        # bic\n        # image_Bic = imresize(image_LR, up_scale, True)\n\n        cv2.imwrite(os.path.join(saveHRpath, filename), image_HR)\n        cv2.imwrite(os.path.join(saveLRpath, filename), image_LR)\n        # cv2.imwrite(os.path.join(saveBicpath, filename), image_Bic)\n\n\nif __name__ == \"__main__\":\n    generate_mod_LR_bic()\n"
  },
  {
    "path": "codes/scripts/generate_mod_blur_LR_bic.py",
    "content": "import os\nimport sys\n\nimport cv2\nimport numpy as np\nimport torch\n\ntry:\n    sys.path.append(\"..\")\n    from utils import imresize\n    import utils as util\nexcept ImportError:\n    pass\n\n\ndef generate_mod_LR_bic():\n    # set parameters\n    up_scale = 4\n    mod_scale = 4\n    # set data dir\n    sourcedir = \"/data/Set5/source/\"\n    savedir = \"/data/Set5/\"\n\n    # load PCA matrix of enough kernel\n    print(\"load PCA matrix\")\n    pca_matrix = torch.load(\n        \"../../pca_matrix.pth\", map_location=lambda storage, loc: storage\n    )\n    print(\"PCA matrix shape: {}\".format(pca_matrix.shape))\n\n    degradation_setting = {\n        \"random_kernel\": False,\n        \"code_length\": 10,\n        \"ksize\": 21,\n        \"pca_matrix\": pca_matrix,\n        \"scale\": up_scale,\n        \"cuda\": True,\n        \"rate_iso\": 1.0,\n    }\n\n    # set random seed\n    util.set_random_seed(0)\n\n    saveHRpath = os.path.join(savedir, \"HR\", \"x\" + str(mod_scale))\n    saveLRpath = os.path.join(savedir, \"LR\", \"x\" + str(up_scale))\n    saveBicpath = os.path.join(savedir, \"Bic\", \"x\" + str(up_scale))\n    saveLRblurpath = os.path.join(savedir, \"LRblur\", \"x\" + str(up_scale))\n\n    if not os.path.isdir(sourcedir):\n        print(\"Error: No source data found\")\n        exit(0)\n    if not os.path.isdir(savedir):\n        os.mkdir(savedir)\n\n    if not os.path.isdir(os.path.join(savedir, \"HR\")):\n        os.mkdir(os.path.join(savedir, \"HR\"))\n    if not os.path.isdir(os.path.join(savedir, \"LR\")):\n        os.mkdir(os.path.join(savedir, \"LR\"))\n    if not os.path.isdir(os.path.join(savedir, \"Bic\")):\n        os.mkdir(os.path.join(savedir, \"Bic\"))\n    if not os.path.isdir(os.path.join(savedir, \"LRblur\")):\n        os.mkdir(os.path.join(savedir, \"LRblur\"))\n\n    if not os.path.isdir(saveHRpath):\n        os.mkdir(saveHRpath)\n    else:\n        print(\"It will cover \" + str(saveHRpath))\n\n    if not os.path.isdir(saveLRpath):\n        os.mkdir(saveLRpath)\n    else:\n        print(\"It will cover \" + str(saveLRpath))\n\n    if not os.path.isdir(saveBicpath):\n        os.mkdir(saveBicpath)\n    else:\n        print(\"It will cover \" + str(saveBicpath))\n\n    if not os.path.isdir(saveLRblurpath):\n        os.mkdir(saveLRblurpath)\n    else:\n        print(\"It will cover \" + str(saveLRblurpath))\n\n    filepaths = sorted([f for f in os.listdir(sourcedir) if f.endswith(\".png\")])\n    print(filepaths)\n    num_files = len(filepaths)\n\n    # kernel_map_tensor = torch.zeros((num_files, 1, 10)) # each kernel map: 1*10\n\n    # prepare data with augementation\n\n    for i in range(num_files):\n        filename = filepaths[i]\n        print(\"No.{} -- Processing {}\".format(i, filename))\n        # read image\n        image = cv2.imread(os.path.join(sourcedir, filename))\n\n        width = int(np.floor(image.shape[1] / mod_scale))\n        height = int(np.floor(image.shape[0] / mod_scale))\n        # modcrop\n        if len(image.shape) == 3:\n            image_HR = image[0 : mod_scale * height, 0 : mod_scale * width, :]\n        else:\n            image_HR = image[0 : mod_scale * height, 0 : mod_scale * width]\n        # LR_blur, by random gaussian kernel\n        img_HR = util.img2tensor(image_HR)\n        C, H, W = img_HR.size()\n\n        for sig in np.linspace(1.8, 3.2, 8):\n\n            prepro = util.SRMDPreprocessing(sig=sig, **degradation_setting)\n\n            LR_img, ker_map = prepro(img_HR.view(1, C, H, W))\n      
            image_LR_blur = util.tensor2img(LR_img)\n            cv2.imwrite(\n                os.path.join(saveLRblurpath, \"sig{}_{}\".format(sig, filename)),\n                image_LR_blur,\n            )\n            cv2.imwrite(\n                os.path.join(saveHRpath, \"sig{}_{}\".format(sig, filename)), image_HR\n            )\n        # LR\n        image_LR = imresize(image_HR, 1 / up_scale, True)\n        # bic\n        image_Bic = imresize(image_LR, up_scale, True)\n\n        # cv2.imwrite(os.path.join(saveHRpath, filename), image_HR)\n        cv2.imwrite(os.path.join(saveLRpath, filename), image_LR)\n        cv2.imwrite(os.path.join(saveBicpath, filename), image_Bic)\n\n        # kernel_map_tensor[i] = ker_map\n    # save the kernel maps corresponding to the dataset\n    # torch.save(kernel_map_tensor, './Set5_sig2.6_kermap.pth')\n    print(\"Image Blurring & Downsampling Done: X\" + str(up_scale))\n\n\nif __name__ == \"__main__\":\n    generate_mod_LR_bic()\n"
  },
  {
    "path": "codes/scripts/test_imgs.py",
    "content": "import argparse\nimport glob\nimport importlib as imp\nimport os\nimport os.path as osp\nimport sys\nfrom collections import defaultdict\n\nimport cv2\nimport numpy as np\n\nsys.path.append(\"../\")\nfrom metrics.measure import IQA\n\n\ndef parse_argumnets():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"--res_dir\", type=str, default=None, help=\"directory of test images\"\n    )\n    parser.add_argument(\n        \"--ref_dir\", type=str, default=None, help=\"directory of reference images\"\n    )\n    parser.add_argument(\n        \"--save_dir\", type=str, default=None, help=\"directory of saved results\"\n    )\n    parser.add_argument(\"--metrics\", type=list, default=[\"psnr\", \"ssim\", \"lpips\", \"niqe\", \"piqe\", \"brisque\"])\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef bgr2ycbcr(img, only_y=True):\n    \"\"\"bgr version of rgb2ycbcr\n    only_y: only return Y channel\n    Input:\n        uint8, [0, 255]\n        float, [0, 1]\n    \"\"\"\n    in_img_type = img.dtype\n    img.astype(np.float32)\n    if in_img_type != np.uint8:\n        img *= 255.0\n    # convert\n    if only_y:\n        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0\n    else:\n        rlt = (\n            np.matmul(\n                img,\n                [\n                    [24.966, 112.0, -18.214],\n                    [128.553, -74.203, -93.786],\n                    [65.481, -37.797, 112.0],\n                ],\n            )\n            / 255.0\n            + [16, 128, 128]\n        )\n    if in_img_type == np.uint8:\n        rlt = rlt.round()\n    else:\n        rlt /= 255.0\n    return rlt.astype(in_img_type)\n\n\ndef main():\n    args = parse_argumnets()\n    if args.save_dir is None:\n        args.save_dir = args.res_dir\n    if args.res_dir is None:\n        raise TypeError(\"res dir can not be None\")\n        if not osp.exists(args.res_dir):\n            raise ValueError(\"res dir dose not exist\")\n\n    res_paths = sorted(glob.glob(osp.join(args.res_dir, \"*.png\")))\n    print(f\"{len(res_paths)} images to be tested\")\n    if args.ref_dir is not None:\n        ref_paths = sorted(glob.glob(osp.join(args.ref_dir, \"*.png\")))\n\n        if not len(res_paths) == len(ref_paths):\n            raise ValueError(\n                f\"Number of res images {len(res_paths)} must be equal\\\n                to Number of ref images {len(ref_paths)}\"\n            )\n\n    score_file_name = \"_\".join(osp.abspath(args.res_dir).split(\"/\"))\n    score_file_name = osp.join(args.save_dir, f\"{score_file_name}.txt\")\n    score_file = open(score_file_name, \"w\")\n\n    measure = IQA(metrics=args.metrics, cuda=False)\n    test_results_rgb = defaultdict(list)\n    test_results_y = defaultdict(list)\n    for indx, res_path in enumerate(res_paths):\n        res_img = cv2.imread(res_path)\n\n        message = f\"image {res_path}\\t\"\n        if args.ref_dir is not None:\n            ref_img = cv2.imread(ref_paths[indx])\n        else:\n            ref_img = None\n\n        message += \"Original Scores\\t\"\n        scores = measure(res=res_img, ref=ref_img, metrics=args.metrics)\n        for k, v in scores.items():\n            test_results_rgb[k].append(v)\n            message += \"{}: {:.6f}; \".format(k, v)\n\n        if res_img.ndim == 3:\n            res_img_y = bgr2ycbcr(res_img, only_y=True)\n\n            if ref_img is not None:\n                ref_img_y = bgr2ycbcr(ref_img, only_y=True)\n            else:\n                
                ref_img_y = None\n\n            message += \"Y Scores\\t\"\n            scores = measure(res=res_img_y, ref=ref_img_y, metrics=args.metrics)\n            for k, v in scores.items():\n                test_results_y[k].append(v)\n                message += \"{}: {:.6f}; \".format(k, v)\n\n        print(message)\n        score_file.write(message + \"\\n\")\n\n    message = \"-\" * 10 + \"Average Results\" + \"-\" * 10 + \"\\n\"\n    message += \"Original Scores\\t\"\n    for k, v in test_results_rgb.items():\n        ave = sum(v) / len(v)\n        message += \"{}: {:.6f}; \".format(k, ave)\n\n    if len(test_results_y) > 0:\n        message += \"Y Scores\\t\"\n        for k, v in test_results_y.items():\n            ave = sum(v) / len(v)\n            message += \"{}: {:.6f}; \".format(k, ave)\n\n    print(message)\n    score_file.write(message)\n    score_file.close()\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "codes/utils/__init__.py",
    "content": "import importlib\nimport os\nimport os.path as osp\n\nutils_folder = osp.dirname(__file__)\nutils_names = [\n    osp.splitext(osp.basename(v))[0]\n    for v in os.listdir(utils_folder)\n    if v.endswith(\"_utils.py\")\n]\nfor file_name in utils_names:\n    exec(f\"from .{file_name} import *\")\n"
  },
  {
    "path": "codes/utils/data_utils.py",
    "content": "import math\nimport os\nimport pickle\nimport random\n\nimport cv2\nimport numpy as np\nimport torch\n\n# Files & IO\n\nIMG_EXTENSIONS = [\n    \".jpg\",\n    \".JPG\",\n    \".jpeg\",\n    \".JPEG\",\n    \".png\",\n    \".PNG\",\n    \".ppm\",\n    \".PPM\",\n    \".bmp\",\n    \".BMP\",\n]\n\n\ndef is_image_file(filename):\n    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\n\ndef _get_paths_from_images(path):\n    \"\"\"get image path list from image folder\"\"\"\n    assert os.path.isdir(path), \"{:s} is not a valid directory\".format(path)\n    images = []\n    for dirpath, _, fnames in sorted(os.walk(path)):\n        for fname in sorted(fnames):\n            if is_image_file(fname):\n                img_path = os.path.join(dirpath, fname)\n                images.append(img_path)\n    assert images, \"{:s} has no valid image file\".format(path)\n    return images\n\n\ndef _get_paths_from_lmdb(dataroot):\n    \"\"\"get image path list from lmdb meta info\"\"\"\n    meta_info = pickle.load(open(os.path.join(dataroot, \"meta_info.pkl\"), \"rb\"))\n    paths = meta_info[\"keys\"]\n    sizes = meta_info[\"resolution\"]\n    if len(sizes) == 1:\n        sizes = sizes * len(paths)\n    return paths, sizes\n\n\ndef get_image_paths(data_type, dataroot):\n    \"\"\"get image path list\n    support lmdb or image files\"\"\"\n    paths, sizes = None, None\n    if dataroot is None:\n        return None, None\n    else:\n        if data_type == \"lmdb\":\n            paths, sizes = _get_paths_from_lmdb(dataroot)\n            return paths, sizes\n        elif data_type == \"img\":\n            paths = sorted(_get_paths_from_images(dataroot))\n            return paths, None\n        else:\n            raise NotImplementedError(\n                \"data_type [{:s}] is not recognized.\".format(data_type)\n            )\n\n\ndef _read_img_lmdb(env, key, size):\n    \"\"\"read image from lmdb with key (w/ and w/o fixed size)\n    size: (C, H, W) tuple\"\"\"\n    with env.begin(write=False) as txn:\n        buf = txn.get(key.encode(\"ascii\"))\n    img_flat = np.frombuffer(buf, dtype=np.uint8)\n    C, H, W = size\n    img = img_flat.reshape(H, W, C)\n    return img\n\n\ndef read_img(env, path, size=None):\n    \"\"\"read image by cv2 or from lmdb\n    return: Numpy float32, HWC, BGR, [0,1]\"\"\"\n    if env is None:  # img\n        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)\n    else:\n        img = _read_img_lmdb(env, path, size)\n    img = img.astype(np.float32) / 255.0\n    if img.ndim == 2:\n        img = np.expand_dims(img, axis=2)\n    # some images have 4 channels\n    if img.shape[2] > 3:\n        img = img[:, :, :3]\n    return img\n\n\n# image processing\n# process on numpy image\n\n\ndef augment(img, hflip=True, rot=True, mode=None):\n    # horizontal flip OR rotate\n    hflip = hflip and random.random() < 0.5\n    vflip = rot and random.random() < 0.5\n    rot90 = rot and random.random() < 0.5\n\n    def _augment(img):\n        if hflip:\n            img = img[:, ::-1, :]\n        if vflip:\n            img = img[::-1, :, :]\n        if rot90:\n            img = img.transpose(1, 0, 2)\n        return img\n\n    if len(img) == 1:\n        return _augment(img[0])\n    else:\n        return [_augment(I) for I in img]\n\n\ndef augment_flow(img_list, flow_list, hflip=True, rot=True):\n    # horizontal flip OR rotate\n    hflip = hflip and random.random() < 0.5\n    vflip = rot and random.random() < 0.5\n    rot90 = rot and random.random() < 0.5\n\n    
def _augment(img):\n        if hflip:\n            img = img[:, ::-1, :]\n        if vflip:\n            img = img[::-1, :, :]\n        if rot90:\n            img = img.transpose(1, 0, 2)\n        return img\n\n    def _augment_flow(flow):\n        if hflip:\n            flow = flow[:, ::-1, :]\n            flow[:, :, 0] *= -1\n        if vflip:\n            flow = flow[::-1, :, :]\n            flow[:, :, 1] *= -1\n        if rot90:\n            flow = flow.transpose(1, 0, 2)\n            flow = flow[:, :, [1, 0]]\n        return flow\n\n    rlt_img_list = [_augment(img) for img in img_list]\n    rlt_flow_list = [_augment_flow(flow) for flow in flow_list]\n\n    return rlt_img_list, rlt_flow_list\n"
  },
  {
    "path": "codes/utils/deg_utils.py",
    "content": "import os\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom scipy.io import loadmat\n\nfrom .resize_utils import imresize\n\n\ndef DUF_downsample(x, scale=4):\n    \"\"\"Downsamping with Gaussian kernel used in the DUF official code\n\n    Args:\n        x (Tensor, [B, T, C, H, W]): frames to be downsampled.\n        scale (int): downsampling factor: 2 | 3 | 4.\n    \"\"\"\n\n    assert scale in [2, 3, 4], \"Scale [{}] is not supported\".format(scale)\n\n    def gkern(kernlen=13, nsig=1.6):\n        import scipy.ndimage.filters as fi\n\n        inp = np.zeros((kernlen, kernlen))\n        # set element at the middle to one, a dirac delta\n        inp[kernlen // 2, kernlen // 2] = 1\n        # gaussian-smooth the dirac, resulting in a gaussian filter mask\n        return fi.gaussian_filter(inp, nsig)\n\n    B, T, C, H, W = x.size()\n    x = x.view(-1, 1, H, W)\n    pad_w, pad_h = 6 + scale * 2, 6 + scale * 2  # 6 is the pad of the gaussian filter\n    r_h, r_w = 0, 0\n    if scale == 3:\n        r_h = 3 - (H % 3)\n        r_w = 3 - (W % 3)\n    x = F.pad(x, [pad_w, pad_w + r_w, pad_h, pad_h + r_h], \"reflect\")\n\n    gaussian_filter = (\n        torch.from_numpy(gkern(13, 0.4 * scale)).type_as(x).unsqueeze(0).unsqueeze(0)\n    )\n    x = F.conv2d(x, gaussian_filter, stride=scale)\n    x = x[:, :, 2:-2, 2:-2]\n    x = x.view(B, T, C, x.size(2), x.size(3))\n    return x\n\n\ndef PCA(data, k=2):\n    X = torch.from_numpy(data)\n    X_mean = torch.mean(X, 0)\n    X = X - X_mean.expand_as(X)\n    U, S, V = torch.svd(torch.t(X))\n    return U[:, :k]  # PCA matrix\n\n\ndef random_batch_kernel(\n    batch,\n    l=21,\n    sig_min=0.2,\n    sig_max=4.0,\n    rate_iso=1.0,\n    tensor=True,\n    random_disturb=False,\n):\n\n    if rate_iso == 1:\n\n        sigma = np.random.uniform(sig_min, sig_max, (batch, 1, 1))\n        ax = np.arange(-l // 2 + 1.0, l // 2 + 1.0)\n        xx, yy = np.meshgrid(ax, ax)\n        xx = xx[None].repeat(batch, 0)\n        yy = yy[None].repeat(batch, 0)\n        kernel = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))\n        kernel = kernel / np.sum(kernel, (1, 2), keepdims=True)\n        return torch.FloatTensor(kernel) if tensor else kernel\n\n    else:\n\n        sigma_x = np.random.uniform(sig_min, sig_max, (batch, 1, 1))\n        sigma_y = np.random.uniform(sig_min, sig_max, (batch, 1, 1))\n\n        D = np.zeros((batch, 2, 2))\n        D[:, 0, 0] = sigma_x.squeeze() ** 2\n        D[:, 1, 1] = sigma_y.squeeze() ** 2\n\n        radians = np.random.uniform(-np.pi, np.pi, (batch))\n        mask_iso = np.random.uniform(0, 1, (batch)) < rate_iso\n        radians[mask_iso] = 0\n        sigma_y[mask_iso] = sigma_x[mask_iso]\n\n        U = np.zeros((batch, 2, 2))\n        U[:, 0, 0] = np.cos(radians)\n        U[:, 0, 1] = -np.sin(radians)\n        U[:, 1, 0] = np.sin(radians)\n        U[:, 1, 1] = np.cos(radians)\n        sigma = np.matmul(U, np.matmul(D, U.transpose(0, 2, 1)))\n        ax = np.arange(-l // 2 + 1.0, l // 2 + 1.0)\n        xx, yy = np.meshgrid(ax, ax)\n        xy = np.hstack((xx.reshape((l * l, 1)), yy.reshape(l * l, 1))).reshape(l, l, 2)\n        xy = xy[None].repeat(batch, 0)\n        inverse_sigma = np.linalg.inv(sigma)[:, None, None]\n        kernel = np.exp(\n            -0.5\n            * np.matmul(\n                np.matmul(xy[:, :, :, None], inverse_sigma), xy[:, :, :, :, None]\n            )\n        )\n        kernel = kernel.reshape(batch, l, l)\n        if random_disturb:\n      
            kernel = kernel + np.random.uniform(0, 0.25, (batch, l, l)) * kernel\n        kernel = kernel / np.sum(kernel, (1, 2), keepdims=True)\n\n        return torch.FloatTensor(kernel) if tensor else kernel\n\n\ndef stable_batch_kernel(batch, l=21, sig=2.6, tensor=True):\n    sigma = sig\n    ax = np.arange(-l // 2 + 1.0, l // 2 + 1.0)\n    xx, yy = np.meshgrid(ax, ax)\n    xx = xx[None].repeat(batch, 0)\n    yy = yy[None].repeat(batch, 0)\n    kernel = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))\n    kernel = kernel / np.sum(kernel, (1, 2), keepdims=True)\n    return torch.FloatTensor(kernel) if tensor else kernel\n\n\ndef b_Bicubic(variable, scale):\n    B, C, H, W = variable.size()\n    H_new = int(H / scale)\n    W_new = int(W / scale)\n    tensor_v = variable.view((B, C, H, W))\n    re_tensor = imresize(tensor_v, 1 / scale)\n    return re_tensor\n\n\ndef random_batch_noise(batch, high, rate_cln=1.0):\n    noise_level = np.random.uniform(size=(batch, 1)) * high\n    noise_mask = np.random.uniform(size=(batch, 1))\n    noise_mask[noise_mask < rate_cln] = 0\n    noise_mask[noise_mask >= rate_cln] = 1\n    return noise_level * noise_mask\n\n\ndef b_GaussianNoising(tensor, noise_high, mean=0.0, noise_size=None, min=0.0, max=1.0):\n    if noise_size is None:\n        size = tensor.size()\n    else:\n        size = noise_size\n    noise = torch.FloatTensor(\n        np.random.normal(loc=mean, scale=noise_high, size=size)\n    ).to(tensor.device)\n    return torch.clamp(noise + tensor, min=min, max=max)\n\n\nclass BatchSRKernel(object):\n    def __init__(\n        self,\n        l=21,\n        sig=2.6,\n        sig_min=0.2,\n        sig_max=4.0,\n        rate_iso=1.0,\n        random_disturb=False,\n    ):\n        self.l = l\n        self.sig = sig\n        self.sig_min = sig_min\n        self.sig_max = sig_max\n        self.rate = rate_iso\n        self.random_disturb = random_disturb\n\n    def __call__(self, random, batch, tensor=False):\n        if random:  # random kernel\n            return random_batch_kernel(\n                batch,\n                l=self.l,\n                sig_min=self.sig_min,\n                sig_max=self.sig_max,\n                rate_iso=self.rate,\n                tensor=tensor,\n                random_disturb=self.random_disturb,\n            )\n        else:  # stable kernel\n            return stable_batch_kernel(batch, l=self.l, sig=self.sig, tensor=tensor)\n\n\nclass BatchBlurKernel(object):\n    def __init__(self, kernels_path):\n        kernels = loadmat(kernels_path)[\"kernels\"]\n        self.num_kernels = kernels.shape[0]\n        self.kernels = kernels\n\n    def __call__(self, random, batch, tensor=False):\n        index = np.random.randint(0, self.num_kernels, batch)\n        kernels = self.kernels[index]\n        return torch.FloatTensor(kernels).contiguous() if tensor else kernels\n\n\nclass PCAEncoder(nn.Module):\n    def __init__(self, weight):\n        super().__init__()\n        self.register_buffer(\"weight\", weight)\n        self.size = self.weight.size()\n\n    def forward(self, batch_kernel):\n
        B, H, W = batch_kernel.size()  # [B, l, l]\n        return torch.bmm(\n            batch_kernel.view((B, 1, H * W)), self.weight.expand((B,) + self.size)\n        ).view((B, -1))\n\n\nclass BatchBlur(object):\n    def __init__(self, l=15):\n        self.l = l\n        if l % 2 == 1:\n            self.pad = (l // 2, l // 2, l // 2, l // 2)\n        else:\n            self.pad = (l // 2, l // 2 - 1, l // 2, l // 2 - 1)\n        # self.pad = nn.ZeroPad2d(l // 2)\n\n    def __call__(self, input, kernel):\n        B, C, H, W = input.size()\n        pad = F.pad(input, self.pad, mode=\"reflect\")\n        H_p, W_p = pad.size()[-2:]\n\n        if len(kernel.size()) == 2:\n            input_CBHW = pad.view((C * B, 1, H_p, W_p))\n            kernel_var = kernel.contiguous().view((1, 1, self.l, self.l))\n            return F.conv2d(input_CBHW, kernel_var, padding=0).view((B, C, H, W))\n        else:\n            input_CBHW = pad.view((1, C * B, H_p, W_p))\n            kernel_var = (\n                kernel.contiguous()\n                .view((B, 1, self.l, self.l))\n                .repeat(1, C, 1, 1)\n                .view((B * C, 1, self.l, self.l))\n            )\n            return F.conv2d(input_CBHW, kernel_var, groups=B * C).view((B, C, H, W))\n\n\nclass SRMDPreprocessing(object):\n    def __init__(\n        self,\n        scale,\n        pca_matrix,\n        ksize=21,\n        code_length=10,\n        random_kernel=True,\n        noise=False,\n        cuda=False,\n        random_disturb=False,\n        sig=0,\n        sig_min=0,\n        sig_max=0,\n        rate_iso=1.0,\n        rate_cln=1,\n        noise_high=0,\n        stored_kernel=False,\n        pre_kernel_path=None,\n    ):\n        self.encoder = PCAEncoder(pca_matrix).cuda() if cuda else PCAEncoder(pca_matrix)\n\n        self.kernel_gen = (\n            BatchSRKernel(\n                l=ksize,\n                sig=sig,\n                sig_min=sig_min,\n                sig_max=sig_max,\n                rate_iso=rate_iso,\n                random_disturb=random_disturb,\n            )\n            if not stored_kernel\n            else BatchBlurKernel(pre_kernel_path)\n        )\n\n        self.blur = BatchBlur(l=ksize)\n        self.para_in = code_length\n        self.l = ksize\n        self.noise = noise\n        self.scale = scale\n        self.cuda = cuda\n        self.rate_cln = rate_cln\n        self.noise_high = noise_high\n        self.random = random_kernel\n\n    def __call__(self, hr_tensor, kernel=False):\n        # hr_tensor is tensor, not cuda tensor\n\n        hr_var = (\n            torch.FloatTensor(hr_tensor).cuda()\n            if self.cuda\n            else torch.FloatTensor(hr_tensor)\n        )\n        device = hr_var.device\n        B, C, H, W = hr_var.size()\n\n        b_kernels = torch.FloatTensor(self.kernel_gen(self.random, B, tensor=True)).to(\n            device\n        )\n        hr_blured_var = self.blur(hr_var, b_kernels)\n\n        # B x self.para_input\n        kernel_code = self.encoder(b_kernels)\n\n        # Down sample\n        if self.scale != 1:\n            lr_blured_t = b_Bicubic(hr_blured_var, self.scale)\n        else:\n            lr_blured_t = hr_blured_var\n\n        # Noisy\n        if self.noise:\n            Noise_level = torch.FloatTensor(\n                random_batch_noise(B, self.noise_high, self.rate_cln)\n            )\n            lr_noised_t = b_GaussianNoising(lr_blured_t, self.noise_high)\n        else:\n            Noise_level = torch.zeros((B, 1))\n
            lr_noised_t = lr_blured_t\n\n        Noise_level = Noise_level.to(device)\n        re_code = (\n            torch.cat([kernel_code, Noise_level * 10], dim=1)\n            if self.noise\n            else kernel_code\n        )\n        lr_re = lr_noised_t.to(device)\n\n        return (lr_re, re_code, b_kernels) if kernel else (lr_re, re_code)\n"
  },
  {
    "path": "codes/utils/file_utils.py",
    "content": "import logging\nimport math\nimport os\nimport random\nimport sys\nimport time\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom shutil import get_terminal_size\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef get_timestamp():\n    return datetime.now().strftime(\"%y%m%d-%H%M%S\")\n\n\ndef mkdir(path):\n    if not os.path.exists(path):\n        os.makedirs(path)\n\n\ndef mkdirs(paths):\n    if isinstance(paths, str):\n        mkdir(paths)\n    else:\n        for path in paths:\n            mkdir(path)\n\n\ndef mkdir_and_rename(path):\n    if os.path.exists(path):\n        new_name = path + \"_archived_\" + get_timestamp()\n        print(\"Path already exists. Rename it to [{:s}]\".format(new_name))\n        logger = logging.getLogger(\"base\")\n        logger.info(\"Path already exists. Rename it to [{:s}]\".format(new_name))\n        os.rename(path, new_name)\n    os.makedirs(path)\n\n\ndef set_random_seed(seed):\n    random.seed(seed)\n    np.random.seed(seed)\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed_all(seed)\n\n\ndef setup_logger(\n    logger_name, root, phase, level=logging.INFO, screen=False, tofile=False\n):\n    \"\"\"set up logger\"\"\"\n    lg = logging.getLogger(logger_name)\n    formatter = logging.Formatter(\n        \"%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s\",\n        datefmt=\"%y-%m-%d %H:%M:%S\",\n    )\n    lg.setLevel(level)\n    lg.propagate = False\n    if tofile:\n        log_file = os.path.join(root, phase + \"_{}.log\".format(get_timestamp()))\n        fh = logging.FileHandler(log_file, mode=\"w\")\n        fh.setFormatter(formatter)\n        lg.addHandler(fh)\n    if screen:\n        sh = logging.StreamHandler()\n        sh.setFormatter(formatter)\n        lg.addHandler(sh)\n\n\nclass ProgressBar(object):\n    \"\"\"A progress bar which can print the progress\n    modified from https://github.com/hellock/cvbase/blob/master/cvbase/progress.py\n    \"\"\"\n\n    def __init__(self, task_num=0, bar_width=50, start=True):\n        self.task_num = task_num\n        max_bar_width = self._get_max_bar_width()\n        self.bar_width = bar_width if bar_width <= max_bar_width else max_bar_width\n        self.completed = 0\n        if start:\n            self.start()\n\n    def _get_max_bar_width(self):\n        terminal_width, _ = get_terminal_size()\n        max_bar_width = min(int(terminal_width * 0.6), terminal_width - 50)\n        if max_bar_width < 10:\n            print(\n                \"terminal width is too small ({}), please consider widen the terminal for better \"\n                \"progressbar visualization\".format(terminal_width)\n            )\n            max_bar_width = 10\n        return max_bar_width\n\n    def start(self):\n        if self.task_num > 0:\n            sys.stdout.write(\n                \"[{}] 0/{}, elapsed: 0s, ETA:\\n{}\\n\".format(\n                    \" \" * self.bar_width, self.task_num, \"Start...\"\n                )\n            )\n        else:\n            sys.stdout.write(\"completed: 0, elapsed: 0s\")\n        sys.stdout.flush()\n        self.start_time = time.time()\n\n    def update(self, msg=\"In progress...\"):\n        self.completed += 1\n        elapsed = time.time() - self.start_time\n        fps = self.completed / elapsed\n        if self.task_num > 0:\n            percentage = self.completed / float(self.task_num)\n            eta = int(elapsed * (1 - percentage) / percentage + 0.5)\n    
            mark_width = int(self.bar_width * percentage)\n            bar_chars = \">\" * mark_width + \"-\" * (self.bar_width - mark_width)\n            sys.stdout.write(\"\\033[2F\")  # cursor up 2 lines\n            sys.stdout.write(\n                \"\\033[J\"\n            )  # clear the output (remove extra chars since last display)\n            sys.stdout.write(\n                \"[{}] {}/{}, {:.1f} tasks/s, elapsed: {}s, ETA: {:5}s\\n{}\\n\".format(\n                    bar_chars,\n                    self.completed,\n                    self.task_num,\n                    fps,\n                    int(elapsed + 0.5),\n                    eta,\n                    msg,\n                )\n            )\n        else:\n            sys.stdout.write(\n                \"completed: {}, elapsed: {}s, {:.1f} tasks/s\".format(\n                    self.completed, int(elapsed + 0.5), fps\n                )\n            )\n        sys.stdout.flush()\n"
  },
  {
    "path": "codes/utils/img_utils.py",
    "content": "import math\nimport os\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision.utils import make_grid\n\n\ndef tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):\n    \"\"\"\n    Converts a torch Tensor into an image Numpy array\n    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order\n    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)\n    \"\"\"\n    tensor = tensor.squeeze().float().cpu().clamp_(*min_max)  # clamp\n    tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])  # to range [0,1]\n    n_dim = tensor.dim()\n    if n_dim == 4:\n        n_img = len(tensor)\n        img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()\n        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR\n    elif n_dim == 3:\n        img_np = tensor.numpy()\n        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR\n    elif n_dim == 2:\n        img_np = tensor.numpy()\n    else:\n        raise TypeError(\n            \"Only support 4D, 3D and 2D tensor. But received with dimension: {:d}\".format(\n                n_dim\n            )\n        )\n    if out_type == np.uint8:\n        img_np = (img_np * 255.0).round()\n        # Important. Unlike matlab, numpy.unit8() WILL NOT round by default.\n    return img_np.astype(out_type)\n\n\ndef save_img(img, img_path, mode=\"BGR\"):\n    cv2.imwrite(img_path, img)\n\n\ndef img2tensor(img):\n    \"\"\"\n    # BGR to RGB, HWC to CHW, numpy to tensor\n    Input: img(H, W, C), [0,255], np.uint8 (default)\n    Output: 3D(C,H,W), RGB order, float tensor\n    \"\"\"\n    img = img.astype(np.float32) / 255.0\n    img = img[:, :, [2, 1, 0]]\n    img = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float()\n    return img\n\n\ndef channel_convert(tar_type, img_list):\n    # conversion among BGR, gray and y\n    if tar_type == \"gray\":  # BGR to gray\n        gray_list = []\n        for img in img_list:\n            if len(img.shape) == 3:\n                if img.shape[2] == 3:\n                    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, None]\n                gray_list.append(img)\n            else:\n                gray_list.append(img[:, :, None])\n        return gray_list\n    elif tar_type == \"y\":\n        y_list = []\n        for img in img_list:\n            if len(img.shape) == 3:\n                if img.shape[2] == 3:\n                    img = bgr2ycbcr(img, only_y=True)[:, :, None]\n                y_list.append(img)\n            else:\n                y_list.append(img[:, :, None])\n        return y_list\n    elif tar_type == \"RGB\":\n        rbg_list = []\n        for img in img_list:\n            if len(img.shape) == 3:\n                rbg_list.append(img)\n            else:\n                rbg_list.append(cv2.cvtColor(img, cv2.COLOR_GRAY2BGR))\n        return rbg_list\n    else:\n        return img_list\n\n\ndef rgb2ycbcr(img, only_y=True):\n    \"\"\"same as matlab rgb2ycbcr\n    only_y: only return Y channel\n    Input:\n        uint8, [0, 255]\n        float, [0, 1]\n    \"\"\"\n    in_img_type = img.dtype\n    img.astype(np.float32)\n    if in_img_type != np.uint8:\n        img *= 255.0\n    # convert\n    if only_y:\n        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0\n    else:\n        rlt = (\n            np.matmul(\n                img,\n                [\n                    [65.481, -37.797, 112.0],\n        
                    [128.553, -74.203, -93.786],\n                    [24.966, 112.0, -18.214],\n                ],\n            )\n            / 255.0\n            + [16, 128, 128]\n        )\n    if in_img_type == np.uint8:\n        rlt = rlt.round()\n    else:\n        rlt /= 255.0\n    return rlt.astype(in_img_type)\n\n\ndef bgr2ycbcr(img, only_y=True):\n    \"\"\"bgr version of rgb2ycbcr\n    only_y: only return Y channel\n    Input:\n        uint8, [0, 255]\n        float, [0, 1]\n    \"\"\"\n    in_img_type = img.dtype\n    img = img.astype(np.float32)\n    if in_img_type != np.uint8:\n        img *= 255.0\n    # convert\n    if only_y:\n        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0\n    else:\n        rlt = (\n            np.matmul(\n                img,\n                [\n                    [24.966, 112.0, -18.214],\n                    [128.553, -74.203, -93.786],\n                    [65.481, -37.797, 112.0],\n                ],\n            )\n            / 255.0\n            + [16, 128, 128]\n        )\n    if in_img_type == np.uint8:\n        rlt = rlt.round()\n    else:\n        rlt /= 255.0\n    return rlt.astype(in_img_type)\n\n\ndef ycbcr2rgb(img):\n    \"\"\"same as matlab ycbcr2rgb\n    Input:\n        uint8, [0, 255]\n        float, [0, 1]\n    \"\"\"\n    in_img_type = img.dtype\n    img = img.astype(np.float32)\n    if in_img_type != np.uint8:\n        img *= 255.0\n    # convert\n    rlt = (\n        np.matmul(\n            img,\n            [\n                [0.00456621, 0.00456621, 0.00456621],\n                [0, -0.00153632, 0.00791071],\n                [0.00625893, -0.00318811, 0],\n            ],\n        )\n        * 255.0\n        + [-222.921, 135.576, -276.836]\n    )\n    if in_img_type == np.uint8:\n        rlt = rlt.round()\n    else:\n        rlt /= 255.0\n    return rlt.astype(in_img_type)\n\n\ndef modcrop(img_in, scale):\n    # img_in: Numpy, HWC or HW\n    img = np.copy(img_in)\n    if img.ndim == 2:\n        H, W = img.shape\n        H_r, W_r = H % scale, W % scale\n        img = img[: H - H_r, : W - W_r]\n    elif img.ndim == 3:\n        H, W, C = img.shape\n        H_r, W_r = H % scale, W % scale\n        img = img[: H - H_r, : W - W_r, :]\n    else:\n        raise ValueError(\"Wrong img ndim: [{:d}].\".format(img.ndim))\n    return img\n"
  },
  {
    "path": "codes/utils/option.py",
    "content": "import logging\nimport os\nimport os.path as osp\nimport sys\nfrom collections import OrderedDict\n\nimport yaml\n\n\ndef ordered_yaml():\n    \"\"\"Support OrderedDict for yaml.\n    Returns:\n        yaml Loader and Dumper.\n    \"\"\"\n    try:\n        from yaml import CDumper as Dumper\n        from yaml import CLoader as Loader\n    except ImportError:\n        from yaml import Dumper, Loader\n\n    _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG\n\n    def dict_representer(dumper, data):\n        return dumper.represent_dict(data.items())\n\n    def dict_constructor(loader, node):\n        return OrderedDict(loader.construct_pairs(node))\n\n    Dumper.add_representer(OrderedDict, dict_representer)\n    Loader.add_constructor(_mapping_tag, dict_constructor)\n    return Loader, Dumper\n\n\ndef parse(opt_path, root_path=\".\", is_train=True):\n\n    opt_path = osp.abspath(opt_path)\n    with open(opt_path, mode=\"r\") as f:\n        Loader, _ = ordered_yaml()\n        opt = yaml.load(f, Loader=Loader)\n\n    # export CUDA_VISIBLE_DEVICES\n    gpu_list = \",\".join(str(x) for x in opt[\"gpu_ids\"])\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu_list\n    print(\"export CUDA_VISIBLE_DEVICES=\" + gpu_list)\n\n    opt[\"is_train\"] = is_train\n    # datasets\n    for phase, dataset in opt[\"datasets\"].items():\n        for p in [\"train\", \"val\", \"test\"]:\n            if p in phase:\n                dataset[\"phase\"] = phase\n        dataset[\"scale\"] = opt.get(\"scale\", 1)\n\n    # path\n    if not opt.get(\"path\"):\n        opt[\"path\"] = {}\n    opt[\"path\"][\"root\"] = osp.abspath(root_path)\n    config_paths = osp.abspath(opt_path).split(\"/\")\n    config_dir = config_paths[config_paths.index(\"config\") + 1]\n    if is_train:\n        experiments_root = osp.join(\n            opt[\"path\"][\"root\"], \"experiments\", config_dir, opt[\"name\"]\n        )\n        opt[\"path\"][\"experiments_root\"] = experiments_root\n\n        for dirname in [\"models\", \"training_state\", \"log\", \"val_images\"]:\n            opt[\"path\"][dirname] = osp.join(experiments_root, dirname)\n\n        # change some options for debug mode\n        if \"debug\" in opt[\"name\"]:\n            opt[\"train\"][\"val_freq\"] = 8\n            opt[\"logger\"][\"print_freq\"] = 1\n            opt[\"logger\"][\"save_checkpoint_freq\"] = 8\n    else:  # test\n        results_root = osp.join(opt[\"path\"][\"root\"], \"results\", config_dir, opt[\"name\"])\n        opt[\"path\"][\"results_root\"] = results_root\n        opt[\"path\"][\"log\"] = osp.join(results_root, \"log\")\n\n    return opt\n\n\ndef dict2str(opt, indent_l=1):\n    \"\"\"dict to string for logger\"\"\"\n    msg = \"\"\n    for k, v in opt.items():\n        if isinstance(v, dict):\n            msg += \" \" * (indent_l * 2) + k + \":[\\n\"\n            msg += dict2str(v, indent_l + 1)\n            msg += \" \" * (indent_l * 2) + \"]\\n\"\n        else:\n            msg += \" \" * (indent_l * 2) + k + \": \" + str(v) + \"\\n\"\n    return msg\n\n\nclass NoneDict(dict):\n    def __missing__(self, key):\n        return None\n\n\n# convert to NoneDict, which return None for missing key.\ndef dict_to_nonedict(opt):\n    if isinstance(opt, dict):\n        new_opt = dict()\n        for key, sub_opt in opt.items():\n            new_opt[key] = dict_to_nonedict(sub_opt)\n        return NoneDict(**new_opt)\n    elif isinstance(opt, list):\n        return [dict_to_nonedict(sub_opt) for sub_opt in opt]\n    else:\n      
  return opt\n"
  },
  {
    "path": "codes/utils/registry.py",
    "content": "# Modified from: https://github.com/facebookresearch/fvcore/blob/master/fvcore/common/registry.py  # noqa: E501\nclass Registry:\n    \"\"\"\n    The registry that provides name -> object mapping, to support third-party\n    users' custom modules.\n    To create a registry (e.g. a backbone registry):\n    .. code-block:: python\n        BACKBONE_REGISTRY = Registry('BACKBONE')\n    To register an object:\n    .. code-block:: python\n        @BACKBONE_REGISTRY.register()\n        class MyBackbone():\n            ...\n    Or:\n    .. code-block:: python\n        BACKBONE_REGISTRY.register(MyBackbone)\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"\n        Args:\n            name (str): the name of this registry\n        \"\"\"\n        self._name = name\n        self._obj_map = {}\n\n    def _do_register(self, name, obj):\n        assert name not in self._obj_map, (\n            f\"An object named '{name}' was already registered \"\n            f\"in '{self._name}' registry!\"\n        )\n        self._obj_map[name] = obj\n\n    def register(self, obj=None):\n        \"\"\"\n        Register the given object under the the name `obj.__name__`.\n        Can be used as either a decorator or not.\n        See docstring of this class for usage.\n        \"\"\"\n        if obj is None:\n            # used as a decorator\n            def deco(func_or_class):\n                name = func_or_class.__name__\n                self._do_register(name, func_or_class)\n                return func_or_class\n\n            return deco\n\n        # used as a function call\n        name = obj.__name__\n        self._do_register(name, obj)\n\n    def get(self, name):\n        ret = self._obj_map.get(name)\n        if ret is None:\n            raise KeyError(\n                f\"No object named '{name}' found in '{self._name}' registry!\"\n            )\n        return ret\n\n    def __contains__(self, name):\n        return name in self._obj_map\n\n    def __iter__(self):\n        return iter(self._obj_map.items())\n\n    def keys(self):\n        return self._obj_map.keys()\n\n\nDATASET_REGISTRY = Registry(\"dataset\")\nARCH_REGISTRY = Registry(\"arch\")\nMODEL_REGISTRY = Registry(\"model\")\nLOSS_REGISTRY = Registry(\"loss\")\nMETRIC_REGISTRY = Registry(\"metric\")\nLR_SCHEDULER_REGISTRY = Registry(\"lr_scheduler\")\n"
  },
  {
    "path": "codes/utils/resize_utils.py",
    "content": "import math\n\nimport numpy as np\nimport torch\n\n\n# matlab 'imresize' function, now only support 'bicubic'\ndef cubic(x):\n    absx = torch.abs(x)\n    absx2 = absx ** 2\n    absx3 = absx ** 3\n\n    weight = (1.5 * absx3 - 2.5 * absx2 + 1) * ((absx <= 1).type_as(absx)) + (\n        -0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2\n    ) * (((absx > 1) * (absx <= 2)).type_as(absx))\n    return weight\n\n\ndef calculate_weights_indices(\n    in_length, out_length, scale, kernel, kernel_width, antialiasing\n):\n    if (scale < 1) and (antialiasing):\n        # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width\n        kernel_width = kernel_width / scale\n\n    # Output-space coordinates\n    x = torch.linspace(1, out_length, out_length)\n\n    # Input-space coordinates. Calculate the inverse mapping such that 0.5\n    # in output space maps to 0.5 in input space, and 0.5+scale in output\n    # space maps to 1.5 in input space.\n    u = x / scale + 0.5 * (1 - 1 / scale)\n\n    # What is the left-most pixel that can be involved in the computation?\n    left = torch.floor(u - kernel_width / 2)\n\n    # What is the maximum number of pixels that can be involved in the\n    # computation?  Note: it's OK to use an extra pixel here; if the\n    # corresponding weights are all zero, it will be eliminated at the end\n    # of this function.\n    P = math.ceil(kernel_width) + 2\n\n    # The indices of the input pixels involved in computing the k-th output\n    # pixel are in row k of the indices matrix.\n    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(\n        0, P - 1, P\n    ).view(1, P).expand(out_length, P)\n\n    # The weights used to compute the k-th output pixel are in row k of the\n    # weights matrix.\n    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices\n    # apply cubic kernel\n    if (scale < 1) and (antialiasing):\n        weights = scale * cubic(distance_to_center * scale)\n    else:\n        weights = cubic(distance_to_center)\n    # Normalize the weights matrix so that each row sums to 1.\n    weights_sum = torch.sum(weights, 1).view(out_length, 1)\n    weights = weights / weights_sum.expand(out_length, P)\n\n    # If a column in weights is all zero, get rid of it. 
only consider the first and last column.\n    weights_zero_tmp = torch.sum((weights == 0), 0)\n    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):\n        indices = indices.narrow(1, 1, P - 2)\n        weights = weights.narrow(1, 1, P - 2)\n    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):\n        indices = indices.narrow(1, 0, P - 2)\n        weights = weights.narrow(1, 0, P - 2)\n    weights = weights.contiguous()\n    indices = indices.contiguous()\n    sym_len_s = -indices.min() + 1\n    sym_len_e = indices.max() - in_length\n    indices = indices + sym_len_s - 1\n    return weights, indices, int(sym_len_s), int(sym_len_e)\n\n\ndef imresize(img, scale, antialiasing=True):\n    # Now the scale should be the same for H and W\n    # input: img: CHW RGB [0,1]\n    # output: CHW RGB [0,1] w/o round\n    is_numpy = False\n    if isinstance(img, np.ndarray):\n        img = torch.from_numpy(img.transpose(2, 0, 1))\n        is_numpy = True\n    device = img.device\n\n    is_batch = True\n    if len(img.shape) == 3:  # C, H, W\n        img = img[None]\n        is_batch = False\n\n    B, in_C, in_H, in_W = img.size()\n    img = img.view(-1, in_H, in_W)\n    _, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)\n    kernel_width = 4\n    kernel = \"cubic\"\n\n    # Return the desired dimension order for performing the resize.  The\n    # strategy is to perform the resize first along the dimension with the\n    # smallest scale factor.\n    # Now we do not support this.\n\n    # get weights and indices\n    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(\n        in_H, out_H, scale, kernel, kernel_width, antialiasing\n    )\n    weights_H, indices_H = weights_H.to(device), indices_H.to(device)\n    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(\n        in_W, out_W, scale, kernel, kernel_width, antialiasing\n    )\n    weights_W, indices_W = weights_W.to(device), indices_W.to(device)\n    # process H dimension\n    # symmetric copying\n    img_aug = torch.FloatTensor(B * in_C, in_H + sym_len_Hs + sym_len_He, in_W).to(\n        device\n    )\n    img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)\n\n    sym_patch = img[:, :sym_len_Hs, :]\n    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long().to(device)\n    sym_patch_inv = sym_patch.index_select(1, inv_idx)\n    img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)\n\n    sym_patch = img[:, -sym_len_He:, :]\n    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long().to(device)\n    sym_patch_inv = sym_patch.index_select(1, inv_idx)\n    img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)\n\n    out_1 = torch.FloatTensor(B * in_C, out_H, in_W).to(device)\n    kernel_width = weights_H.size(1)\n    for i in range(out_H):\n        idx = int(indices_H[i][0])\n        out_1[:, i, :] = (\n            img_aug[:, idx : idx + kernel_width, :]\n            .transpose(1, 2)\n            .matmul(weights_H[i][None, :, None].repeat(B * in_C, 1, 1))\n        ).squeeze()\n\n    # process W dimension\n    # symmetric copying\n    out_1_aug = torch.FloatTensor(B * in_C, out_H, in_W + sym_len_Ws + sym_len_We).to(\n        device\n    )\n    out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)\n\n    sym_patch = out_1[:, :, :sym_len_Ws]\n    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long().to(device)\n    sym_patch_inv = sym_patch.index_select(2, inv_idx)\n    out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)\n\n    sym_patch 
= out_1[:, :, -sym_len_We:]\n    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long().to(device)\n    sym_patch_inv = sym_patch.index_select(2, inv_idx)\n    out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)\n\n    out_2 = torch.FloatTensor(B * in_C, out_H, out_W).to(device)\n    kernel_width = weights_W.size(1)\n    for i in range(out_W):\n        idx = int(indices_W[i][0])\n        out_2[:, :, i] = (\n            out_1_aug[:, :, idx : idx + kernel_width].matmul(\n                weights_W[i][None, :, None].repeat(B * in_C, 1, 1)\n            )\n        ).squeeze()\n\n    out_2 = out_2.contiguous().view(B, in_C, out_H, out_W)\n    if not is_batch:\n        out_2 = out_2[0]\n    return out_2.cpu().numpy().transpose(1, 2, 0) if is_numpy else out_2\n"
  }
]