[
  {
    "path": ".gitignore",
    "content": "*.ipynb\n*.pth\n*.zip\n\n__pycache__/\ntemp_colorization/\n\nstatic/temp_images/\n"
  },
  {
    "path": "colorizator.py",
    "content": "import torch\nfrom torchvision.transforms import ToTensor\nimport numpy as np\n\nfrom networks.models import Colorizer\nfrom denoising.denoiser import FFDNetDenoiser\nfrom utils.utils import resize_pad\n\nclass MangaColorizator:\n    def __init__(self, device, generator_path = 'networks/generator.zip', extractor_path = 'networks/extractor.pth'):\n        self.colorizer = Colorizer().to(device)\n        self.colorizer.generator.load_state_dict(torch.load(generator_path, map_location = device))\n        self.colorizer = self.colorizer.eval()\n        \n        self.denoiser = FFDNetDenoiser(device)\n        \n        self.current_image = None\n        self.current_hint = None\n        self.current_pad = None\n        \n        self.device = device\n        \n    def set_image(self, image, size = 576, apply_denoise = True, denoise_sigma = 25, transform = ToTensor()):\n        if (size % 32 != 0):\n            raise RuntimeError(\"size is not divisible by 32\")\n        \n        if apply_denoise:\n            image = self.denoiser.get_denoised_image(image, sigma = denoise_sigma)\n        \n        image, self.current_pad = resize_pad(image, size)\n        self.current_image = transform(image).unsqueeze(0).to(self.device)\n        self.current_hint = torch.zeros(1, 4, self.current_image.shape[2], self.current_image.shape[3]).float().to(self.device)\n    \n    def update_hint(self, hint, mask):\n        '''\n        Args:\n           hint: numpy.ndarray with shape (self.current_image.shape[2], self.current_image.shape[3], 3)\n           mask: numpy.ndarray with shape (self.current_image.shape[2], self.current_image.shape[3])\n        '''\n        \n        if issubclass(hint.dtype.type, np.integer):\n            hint = hint.astype('float32') / 255\n            \n        hint = (hint - 0.5) / 0.5\n        hint = torch.FloatTensor(hint).permute(2, 0, 1)\n        mask = torch.FloatTensor(np.expand_dims(mask, 0))\n\n        self.current_hint = torch.cat([hint * mask, mask], 0).unsqueeze(0).to(self.device)\n\n    def colorize(self):\n        with torch.no_grad():\n            fake_color, _ = self.colorizer(torch.cat([self.current_image, self.current_hint], 1))\n            fake_color = fake_color.detach()\n\n        result = fake_color[0].detach().cpu().permute(1, 2, 0) * 0.5 + 0.5\n\n        if self.current_pad[0] != 0:\n            result = result[:-self.current_pad[0]]\n        if self.current_pad[1] != 0:\n            result = result[:, :-self.current_pad[1]]\n            \n        return result.numpy()\n"
  },
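  {
    "path": "examples/colorize_example.py",
    "content": "\"\"\"A minimal usage sketch of the MangaColorizator API; this file is an\nillustration added to the listing, not part of the original project. It\nassumes the generator and denoiser weights are in place (see readme.md),\nthat the script is run from the repository root, and that 'bw_page.png' is a\nplaceholder path to a grayscale manga page.\"\"\"\nimport matplotlib.pyplot as plt\n\nfrom colorizator import MangaColorizator\n\ncolorizer = MangaColorizator('cpu')   # or 'cuda' if a GPU is available\nimage = plt.imread('bw_page.png')     # HxW or HxWxC array\n\ncolorizer.set_image(image, size=576)  # size must be divisible by 32\nresult = colorizer.colorize()         # HxWx3 float array in [0, 1]\n\nplt.imsave('bw_page_colorized.png', result)\n"
  },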
  {
    "path": "denoising/denoiser.py",
    "content": "\"\"\"\nDenoise an image with the FFDNet denoising method\n\nCopyright (C) 2018, Matias Tassano <matias.tassano@parisdescartes.fr>\n\nThis program is free software: you can use, modify and/or\nredistribute it under the terms of the GNU General Public\nLicense as published by the Free Software Foundation, either\nversion 3 of the License, or (at your option) any later\nversion. You should have received a copy of this license along\nthis program. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nimport os\nimport argparse\nimport time\n\n\nimport numpy as np\nimport cv2\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom .models import FFDNet\nfrom .utils import normalize, variable_to_cv2_image, remove_dataparallel_wrapper, is_rgb\n    \nclass FFDNetDenoiser:\n    def __init__(self, _device, _sigma = 25, _weights_dir = 'denoising/models/', _in_ch = 3):\n        self.sigma = _sigma / 255\n        self.weights_dir = _weights_dir\n        self.channels = _in_ch\n        self.device = _device\n        \n        self.model = FFDNet(num_input_channels = _in_ch)\n        self.load_weights()\n        self.model.eval()\n       \n    \n    def load_weights(self):\n        weights_name = 'net_rgb.pth' if self.channels == 3 else 'net_gray.pth'\n        weights_path = os.path.join(self.weights_dir, weights_name)\n        if self.device == 'cuda':\n            state_dict = torch.load(weights_path, map_location=torch.device('cpu'))\n            device_ids = [0]\n            self.model = nn.DataParallel(self.model, device_ids=device_ids).cuda()\n        else:\n            state_dict = torch.load(weights_path, map_location='cpu')\n            # CPU mode: remove the DataParallel wrapper\n            state_dict = remove_dataparallel_wrapper(state_dict)\n        self.model.load_state_dict(state_dict)\n        \n    def get_denoised_image(self, imorig, sigma = None):\n        \n        if sigma is not None:\n            cur_sigma = sigma / 255\n        else:\n            cur_sigma = self.sigma \n    \n        if len(imorig.shape) < 3 or imorig.shape[2] == 1:\n            imorig = np.repeat(np.expand_dims(imorig, 2), 3, 2)\n            \n        imorig = imorig[..., :3]\n\n        if (max(imorig.shape[0], imorig.shape[1]) > 1200):\n            ratio = max(imorig.shape[0], imorig.shape[1]) / 1200\n            imorig = cv2.resize(imorig, (int(imorig.shape[1] / ratio), int(imorig.shape[0] / ratio)), interpolation = cv2.INTER_AREA)\n\n        imorig = imorig.transpose(2, 0, 1)\n \n        if (imorig.max() > 1.2):\n            imorig = normalize(imorig)\n        imorig = np.expand_dims(imorig, 0)\n\n        # Handle odd sizes\n        expanded_h = False\n        expanded_w = False\n        sh_im = imorig.shape\n        if sh_im[2]%2 == 1:\n            expanded_h = True\n            imorig = np.concatenate((imorig, imorig[:, :, -1, :][:, :, np.newaxis, :]), axis=2)\n\n        if sh_im[3]%2 == 1:\n            expanded_w = True\n            imorig = np.concatenate((imorig, imorig[:, :, :, -1][:, :, :, np.newaxis]), axis=3)\n\n\n        imorig = torch.Tensor(imorig)\n\n\n        # Sets data type according to CPU or GPU modes\n        if self.device == 'cuda':\n            dtype = torch.cuda.FloatTensor\n        else:\n            dtype = torch.FloatTensor\n\n        imnoisy = imorig.clone()\n\n\n        with torch.no_grad():\n            imorig, imnoisy = imorig.type(dtype), imnoisy.type(dtype)\n            nsigma = torch.FloatTensor([cur_sigma]).type(dtype)\n\n\n        # 
Estimate noise and subtract it to the input image\n        im_noise_estim = self.model(imnoisy, nsigma)\n        outim = torch.clamp(imnoisy-im_noise_estim, 0., 1.)\n\n        if expanded_h:\n            imorig = imorig[:, :, :-1, :]\n            outim = outim[:, :, :-1, :]\n            imnoisy = imnoisy[:, :, :-1, :]\n\n        if expanded_w:\n            imorig = imorig[:, :, :, :-1]\n            outim = outim[:, :, :, :-1]\n            imnoisy = imnoisy[:, :, :, :-1]\n        \n        return variable_to_cv2_image(outim)\n"
  },
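  {
    "path": "examples/denoiser_example.py",
    "content": "\"\"\"A minimal usage sketch of FFDNetDenoiser on its own; an illustration, not\npart of the original project. It assumes net_rgb.pth is present in\ndenoising/models/, that the script is run from the repository root, and that\n'noisy.png' is a placeholder input path.\"\"\"\nimport cv2\nimport matplotlib.pyplot as plt\n\nfrom denoising.denoiser import FFDNetDenoiser\n\ndenoiser = FFDNetDenoiser('cpu')                      # loads net_rgb.pth\nnoisy = plt.imread('noisy.png')                       # HxW or HxWxC array\nclean = denoiser.get_denoised_image(noisy, sigma=25)  # uint8 array, BGR order\n\ncv2.imwrite('denoised.png', clean)                    # cv2 expects BGR\n"
  },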
  {
    "path": "denoising/functions.py",
    "content": "\"\"\"\nFunctions implementing custom NN layers\n\nCopyright (C) 2018, Matias Tassano <matias.tassano@parisdescartes.fr>\n\nThis program is free software: you can use, modify and/or\nredistribute it under the terms of the GNU General Public\nLicense as published by the Free Software Foundation, either\nversion 3 of the License, or (at your option) any later\nversion. You should have received a copy of this license along\nthis program. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nimport torch\nfrom torch.autograd import Function, Variable\n\ndef concatenate_input_noise_map(input, noise_sigma):\n    r\"\"\"Implements the first layer of FFDNet. This function returns a\n    torch.autograd.Variable composed of the concatenation of the downsampled\n    input image and the noise map. Each image of the batch of size CxHxW gets\n    converted to an array of size 4*CxH/2xW/2. Each of the pixels of the\n    non-overlapped 2x2 patches of the input image are placed in the new array\n    along the first dimension.\n\n    Args:\n        input: batch containing CxHxW images\n        noise_sigma: the value of the pixels of the CxH/2xW/2 noise map\n    \"\"\"\n    # noise_sigma is a list of length batch_size\n    N, C, H, W = input.size()\n    dtype = input.type()\n    sca = 2\n    sca2 = sca*sca\n    Cout = sca2*C\n    Hout = H//sca\n    Wout = W//sca\n    idxL = [[0, 0], [0, 1], [1, 0], [1, 1]]\n\n    # Fill the downsampled image with zeros\n    if 'cuda' in dtype:\n        downsampledfeatures = torch.cuda.FloatTensor(N, Cout, Hout, Wout).fill_(0)\n    else:\n        downsampledfeatures = torch.FloatTensor(N, Cout, Hout, Wout).fill_(0)\n\n    # Build the CxH/2xW/2 noise map\n    noise_map = noise_sigma.view(N, 1, 1, 1).repeat(1, C, Hout, Wout)\n\n    # Populate output\n    for idx in range(sca2):\n        downsampledfeatures[:, idx:Cout:sca2, :, :] = \\\n            input[:, :, idxL[idx][0]::sca, idxL[idx][1]::sca]\n\n    # concatenate de-interleaved mosaic with noise map\n    return torch.cat((noise_map, downsampledfeatures), 1)\n\nclass UpSampleFeaturesFunction(Function):\n    r\"\"\"Extends PyTorch's modules by implementing a torch.autograd.Function.\n    This class implements the forward and backward methods of the last layer\n    of FFDNet. 
It basically performs the inverse of\n    concatenate_input_noise_map(): it converts each of the images of a\n    batch of size CxH/2xW/2 to images of size C/4xHxW\n    \"\"\"\n    @staticmethod\n    def forward(ctx, input):\n        N, Cin, Hin, Win = input.size()\n        dtype = input.type()\n        sca = 2\n        sca2 = sca*sca\n        Cout = Cin//sca2\n        Hout = Hin*sca\n        Wout = Win*sca\n        idxL = [[0, 0], [0, 1], [1, 0], [1, 1]]\n\n        assert (Cin%sca2 == 0), 'Invalid input dimensions: number of channels should be divisible by 4'\n\n        result = torch.zeros((N, Cout, Hout, Wout)).type(dtype)\n        for idx in range(sca2):\n            result[:, :, idxL[idx][0]::sca, idxL[idx][1]::sca] = input[:, idx:Cin:sca2, :, :]\n\n        return result\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        N, Cg_out, Hg_out, Wg_out = grad_output.size()\n        dtype = grad_output.data.type()\n        sca = 2\n        sca2 = sca*sca\n        Cg_in = sca2*Cg_out\n        Hg_in = Hg_out//sca\n        Wg_in = Wg_out//sca\n        idxL = [[0, 0], [0, 1], [1, 0], [1, 1]]\n\n        # Build output\n        grad_input = torch.zeros((N, Cg_in, Hg_in, Wg_in)).type(dtype)\n        # Populate output\n        for idx in range(sca2):\n            grad_input[:, idx:Cg_in:sca2, :, :] = grad_output.data[:, :, idxL[idx][0]::sca, idxL[idx][1]::sca]\n\n        return Variable(grad_input)\n\n# Alias functions\nupsamplefeatures = UpSampleFeaturesFunction.apply\n"
  },
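  {
    "path": "examples/test_pixelshuffle_roundtrip.py",
    "content": "\"\"\"A small self-check, not part of the original project: upsamplefeatures is\nthe exact inverse of the 2x2 downsampling performed by\nconcatenate_input_noise_map, so dropping the noise-map channels and\nupsampling recovers the input tensor bit for bit.\"\"\"\nimport torch\n\nfrom denoising.functions import concatenate_input_noise_map, upsamplefeatures\n\nx = torch.rand(2, 3, 8, 8)                      # batch of 3-channel images\nsigma = torch.full((2,), 25.0 / 255)            # one noise level per image\n\npacked = concatenate_input_noise_map(x, sigma)  # (2, 3 + 12, 4, 4)\nrestored = upsamplefeatures(packed[:, 3:])      # drop the 3 noise-map channels\n\nassert restored.shape == x.shape\nassert torch.allclose(restored, x)\nprint('round trip OK')\n"
  },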
  {
    "path": "denoising/models.py",
    "content": "\"\"\"\nDefinition of the FFDNet model and its custom layers\n\nCopyright (C) 2018, Matias Tassano <matias.tassano@parisdescartes.fr>\n\nThis program is free software: you can use, modify and/or\nredistribute it under the terms of the GNU General Public\nLicense as published by the Free Software Foundation, either\nversion 3 of the License, or (at your option) any later\nversion. You should have received a copy of this license along\nthis program. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport denoising.functions as functions\n    \nclass UpSampleFeatures(nn.Module):\n    r\"\"\"Implements the last layer of FFDNet\n    \"\"\"\n    def __init__(self):\n        super(UpSampleFeatures, self).__init__()\n    def forward(self, x):\n        return functions.upsamplefeatures(x)\n\nclass IntermediateDnCNN(nn.Module):\n    r\"\"\"Implements the middel part of the FFDNet architecture, which\n    is basically a DnCNN net\n    \"\"\"\n    def __init__(self, input_features, middle_features, num_conv_layers):\n        super(IntermediateDnCNN, self).__init__()\n        self.kernel_size = 3\n        self.padding = 1\n        self.input_features = input_features\n        self.num_conv_layers = num_conv_layers\n        self.middle_features = middle_features\n        if self.input_features == 5:\n            self.output_features = 4 #Grayscale image\n        elif self.input_features == 15:\n            self.output_features = 12 #RGB image\n        else:\n            raise Exception('Invalid number of input features')\n\n        layers = []\n        layers.append(nn.Conv2d(in_channels=self.input_features,\\\n                                out_channels=self.middle_features,\\\n                                kernel_size=self.kernel_size,\\\n                                padding=self.padding,\\\n                                bias=False))\n        layers.append(nn.ReLU(inplace=True))\n        for _ in range(self.num_conv_layers-2):\n            layers.append(nn.Conv2d(in_channels=self.middle_features,\\\n                                    out_channels=self.middle_features,\\\n                                    kernel_size=self.kernel_size,\\\n                                    padding=self.padding,\\\n                                    bias=False))\n            layers.append(nn.BatchNorm2d(self.middle_features))\n            layers.append(nn.ReLU(inplace=True))\n        layers.append(nn.Conv2d(in_channels=self.middle_features,\\\n                                out_channels=self.output_features,\\\n                                kernel_size=self.kernel_size,\\\n                                padding=self.padding,\\\n                                bias=False))\n        self.itermediate_dncnn = nn.Sequential(*layers)\n    def forward(self, x):\n        out = self.itermediate_dncnn(x)\n        return out\n\nclass FFDNet(nn.Module):\n    r\"\"\"Implements the FFDNet architecture\n    \"\"\"\n    def __init__(self, num_input_channels):\n        super(FFDNet, self).__init__()\n        self.num_input_channels = num_input_channels\n        if self.num_input_channels == 1:\n            # Grayscale image\n            self.num_feature_maps = 64\n            self.num_conv_layers = 15\n            self.downsampled_channels = 5\n            self.output_features = 4\n        elif self.num_input_channels == 3:\n            # RGB image\n            self.num_feature_maps = 96\n            self.num_conv_layers = 12\n            
self.downsampled_channels = 15\n            self.output_features = 12\n        else:\n            raise Exception('Invalid number of input features')\n\n        self.intermediate_dncnn = IntermediateDnCNN(\\\n                input_features=self.downsampled_channels,\\\n                middle_features=self.num_feature_maps,\\\n                num_conv_layers=self.num_conv_layers)\n        self.upsamplefeatures = UpSampleFeatures()\n\n    def forward(self, x, noise_sigma):\n        concat_noise_x = functions.concatenate_input_noise_map(x.data, noise_sigma.data)\n        concat_noise_x = Variable(concat_noise_x)\n        h_dncnn = self.intermediate_dncnn(concat_noise_x)\n        pred_noise = self.upsamplefeatures(h_dncnn)\n        return pred_noise\n"
  },
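  {
    "path": "examples/ffdnet_shape_check.py",
    "content": "\"\"\"A quick shape check, not part of the original project: FFDNet maps an\nimage plus a noise level to a predicted noise map of the same size. Height\nand width must be even because of the internal 2x2 downsampling; the\ndenoiser wrapper pads odd sizes before calling the model. Weights are\nrandom here, so only the shapes are meaningful.\"\"\"\nimport torch\n\nfrom denoising.models import FFDNet\n\nnet = FFDNet(num_input_channels=3).eval()\nx = torch.rand(1, 3, 64, 64)\nsigma = torch.tensor([25.0 / 255])\n\nwith torch.no_grad():\n    noise = net(x, sigma)\n\nassert noise.shape == x.shape\nprint(noise.shape)\n"
  },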
  {
    "path": "denoising/utils.py",
    "content": "\"\"\"\nDifferent utilities such as orthogonalization of weights, initialization of\nloggers, etc\n\nCopyright (C) 2018, Matias Tassano <matias.tassano@parisdescartes.fr>\n\nThis program is free software: you can use, modify and/or\nredistribute it under the terms of the GNU General Public\nLicense as published by the Free Software Foundation, either\nversion 3 of the License, or (at your option) any later\nversion. You should have received a copy of this license along\nthis program. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nimport numpy as np\nimport cv2\n\n\ndef variable_to_cv2_image(varim):\n    r\"\"\"Converts a torch.autograd.Variable to an OpenCV image\n\n    Args:\n        varim: a torch.autograd.Variable\n    \"\"\"\n    nchannels = varim.size()[1]\n    if nchannels == 1:\n        res = (varim.data.cpu().numpy()[0, 0, :]*255.).clip(0, 255).astype(np.uint8)\n    elif nchannels == 3:\n        res = varim.data.cpu().numpy()[0]\n        res = cv2.cvtColor(res.transpose(1, 2, 0), cv2.COLOR_RGB2BGR)\n        res = (res*255.).clip(0, 255).astype(np.uint8)\n    else:\n        raise Exception('Number of color channels not supported')\n    return res\n\n\ndef normalize(data):\n    return np.float32(data/255.)\n\ndef remove_dataparallel_wrapper(state_dict):\n    r\"\"\"Converts a DataParallel model to a normal one by removing the \"module.\"\n    wrapper in the module dictionary\n\n    Args:\n        state_dict: a torch.nn.DataParallel state dictionary\n    \"\"\"\n    from collections import OrderedDict\n\n    new_state_dict = OrderedDict()\n    for k, vl in state_dict.items():\n        name = k[7:] # remove 'module.' of DataParallel\n        new_state_dict[name] = vl\n\n    return new_state_dict\n\ndef is_rgb(im_path):\n    r\"\"\" Returns True if the image in im_path is an RGB image\n    \"\"\"\n    from skimage.io import imread\n    rgb = False\n    im = imread(im_path)\n    if (len(im.shape) == 3):\n        if not(np.allclose(im[...,0], im[...,1]) and np.allclose(im[...,2], im[...,1])):\n            rgb = True\n    print(\"rgb: {}\".format(rgb))\n    print(\"im shape: {}\".format(im.shape))\n    return rgb\n"
  },
  {
    "path": "inference.py",
    "content": "import os\nimport argparse\nimport sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom colorizator import MangaColorizator\n\ndef process_image(image, colorizator, args):\n    colorizator.set_image(image, args.size, args.denoiser, args.denoiser_sigma)\n        \n    return colorizator.colorize()\n    \ndef colorize_single_image(image_path, save_path, colorizator, args):\n    \n        image = plt.imread(image_path)\n\n        colorization = process_image(image, colorizator, args)\n        \n        plt.imsave(save_path, colorization)\n        \n        return True\n    \n\ndef colorize_images(target_path, colorizator, args):\n    images = os.listdir(args.path)\n    \n    for image_name in images:\n        file_path = os.path.join(args.path, image_name)\n        \n        if os.path.isdir(file_path):\n            continue\n        \n        name, ext = os.path.splitext(image_name)\n        if (ext != '.png'):\n            image_name = name + '.png'\n        \n        print(file_path)\n        \n        save_path = os.path.join(target_path, image_name)\n        colorize_single_image(file_path, save_path, colorizator, args)\n    \ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-p\", \"--path\", required=True)\n    parser.add_argument(\"-gen\", \"--generator\", default = 'networks/generator.zip')\n    parser.add_argument(\"-ext\", \"--extractor\", default = 'networks/extractor.pth')\n    parser.add_argument('-g', '--gpu', dest = 'gpu', action = 'store_true')\n    parser.add_argument('-nd', '--no_denoise', dest = 'denoiser', action = 'store_false')\n    parser.add_argument(\"-ds\", \"--denoiser_sigma\", type = int, default = 25)\n    parser.add_argument(\"-s\", \"--size\", type = int, default = 576)\n    parser.set_defaults(gpu = False)\n    parser.set_defaults(denoiser = True)\n    args = parser.parse_args()\n    \n    return args\n\n    \nif __name__ == \"__main__\":\n    \n    args = parse_args()\n    \n    if args.gpu:\n        device = 'cuda'\n    else:\n        device = 'cpu'\n        \n    colorizer = MangaColorizator(device, args.generator, args.extractor)\n    \n    if os.path.isdir(args.path):\n        colorization_path = os.path.join(args.path, 'colorization')\n        if not os.path.exists(colorization_path):\n            os.makedirs(colorization_path)\n              \n        colorize_images(colorization_path, colorizer, args)\n        \n    elif os.path.isfile(args.path):\n        \n        split = os.path.splitext(args.path)\n        \n        if split[1].lower() in ('.jpg', '.png', '.jpeg'):\n            new_image_path = split[0] + '_colorized' + '.png'\n            \n            colorize_single_image(args.path, new_image_path, colorizer, args)\n        else:\n            print('Wrong format')\n    else:\n        print('Wrong path')\n    \n"
  },
  {
    "path": "networks/extractor.py",
    "content": "import torch\nimport torch.nn as nn\nimport math\n\n'''https://github.com/blandocs/Tag2Pix/blob/master/model/pretrained.py'''\n\n# Pretrained version\nclass Selayer(nn.Module):\n    def __init__(self, inplanes):\n        super(Selayer, self).__init__()\n        self.global_avgpool = nn.AdaptiveAvgPool2d(1)\n        self.conv1 = nn.Conv2d(inplanes, inplanes // 16, kernel_size=1, stride=1)\n        self.conv2 = nn.Conv2d(inplanes // 16, inplanes, kernel_size=1, stride=1)\n        self.relu = nn.ReLU(inplace=True)\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, x):\n        out = self.global_avgpool(x)\n        out = self.conv1(out)\n        out = self.relu(out)\n        out = self.conv2(out)\n        out = self.sigmoid(out)\n\n        return x * out\n\n\nclass BottleneckX_Origin(nn.Module):\n    expansion = 4\n\n    def __init__(self, inplanes, planes, cardinality, stride=1, downsample=None):\n        super(BottleneckX_Origin, self).__init__()\n        self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)\n        self.bn1 = nn.BatchNorm2d(planes * 2)\n\n        self.conv2 = nn.Conv2d(planes * 2, planes * 2, kernel_size=3, stride=stride,\n                               padding=1, groups=cardinality, bias=False)\n        self.bn2 = nn.BatchNorm2d(planes * 2)\n\n        self.conv3 = nn.Conv2d(planes * 2, planes * 4, kernel_size=1, bias=False)\n        self.bn3 = nn.BatchNorm2d(planes * 4)\n\n        self.selayer = Selayer(planes * 4)\n\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n        self.stride = stride\n\n    def forward(self, x):\n        residual = x\n\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        out = self.bn2(out)\n        out = self.relu(out)\n\n        out = self.conv3(out)\n        out = self.bn3(out)\n\n        out = self.selayer(out)\n\n        if self.downsample is not None:\n            residual = self.downsample(x)\n\n        out += residual\n        out = self.relu(out)\n\n        return out\n\nclass SEResNeXt_Origin(nn.Module):\n    def __init__(self, block, layers, input_channels=3, cardinality=32, num_classes=1000):\n        super(SEResNeXt_Origin, self).__init__()\n        self.cardinality = cardinality\n        self.inplanes = 64\n        self.input_channels = input_channels\n\n        self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,\n                               bias=False)\n        self.bn1 = nn.BatchNorm2d(64)\n        self.relu = nn.ReLU(inplace=True)\n\n        self.layer1 = self._make_layer(block, 64, layers[0])\n        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n    def _make_layer(self, block, planes, blocks, stride=1):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                nn.Conv2d(self.inplanes, planes * block.expansion,\n                          kernel_size=1, stride=stride, bias=False),\n                nn.BatchNorm2d(planes * block.expansion),\n            )\n\n        layers = []\n        layers.append(block(self.inplanes, planes, self.cardinality, stride, downsample))\n        self.inplanes = planes * block.expansion\n        for i in range(1, blocks):\n            layers.append(block(self.inplanes, planes, self.cardinality))\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        \n        x = self.conv1(x)\n        x = self.bn1(x)\n        x1 = self.relu(x)\n        \n        x2 = self.layer1(x1)\n        \n        x3 = self.layer2(x2)\n        \n        x4 = self.layer3(x3)\n        \n        return x1, x2, x3, x4\n"
  },
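  {
    "path": "examples/extractor_shape_check.py",
    "content": "\"\"\"A quick shape check, not part of the original project, for the SEResNeXt\nencoder as networks/models.py instantiates it: it returns four feature maps\nat 1/2, 1/2, 1/4 and 1/8 of the input resolution.\"\"\"\nimport torch\n\nfrom networks.extractor import SEResNeXt_Origin, BottleneckX_Origin\n\nencoder = SEResNeXt_Origin(BottleneckX_Origin, [3, 4, 6, 3],\n                           input_channels=1, num_classes=370).eval()\n\nwith torch.no_grad():\n    x1, x2, x3, x4 = encoder(torch.rand(1, 1, 64, 64))\n\n# expected: (1, 64, 32, 32), (1, 256, 32, 32), (1, 512, 16, 16), (1, 1024, 8, 8)\nprint(x1.shape, x2.shape, x3.shape, x4.shape)\n"
  },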
  {
    "path": "networks/models.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as M\nimport math\nfrom torch import Tensor\nfrom torch.nn import Parameter\n\nfrom .extractor import SEResNeXt_Origin, BottleneckX_Origin\n\n'''https://github.com/orashi/AlacGAN/blob/master/models/standard.py'''\n\ndef l2normalize(v, eps=1e-12):\n    return v / (v.norm() + eps)\n\n\nclass SpectralNorm(nn.Module):\n    def __init__(self, module, name='weight', power_iterations=1):\n        super(SpectralNorm, self).__init__()\n        self.module = module\n        self.name = name\n        self.power_iterations = power_iterations\n        if not self._made_params():\n            self._make_params()\n\n    def _update_u_v(self):\n        u = getattr(self.module, self.name + \"_u\")\n        v = getattr(self.module, self.name + \"_v\")\n        w = getattr(self.module, self.name + \"_bar\")\n\n        height = w.data.shape[0]\n        for _ in range(self.power_iterations):\n            v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))\n            u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))\n\n        # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))\n        sigma = u.dot(w.view(height, -1).mv(v))\n        setattr(self.module, self.name, w / sigma.expand_as(w))\n\n    def _made_params(self):\n        try:\n            u = getattr(self.module, self.name + \"_u\")\n            v = getattr(self.module, self.name + \"_v\")\n            w = getattr(self.module, self.name + \"_bar\")\n            return True\n        except AttributeError:\n            return False\n\n\n    def _make_params(self):\n        w = getattr(self.module, self.name)\n        height = w.data.shape[0]\n        width = w.view(height, -1).data.shape[1]\n\n        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)\n        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)\n        u.data = l2normalize(u.data)\n        v.data = l2normalize(v.data)\n        w_bar = Parameter(w.data)\n\n        del self.module._parameters[self.name]\n\n        self.module.register_parameter(self.name + \"_u\", u)\n        self.module.register_parameter(self.name + \"_v\", v)\n        self.module.register_parameter(self.name + \"_bar\", w_bar)\n\n\n    def forward(self, *args):\n        self._update_u_v()\n        return self.module.forward(*args)\n\nclass Selayer(nn.Module):\n    def __init__(self, inplanes):\n        super(Selayer, self).__init__()\n        self.global_avgpool = nn.AdaptiveAvgPool2d(1)\n        self.conv1 = nn.Conv2d(inplanes, inplanes // 16, kernel_size=1, stride=1)\n        self.conv2 = nn.Conv2d(inplanes // 16, inplanes, kernel_size=1, stride=1)\n        self.relu = nn.ReLU(inplace=True)\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, x):\n        out = self.global_avgpool(x)\n        out = self.conv1(out)\n        out = self.relu(out)\n        out = self.conv2(out)\n        out = self.sigmoid(out)\n\n        return x * out\n    \nclass SelayerSpectr(nn.Module):\n    def __init__(self, inplanes):\n        super(SelayerSpectr, self).__init__()\n        self.global_avgpool = nn.AdaptiveAvgPool2d(1)\n        self.conv1 = SpectralNorm(nn.Conv2d(inplanes, inplanes // 16, kernel_size=1, stride=1))\n        self.conv2 = SpectralNorm(nn.Conv2d(inplanes // 16, inplanes, kernel_size=1, stride=1))\n        self.relu = nn.ReLU(inplace=True)\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, x):\n        out 
= self.global_avgpool(x)\n        out = self.conv1(out)\n        out = self.relu(out)\n        out = self.conv2(out)\n        out = self.sigmoid(out)\n\n        return x * out\n\nclass ResNeXtBottleneck(nn.Module):\n    def __init__(self, in_channels=256, out_channels=256, stride=1, cardinality=32, dilate=1):\n        super(ResNeXtBottleneck, self).__init__()\n        D = out_channels // 2\n        self.out_channels = out_channels\n        self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)\n        self.conv_conv = nn.Conv2d(D, D, kernel_size=2 + stride, stride=stride, padding=dilate, dilation=dilate,\n                                   groups=cardinality,\n                                   bias=False)\n        self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n        self.shortcut = nn.Sequential()\n        if stride != 1:\n            self.shortcut.add_module('shortcut',\n                                     nn.AvgPool2d(2, stride=2))\n            \n        self.selayer = Selayer(out_channels)\n\n    def forward(self, x):\n        bottleneck = self.conv_reduce.forward(x)\n        bottleneck = F.leaky_relu(bottleneck, 0.2, True)\n        bottleneck = self.conv_conv.forward(bottleneck)\n        bottleneck = F.leaky_relu(bottleneck, 0.2, True)\n        bottleneck = self.conv_expand.forward(bottleneck)\n        bottleneck = self.selayer(bottleneck)\n        \n        x = self.shortcut.forward(x)\n        return x + bottleneck\n    \nclass SpectrResNeXtBottleneck(nn.Module):\n    def __init__(self, in_channels=256, out_channels=256, stride=1, cardinality=32, dilate=1):\n        super(SpectrResNeXtBottleneck, self).__init__()\n        D = out_channels // 2\n        self.out_channels = out_channels\n        self.conv_reduce = SpectralNorm(nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False))\n        self.conv_conv = SpectralNorm(nn.Conv2d(D, D, kernel_size=2 + stride, stride=stride, padding=dilate, dilation=dilate,\n                                   groups=cardinality,\n                                   bias=False))\n        self.conv_expand = SpectralNorm(nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False))\n        self.shortcut = nn.Sequential()\n        if stride != 1:\n            self.shortcut.add_module('shortcut',\n                                     nn.AvgPool2d(2, stride=2))\n            \n        self.selayer = SelayerSpectr(out_channels)\n\n    def forward(self, x):\n        bottleneck = self.conv_reduce.forward(x)\n        bottleneck = F.leaky_relu(bottleneck, 0.2, True)\n        bottleneck = self.conv_conv.forward(bottleneck)\n        bottleneck = F.leaky_relu(bottleneck, 0.2, True)\n        bottleneck = self.conv_expand.forward(bottleneck)\n        bottleneck = self.selayer(bottleneck)\n        \n        x = self.shortcut.forward(x)\n        return x + bottleneck\n    \nclass FeatureConv(nn.Module):\n    def __init__(self, input_dim=512, output_dim=512):\n        super(FeatureConv, self).__init__()\n\n        no_bn = True\n        \n        seq = []\n        seq.append(nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=1, padding=1, bias=False))\n        if not no_bn: seq.append(nn.BatchNorm2d(output_dim))\n        seq.append(nn.ReLU(inplace=True))\n        seq.append(nn.Conv2d(output_dim, output_dim, kernel_size=3, stride=2, padding=1, bias=False))\n        if not no_bn: seq.append(nn.BatchNorm2d(output_dim))\n        
seq.append(nn.ReLU(inplace=True))\n        seq.append(nn.Conv2d(output_dim, output_dim, kernel_size=3, stride=1, padding=1, bias=False))\n        seq.append(nn.ReLU(inplace=True))\n\n        self.network = nn.Sequential(*seq)\n\n    def forward(self, x):\n        return self.network(x)\n    \nclass Generator(nn.Module):\n    def __init__(self, ngf=64):\n        super(Generator, self).__init__()\n\n        self.encoder = SEResNeXt_Origin(BottleneckX_Origin, [3, 4, 6, 3], num_classes= 370, input_channels=1)\n        \n        self.to0 =  self._make_encoder_block_first(5, 32)\n        self.to1 = self._make_encoder_block(32, 64)\n        self.to2 = self._make_encoder_block(64, 92)\n        self.to3 = self._make_encoder_block(92, 128)\n        self.to4 = self._make_encoder_block(128, 256)\n        \n        self.deconv_for_decoder = nn.Sequential(\n            nn.ConvTranspose2d(256, 128, 3, stride=2, padding=1, output_padding=1), # output is 64 * 64\n            nn.LeakyReLU(0.2),\n            nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1, output_padding=1), # output is 128 * 128\n            nn.LeakyReLU(0.2),\n            nn.ConvTranspose2d(64, 32, 3, stride=1, padding=1, output_padding=0), # output is 256 * 256\n            nn.LeakyReLU(0.2),\n            nn.ConvTranspose2d(32, 3, 3, stride=1, padding=1, output_padding=0), # output is 256 * 256\n            nn.Tanh(),\n        )\n\n        tunnel4 = nn.Sequential(*[ResNeXtBottleneck(512, 512, cardinality=32, dilate=1) for _ in range(20)])\n\n        \n        self.tunnel4 = nn.Sequential(nn.Conv2d(1024 + 128, 512, kernel_size=3, stride=1, padding=1),\n                                     nn.LeakyReLU(0.2, True),\n                                     tunnel4,\n                                     nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=1),\n                                     nn.PixelShuffle(2),\n                                     nn.LeakyReLU(0.2, True)\n                                     )  # 64\n\n        depth = 2\n        tunnel = [ResNeXtBottleneck(256, 256, cardinality=32, dilate=1) for _ in range(depth)]\n        tunnel += [ResNeXtBottleneck(256, 256, cardinality=32, dilate=2) for _ in range(depth)]\n        tunnel += [ResNeXtBottleneck(256, 256, cardinality=32, dilate=4) for _ in range(depth)]\n        tunnel += [ResNeXtBottleneck(256, 256, cardinality=32, dilate=2),\n                   ResNeXtBottleneck(256, 256, cardinality=32, dilate=1)]\n        tunnel3 = nn.Sequential(*tunnel)\n\n        self.tunnel3 = nn.Sequential(nn.Conv2d(512 + 256, 256, kernel_size=3, stride=1, padding=1),\n                                     nn.LeakyReLU(0.2, True),\n                                     tunnel3,\n                                     nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),\n                                     nn.PixelShuffle(2),\n                                     nn.LeakyReLU(0.2, True)\n                                     )  # 128\n\n        tunnel = [ResNeXtBottleneck(128, 128, cardinality=32, dilate=1) for _ in range(depth)]\n        tunnel += [ResNeXtBottleneck(128, 128, cardinality=32, dilate=2) for _ in range(depth)]\n        tunnel += [ResNeXtBottleneck(128, 128, cardinality=32, dilate=4) for _ in range(depth)]\n        tunnel += [ResNeXtBottleneck(128, 128, cardinality=32, dilate=2),\n                   ResNeXtBottleneck(128, 128, cardinality=32, dilate=1)]\n        tunnel2 = nn.Sequential(*tunnel)\n\n        self.tunnel2 = nn.Sequential(nn.Conv2d(128 + 256 + 64, 128, kernel_size=3, 
stride=1, padding=1),\n                                     nn.LeakyReLU(0.2, True),\n                                     tunnel2,\n                                     nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),\n                                     nn.PixelShuffle(2),\n                                     nn.LeakyReLU(0.2, True)\n                                     )\n\n        tunnel = [ResNeXtBottleneck(64, 64, cardinality=16, dilate=1)]\n        tunnel += [ResNeXtBottleneck(64, 64, cardinality=16, dilate=2)]\n        tunnel += [ResNeXtBottleneck(64, 64, cardinality=16, dilate=4)]\n        tunnel += [ResNeXtBottleneck(64, 64, cardinality=16, dilate=2),\n                   ResNeXtBottleneck(64, 64, cardinality=16, dilate=1)]\n        tunnel1 = nn.Sequential(*tunnel)\n\n        self.tunnel1 = nn.Sequential(nn.Conv2d(64 + 32, 64, kernel_size=3, stride=1, padding=1),\n                                     nn.LeakyReLU(0.2, True),\n                                     tunnel1,\n                                     nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),\n                                     nn.PixelShuffle(2),\n                                     nn.LeakyReLU(0.2, True)\n                                     )\n\n        self.exit = nn.Sequential(nn.Conv2d(64 + 32, 32, kernel_size=3, stride=1, padding=1),\n                                 nn.LeakyReLU(0.2, True),\n                                 nn.Conv2d(32, 3, kernel_size= 1, stride = 1, padding = 0))\n        \n        \n    def _make_encoder_block(self, inplanes, planes):\n        return nn.Sequential(\n            nn.Conv2d(inplanes, planes, 3, 2, 1),\n            nn.LeakyReLU(0.2),\n            nn.Conv2d(planes, planes, 3, 1, 1),\n            nn.LeakyReLU(0.2),\n        )\n\n    def _make_encoder_block_first(self, inplanes, planes):\n        return nn.Sequential(\n            nn.Conv2d(inplanes, planes, 3, 1, 1),\n            nn.LeakyReLU(0.2),\n            nn.Conv2d(planes, planes, 3, 1, 1),\n            nn.LeakyReLU(0.2),\n        )    \n        \n    def forward(self, sketch):\n\n        x0 = self.to0(sketch)\n        aux_out = self.to1(x0)\n        aux_out = self.to2(aux_out)\n        aux_out = self.to3(aux_out)\n        \n        x1, x2, x3, x4 = self.encoder(sketch[:, 0:1])\n        \n        out = self.tunnel4(torch.cat([x4, aux_out], 1))\n        \n        \n        \n        x = self.tunnel3(torch.cat([out, x3], 1))\n        \n        x = self.tunnel2(torch.cat([x, x2, x1], 1))\n        \n        \n        x = torch.tanh(self.exit(torch.cat([x, x0], 1)))\n        \n        decoder_output = self.deconv_for_decoder(out)\n\n        return x, decoder_output  \n\n\nclass Colorizer(nn.Module):\n    def __init__(self):\n        super(Colorizer, self).__init__()\n        \n        self.generator = Generator()\n        \n    def forward(self, x, extractor_grad = False):\n        fake, guide = self.generator(x)\n        return fake, guide\n"
  },
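  {
    "path": "examples/generator_shape_check.py",
    "content": "\"\"\"A quick input/output check, not part of the original project: the\nColorizer consumes a 5-channel tensor (1 grayscale channel plus the 3 hint\ncolors and 1 hint mask assembled in colorizator.py) and returns the\ncolorization together with the auxiliary decoder output, both at the input\nresolution. Weights are random here, so only the shapes are meaningful.\"\"\"\nimport torch\n\nfrom networks.models import Colorizer\n\nmodel = Colorizer().eval()\n\nwith torch.no_grad():\n    fake, guide = model(torch.rand(1, 5, 64, 64))\n\nprint(fake.shape, guide.shape)  # both torch.Size([1, 3, 64, 64])\n"
  },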
  {
    "path": "readme.md",
    "content": "## **UPD!!!** **A demo of Manga Colorization v2.5 is now available [link](https://mangacol.com). Feel free to check it out!**\n\n\n# Automatic colorization\n\n1. Download [generator](https://drive.google.com/file/d/1qmxUEKADkEM4iYLp1fpPLLKnfZ6tcF-t/view?usp=sharing) and [denoiser](https://drive.google.com/file/d/161oyQcYpdkVdw8gKz_MA8RD-Wtg9XDp3/view?usp=sharing) weights. Put generator and extractor weights in `networks` and denoiser weights in `denoising/models`.\n2. To colorize image or folder of images, use the following command:\n```\n$ python inference.py -p \"path to file or folder\"\n```\n\n| Original      | Colorization      |\n|------------|-------------|\n| <img src=\"figures/bw1.jpg\" width=\"512\"> | <img src=\"figures/color1.png\" width=\"512\"> |\n| <img src=\"figures/bw2.jpg\" width=\"512\"> | <img src=\"figures/color2.png\" width=\"512\"> |\n| <img src=\"figures/bw3.jpg\" width=\"512\"> | <img src=\"figures/color3.png\" width=\"512\"> |\n| <img src=\"figures/bw4.jpg\" width=\"512\"> | <img src=\"figures/color4.png\" width=\"512\"> |\n| <img src=\"figures/bw5.jpg\" width=\"512\"> | <img src=\"figures/color5.png\" width=\"512\"> |\n| <img src=\"figures/bw6.jpg\" width=\"512\"> | <img src=\"figures/color6.png\" width=\"512\"> |\n"
  },
  {
    "path": "requirements.txt",
    "content": "torch\ntorchvision\nopencv-python\nmatplotlib"
  },
  {
    "path": "start_kr.md",
    "content": "# requirements\n- 모델 다운 -> [모델](https://drive.google.com/file/d/161oyQcYpdkVdw8gKz_MA8RD-Wtg9XDp3/view)\n- 다운 받은 모델 `denoising/models`에 넣기\n- generator 다운 -> [generator](https://drive.google.com/file/d/1qmxUEKADkEM4iYLp1fpPLLKnfZ6tcF-t/view)\n- 다운 받은 generator.zip `networks/`에 넣기\n# start\n```\n    $ python3 -m venv venv\n    $ source venv/bin/activate\n    $ pip install -r requirements.txt\n    $ python inference.py -p <이미지 폴더 주소>\n```\n"
  },
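  {
    "path": "examples/resize_pad_demo.py",
    "content": "\"\"\"A small demonstration, not part of the original project, of\nutils.resize_pad: it returns a single-channel image whose sides are\nmultiples of 32 (for the default sizes used in this repo), together with\nthe (bottom, right) padding that colorizator.py crops off again after\ninference.\"\"\"\nimport numpy as np\n\nfrom utils.utils import resize_pad\n\nimg = np.random.rand(700, 500).astype('float32')  # taller than wide\nout, pad = resize_pad(img, size=576)\n\nprint(out.shape, pad)  # (832, 576, 1) (25, 0): height padded from 807 to 832\n"
  },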
  {
    "path": "utils/utils.py",
    "content": "import numpy as np\nimport cv2\n\ndef resize_pad(img, size = 256):\n            \n    if len(img.shape) == 2:\n        img = np.expand_dims(img, 2)\n        \n    if img.shape[2] == 1:\n        img = np.repeat(img, 3, 2)\n        \n    if img.shape[2] == 4:\n        img = img[:, :, :3]\n\n    pad = None        \n            \n    if (img.shape[0] < img.shape[1]):\n        height = img.shape[0]\n        ratio = height / (size * 1.5)\n        width = int(np.ceil(img.shape[1] / ratio))\n        img = cv2.resize(img, (width, int(size * 1.5)), interpolation = cv2.INTER_AREA)\n\n        \n        new_width = width + (32 - width % 32)\n            \n        pad = (0, new_width - width)\n        \n        img = np.pad(img, ((0, 0), (0, pad[1]), (0, 0)), 'maximum')\n    else:\n        width = img.shape[1]\n        ratio = width / size\n        height = int(np.ceil(img.shape[0] / ratio))\n        img = cv2.resize(img, (size, height), interpolation = cv2.INTER_AREA)\n\n        new_height = height + (32 - height % 32)\n            \n        pad = (new_height - height, 0)\n        \n        img = np.pad(img, ((0, pad[0]), (0, 0), (0, 0)), 'maximum')\n        \n    if (img.dtype == 'float32'):\n        np.clip(img, 0, 1, out = img)\n\n    return img[:, :, :1], pad\n"
  }
]