diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..c16688bac8e36daa0451af9d0cfcfa7d0033199f
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,6 @@
+*.ipynb
+
+model/*.pth
+
+temp_colorization/
+__pycache__/
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..7b410735f809a633173828b7b5c1284e9003f8f6
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,10 @@
+*.ipynb
+*.pth
+*.zip
+
+__pycache__/
+temp_colorization/
+
+static/temp_images/*
+
+!.gitkeep
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..4f9335ea2fd0ca8b10681ce70fab11b09241d487
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,11 @@
+FROM pytorch/pytorch:1.6.0-cuda10.1-cudnn7-runtime
+
+RUN apt-get update && apt-get install -y libglib2.0-0 libsm6 libxext6 libxrender-dev
+
+COPY . .
+
+RUN pip install --no-cache-dir -r ./requirements.txt
+
+EXPOSE 5000
+
+CMD gunicorn --timeout 200 -w 3 -b 0.0.0.0:5000 drawing:app
diff --git a/configs/train_config.json b/configs/train_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..628e032096ba8b2af6ac0eef9e3fd34673a9249e
--- /dev/null
+++ b/configs/train_config.json
@@ -0,0 +1,10 @@
+{
+ "generator_lr" : 1e-4,
+ "discriminator_lr" : 4e-4,
+ "epochs" : 3,
+ "lr_decrease_epoch" : 10,
+ "finetuning_generator_lr" : 1e-6,
+ "finetuning_iterations" : 3500,
+ "batch_size" : 5,
+ "number_of_mults" : 3
+}
\ No newline at end of file
diff --git a/configs/xdog_config.json b/configs/xdog_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..447ce2fac19908189dcd3eb617e5b720c2e21a5e
--- /dev/null
+++ b/configs/xdog_config.json
@@ -0,0 +1,8 @@
+{
+ "sigma" : 0.5,
+ "k" : 8,
+ "phi" : 89.25,
+ "gamma" : 0.95,
+ "eps" : -0.1,
+ "mult" : 7
+}
diff --git a/dataset/__pycache__/datasets.cpython-39.pyc b/dataset/__pycache__/datasets.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f58e018d5c450ed5471464512f8e03a38e388c07
Binary files /dev/null and b/dataset/__pycache__/datasets.cpython-39.pyc differ
diff --git a/dataset/datasets.py b/dataset/datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..978606498ba5bd872bcb17c67dec106d508d2f78
--- /dev/null
+++ b/dataset/datasets.py
@@ -0,0 +1,104 @@
+import torch
+import os
+import torchvision.transforms as transforms
+import matplotlib.pyplot as plt
+import numpy as np
+
+from utils.utils import generate_mask
+
+class TrainDataset(torch.utils.data.Dataset):
+ def __init__(self, data_path, transform=None, mults_amount=1):
+ self.data = os.listdir(os.path.join(data_path, 'color'))
+ self.data_path = data_path
+ self.transform = transform
+ self.mults_amount = mults_amount
+ self.ToTensor = transforms.ToTensor()
+
+ def __len__(self):
+ return len(self.data)
+
+ def __getitem__(self, idx):
+ image_name = self.data[idx]
+
+ if self.mults_amount > 1:
+ mult_number = np.random.choice(range(self.mults_amount))
+ bw_name = image_name[:image_name.rfind('.')] + '_{}.png'.format(mult_number)
+ dfm_name = image_name[:image_name.rfind('.')] + '_{}_dfm.png'.format(mult_number)
+ else:
+ bw_name = image_name
+ dfm_name = os.path.splitext(image_name)[0] + '_dfm.png'
+
+ # Rest of the code stays the same...
+
+ bw_img = np.expand_dims(plt.imread(os.path.join(self.data_path, 'bw', bw_name)), 2) # add channel axis to grayscale image
+ dfm_img = np.expand_dims(plt.imread(os.path.join(self.data_path, 'bw', dfm_name)), 2) # add channel axis to grayscale image
+
+ # Concatenate bw and dfm channels
+ bw_dfm_img = np.concatenate([bw_img, dfm_img], axis=2)
+
+ color_img = plt.imread(os.path.join(self.data_path, 'color', image_name))
+ if self.transform:
+ result = self.transform(image=color_img, mask=bw_dfm_img)
+ color_img = result['image']
+ bw_dfm_img = result['mask']
+
+ dfm_img = bw_dfm_img[:, :, 1]
+ bw_img = bw_dfm_img[:, :, 0]
+
+ color_img = self.ToTensor(color_img)
+ bw_img = self.ToTensor(bw_img)
+ dfm_img = self.ToTensor(dfm_img)
+
+ color_img = (color_img - 0.5) / 0.5
+
+ mask = generate_mask(bw_img.shape[1], bw_img.shape[2])
+ hint = torch.cat((color_img * mask, mask), 0)
+
+ return bw_img, color_img, hint, dfm_img
+
+class FineTuningDataset(torch.utils.data.Dataset):
+ def __init__(self, data_path, transform = None, mult_amount = 1):
+ self.data = [x for x in os.listdir(os.path.join(data_path, 'real_manga')) if x.find('_dfm') == -1]
+ self.color_data = [x for x in os.listdir(os.path.join(data_path, 'color'))]
+ self.data_path = data_path
+ self.transform = transform
+ self.mults_amount = mult_amount
+
+ np.random.shuffle(self.color_data)
+
+ self.ToTensor = transforms.ToTensor()
+ def __len__(self):
+ return len(self.data)
+
+ def __getitem__(self, idx):
+ color_img = plt.imread(os.path.join(self.data_path, 'color', self.color_data[idx]))
+
+ image_name = self.data[idx]
+ if self.mults_amount > 1:
+ mult_number = np.random.choice(range(self.mults_amount))
+
+ bw_name = image_name[:image_name.rfind('.')] + '_' + str(mult_number) + '.png'
+ dfm_name = image_name[:image_name.rfind('.')] + '_' + str(mult_number) + '_dfm.png'
+ else:
+ bw_name = self.data[idx]
+ dfm_name = os.path.splitext(self.data[idx])[0] + '_dfm.png'
+
+
+ bw_img = np.expand_dims(plt.imread(os.path.join(self.data_path, 'real_manga', bw_name)), 2)
+ dfm_img = np.expand_dims(plt.imread(os.path.join(self.data_path, 'real_manga', dfm_name)), 2)
+
+ if self.transform:
+ result = self.transform(image = color_img)
+ color_img = result['image']
+
+ result = self.transform(image = bw_img, mask = dfm_img)
+ bw_img = result['image']
+ dfm_img = result['mask']
+
+ color_img = self.ToTensor(color_img)
+ bw_img = self.ToTensor(bw_img)
+ dfm_img = self.ToTensor(dfm_img)
+
+ color_img = (color_img - 0.5) / 0.5
+
+ return bw_img, dfm_img, color_img
diff --git a/denoising/denoiser.py b/denoising/denoiser.py
new file mode 100644
index 0000000000000000000000000000000000000000..c13f5c76f4a518e4a023c3cb0a260d0d7ac821ec
--- /dev/null
+++ b/denoising/denoiser.py
@@ -0,0 +1,113 @@
+"""
+Denoise an image with the FFDNet denoising method
+
+Copyright (C) 2018, Matias Tassano
+
+This program is free software: you can use, modify and/or
+redistribute it under the terms of the GNU General Public
+License as published by the Free Software Foundation, either
+version 3 of the License, or (at your option) any later
+version. You should have received a copy of this license along
+this program. If not, see .
+"""
+import os
+import argparse
+import time
+import numpy as np
+import cv2
+import torch
+import torch.nn as nn
+from torch.autograd import Variable
+from denoising.models import FFDNet
+from denoising.utils import normalize, variable_to_cv2_image, remove_dataparallel_wrapper, is_rgb
+
+class FFDNetDenoiser:
+ def __init__(self, _device, _sigma = 25, _weights_dir = 'denoising/models/', _in_ch = 3):
+ self.sigma = _sigma / 255
+ self.weights_dir = _weights_dir
+ self.channels = _in_ch
+ self.device = _device
+
+ self.model = FFDNet(num_input_channels = _in_ch)
+ self.load_weights()
+ self.model.eval()
+
+
+ def load_weights(self):
+ weights_name = 'net_rgb.pth' if self.channels == 3 else 'net_gray.pth'
+ weights_path = os.path.join(self.weights_dir, weights_name)
+ if self.device == 'cuda':
+ state_dict = torch.load(weights_path, map_location=torch.device('cpu'))
+ device_ids = [0]
+ self.model = nn.DataParallel(self.model, device_ids=device_ids).cuda()
+ else:
+ state_dict = torch.load(weights_path, map_location='cpu')
+ # CPU mode: remove the DataParallel wrapper
+ state_dict = remove_dataparallel_wrapper(state_dict)
+ self.model.load_state_dict(state_dict)
+
+ def get_denoised_image(self, imorig, sigma = None):
+
+ if sigma is not None:
+ cur_sigma = sigma / 255
+ else:
+ cur_sigma = self.sigma
+
+ if len(imorig.shape) < 3 or imorig.shape[2] == 1:
+ imorig = np.repeat(np.expand_dims(imorig, 2), 3, 2)
+
+ if (max(imorig.shape[0], imorig.shape[1]) > 1200):
+ ratio = max(imorig.shape[0], imorig.shape[1]) / 1200
+ imorig = cv2.resize(imorig, (int(imorig.shape[1] / ratio), int(imorig.shape[0] / ratio)), interpolation = cv2.INTER_AREA)
+
+ imorig = imorig.transpose(2, 0, 1)
+
+ if (imorig.max() > 1.2):
+ imorig = normalize(imorig)
+ imorig = np.expand_dims(imorig, 0)
+
+ # Handle odd sizes
+ expanded_h = False
+ expanded_w = False
+ sh_im = imorig.shape
+ if sh_im[2]%2 == 1:
+ expanded_h = True
+ imorig = np.concatenate((imorig, imorig[:, :, -1, :][:, :, np.newaxis, :]), axis=2)
+
+ if sh_im[3]%2 == 1:
+ expanded_w = True
+ imorig = np.concatenate((imorig, imorig[:, :, :, -1][:, :, :, np.newaxis]), axis=3)
+
+
+ imorig = torch.Tensor(imorig)
+
+
+ # Sets data type according to CPU or GPU modes
+ if self.device == 'cuda':
+ dtype = torch.cuda.FloatTensor
+ else:
+ dtype = torch.FloatTensor
+
+ imnoisy = imorig.clone()
+
+
+ with torch.no_grad():
+ imorig, imnoisy = imorig.type(dtype), imnoisy.type(dtype)
+ nsigma = torch.FloatTensor([cur_sigma]).type(dtype)
+
+
+ # Estimate noise and subtract it to the input image
+ im_noise_estim = self.model(imnoisy, nsigma)
+ outim = torch.clamp(imnoisy-im_noise_estim, 0., 1.)
+
+ if expanded_h:
+ imorig = imorig[:, :, :-1, :]
+ outim = outim[:, :, :-1, :]
+ imnoisy = imnoisy[:, :, :-1, :]
+
+ if expanded_w:
+ imorig = imorig[:, :, :, :-1]
+ outim = outim[:, :, :, :-1]
+ imnoisy = imnoisy[:, :, :, :-1]
+
+ return variable_to_cv2_image(outim)
diff --git a/denoising/functions.py b/denoising/functions.py
new file mode 100644
index 0000000000000000000000000000000000000000..954bc239d9578078a669039f15bc51425e2b11a5
--- /dev/null
+++ b/denoising/functions.py
@@ -0,0 +1,101 @@
+"""
+Functions implementing custom NN layers
+
+Copyright (C) 2018, Matias Tassano
+
+This program is free software: you can use, modify and/or
+redistribute it under the terms of the GNU General Public
+License as published by the Free Software Foundation, either
+version 3 of the License, or (at your option) any later
+version. You should have received a copy of this license along
+this program. If not, see .
+"""
+import torch
+from torch.autograd import Function, Variable
+
+def concatenate_input_noise_map(input, noise_sigma):
+ r"""Implements the first layer of FFDNet. This function returns a
+ torch.autograd.Variable composed of the concatenation of the downsampled
+ input image and the noise map. Each image of the batch of size CxHxW gets
+ converted to an array of size 4*CxH/2xW/2. Each of the pixels of the
+ non-overlapped 2x2 patches of the input image are placed in the new array
+ along the first dimension.
+
+ Args:
+ input: batch containing CxHxW images
+ noise_sigma: the value of the pixels of the CxH/2xW/2 noise map
+ """
+ # noise_sigma is a list of length batch_size
+ N, C, H, W = input.size()
+ dtype = input.type()
+ sca = 2
+ sca2 = sca*sca
+ Cout = sca2*C
+ Hout = H//sca
+ Wout = W//sca
+ idxL = [[0, 0], [0, 1], [1, 0], [1, 1]]
+
+ # Fill the downsampled image with zeros
+ if 'cuda' in dtype:
+ downsampledfeatures = torch.cuda.FloatTensor(N, Cout, Hout, Wout).fill_(0)
+ else:
+ downsampledfeatures = torch.FloatTensor(N, Cout, Hout, Wout).fill_(0)
+
+ # Build the CxH/2xW/2 noise map
+ noise_map = noise_sigma.view(N, 1, 1, 1).repeat(1, C, Hout, Wout)
+
+ # Populate output
+ for idx in range(sca2):
+ downsampledfeatures[:, idx:Cout:sca2, :, :] = \
+ input[:, :, idxL[idx][0]::sca, idxL[idx][1]::sca]
+
+ # concatenate de-interleaved mosaic with noise map
+ return torch.cat((noise_map, downsampledfeatures), 1)
+
+class UpSampleFeaturesFunction(Function):
+ r"""Extends PyTorch's modules by implementing a torch.autograd.Function.
+ This class implements the forward and backward methods of the last layer
+ of FFDNet. It basically performs the inverse of
+ concatenate_input_noise_map(): it converts each of the images of a
+ batch of size CxH/2xW/2 to images of size C/4xHxW
+ """
+ @staticmethod
+ def forward(ctx, input):
+ N, Cin, Hin, Win = input.size()
+ dtype = input.type()
+ sca = 2
+ sca2 = sca*sca
+ Cout = Cin//sca2
+ Hout = Hin*sca
+ Wout = Win*sca
+ idxL = [[0, 0], [0, 1], [1, 0], [1, 1]]
+
+ assert (Cin%sca2 == 0), 'Invalid input dimensions: number of channels should be divisible by 4'
+
+ result = torch.zeros((N, Cout, Hout, Wout)).type(dtype)
+ for idx in range(sca2):
+ result[:, :, idxL[idx][0]::sca, idxL[idx][1]::sca] = input[:, idx:Cin:sca2, :, :]
+
+ return result
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ N, Cg_out, Hg_out, Wg_out = grad_output.size()
+ dtype = grad_output.data.type()
+ sca = 2
+ sca2 = sca*sca
+ Cg_in = sca2*Cg_out
+ Hg_in = Hg_out//sca
+ Wg_in = Wg_out//sca
+ idxL = [[0, 0], [0, 1], [1, 0], [1, 1]]
+
+ # Build output
+ grad_input = torch.zeros((N, Cg_in, Hg_in, Wg_in)).type(dtype)
+ # Populate output
+ for idx in range(sca2):
+ grad_input[:, idx:Cg_in:sca2, :, :] = grad_output.data[:, :, idxL[idx][0]::sca, idxL[idx][1]::sca]
+
+ return Variable(grad_input)
+
+# Alias functions
+upsamplefeatures = UpSampleFeaturesFunction.apply
diff --git a/denoising/models.py b/denoising/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab50648e2c6e7067167a77f09d242f0c06098abb
--- /dev/null
+++ b/denoising/models.py
@@ -0,0 +1,100 @@
+"""
+Definition of the FFDNet model and its custom layers
+
+Copyright (C) 2018, Matias Tassano
+
+This program is free software: you can use, modify and/or
+redistribute it under the terms of the GNU General Public
+License as published by the Free Software Foundation, either
+version 3 of the License, or (at your option) any later
+version. You should have received a copy of this license along
+this program. If not, see .
+"""
+import torch.nn as nn
+from torch.autograd import Variable
+import denoising.functions as functions
+
+class UpSampleFeatures(nn.Module):
+ r"""Implements the last layer of FFDNet
+ """
+ def __init__(self):
+ super(UpSampleFeatures, self).__init__()
+ def forward(self, x):
+ return functions.upsamplefeatures(x)
+
+class IntermediateDnCNN(nn.Module):
+ r"""Implements the middel part of the FFDNet architecture, which
+ is basically a DnCNN net
+ """
+ def __init__(self, input_features, middle_features, num_conv_layers):
+ super(IntermediateDnCNN, self).__init__()
+ self.kernel_size = 3
+ self.padding = 1
+ self.input_features = input_features
+ self.num_conv_layers = num_conv_layers
+ self.middle_features = middle_features
+ if self.input_features == 5:
+ self.output_features = 4 #Grayscale image
+ elif self.input_features == 15:
+ self.output_features = 12 #RGB image
+ else:
+ raise Exception('Invalid number of input features')
+
+ layers = []
+ layers.append(nn.Conv2d(in_channels=self.input_features,\
+ out_channels=self.middle_features,\
+ kernel_size=self.kernel_size,\
+ padding=self.padding,\
+ bias=False))
+ layers.append(nn.ReLU(inplace=True))
+ for _ in range(self.num_conv_layers-2):
+ layers.append(nn.Conv2d(in_channels=self.middle_features,\
+ out_channels=self.middle_features,\
+ kernel_size=self.kernel_size,\
+ padding=self.padding,\
+ bias=False))
+ layers.append(nn.BatchNorm2d(self.middle_features))
+ layers.append(nn.ReLU(inplace=True))
+ layers.append(nn.Conv2d(in_channels=self.middle_features,\
+ out_channels=self.output_features,\
+ kernel_size=self.kernel_size,\
+ padding=self.padding,\
+ bias=False))
+ self.itermediate_dncnn = nn.Sequential(*layers)
+ def forward(self, x):
+ out = self.itermediate_dncnn(x)
+ return out
+
+class FFDNet(nn.Module):
+ r"""Implements the FFDNet architecture
+ """
+ def __init__(self, num_input_channels):
+ super(FFDNet, self).__init__()
+ self.num_input_channels = num_input_channels
+ if self.num_input_channels == 1:
+ # Grayscale image
+ self.num_feature_maps = 64
+ self.num_conv_layers = 15
+ self.downsampled_channels = 5
+ self.output_features = 4
+ elif self.num_input_channels == 3:
+ # RGB image
+ self.num_feature_maps = 96
+ self.num_conv_layers = 12
+ self.downsampled_channels = 15
+ self.output_features = 12
+ else:
+ raise Exception('Invalid number of input features')
+
+ self.intermediate_dncnn = IntermediateDnCNN(\
+ input_features=self.downsampled_channels,\
+ middle_features=self.num_feature_maps,\
+ num_conv_layers=self.num_conv_layers)
+ self.upsamplefeatures = UpSampleFeatures()
+
+ def forward(self, x, noise_sigma):
+ concat_noise_x = functions.concatenate_input_noise_map(x.data, noise_sigma.data)
+ concat_noise_x = Variable(concat_noise_x)
+ h_dncnn = self.intermediate_dncnn(concat_noise_x)
+ pred_noise = self.upsamplefeatures(h_dncnn)
+ return pred_noise
diff --git a/denoising/models/.gitkeep b/denoising/models/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/denoising/utils.py b/denoising/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..faa5016d7c1c044cf4eae49b5d572a984f4e97cf
--- /dev/null
+++ b/denoising/utils.py
@@ -0,0 +1,66 @@
+"""
+Different utilities such as orthogonalization of weights, initialization of
+loggers, etc
+
+Copyright (C) 2018, Matias Tassano
+
+This program is free software: you can use, modify and/or
+redistribute it under the terms of the GNU General Public
+License as published by the Free Software Foundation, either
+version 3 of the License, or (at your option) any later
+version. You should have received a copy of this license along
+this program. If not, see .
+"""
+import numpy as np
+import cv2
+
+
+def variable_to_cv2_image(varim):
+ r"""Converts a torch.autograd.Variable to an OpenCV image
+
+ Args:
+ varim: a torch.autograd.Variable
+ """
+ nchannels = varim.size()[1]
+ if nchannels == 1:
+ res = (varim.data.cpu().numpy()[0, 0, :]*255.).clip(0, 255).astype(np.uint8)
+ elif nchannels == 3:
+ res = varim.data.cpu().numpy()[0]
+ res = cv2.cvtColor(res.transpose(1, 2, 0), cv2.COLOR_RGB2BGR)
+ res = (res*255.).clip(0, 255).astype(np.uint8)
+ else:
+ raise Exception('Number of color channels not supported')
+ return res
+
+
+def normalize(data):
+ return np.float32(data/255.)
+
+def remove_dataparallel_wrapper(state_dict):
+ r"""Converts a DataParallel model to a normal one by removing the "module."
+ wrapper in the module dictionary
+
+ Args:
+ state_dict: a torch.nn.DataParallel state dictionary
+ """
+ from collections import OrderedDict
+
+ new_state_dict = OrderedDict()
+ for k, vl in state_dict.items():
+ name = k[7:] # remove 'module.' of DataParallel
+ new_state_dict[name] = vl
+
+ return new_state_dict
+
+def is_rgb(im_path):
+ r""" Returns True if the image in im_path is an RGB image
+ """
+ from skimage.io import imread
+ rgb = False
+ im = imread(im_path)
+ if (len(im.shape) == 3):
+ if not(np.allclose(im[...,0], im[...,1]) and np.allclose(im[...,2], im[...,1])):
+ rgb = True
+ print("rgb: {}".format(rgb))
+ print("im shape: {}".format(im.shape))
+ return rgb
diff --git a/drawing.py b/drawing.py
new file mode 100644
index 0000000000000000000000000000000000000000..8df453ad5141061cf5066437f3f57aa985ca02c6
--- /dev/null
+++ b/drawing.py
@@ -0,0 +1,165 @@
+import os
+from datetime import datetime
+import base64
+import random
+import string
+import shutil
+import torch
+import matplotlib.pyplot as plt
+import numpy as np
+from flask import Flask, request, jsonify, abort, redirect, url_for, render_template, send_file, Response
+from flask_wtf import FlaskForm
+from wtforms import StringField, FileField, BooleanField, DecimalField
+from wtforms.validators import DataRequired
+from flask import after_this_request
+
+from model.models import Colorizer, Generator
+from model.extractor import get_seresnext_extractor
+from utils.xdog import XDoGSketcher
+from utils.utils import open_json
+from denoising.denoiser import FFDNetDenoiser
+from inference import process_image_with_hint
+from utils.utils import resize_pad
+from utils.dataset_utils import get_sketch
+
+def generate_id(size=25, chars=string.ascii_letters + string.digits):
+ return ''.join(random.SystemRandom().choice(chars) for _ in range(size))
+
+def generate_unique_id(current_ids = set()):
+ id_t = generate_id()
+ while id_t in current_ids:
+ id_t = generate_id()
+
+ current_ids.add(id_t)
+
+ return id_t
+
+app = Flask(__name__)
+app.config.update(dict(
+ SECRET_KEY="lol kek",
+ WTF_CSRF_SECRET_KEY="cheburek"
+))
+
+if torch.cuda.is_available():
+ device = 'cuda'
+else:
+ device = 'cpu'
+
+colorizer = torch.jit.load('./model/colorizer.zip', map_location=torch.device(device))
+
+sketcher = XDoGSketcher()
+xdog_config = open_json('configs/xdog_config.json')
+for key in xdog_config.keys():
+ if key in sketcher.params:
+ sketcher.params[key] = xdog_config[key]
+
+denoiser = FFDNetDenoiser(device)
+
+color_args = {'colorizer':colorizer, 'sketcher':sketcher, 'device':device, 'dfm' : True, 'auto_hint' : False, 'ignore_gray' : False, 'denoiser' : denoiser, 'denoiser_sigma' : 25}
+
+
+class SubmitForm(FlaskForm):
+ file = FileField(validators=[DataRequired(), ])
+
+def preprocess_image(file_id, ext):
+ directory_path = os.path.join('static', 'temp_images', file_id)
+ original_path = os.path.join(directory_path, 'original') + ext
+ original_image = plt.imread(original_path)
+
+ resized_image, _ = resize_pad(original_image)
+ resized_image = denoiser.get_denoised_image(resized_image, 25)
+ bw, dfm = get_sketch(resized_image, sketcher, True)
+
+ resized_name = 'resized_' + str(resized_image.shape[0]) + '_' + str(resized_image.shape[1]) + '.png'
+ plt.imsave(os.path.join(directory_path, resized_name), resized_image)
+ plt.imsave(os.path.join(directory_path, 'bw.png'), bw, cmap = 'gray')
+ plt.imsave(os.path.join(directory_path, 'dfm.png'), dfm, cmap = 'gray')
+ os.remove(original_path)
+
+ empty_hint = np.zeros((resized_image.shape[0], resized_image.shape[1], 4), dtype = np.float32)
+ plt.imsave(os.path.join(directory_path, 'hint.png'), empty_hint)
+
+@app.route('/', methods=['GET', 'POST'])
+def upload():
+ form = SubmitForm()
+ if form.validate_on_submit():
+ input_data = form.file.data
+
+ _, ext = os.path.splitext(input_data.filename)
+
+ if ext not in ('.jpg', '.png', '.jpeg'):
+ return abort(400)
+
+ file_id = generate_unique_id()
+ directory = os.path.join('static', 'temp_images', file_id)
+ original_filename = os.path.join(directory, 'original') + ext
+
+ try :
+ os.mkdir(directory)
+ input_data.save(original_filename)
+
+ preprocess_image(file_id, ext)
+
+ return redirect(f'/draw/{file_id}')
+
+ except :
+ print('Failed to colorize')
+ if os.path.exists(directory):
+ shutil.rmtree(directory)
+ return abort(400)
+
+
+ return render_template("upload.html", form = form)
+
+@app.route('/img/')
+def show_image(file_id):
+ if not os.path.exists(os.path.join('static', 'temp_images', str(file_id))):
+ abort(404)
+ return f'<img src="/static/temp_images/{file_id}/colorized.png">'
+
+
+def colorize_image(file_id):
+ directory_path = os.path.join('static', 'temp_images', file_id)
+
+ bw = plt.imread(os.path.join(directory_path, 'bw.png'))[..., :1]
+ dfm = plt.imread(os.path.join(directory_path, 'dfm.png'))[..., :1]
+ hint = plt.imread(os.path.join(directory_path, 'hint.png'))
+
+ return process_image_with_hint(bw, dfm, hint, color_args)
+
+@app.route('/colorize', methods=['POST'])
+def colorize():
+
+ file_id = request.form['save_file_id']
+ file_id = file_id[file_id.rfind('/') + 1:]
+
+ img_data = request.form['save_image']
+ img_data = img_data[img_data.find(',') + 1:]
+
+ directory_path = os.path.join('static', 'temp_images', file_id)
+
+ with open(os.path.join(directory_path, 'hint.png'), "wb") as im:
+ im.write(base64.b64decode(img_data))
+
+ result = colorize_image(file_id)
+
+ plt.imsave(os.path.join(directory_path, 'colorized.png'), result)
+
+ src_path = f'../static/temp_images/{file_id}/colorized.png?{random.randint(1,1000000)}'
+
+ return src_path
+
+@app.route('/draw/', methods=['GET', 'POST'])
+def paintapp(file_id):
+ if request.method == 'GET':
+
+ directory_path = os.path.join('static', 'temp_images', str(file_id))
+ if not os.path.exists(directory_path):
+ abort(404)
+
+ resized_name = [x for x in os.listdir(directory_path) if x.startswith('resized_')][0]
+
+ split = os.path.splitext(resized_name)[0].split('_')
+ width = int(split[2])
+ height = int(split[1])
+
+ return render_template("drawing.html", height = height, width = width, img_path = os.path.join('temp_images', str(file_id), resized_name))
diff --git a/inference.py b/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..948bd02500c4fe641e6ed7705aa58e5c580b26b9
--- /dev/null
+++ b/inference.py
@@ -0,0 +1,215 @@
+import torch
+import torch.nn as nn
+import numpy as np
+from utils.dataset_utils import get_sketch
+from utils.utils import resize_pad, generate_mask, extract_cbr, create_cbz, sorted_alphanumeric, subfolder_image_search, remove_folder
+from torchvision.transforms import ToTensor
+import os
+import matplotlib.pyplot as plt
+import argparse
+from model.models import Colorizer, Generator
+from model.extractor import get_seresnext_extractor
+from utils.xdog import XDoGSketcher
+from utils.utils import open_json
+import sys
+from denoising.denoiser import FFDNetDenoiser
+
+def colorize_without_hint(inp, color_args):
+ i_hint = torch.zeros(1, 4, inp.shape[2], inp.shape[3]).float().to(color_args['device'])
+
+ with torch.no_grad():
+ fake_color, _ = color_args['colorizer'](torch.cat([inp, i_hint], 1))
+
+ if color_args['auto_hint']:
+ mask = generate_mask(fake_color.shape[2], fake_color.shape[3], full = False, prob = 1, sigma = color_args['auto_hint_sigma']).unsqueeze(0)
+ mask = mask.to(color_args['device'])
+
+
+ if color_args['ignore_gray']:
+ diff1 = torch.abs(fake_color[:, 0] - fake_color[:, 1])
+ diff2 = torch.abs(fake_color[:, 0] - fake_color[:, 2])
+ diff3 = torch.abs(fake_color[:, 1] - fake_color[:, 2])
+ mask = ((mask + ((diff1 + diff2 + diff3) > 60 / 255).float().unsqueeze(1)) == 2).float()
+
+
+ i_hint = torch.cat([fake_color * mask, mask], 1)
+
+ with torch.no_grad():
+ fake_color, _ = color_args['colorizer'](torch.cat([inp, i_hint], 1))
+
+ return fake_color
+
+
+def process_image(image, color_args, to_tensor = ToTensor()):
+ image, pad = resize_pad(image)
+
+ if color_args['denoiser'] is not None:
+ image = color_args['denoiser'].get_denoised_image(image, color_args['denoiser_sigma'])
+
+ bw, dfm = get_sketch(image, color_args['sketcher'], color_args['dfm'])
+
+ bw = to_tensor(bw).unsqueeze(0).to(color_args['device'])
+ dfm = to_tensor(dfm).unsqueeze(0).to(color_args['device'])
+
+ output = colorize_without_hint(torch.cat([bw, dfm], 1), color_args)
+ result = output[0].cpu().permute(1, 2, 0).numpy() * 0.5 + 0.5
+
+ if pad[0] != 0:
+ result = result[:-pad[0]]
+ if pad[1] != 0:
+ result = result[:, :-pad[1]]
+
+ return result
+
+def colorize_with_hint(inp, color_args):
+ with torch.no_grad():
+ fake_color, _ = color_args['colorizer'](inp)
+
+ return fake_color
+
+def process_image_with_hint(bw, dfm, hint, color_args, to_tensor = ToTensor()):
+ bw = to_tensor(bw).unsqueeze(0).to(color_args['device'])
+ dfm = to_tensor(dfm).unsqueeze(0).to(color_args['device'])
+
+ i_hint = (torch.FloatTensor(hint[..., :3]).permute(2, 0, 1) - 0.5) / 0.5
+ mask = torch.FloatTensor(hint[..., 3:]).permute(2, 0, 1)
+ i_hint = torch.cat([i_hint * mask, mask], 0).unsqueeze(0).to(color_args['device'])
+
+ output = colorize_with_hint(torch.cat([bw, dfm, i_hint], 1), color_args)
+ result = output[0].cpu().permute(1, 2, 0).numpy() * 0.5 + 0.5
+
+ return result
+
+def colorize_single_image(file_path, save_path, color_args):
+ try:
+ image = plt.imread(file_path)
+
+ colorization = process_image(image, color_args)
+
+ plt.imsave(save_path, colorization)
+
+ return True
+ except KeyboardInterrupt:
+ sys.exit(0)
+ except:
+ print('Failed to colorize {}'.format(file_path))
+ return False
+
+def colorize_images(source_path, target_path, color_args):
+ images = os.listdir(source_path)
+
+ for image_name in images:
+ file_path = os.path.join(source_path, image_name)
+
+ name, ext = os.path.splitext(image_name)
+ if (ext != '.png'):
+ image_name = name + '.png'
+
+ save_path = os.path.join(target_path, image_name)
+ colorize_single_image(file_path, save_path, color_args)
+
+def colorize_cbr(file_path, color_args):
+ file_name = os.path.splitext(os.path.basename(file_path))[0]
+ temp_path = 'temp_colorization'
+
+ if not os.path.exists(temp_path):
+ os.makedirs(temp_path)
+ extract_cbr(file_path, temp_path)
+
+ images = subfolder_image_search(temp_path)
+
+ result_images = []
+ for image_path in images:
+ save_path = image_path
+
+ path, ext = os.path.splitext(save_path)
+ if (ext != '.png'):
+ save_path = path + '.png'
+
+ res_flag = colorize_single_image(image_path, save_path, color_args)
+
+ result_images.append(save_path if res_flag else image_path)
+
+
+ result_name = os.path.join(os.path.dirname(file_path), file_name + '_colorized.cbz')
+
+ create_cbz(result_name, result_images)
+
+ remove_folder(temp_path)
+
+ return result_name
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-p", "--path", required=True)
+ parser.add_argument("-gen", "--generator", default = 'model/generator.pth')
+ parser.add_argument("-ext", "--extractor", default = 'model/extractor.pth')
+ parser.add_argument("-s", "--sigma", type = float, default = 0.003)
+ parser.add_argument('-g', '--gpu', dest = 'gpu', action = 'store_true')
+ parser.add_argument('-ah', '--auto', dest = 'autohint', action = 'store_true')
+ parser.add_argument('-ig', '--ignore_grey', dest = 'ignore', action = 'store_true')
+ parser.add_argument('-nd', '--no_denoise', dest = 'denoiser', action = 'store_false')
+ parser.add_argument("-ds", "--denoiser_sigma", type = int, default = 25)
+ parser.set_defaults(gpu = False)
+ parser.set_defaults(autohint = False)
+ parser.set_defaults(ignore = False)
+ parser.set_defaults(denoiser = True)
+ args = parser.parse_args()
+
+ return args
+
+
+if __name__ == "__main__":
+
+ args = parse_args()
+
+ if args.gpu:
+ device = 'cuda'
+ else:
+ device = 'cpu'
+
+ generator = Generator()
+ generator.load_state_dict(torch.load(args.generator))
+
+ extractor = get_seresnext_extractor()
+ extractor.load_state_dict(torch.load(args.extractor))
+
+ colorizer = Colorizer(generator, extractor)
+ colorizer = colorizer.eval().to(device)
+
+ sketcher = XDoGSketcher()
+ xdog_config = open_json('configs/xdog_config.json')
+ for key in xdog_config.keys():
+ if key in sketcher.params:
+ sketcher.params[key] = xdog_config[key]
+
+ denoiser = None
+ if args.denoiser:
+ denoiser = FFDNetDenoiser(device, args.denoiser_sigma)
+
+ color_args = {'colorizer':colorizer, 'sketcher':sketcher, 'auto_hint':args.autohint, 'auto_hint_sigma':args.sigma,\
+ 'ignore_gray':args.ignore, 'device':device, 'dfm' : True, 'denoiser':denoiser, 'denoiser_sigma' : args.denoiser_sigma}
+
+
+ if os.path.isdir(args.path):
+ colorization_path = os.path.join(args.path, 'colorization')
+ if not os.path.exists(colorization_path):
+ os.makedirs(colorization_path)
+
+ colorize_images(args.path, colorization_path, color_args)
+
+ elif os.path.isfile(args.path):
+
+ split = os.path.splitext(args.path)
+
+ if split[1].lower() in ('.cbr', '.cbz', '.rar', '.zip'):
+ colorize_cbr(args.path, color_args)
+ elif split[1].lower() in ('.jpg', '.png', '.jpeg'):
+ new_image_path = split[0] + '_colorized' + '.png'
+
+ colorize_single_image(args.path, new_image_path, color_args)
+ else:
+ print('Wrong format')
+ else:
+ print('Wrong path')
+
diff --git a/manga/bw/001-0000-0000.png b/manga/bw/001-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..eda458020958d1c206f146bcc5738deebc26f420
--- /dev/null
+++ b/manga/bw/001-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6bd52e61f3f10f698ccbf074becf58c34ff03415377e70e1bf89f1f2768d9bc
+size 1883215
diff --git a/manga/bw/002-0000-0000.png b/manga/bw/002-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..b46f6b041a05eec3cd0c0726a9a0d8d9a26b5524
--- /dev/null
+++ b/manga/bw/002-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91f59c85df765ac3ef9435aadfa2f6d0a6226d643db89af950997ef754f6048a
+size 1719059
diff --git a/manga/bw/003-0000-0000.png b/manga/bw/003-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..3c49ebfa64e823e64c9edd30477fa92ce20da12a
--- /dev/null
+++ b/manga/bw/003-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a02d1dd89549f704a79588b8f6b1d0d32a7ba8b7f0d663f8a71e375416d3a42
+size 301463
diff --git a/manga/bw/004-0000-0000.png b/manga/bw/004-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..312be3be3657a77bb9a66e17cb7a4f558f6b1c60
--- /dev/null
+++ b/manga/bw/004-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ce252a56fd3dc9745b517ce5a7023cb78ed23a985d4c4dc9ef884258688620f
+size 206460
diff --git a/manga/bw/x1-0000-0000.png b/manga/bw/x1-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..171dc4b3b2ddbc33549e481f5ee98ec6753bef55
--- /dev/null
+++ b/manga/bw/x1-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c347e8b43de1c3d2615c9b4efbf30bf8dfe8ca16f0a5aee327bea898b0b19e84
+size 1677651
diff --git a/manga/bw/x2-0000-0000.png b/manga/bw/x2-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..fa6ccb7aa847372220c775c2c7e045601ed217dc
--- /dev/null
+++ b/manga/bw/x2-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10957841fd23284c3a92beaed594efc7ec6d2680483d9bebd7055e9bb7885f26
+size 3063419
diff --git a/manga/bw/x3-0000-0000.png b/manga/bw/x3-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..b182da90d8963677b26443f5a149606b451c25c6
--- /dev/null
+++ b/manga/bw/x3-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44a56370209961a1ac4e69ad672113c7302a991b7d0b1d36ecdddb718aed8367
+size 1589346
diff --git a/manga/bw/x4-0000-0000.png b/manga/bw/x4-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..190542a9d237bd08d3deb02e42bfc8e24c08e78b
--- /dev/null
+++ b/manga/bw/x4-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2df448fad5bc289361cbfe2fabd3cb666103a27a201f7a447b52ca85ae9362f0
+size 1519170
diff --git a/manga/bw/x5-0000-0000.png b/manga/bw/x5-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..d93a359cdc143836cc7770020d9fda3704fbdc86
--- /dev/null
+++ b/manga/bw/x5-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:006617b354cc17d1ff7c65ce19410bc7b1632cf5369ad5165deacad77a739c03
+size 1711422
diff --git a/manga/color/001-0000-0000.png b/manga/color/001-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..9cefd8c8abac78c6945b23948aeda466f87b0aeb
--- /dev/null
+++ b/manga/color/001-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e38ea5f3cbb32c13262dda878724f3733e2b46bfdcb13a0af09aa4fb36cd420
+size 6805448
diff --git a/manga/color/002-0000-0000.png b/manga/color/002-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..2081415225a2843a79950cd1a6da712516a3084e
--- /dev/null
+++ b/manga/color/002-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a289ac6f4c1b477e987eef553232e70cefd536b62cf7c0662e27d84c55b3f02
+size 6256919
diff --git a/manga/color/003-0000-0000.png b/manga/color/003-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..6863b310f55afd980523d4e76b8103d08234edda
--- /dev/null
+++ b/manga/color/003-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a5a43ea8c8d2ef1c784a82dc96050fed27e673a0d6baba50d593cc487c73f75
+size 870557
diff --git a/manga/color/004-0000-0000.png b/manga/color/004-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..465cafa406a995d1dbb842df71651ada650df989
--- /dev/null
+++ b/manga/color/004-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42c1ccdad566868f845369c5c10268d269b943c86de81b189d8b80fa25554443
+size 583796
diff --git a/manga/color/x1-0000-0000.png b/manga/color/x1-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..1afd4d15cf607e9c65ef8735fcf5e6ff1febe9fc
--- /dev/null
+++ b/manga/color/x1-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fee65ac5d942ccbcf46250042077897a1a1d0288dbcbd4d4e01a95667b4321f
+size 5736776
diff --git a/manga/color/x2-0000-0000.png b/manga/color/x2-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..8d0a18e3efbd3b218ae7ab9d1ae267e731377f31
--- /dev/null
+++ b/manga/color/x2-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3934c58bf1876be001bd806f637d7722dbd6ba34ba9cc2aed98288019771a248
+size 11348109
diff --git a/manga/color/x3-0000-0000.png b/manga/color/x3-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..06ad2916bc2b6053c0348a60d7d9e747bedf14ab
--- /dev/null
+++ b/manga/color/x3-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8690c5617dbb1215cfdc0f70a2e31156794b0abf21dde7f9c50bbe2675de71e
+size 5254743
diff --git a/manga/color/x4-0000-0000.png b/manga/color/x4-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..13412aca416e085d8e9bacd27672136b7c01d6c9
--- /dev/null
+++ b/manga/color/x4-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5d0f633082871aa456f2d7864d0d9f4fa1ca9ae2b1cc229df4927b38a4df40c
+size 4983819
diff --git a/manga/color/x5-0000-0000.png b/manga/color/x5-0000-0000.png
new file mode 100644
index 0000000000000000000000000000000000000000..24ef89fa9e3a9a2d3af9a2baebfa007b7928e584
--- /dev/null
+++ b/manga/color/x5-0000-0000.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acba9510555cebb18fbdb220ef3f22724617893f35d8a3fe1c509ed31f257dbb
+size 5633434
diff --git a/manga/real_manga/06 - copia.webp b/manga/real_manga/06 - copia.webp
new file mode 100644
index 0000000000000000000000000000000000000000..5780b006a85fc26fe9d07729854faebadbca847c
--- /dev/null
+++ b/manga/real_manga/06 - copia.webp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e68ca338e75c67f2b185d180827b7e06c3e7fe8cf8424750c3581603bb314eaf
+size 95992
diff --git a/manga/real_manga/06.webp b/manga/real_manga/06.webp
new file mode 100644
index 0000000000000000000000000000000000000000..5780b006a85fc26fe9d07729854faebadbca847c
--- /dev/null
+++ b/manga/real_manga/06.webp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e68ca338e75c67f2b185d180827b7e06c3e7fe8cf8424750c3581603bb314eaf
+size 95992
diff --git a/manga/real_manga/09 - copia.webp b/manga/real_manga/09 - copia.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a632540e7424e69bedeeed843f8c66b17b47ec37
--- /dev/null
+++ b/manga/real_manga/09 - copia.webp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7adabde7144919e799f80059db5c295e318f7f463e49f2f8d9d2b96e29d155b5
+size 134598
diff --git a/manga/real_manga/11.webp b/manga/real_manga/11.webp
new file mode 100644
index 0000000000000000000000000000000000000000..2ece5084577c9899e79fcae9357f1c52f8f17519
--- /dev/null
+++ b/manga/real_manga/11.webp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:808e2b43f64275f8308d595503ec4b275b91e0eaa8e065714244b840e60822f0
+size 129338
diff --git a/manga/real_manga/12 - copia.webp b/manga/real_manga/12 - copia.webp
new file mode 100644
index 0000000000000000000000000000000000000000..02934b3d6a598ec9e6164772089ca3854d38cf38
--- /dev/null
+++ b/manga/real_manga/12 - copia.webp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77fac1072dec146c69648b649848ba868cccc8852a41b0e9cbc0bdd1f1878c27
+size 149288
diff --git a/manga/real_manga/12.webp b/manga/real_manga/12.webp
new file mode 100644
index 0000000000000000000000000000000000000000..02934b3d6a598ec9e6164772089ca3854d38cf38
--- /dev/null
+++ b/manga/real_manga/12.webp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77fac1072dec146c69648b649848ba868cccc8852a41b0e9cbc0bdd1f1878c27
+size 149288
diff --git a/manga/real_manga/13 - copia.webp b/manga/real_manga/13 - copia.webp
new file mode 100644
index 0000000000000000000000000000000000000000..bc3ba2a5927ab4b7bf3c48024752da931bd54152
--- /dev/null
+++ b/manga/real_manga/13 - copia.webp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99a1d040bd25ff4df102bde65451a4e0a71de0de77b134a5820a67bda8bef3af
+size 136186
diff --git a/manga/real_manga/13.webp b/manga/real_manga/13.webp
new file mode 100644
index 0000000000000000000000000000000000000000..bc3ba2a5927ab4b7bf3c48024752da931bd54152
--- /dev/null
+++ b/manga/real_manga/13.webp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99a1d040bd25ff4df102bde65451a4e0a71de0de77b134a5820a67bda8bef3af
+size 136186
diff --git a/manga/real_manga/14 - copia.webp b/manga/real_manga/14 - copia.webp
new file mode 100644
index 0000000000000000000000000000000000000000..72a8519609ea75e6f8abc6eac48461a99fc74012
--- /dev/null
+++ b/manga/real_manga/14 - copia.webp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b954c13098fda8eb659915944857c7918740c334ca86e8885d6dfbb70478036
+size 151384
diff --git a/manga/real_manga/14.webp b/manga/real_manga/14.webp
new file mode 100644
index 0000000000000000000000000000000000000000..72a8519609ea75e6f8abc6eac48461a99fc74012
--- /dev/null
+++ b/manga/real_manga/14.webp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b954c13098fda8eb659915944857c7918740c334ca86e8885d6dfbb70478036
+size 151384
diff --git a/manga/real_manga/OP_(13).webp b/manga/real_manga/OP_(13).webp
new file mode 100644
index 0000000000000000000000000000000000000000..d9b76d46b180de577fbc38ae06301ba6aa757fd5
--- /dev/null
+++ b/manga/real_manga/OP_(13).webp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b781d94c36c67bd7d13685c421dae7d7e448c308eb8e044af6d776c5adeab88
+size 213950
diff --git a/manga/real_manga/OP_(16).webp b/manga/real_manga/OP_(16).webp
new file mode 100644
index 0000000000000000000000000000000000000000..4458c1e1bb3abb9958c468f418ffc4e211e1cccf
--- /dev/null
+++ b/manga/real_manga/OP_(16).webp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:674d4b51806acfac9265ebfd30e66e8154e12d90418e4ec4ff7eb7cccc780770
+size 128274
diff --git a/manga/real_manga/OP_(18).webp b/manga/real_manga/OP_(18).webp
new file mode 100644
index 0000000000000000000000000000000000000000..832ca1f2e26c3cf5a4c67e86a9aa966d81781915
--- /dev/null
+++ b/manga/real_manga/OP_(18).webp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac8b9da7926bad9cef67dc6b0d2af3def0f90e216685c19a99f0d97307b6925a
+size 94780
diff --git a/manga/real_manga/OP_(19).webp b/manga/real_manga/OP_(19).webp
new file mode 100644
index 0000000000000000000000000000000000000000..91292c9a8450f48d3bf5c1fdb4ce6c51477bf424
--- /dev/null
+++ b/manga/real_manga/OP_(19).webp
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c956de243bb5688c37ccff1764cec458b602870019a7adb9bf565ef43621643f
+size 238510
diff --git a/model/__pycache__/extractor.cpython-39.pyc b/model/__pycache__/extractor.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba937f2bfa5f539256128bc88eea5ead7384e756
Binary files /dev/null and b/model/__pycache__/extractor.cpython-39.pyc differ
diff --git a/model/__pycache__/models.cpython-39.pyc b/model/__pycache__/models.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0623d54c2dab1b402aba104f5e7b32cd6ee84288
Binary files /dev/null and b/model/__pycache__/models.cpython-39.pyc differ
diff --git a/model/extractor.pth b/model/extractor.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ea1791293b0df38d22db5dca09c703e111c89c3
--- /dev/null
+++ b/model/extractor.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee3c59f02ac8c59298fd9b819fa33d2efa168847e15e4be39b35c286f7c18607
+size 6340842
diff --git a/model/extractor.py b/model/extractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..91b9ffc7e5de4f127e228afbcfcf2465a1f8bdf4
--- /dev/null
+++ b/model/extractor.py
@@ -0,0 +1,127 @@
+import torch
+import torch.nn as nn
+import math
+
+'''https://github.com/blandocs/Tag2Pix/blob/master/model/pretrained.py'''
+
+# Pretrained version
+class Selayer(nn.Module):
+ def __init__(self, inplanes):
+ super(Selayer, self).__init__()
+ self.global_avgpool = nn.AdaptiveAvgPool2d(1)
+ self.conv1 = nn.Conv2d(inplanes, inplanes // 16, kernel_size=1, stride=1)
+ self.conv2 = nn.Conv2d(inplanes // 16, inplanes, kernel_size=1, stride=1)
+ self.relu = nn.ReLU(inplace=True)
+ self.sigmoid = nn.Sigmoid()
+
+ def forward(self, x):
+ out = self.global_avgpool(x)
+ out = self.conv1(out)
+ out = self.relu(out)
+ out = self.conv2(out)
+ out = self.sigmoid(out)
+
+ return x * out
+
+
+class BottleneckX_Origin(nn.Module):
+ expansion = 4
+
+ def __init__(self, inplanes, planes, cardinality, stride=1, downsample=None):
+ super(BottleneckX_Origin, self).__init__()
+ self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(planes * 2)
+
+ self.conv2 = nn.Conv2d(planes * 2, planes * 2, kernel_size=3, stride=stride,
+ padding=1, groups=cardinality, bias=False)
+ self.bn2 = nn.BatchNorm2d(planes * 2)
+
+ self.conv3 = nn.Conv2d(planes * 2, planes * 4, kernel_size=1, bias=False)
+ self.bn3 = nn.BatchNorm2d(planes * 4)
+
+ self.selayer = Selayer(planes * 4)
+
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+ out = self.relu(out)
+
+ out = self.conv3(out)
+ out = self.bn3(out)
+
+ out = self.selayer(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out += residual
+ out = self.relu(out)
+
+ return out
+
+class SEResNeXt_extractor(nn.Module):
+ def __init__(self, block, layers, input_channels=3, cardinality=32):
+ super(SEResNeXt_extractor, self).__init__()
+ self.cardinality = cardinality
+ self.inplanes = 64
+ self.input_channels = input_channels
+
+ self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
+ bias=False)
+ self.bn1 = nn.BatchNorm2d(64)
+ self.relu = nn.ReLU(inplace=True)
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+ self.layer1 = self._make_layer(block, 64, layers[0])
+ self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
+
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+ m.weight.data.normal_(0, math.sqrt(2. / n))
+ if m.bias is not None:
+ m.bias.data.zero_()
+ elif isinstance(m, nn.BatchNorm2d):
+ m.weight.data.fill_(1)
+ m.bias.data.zero_()
+
+ def _make_layer(self, block, planes, blocks, stride=1):
+ downsample = None
+ if stride != 1 or self.inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ nn.Conv2d(self.inplanes, planes * block.expansion,
+ kernel_size=1, stride=stride, bias=False),
+ nn.BatchNorm2d(planes * block.expansion),
+ )
+
+ layers = []
+ layers.append(block(self.inplanes, planes, self.cardinality, stride, downsample))
+ self.inplanes = planes * block.expansion
+ for i in range(1, blocks):
+ layers.append(block(self.inplanes, planes, self.cardinality))
+
+ return nn.Sequential(*layers)
+
+ def forward(self, x):
+ x = self.conv1(x)
+ x = self.bn1(x)
+ x = self.relu(x)
+ x = self.maxpool(x)
+
+ x = self.layer1(x)
+ x = self.layer2(x)
+
+ return x
+
+def get_seresnext_extractor():
+ return SEResNeXt_extractor(BottleneckX_Origin, [3, 4, 6, 3], 1)
\ No newline at end of file
diff --git a/model/models.py b/model/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b74d4a3fdb7d67882298f22e83cba764b807759
--- /dev/null
+++ b/model/models.py
@@ -0,0 +1,422 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision.models as M
+import math
+from torch import Tensor
+from torch.nn import Parameter
+
+'''https://github.com/orashi/AlacGAN/blob/master/models/standard.py'''
+
+def l2normalize(v, eps=1e-12):
+ return v / (v.norm() + eps)
+
+
+class SpectralNorm(nn.Module):
+ def __init__(self, module, name='weight', power_iterations=1):
+ super(SpectralNorm, self).__init__()
+ self.module = module
+ self.name = name
+ self.power_iterations = power_iterations
+ if not self._made_params():
+ self._make_params()
+
+ def _update_u_v(self):
+ u = getattr(self.module, self.name + "_u")
+ v = getattr(self.module, self.name + "_v")
+ w = getattr(self.module, self.name + "_bar")
+
+ height = w.data.shape[0]
+ for _ in range(self.power_iterations):
+ v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
+ u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))
+
+ # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
+ sigma = u.dot(w.view(height, -1).mv(v))
+ setattr(self.module, self.name, w / sigma.expand_as(w))
+
+ def _made_params(self):
+ try:
+ u = getattr(self.module, self.name + "_u")
+ v = getattr(self.module, self.name + "_v")
+ w = getattr(self.module, self.name + "_bar")
+ return True
+ except AttributeError:
+ return False
+
+
+ def _make_params(self):
+ w = getattr(self.module, self.name)
+ height = w.data.shape[0]
+ width = w.view(height, -1).data.shape[1]
+
+ u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
+ v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
+ u.data = l2normalize(u.data)
+ v.data = l2normalize(v.data)
+ w_bar = Parameter(w.data)
+
+ del self.module._parameters[self.name]
+
+ self.module.register_parameter(self.name + "_u", u)
+ self.module.register_parameter(self.name + "_v", v)
+ self.module.register_parameter(self.name + "_bar", w_bar)
+
+
+ def forward(self, *args):
+ self._update_u_v()
+ return self.module.forward(*args)
+
+class Selayer(nn.Module):
+ def __init__(self, inplanes):
+ super(Selayer, self).__init__()
+ self.global_avgpool = nn.AdaptiveAvgPool2d(1)
+ self.conv1 = nn.Conv2d(inplanes, inplanes // 16, kernel_size=1, stride=1)
+ self.conv2 = nn.Conv2d(inplanes // 16, inplanes, kernel_size=1, stride=1)
+ self.relu = nn.ReLU(inplace=True)
+ self.sigmoid = nn.Sigmoid()
+
+ def forward(self, x):
+ out = self.global_avgpool(x)
+ out = self.conv1(out)
+ out = self.relu(out)
+ out = self.conv2(out)
+ out = self.sigmoid(out)
+
+ return x * out
+
+class SelayerSpectr(nn.Module):
+ def __init__(self, inplanes):
+ super(SelayerSpectr, self).__init__()
+ self.global_avgpool = nn.AdaptiveAvgPool2d(1)
+ self.conv1 = SpectralNorm(nn.Conv2d(inplanes, inplanes // 16, kernel_size=1, stride=1))
+ self.conv2 = SpectralNorm(nn.Conv2d(inplanes // 16, inplanes, kernel_size=1, stride=1))
+ self.relu = nn.ReLU(inplace=True)
+ self.sigmoid = nn.Sigmoid()
+
+ def forward(self, x):
+ out = self.global_avgpool(x)
+ out = self.conv1(out)
+ out = self.relu(out)
+ out = self.conv2(out)
+ out = self.sigmoid(out)
+
+ return x * out
+
+class ResNeXtBottleneck(nn.Module):
+ def __init__(self, in_channels=256, out_channels=256, stride=1, cardinality=32, dilate=1):
+ super(ResNeXtBottleneck, self).__init__()
+ D = out_channels // 2
+ self.out_channels = out_channels
+ self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)
+ self.conv_conv = nn.Conv2d(D, D, kernel_size=2 + stride, stride=stride, padding=dilate, dilation=dilate,
+ groups=cardinality,
+ bias=False)
+ self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
+ self.shortcut = nn.Sequential()
+ if stride != 1:
+ self.shortcut.add_module('shortcut',
+ nn.AvgPool2d(2, stride=2))
+
+ self.selayer = Selayer(out_channels)
+
+ def forward(self, x):
+ bottleneck = self.conv_reduce.forward(x)
+ bottleneck = F.leaky_relu(bottleneck, 0.2, True)
+ bottleneck = self.conv_conv.forward(bottleneck)
+ bottleneck = F.leaky_relu(bottleneck, 0.2, True)
+ bottleneck = self.conv_expand.forward(bottleneck)
+ bottleneck = self.selayer(bottleneck)
+
+ x = self.shortcut.forward(x)
+ return x + bottleneck
+
+class SpectrResNeXtBottleneck(nn.Module):
+ def __init__(self, in_channels=256, out_channels=256, stride=1, cardinality=32, dilate=1):
+ super(SpectrResNeXtBottleneck, self).__init__()
+ D = out_channels // 2
+ self.out_channels = out_channels
+ self.conv_reduce = SpectralNorm(nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False))
+ self.conv_conv = SpectralNorm(nn.Conv2d(D, D, kernel_size=2 + stride, stride=stride, padding=dilate, dilation=dilate,
+ groups=cardinality,
+ bias=False))
+ self.conv_expand = SpectralNorm(nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False))
+ self.shortcut = nn.Sequential()
+ if stride != 1:
+ self.shortcut.add_module('shortcut',
+ nn.AvgPool2d(2, stride=2))
+
+ self.selayer = SelayerSpectr(out_channels)
+
+ def forward(self, x):
+ bottleneck = self.conv_reduce.forward(x)
+ bottleneck = F.leaky_relu(bottleneck, 0.2, True)
+ bottleneck = self.conv_conv.forward(bottleneck)
+ bottleneck = F.leaky_relu(bottleneck, 0.2, True)
+ bottleneck = self.conv_expand.forward(bottleneck)
+ bottleneck = self.selayer(bottleneck)
+
+ x = self.shortcut.forward(x)
+ return x + bottleneck
+
+class FeatureConv(nn.Module):
+ def __init__(self, input_dim=512, output_dim=512):
+ super(FeatureConv, self).__init__()
+
+ no_bn = True
+
+ seq = []
+ seq.append(nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=1, padding=1, bias=False))
+ if not no_bn: seq.append(nn.BatchNorm2d(output_dim))
+ seq.append(nn.ReLU(inplace=True))
+ seq.append(nn.Conv2d(output_dim, output_dim, kernel_size=3, stride=2, padding=1, bias=False))
+ if not no_bn: seq.append(nn.BatchNorm2d(output_dim))
+ seq.append(nn.ReLU(inplace=True))
+ seq.append(nn.Conv2d(output_dim, output_dim, kernel_size=3, stride=1, padding=1, bias=False))
+ seq.append(nn.ReLU(inplace=True))
+
+ self.network = nn.Sequential(*seq)
+
+ def forward(self, x):
+ return self.network(x)
+
+class Generator(nn.Module):
+ def __init__(self, ngf=64):
+ super(Generator, self).__init__()
+
+ self.feature_conv = FeatureConv()
+
+ self.to0 = self._make_encoder_block_first(6, 32)
+ self.to1 = self._make_encoder_block(32, 64)
+ self.to2 = self._make_encoder_block(64, 128)
+ self.to3 = self._make_encoder_block(128, 256)
+ self.to4 = self._make_encoder_block(256, 512)
+
+ self.deconv_for_decoder = nn.Sequential(
+ nn.ConvTranspose2d(256, 128, 3, stride=2, padding=1, output_padding=1), # output is 64 * 64
+ nn.LeakyReLU(0.2),
+ nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1, output_padding=1), # output is 128 * 128
+ nn.LeakyReLU(0.2),
+ nn.ConvTranspose2d(64, 32, 3, stride=2, padding=1, output_padding=1), # output is 256 * 256
+ nn.LeakyReLU(0.2),
+ nn.ConvTranspose2d(32, 3, 3, stride=1, padding=1, output_padding=0), # output is 256 * 256
+ nn.Tanh(),
+ )
+
+ tunnel4 = nn.Sequential(*[ResNeXtBottleneck(ngf * 8, ngf * 8, cardinality=32, dilate=1) for _ in range(20)])
+
+ self.tunnel4 = nn.Sequential(nn.Conv2d(ngf * 8 + 512, ngf * 8, kernel_size=3, stride=1, padding=1),
+ nn.LeakyReLU(0.2, True),
+ tunnel4,
+ nn.Conv2d(ngf * 8, ngf * 4 * 4, kernel_size=3, stride=1, padding=1),
+ nn.PixelShuffle(2),
+ nn.LeakyReLU(0.2, True)
+ ) # 64
+
+ depth = 2
+ tunnel = [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=1) for _ in range(depth)]
+ tunnel += [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=2) for _ in range(depth)]
+ tunnel += [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=4) for _ in range(depth)]
+ tunnel += [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=2),
+ ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=1)]
+ tunnel3 = nn.Sequential(*tunnel)
+
+ self.tunnel3 = nn.Sequential(nn.Conv2d(ngf * 8, ngf * 4, kernel_size=3, stride=1, padding=1),
+ nn.LeakyReLU(0.2, True),
+ tunnel3,
+ nn.Conv2d(ngf * 4, ngf * 2 * 4, kernel_size=3, stride=1, padding=1),
+ nn.PixelShuffle(2),
+ nn.LeakyReLU(0.2, True)
+ ) # 128
+
+ tunnel = [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=1) for _ in range(depth)]
+ tunnel += [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=2) for _ in range(depth)]
+ tunnel += [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=4) for _ in range(depth)]
+ tunnel += [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=2),
+ ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=1)]
+ tunnel2 = nn.Sequential(*tunnel)
+
+ self.tunnel2 = nn.Sequential(nn.Conv2d(ngf * 4, ngf * 2, kernel_size=3, stride=1, padding=1),
+ nn.LeakyReLU(0.2, True),
+ tunnel2,
+ nn.Conv2d(ngf * 2, ngf * 4, kernel_size=3, stride=1, padding=1),
+ nn.PixelShuffle(2),
+ nn.LeakyReLU(0.2, True)
+ )
+
+ tunnel = [ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=1)]
+ tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=2)]
+ tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=4)]
+ tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=2),
+ ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=1)]
+ tunnel1 = nn.Sequential(*tunnel)
+
+ self.tunnel1 = nn.Sequential(nn.Conv2d(ngf * 2, ngf, kernel_size=3, stride=1, padding=1),
+ nn.LeakyReLU(0.2, True),
+ tunnel1,
+ nn.Conv2d(ngf, ngf * 2, kernel_size=3, stride=1, padding=1),
+ nn.PixelShuffle(2),
+ nn.LeakyReLU(0.2, True)
+ )
+
+ self.exit = nn.Conv2d(ngf, 3, kernel_size=3, stride=1, padding=1)
+
+
+ def _make_encoder_block(self, inplanes, planes):
+ return nn.Sequential(
+ nn.Conv2d(inplanes, planes, 3, 2, 1),
+ nn.LeakyReLU(0.2),
+ nn.Conv2d(planes, planes, 3, 1, 1),
+ nn.LeakyReLU(0.2),
+ )
+
+ def _make_encoder_block_first(self, inplanes, planes):
+ return nn.Sequential(
+ nn.Conv2d(inplanes, planes, 3, 1, 1),
+ nn.LeakyReLU(0.2),
+ nn.Conv2d(planes, planes, 3, 1, 1),
+ nn.LeakyReLU(0.2),
+ )
+
+ def forward(self, sketch, sketch_feat):
+
+ x0 = self.to0(sketch)
+ x1 = self.to1(x0)
+ x2 = self.to2(x1)
+ x3 = self.to3(x2)
+ x4 = self.to4(x3)
+
+ sketch_feat = self.feature_conv(sketch_feat)
+
+ out = self.tunnel4(torch.cat([x4, sketch_feat], 1))
+
+
+
+
+ x = self.tunnel3(torch.cat([out, x3], 1))
+ x = self.tunnel2(torch.cat([x, x2], 1))
+ x = self.tunnel1(torch.cat([x, x1], 1))
+ x = torch.tanh(self.exit(torch.cat([x, x0], 1)))
+
+ decoder_output = self.deconv_for_decoder(out)
+
+ return x, decoder_output
+'''
+class Colorizer(nn.Module):
+ def __init__(self, extractor_path = 'model/model.pth'):
+ super(Colorizer, self).__init__()
+
+ self.generator = Generator()
+ self.extractor = se_resnext_half(dump_path=extractor_path, num_classes=370, input_channels=1)
+
+ def extractor_eval(self):
+ for param in self.extractor.parameters():
+ param.requires_grad = False
+
+ def extractor_train(self):
+        for param in self.extractor.parameters():
+ param.requires_grad = True
+
+ def forward(self, x, extractor_grad = False):
+
+ if extractor_grad:
+ features = self.extractor(x[:, 0:1])
+ else:
+ with torch.no_grad():
+ features = self.extractor(x[:, 0:1]).detach()
+
+ fake, guide = self.generator(x, features)
+
+ return fake, guide
+'''
+
+class Colorizer(nn.Module):
+ def __init__(self, generator_model, extractor_model):
+ super(Colorizer, self).__init__()
+
+ self.generator = generator_model
+ self.extractor = extractor_model
+
+ def load_generator_weights(self, gen_weights):
+ self.generator.load_state_dict(gen_weights)
+
+ def load_extractor_weights(self, ext_weights):
+ self.extractor.load_state_dict(ext_weights)
+
+ def extractor_eval(self):
+ for param in self.extractor.parameters():
+ param.requires_grad = False
+ self.extractor.eval()
+
+ def extractor_train(self):
+        for param in self.extractor.parameters():
+ param.requires_grad = True
+ self.extractor.train()
+
+ def forward(self, x, extractor_grad = False):
+
+ if extractor_grad:
+ features = self.extractor(x[:, 0:1])
+ else:
+ with torch.no_grad():
+ features = self.extractor(x[:, 0:1]).detach()
+
+ fake, guide = self.generator(x, features)
+
+ return fake, guide
+
+class Discriminator(nn.Module):
+ def __init__(self, ndf=64):
+ super(Discriminator, self).__init__()
+
+ self.feed = nn.Sequential(SpectralNorm(nn.Conv2d(3, 64, 3, 1, 1)),
+ nn.LeakyReLU(0.2, True),
+ SpectralNorm(nn.Conv2d(64, 64, 3, 2, 0)),
+ nn.LeakyReLU(0.2, True),
+
+
+
+
+ SpectrResNeXtBottleneck(ndf, ndf, cardinality=8, dilate=1),
+ SpectrResNeXtBottleneck(ndf, ndf, cardinality=8, dilate=1, stride=2), # 128
+ SpectralNorm(nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=False)),
+ nn.LeakyReLU(0.2, True),
+
+ SpectrResNeXtBottleneck(ndf * 2, ndf * 2, cardinality=8, dilate=1),
+ SpectrResNeXtBottleneck(ndf * 2, ndf * 2, cardinality=8, dilate=1, stride=2), # 64
+ SpectralNorm(nn.Conv2d(ndf * 2, ndf * 4, kernel_size=1, stride=1, padding=0, bias=False)),
+ nn.LeakyReLU(0.2, True),
+
+ SpectrResNeXtBottleneck(ndf * 4, ndf * 4, cardinality=8, dilate=1),
+ SpectrResNeXtBottleneck(ndf * 4, ndf * 4, cardinality=8, dilate=1, stride=2), # 32,
+ SpectralNorm(nn.Conv2d(ndf * 4, ndf * 8, kernel_size=1, stride=1, padding=1, bias=False)),
+ nn.LeakyReLU(0.2, True),
+ SpectrResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
+ SpectrResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2), # 16
+ SpectrResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
+ SpectrResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
+ nn.AdaptiveAvgPool2d((1, 1))
+ )
+
+ self.out = nn.Linear(512, 1)
+
+ def forward(self, color):
+ x = self.feed(color)
+
+ out = self.out(x.view(color.size(0), -1))
+ return out
+
+class Content(nn.Module):
+ def __init__(self, path):
+ super(Content, self).__init__()
+ vgg16 = M.vgg16()
+ vgg16.load_state_dict(torch.load(path))
+ vgg16.features = nn.Sequential(
+ *list(vgg16.features.children())[:9]
+ )
+ self.model = vgg16.features
+ self.register_buffer('mean', torch.FloatTensor([0.485 - 0.5, 0.456 - 0.5, 0.406 - 0.5]).view(1, 3, 1, 1))
+ self.register_buffer('std', torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
+
+ def forward(self, images):
+ return self.model((images.mul(0.5) - self.mean) / self.std)
diff --git a/model/vgg16-397923af.pth b/model/vgg16-397923af.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9dfa9aa51ae961694af6b9dfa41bd8338de8272a
--- /dev/null
+++ b/model/vgg16-397923af.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:397923af8e79cdbb6a7127f12361acd7a2f83e06b05044ddf496e83de57a5bf0
+size 553433881
diff --git a/readme.md b/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..dd1120612a70520f1a2a2237a5bfc44a75475d6a
--- /dev/null
+++ b/readme.md
@@ -0,0 +1,22 @@
+UPD. See the [improved version](https://github.com/qweasdd/manga-colorization-v2).
+
+# Automatic colorization
+
+1. Download [generator](https://drive.google.com/file/d/1Oo6ycphJ3sUOpDCDoG29NA5pbhQVCevY/view?usp=sharing), [extractor](https://drive.google.com/file/d/12cbNyJcCa1zI2EBz6nea3BXl21Fm73Bt/view?usp=sharing) and [denoiser ](https://drive.google.com/file/d/161oyQcYpdkVdw8gKz_MA8RD-Wtg9XDp3/view?usp=sharing) weights. Put generator and extractor weights in `model` and denoiser weights in `denoising/models`.
+2. To colorize image, folder of images, `.cbz` or `.cbr` file, use the following command:
+```
+$ python inference.py -p "path to file or folder"
+```
+
+# Manual colorization with color hints
+
+1. Download [colorizer](https://drive.google.com/file/d/1BERrMl9e7cKsk9m2L0q1yO4k7blNhEWC/view?usp=sharing) and [denoiser ](https://drive.google.com/file/d/161oyQcYpdkVdw8gKz_MA8RD-Wtg9XDp3/view?usp=sharing) weights. Put colorizer weights in `model` and denoiser weights in `denoising/models`.
+2. Run gunicorn server with:
+```
+$ ./run_drawing.sh
+```
+3. Open `localhost:5000` with a browser.
+
+# References
+1. Extractor weights are taken from https://github.com/blandocs/Tag2Pix/releases/download/release/model.pth
+2. Denoiser weights are taken from http://www.ipol.im/pub/art/2019/231.
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b94fd62b1ba415ea1d519dbb6d3ee09a9c245fd0
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,10 @@
+flask==1.1.1
+gunicorn
+numpy==1.16.6
+flask_wtf==0.14.3
+matplotlib==3.1.1
+opencv-python==4.8.1.78
+snowy
+scipy==1.3.3
+scikit-image==0.15.0
+patool==1.12
diff --git a/run_drawing.sh b/run_drawing.sh
new file mode 100644
index 0000000000000000000000000000000000000000..01486532894bea793248d433dad02f1d9b9640d5
--- /dev/null
+++ b/run_drawing.sh
@@ -0,0 +1 @@
+gunicorn --worker-class gevent --timeout 150 -w 1 -b 0.0.0.0:5000 drawing:app
diff --git a/static/js/draw.js b/static/js/draw.js
new file mode 100644
index 0000000000000000000000000000000000000000..2e5ea4082f693ffc47376fa3a08097a19730ab8b
--- /dev/null
+++ b/static/js/draw.js
@@ -0,0 +1,120 @@
// Canvas the user draws color hints on, plus its 2D context and size.
var canvas = document.getElementById('draw_canvas');
var ctx = canvas.getContext('2d');
var canvasWidth = canvas.width;
var canvasHeight = canvas.height;
// Anchor point of a right-button drag (used for rectangle erasing).
var prevX, prevY;

// Read-only canvas displaying the colorization result, sized to match.
var result_canvas = document.getElementById('result');
var result_ctx = result_canvas.getContext('2d');
result_canvas.width = canvas.width;
result_canvas.height = canvas.height;

// Color picker input; drawing starts in black.
var color_indicator = document.getElementById('color');
ctx.fillStyle = 'black';
color_indicator.value = '#000000';

// Image id = last segment of the current URL path.
var cur_id = window.location.pathname.substring(window.location.pathname.lastIndexOf('/') + 1);
+
// Return a uniformly random integer in [0, max).
function getRandomInt(max) {
    var bound = Math.floor(max);
    return Math.floor(Math.random() * bound);
}
+
// Load the previously saved hint image (if any) and paint it onto the
// drawing canvas; the random query string defeats browser caching.
var init_hint = new Image();
init_hint.addEventListener('load', function() {
    ctx.drawImage(init_hint, 0, 0);
});
init_hint.src = '../static/temp_images/' + cur_id + '/hint.png?' + getRandomInt(100000).toString();

// NOTE(review): canvas elements do not fire 'load' events, so this handler
// and the canvas.onload assignment below look like dead code left from an
// earlier approach — confirm before removing.
result_canvas.addEventListener('load', function(e) {
    var img = new Image();
    img.addEventListener('load', function() {
        ctx.drawImage(img, 0, 0);
    }, false);
    console.log(window.location.pathname);
})


canvas.onload = function (e) {
    var img = new Image();
    img.addEventListener('load', function() {
        ctx.drawImage(img, 0, 0);
    }, false);
    console.log(window.location.pathname);
    //img.src = ;
}
+
// Wipe every hint drawn so far from the hint canvas.
function reset() {
    var w = canvasWidth;
    var h = canvasHeight;
    ctx.clearRect(0, 0, w, h);
}
+
// Translate a mouse event's client coordinates into canvas pixel
// coordinates, compensating for any CSS scaling of the element.
function getMousePos(canvas, evt) {
    var bounds = canvas.getBoundingClientRect();
    return {
        x: (evt.clientX - bounds.left) / (bounds.right - bounds.left) * canvas.width,
        y: (evt.clientY - bounds.top) / (bounds.bottom - bounds.top) * canvas.height
    };
}
+
// POST the current hint canvas to the server and draw the returned
// colorized image onto the result canvas.
function colorize() {
    var file_id = document.location.pathname;
    // Hint canvas serialized as a PNG data-URL.
    var image = canvas.toDataURL();

    $.post("/colorize", { save_file_id: file_id, save_image: image}).done(function( data ) {
        //console.log(document.location.origin + '/img/' + data)
        //window.open(document.location.origin + '/img/' + data, '_blank');
        //result.src = data;
        var img = new Image();
        img.addEventListener('load', function() {
            result_ctx.drawImage(img, 0, 0);
        }, false);
        // The server responds with an image URL (or data-URL) to display.
        img.src = data;
    });
}
+
// Left button: paint a single 1x1 hint pixel in the current fill color.
// Right button: remember the drag origin for rectangle erasing.
canvas.addEventListener('mousedown', function(e) {
    var mousePos = getMousePos(canvas, e);
    if (e.button == 0) {
        ctx.fillRect(mousePos['x'], mousePos['y'], 1, 1);
    }

    if (e.button == 2) {
        prevX = mousePos['x']
        prevY = mousePos['y']
    }

})

// Right-button release: erase the rectangle dragged out since mousedown.
// NOTE(review): width/height go negative when dragging up or left —
// confirm clearRect handles negative extents in the target browsers.
canvas.addEventListener('mouseup', function(e) {
    if (e.button == 2) {
        var mousePos = getMousePos(canvas, e);
        var diff_width = mousePos['x'] - prevX;
        var diff_height = mousePos['y'] - prevY;

        ctx.clearRect(prevX, prevY, diff_width, diff_height);
    }
})


// Suppress the browser context menu so right-drag can be used for erasing.
canvas.addEventListener('contextmenu', function(evt) {
    evt.preventDefault();
})
+
// Switch the active drawing color and mirror it in the picker widget.
function color(color_value){
    color_indicator.value = color_value;
    ctx.fillStyle = color_value;
}
+
// Keep the drawing color in sync when the user picks a new one.
color_indicator.oninput = function() {
    color(this.value);
}
+
// Convert an [r, g, b] triple (0-255 each) to a '#rrggbb' hex string.
// Fix: the previous version dropped leading zeros (e.g. [0, 0, 255]
// became '#ff'), producing invalid CSS colors for dark picks.
function rgbToHex(rgb){
    var packed = (rgb[0] << 16) | (rgb[1] << 8) | rgb[2];
    return '#' + packed.toString(16).padStart(6, '0');
};
+
// Eyedropper: left-clicking the result canvas adopts the clicked pixel's
// color as the new drawing color.
result_canvas.addEventListener('click', function(e) {
    if (e.button == 0) {
        // getImageData(...).data is [r, g, b, a] for the 1x1 region.
        var cur_pixel = result_ctx.getImageData(e.offsetX, e.offsetY, 1, 1).data;
        color(rgbToHex(cur_pixel));
    }
    })
diff --git a/static/temp_images/.gitkeep b/static/temp_images/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/templates/drawing.html b/templates/drawing.html
new file mode 100644
index 0000000000000000000000000000000000000000..5df1fa3a18670e24b9aa17b0ffc144e704bd11ea
--- /dev/null
+++ b/templates/drawing.html
@@ -0,0 +1,206 @@
+
+
+
+
+ Colorization app
+
+
+
+
+
+
+
+
+Left click - colorize, right hold - remove with rectangle, left click on result - use corresponding color.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/templates/submit.html b/templates/submit.html
new file mode 100644
index 0000000000000000000000000000000000000000..913deca099c3d32e4610dfbc275db75992708570
--- /dev/null
+++ b/templates/submit.html
@@ -0,0 +1,11 @@
+
diff --git a/templates/upload.html b/templates/upload.html
new file mode 100644
index 0000000000000000000000000000000000000000..a396d3aa3ab329d6869a115427f963e8454273ee
--- /dev/null
+++ b/templates/upload.html
@@ -0,0 +1,20 @@
+
+
+
+
+
+
diff --git a/train.py b/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcc5b818984ecff568eb2101ec8dc06575450854
--- /dev/null
+++ b/train.py
@@ -0,0 +1,304 @@
+import torch
+import torch.nn as nn
+import torch.optim as optim
+import numpy as np
+import albumentations as albu
+import argparse
+import datetime
+
+from utils.utils import open_json, weights_init, weights_init_spectr, generate_mask
+from model.models import Colorizer, Generator, Content, Discriminator
+from model.extractor import get_seresnext_extractor
+from dataset.datasets import TrainDataset, FineTuningDataset
+
def get_dataloaders(data_path, transforms, batch_size, fine_tuning, mult_number):
    """Build the training dataloader and, optionally, the fine-tuning one.

    NOTE(review): this definition is shadowed by a re-definition of
    ``get_dataloaders`` further down in this module; only the later one is
    actually used at runtime.

    Args:
        data_path: root folder of the prepared dataset.
        transforms: albumentations pipeline applied to every sample.
        batch_size: samples per batch for both loaders.
        fine_tuning: when True, also build the fine-tuning dataloader.
        mult_number: forwarded to TrainDataset (presumably the number of
            sketch variants per image — confirm against TrainDataset).

    Returns:
        (train_dataloader, finetuning_dataloader); the second element is
        None unless ``fine_tuning`` is True.
    """
    train_dataset = TrainDataset(data_path, transforms, mult_number)
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

    finetuning_dataloader = None
    if fine_tuning:
        finetuning_dataset = FineTuningDataset(data_path, transforms)
        finetuning_dataloader = torch.utils.data.DataLoader(finetuning_dataset, batch_size=batch_size, shuffle=True)

    return train_dataloader, finetuning_dataloader
+
+
def parse_args(argv=None):
    """Parse command-line options for training.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:].
            (New, backward-compatible parameter — passing a list makes the
            function testable without touching global state.)

    Returns:
        argparse.Namespace with ``path`` (dataset root, required),
        ``fine_tuning`` (bool, default False) and ``gpu`` (bool, default
        False).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--path", required=True, help="dataset path")
    parser.add_argument('-ft', '--fine_tuning', dest='fine_tuning', action='store_true')
    parser.add_argument('-g', '--gpu', dest='gpu', action='store_true')
    parser.set_defaults(fine_tuning=False)
    parser.set_defaults(gpu=False)

    return parser.parse_args(argv)
+
def get_transforms():
    """Return the training augmentation pipeline (albumentations): a random
    512x512 crop followed by a horizontal flip with probability 0.5."""
    return albu.Compose([albu.RandomCrop(512, 512, always_apply = True), albu.HorizontalFlip(p = 0.5)], p = 1.)
+
def get_dataloaders(data_path, transforms, batch_size, fine_tuning, mult_number):
    """Build the training dataloader and, optionally, the fine-tuning one.

    (This re-definition shadows the earlier ``get_dataloaders`` and is the
    one actually called from ``__main__``.)

    Fix: ``finetuning_dataloader`` was previously assigned only inside the
    ``if fine_tuning:`` branch, so calling with ``fine_tuning=False``
    raised UnboundLocalError at the return statement. It now defaults to
    None, matching the earlier definition's behavior.

    Args:
        data_path: root folder of the prepared dataset.
        transforms: albumentations pipeline applied to every sample.
        batch_size: samples per batch for both loaders.
        fine_tuning: when True, also build the fine-tuning dataloader.
        mult_number: forwarded to TrainDataset (presumably the number of
            sketch variants per image — confirm against TrainDataset).

    Returns:
        (train_dataloader, finetuning_dataloader); the second element is
        None when ``fine_tuning`` is False.
    """
    train_dataset = TrainDataset(data_path, transforms, mult_number)
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

    finetuning_dataloader = None
    if fine_tuning:
        finetuning_dataset = FineTuningDataset(data_path, transforms)
        finetuning_dataloader = torch.utils.data.DataLoader(finetuning_dataset, batch_size=batch_size, shuffle=True)

    return train_dataloader, finetuning_dataloader
+
def get_models(device):
    """Construct the three networks used during training.

    Returns:
        (colorizer, discriminator, content), all moved to *device*. The
        VGG content network is frozen (eval mode, requires_grad=False)
        since it is only a fixed feature extractor for the perceptual loss.
    """
    generator = Generator()
    extractor = get_seresnext_extractor()
    colorizer = Colorizer(generator, extractor)

    # The feature extractor is not trained; keep it in eval mode.
    colorizer.extractor_eval()
    colorizer = colorizer.to(device)

    discriminator = Discriminator().to(device)

    content = Content('model/vgg16-397923af.pth').eval().to(device)
    for param in content.parameters():
        param.requires_grad = False

    return colorizer, discriminator, content
+
def set_weights(colorizer, discriminator):
    """Initialize weights: Xavier for the generator's convolutions,
    pretrained weights for the extractor, and Xavier on the spectral-norm
    ``weight_bar`` tensors for the discriminator."""
    colorizer.generator.apply(weights_init)
    colorizer.load_extractor_weights(torch.load('model/extractor.pth'))

    discriminator.apply(weights_init_spectr)
+
def generator_loss(disc_output, true_labels, main_output, guide_output, real_image, content_gen, content_true, dist_loss = nn.L1Loss(), content_dist_loss = nn.MSELoss(), class_loss = nn.BCEWithLogitsLoss()):
    """Combined generator objective.

    Sums an L1 reconstruction term for the main and guide decoder outputs,
    an adversarial BCE term against the discriminator logits, and an MSE
    perceptual term on content (VGG) features.
    """
    reconstruction = dist_loss(main_output, real_image)
    guide_reconstruction = dist_loss(guide_output, real_image)

    adversarial = class_loss(disc_output, true_labels)

    perceptual = content_dist_loss(content_gen, content_true)

    # Reconstruction dominates; the guide branch is slightly down-weighted.
    return 10 * (reconstruction + 0.9 * guide_reconstruction) + adversarial + perceptual
+
def get_optimizers(colorizer, discriminator, generator_lr, discriminator_lr):
    """Create Adam optimizers (betas 0.5/0.9, the usual GAN setting) for
    the colorizer's generator and for the discriminator."""
    gen_params = colorizer.generator.parameters()
    disc_params = discriminator.parameters()

    optimizer_for_generator = optim.Adam(gen_params, lr=generator_lr, betas=(0.5, 0.9))
    optimizer_for_discriminator = optim.Adam(disc_params, lr=discriminator_lr, betas=(0.5, 0.9))

    return optimizer_for_generator, optimizer_for_discriminator
+
def generator_step(inputs, colorizer, discriminator, content, loss_function, optimizer, device, white_penalty = True):
    """One optimization step for the generator.

    Freezes the discriminator, runs the colorizer on the concatenated
    (sketch, distance field, hint) input, and minimizes ``loss_function``
    plus an optional penalty discouraging near-white output in regions
    that are not white in the ground truth.

    Args:
        inputs: (bw, color, hint, dfm) batch of tensors.
        colorizer: model whose ``.generator`` is being trained.
        discriminator: frozen here; supplies adversarial logits.
        content: VGG feature extractor for the perceptual term.
        loss_function: callable with the signature of ``generator_loss``.
        optimizer: optimizer over ``colorizer.generator`` parameters.
        device: torch device string.
        white_penalty: when True, add the white-region penalty.

    Returns:
        float value of the total generator loss for this batch.
    """
    # Only the generator should receive gradients during this step.
    for p in discriminator.parameters():
        p.requires_grad = False
    for p in colorizer.generator.parameters():
        p.requires_grad = True

    colorizer.generator.zero_grad()

    bw, color, hint, dfm = inputs
    bw, color, hint, dfm = bw.to(device), color.to(device), hint.to(device), dfm.to(device)

    # The generator returns the full output and an auxiliary guide output.
    fake, guide = colorizer(torch.cat([bw, dfm, hint], 1))

    logits_fake = discriminator(fake)
    # The generator wants its output labeled as real (1).
    y_real = torch.ones((bw.size(0), 1), device = device)

    content_fake = content(fake)
    with torch.no_grad():
        content_true = content(color)

    # NOTE: this local name shadows the module-level generator_loss().
    generator_loss = loss_function(logits_fake, y_real, fake, guide, color, content_fake, content_true)

    if white_penalty:
        # Mask = 1 where the ground truth is NOT near-white (a pixel counts
        # as white when all three channels exceed 0.85).
        mask = (~((color > 0.85).float().sum(dim = 1) == 3).unsqueeze(1).repeat((1, 3, 1, 1 ))).float()
        # fake is in [-1, 1]; (fake + 1) / 2 rescales to [0, 1]. Penalizes
        # bright/near-white generator output inside non-white regions.
        white_zones = mask * (fake + 1) / 2
        # NOTE: rebinds the ``white_penalty`` parameter with the penalty value.
        white_penalty = (torch.pow(white_zones.sum(dim = 1), 2).sum(dim = (1, 2)) / (mask.sum(dim = (1, 2, 3)) + 1)).mean()

        generator_loss += white_penalty

    generator_loss.backward()

    optimizer.step()

    return generator_loss.item()
+
def discriminator_step(inputs, colorizer, discriminator, optimizer, device, loss_function = nn.BCEWithLogitsLoss()):
    """One optimization step for the discriminator.

    Freezes the generator, produces a fake batch under no_grad, and
    minimizes BCE on real vs. fake logits with one-sided label smoothing
    (real targets are 0.9 rather than 1.0).

    Returns:
        float value of the summed real + fake discriminator loss.
    """
    # Only the discriminator should receive gradients during this step.
    for p in discriminator.parameters():
        p.requires_grad = True
    for p in colorizer.generator.parameters():
        p.requires_grad = False

    discriminator.zero_grad()

    bw, color, hint, dfm = inputs
    bw, color, hint, dfm = bw.to(device), color.to(device), hint.to(device), dfm.to(device)

    # One-sided label smoothing: real images are labeled 0.9.
    y_real = torch.full((bw.size(0), 1), 0.9, device = device)

    y_fake = torch.zeros((bw.size(0), 1), device = device)

    with torch.no_grad():
        fake_color, _ = colorizer(torch.cat([bw, dfm, hint], 1))
        # NOTE(review): detach() returns a new tensor; this call discards
        # its result (harmless — the forward already ran under no_grad).
        fake_color.detach()

    logits_fake = discriminator(fake_color)
    logits_real = discriminator(color)

    fake_loss = loss_function(logits_fake, y_fake)
    real_loss = loss_function(logits_real, y_real)

    discriminator_loss = real_loss + fake_loss

    discriminator_loss.backward()
    optimizer.step()

    return discriminator_loss.item()
+
def decrease_lr(optimizer, rate):
    """Divide every param group's learning rate in *optimizer* by *rate*."""
    for param_group in optimizer.param_groups:
        param_group['lr'] = param_group['lr'] / rate
+
def set_lr(optimizer, value):
    """Overwrite the learning rate of every param group with *value*."""
    for param_group in optimizer.param_groups:
        param_group['lr'] = value
+
def train(colorizer, discriminator, content, dataloader, epochs, colorizer_optimizer, discriminator_optimizer, lr_decay_epoch = -1, device = 'cpu'):
    """Alternating adversarial training loop.

    Discriminator and generator steps strictly alternate over batches, so
    each network is updated on every second batch. At ``lr_decay_epoch``
    both learning rates are divided by 10 (once).

    Args:
        colorizer / discriminator / content: as built by get_models().
        dataloader: yields (bw, color, hint, dfm) batches.
        epochs: number of passes over ``dataloader``.
        colorizer_optimizer / discriminator_optimizer: from get_optimizers().
        lr_decay_epoch: epoch index at which to decay both LRs (-1 = never).
        device: torch device string.
    """
    colorizer.generator.train()
    discriminator.train()

    # True -> next batch goes to the discriminator, False -> the generator.
    disc_step = True

    for epoch in range(epochs):
        if (epoch == lr_decay_epoch):
            decrease_lr(colorizer_optimizer, 10)
            decrease_lr(discriminator_optimizer, 10)

        sum_disc_loss = 0
        sum_gen_loss = 0

        for n, inputs in enumerate(dataloader):
            if n % 5 == 0:
                print(datetime.datetime.now().time())
                # n // 2 + 1 approximates the number of steps each network
                # has taken so far under the alternating schedule.
                print('Step : %d Discr loss: %.4f Gen loss : %.4f \n'%(n, sum_disc_loss / (n // 2 + 1), sum_gen_loss / (n // 2 + 1)))


            if disc_step:
                step_loss = discriminator_step(inputs, colorizer, discriminator, discriminator_optimizer, device)
                sum_disc_loss += step_loss
            else:
                step_loss = generator_step(inputs, colorizer, discriminator, content, generator_loss, colorizer_optimizer, device)
                sum_gen_loss += step_loss

            # Flip between discriminator and generator updates.
            disc_step = disc_step ^ True

        # NOTE(review): ``n`` is the last batch index of the inner loop;
        # this raises NameError if the dataloader yielded no batches.
        print(datetime.datetime.now().time())
        print('Epoch : %d Discr loss: %.4f Gen loss : %.4f \n'%(epoch, sum_disc_loss / (n // 2 + 1), sum_gen_loss / (n // 2 + 1)))
+
+
def fine_tuning_step(data_iter, colorizer, discriminator, gen_optimizer, disc_optimizer, device, loss_function = nn.BCEWithLogitsLoss()):
    """One fine-tuning round on the fine-tuning dataset (no hints).

    Runs five discriminator updates (real samples vs. generator output
    produced with an all-zero hint), then a single adversarial-only
    generator update.

    Fixes vs. the previous version:
    - ``data_iter.next()`` (the Python-2 iterator protocol, no longer
      exposed by modern DataLoader iterators) replaced with the builtin
      ``next(data_iter)``, which works on any iterator.
    - Dropped a no-op ``fake_color_manga.detach()`` whose result was
      discarded (the forward already ran under no_grad).

    Args:
        data_iter: iterator yielding (bw, dfm, color_for_real) batches.
        colorizer / discriminator: the networks being fine-tuned.
        gen_optimizer / disc_optimizer: their optimizers.
        device: torch device string.
        loss_function: adversarial criterion (BCE with logits).
    """
    # Phase 1: train the discriminator only.
    for p in discriminator.parameters():
        p.requires_grad = True
    for p in colorizer.generator.parameters():
        p.requires_grad = False

    for cur_disc_step in range(5):
        discriminator.zero_grad()

        bw, dfm, color_for_real = next(data_iter)
        bw, dfm, color_for_real = bw.to(device), dfm.to(device), color_for_real.to(device)

        # One-sided label smoothing for real targets.
        y_real = torch.full((bw.size(0), 1), 0.9, device=device)
        y_fake = torch.zeros((bw.size(0), 1), device=device)

        # No user hints during fine-tuning: feed an all-zero hint tensor.
        empty_hint = torch.zeros(bw.shape[0], 4, bw.shape[2], bw.shape[3]).float().to(device)

        with torch.no_grad():
            fake_color_manga, _ = colorizer(torch.cat([bw, dfm, empty_hint], 1))

        logits_fake = discriminator(fake_color_manga)
        logits_real = discriminator(color_for_real)

        fake_loss = loss_function(logits_fake, y_fake)
        real_loss = loss_function(logits_real, y_real)
        discriminator_loss = real_loss + fake_loss

        discriminator_loss.backward()
        disc_optimizer.step()

    # Phase 2: one adversarial-only generator update.
    for p in discriminator.parameters():
        p.requires_grad = False
    for p in colorizer.generator.parameters():
        p.requires_grad = True

    colorizer.generator.zero_grad()

    bw, dfm, _ = next(data_iter)
    bw, dfm = bw.to(device), dfm.to(device)

    y_real = torch.ones((bw.size(0), 1), device=device)

    empty_hint = torch.zeros(bw.shape[0], 4, bw.shape[2], bw.shape[3]).float().to(device)

    fake_manga, _ = colorizer(torch.cat([bw, dfm, empty_hint], 1))

    logits_fake = discriminator(fake_manga)
    adv_loss = loss_function(logits_fake, y_real)

    generator_loss = adv_loss

    generator_loss.backward()
    gen_optimizer.step()
+
+
+
def fine_tuning(colorizer, discriminator, content, dataloader, iterations, colorizer_optimizer, discriminator_optimizer, data_iter, device = 'cpu'):
    """Fine-tuning loop.

    Keeps alternating supervised GAN steps on ``dataloader`` and, every 10
    batches (when n % 10 == 5), mixes in an unsupervised round on the
    fine-tuning data via fine_tuning_step().

    Args:
        dataloader: supervised (bw, color, hint, dfm) batches.
        iterations: stop after this many supervised batches.
        data_iter: iterator over the fine-tuning dataset.
        content: VGG extractor forwarded to generator_step.
    """
    colorizer.generator.train()
    discriminator.train()

    # Alternate discriminator / generator updates, as in train().
    disc_step = True

    for n, inputs in enumerate(dataloader):

        if n == iterations:
            return

        if disc_step:
            discriminator_step(inputs, colorizer, discriminator, discriminator_optimizer, device)
        else:
            generator_step(inputs, colorizer, discriminator, content, generator_loss, colorizer_optimizer, device)

        disc_step = disc_step ^ True

        # Periodically adapt to the fine-tuning (target-domain) data.
        if n % 10 == 5:
            fine_tuning_step(data_iter, colorizer, discriminator, colorizer_optimizer, discriminator_optimizer, device)
+
if __name__ == '__main__':
    args = parse_args()
    config = open_json('configs/train_config.json')

    # Device comes solely from the -g/--gpu flag (no availability check).
    if args.gpu:
        device = 'cuda'
    else:
        device = 'cpu'

    augmentations = get_transforms()

    train_dataloader, ft_dataloader = get_dataloaders(args.path, augmentations, config['batch_size'], args.fine_tuning, config['number_of_mults'])

    colorizer, discriminator, content = get_models(device)
    set_weights(colorizer, discriminator)

    gen_optimizer, disc_optimizer = get_optimizers(colorizer, discriminator, config['generator_lr'], config['discriminator_lr'])

    # Main adversarial training phase.
    train(colorizer, discriminator, content, train_dataloader, config['epochs'], gen_optimizer, disc_optimizer, config['lr_decrease_epoch'], device)

    # Optional fine-tuning phase with a lower generator learning rate.
    if args.fine_tuning:
        set_lr(gen_optimizer, config["finetuning_generator_lr"])
        fine_tuning(colorizer, discriminator, content, train_dataloader, config['finetuning_iterations'], gen_optimizer, disc_optimizer, iter(ft_dataloader), device)

    # NOTE(review): the checkpoint filename is the bare time-of-day string
    # (contains ':'), which is not a valid filename on Windows — confirm
    # POSIX-only deployment is intended.
    torch.save(colorizer.generator.state_dict(), str(datetime.datetime.now().time()))
\ No newline at end of file
diff --git a/utils/__pycache__/utils.cpython-39.pyc b/utils/__pycache__/utils.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e1fde55f1c47e79a90ffcdcd638490ac68382654
Binary files /dev/null and b/utils/__pycache__/utils.cpython-39.pyc differ
diff --git a/utils/dataset_utils.py b/utils/dataset_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5313a25d62f08fe4fa400c58afa1597ba06c592
--- /dev/null
+++ b/utils/dataset_utils.py
@@ -0,0 +1,141 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import cv2
+import snowy
+import os
+
+
def get_resized_image(img, size):
    """Resize *img* so its shorter side equals *size*, keeping aspect ratio
    (the longer side is rounded up). Grayscale inputs are expanded to three
    identical channels; float32 results are clipped to [0, 1] in place.
    """
    if len(img.shape) == 2:
        img = np.repeat(np.expand_dims(img, 2), 3, 2)

    if img.shape[0] < img.shape[1]:
        # Height is the shorter side.
        scale = img.shape[0] / size
        new_width = int(np.ceil(img.shape[1] / scale))
        img = cv2.resize(img, (new_width, size), interpolation=cv2.INTER_AREA)
    else:
        # Width is the shorter side (or the image is square).
        scale = img.shape[1] / size
        new_height = int(np.ceil(img.shape[0] / scale))
        img = cv2.resize(img, (size, new_height), interpolation=cv2.INTER_AREA)

    if img.dtype == 'float32':
        np.clip(img, 0, 1, out=img)

    return img
+
+
def get_sketch_image(img, sketcher, mult_val):
    """Run *sketcher* on *img*, forwarding ``mult`` only when *mult_val*
    is truthy (otherwise the sketcher's own default multiplier is used)."""
    kwargs = {'mult': mult_val} if mult_val else {}
    return sketcher.get_sketch_with_resize(img, **kwargs)
+
+
def get_dfm_image(sketch):
    """Distance-field map of a binary sketch via a signed distance
    transform (snowy), rescaled to [0, 1]. The ``1 - sketch`` inversion
    presumably means line pixels are 0 and background is 1 in the sketch —
    confirm against XDoGSketcher output."""
    dfm_image = snowy.unitize(snowy.generate_sdf(np.expand_dims(1 - sketch, 2) != 0)).squeeze()
    return dfm_image
+
def get_sketch(image, sketcher, dfm, mult = None):
    """Produce a uint8 sketch of *image* and, when *dfm* is True, the
    matching uint8 distance-field map (otherwise None)."""
    sketch_image = get_sketch_image(image, sketcher, mult)

    dfm_image = get_dfm_image(sketch_image) if dfm else None

    # Convert from [0, 1] floats to 8-bit images for saving.
    sketch_image = (sketch_image * 255).astype('uint8')
    if dfm_image is not None:
        dfm_image = (dfm_image * 255).astype('uint8')

    return sketch_image, dfm_image
+
def get_sketches(image, sketcher, mult_list, dfm):
    """Lazily yield one (sketch, dfm) pair per multiplier in *mult_list*."""
    return (get_sketch(image, sketcher, dfm, mult) for mult in mult_list)
+
+
def create_resized_dataset(source_path, target_path, side_size):
    """Resize every image in *source_path* (shorter side -> *side_size*)
    and write it as a PNG into *target_path*, skipping files whose output
    already exists. Unreadable files are reported and skipped.

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    it now catches only ``Exception``.
    """
    for image_name in os.listdir(source_path):
        new_image_name = image_name[:image_name.rfind('.')] + '.png'
        new_path = os.path.join(target_path, new_image_name)

        if os.path.exists(new_path):
            continue

        try:
            image = cv2.imread(os.path.join(source_path, image_name))

            # cv2.imread returns None for unreadable / non-image files.
            if image is None:
                raise Exception()

            image = get_resized_image(image, side_size)

            cv2.imwrite(new_path, image)
        except Exception:
            print('Failed to process {}'.format(image_name))
+
+
def create_sketches_dataset(source_path, target_path, sketcher, mult_list, dfm = False):
    """Generate sketch (and optional distance-field) PNGs for every image
    in *source_path*, one per multiplier in *mult_list*, into *target_path*.

    Fix: the bare ``except:`` is narrowed to ``except Exception:`` so it no
    longer swallows KeyboardInterrupt/SystemExit.
    """
    for image_name in os.listdir(source_path):
        try:
            image = cv2.imread(os.path.join(source_path, image_name))

            # cv2.imread returns None for unreadable / non-image files.
            if image is None:
                raise Exception()

            stem = image_name[:image_name.rfind('.')]
            for number, (sketch_image, dfm_image) in enumerate(get_sketches(image, sketcher, mult_list, dfm)):
                new_sketch_name = stem + '_' + str(number) + '.png'
                cv2.imwrite(os.path.join(target_path, new_sketch_name), sketch_image)

                if dfm:
                    dfm_name = stem + '_' + str(number) + '_dfm.png'
                    cv2.imwrite(os.path.join(target_path, dfm_name), dfm_image)

        except Exception:
            print('Failed to process {}'.format(image_name))
+
+
def create_dataset(source_path, target_path, sketcher, mult_list, side_size, dfm = False):
    """Build a paired dataset under *target_path*: resized color images in
    ``color/`` and their sketches (plus optional distance fields) in ``bw/``.

    Fixes: bare ``except:`` narrowed to ``except Exception:``; directory
    creation uses ``os.makedirs(..., exist_ok=True)`` instead of an
    exists-check (avoids the check/create race).
    """
    color_path = os.path.join(target_path, 'color')
    sketch_path = os.path.join(target_path, 'bw')

    os.makedirs(color_path, exist_ok=True)
    os.makedirs(sketch_path, exist_ok=True)

    for image_name in os.listdir(source_path):
        stem = image_name[:image_name.rfind('.')]

        try:
            image = cv2.imread(os.path.join(source_path, image_name))

            # cv2.imread returns None for unreadable / non-image files.
            if image is None:
                raise Exception()

            resized_image = get_resized_image(image, side_size)
            cv2.imwrite(os.path.join(color_path, stem + '.png'), resized_image)

            for number, (sketch_image, dfm_image) in enumerate(get_sketches(resized_image, sketcher, mult_list, dfm)):
                new_sketch_name = stem + '_' + str(number) + '.png'
                cv2.imwrite(os.path.join(sketch_path, new_sketch_name), sketch_image)

                if dfm:
                    dfm_name = stem + '_' + str(number) + '_dfm.png'
                    cv2.imwrite(os.path.join(sketch_path, dfm_name), dfm_image)

        except Exception:
            print('Failed to process {}'.format(image_name))
+
\ No newline at end of file
diff --git a/utils/utils.py b/utils/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..128e3575775d6afa1a58391f85b4d0bdce67f728
--- /dev/null
+++ b/utils/utils.py
@@ -0,0 +1,102 @@
+import torch
+import torch.nn as nn
+import numpy as np
+import scipy.stats as stats
+import cv2
+import json
+import patoolib
+import re
+from pathlib import Path
+from shutil import rmtree
+
def weights_init(m):
    """Xavier-uniform init for Conv2d weights; other modules untouched.
    Intended for use with ``Module.apply``."""
    if 'Conv2d' in m.__class__.__name__:
        nn.init.xavier_uniform_(m.weight.data)
+
def weights_init_spectr(m):
    """Xavier-uniform init for spectral-norm wrapped Conv2d layers, which
    store their raw (pre-normalization) weight in ``weight_bar``."""
    if 'Conv2d' in m.__class__.__name__:
        nn.init.xavier_uniform_(m.weight_bar.data)
+
def generate_mask(height, width, mu = 1, sigma = 0.0005, prob = 0.5, full = True, full_prob = 0.01):
    """Sample a random binary hint mask of shape (1, height, width).

    With probability ``full_prob`` (only when ``full``) every pixel is 1.
    Otherwise, with probability ``prob`` a sparse random mask is drawn
    whose density is set by a truncated-normal threshold around ``mu``;
    with probability ``1 - prob`` the mask is all zeros.
    """
    threshold_dist = stats.truncnorm((0 - mu) / sigma, (1 - mu) / sigma, loc=mu, scale=sigma)

    if full and np.random.binomial(1, p=full_prob) == 1:
        return torch.ones(1, height, width).float()

    if np.random.binomial(1, p=prob) == 1:
        return torch.rand(1, height, width).ge(threshold_dist.rvs(1)[0]).float()

    return torch.zeros(1, height, width).float()
+
def resize_pad(img, size = 512):
    """Resize *img* so its shorter side equals *size*, then pad the longer
    side up to the next multiple of 32 (with edge-maximum padding).

    Returns:
        (img, pad) where ``pad`` is (bottom_pad, right_pad) in pixels.
    """
    # Normalize channel layout: grayscale -> 3 channels, drop alpha.
    if len(img.shape) == 2:
        img = np.expand_dims(img, 2)
    if img.shape[2] == 1:
        img = np.repeat(img, 3, 2)
    if img.shape[2] == 4:
        img = img[:, :, :3]

    pad = None

    if img.shape[0] < img.shape[1]:
        # Height is the shorter side: fix height at *size*, pad width.
        scale = img.shape[0] / size
        width = int(np.ceil(img.shape[1] / scale))
        img = cv2.resize(img, (width, size), interpolation=cv2.INTER_AREA)

        padded_width = width
        while padded_width % 32 != 0:
            padded_width += 1
        pad = (0, padded_width - width)

        img = np.pad(img, ((0, 0), (0, pad[1]), (0, 0)), 'maximum')
    else:
        # Width is the shorter side: fix width at *size*, pad height.
        scale = img.shape[1] / size
        height = int(np.ceil(img.shape[0] / scale))
        img = cv2.resize(img, (size, height), interpolation=cv2.INTER_AREA)

        padded_height = height
        while padded_height % 32 != 0:
            padded_height += 1
        pad = (padded_height - height, 0)

        img = np.pad(img, ((0, pad[0]), (0, 0), (0, 0)), 'maximum')

    if img.dtype == 'float32':
        np.clip(img, 0, 1, out=img)

    return img, pad
+
def open_json(file):
    """Load and return the JSON document stored at *file*."""
    with open(file) as handle:
        return json.load(handle)
+
def extract_cbr(file, out_dir):
    """Unpack a comic archive (.cbr/.cbz/.rar/.zip, format auto-detected
    by patool) into *out_dir*."""
    patoolib.extract_archive(file, outdir = out_dir, verbosity = 1, interactive = False)
+
def create_cbz(file_path, files):
    """Pack *files* into an archive at *file_path* (format chosen by
    patool from the extension)."""
    patoolib.create_archive(file_path, files, verbosity = 1, interactive = False)
+
def subfolder_image_search(start_folder):
    """Recursively collect png/jpg paths (case-insensitive) under
    *start_folder*, returned as POSIX-style path strings."""
    matches = Path(start_folder).rglob("*.[pPjJ][nNpP][gG]")
    return [match.as_posix() for match in matches]
+
def remove_folder(folder_path):
    """Recursively delete *folder_path* and all of its contents."""
    rmtree(folder_path)
+
def sorted_alphanumeric(data):
    """Sort strings in natural order: embedded digit runs compare
    numerically and letters case-insensitively (e.g. 'img2' < 'img10')."""
    def natural_key(text):
        chunks = re.split('([0-9]+)', text)
        return [int(chunk) if chunk.isdigit() else chunk.lower() for chunk in chunks]

    return sorted(data, key=natural_key)
\ No newline at end of file
diff --git a/utils/xdog.py b/utils/xdog.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb4615e4da025cef27240b5695c9ab5d3d20342b
--- /dev/null
+++ b/utils/xdog.py
@@ -0,0 +1,68 @@
+from cv2 import resize, INTER_LANCZOS4, INTER_AREA
+from skimage.color import rgb2gray
+import numpy as np
+from scipy.ndimage.filters import gaussian_filter
+from skimage.filters import threshold_otsu
+import matplotlib.pyplot as plt
+
class XDoGSketcher:
    """Extracts line-art sketches from images with the XDoG filter.

    Default parameters can be overridden per call via keyword arguments;
    keys not present in ``self.params`` are silently ignored.
    """

    def __init__(self, gamma = 0.95, phi = 89.25, eps = -0.1, k = 8, sigma = 0.5, mult = 1):
        # 'mult' is the upscale factor used by get_sketch_with_resize;
        # the rest are XDoG filter parameters.
        self.params = {
            'gamma': gamma,
            'phi': phi,
            'eps': eps,
            'k': k,
            'sigma': sigma,
            'mult': mult,
        }

    def _xdog(self, im, **transform_params):
        # Source : https://github.com/CemalUnal/XDoG-Filter
        # Reference : XDoG: An eXtended difference-of-Gaussians compendium including advanced image stylization
        # Link : http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.365.151&rep=rep1&type=pdf

        if im.shape[2] == 3:
            im = rgb2gray(im)

        # Difference of two Gaussians whose sigmas differ by factor k.
        narrow = gaussian_filter(im, transform_params['sigma'])
        wide = gaussian_filter(im, transform_params['sigma'] * transform_params['k'])
        response = narrow - transform_params['gamma'] * wide

        # Soft thresholding with tanh sharpening (phi), then rescale to [0, 1].
        response = (response < transform_params['eps']) * 1.0 \
            + (response >= transform_params['eps']) * (1.0 + np.tanh(transform_params['phi'] * response))
        response -= response.min()
        response /= response.max()

        # Binarize with Otsu's threshold and return as float32.
        otsu_threshold = threshold_otsu(response)
        return (response >= otsu_threshold).astype('float32')

    def get_sketch(self, image, **kwargs):
        """Run XDoG on *image*; kwargs matching self.params override defaults."""
        current_params = dict(self.params)
        for key, value in kwargs.items():
            if key in current_params:
                current_params[key] = value

        return self._xdog(image, **current_params)

    def get_sketch_with_resize(self, image, **kwargs):
        """Upscale by 'mult', run the sketcher, then downscale back to the
        original size (Lanczos up, area-average down)."""
        mult = kwargs['mult'] if 'mult' in kwargs else self.params['mult']

        upscaled = resize(image, (image.shape[1] * mult, image.shape[0] * mult), interpolation = INTER_LANCZOS4)
        sketch = self.get_sketch(upscaled, **kwargs)
        return resize(sketch, (image.shape[1], image.shape[0]), interpolation = INTER_AREA)
+
+
\ No newline at end of file
diff --git a/web.py b/web.py
new file mode 100644
index 0000000000000000000000000000000000000000..a52a3270dc79b2e9201d2081d8d297a6b56c8eb5
--- /dev/null
+++ b/web.py
@@ -0,0 +1,108 @@
+from flask import Flask, request, jsonify, abort, redirect, url_for, render_template, send_file
+from flask_wtf import FlaskForm
+from wtforms import StringField, FileField, BooleanField, DecimalField
+from wtforms.validators import DataRequired
+from flask import after_this_request
+
+import torch
+
+import os
+from model.models import Colorizer, Generator
+from model.extractor import get_seresnext_extractor
+from utils.xdog import XDoGSketcher
+from utils.utils import open_json
+from denoising.denoiser import FFDNetDenoiser
+from datetime import datetime
+
+from inference import colorize_single_image, colorize_images, colorize_cbr
+
# Prefer GPU when available; all models are moved to this device.
if torch.cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'

# Build the colorization network from pretrained generator + extractor
# weights and put it in inference mode.
generator = Generator()
generator.load_state_dict(torch.load('model/generator.pth'))

extractor = get_seresnext_extractor()
extractor.load_state_dict(torch.load('model/extractor.pth'))

colorizer = Colorizer(generator, extractor)
colorizer = colorizer.eval().to(device)

# XDoG sketcher configured from the JSON config (unknown keys ignored).
sketcher = XDoGSketcher()
xdog_config = open_json('configs/xdog_config.json')
for key in xdog_config.keys():
    if key in sketcher.params:
        sketcher.params[key] = xdog_config[key]

denoiser = FFDNetDenoiser(device)


app = Flask(__name__)
# NOTE(review): hard-coded secret keys are acceptable for local use only —
# move to environment variables before any public deployment.
app.config.update(dict(
    SECRET_KEY="lol kek",
    WTF_CSRF_SECRET_KEY="cheburek"
))

# Base keyword arguments forwarded to the colorize_* helpers; per-request
# options are layered on in submit_data().
color_args = {'colorizer':colorizer, 'sketcher':sketcher, 'device':device, 'dfm' : True}
+
class SubmitForm(FlaskForm):
    """Upload form for the main page: the file plus denoise/autohint options."""
    file = FileField(validators=[DataRequired()])
    # Run the FFDNet denoiser on the input before colorizing.
    denoise = BooleanField(default = 'checked')
    denoise_sigma = DecimalField(label = 'Denoise sigma', validators=[DataRequired()], default = 25, places = None)
    # Automatically generate color hints from the input image.
    autohint = BooleanField(default = None)
    autohint_sigma = DecimalField(label = 'Autohint sigma', validators=[DataRequired()], default= 0.0003, places = None)
    ignore_gray = BooleanField(label = 'Ignore gray autohint', default = None)
+
@app.route('/img/<path>')
def show_image(path):
    """Serve a minimal page embedding a colorized image from /static.

    Fix: the route lacked the ``<path>`` converter even though the view
    takes a ``path`` argument, and the returned f-string was truncated
    (its angle-bracketed markup appears to have been stripped, like the
    emptied HTML templates in this patch); it now returns an ``img`` tag
    pointing at the static file.
    """
    return f'<img src="/static/{path}">'
+
@app.route('/', methods=('GET', 'POST'))
def submit_data():
    """Main page: accept an upload, colorize it, and return the result.

    Archives (.cbr/.cbz/.rar/.zip) are colorized page-by-page and sent
    back as a download; single images are saved under static/ and the
    client is redirected to a page displaying them.

    Fix: the image-extension whitelist contained ``',jpeg'`` (comma typo),
    so .jpeg uploads fell through to 'Wrong format'; it is now ``'.jpeg'``.
    """
    form = SubmitForm()
    if form.validate_on_submit():

        input_data = form.file.data

        # Save the upload under a timestamped name to avoid collisions.
        _, ext = os.path.splitext(input_data.filename)
        filename = str(datetime.now()) + ext

        input_data.save(filename)

        # Per-request options layered onto the shared color_args dict.
        # NOTE(review): mutating this module-level dict is not safe if the
        # server ever runs with concurrent request handling — confirm the
        # single-threaded worker assumption.
        color_args['auto_hint'] = form.autohint.data
        color_args['auto_hint_sigma'] = float(form.autohint_sigma.data)
        color_args['ignore_gray'] = form.ignore_gray.data
        color_args['denoiser'] = None

        if form.denoise.data:
            color_args['denoiser'] = denoiser
            color_args['denoiser_sigma'] = float(form.denoise_sigma.data)

        if ext.lower() in ('.cbr', '.cbz', '.rar', '.zip'):
            result_name = colorize_cbr(filename, color_args)
            os.remove(filename)

            # Delete the generated archive once the response has been sent.
            @after_this_request
            def remove_file(response):
                try:
                    os.remove(result_name)
                except Exception as error:
                    app.logger.error("Error removing or closing downloaded file handle", error)
                return response

            return send_file(result_name, mimetype='application/vnd.comicbook-rar', attachment_filename=result_name, as_attachment=True)

        elif ext.lower() in ('.jpg', '.png', '.jpeg'):
            random_name = str(datetime.now()) + '.png'
            new_image_path = os.path.join('static', random_name)

            colorize_single_image(filename, new_image_path, color_args)
            os.remove(filename)

            return redirect(f'/img/{random_name}')
        else:
            return 'Wrong format'

    return render_template('submit.html', form=form)
\ No newline at end of file