Raid41 committed on
Commit
8a587d3
·
1 Parent(s): 9737b75

Upload 66 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .dockerignore +6 -0
  2. .gitignore +10 -0
  3. Dockerfile +11 -0
  4. configs/train_config.json +10 -0
  5. configs/xdog_config.json +8 -0
  6. dataset/__pycache__/datasets.cpython-39.pyc +0 -0
  7. dataset/datasets.py +104 -0
  8. denoising/denoiser.py +113 -0
  9. denoising/functions.py +101 -0
  10. denoising/models.py +100 -0
  11. denoising/models/.gitkeep +0 -0
  12. denoising/utils.py +66 -0
  13. drawing.py +165 -0
  14. inference.py +215 -0
  15. manga/bw/001-0000-0000.png +3 -0
  16. manga/bw/002-0000-0000.png +3 -0
  17. manga/bw/003-0000-0000.png +3 -0
  18. manga/bw/004-0000-0000.png +3 -0
  19. manga/bw/x1-0000-0000.png +3 -0
  20. manga/bw/x2-0000-0000.png +3 -0
  21. manga/bw/x3-0000-0000.png +3 -0
  22. manga/bw/x4-0000-0000.png +3 -0
  23. manga/bw/x5-0000-0000.png +3 -0
  24. manga/color/001-0000-0000.png +3 -0
  25. manga/color/002-0000-0000.png +3 -0
  26. manga/color/003-0000-0000.png +3 -0
  27. manga/color/004-0000-0000.png +3 -0
  28. manga/color/x1-0000-0000.png +3 -0
  29. manga/color/x2-0000-0000.png +3 -0
  30. manga/color/x3-0000-0000.png +3 -0
  31. manga/color/x4-0000-0000.png +3 -0
  32. manga/color/x5-0000-0000.png +3 -0
  33. manga/real_manga/06 - copia.webp +3 -0
  34. manga/real_manga/06.webp +3 -0
  35. manga/real_manga/09 - copia.webp +3 -0
  36. manga/real_manga/11.webp +3 -0
  37. manga/real_manga/12 - copia.webp +3 -0
  38. manga/real_manga/12.webp +3 -0
  39. manga/real_manga/13 - copia.webp +3 -0
  40. manga/real_manga/13.webp +3 -0
  41. manga/real_manga/14 - copia.webp +3 -0
  42. manga/real_manga/14.webp +3 -0
  43. manga/real_manga/OP_(13).webp +3 -0
  44. manga/real_manga/OP_(16).webp +3 -0
  45. manga/real_manga/OP_(18).webp +3 -0
  46. manga/real_manga/OP_(19).webp +3 -0
  47. model/__pycache__/extractor.cpython-39.pyc +0 -0
  48. model/__pycache__/models.cpython-39.pyc +0 -0
  49. model/extractor.pth +3 -0
  50. model/extractor.py +127 -0
.dockerignore ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ *.ipynb
2
+
3
+ model/*.pth
4
+
5
+ temp_colorization/
6
+ __pycache__/
.gitignore ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ *.ipynb
2
+ *.pth
3
+ *.zip
4
+
5
+ __pycache__/
6
+ temp_colorization/
7
+
8
+ static/temp_images/*
9
+
10
+ !.gitkeep
Dockerfile ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM pytorch/pytorch:1.6.0-cuda10.1-cudnn7-runtime
2
+
3
+ RUN apt-get update && apt-get install -y libglib2.0-0 libsm6 libxext6 libxrender-dev
4
+
5
+ COPY . .
6
+
7
+ RUN pip install --no-cache-dir -r ./requirements.txt
8
+
9
+ EXPOSE 5000
10
+
11
+ CMD gunicorn --timeout 200 -w 3 -b 0.0.0.0:5000 drawing:app
configs/train_config.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "generator_lr" : 1e-4,
3
+ "discriminator_lr" : 4e-4,
4
+ "epochs" : 3,
5
+ "lr_decrease_epoch" : 10,
6
+ "finetuning_generator_lr" : 1e-6,
7
+ "finetuning_iterations" : 3500,
8
+ "batch_size" : 5,
9
+ "number_of_mults" : 3
10
+ }
configs/xdog_config.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "sigma" : 0.5,
3
+ "k" : 8,
4
+ "phi" : 89.25,
5
+ "gamma" : 0.95,
6
+ "eps" : -0.1,
7
+ "mult" : 7
8
+ }
dataset/__pycache__/datasets.cpython-39.pyc ADDED
Binary file (3.55 kB). View file
 
dataset/datasets.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import os
3
+ import torchvision.transforms as transforms
4
+ import matplotlib.pyplot as plt
5
+ import numpy as np
6
+
7
+ from utils.utils import generate_mask
8
+
9
class TrainDataset(torch.utils.data.Dataset):
    """Dataset of (bw, color, hint, dfm) tuples for colorizer training.

    Expects ``data_path`` to contain ``color/`` and ``bw/`` subfolders; the
    ``bw`` folder holds each sketch and its distance-field map (``*_dfm``),
    optionally in several pre-rendered variants per color image.
    """

    def __init__(self, data_path, transform=None, mults_amount=1):
        self.data = os.listdir(os.path.join(data_path, 'color'))
        self.data_path = data_path
        # albumentations-style callable: f(image=..., mask=...) -> dict
        self.transform = transform
        # number of pre-rendered sketch variants per color image
        self.mults_amount = mults_amount
        self.ToTensor = transforms.ToTensor()

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        image_name = self.data[idx]

        # Pick one of the pre-rendered sketch variants when several exist.
        if self.mults_amount > 1:
            mult_number = np.random.choice(range(self.mults_amount))
            stem = image_name[:image_name.rfind('.')]
            bw_name = '{}_{}.png'.format(stem, mult_number)
            dfm_name = '{}_{}_dfm.png'.format(stem, mult_number)
        else:
            bw_name = image_name
            dfm_name = os.path.splitext(image_name)[0] + '_dfm.png'

        # BUG FIX: np.expand_dims requires an explicit axis argument, and
        # matplotlib's imread takes no cv2-style grayscale flag (its second
        # positional is `format`). Load the grayscale files and add a
        # trailing channel axis, matching FineTuningDataset.
        bw_img = np.expand_dims(plt.imread(os.path.join(self.data_path, 'bw', bw_name)), 2)
        dfm_img = np.expand_dims(plt.imread(os.path.join(self.data_path, 'bw', dfm_name)), 2)

        # Stack sketch + dfm as one 2-channel "mask" so a single transform
        # call applies identical spatial augmentation to both.
        bw_dfm_img = np.concatenate([bw_img, dfm_img], axis=2)

        color_img = plt.imread(os.path.join(self.data_path, 'color', image_name))
        if self.transform:
            result = self.transform(image=color_img, mask=bw_dfm_img)
            color_img = result['image']
            bw_dfm_img = result['mask']

        dfm_img = bw_dfm_img[:, :, 1]
        bw_img = bw_dfm_img[:, :, 0]

        color_img = self.ToTensor(color_img)
        bw_img = self.ToTensor(bw_img)
        dfm_img = self.ToTensor(dfm_img)

        # Normalize color to [-1, 1]; sketches stay in [0, 1].
        color_img = (color_img - 0.5) / 0.5

        # Simulate sparse user hints: keep only randomly masked color pixels,
        # concatenated with the mask itself as a 4th channel.
        mask = generate_mask(bw_img.shape[1], bw_img.shape[2])
        hint = torch.cat((color_img * mask, mask), 0)

        return bw_img, color_img, hint, dfm_img
58
+
59
class FineTuningDataset(torch.utils.data.Dataset):
    """Dataset pairing real manga sketches with (unrelated) color references.

    ``real_manga/`` provides the sketch + dfm inputs; ``color/`` provides a
    shuffled pool of color images used as references during fine-tuning.
    """

    def __init__(self, data_path, transform=None, mult_amount=1):
        # Keep only the sketches; their '*_dfm' companions are looked up by name.
        self.data = [x for x in os.listdir(os.path.join(data_path, 'real_manga')) if x.find('_dfm') == -1]
        self.color_data = [x for x in os.listdir(os.path.join(data_path, 'color'))]
        self.data_path = data_path
        self.transform = transform
        self.mults_amount = mult_amount

        # Decouple the color reference from the sketch at the same index.
        np.random.shuffle(self.color_data)

        self.ToTensor = transforms.ToTensor()

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        color_img = plt.imread(os.path.join(self.data_path, 'color', self.color_data[idx]))

        image_name = self.data[idx]
        if self.mults_amount > 1:
            # BUG FIX: the drawn variant index (mult_number) was computed but
            # never used -- file names were built with the total variant count
            # instead, so every sample pointed at a nonexistent file.
            mult_number = np.random.choice(range(self.mults_amount))
            stem = image_name[:image_name.rfind('.')]
            bw_name = '{}_{}.png'.format(stem, mult_number)
            dfm_name = '{}_{}_dfm.png'.format(stem, mult_number)
        else:
            bw_name = self.data[idx]
            dfm_name = os.path.splitext(self.data[idx])[0] + '_dfm.png'

        # NOTE(review): the sketch is loaded from image_name, leaving bw_name
        # unused (as in the original) -- confirm whether bw_name should be
        # loaded when mults_amount > 1.
        bw_img = np.expand_dims(plt.imread(os.path.join(self.data_path, 'real_manga', image_name)), 2)
        dfm_img = np.expand_dims(plt.imread(os.path.join(self.data_path, 'real_manga', dfm_name)), 2)

        if self.transform:
            result = self.transform(image=color_img)
            color_img = result['image']

            result = self.transform(image=bw_img, mask=dfm_img)
            bw_img = result['image']
            dfm_img = result['mask']

        color_img = self.ToTensor(color_img)
        bw_img = self.ToTensor(bw_img)
        dfm_img = self.ToTensor(dfm_img)

        # Normalize color to [-1, 1]; sketches stay in [0, 1].
        color_img = (color_img - 0.5) / 0.5

        return bw_img, dfm_img, color_img
denoising/denoiser.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Denoise an image with the FFDNet denoising method
3
+
4
+ Copyright (C) 2018, Matias Tassano <matias.tassano@parisdescartes.fr>
5
+
6
+ This program is free software: you can use, modify and/or
7
+ redistribute it under the terms of the GNU General Public
8
+ License as published by the Free Software Foundation, either
9
+ version 3 of the License, or (at your option) any later
10
+ version. You should have received a copy of this license along
11
+ this program. If not, see <http://www.gnu.org/licenses/>.
12
+ """
13
+ import os
14
+ import argparse
15
+ import time
16
+ import numpy as np
17
+ import cv2
18
+ import torch
19
+ import torch.nn as nn
20
+ from torch.autograd import Variable
21
+ from denoising.models import FFDNet
22
+ from denoising.utils import normalize, variable_to_cv2_image, remove_dataparallel_wrapper, is_rgb
23
+
24
class FFDNetDenoiser:
    """Thin wrapper around a pretrained FFDNet model for one-shot image denoising."""

    def __init__(self, _device, _sigma = 25, _weights_dir = 'denoising/models/', _in_ch = 3):
        # Noise level is stored normalized to [0, 1]; callers pass it in [0, 255].
        self.sigma = _sigma / 255
        self.weights_dir = _weights_dir
        self.channels = _in_ch
        self.device = _device

        self.model = FFDNet(num_input_channels = _in_ch)
        self.load_weights()
        self.model.eval()


    def load_weights(self):
        """Load the pretrained weights matching the channel count onto self.device."""
        weights_name = 'net_rgb.pth' if self.channels == 3 else 'net_gray.pth'
        weights_path = os.path.join(self.weights_dir, weights_name)
        if self.device == 'cuda':
            state_dict = torch.load(weights_path, map_location=torch.device('cpu'))
            device_ids = [0]
            # Checkpoint keys carry a 'module.' prefix (saved from a
            # DataParallel model), so wrap the model the same way here.
            self.model = nn.DataParallel(self.model, device_ids=device_ids).cuda()
        else:
            state_dict = torch.load(weights_path, map_location='cpu')
            # CPU mode: remove the DataParallel wrapper
            state_dict = remove_dataparallel_wrapper(state_dict)
        self.model.load_state_dict(state_dict)

    def get_denoised_image(self, imorig, sigma = None):
        """Denoise *imorig* (HxW or HxWxC array) and return a uint8 image.

        sigma, if given, is the noise level in [0, 255]; otherwise the
        instance default from __init__ is used.
        """
        if sigma is not None:
            cur_sigma = sigma / 255
        else:
            cur_sigma = self.sigma

        # Promote grayscale input to 3 channels (the RGB model expects 3).
        if len(imorig.shape) < 3 or imorig.shape[2] == 1:
            imorig = np.repeat(np.expand_dims(imorig, 2), 3, 2)

        # Cap the longest side at 1200 px to bound memory and runtime.
        if (max(imorig.shape[0], imorig.shape[1]) > 1200):
            ratio = max(imorig.shape[0], imorig.shape[1]) / 1200
            imorig = cv2.resize(imorig, (int(imorig.shape[1] / ratio), int(imorig.shape[0] / ratio)), interpolation = cv2.INTER_AREA)

        # HWC -> CHW
        imorig = imorig.transpose(2, 0, 1)

        # Heuristic: values above 1.2 mean 8-bit data, so rescale to [0, 1].
        if (imorig.max() > 1.2):
            imorig = normalize(imorig)
        # Add the batch dimension.
        imorig = np.expand_dims(imorig, 0)

        # FFDNet downsamples by 2, so both spatial dims must be even; pad by
        # replicating the last row/column and crop back after inference.
        expanded_h = False
        expanded_w = False
        sh_im = imorig.shape
        if sh_im[2]%2 == 1:
            expanded_h = True
            imorig = np.concatenate((imorig, imorig[:, :, -1, :][:, :, np.newaxis, :]), axis=2)

        if sh_im[3]%2 == 1:
            expanded_w = True
            imorig = np.concatenate((imorig, imorig[:, :, :, -1][:, :, :, np.newaxis]), axis=3)


        imorig = torch.Tensor(imorig)


        # Sets data type according to CPU or GPU modes
        if self.device == 'cuda':
            dtype = torch.cuda.FloatTensor
        else:
            dtype = torch.FloatTensor

        imnoisy = imorig.clone()


        with torch.no_grad():
            imorig, imnoisy = imorig.type(dtype), imnoisy.type(dtype)
            nsigma = torch.FloatTensor([cur_sigma]).type(dtype)


            # Estimate noise and subtract it to the input image
            im_noise_estim = self.model(imnoisy, nsigma)
            outim = torch.clamp(imnoisy-im_noise_estim, 0., 1.)

        # Undo the even-size padding.
        if expanded_h:
            imorig = imorig[:, :, :-1, :]
            outim = outim[:, :, :-1, :]
            imnoisy = imnoisy[:, :, :-1, :]

        if expanded_w:
            imorig = imorig[:, :, :, :-1]
            outim = outim[:, :, :, :-1]
            imnoisy = imnoisy[:, :, :, :-1]

        return variable_to_cv2_image(outim)
denoising/functions.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Functions implementing custom NN layers
3
+
4
+ Copyright (C) 2018, Matias Tassano <matias.tassano@parisdescartes.fr>
5
+
6
+ This program is free software: you can use, modify and/or
7
+ redistribute it under the terms of the GNU General Public
8
+ License as published by the Free Software Foundation, either
9
+ version 3 of the License, or (at your option) any later
10
+ version. You should have received a copy of this license along
11
+ this program. If not, see <http://www.gnu.org/licenses/>.
12
+ """
13
+ import torch
14
+ from torch.autograd import Function, Variable
15
+
16
def concatenate_input_noise_map(input, noise_sigma):
    r"""Implements the first layer of FFDNet: pixel-unshuffles the input and
    prepends a constant noise map.

    Each image of the batch of size CxHxW gets converted to 4*CxH/2xW/2 by
    placing each pixel of its non-overlapped 2x2 patches along the channel
    dimension; the per-image noise level fills C extra leading channels.

    Args:
        input: batch containing CxHxW images
        noise_sigma: the value of the pixels of the CxH/2xW/2 noise map

    Returns:
        Tensor of shape N x 5*C x H/2 x W/2.
    """
    N, C, H, W = input.size()
    scale = 2
    patch = scale * scale
    out_c, out_h, out_w = patch * C, H // scale, W // scale
    offsets = ((0, 0), (0, 1), (1, 0), (1, 1))

    # Zero-filled container for the de-interleaved mosaic, allocated on the
    # same device class (CPU/CUDA) as the input.
    if 'cuda' in input.type():
        mosaic = torch.cuda.FloatTensor(N, out_c, out_h, out_w).fill_(0)
    else:
        mosaic = torch.FloatTensor(N, out_c, out_h, out_w).fill_(0)

    # Constant CxH/2xW/2 noise map, one value per batch element.
    noise_map = noise_sigma.view(N, 1, 1, 1).repeat(1, C, out_h, out_w)

    # Scatter each 2x2 patch position into its own channel group.
    for k, (dy, dx) in enumerate(offsets):
        mosaic[:, k:out_c:patch, :, :] = input[:, :, dy::scale, dx::scale]

    return torch.cat((noise_map, mosaic), 1)
54
+
55
class UpSampleFeaturesFunction(Function):
    r"""Extends PyTorch's modules by implementing a torch.autograd.Function.
    This class implements the forward and backward methods of the last layer
    of FFDNet. It basically performs the inverse of
    concatenate_input_noise_map(): it converts each of the images of a
    batch of size CxH/2xW/2 to images of size C/4xHxW
    """
    @staticmethod
    def forward(ctx, input):
        N, Cin, Hin, Win = input.size()
        dtype = input.type()
        sca = 2
        sca2 = sca*sca
        Cout = Cin//sca2
        Hout = Hin*sca
        Wout = Win*sca
        # Offsets of the four pixels of each 2x2 output patch.
        idxL = [[0, 0], [0, 1], [1, 0], [1, 1]]

        assert (Cin%sca2 == 0), 'Invalid input dimensions: number of channels should be divisible by 4'

        # Scatter every group of 4 channels back into a 2x2 spatial patch
        # (inverse of the pixel-unshuffle performed by the first layer).
        result = torch.zeros((N, Cout, Hout, Wout)).type(dtype)
        for idx in range(sca2):
            result[:, :, idxL[idx][0]::sca, idxL[idx][1]::sca] = input[:, idx:Cin:sca2, :, :]

        return result

    @staticmethod
    def backward(ctx, grad_output):
        N, Cg_out, Hg_out, Wg_out = grad_output.size()
        dtype = grad_output.data.type()
        sca = 2
        sca2 = sca*sca
        Cg_in = sca2*Cg_out
        Hg_in = Hg_out//sca
        Wg_in = Wg_out//sca
        idxL = [[0, 0], [0, 1], [1, 0], [1, 1]]

        # Build output
        grad_input = torch.zeros((N, Cg_in, Hg_in, Wg_in)).type(dtype)
        # Populate output: the gradient flows back through the forward
        # rearrangement in reverse (pixel-unshuffle of the incoming gradient).
        for idx in range(sca2):
            grad_input[:, idx:Cg_in:sca2, :, :] = grad_output.data[:, :, idxL[idx][0]::sca, idxL[idx][1]::sca]

        return Variable(grad_input)

# Alias functions
upsamplefeatures = UpSampleFeaturesFunction.apply
denoising/models.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Definition of the FFDNet model and its custom layers
3
+
4
+ Copyright (C) 2018, Matias Tassano <matias.tassano@parisdescartes.fr>
5
+
6
+ This program is free software: you can use, modify and/or
7
+ redistribute it under the terms of the GNU General Public
8
+ License as published by the Free Software Foundation, either
9
+ version 3 of the License, or (at your option) any later
10
+ version. You should have received a copy of this license along
11
+ this program. If not, see <http://www.gnu.org/licenses/>.
12
+ """
13
+ import torch.nn as nn
14
+ from torch.autograd import Variable
15
+ import denoising.functions as functions
16
+
17
class UpSampleFeatures(nn.Module):
    r"""Implements the last layer of FFDNet: re-interleaves the 4*C
    downsampled feature channels back into a CxHxW image (inverse of the
    first layer's pixel-unshuffle).
    """
    def __init__(self):
        super(UpSampleFeatures, self).__init__()
    def forward(self, x):
        # Delegates to the custom autograd Function in denoising.functions.
        return functions.upsamplefeatures(x)
24
+
25
class IntermediateDnCNN(nn.Module):
    r"""DnCNN-style convolutional trunk forming the middle part of FFDNet.

    Accepts 5 input channels (grayscale model) or 15 (RGB model) and emits
    4 or 12 output channels respectively.
    """

    def __init__(self, input_features, middle_features, num_conv_layers):
        super(IntermediateDnCNN, self).__init__()
        self.kernel_size = 3
        self.padding = 1
        self.input_features = input_features
        self.num_conv_layers = num_conv_layers
        self.middle_features = middle_features
        if self.input_features == 5:
            self.output_features = 4  # grayscale model
        elif self.input_features == 15:
            self.output_features = 12  # RGB model
        else:
            raise Exception('Invalid number of input features')

        def conv(cin, cout):
            # All convolutions share the same kernel/padding and drop the
            # bias (batch-norm makes it redundant).
            return nn.Conv2d(in_channels=cin,
                             out_channels=cout,
                             kernel_size=self.kernel_size,
                             padding=self.padding,
                             bias=False)

        modules = [conv(self.input_features, self.middle_features),
                   nn.ReLU(inplace=True)]
        for _ in range(self.num_conv_layers - 2):
            modules.extend([
                conv(self.middle_features, self.middle_features),
                nn.BatchNorm2d(self.middle_features),
                nn.ReLU(inplace=True),
            ])
        modules.append(conv(self.middle_features, self.output_features))
        # NOTE: the attribute name (with its typo) is kept as-is -- it is
        # baked into pretrained checkpoints' state_dict keys.
        self.itermediate_dncnn = nn.Sequential(*modules)

    def forward(self, x):
        return self.itermediate_dncnn(x)
+
68
+ class FFDNet(nn.Module):
69
+ r"""Implements the FFDNet architecture
70
+ """
71
+ def __init__(self, num_input_channels):
72
+ super(FFDNet, self).__init__()
73
+ self.num_input_channels = num_input_channels
74
+ if self.num_input_channels == 1:
75
+ # Grayscale image
76
+ self.num_feature_maps = 64
77
+ self.num_conv_layers = 15
78
+ self.downsampled_channels = 5
79
+ self.output_features = 4
80
+ elif self.num_input_channels == 3:
81
+ # RGB image
82
+ self.num_feature_maps = 96
83
+ self.num_conv_layers = 12
84
+ self.downsampled_channels = 15
85
+ self.output_features = 12
86
+ else:
87
+ raise Exception('Invalid number of input features')
88
+
89
+ self.intermediate_dncnn = IntermediateDnCNN(\
90
+ input_features=self.downsampled_channels,\
91
+ middle_features=self.num_feature_maps,\
92
+ num_conv_layers=self.num_conv_layers)
93
+ self.upsamplefeatures = UpSampleFeatures()
94
+
95
+ def forward(self, x, noise_sigma):
96
+ concat_noise_x = functions.concatenate_input_noise_map(x.data, noise_sigma.data)
97
+ concat_noise_x = Variable(concat_noise_x)
98
+ h_dncnn = self.intermediate_dncnn(concat_noise_x)
99
+ pred_noise = self.upsamplefeatures(h_dncnn)
100
+ return pred_noise
denoising/models/.gitkeep ADDED
File without changes
denoising/utils.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Different utilities such as orthogonalization of weights, initialization of
3
+ loggers, etc
4
+
5
+ Copyright (C) 2018, Matias Tassano <matias.tassano@parisdescartes.fr>
6
+
7
+ This program is free software: you can use, modify and/or
8
+ redistribute it under the terms of the GNU General Public
9
+ License as published by the Free Software Foundation, either
10
+ version 3 of the License, or (at your option) any later
11
+ version. You should have received a copy of this license along
12
+ this program. If not, see <http://www.gnu.org/licenses/>.
13
+ """
14
+ import numpy as np
15
+ import cv2
16
+
17
+
18
def variable_to_cv2_image(varim):
    r"""Convert a 1x{1,3}xHxW torch Variable/tensor in [0, 1] to a uint8 image.

    Grayscale input yields an HxW array; RGB input is converted to BGR
    (OpenCV channel order).

    Args:
        varim: a torch.autograd.Variable
    """
    channels = varim.size()[1]
    if channels == 1:
        # Drop batch/channel dims, then scale to 8-bit.
        img = varim.data.cpu().numpy()[0, 0, :] * 255.
    elif channels == 3:
        chw = varim.data.cpu().numpy()[0]
        img = cv2.cvtColor(chw.transpose(1, 2, 0), cv2.COLOR_RGB2BGR)
        img = img * 255.
    else:
        raise Exception('Number of color channels not supported')
    return img.clip(0, 255).astype(np.uint8)
34
+
35
+
36
def normalize(data):
    """Map 8-bit pixel data from [0, 255] to float32 values in [0, 1]."""
    return np.float32(data / 255.)
38
+
39
def remove_dataparallel_wrapper(state_dict):
    r"""Strip the 'module.' prefix that nn.DataParallel adds to every key.

    Args:
        state_dict: a torch.nn.DataParallel state dictionary

    Returns:
        OrderedDict with the same values keyed by the unwrapped names.
    """
    from collections import OrderedDict

    # k[7:] drops the leading 'module.' added by the DataParallel wrapper.
    return OrderedDict((key[7:], value) for key, value in state_dict.items())
54
+
55
def is_rgb(im_path):
    r"""Return True if the image at *im_path* has genuinely distinct color
    channels (i.e. it is not a grayscale image stored as 3 channels).
    """
    from skimage.io import imread
    im = imread(im_path)
    rgb = False
    if len(im.shape) == 3:
        # Compare R and B against G; any mismatch means real color content.
        same_rg = np.allclose(im[..., 0], im[..., 1])
        same_gb = np.allclose(im[..., 2], im[..., 1])
        rgb = not (same_rg and same_gb)
    print("rgb: {}".format(rgb))
    print("im shape: {}".format(im.shape))
    return rgb
drawing.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from datetime import datetime
3
+ import base64
4
+ import random
5
+ import string
6
+ import shutil
7
+ import torch
8
+ import matplotlib.pyplot as plt
9
+ import numpy as np
10
+ from flask import Flask, request, jsonify, abort, redirect, url_for, render_template, send_file, Response
11
+ from flask_wtf import FlaskForm
12
+ from wtforms import StringField, FileField, BooleanField, DecimalField
13
+ from wtforms.validators import DataRequired
14
+ from flask import after_this_request
15
+
16
+ from model.models import Colorizer, Generator
17
+ from model.extractor import get_seresnext_extractor
18
+ from utils.xdog import XDoGSketcher
19
+ from utils.utils import open_json
20
+ from denoising.denoiser import FFDNetDenoiser
21
+ from inference import process_image_with_hint
22
+ from utils.utils import resize_pad
23
+ from utils.dataset_utils import get_sketch
24
+
25
def generate_id(size=25, chars=string.ascii_letters + string.digits):
    """Return a cryptographically random alphanumeric string of *size* chars."""
    rng = random.SystemRandom()
    return ''.join(rng.choice(chars) for _ in range(size))
27
+
28
def generate_unique_id(current_ids = set()):
    """Return a fresh id not present in *current_ids* and register it there.

    NOTE: the mutable default is intentional -- it acts as a module-lifetime
    registry of every id handed out so far.
    """
    while True:
        candidate = generate_id()
        if candidate not in current_ids:
            break

    current_ids.add(candidate)

    return candidate
36
+
37
# Flask application + one-time model loading, done at import time so each
# gunicorn worker pays the weight-loading cost only once.
app = Flask(__name__)
app.config.update(dict(
    SECRET_KEY="lol kek",
    WTF_CSRF_SECRET_KEY="cheburek"
))

# Prefer GPU when available; the same device string is used for both the
# colorizer and the denoiser below.
if torch.cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'

# TorchScript-serialized colorizer network.
colorizer = torch.jit.load('./model/colorizer.zip', map_location=torch.device(device))

# XDoG sketch extractor, configured from JSON; keys absent from
# sketcher.params are silently ignored.
sketcher = XDoGSketcher()
xdog_config = open_json('configs/xdog_config.json')
for key in xdog_config.keys():
    if key in sketcher.params:
        sketcher.params[key] = xdog_config[key]

# FFDNet denoiser used to clean uploads before sketch extraction.
denoiser = FFDNetDenoiser(device)

# Shared argument bundle passed to inference.process_image_with_hint.
color_args = {'colorizer':colorizer, 'sketcher':sketcher, 'device':device, 'dfm' : True, 'auto_hint' : False, 'ignore_gray' : False, 'denoiser' : denoiser, 'denoiser_sigma' : 25}
59
+
60
+
61
class SubmitForm(FlaskForm):
    # Single required file-upload field; CSRF protection comes from FlaskForm.
    file = FileField(validators=[DataRequired(), ])
63
+
64
def preprocess_image(file_id, ext):
    """Prepare an uploaded image for interactive colorization.

    Resizes/pads and denoises the original, extracts the XDoG sketch plus its
    dfm map, saves everything under static/temp_images/<file_id>/, deletes the
    original upload and writes an empty (all-zero) RGBA hint canvas.
    """
    directory_path = os.path.join('static', 'temp_images', file_id)
    original_path = os.path.join(directory_path, 'original') + ext
    original_image = plt.imread(original_path)

    resized_image, _ = resize_pad(original_image)
    resized_image = denoiser.get_denoised_image(resized_image, 25)
    bw, dfm = get_sketch(resized_image, sketcher, True)

    # Encode the final size in the file name so the drawing page can recover
    # the canvas dimensions without reopening the image.
    resized_name = 'resized_' + str(resized_image.shape[0]) + '_' + str(resized_image.shape[1]) + '.png'
    plt.imsave(os.path.join(directory_path, resized_name), resized_image)
    plt.imsave(os.path.join(directory_path, 'bw.png'), bw, cmap = 'gray')
    plt.imsave(os.path.join(directory_path, 'dfm.png'), dfm, cmap = 'gray')
    os.remove(original_path)

    # Blank RGBA hint: all zeros means no user strokes yet.
    empty_hint = np.zeros((resized_image.shape[0], resized_image.shape[1], 4), dtype = np.float32)
    plt.imsave(os.path.join(directory_path, 'hint.png'), empty_hint)
81
+
82
@app.route('/', methods=['GET', 'POST'])
def upload():
    """Landing page: accept an image upload, preprocess it, redirect to /draw.

    Rejects extensions other than .jpg/.jpeg/.png with HTTP 400; any failure
    during preprocessing cleans up the temp directory and also returns 400.
    """
    form = SubmitForm()
    if form.validate_on_submit():
        input_data = form.file.data

        _, ext = os.path.splitext(input_data.filename)

        if ext not in ('.jpg', '.png', '.jpeg'):
            return abort(400)

        file_id = generate_unique_id()
        directory = os.path.join('static', 'temp_images', file_id)
        original_filename = os.path.join(directory, 'original') + ext

        try:
            os.mkdir(directory)
            input_data.save(original_filename)

            preprocess_image(file_id, ext)

            return redirect(f'/draw/{file_id}')

        # BUG FIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors, keep the cleanup,
        # and surface the cause in the log.
        except Exception as e:
            print('Failed to colorize:', e)
            if os.path.exists(directory):
                shutil.rmtree(directory)
            return abort(400)


    return render_template("upload.html", form = form)
113
+
114
@app.route('/img/<file_id>')
def show_image(file_id):
    # Serve a tiny HTML snippet pointing at the colorized result; the random
    # query string defeats browser caching between re-colorizations.
    directory = os.path.join('static', 'temp_images', str(file_id))
    if not os.path.exists(directory):
        abort(404)
    cache_buster = random.randint(1, 1000000)
    return f'<img src="/static/temp_images/{file_id}/colorized.png?{cache_buster}">'
119
+
120
def colorize_image(file_id):
    """Run the colorizer on the saved sketch/dfm/hint for *file_id*.

    Reads bw.png and dfm.png as single-channel arrays (the [..., :1] slice
    keeps a trailing channel axis) plus the RGBA hint canvas, and delegates
    to inference.process_image_with_hint with the global color_args.
    """
    directory_path = os.path.join('static', 'temp_images', file_id)

    bw = plt.imread(os.path.join(directory_path, 'bw.png'))[..., :1]
    dfm = plt.imread(os.path.join(directory_path, 'dfm.png'))[..., :1]
    hint = plt.imread(os.path.join(directory_path, 'hint.png'))

    return process_image_with_hint(bw, dfm, hint, color_args)
128
+
129
@app.route('/colorize', methods=['POST'])
def colorize():
    """Receive the hint canvas (base64 data-URL), colorize, return the result path."""

    file_id = request.form['save_file_id']
    file_id = file_id[file_id.rfind('/') + 1:]

    img_data = request.form['save_image']
    # Strip the 'data:image/png;base64,' prefix of the data URL.
    img_data = img_data[img_data.find(',') + 1:]

    directory_path = os.path.join('static', 'temp_images', file_id)

    with open(os.path.join(directory_path, 'hint.png'), "wb") as im:
        # BUG FIX: base64.decodestring was removed in Python 3.9;
        # b64decode is the supported equivalent.
        im.write(base64.b64decode(str.encode(img_data)))

    result = colorize_image(file_id)

    plt.imsave(os.path.join(directory_path, 'colorized.png'), result)

    # Random query string busts the browser cache for the refreshed image.
    src_path = f'../static/temp_images/{file_id}/colorized.png?{random.randint(1, 1000000)}'

    return src_path
150
+
151
@app.route('/draw/<file_id>', methods=['GET', 'POST'])
def paintapp(file_id):
    """Render the interactive drawing page for a previously uploaded image.

    NOTE(review): only GET is handled; a POST to this route falls through
    and returns None -- confirm whether POST was meant to be supported.
    """
    if request.method == 'GET':

        directory_path = os.path.join('static', 'temp_images', str(file_id))
        if not os.path.exists(directory_path):
            abort(404)

        # The resized image's name encodes its dimensions:
        # 'resized_<height>_<width>.png' (written by preprocess_image).
        resized_name = [x for x in os.listdir(directory_path) if x.startswith('resized_')][0]

        split = os.path.splitext(resized_name)[0].split('_')
        width = int(split[2])
        height = int(split[1])

        return render_template("drawing.html", height = height, width = width, img_path = os.path.join('temp_images', str(file_id), resized_name))
inference.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import numpy as np
4
+ from utils.dataset_utils import get_sketch
5
+ from utils.utils import resize_pad, generate_mask, extract_cbr, create_cbz, sorted_alphanumeric, subfolder_image_search, remove_folder
6
+ from torchvision.transforms import ToTensor
7
+ import os
8
+ import matplotlib.pyplot as plt
9
+ import argparse
10
+ from model.models import Colorizer, Generator
11
+ from model.extractor import get_seresnext_extractor
12
+ from utils.xdog import XDoGSketcher
13
+ from utils.utils import open_json
14
+ import sys
15
+ from denoising.denoiser import FFDNetDenoiser
16
+
17
def colorize_without_hint(inp, color_args):
    """Colorize a sketch without user hints, optionally refining with auto-hints.

    inp: tensor of shape (1, C, H, W) holding the sketch (and dfm) channels.
    color_args: dict providing 'colorizer', 'device', 'auto_hint',
        'auto_hint_sigma' and 'ignore_gray'.

    Returns the colorizer's color output tensor.
    """
    # First pass: an all-zero 4-channel hint (RGB + mask) means "no hints".
    i_hint = torch.zeros(1, 4, inp.shape[2], inp.shape[3]).float().to(color_args['device'])

    with torch.no_grad():
        fake_color, _ = color_args['colorizer'](torch.cat([inp, i_hint], 1))

    if color_args['auto_hint']:
        # Sample a sparse random mask and feed the first-pass colors back in
        # as hints for a refining second pass.
        mask = generate_mask(fake_color.shape[2], fake_color.shape[3], full = False, prob = 1, sigma = color_args['auto_hint_sigma']).unsqueeze(0)
        mask = mask.to(color_args['device'])


        if color_args['ignore_gray']:
            # Keep only hint pixels that are clearly colored: the summed
            # pairwise channel differences must exceed 60/255. The "== 2"
            # trick intersects the random mask with the colorfulness mask.
            diff1 = torch.abs(fake_color[:, 0] - fake_color[:, 1])
            diff2 = torch.abs(fake_color[:, 0] - fake_color[:, 2])
            diff3 = torch.abs(fake_color[:, 1] - fake_color[:, 2])
            mask = ((mask + ((diff1 + diff2 + diff3) > 60 / 255).float().unsqueeze(1)) == 2).float()


        i_hint = torch.cat([fake_color * mask, mask], 1)

        with torch.no_grad():
            fake_color, _ = color_args['colorizer'](torch.cat([inp, i_hint], 1))

    return fake_color
41
+
42
+
43
def process_image(image, color_args, to_tensor = ToTensor()):
    """Colorize a raw image array end-to-end: resize/pad, denoise, sketch, colorize.

    Returns an HxWx3 float array in [0, 1], cropped back to the pre-padding size.
    """
    # resize_pad also reports how much padding was added on each axis.
    image, pad = resize_pad(image)

    if color_args['denoiser'] is not None:
        image = color_args['denoiser'].get_denoised_image(image, color_args['denoiser_sigma'])

    # Extract the sketch (bw) and its dfm companion from the image.
    bw, dfm = get_sketch(image, color_args['sketcher'], color_args['dfm'])

    bw = to_tensor(bw).unsqueeze(0).to(color_args['device'])
    dfm = to_tensor(dfm).unsqueeze(0).to(color_args['device'])

    output = colorize_without_hint(torch.cat([bw, dfm], 1), color_args)
    # Network output is in [-1, 1]; rescale to [0, 1] for saving.
    result = output[0].cpu().permute(1, 2, 0).numpy() * 0.5 + 0.5

    # Undo the padding added by resize_pad (bottom rows, then right columns).
    if pad[0] != 0:
        result = result[:-pad[0]]
    if pad[1] != 0:
        result = result[:, :-pad[1]]

    return result
63
+
64
def colorize_with_hint(inp, color_args):
    """Run the colorizer on an input that already contains the hint channels.

    inp is the channel-wise concatenation of sketch, dfm and hint tensors;
    returns the predicted color tensor.
    """
    model = color_args['colorizer']
    with torch.no_grad():
        colored, _ = model(inp)
    return colored
69
+
70
def process_image_with_hint(bw, dfm, hint, color_args, to_tensor = ToTensor()):
    """Colorize a prepared sketch using a user-supplied RGBA hint image.

    bw/dfm are HxWx1 arrays, hint is an HxWx4 RGBA array whose alpha channel
    marks the hinted pixels. Returns an HxWx3 float array in [0, 1].
    """
    device = color_args['device']
    sketch = to_tensor(bw).unsqueeze(0).to(device)
    dfm_map = to_tensor(dfm).unsqueeze(0).to(device)

    # Map hint colors from [0, 1] to [-1, 1] and zero out unhinted pixels.
    colors = (torch.FloatTensor(hint[..., :3]).permute(2, 0, 1) - 0.5) / 0.5
    alpha = torch.FloatTensor(hint[..., 3:]).permute(2, 0, 1)
    hint_tensor = torch.cat([colors * alpha, alpha], 0).unsqueeze(0).to(device)

    prediction = colorize_with_hint(torch.cat([sketch, dfm_map, hint_tensor], 1), color_args)
    # Network output is in [-1, 1]; rescale to [0, 1].
    return prediction[0].cpu().permute(1, 2, 0).numpy() * 0.5 + 0.5
82
+
83
def colorize_single_image(file_path, save_path, color_args):
    """Colorize one image file and write the result to save_path.

    Returns True on success, False when the image could not be processed.
    Ctrl-C still terminates the whole run immediately.
    """
    try:
        image = plt.imread(file_path)

        colorization = process_image(image, color_args)

        plt.imsave(save_path, colorization)

        return True
    except KeyboardInterrupt:
        sys.exit(0)
    except Exception as e:
        # Narrowed from a bare `except:` so SystemExit and other
        # BaseExceptions are not silently swallowed; the reason is now
        # included in the failure message to aid debugging.
        print('Failed to colorize {}: {}'.format(file_path, e))
        return False
97
+
98
def colorize_images(source_path, target_path, color_args):
    """Colorize every file in source_path, writing PNG results into target_path."""
    for entry in os.listdir(source_path):
        src = os.path.join(source_path, entry)

        # Results are always written as PNG, whatever the input extension.
        stem, extension = os.path.splitext(entry)
        target_name = entry if extension == '.png' else stem + '.png'

        colorize_single_image(src, os.path.join(target_path, target_name), color_args)
110
+
111
def colorize_cbr(file_path, color_args):
    """Colorize every page of a comic archive and repack it as a .cbz.

    Extracts the archive into a temp folder, colorizes each image found
    (keeping the original page when colorization fails), writes
    <name>_colorized.cbz next to the input and removes the temp folder.

    Returns the path of the created archive.
    """
    file_name = os.path.splitext(os.path.basename(file_path))[0]
    temp_path = 'temp_colorization'

    if not os.path.exists(temp_path):
        os.makedirs(temp_path)
    extract_cbr(file_path, temp_path)

    images = subfolder_image_search(temp_path)

    result_images = []
    for image_path in images:
        save_path = image_path

        # Colorized results are always saved as PNG.
        path, ext = os.path.splitext(save_path)
        if (ext != '.png'):
            save_path = path + '.png'

        res_flag = colorize_single_image(image_path, save_path, color_args)

        # Fall back to the original page if colorization failed.
        result_images.append(save_path if res_flag else image_path)


    result_name = os.path.join(os.path.dirname(file_path), file_name + '_colorized.cbz')

    create_cbz(result_name, result_images)

    remove_folder(temp_path)

    return result_name
141
+
142
def parse_args():
    """Parse command-line options for the colorization script.

    Returns an argparse.Namespace with:
      path             image, folder or cbr/cbz archive to colorize (required)
      generator        path to the generator weights
      extractor        path to the extractor weights
      sigma            auto-hint mask sigma (float)
      gpu, autohint, ignore   boolean feature flags (default False)
      denoiser         whether to run FFDNet denoising (default True)
      denoiser_sigma   FFDNet noise level (int)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--path", required=True)
    parser.add_argument("-gen", "--generator", default = 'model/generator.pth')
    parser.add_argument("-ext", "--extractor", default = 'model/extractor.pth')
    parser.add_argument("-s", "--sigma", type = float, default = 0.003)
    # store_true / store_false already establish the defaults, so the
    # previous redundant parser.set_defaults(...) calls were removed.
    parser.add_argument('-g', '--gpu', dest = 'gpu', action = 'store_true')
    parser.add_argument('-ah', '--auto', dest = 'autohint', action = 'store_true')
    parser.add_argument('-ig', '--ignore_grey', dest = 'ignore', action = 'store_true')
    parser.add_argument('-nd', '--no_denoise', dest = 'denoiser', action = 'store_false')
    parser.add_argument("-ds", "--denoiser_sigma", type = int, default = 25)
    args = parser.parse_args()

    return args
160
+
161
+
162
if __name__ == "__main__":

    args = parse_args()

    # Select the compute device from the CLI flag.
    if args.gpu:
        device = 'cuda'
    else:
        device = 'cpu'

    # Assemble the colorizer from its two pretrained parts.
    generator = Generator()
    generator.load_state_dict(torch.load(args.generator))

    extractor = get_seresnext_extractor()
    extractor.load_state_dict(torch.load(args.extractor))

    colorizer = Colorizer(generator, extractor)
    colorizer = colorizer.eval().to(device)

    # Override the sketcher's defaults with the project's XDoG config.
    sketcher = XDoGSketcher()
    xdog_config = open_json('configs/xdog_config.json')
    for key in xdog_config.keys():
        if key in sketcher.params:
            sketcher.params[key] = xdog_config[key]

    denoiser = None
    if args.denoiser:
        denoiser = FFDNetDenoiser(device, args.denoiser_sigma)

    color_args = {'colorizer':colorizer, 'sketcher':sketcher, 'auto_hint':args.autohint, 'auto_hint_sigma':args.sigma,\
                  'ignore_gray':args.ignore, 'device':device, 'dfm' : True, 'denoiser':denoiser, 'denoiser_sigma' : args.denoiser_sigma}


    if os.path.isdir(args.path):
        # A directory: colorize every file into a 'colorization' subfolder.
        colorization_path = os.path.join(args.path, 'colorization')
        if not os.path.exists(colorization_path):
            os.makedirs(colorization_path)

        colorize_images(args.path, colorization_path, color_args)

    elif os.path.isfile(args.path):

        split = os.path.splitext(args.path)

        if split[1].lower() in ('.cbr', '.cbz', '.rar', '.zip'):
            colorize_cbr(args.path, color_args)
        # BUG FIX: the original tuple contained ',jpeg' (comma instead of
        # dot), so .jpeg files were wrongly rejected as 'Wrong format'.
        elif split[1].lower() in ('.jpg', '.png', '.jpeg'):
            new_image_path = split[0] + '_colorized' + '.png'

            colorize_single_image(args.path, new_image_path, color_args)
        else:
            print('Wrong format')
    else:
        print('Wrong path')
215
+
manga/bw/001-0000-0000.png ADDED

Git LFS Details

  • SHA256: b6bd52e61f3f10f698ccbf074becf58c34ff03415377e70e1bf89f1f2768d9bc
  • Pointer size: 132 Bytes
  • Size of remote file: 1.88 MB
manga/bw/002-0000-0000.png ADDED

Git LFS Details

  • SHA256: 91f59c85df765ac3ef9435aadfa2f6d0a6226d643db89af950997ef754f6048a
  • Pointer size: 132 Bytes
  • Size of remote file: 1.72 MB
manga/bw/003-0000-0000.png ADDED

Git LFS Details

  • SHA256: 9a02d1dd89549f704a79588b8f6b1d0d32a7ba8b7f0d663f8a71e375416d3a42
  • Pointer size: 131 Bytes
  • Size of remote file: 301 kB
manga/bw/004-0000-0000.png ADDED

Git LFS Details

  • SHA256: 2ce252a56fd3dc9745b517ce5a7023cb78ed23a985d4c4dc9ef884258688620f
  • Pointer size: 131 Bytes
  • Size of remote file: 206 kB
manga/bw/x1-0000-0000.png ADDED

Git LFS Details

  • SHA256: c347e8b43de1c3d2615c9b4efbf30bf8dfe8ca16f0a5aee327bea898b0b19e84
  • Pointer size: 132 Bytes
  • Size of remote file: 1.68 MB
manga/bw/x2-0000-0000.png ADDED

Git LFS Details

  • SHA256: 10957841fd23284c3a92beaed594efc7ec6d2680483d9bebd7055e9bb7885f26
  • Pointer size: 132 Bytes
  • Size of remote file: 3.06 MB
manga/bw/x3-0000-0000.png ADDED

Git LFS Details

  • SHA256: 44a56370209961a1ac4e69ad672113c7302a991b7d0b1d36ecdddb718aed8367
  • Pointer size: 132 Bytes
  • Size of remote file: 1.59 MB
manga/bw/x4-0000-0000.png ADDED

Git LFS Details

  • SHA256: 2df448fad5bc289361cbfe2fabd3cb666103a27a201f7a447b52ca85ae9362f0
  • Pointer size: 132 Bytes
  • Size of remote file: 1.52 MB
manga/bw/x5-0000-0000.png ADDED

Git LFS Details

  • SHA256: 006617b354cc17d1ff7c65ce19410bc7b1632cf5369ad5165deacad77a739c03
  • Pointer size: 132 Bytes
  • Size of remote file: 1.71 MB
manga/color/001-0000-0000.png ADDED

Git LFS Details

  • SHA256: 6e38ea5f3cbb32c13262dda878724f3733e2b46bfdcb13a0af09aa4fb36cd420
  • Pointer size: 132 Bytes
  • Size of remote file: 6.81 MB
manga/color/002-0000-0000.png ADDED

Git LFS Details

  • SHA256: 1a289ac6f4c1b477e987eef553232e70cefd536b62cf7c0662e27d84c55b3f02
  • Pointer size: 132 Bytes
  • Size of remote file: 6.26 MB
manga/color/003-0000-0000.png ADDED

Git LFS Details

  • SHA256: 6a5a43ea8c8d2ef1c784a82dc96050fed27e673a0d6baba50d593cc487c73f75
  • Pointer size: 131 Bytes
  • Size of remote file: 871 kB
manga/color/004-0000-0000.png ADDED

Git LFS Details

  • SHA256: 42c1ccdad566868f845369c5c10268d269b943c86de81b189d8b80fa25554443
  • Pointer size: 131 Bytes
  • Size of remote file: 584 kB
manga/color/x1-0000-0000.png ADDED

Git LFS Details

  • SHA256: 4fee65ac5d942ccbcf46250042077897a1a1d0288dbcbd4d4e01a95667b4321f
  • Pointer size: 132 Bytes
  • Size of remote file: 5.74 MB
manga/color/x2-0000-0000.png ADDED

Git LFS Details

  • SHA256: 3934c58bf1876be001bd806f637d7722dbd6ba34ba9cc2aed98288019771a248
  • Pointer size: 133 Bytes
  • Size of remote file: 11.3 MB
manga/color/x3-0000-0000.png ADDED

Git LFS Details

  • SHA256: e8690c5617dbb1215cfdc0f70a2e31156794b0abf21dde7f9c50bbe2675de71e
  • Pointer size: 132 Bytes
  • Size of remote file: 5.25 MB
manga/color/x4-0000-0000.png ADDED

Git LFS Details

  • SHA256: c5d0f633082871aa456f2d7864d0d9f4fa1ca9ae2b1cc229df4927b38a4df40c
  • Pointer size: 132 Bytes
  • Size of remote file: 4.98 MB
manga/color/x5-0000-0000.png ADDED

Git LFS Details

  • SHA256: acba9510555cebb18fbdb220ef3f22724617893f35d8a3fe1c509ed31f257dbb
  • Pointer size: 132 Bytes
  • Size of remote file: 5.63 MB
manga/real_manga/06 - copia.webp ADDED

Git LFS Details

  • SHA256: e68ca338e75c67f2b185d180827b7e06c3e7fe8cf8424750c3581603bb314eaf
  • Pointer size: 130 Bytes
  • Size of remote file: 96 kB
manga/real_manga/06.webp ADDED

Git LFS Details

  • SHA256: e68ca338e75c67f2b185d180827b7e06c3e7fe8cf8424750c3581603bb314eaf
  • Pointer size: 130 Bytes
  • Size of remote file: 96 kB
manga/real_manga/09 - copia.webp ADDED

Git LFS Details

  • SHA256: 7adabde7144919e799f80059db5c295e318f7f463e49f2f8d9d2b96e29d155b5
  • Pointer size: 131 Bytes
  • Size of remote file: 135 kB
manga/real_manga/11.webp ADDED

Git LFS Details

  • SHA256: 808e2b43f64275f8308d595503ec4b275b91e0eaa8e065714244b840e60822f0
  • Pointer size: 131 Bytes
  • Size of remote file: 129 kB
manga/real_manga/12 - copia.webp ADDED

Git LFS Details

  • SHA256: 77fac1072dec146c69648b649848ba868cccc8852a41b0e9cbc0bdd1f1878c27
  • Pointer size: 131 Bytes
  • Size of remote file: 149 kB
manga/real_manga/12.webp ADDED

Git LFS Details

  • SHA256: 77fac1072dec146c69648b649848ba868cccc8852a41b0e9cbc0bdd1f1878c27
  • Pointer size: 131 Bytes
  • Size of remote file: 149 kB
manga/real_manga/13 - copia.webp ADDED

Git LFS Details

  • SHA256: 99a1d040bd25ff4df102bde65451a4e0a71de0de77b134a5820a67bda8bef3af
  • Pointer size: 131 Bytes
  • Size of remote file: 136 kB
manga/real_manga/13.webp ADDED

Git LFS Details

  • SHA256: 99a1d040bd25ff4df102bde65451a4e0a71de0de77b134a5820a67bda8bef3af
  • Pointer size: 131 Bytes
  • Size of remote file: 136 kB
manga/real_manga/14 - copia.webp ADDED

Git LFS Details

  • SHA256: 5b954c13098fda8eb659915944857c7918740c334ca86e8885d6dfbb70478036
  • Pointer size: 131 Bytes
  • Size of remote file: 151 kB
manga/real_manga/14.webp ADDED

Git LFS Details

  • SHA256: 5b954c13098fda8eb659915944857c7918740c334ca86e8885d6dfbb70478036
  • Pointer size: 131 Bytes
  • Size of remote file: 151 kB
manga/real_manga/OP_(13).webp ADDED

Git LFS Details

  • SHA256: 3b781d94c36c67bd7d13685c421dae7d7e448c308eb8e044af6d776c5adeab88
  • Pointer size: 131 Bytes
  • Size of remote file: 214 kB
manga/real_manga/OP_(16).webp ADDED

Git LFS Details

  • SHA256: 674d4b51806acfac9265ebfd30e66e8154e12d90418e4ec4ff7eb7cccc780770
  • Pointer size: 131 Bytes
  • Size of remote file: 128 kB
manga/real_manga/OP_(18).webp ADDED

Git LFS Details

  • SHA256: ac8b9da7926bad9cef67dc6b0d2af3def0f90e216685c19a99f0d97307b6925a
  • Pointer size: 130 Bytes
  • Size of remote file: 94.8 kB
manga/real_manga/OP_(19).webp ADDED

Git LFS Details

  • SHA256: c956de243bb5688c37ccff1764cec458b602870019a7adb9bf565ef43621643f
  • Pointer size: 131 Bytes
  • Size of remote file: 239 kB
model/__pycache__/extractor.cpython-39.pyc ADDED
Binary file (3.95 kB). View file
 
model/__pycache__/models.cpython-39.pyc ADDED
Binary file (13.5 kB). View file
 
model/extractor.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee3c59f02ac8c59298fd9b819fa33d2efa168847e15e4be39b35c286f7c18607
3
+ size 6340842
model/extractor.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import math
4
+
5
+ '''https://github.com/blandocs/Tag2Pix/blob/master/model/pretrained.py'''
6
+
7
+ # Pretrained version
8
+ class Selayer(nn.Module):
9
+ def __init__(self, inplanes):
10
+ super(Selayer, self).__init__()
11
+ self.global_avgpool = nn.AdaptiveAvgPool2d(1)
12
+ self.conv1 = nn.Conv2d(inplanes, inplanes // 16, kernel_size=1, stride=1)
13
+ self.conv2 = nn.Conv2d(inplanes // 16, inplanes, kernel_size=1, stride=1)
14
+ self.relu = nn.ReLU(inplace=True)
15
+ self.sigmoid = nn.Sigmoid()
16
+
17
+ def forward(self, x):
18
+ out = self.global_avgpool(x)
19
+ out = self.conv1(out)
20
+ out = self.relu(out)
21
+ out = self.conv2(out)
22
+ out = self.sigmoid(out)
23
+
24
+ return x * out
25
+
26
+
27
class BottleneckX_Origin(nn.Module):
    """SE-ResNeXt bottleneck block.

    1x1 conv (reduce) -> grouped 3x3 conv (cardinality groups) -> 1x1 conv
    (expand to planes * 4) -> squeeze-and-excitation -> residual addition.
    """

    # Channel expansion factor: the block outputs planes * 4 channels.
    expansion = 4

    def __init__(self, inplanes, planes, cardinality, stride=1, downsample=None):
        super(BottleneckX_Origin, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes * 2)

        # Grouped convolution implements the ResNeXt cardinality split.
        self.conv2 = nn.Conv2d(planes * 2, planes * 2, kernel_size=3, stride=stride,
                               padding=1, groups=cardinality, bias=False)
        self.bn2 = nn.BatchNorm2d(planes * 2)

        self.conv3 = nn.Conv2d(planes * 2, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)

        # Channel-attention gate applied before the residual addition.
        self.selayer = Selayer(planes * 4)

        self.relu = nn.ReLU(inplace=True)
        # Optional projection for the identity path when the shape changes.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        out = self.selayer(out)

        # Project the residual when a downsample module was provided.
        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
71
+
72
class SEResNeXt_extractor(nn.Module):
    """Truncated SE-ResNeXt used as a feature extractor.

    Only the stem plus the first two residual stages are kept, so the
    returned feature map is downsampled 8x relative to the input.
    """

    def __init__(self, block, layers, input_channels=3, cardinality=32):
        super(SEResNeXt_extractor, self).__init__()
        self.cardinality = cardinality
        self.inplanes = 64
        self.input_channels = input_channels

        # Stem: 7x7 stride-2 conv + 3x3 stride-2 max pool (4x downsample).
        self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Only the first two of the usual four ResNeXt stages are built.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)

        # He-style initialization for convolutions, unit scale for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` bottleneck blocks; only the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut to match the residual branch's shape.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, self.cardinality, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, self.cardinality))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)

        return x
125
+
126
def get_seresnext_extractor():
    """Build the single-channel SE-ResNeXt-50 feature extractor (stages 1-2 only)."""
    stage_depths = [3, 4, 6, 3]
    return SEResNeXt_extractor(BottleneckX_Origin, stage_depths, 1)