code stringlengths 17 6.64M |
|---|
@DATASETS.register_module()
class AdobeComp1kDataset(BaseMattingDataset):
    """Adobe Composition-1k matting dataset.

    Annotation entries are read from a json file and every path value in each
    entry is made absolute by prepending ``data_prefix``. Whether the merged
    image is composited online or loaded from disk is decided by the
    pipeline.

    Example for online comp-1k dataset::

        [
            {
                "alpha": 'alpha/000.png',
                "fg": 'fg/000.png',
                "bg": 'bg/000.png'
            },
            ...
        ]

    Example for offline comp-1k dataset (adds a "merged" key)::

        [
            {
                "alpha": 'alpha/000.png',
                "merged": 'merged/000.png',
                "fg": 'fg/000.png',
                "bg": 'bg/000.png'
            },
            ...
        ]
    """

    def load_annotations(self):
        """Load annotations for Adobe Composition-1k dataset.

        It loads image paths from a json file.

        Returns:
            list[dict]: Each dict maps data keys (e.g. "alpha", "fg", "bg")
                to absolute file paths.
        """
        data_infos = mmcv.load(self.ann_file)
        for info in data_infos:
            # Every value in an entry is a relative path; anchor it at the
            # dataset prefix.
            for name in info:
                info[name] = osp.join(self.data_prefix, info[name])
        return data_infos
|
@DATASETS.register_module()
class RepeatDataset():
    """Wrap a dataset so that it appears ``times`` as long.

    Useful when the data loading time is long but the dataset is small:
    iterating the wrapper amortises the inter-epoch loading cost over
    ``times`` virtual epochs.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        # Cache the underlying length once; index lookups wrap around it.
        self._ori_len = len(self.dataset)

    def __getitem__(self, idx):
        """Fetch the sample that ``idx`` maps to in the original dataset.

        Args:
            idx (int): Index into the virtually repeated dataset.
        """
        return self.dataset[idx % self._ori_len]

    def __len__(self):
        """Length of the repeated dataset.

        Returns:
            int: Original length multiplied by ``times``.
        """
        return self._ori_len * self.times
|
@DATASETS.register_module()
class GenerationPairedDataset(BaseGenerationDataset):
    """General paired image folder dataset for image generation.

    Each sample is a single file holding a pair of images concatenated along
    the width (A|B). Training images are expected under
    '/path/to/data/train' and test images under '/path/to/data/test', where
    '/path/to/data' is given by ``dataroot``.

    Args:
        dataroot (str | :obj:`Path`): Path to the folder root of paired
            images.
        pipeline (List[dict | callable]): A sequence of data transformations.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    """

    def __init__(self, dataroot, pipeline, test_mode=False):
        super().__init__(pipeline, test_mode)
        # Pick the phase-specific sub-directory of the dataset root.
        subdir = 'test' if test_mode else 'train'
        self.dataroot = osp.join(str(dataroot), subdir)
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load paired image paths.

        Returns:
            list[dict]: One dict per pair image, keyed by ``pair_path``.
        """
        return [
            dict(pair_path=p) for p in sorted(self.scan_folder(self.dataroot))
        ]
|
@DATASETS.register_module()
class GenerationUnpairedDataset(BaseGenerationDataset):
    """General unpaired image folder dataset for image generation.

    It assumes that the training directory of images from domain A is
    '/path/to/data/trainA', and that from domain B is '/path/to/data/trainB',
    respectively. '/path/to/data' can be initialized by args 'dataroot'.
    During test time, the directory is '/path/to/data/testA' and
    '/path/to/data/testB', respectively.

    Args:
        dataroot (str | :obj:`Path`): Path to the folder root of unpaired
            images.
        pipeline (List[dict | callable]): A sequence of data transformations.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    """

    def __init__(self, dataroot, pipeline, test_mode=False):
        super().__init__(pipeline, test_mode)
        phase = 'test' if test_mode else 'train'
        root = str(dataroot)
        self.dataroot_a = osp.join(root, phase + 'A')
        self.dataroot_b = osp.join(root, phase + 'B')
        self.data_infos_a = self.load_annotations(self.dataroot_a)
        self.data_infos_b = self.load_annotations(self.dataroot_b)
        self.len_a = len(self.data_infos_a)
        self.len_b = len(self.data_infos_b)

    def load_annotations(self, dataroot):
        """Load unpaired image paths of one domain.

        Args:
            dataroot (str): Path to the folder root for unpaired images of
                one domain.

        Returns:
            list[dict]: List that contains unpaired image paths of one
                domain.
        """
        return [dict(path=p) for p in sorted(self.scan_folder(dataroot))]

    def prepare_train_data(self, idx):
        """Prepare unpaired training data.

        Domain-A frames are walked in order while the domain-B partner is
        drawn at random, decorrelating the two domains across epochs.

        Args:
            idx (int): Index of current batch.

        Returns:
            dict: Prepared training data batch.
        """
        img_a_path = self.data_infos_a[idx % self.len_a]['path']
        img_b_path = self.data_infos_b[np.random.randint(0, self.len_b)]['path']
        return self.pipeline(dict(img_a_path=img_a_path, img_b_path=img_b_path))

    def prepare_test_data(self, idx):
        """Prepare unpaired test data.

        Args:
            idx (int): Index of current batch.

        Returns:
            list[dict]: Prepared test data batch.
        """
        img_a_path = self.data_infos_a[idx % self.len_a]['path']
        img_b_path = self.data_infos_b[idx % self.len_b]['path']
        return self.pipeline(dict(img_a_path=img_a_path, img_b_path=img_b_path))

    def __len__(self):
        # The longer of the two domains dictates the epoch length.
        return max(self.len_a, self.len_b)
|
@DATASETS.register_module()
class ImgInpaintingDataset(BaseDataset):
    """Image dataset for inpainting.

    Args:
        ann_file (str | :obj:`Path`): Annotation file; each line's first
            whitespace-separated token is a relative image path.
        pipeline (List[dict | callable]): A sequence of data transformations.
        data_prefix (str | :obj:`Path`, optional): Directory prepended to
            every path read from ``ann_file``. Default: None.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    """

    def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False):
        super().__init__(pipeline, test_mode)
        self.ann_file = str(ann_file)
        self.data_prefix = str(data_prefix)
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for dataset.

        Returns:
            list[dict]: Each dict holds ``gt_img_path`` and ``gt_img_idx``.
        """
        img_infos = []
        with open(self.ann_file, 'r') as f:
            for idx, line in enumerate(f):
                # Only the first whitespace-separated token is the path; any
                # trailing columns on the annotation line are ignored.
                rel_path = line.strip().split(' ')[0]
                img_infos.append(
                    dict(
                        gt_img_path=Path(
                            self.data_prefix).joinpath(rel_path).as_posix(),
                        gt_img_idx=idx))
        return img_infos

    def evaluate(self, outputs, logger=None, **kwargs):
        """Average per-sample evaluation results over the whole dataset.

        Args:
            outputs (list[dict]): Each item carries an ``eval_result`` dict
                produced by the model for one sample.
            logger: Unused; kept for interface compatibility.

        Returns:
            dict: Metric name mapped to its dataset-wide average.
        """
        metric_keys = outputs[0]['eval_result'].keys()
        stats = {}
        for key in metric_keys:
            total = sum(x['eval_result'][key] for x in outputs)
            stats[key] = total / len(self)
        return stats
|
@DATASETS.register_module()
class LDPPQFDataset(BaseSRDataset):
    """LDP PQF dataset for compressed video quality enhancement.

    The dataset loads three LQ (Low-Quality) frames and a center GT
    (Ground-Truth) frame, applies the transforms in ``pipeline`` and returns
    a dict containing the paired data and other information.

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        i_frame_idx (int): Index of the I frame.
            Default: 0
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.

    @ryanxingql
    """

    def __init__(self, lq_folder, gt_folder, pipeline, scale,
                 filename_tmpl='f{:03d}', i_frame_idx=0, max_need_frms=100,
                 test_mode=False):
        super().__init__(pipeline, scale)
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.i_frame_idx = i_frame_idx
        # Frame file names are expected to look like f001.png.
        assert filename_tmpl == 'f{:03d}'
        self.filename_tmpl = filename_tmpl
        self.test_mode = test_mode
        self.max_need_frms = max_need_frms
        self.data_infos = self.load_annotations()

    def find_left_right_pqf(self, center_pqf, pqf_list):
        # Neighbouring PQFs of a PQF. The first and last PQFs have no
        # neighbour on one side, so the two nearest PQFs are used instead.
        pos = pqf_list.index(center_pqf)
        if pos == 0:
            return pqf_list[0], pqf_list[1]
        if pos == len(pqf_list) - 1:
            return pqf_list[-2], pqf_list[-1]
        return pqf_list[pos - 1], pqf_list[pos + 1]

    def load_annotations(self):
        """Load annotations.

        Returns:
            list[dict]: A list of dicts for paired paths and other
                information.
        """
        data_infos = []
        for vid_dir in sorted(glob(osp.join(self.lq_folder, '*/'))):
            frm_list = sorted(glob(osp.join(vid_dir, '*.png')))
            vid_name = vid_dir.split('/')[-2]
            max_frm_num = len(frm_list)
            # At test time, cap the number of evaluated frames per video.
            if self.test_mode and max_frm_num > self.max_need_frms:
                random.shuffle(frm_list)
                frm_list = frm_list[:self.max_need_frms]
            # PQFs occur every 4 frames starting from the I frame.
            pqf_list = list(
                range(self.i_frame_idx, self.i_frame_idx + max_frm_num, 4))
            for frm_path in frm_list:
                frm_name = frm_path.split('/')[-1].split('.')[0]
                frm_idx = int(frm_name[1:])
                if frm_idx not in pqf_list:
                    continue  # only PQFs belong to this dataset
                left_pqf_idx, right_pqf_idx = self.find_left_right_pqf(
                    frm_idx, pqf_list)
                data_infos.append(
                    dict(
                        lq_path=self.lq_folder,
                        gt_path=self.gt_folder,
                        left_pqf_idx=left_pqf_idx,
                        right_pqf_idx=right_pqf_idx,
                        key=f'{vid_name}/{frm_name}'))
        return data_infos
|
@DATASETS.register_module()
class LDPNonPQFDataset(BaseSRDataset):
    """LDP non-PQF dataset for compressed video quality enhancement.

    The dataset loads three LQ (Low-Quality) frames and a center GT
    (Ground-Truth) frame, applies the transforms in ``pipeline`` and returns
    a dict containing the paired data and other information.

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        i_frame_idx (int): Index of the I frame.
            Default: 0
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.

    @ryanxingql
    """

    def __init__(self, lq_folder, gt_folder, pipeline, scale,
                 filename_tmpl='f{:03d}', i_frame_idx=0, max_need_frms=100,
                 test_mode=False):
        super().__init__(pipeline, scale)
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.i_frame_idx = i_frame_idx
        # Frame file names are expected to look like f001.png.
        assert filename_tmpl == 'f{:03d}'
        self.filename_tmpl = filename_tmpl
        self.test_mode = test_mode
        self.max_need_frms = max_need_frms
        self.data_infos = self.load_annotations()

    def find_left_right_pqf(self, center_npqf, pqf_list):
        # A non-PQF lies strictly between two PQFs; find the bracketing
        # pair. Frames after the last PQF fall back to
        # (last PQF, the frame itself).
        for left, right in zip(pqf_list, pqf_list[1:]):
            if left < center_npqf < right:
                return left, right
        return pqf_list[-1], center_npqf

    def load_annotations(self):
        """Load annotations.

        Returns:
            list[dict]: A list of dicts for paired paths and other
                information.
        """
        data_infos = []
        for vid_dir in sorted(glob(osp.join(self.lq_folder, '*/'))):
            frm_list = sorted(glob(osp.join(vid_dir, '*.png')))
            vid_name = vid_dir.split('/')[-2]
            max_frm_num = len(frm_list)
            # At test time, cap the number of evaluated frames per video.
            if self.test_mode and max_frm_num > self.max_need_frms:
                random.shuffle(frm_list)
                frm_list = frm_list[:self.max_need_frms]
            # PQFs occur every 4 frames starting from the I frame.
            pqf_list = list(
                range(self.i_frame_idx, self.i_frame_idx + max_frm_num, 4))
            for frm_path in frm_list:
                frm_name = frm_path.split('/')[-1].split('.')[0]
                frm_idx = int(frm_name[1:])
                if frm_idx in pqf_list:
                    continue  # PQFs are handled by LDPPQFDataset
                left_pqf_idx, right_pqf_idx = self.find_left_right_pqf(
                    frm_idx, pqf_list)
                data_infos.append(
                    dict(
                        lq_path=self.lq_folder,
                        gt_path=self.gt_folder,
                        left_pqf_idx=left_pqf_idx,
                        right_pqf_idx=right_pqf_idx,
                        key=f'{vid_name}/{frm_name}'))
        return data_infos
|
def get_rotated_sigma_matrix(sig_x, sig_y, theta):
    """Calculate the rotated sigma matrix (two dimensional matrix).

    Args:
        sig_x (float): Standard deviation along the horizontal direction.
        sig_y (float): Standard deviation along the vertical direction.
        theta (float): Rotation in radian.

    Returns:
        ndarray: Rotated sigma matrix ``R @ D @ R.T`` where ``R`` is the
            rotation matrix of ``theta`` and ``D`` the diagonal variance
            matrix.
    """
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rot = np.array([[cos_t, -sin_t], [sin_t, cos_t]]).astype(np.float32)
    diag = np.diag([sig_x**2, sig_y**2]).astype(np.float32)
    return rot @ (diag @ rot.T)
|
def _mesh_grid(kernel_size):
'Generate the mesh grid, centering at zero.\n\n Args:\n kernel_size (int): The size of the kernel.\n\n Returns:\n x_grid (ndarray): x-coordinates with shape (kernel_size, kernel_size).\n y_grid (ndarray): y-coordiantes with shape (kernel_size, kernel_size).\n xy_grid (ndarray): stacked coordinates with shape\n (kernel_size, kernel_size, 2).\n '
range_ = np.arange((((- kernel_size) // 2) + 1.0), ((kernel_size // 2) + 1.0))
(x_grid, y_grid) = np.meshgrid(range_, range_)
xy_grid = np.hstack((x_grid.reshape(((kernel_size * kernel_size), 1)), y_grid.reshape((kernel_size * kernel_size), 1))).reshape(kernel_size, kernel_size, 2)
return (xy_grid, x_grid, y_grid)
|
def calculate_gaussian_pdf(sigma_matrix, grid):
    """Calculate PDF of the bivariate Gaussian distribution.

    Args:
        sigma_matrix (ndarray): The variance matrix with shape (2, 2).
        grid (ndarray): Coordinates generated by :func:`_mesh_grid`,
            with shape (K, K, 2), where K is the kernel size.

    Returns:
        ndarray: Un-normalized kernel (the Gaussian normalization constant
            is deliberately omitted).
    """
    precision = np.linalg.inv(sigma_matrix)
    # Quadratic form x^T Sigma^{-1} x evaluated at every grid point.
    quad = np.sum((grid @ precision) * grid, axis=2)
    return np.exp(-0.5 * quad)
|
def bivariate_gaussian(kernel_size, sig_x, sig_y=None, theta=None, grid=None, is_isotropic=True):
    """Generate a bivariate isotropic or anisotropic Gaussian kernel.

    In isotropic mode, only `sig_x` is used. `sig_y` and `theta` are
    ignored.

    Args:
        kernel_size (int): The size of the kernel
        sig_x (float): Standard deviation along horizontal direction.
        sig_y (float | None, optional): Standard deviation along the vertical
            direction. If it is None, 'is_isotropic' must be set to True.
            Default: None.
        theta (float | None, optional): Rotation in radian. If it is None,
            'is_isotropic' must be set to True. Default: None.
        grid (ndarray, optional): Coordinates generated by :func:`_mesh_grid`,
            with shape (K, K, 2), where K is the kernel size. Default: None
        is_isotropic (bool, optional): Whether to use an isotropic kernel.
            Default: True.

    Returns:
        ndarray: normalized kernel (i.e. sum to 1).
    """
    if grid is None:
        grid = _mesh_grid(kernel_size)[0]
    if is_isotropic:
        var = sig_x**2
        sigma_matrix = np.array([[var, 0], [0, var]]).astype(np.float32)
    elif sig_y is None:
        raise ValueError('"sig_y" cannot be None if "is_isotropic" is False.')
    else:
        sigma_matrix = get_rotated_sigma_matrix(sig_x, sig_y, theta)
    kernel = calculate_gaussian_pdf(sigma_matrix, grid)
    return kernel / np.sum(kernel)
|
def bivariate_generalized_gaussian(kernel_size, sig_x, sig_y=None, theta=None, beta=1, grid=None, is_isotropic=True):
    """Generate a bivariate generalized Gaussian kernel.

    Described in `Parameter Estimation For Multivariate Generalized
    Gaussian Distributions` by Pascal et. al (2013). In isotropic mode,
    only `sig_x` is used. `sig_y` and `theta` is ignored.

    Args:
        kernel_size (int): The size of the kernel
        sig_x (float): Standard deviation along horizontal direction
        sig_y (float | None, optional): Standard deviation along the vertical
            direction. If it is None, 'is_isotropic' must be set to True.
            Default: None.
        theta (float | None, optional): Rotation in radian. If it is None,
            'is_isotropic' must be set to True. Default: None.
        beta (float, optional): Shape parameter, beta = 1 is the normal
            distribution. Default: 1.
        grid (ndarray, optional): Coordinates generated by :func:`_mesh_grid`,
            with shape (K, K, 2), where K is the kernel size. Default: None
        is_isotropic (bool, optional): Whether to use an isotropic kernel.
            Default: True.

    Returns:
        ndarray: normalized kernel.

    Raises:
        ValueError: If ``is_isotropic`` is False but ``sig_y`` is None.
    """
    if grid is None:
        grid, _, _ = _mesh_grid(kernel_size)
    if is_isotropic:
        sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]]).astype(np.float32)
    else:
        # Validate like `bivariate_gaussian` does, instead of crashing with
        # an obscure TypeError inside get_rotated_sigma_matrix.
        if sig_y is None:
            raise ValueError('"sig_y" cannot be None if "is_isotropic" is False.')
        sigma_matrix = get_rotated_sigma_matrix(sig_x, sig_y, theta)
    inverse_sigma = np.linalg.inv(sigma_matrix)
    # Generalized Gaussian: exp(-0.5 * (x^T Sigma^{-1} x)^beta).
    kernel = np.exp(-0.5 * np.power(np.sum(np.matmul(grid, inverse_sigma) * grid, 2), beta))
    kernel = kernel / np.sum(kernel)
    return kernel
|
def bivariate_plateau(kernel_size, sig_x, sig_y, theta, beta, grid=None, is_isotropic=True):
    """Generate a plateau-like anisotropic kernel.

    This kernel has a form of 1 / (1+x^(beta)).
    Ref: https://stats.stackexchange.com/questions/203629/is-there-a-plateau-shaped-distribution # noqa
    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` is ignored.

    Args:
        kernel_size (int): The size of the kernel
        sig_x (float): Standard deviation along horizontal direction
        sig_y (float): Standard deviation along the vertical direction.
        theta (float): Rotation in radian.
        beta (float): Shape parameter, beta = 1 is the normal distribution.
        grid (ndarray, optional): Coordinates generated by :func:`_mesh_grid`,
            with shape (K, K, 2), where K is the kernel size. Default: None
        is_isotropic (bool, optional): Whether to use an isotropic kernel.
            Default: True.

    Returns:
        ndarray: normalized kernel (i.e. sum to 1).
    """
    if grid is None:
        grid = _mesh_grid(kernel_size)[0]
    if is_isotropic:
        var = sig_x**2
        sigma_matrix = np.array([[var, 0], [0, var]]).astype(np.float32)
    else:
        sigma_matrix = get_rotated_sigma_matrix(sig_x, sig_y, theta)
    precision = np.linalg.inv(sigma_matrix)
    quad = np.sum((grid @ precision) * grid, 2)
    # Plateau shape: 1 / (1 + q^beta) stays near 1 around the center.
    kernel = np.reciprocal(np.power(quad, beta) + 1)
    return kernel / np.sum(kernel)
|
def random_bivariate_gaussian_kernel(kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=None, is_isotropic=True):
    """Randomly generate bivariate isotropic or anisotropic Gaussian kernels.

    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and
    `rotation_range` is ignored.

    Args:
        kernel_size (int): The size of the kernel.
        sigma_x_range (tuple): The range of the standard deviation along the
            horizontal direction. Default: [0.6, 5]
        sigma_y_range (tuple): The range of the standard deviation along the
            vertical direction. Default: [0.6, 5]
        rotation_range (tuple): Range of rotation in radian.
        noise_range (tuple, optional): Multiplicative kernel noise.
            Default: None.
        is_isotropic (bool, optional): Whether to use an isotropic kernel.
            Default: True.

    Returns:
        ndarray: The kernel whose parameters are sampled from the
            specified range.
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] <= sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if is_isotropic is False:
        assert sigma_y_range[0] <= sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] <= rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
        # Isotropic kernels ignore the vertical sigma and the rotation.
        sigma_y, rotation = sigma_x, 0
    kernel = bivariate_gaussian(
        kernel_size, sigma_x, sigma_y, rotation, is_isotropic=is_isotropic)
    # Optional multiplicative uniform noise, followed by renormalization.
    if noise_range is not None:
        assert noise_range[0] <= noise_range[1], 'Wrong noise range.'
        kernel = kernel * np.random.uniform(
            noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel / np.sum(kernel)
    return kernel
|
def random_bivariate_generalized_gaussian_kernel(kernel_size, sigma_x_range, sigma_y_range, rotation_range, beta_range, noise_range=None, is_isotropic=True):
    """Randomly generate bivariate generalized Gaussian kernels.

    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and
    `rotation_range` is ignored.

    Args:
        kernel_size (int): The size of the kernel.
        sigma_x_range (tuple): The range of the standard deviation along the
            horizontal direction. Default: [0.6, 5]
        sigma_y_range (tuple): The range of the standard deviation along the
            vertical direction. Default: [0.6, 5]
        rotation_range (tuple): Range of rotation in radian.
        beta_range (float): The range of the shape parameter, beta = 1 is the
            normal distribution.
        noise_range (tuple, optional): Multiplicative kernel noise.
            Default: None.
        is_isotropic (bool, optional): Whether to use an isotropic kernel.
            Default: True.

    Returns:
        ndarray: The sampled kernel.
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] <= sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if is_isotropic is False:
        assert sigma_y_range[0] <= sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] <= rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
        sigma_y, rotation = sigma_x, 0
    # Sample beta below or above 1 with equal probability so kernels are not
    # biased towards either sub- or super-Gaussian shapes.
    if np.random.uniform() <= 0.5:
        beta = np.random.uniform(beta_range[0], 1)
    else:
        beta = np.random.uniform(1, beta_range[1])
    kernel = bivariate_generalized_gaussian(
        kernel_size, sigma_x, sigma_y, rotation, beta,
        is_isotropic=is_isotropic)
    if noise_range is not None:
        assert noise_range[0] <= noise_range[1], 'Wrong noise range.'
        kernel = kernel * np.random.uniform(
            noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel / np.sum(kernel)
    return kernel
|
def random_bivariate_plateau_kernel(kernel_size, sigma_x_range, sigma_y_range, rotation_range, beta_range, noise_range=None, is_isotropic=True):
    """Randomly generate bivariate plateau kernels.

    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and
    `rotation_range` is ignored.

    Args:
        kernel_size (int): The size of the kernel.
        sigma_x_range (tuple): The range of the standard deviation along the
            horizontal direction. Default: [0.6, 5]
        sigma_y_range (tuple): The range of the standard deviation along the
            vertical direction. Default: [0.6, 5]
        rotation_range (tuple): Range of rotation in radian.
        beta_range (float): The range of the shape parameter, beta = 1 is the
            normal distribution.
        noise_range (tuple, optional): Multiplicative kernel noise.
            Default: None.
        is_isotropic (bool, optional): Whether to use an isotropic kernel.
            Default: True.

    Returns:
        ndarray: The sampled kernel.
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] <= sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if is_isotropic is False:
        assert sigma_y_range[0] <= sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] <= rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
        sigma_y, rotation = sigma_x, 0
    # Sample beta below or above 1 with equal probability.
    if np.random.uniform() <= 0.5:
        beta = np.random.uniform(beta_range[0], 1)
    else:
        beta = np.random.uniform(1, beta_range[1])
    kernel = bivariate_plateau(
        kernel_size, sigma_x, sigma_y, rotation, beta,
        is_isotropic=is_isotropic)
    if noise_range is not None:
        assert noise_range[0] <= noise_range[1], 'Wrong noise range.'
        kernel = kernel * np.random.uniform(
            noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel / np.sum(kernel)
    return kernel
|
def random_circular_lowpass_kernel(omega_range, kernel_size, pad_to=0):
    """Generate a 2D Sinc filter.

    Reference: https://dsp.stackexchange.com/questions/58301/2-d-circularly-symmetric-low-pass-filter # noqa

    Args:
        omega_range (tuple): The cutoff frequency in radian (pi is max).
        kernel_size (int): The size of the kernel. It must be an odd number.
        pad_to (int, optional): The size of the padded kernel. It must be odd
            or zero. Default: 0.

    Returns:
        ndarray: The Sinc kernel with specified parameters.
    """
    assert (kernel_size % 2) == 1, 'Kernel size must be an odd number.'
    omega = np.random.uniform(omega_range[0], omega_range[-1])
    center = (kernel_size - 1) / 2

    def _jinc(x, y):
        # Radially symmetric response: omega * J1(omega * r) / (2*pi*r).
        radius = np.sqrt((x - center)**2 + (y - center)**2)
        return omega * special.j1(omega * radius) / (2 * np.pi * radius)

    # J1(r)/r is 0/0 at the center pixel; the original code used np.seterr,
    # which mutates global state and is not restored if an exception fires.
    # np.errstate scopes the suppression and is exception-safe.
    with np.errstate(divide='ignore', invalid='ignore'):
        kernel = np.fromfunction(_jinc, [kernel_size, kernel_size])
    # Patch in the analytic limit at the center.
    kernel[(kernel_size - 1) // 2, (kernel_size - 1) // 2] = omega**2 / (4 * np.pi)
    kernel = kernel / np.sum(kernel)
    if pad_to > kernel_size:
        pad_size = (pad_to - kernel_size) // 2
        kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
    return kernel
|
def random_mixed_kernels(kernel_list, kernel_prob, kernel_size, sigma_x_range=[0.6, 5], sigma_y_range=[0.6, 5], rotation_range=[(- np.pi), np.pi], beta_gaussian_range=[0.5, 8], beta_plateau_range=[1, 2], omega_range=[0, np.pi], noise_range=None):
    """Randomly generate a kernel.

    Args:
        kernel_list (list): A list of kernel types. Choices are
            'iso', 'aniso', 'generalized_iso', 'generalized_aniso',
            'plateau_iso', 'plateau_aniso', 'sinc'.
        kernel_prob (list): The probability of choosing of the corresponding
            kernel.
        kernel_size (int): The size of the kernel.
        sigma_x_range (list, optional): The range of the standard deviation
            along the horizontal direction. Default: (0.6, 5).
        sigma_y_range (list, optional): The range of the standard deviation
            along the vertical direction. Default: (0.6, 5).
        rotation_range (list, optional): Range of rotation in radian.
            Default: (-np.pi, np.pi).
        beta_gaussian_range (list, optional): The range of the shape parameter
            for generalized Gaussian. Default: (0.5, 8).
        beta_plateau_range (list, optional): The range of the shape parameter
            for plateau kernel. Default: (1, 2).
        omega_range (list, optional): The range of omega used in Sinc kernel.
            Default: (0, np.pi).
        noise_range (list, optional): Multiplicative kernel noise.
            Default: None.

    Returns:
        ndarray: The kernel whose parameters are sampled from the
            specified range.

    Raises:
        ValueError: If an unsupported kernel type is sampled. (The original
            code silently fell through and raised UnboundLocalError.)
    """
    kernel_type = np.random.choice(kernel_list, p=kernel_prob)
    if kernel_type == 'iso':
        kernel = random_bivariate_gaussian_kernel(kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, is_isotropic=True)
    elif kernel_type == 'aniso':
        kernel = random_bivariate_gaussian_kernel(kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, is_isotropic=False)
    elif kernel_type == 'generalized_iso':
        kernel = random_bivariate_generalized_gaussian_kernel(kernel_size, sigma_x_range, sigma_y_range, rotation_range, beta_gaussian_range, noise_range=noise_range, is_isotropic=True)
    elif kernel_type == 'generalized_aniso':
        kernel = random_bivariate_generalized_gaussian_kernel(kernel_size, sigma_x_range, sigma_y_range, rotation_range, beta_gaussian_range, noise_range=noise_range, is_isotropic=False)
    elif kernel_type == 'plateau_iso':
        # NOTE: plateau kernels deliberately pass noise_range=None upstream.
        kernel = random_bivariate_plateau_kernel(kernel_size, sigma_x_range, sigma_y_range, rotation_range, beta_plateau_range, noise_range=None, is_isotropic=True)
    elif kernel_type == 'plateau_aniso':
        kernel = random_bivariate_plateau_kernel(kernel_size, sigma_x_range, sigma_y_range, rotation_range, beta_plateau_range, noise_range=None, is_isotropic=False)
    elif kernel_type == 'sinc':
        kernel = random_circular_lowpass_kernel(omega_range, kernel_size)
    else:
        raise ValueError(f'Kernel type {kernel_type} is not supported.')
    return kernel
|
@PIPELINES.register_module()
class Compose():
    """Compose a data pipeline with a sequence of transforms.

    Args:
        transforms (list[dict | callable]):
            Either config dicts of transforms or transform objects.
    """

    def __init__(self, transforms):
        assert isinstance(transforms, Sequence)
        self.transforms = []
        for transform in transforms:
            if callable(transform):
                self.transforms.append(transform)
            elif isinstance(transform, dict):
                # Config dicts are instantiated through the registry.
                self.transforms.append(build_from_cfg(transform, PIPELINES))
            else:
                raise TypeError(f'transform must be callable or a dict, but got {type(transform)}')

    def __call__(self, data):
        """Apply every transform to ``data`` in order.

        Args:
            data (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict | None: The processed data, or None as soon as any
                transform returns None (signalling the sample is dropped).
        """
        for transform in self.transforms:
            data = transform(data)
            if data is None:
                return None
        return data

    def __repr__(self):
        body = ''.join(f'\n    {t}' for t in self.transforms)
        return self.__class__.__name__ + '(' + body + '\n)'
|
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.
    """
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    # Strings are Sequences but cannot become tensors; this check matches
    # mmcv.is_str without the extra dependency.
    if isinstance(data, Sequence) and not isinstance(data, str):
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError(f'type {type(data)} cannot be converted to tensor.')
|
@PIPELINES.register_module()
class ToTensor():
    """Convert some values in results dict to `torch.Tensor` type
    in data loader pipeline.

    Args:
        keys (Sequence[str]): Required keys to be converted.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert every configured key of ``results`` to a tensor.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        for name in self.keys:
            results[name] = to_tensor(results[name])
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
|
@PIPELINES.register_module()
class ImageToTensor():
    """Convert image type to `torch.Tensor` type.

    Grayscale (h, w) images gain a trailing channel axis, and all images are
    transposed from (h, w, c) to (c, h, w) before conversion.

    Args:
        keys (Sequence[str]): Required keys to be converted.
        to_float32 (bool): Whether convert numpy image array to np.float32
            before converted to tensor. Default: True.
    """

    def __init__(self, keys, to_float32=True):
        self.keys = keys
        self.to_float32 = to_float32

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        for key in self.keys:
            img = results[key]
            # Expand grayscale (h, w) images with a trailing channel axis.
            if len(img.shape) == 2:
                img = img[..., None]
            # Fixed: the original tested `isinstance(img, np.float32)`, which
            # is always False for ndarrays; check the dtype instead so the
            # cast happens exactly when needed.
            if self.to_float32 and img.dtype != np.float32:
                img = img.astype(np.float32)
            results[key] = to_tensor(img.transpose(2, 0, 1))
        return results

    def __repr__(self):
        return (self.__class__.__name__ +
                f'(keys={self.keys}, to_float32={self.to_float32})')
|
@PIPELINES.register_module()
class FramesToTensor(ImageToTensor):
    """Convert frames type to `torch.Tensor` type.

    It accepts a list of frames, converts each to `torch.Tensor` type and then
    concatenates in a new dimension (dim=0). If the stacked tensor has a
    leading dimension of 1, that dimension is squeezed away.

    Args:
        keys (Sequence[str]): Required keys to be converted.
        to_float32 (bool): Whether convert numpy image array to np.float32
            before converted to tensor. Default: True.
    """

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.

        Raises:
            TypeError: If ``results[key]`` is not a list of frames.
        """
        for key in self.keys:
            if not isinstance(results[key], list):
                raise TypeError(f'results["{key}"] should be a list, but got {type(results[key])}')
            for idx, frame in enumerate(results[key]):
                # Expand grayscale (h, w) frames with a trailing channel axis.
                if len(frame.shape) == 2:
                    frame = frame[..., None]
                # Fixed: `isinstance(frame, np.float32)` is always False for
                # ndarrays; check the dtype so the cast happens when needed.
                if self.to_float32 and frame.dtype != np.float32:
                    frame = frame.astype(np.float32)
                results[key][idx] = to_tensor(frame.transpose(2, 0, 1))
            results[key] = torch.stack(results[key], dim=0)
            if results[key].size(0) == 1:
                results[key].squeeze_(dim=0)
        return results
|
@PIPELINES.register_module()
class GetMaskedImage():
    """Get masked image.

    Args:
        img_name (str): Key for clean image.
        mask_name (str): Key for mask image. The mask shape should be
            (h, w, 1) while '1' indicate holes and '0' indicate valid
            regions.
    """

    def __init__(self, img_name='gt_img', mask_name='mask'):
        self.img_name = img_name
        self.mask_name = mask_name

    def __call__(self, results):
        """Zero out hole pixels and store the result under 'masked_img'.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: The same dict with a new 'masked_img' entry.
        """
        # Holes carry 1 in the mask, so (1 - mask) keeps the valid pixels.
        results['masked_img'] = results[self.img_name] * (1.0 - results[self.mask_name])
        return results

    def __repr__(self):
        return (self.__class__.__name__ +
                f"(img_name='{self.img_name}', mask_name='{self.mask_name}')")
|
@PIPELINES.register_module()
class FormatTrimap:
    """Convert trimap (tensor) to one-hot representation.

    It transforms the trimap label from (0, 128, 255) to (0, 1, 2). If
    ``to_onehot`` is set to True, the trimap will convert to one-hot tensor
    of shape (3, H, W). Required key is "trimap", added or modified key are
    "trimap" and "to_onehot".

    Args:
        to_onehot (bool): whether convert trimap to one-hot tensor.
            Default: ``False``.
    """

    def __init__(self, to_onehot=False):
        self.to_onehot = to_onehot

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        tmap = results['trimap'].squeeze()
        # Relabel in place: 128 (unknown) -> 1, 255 (foreground) -> 2.
        tmap[tmap == 128] = 1
        tmap[tmap == 255] = 2
        if self.to_onehot:
            # (H, W) -> (H, W, 3) -> (3, H, W)
            tmap = F.one_hot(tmap.long(), num_classes=3).permute(2, 0, 1)
        else:
            tmap = tmap.unsqueeze(0)
        results['trimap'] = tmap.float()
        results['meta'].data['to_onehot'] = self.to_onehot
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(to_onehot={self.to_onehot})'
|
@PIPELINES.register_module()
class Collect:
    """Collect data from the loader relevant to the specific task.

    This is usually the last stage of the data loader pipeline. Typically
    keys is set to some subset of "img", "gt_labels".

    The "meta" item is always populated. The contents of the "meta"
    dictionary depends on "meta_keys".

    Args:
        keys (Sequence[str]): Required keys to be collected.
        meta_keys (Sequence[str] | None): Required keys to be collected to
            "meta". If None, no extra meta information is collected.
            Default: None.
    """

    def __init__(self, keys, meta_keys=None):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        data = {}
        img_meta = {}
        # Bug fix: the documented default ``meta_keys=None`` used to raise
        # ``TypeError: 'NoneType' object is not iterable`` here. Treat None
        # as "collect no meta keys".
        for key in (self.meta_keys or ()):
            img_meta[key] = results[key]
        data['meta'] = DC(img_meta, cpu_only=True)
        for key in self.keys:
            data[key] = results[key]
        return data

    def __repr__(self):
        return (self.__class__.__name__ +
                f'(keys={self.keys}, meta_keys={self.meta_keys})')
|
@PIPELINES.register_module()
class Normalize:
    """Normalize images with the given mean and std value.

    Required keys are the keys in attribute "keys", added or modified keys
    are the keys in attribute "keys" and these keys with postfix
    '_norm_cfg'. It also supports normalizing a list of images.

    Args:
        keys (Sequence[str]): The images to be normalized.
        mean (np.ndarray): Mean values of different channels.
        std (np.ndarray): Std values of different channels.
        to_rgb (bool): Whether to convert channels from BGR to RGB.
            Default: False.
        save_original (bool): Whether to keep an un-normalized copy of each
            image under the key ``<key>_unnormalised``. Default: False.
    """

    def __init__(self, keys, mean, std, to_rgb=False, save_original=False):
        self.keys = keys
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
        self.to_rgb = to_rgb
        self.save_original = save_original

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        for key in self.keys:
            if isinstance(results[key], list):
                if self.save_original:
                    results[key + '_unnormalised'] = [
                        v.copy() for v in results[key]
                    ]
                results[key] = [
                    mmcv.imnormalize(v, self.mean, self.std, self.to_rgb)
                    for v in results[key]
                ]
            else:
                if self.save_original:
                    results[key + '_unnormalised'] = results[key].copy()
                results[key] = mmcv.imnormalize(results[key], self.mean,
                                                self.std, self.to_rgb)
        results['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        # Fix: ``save_original`` was missing from the repr, so two
        # differently-configured instances printed identically.
        repr_str += (f'(keys={self.keys}, mean={self.mean}, std={self.std}, '
                     f'to_rgb={self.to_rgb}, '
                     f'save_original={self.save_original})')
        return repr_str
|
@PIPELINES.register_module()
class RescaleToZeroOne:
    """Transform the images into a range between 0 and 1.

    Required keys are the keys in attribute "keys", added or modified keys
    are the keys in attribute "keys".
    It also supports rescaling a list of images.

    Args:
        keys (Sequence[str]): The images to be transformed.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """

        def rescale(img):
            # uint8 [0, 255] -> float32 [0, 1]
            return img.astype(np.float32) / 255.0

        for key in self.keys:
            value = results[key]
            if isinstance(value, list):
                results[key] = [rescale(v) for v in value]
            else:
                results[key] = rescale(value)
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
|
class DistributedSampler(_DistributedSampler):
    """DistributedSampler inheriting from `torch.utils.data.DistributedSampler`.

    In pytorch of lower versions, there is no `shuffle` argument. This child
    class will port one to DistributedSampler.

    The per-replica sample count is rounded up to a whole number of
    ``samples_per_gpu``-sized batches, and the index list is padded by
    wrap-around repetition so that every replica receives exactly
    ``num_samples`` indices.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, samples_per_gpu=1, seed=0):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
        self.shuffle = shuffle
        self.samples_per_gpu = samples_per_gpu
        # Ceil so each replica draws a whole number of batches.
        self.num_samples_per_replica = int(math.ceil((((len(self.dataset) * 1.0) / self.num_replicas) / samples_per_gpu)))
        self.num_samples = (self.num_samples_per_replica * self.samples_per_gpu)
        self.total_size = (self.num_samples * self.num_replicas)
        # Synchronize one seed across all ranks so every replica produces
        # the same shuffled order before slicing its own subset.
        self.seed = sync_random_seed(seed)
        # A dataset smaller than one batch per replica cannot be padded
        # consistently by the wrap-around below, hence the hard error.
        if (len(dataset) < (self.num_replicas * samples_per_gpu)):
            raise ValueError('You may use too small dataset and our distributed sampler cannot pad your dataset correctly. We highly recommend you to use fewer GPUs to finish your work')

    def __iter__(self):
        """Yield this rank's indices for the current epoch.

        Shuffling is seeded with ``self.epoch + self.seed``, so the order
        is deterministic per epoch and identical across ranks.
        """
        if self.shuffle:
            g = torch.Generator()
            g.manual_seed((self.epoch + self.seed))
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = torch.arange(len(self.dataset)).tolist()
        # Pad by wrapping around so that len(indices) == total_size.
        # NOTE(review): assumes the shortfall is at most len(indices);
        # presumably guaranteed by the size check in __init__ — confirm.
        indices += indices[:(self.total_size - len(indices))]
        assert (len(indices) == self.total_size)
        # Interleaved (strided) split: rank r takes r, r+R, r+2R, ...
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert (len(indices) == self.num_samples)
        return iter(indices)
|
@DATASETS.register_module()
class SRAnnotationDataset(BaseSRDataset):
    """General paired image dataset with an annotation file for image
    restoration.

    The dataset loads lq (Low Quality) and gt (Ground-Truth) image pairs,
    applies specified transforms and finally returns a dict containing
    paired data and other information.

    This is the "annotation file mode":
    Each line in the annotation file contains the image names and
    image shape (usually for gt), separated by a white space.

    Example of an annotation file:

    ::

        0001_s001.png (480,480,3)
        0001_s002.png (480,480,3)

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        ann_file (str | :obj:`Path`): Path to the annotation file.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
        filename_tmpl (str): Template for each filename. Note that the
            template excludes the file extension. Default: '{}'.
    """

    def __init__(self, lq_folder, gt_folder, ann_file, pipeline, scale,
                 test_mode=False, filename_tmpl='{}'):
        super().__init__(pipeline, scale, test_mode)
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.ann_file = str(ann_file)
        self.filename_tmpl = filename_tmpl
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for SR dataset.

        It loads the LQ and GT image path from the annotation file.

        Returns:
            list[dict]: A list of dicts for paired paths of LQ and GT.
        """
        data_infos = []
        with open(self.ann_file, 'r') as fin:
            for line in fin:
                # Robustness fix: the original `line.split(' ')[0]` produced
                # '' for blank lines and kept the trailing newline when a
                # line had no shape column. Strip first and skip blanks.
                line = line.strip()
                if not line:
                    continue
                gt_name = line.split()[0]
                basename, ext = osp.splitext(osp.basename(gt_name))
                lq_name = f'{self.filename_tmpl.format(basename)}{ext}'
                data_infos.append(
                    dict(
                        lq_path=osp.join(self.lq_folder, lq_name),
                        gt_path=osp.join(self.gt_folder, gt_name)))
        return data_infos
|
@DATASETS.register_module()
class SRFacialLandmarkDataset(BaseSRDataset):
    """Facial image and landmark dataset with an annotation file for image
    restoration.

    The dataset loads gt (Ground-Truth) image, shape of image, face box, and
    landmark. Applies specified transforms and finally returns a dict
    containing paired data and other information.

    This is the "annotation file mode":
    Each dict in the annotation list contains the image names, image shape,
    face box, and landmark.

    Annotation file is a `npy` file, which contains a list of dict.
    Example of an annotation file:

    ::

        dict1(file=*, bbox=*, shape=*, landmark=*)
        dict2(file=*, bbox=*, shape=*, landmark=*)

    Args:
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        ann_file (str | :obj:`Path`): Path to the annotation file.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    """

    def __init__(self, gt_folder, ann_file, pipeline, scale, test_mode=False):
        super().__init__(pipeline, scale, test_mode)
        self.gt_folder = str(gt_folder)
        self.ann_file = str(ann_file)
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for SR dataset.

        Annotation file is a `npy` file containing a list of dicts, each
        with the image name, image shape (usually for gt), bbox and
        landmark. The relative ``gt_path`` entries are made absolute by
        joining with ``gt_folder``.

        Returns:
            list[dict]: A list of dicts for GT path and landmark.
                Contains: gt_path, bbox, shape, landmark.
        """
        infos = np.load(self.ann_file, allow_pickle=True)
        for info in infos:
            info['gt_path'] = osp.join(self.gt_folder, info['gt_path'])
        return infos
|
@DATASETS.register_module()
class SRFolderDataset(BaseSRDataset):
    """General paired image folder dataset for image restoration.

    The dataset loads lq (Low Quality) and gt (Ground-Truth) image pairs,
    applies specified transforms and finally returns a dict containing
    paired data and other information.

    This is the "folder mode", which needs to specify the lq folder path
    and gt folder path, each folder containing the corresponding images.
    Image lists will be generated automatically. You can also specify the
    filename template to match the lq and gt pairs.

    For example, we have two folders with the following structures:

    ::

        data_root
        ├── lq
        │   ├── 0001_x4.png
        │   ├── 0002_x4.png
        ├── gt
        │   ├── 0001.png
        │   ├── 0002.png

    then, you need to set:

    .. code-block:: python

        lq_folder = data_root/lq
        gt_folder = data_root/gt
        filename_tmpl = '{}_x4'

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        pipeline (List[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
        filename_tmpl (str): Template for each filename. Note that the
            template excludes the file extension. Default: '{}'.
        ext_lq (str | None): Extension of lq files (including the dot). If
            None, the extension of the first gt file is used. Default: None.
    """

    def __init__(self, lq_folder, gt_folder, pipeline, scale, test_mode=False,
                 filename_tmpl='{}', ext_lq=None):
        super().__init__(pipeline, scale, test_mode)
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.filename_tmpl = filename_tmpl
        self.ext_lq = ext_lq
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for SR dataset.

        It loads the LQ and GT image path from folders.

        Returns:
            list[dict]: A list of dicts for paired paths of LQ and GT.
        """
        data_infos = []
        lq_paths = self.scan_folder(self.lq_folder)
        gt_paths = self.scan_folder(self.gt_folder)
        assert len(lq_paths) == len(gt_paths), (
            f'gt and lq datasets have different number of images: '
            f'{len(lq_paths)}, {len(gt_paths)}.')
        if self.ext_lq is None:
            # Default the LQ extension to the GT extension.
            _, self.ext_lq = osp.splitext(osp.basename(gt_paths[0]))
        # Perf fix: membership test against a set is O(1); testing against
        # the list made this loop quadratic in the dataset size.
        lq_path_set = set(lq_paths)
        for gt_path in gt_paths:
            basename, ext = osp.splitext(osp.basename(gt_path))
            lq_path = osp.join(
                self.lq_folder,
                f'{self.filename_tmpl.format(basename)}{self.ext_lq}')
            assert lq_path in lq_path_set, f'{lq_path} is not in lq_paths.'
            data_infos.append(dict(lq_path=lq_path, gt_path=gt_path))
        return data_infos
|
@DATASETS.register_module()
class SRFolderGTDataset(BaseSRDataset):
    """General ground-truth image folder dataset for image restoration.

    The dataset loads gt (Ground-Truth) image only, applies specified
    transforms and finally returns a dict containing paired data and other
    information.

    This is the "gt folder mode", which needs to specify the gt folder
    path, each folder containing the corresponding images. Image lists
    will be generated automatically.

    For example, we have a folder with the following structure:

    ::

        data_root
        ├── gt
        │   ├── 0001.png
        │   ├── 0002.png

    then, you need to set:

    .. code-block:: python

        gt_folder = data_root/gt

    Args:
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        pipeline (List[dict | callable]): A sequence of data transformations.
        scale (int | tuple): Upsampling scale or upsampling scale range.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
        filename_tmpl (str): Template for each filename (kept for interface
            compatibility; unused here). Default: '{}'.
    """

    def __init__(self, gt_folder, pipeline, scale, test_mode=False,
                 filename_tmpl='{}'):
        super().__init__(pipeline, scale, test_mode)
        self.gt_folder = str(gt_folder)
        self.filename_tmpl = filename_tmpl
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for SR dataset.

        It loads the GT image path from folder.

        Returns:
            list[dict]: A list of dicts for path of GT.
        """
        return [dict(gt_path=p) for p in self.scan_folder(self.gt_folder)]
|
@DATASETS.register_module()
class SRFolderMultipleGTDataset(BaseSRDataset):
    """General dataset for video super resolution, used for recurrent
    networks.

    The dataset loads several LQ (Low-Quality) frames and GT (Ground-Truth)
    frames. Then it applies specified transforms and finally returns a dict
    containing paired data and other information.

    This dataset takes an annotation file specifying the sequences used in
    training or test. If no annotation file is provided, it assumes all
    video sequences under the root directory are used for training or test.

    In the annotation file (.txt), each line contains:

    1. folder name;
    2. number of frames in this sequence (in the same folder)

    Examples:

    ::

        calendar 41
        city 34
        foliage 49
        walk 47

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        ann_file (str): The path to the annotation file. If None, we assume
            that all sequences in the folder are used. Default: None
        num_input_frames (None | int): The number of frames per iteration.
            If None, the whole clip is extracted. If it is a positive
            integer, a sequence of 'num_input_frames' frames is extracted
            from the clip. Note that non-positive integers are not
            accepted. Default: None.
        test_mode (bool): Store `True` when building test dataset.
            Default: `True`.
    """

    def __init__(self, lq_folder, gt_folder, pipeline, scale, ann_file=None,
                 num_input_frames=None, test_mode=True):
        super().__init__(pipeline, scale, test_mode)
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.ann_file = ann_file
        if num_input_frames is not None and num_input_frames <= 0:
            raise ValueError(f'"num_input_frames" must be None or positive, but got {num_input_frames}.')
        self.num_input_frames = num_input_frames
        self.data_infos = self.load_annotations()

    def _load_annotations_from_file(self):
        """Build data infos from the annotation file (one clip per line)."""
        data_infos = []
        ann_list = mmcv.list_from_file(self.ann_file)
        for ann in ann_list:
            # Fix: split on the LAST space only (consistent with
            # SRFolderVideoDataset) so clip names containing spaces work.
            key, sequence_length = ann.strip().rsplit(' ', 1)
            if self.num_input_frames is None:
                # Use the whole clip when no window size is given.
                num_input_frames = sequence_length
            else:
                num_input_frames = self.num_input_frames
            data_infos.append(
                dict(
                    lq_path=self.lq_folder,
                    gt_path=self.gt_folder,
                    key=key,
                    num_input_frames=int(num_input_frames),
                    sequence_length=int(sequence_length)))
        return data_infos

    def load_annotations(self):
        """Load annotations for the dataset.

        Returns:
            list[dict]: Returned list of dicts for paired paths of LQ and
                GT.
        """
        if self.ann_file:
            return self._load_annotations_from_file()
        # No annotation file: every sub-folder of lq_folder is one sequence.
        sequences = sorted(glob.glob(osp.join(self.lq_folder, '*')))
        data_infos = []
        for sequence in sequences:
            sequence_length = len(glob.glob(osp.join(sequence, '*.png')))
            if self.num_input_frames is None:
                num_input_frames = sequence_length
            else:
                num_input_frames = self.num_input_frames
            data_infos.append(
                dict(
                    lq_path=self.lq_folder,
                    gt_path=self.gt_folder,
                    # Keep only the part of the path below lq_folder.
                    key=sequence.replace(f'{self.lq_folder}{os.sep}', ''),
                    num_input_frames=num_input_frames,
                    sequence_length=sequence_length))
        return data_infos
|
@DATASETS.register_module()
class SRFolderRefDataset(BaseSRDataset):
    """General paired image folder dataset for reference-based image
    restoration.

    The dataset loads ref (reference) image pairs
    Must contain: ref (reference)
    Optional: GT (Ground-Truth), LQ (Low Quality), or both
    Cannot only contain ref.

    Applies specified transforms and finally returns a dict containing
    paired data and other information.

    This is the "folder mode", which needs to specify the ref folder path
    and gt folder path, each folder containing the corresponding images.
    Image lists will be generated automatically. You can also specify the
    filename template to match the image pairs.

    For example, we have three folders with the following structures:

    ::

        data_root
        ├── ref
        │   ├── 0001.png
        │   ├── 0002.png
        ├── gt
        │   ├── 0001.png
        │   ├── 0002.png
        ├── lq
        │   ├── 0001_x4.png
        │   ├── 0002_x4.png

    then, you need to set:

    .. code-block:: python

        ref_folder = 'data_root/ref'
        gt_folder = 'data_root/gt'
        lq_folder = 'data_root/lq'
        filename_tmpl_gt='{}'
        filename_tmpl_lq='{}_x4'

    Args:
        pipeline (List[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        ref_folder (str | :obj:`Path`): Path to a ref folder.
        gt_folder (str | :obj:`Path` | None): Path to a gt folder.
            Default: None.
        lq_folder (str | :obj:`Path` | None): Path to a lq folder.
            Default: None.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
        filename_tmpl_gt (str): Template for gt filename. Note that the
            template excludes the file extension. Default: '{}'.
        filename_tmpl_lq (str): Template for lq filename. Note that the
            template excludes the file extension. Default: '{}'.
    """

    def __init__(self, pipeline, scale, ref_folder, gt_folder=None,
                 lq_folder=None, test_mode=False, filename_tmpl_gt='{}',
                 filename_tmpl_lq='{}'):
        super().__init__(pipeline, scale, test_mode)
        # Fix: the original message read "gt_folder andlq_folder".
        assert gt_folder or lq_folder, ('At least one of gt_folder and '
                                        'lq_folder cannot be None.')
        self.scale = scale
        self.ref_folder = str(ref_folder)
        self.gt_folder = str(gt_folder) if gt_folder else None
        self.lq_folder = str(lq_folder) if lq_folder else None
        self.filename_tmpl_gt = filename_tmpl_gt
        self.filename_tmpl_lq = filename_tmpl_lq
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for SR dataset.

        It loads the ref, LQ and GT image path from folders.

        Returns:
            list[dict]: A list of dicts for paired paths of ref, LQ and GT.
        """
        data_infos = []
        ref_paths = self.scan_folder(self.ref_folder)
        if self.gt_folder is not None:
            gt_paths = self.scan_folder(self.gt_folder)
            assert len(ref_paths) == len(gt_paths), (
                f'ref and gt datasets have different number of images: '
                f'{len(ref_paths)}, {len(gt_paths)}.')
            # Perf fix: O(1) set membership instead of O(n) list scans in
            # the loop below (the original was quadratic).
            gt_path_set = set(gt_paths)
        if self.lq_folder is not None:
            lq_paths = self.scan_folder(self.lq_folder)
            assert len(ref_paths) == len(lq_paths), (
                f'ref and lq datasets have different number of images: '
                f'{len(ref_paths)}, {len(lq_paths)}.')
            lq_path_set = set(lq_paths)
        for ref_path in ref_paths:
            basename, ext = osp.splitext(osp.basename(ref_path))
            data_dict = dict(ref_path=ref_path)
            if self.gt_folder is not None:
                gt_path = osp.join(
                    self.gt_folder,
                    f'{self.filename_tmpl_gt.format(basename)}{ext}')
                assert gt_path in gt_path_set, f'{gt_path} is not in gt_paths.'
                data_dict['gt_path'] = gt_path
            if self.lq_folder is not None:
                lq_path = osp.join(
                    self.lq_folder,
                    f'{self.filename_tmpl_lq.format(basename)}{ext}')
                assert lq_path in lq_path_set, f'{lq_path} is not in lq_paths.'
                data_dict['lq_path'] = lq_path
            data_infos.append(data_dict)
        return data_infos
|
@DATASETS.register_module()
class SRFolderVideoDataset(BaseSRDataset):
    """General dataset for video SR, used for sliding-window framework.

    The dataset loads several LQ (Low-Quality) frames and one GT
    (Ground-Truth) frames. Then it applies specified transforms and finally
    returns a dict containing paired data and other information.

    This dataset takes an annotation file specifying the sequences used in
    training or test. If no annotation file is provided, it assumes all
    video sequences under the root directory are used for training or test.

    In the annotation file (.txt), each line contains:

    1. image name (no file extension);
    2. number of frames in the sequence (in the same folder)

    Examples:

    ::

        calendar/00000000 41
        calendar/00000001 41
        ...
        calendar/00000040 41
        city/00000000 34
        ...

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        num_input_frames (int): Window size for input frames.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        ann_file (str): The path to the annotation file. If None, we assume
            that all sequences in the folder is used. Default: None.
        filename_tmpl (str): Template for each filename. Note that the
            template excludes the file extension. Default: '{:08d}'.
        metric_average_mode (str): The way to compute the average metric.
            If 'clip', we first compute an average value for each clip, and
            then average the values from different clips. If 'all', we
            compute the average of all frames. Default: 'clip'.
        test_mode (bool): Store `True` when building test dataset.
            Default: `True`.
    """

    def __init__(self, lq_folder, gt_folder, num_input_frames, pipeline, scale, ann_file=None, filename_tmpl='{:08d}', metric_average_mode='clip', test_mode=True):
        super().__init__(pipeline, scale, test_mode)
        # A sliding window needs a center frame, hence an odd window size.
        assert ((num_input_frames % 2) == 1), f'num_input_frames should be odd numbers, but received {num_input_frames}.'
        if (metric_average_mode not in ['clip', 'all']):
            raise ValueError(f'metric_average_mode can only be "clip" or "all", but got {metric_average_mode}.')
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.num_input_frames = num_input_frames
        self.ann_file = ann_file
        self.filename_tmpl = filename_tmpl
        self.metric_average_mode = metric_average_mode
        self.data_infos = self.load_annotations()

    def _load_annotations_from_file(self):
        """Build data infos from the annotation file (one frame per line).

        Also populates ``self.folders`` (clip name -> frame count), which
        drives the per-clip averaging in ``evaluate``.
        """
        self.folders = {}
        data_infos = []
        ann_list = mmcv.list_from_file(self.ann_file)
        for ann in ann_list:
            # rsplit: only the trailing token is the frame count, so keys
            # may contain spaces.
            (key, max_frame_num) = ann.strip().rsplit(' ', 1)
            key = key.replace('/', os.sep)
            sequence = osp.basename(key)
            if (sequence not in self.folders):
                self.folders[sequence] = int(max_frame_num)
            data_infos.append(dict(lq_path=self.lq_folder, gt_path=self.gt_folder, key=key, num_input_frames=self.num_input_frames, max_frame_num=int(max_frame_num)))
        return data_infos

    def load_annotations(self):
        """Load annotations for the dataset.

        Returns:
            list[dict]: A list of dicts for paired paths and other
                information.
        """
        if self.ann_file:
            return self._load_annotations_from_file()
        self.folders = {}
        data_infos = []
        sequences = sorted(glob.glob(osp.join(self.lq_folder, '*')))
        # Keep only the folder name, handling both path separators.
        sequences = [re.split('[\\\\/]', s)[(- 1)] for s in sequences]
        for sequence in sequences:
            seq_dir = osp.join(self.lq_folder, sequence)
            max_frame_num = len(list(mmcv.utils.scandir(seq_dir)))
            self.folders[sequence] = max_frame_num
            # One sample per (clip, frame) pair — sliding-window style.
            for i in range(0, max_frame_num):
                data_infos.append(dict(lq_path=self.lq_folder, gt_path=self.gt_folder, key=osp.join(sequence, self.filename_tmpl.format(i)), num_input_frames=self.num_input_frames, max_frame_num=max_frame_num))
        return data_infos

    def evaluate(self, results, logger=None):
        """Evaluate with different metrics.

        Args:
            results (list[tuple]): The output of forward_test() of the
                model; each element carries an 'eval_result' dict of
                per-frame metric values.
            logger: Unused here; kept for API compatibility.

        Return:
            dict: Evaluation results dict.
        """
        if (not isinstance(results, list)):
            raise TypeError(f'results must be a list, but got {type(results)}')
        assert (len(results) == len(self)), f'The length of results is not equal to the dataset len: {len(results)} != {len(self)}'
        results = [res['eval_result'] for res in results]
        # Regroup: metric name -> list of per-frame values.
        eval_result = defaultdict(list)
        for res in results:
            for (metric, val) in res.items():
                eval_result[metric].append(val)
        for (metric, val_list) in eval_result.items():
            assert (len(val_list) == len(self)), f'Length of evaluation result of {metric} is {len(val_list)}, should be {len(self)}'
        if (self.metric_average_mode == 'clip'):
            # Average within each clip first, then across clips, so short
            # clips weigh the same as long ones.
            # NOTE(review): assumes ``results`` are ordered clip-by-clip in
            # the same order as ``self.folders`` (true for data_infos built
            # by load_annotations) — confirm if results are reordered
            # upstream.
            for (metric, values) in eval_result.items():
                start_idx = 0
                metric_avg = 0
                for (_, num_img) in self.folders.items():
                    end_idx = (start_idx + num_img)
                    folder_values = values[start_idx:end_idx]
                    metric_avg += np.mean(folder_values)
                    start_idx = end_idx
                eval_result[metric] = (metric_avg / len(self.folders))
        else:
            eval_result = {metric: (sum(values) / len(self)) for (metric, values) in eval_result.items()}
        return eval_result
|
@DATASETS.register_module()
class SRLDVDataset(BaseSRDataset):
    """LDV dataset for video super resolution.

    The dataset loads several LQ (Low-Quality) frames and a center GT
    (Ground-Truth) frame. Then it applies specified transforms and finally
    returns a dict containing paired data and other information.

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        num_input_frames (int): Window size for input frames.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        max_need_frms (int): In test mode, at most this many frames are
            sampled from each clip. Default: 100.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.

    @ryanxingql
    """

    def __init__(self, lq_folder, gt_folder, num_input_frames, pipeline,
                 scale, max_need_frms=100, test_mode=False):
        super().__init__(pipeline, scale)
        assert num_input_frames % 2 == 1, f'num_input_frames should be odd numbers, but received {num_input_frames}.'
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.num_input_frames = num_input_frames
        self.test_mode = test_mode
        self.max_need_frms = max_need_frms
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for LDV dataset.

        Returns:
            list[dict]: A list of dicts for paired paths and other
                information.
        """
        data_infos = []
        # NOTE(review): sibling datasets in this module call ``glob.glob``;
        # confirm the file header really provides a callable ``glob``.
        vid_list = sorted(glob(osp.join(self.lq_folder, '*/')))
        for vid_dir in vid_list:
            frm_list = sorted(glob(osp.join(vid_dir, '*.png')))
            # Fix: derive names via os.path instead of splitting on '/',
            # which broke on Windows-style separators.
            vid_name = osp.basename(osp.normpath(vid_dir))
            max_frm_num = len(frm_list)
            if self.test_mode and max_frm_num > self.max_need_frms:
                # Sub-sample the clip down to ``max_need_frms`` frames.
                # NOTE(review): ``random.shuffle`` is unseeded, so the
                # sampled subset differs between runs — confirm intended.
                random.shuffle(frm_list)
                frm_list = frm_list[:self.max_need_frms]
            for frm_path in frm_list:
                # Keep the historical "text before the first dot" stem.
                frm_name = osp.basename(frm_path).split('.')[0]
                clip_frm_name = f'{vid_name}/{frm_name}'
                data_infos.append(
                    dict(
                        lq_path=self.lq_folder,
                        gt_path=self.gt_folder,
                        key=clip_frm_name,
                        max_frame_num=max_frm_num,
                        num_input_frames=self.num_input_frames))
        return data_infos
|
@DATASETS.register_module()
class SRLmdbDataset(BaseSRDataset):
    """General paired image lmdb dataset for image restoration.

    The dataset loads lq (Low Quality) and gt (Ground-Truth) image pairs,
    applies specified transforms and finally returns a dict containing
    paired data and other information.

    This is the "lmdb mode". Suppose the lmdb files are path_to_lq/lq.lmdb
    and path_to_gt/gt.lmdb, then you can just set:

    .. code-block:: python

        lq_folder = path_to_lq/lq.lmdb
        gt_folder = path_to_gt/gt.lmdb

    Each lmdb directory contains ``data.mdb``, ``lock.mdb`` and a
    ``meta_info.txt``. Each line of ``meta_info.txt`` records

    1. image name (with extension);
    2. image shape;
    3. compression level, separated by a white space.

    For example: `baboon.png (120,125,3) 1`. The image name without
    extension is used as the lmdb key, and the same key addresses the
    corresponding lq and gt entries.

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq lmdb file.
        gt_folder (str | :obj:`Path`): Path to a gt lmdb file.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    """

    def __init__(self, lq_folder, gt_folder, pipeline, scale, test_mode=False):
        super().__init__(pipeline, scale, test_mode)
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.scale = scale
        # Both folders must be lmdb directories (by naming convention).
        is_lmdb = (self.gt_folder.endswith('.lmdb')
                   and self.lq_folder.endswith('.lmdb'))
        if not is_lmdb:
            raise ValueError(f'gt folder and lq folder should both in lmdb format. But received gt: {self.gt_folder}; lq: {self.lq_folder}')
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for SR dataset.

        It loads the LQ and GT image path from the ``meta_info.txt`` in the
        LMDB files.

        Returns:
            list[dict]: A list of dicts for paired paths of LQ and GT.
        """
        meta_file = osp.join(self.gt_folder, 'meta_info.txt')
        data_infos = []
        with open(meta_file) as fin:
            for line in fin:
                # First token, extension stripped, is the lmdb key.
                lmdb_key = line.split(' ')[0].split('.')[0]
                data_infos.append(dict(lq_path=lmdb_key, gt_path=lmdb_key))
        return data_infos
|
@DATASETS.register_module()
class SRREDSDataset(BaseSRDataset):
    """REDS dataset for video super resolution.

    The dataset loads several LQ (Low-Quality) frames and a center GT
    (Ground-Truth) frame. Then it applies specified transforms and finally
    returns a dict containing paired data and other information.

    It reads REDS keys from the txt file. Each line contains:
    1. image name; 2, image shape, separated by a white space.
    Examples:

    ::

        000/00000000.png (720, 1280, 3)
        000/00000001.png (720, 1280, 3)

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        ann_file (str | :obj:`Path`): Path to the annotation file.
        num_input_frames (int): Window size for input frames.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        val_partition (str): Validation partition mode. Choices ['official'
            or 'REDS4']. Default: 'official'.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    """

    def __init__(self, lq_folder, gt_folder, ann_file, num_input_frames,
                 pipeline, scale, val_partition='official', test_mode=False):
        super().__init__(pipeline, scale, test_mode)
        assert num_input_frames % 2 == 1, f'num_input_frames should be odd numbers, but received {num_input_frames}.'
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.ann_file = str(ann_file)
        self.num_input_frames = num_input_frames
        self.val_partition = val_partition
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for REDS dataset.

        Returns:
            list[dict]: A list of dicts for paired paths and other
                information.
        """
        with open(self.ann_file, 'r') as fin:
            # "000/00000000.png (720, 1280, 3)" -> "000/00000000"
            keys = [v.strip().split('.')[0] for v in fin]
        keys = [key.replace('/', os.sep) for key in keys]
        if self.val_partition == 'REDS4':
            val_partition = ['000', '011', '015', '020']
        elif self.val_partition == 'official':
            val_partition = [f'{v:03d}' for v in range(240, 270)]
        else:
            # Fix: the original message lacked the space after the period
            # ("...{x}.Supported ones...").
            raise ValueError(f'Wrong validation partition {self.val_partition}. Supported ones are ["official", "REDS4"]')
        if self.test_mode:
            keys = [v for v in keys if v.split(os.sep)[0] in val_partition]
        else:
            keys = [v for v in keys if v.split(os.sep)[0] not in val_partition]
        data_infos = []
        for key in keys:
            # max_frame_num hard-coded to 100 — presumably the REDS clip
            # length; confirm against the prepared data.
            data_infos.append(
                dict(
                    lq_path=self.lq_folder,
                    gt_path=self.gt_folder,
                    key=key,
                    max_frame_num=100,
                    num_input_frames=self.num_input_frames))
        return data_infos
|
@DATASETS.register_module()
class SRREDSMultipleGTDataset(BaseSRDataset):
    """REDS dataset for video super resolution for recurrent networks.

    The dataset loads several LQ (Low-Quality) frames and GT (Ground-Truth)
    frames. Then it applies specified transforms and finally returns a dict
    containing paired data and other information.

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        num_input_frames (int): Number of input frames.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        val_partition (str): Validation partition mode. Choices ['official' or
            'REDS4']. Default: 'official'.
        repeat (int): Number of replication of the validation set. This is
            used to allow training REDS4 with more than 4 GPUs. For example,
            if 8 GPUs are used, this number can be set to 2. Default: 1.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    """

    def __init__(self,
                 lq_folder,
                 gt_folder,
                 num_input_frames,
                 pipeline,
                 scale,
                 val_partition='official',
                 repeat=1,
                 test_mode=False):
        # Validate ``repeat`` before storing it so an invalid value is
        # never observable on the instance.
        if not isinstance(repeat, int):
            raise TypeError(
                f'"repeat" must be an integer, but got {type(repeat)}.')
        self.repeat = repeat
        super().__init__(pipeline, scale, test_mode)
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.num_input_frames = num_input_frames
        self.val_partition = val_partition
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for REDS dataset.

        Returns:
            list[dict]: A list of dicts for paired paths and other
                information.
        """
        # REDS contains 270 clips named '000'..'269'.
        keys = [f'{i:03d}' for i in range(0, 270)]
        if self.val_partition == 'REDS4':
            val_partition = ['000', '011', '015', '020']
        elif self.val_partition == 'official':
            val_partition = [f'{i:03d}' for i in range(240, 270)]
        else:
            # Fix: the original message lacked a space after the period
            # ('...partition X.Supported...').
            raise ValueError(
                f'Wrong validation partition {self.val_partition}. '
                'Supported ones are ["official", "REDS4"]')
        if self.test_mode:
            keys = [v for v in keys if v in val_partition]
            # Replicate the (small) validation set so it can be sharded
            # across more GPUs than it has clips.
            keys *= self.repeat
        else:
            keys = [v for v in keys if v not in val_partition]
        data_infos = []
        for key in keys:
            data_infos.append(
                dict(
                    lq_path=self.lq_folder,
                    gt_path=self.gt_folder,
                    key=key,
                    sequence_length=100,  # every REDS clip has 100 frames
                    num_input_frames=self.num_input_frames))
        return data_infos
|
@DATASETS.register_module()
class SRTestMultipleGTDataset(BaseSRDataset):
    """Test dataset for video super resolution for recurrent networks.

    It assumes all video sequences under the root directory is used for
    test.

    The dataset loads several LQ (Low-Quality) frames and GT (Ground-Truth)
    frames. Then it applies specified transforms and finally returns a dict
    containing paired data and other information.

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        test_mode (bool): Store `True` when building test dataset.
            Default: `True`.
    """

    def __init__(self, lq_folder, gt_folder, pipeline, scale, test_mode=True):
        super().__init__(pipeline, scale, test_mode)
        warnings.warn(
            '"SRTestMultipleGTDataset" have been deprecated and will be '
            'removed in future release. Please use '
            '"SRFolderMultipleGTDataset" instead. Details see '
            'https://github.com/open-mmlab/mmediting/pull/355')
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for the test dataset.

        Returns:
            list[dict]: A list of dicts for paired paths and other
                information.
        """
        data_infos = []
        prefix = f'{self.lq_folder}{os.sep}'
        # Every directory under the LQ root is treated as one sequence.
        for sequence in sorted(glob.glob(osp.join(self.lq_folder, '*'))):
            num_frames = len(glob.glob(osp.join(sequence, '*.png')))
            data_infos.append(
                dict(
                    lq_path=self.lq_folder,
                    gt_path=self.gt_folder,
                    key=sequence.replace(prefix, ''),
                    sequence_length=int(num_frames)))
        return data_infos
|
@DATASETS.register_module()
class SRVid4Dataset(BaseSRDataset):
    """Vid4 dataset for video super resolution.

    The dataset loads several LQ (Low-Quality) frames and a center GT
    (Ground-Truth) frame. Then it applies specified transforms and finally
    returns a dict containing paired data and other information.

    It reads Vid4 keys from the txt file. Each line contains:

    1. folder name;
    2. number of frames in this clip (in the same folder);
    3. image shape, separated by a white space.

    Examples:

    ::

        calendar 40 (320,480,3)
        city 34 (320,480,3)

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        ann_file (str | :obj:`Path`): Path to the annotation file.
        num_input_frames (int): Window size for input frames.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        filename_tmpl (str): Template for each filename. Note that the
            template excludes the file extension. Default: '{:08d}'.
        metric_average_mode (str): The way to compute the average metric.
            If 'clip', we first compute an average value for each clip, and
            then average the values from different clips. If 'all', we
            compute the average of all frames. Default: 'clip'.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    """

    def __init__(self,
                 lq_folder,
                 gt_folder,
                 ann_file,
                 num_input_frames,
                 pipeline,
                 scale,
                 filename_tmpl='{:08d}',
                 metric_average_mode='clip',
                 test_mode=False):
        super().__init__(pipeline, scale, test_mode)
        # The input window is centered on the GT frame, so it must be odd.
        assert num_input_frames % 2 == 1, (
            f'num_input_frames should be odd numbers, '
            f'but received {num_input_frames}.')
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.ann_file = str(ann_file)
        self.num_input_frames = num_input_frames
        self.filename_tmpl = filename_tmpl
        if metric_average_mode not in ['clip', 'all']:
            raise ValueError(
                f'metric_average_mode can only be "clip" or "all", '
                f'but got {metric_average_mode}.')
        self.metric_average_mode = metric_average_mode
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for Vid4 dataset.

        Returns:
            list[dict]: A list of dicts for paired paths and other
                information.
        """
        # Per-clip frame counts, kept for clip-wise metric averaging.
        self.folders = {}
        data_infos = []
        with open(self.ann_file, 'r') as fin:
            for line in fin:
                folder, frame_num, _ = line.strip().split(' ')
                frame_num = int(frame_num)
                self.folders[folder] = frame_num
                for frame_idx in range(frame_num):
                    data_infos.append(
                        dict(
                            lq_path=self.lq_folder,
                            gt_path=self.gt_folder,
                            key=os.path.join(
                                folder, self.filename_tmpl.format(frame_idx)),
                            num_input_frames=self.num_input_frames,
                            max_frame_num=frame_num))
        return data_infos

    def evaluate(self, results, logger=None):
        """Evaluate with different metrics.

        Args:
            results (list[tuple]): The output of forward_test() of the
                model.

        Return:
            dict: Evaluation results dict.
        """
        if not isinstance(results, list):
            raise TypeError(f'results must be a list, but got {type(results)}')
        assert len(results) == len(self), (
            f'The length of results is not equal to the dataset len: '
            f'{len(results)} != {len(self)}')
        frame_results = [res['eval_result'] for res in results]
        # Collect per-frame values for every metric.
        eval_result = defaultdict(list)
        for frame_result in frame_results:
            for metric, val in frame_result.items():
                eval_result[metric].append(val)
        for metric, val_list in eval_result.items():
            assert len(val_list) == len(self), (
                f'Length of evaluation result of {metric} is '
                f'{len(val_list)}, should be {len(self)}')
        if self.metric_average_mode == 'clip':
            # Average within each clip first, then across clips, so that
            # short and long clips contribute equally.
            for metric, values in eval_result.items():
                clip_means = []
                start_idx = 0
                for num_img in self.folders.values():
                    clip_means.append(
                        np.mean(values[start_idx:start_idx + num_img]))
                    start_idx += num_img
                eval_result[metric] = sum(clip_means) / len(self.folders)
        else:
            eval_result = {
                metric: sum(values) / len(self)
                for metric, values in eval_result.items()
            }
        return eval_result
|
@DATASETS.register_module()
class SRVimeo90KDataset(BaseSRDataset):
    """Vimeo90K dataset for video super resolution.

    The dataset loads several LQ (Low-Quality) frames and a center GT
    (Ground-Truth) frame. Then it applies specified transforms and finally
    returns a dict containing paired data and other information.

    It reads Vimeo90K keys from the txt file. Each line contains:
    1. image name; 2, image shape, separated by a white space.
    Examples:

    ::

        00001/0266 (256, 448, 3)
        00001/0268 (256, 448, 3)

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        ann_file (str | :obj:`Path`): Path to the annotation file.
        num_input_frames (int): Window size for input frames.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    """

    def __init__(self,
                 lq_folder,
                 gt_folder,
                 ann_file,
                 num_input_frames,
                 pipeline,
                 scale,
                 test_mode=False):
        super().__init__(pipeline, scale, test_mode)
        # The input window is centered on the GT frame, so it must be odd.
        assert num_input_frames % 2 == 1, (
            f'num_input_frames should be odd numbers, '
            f'but received {num_input_frames}.')
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.ann_file = str(ann_file)
        self.num_input_frames = num_input_frames
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for VimeoK dataset.

        Returns:
            list[dict]: A list of dicts for paired paths and other
                information.
        """
        with open(self.ann_file, 'r') as fin:
            keys = [line.strip().split(' ')[0] for line in fin]
        # Each clip has frames im1..im7 with GT at im4; pick a window of
        # ``num_input_frames`` frames centered on im4.
        offset = (9 - self.num_input_frames) // 2
        frame_indices = [i + offset for i in range(self.num_input_frames)]
        data_infos = []
        for key in keys:
            key = key.replace('/', os.sep)
            folder, subfolder = key.split(os.sep)
            lq_paths = [
                osp.join(self.lq_folder, folder, subfolder, f'im{i}.png')
                for i in frame_indices
            ]
            gt_paths = [osp.join(self.gt_folder, folder, subfolder, 'im4.png')]
            data_infos.append(
                dict(lq_path=lq_paths, gt_path=gt_paths, key=key))
        return data_infos
|
@DATASETS.register_module()
class SRVimeo90KMultipleGTDataset(BaseSRDataset):
    """Vimeo90K dataset for video super resolution for recurrent networks.

    The dataset loads several LQ (Low-Quality) frames and GT (Ground-Truth)
    frames. Then it applies specified transforms and finally returns a dict
    containing paired data and other information.

    It reads Vimeo90K keys from the txt file. Each line contains:

    1. video frame folder
    2. image shape

    Examples:

    ::

        00001/0266 (256,448,3)
        00001/0268 (256,448,3)

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        ann_file (str | :obj:`Path`): Path to the annotation file.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        num_input_frames (int): Number of frames in each training sequence.
            Default: 7.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    """

    def __init__(self,
                 lq_folder,
                 gt_folder,
                 ann_file,
                 pipeline,
                 scale,
                 num_input_frames=7,
                 test_mode=False):
        super().__init__(pipeline, scale, test_mode)
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.ann_file = str(ann_file)
        self.num_input_frames = num_input_frames
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for Vimeo-90K dataset.

        Returns:
            list[dict]: A list of dicts for paired paths and other
                information.
        """
        with open(self.ann_file, 'r') as fin:
            keys = [line.strip().split(' ')[0] for line in fin]
        # Frames in each clip are named im1.png .. im{num_input_frames}.png.
        frame_ids = range(1, self.num_input_frames + 1)
        data_infos = []
        for key in keys:
            key = key.replace('/', os.sep)
            data_infos.append(
                dict(
                    lq_path=[
                        osp.join(self.lq_folder, key, f'im{i}.png')
                        for i in frame_ids
                    ],
                    gt_path=[
                        osp.join(self.gt_folder, key, f'im{i}.png')
                        for i in frame_ids
                    ],
                    key=key))
        return data_infos
|
@DATASETS.register_module()
class VFIVimeo90KDataset(BaseVFIDataset):
    """Vimeo90K dataset for video frame interpolation.

    The dataset loads two input frames and a center GT (Ground-Truth) frame.
    Then it applies specified transforms and finally returns a dict
    containing paired data and other information.

    It reads Vimeo90K keys from the txt file.
    Each line contains:

    Examples:

    ::

        00001/0389
        00001/0402

    Args:
        pipeline (list[dict | callable]): A sequence of data transformations.
        folder (str | :obj:`Path`): Path to the folder.
        ann_file (str | :obj:`Path`): Path to the annotation file.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    """

    def __init__(self, pipeline, folder, ann_file, test_mode=False):
        super().__init__(pipeline, folder, ann_file, test_mode)
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for VimeoK dataset.

        Returns:
            list[dict]: A list of dicts for paired paths and other
                information.
        """
        with open(self.ann_file, 'r') as f:
            # Fix: the previous filter used ``k.strip() is not None``
            # (always True, since str.strip() never returns None) and
            # ``k != ''``, which let whitespace-only lines through as
            # empty keys. Keep only lines with real content.
            keys = [line.strip() for line in f if line.strip()]
        data_infos = []
        for key in keys:
            key = key.replace('/', os.sep)
            key_folder = osp.join(self.folder, key)
            # im1/im3 are the two inputs; im2 is the in-between GT frame.
            inputs_path = [
                osp.join(key_folder, 'im1.png'),
                osp.join(key_folder, 'im3.png')
            ]
            target_path = osp.join(key_folder, 'im2.png')
            data_infos.append(
                dict(
                    inputs_path=inputs_path,
                    target_path=target_path,
                    key=key))
        return data_infos
|
@BACKBONES.register_module()
class AOTEncoderDecoder(GLEncoderDecoder):
    """Encoder-Decoder used in AOT-GAN model.

    This implementation follows:
    Aggregated Contextual Transformations for High-Resolution Image
    Inpainting.

    The architecture of the encoder-decoder is:
    (conv2d x 3) --> (dilated conv2d x 8) --> (conv2d or deconv2d x 3)

    Args:
        encoder (dict): Config dict to encoder.
        decoder (dict): Config dict to build decoder.
        dilation_neck (dict): Config dict to build dilation neck.
    """

    def __init__(self,
                 encoder=dict(type='AOTEncoder'),
                 decoder=dict(type='AOTDecoder'),
                 dilation_neck=dict(type='AOTBlockNeck')):
        super().__init__()
        # Replace the modules built by GLEncoderDecoder with the
        # AOT-specific components.
        for attr_name, cfg in (('encoder', encoder), ('decoder', decoder),
                               ('dilation_neck', dilation_neck)):
            setattr(self, attr_name, build_component(cfg))
|
@COMPONENTS.register_module()
class AOTDecoder(nn.Module):
    """Decoder used in AOT-GAN model.

    This implementation follows:
    Aggregated Contextual Transformations for High-Resolution Image
    Inpainting.

    Args:
        in_channels (int, optional): Channel number of input feature.
            Default: 256.
        mid_channels (int, optional): Channel number of middle feature.
            Default: 128.
        out_channels (int, optional): Channel number of output feature.
            Default 3.
        act_cfg (dict, optional): Config dict for activation layer,
            "relu" by default.
    """

    def __init__(self,
                 in_channels=256,
                 mid_channels=128,
                 out_channels=3,
                 act_cfg=dict(type='ReLU')):
        super().__init__()
        # (in, out, activation) per 3x3 conv layer; the last layer has no
        # intermediate activation because tanh is applied at the output.
        layer_specs = [
            (in_channels, mid_channels, act_cfg),
            (mid_channels, mid_channels // 2, act_cfg),
            (mid_channels // 2, out_channels, None),
        ]
        self.decoder = nn.ModuleList([
            ConvModule(
                c_in, c_out, kernel_size=3, stride=1, padding=1, act_cfg=act)
            for c_in, c_out, act in layer_specs
        ])
        self.output_act = nn.Tanh()

    def forward(self, x):
        """Forward Function.

        Args:
            x (Tensor): Input tensor with shape of (n, c, h, w).

        Returns:
            Tensor: Output tensor with shape of (n, c, h', w').
        """
        for layer_idx, layer in enumerate(self.decoder):
            # Upsample by 2x before each of the first two conv layers.
            if layer_idx <= 1:
                x = F.interpolate(
                    x, scale_factor=2, mode='bilinear', align_corners=True)
            x = layer(x)
        return self.output_act(x)
|
@COMPONENTS.register_module()
class DeepFillDecoder(nn.Module):
    """Decoder used in DeepFill model.

    This implementation follows:
    Generative Image Inpainting with Contextual Attention

    Args:
        in_channels (int): The number of input channels.
        conv_type (str): The type of conv module. In DeepFillv1 model, the
            `conv_type` should be 'conv'. In DeepFillv2 model, the
            `conv_type` should be 'gated_conv'.
        norm_cfg (dict): Config dict to build norm layer. Default: None.
        act_cfg (dict): Config dict for activation layer, "elu" by default.
        out_act_cfg (dict): Config dict for output activation layer. Here,
            we provide commonly used `clamp` or `clip` operation.
        channel_factor (float): The scale factor for channel size.
            Default: 1.
        kwargs (keyword arguments).
    """
    _conv_type = dict(conv=ConvModule, gated_conv=SimpleGatedConvModule)

    def __init__(self,
                 in_channels,
                 conv_type='conv',
                 norm_cfg=None,
                 act_cfg=dict(type='ELU'),
                 out_act_cfg=dict(type='clip', min=-1.0, max=1.0),
                 channel_factor=1.0,
                 **kwargs):
        super().__init__()
        self.with_out_activation = out_act_cfg is not None
        conv_module = self._conv_type[conv_type]
        out_channels_list = [
            int(c * channel_factor) for c in (128, 128, 64, 64, 32, 16, 3)
        ]
        # The final layer always produces a 3-channel image regardless of
        # the channel factor.
        out_channels_list[-1] = 3
        for layer_idx, out_ch in enumerate(out_channels_list):
            layer_kwargs = copy.deepcopy(kwargs)
            if layer_idx == 6:
                # No activation on the image-producing layer.
                act_cfg = None
                if conv_type == 'gated_conv':
                    layer_kwargs['feat_act_cfg'] = None
            self.add_module(
                f'dec{layer_idx + 1}',
                conv_module(
                    in_channels,
                    out_ch,
                    kernel_size=3,
                    padding=1,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg,
                    **layer_kwargs))
            in_channels = out_ch
        if self.with_out_activation:
            if out_act_cfg['type'] == 'clip':
                # ``clip`` is implemented as torch.clamp with the
                # remaining cfg entries (min/max) as keyword arguments.
                act_cfg_ = copy.deepcopy(out_act_cfg)
                act_cfg_.pop('type')
                self.out_act = partial(torch.clamp, **act_cfg_)
            else:
                self.out_act = build_activation_layer(out_act_cfg)

    def forward(self, input_dict):
        """Forward Function.

        Args:
            input_dict (dict | torch.Tensor): Input dict with middle
                features or torch.Tensor.

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h, w).
        """
        x = input_dict['out'] if isinstance(input_dict, dict) else input_dict
        for layer_idx in range(7):
            x = getattr(self, f'dec{layer_idx + 1}')(x)
            # Upsample 2x after the 2nd and 4th layers.
            if layer_idx in (1, 3):
                x = F.interpolate(x, scale_factor=2)
        if self.with_out_activation:
            x = self.out_act(x)
        return x
|
@COMPONENTS.register_module()
class GLDecoder(nn.Module):
    """Decoder used in Global&Local model.

    This implementation follows:
    Globally and locally Consistent Image Completion

    Args:
        in_channels (int): Channel number of input feature.
        norm_cfg (dict): Config dict to build norm layer.
        act_cfg (dict): Config dict for activation layer, "relu" by default.
        out_act (str): Output activation type, "clip" by default. Noted that
            in our implementation, we clip the output with range [-1, 1].
    """

    def __init__(self,
                 in_channels=256,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 out_act='clip'):
        super().__init__()
        # (in, out, kernel, stride, use_deconv) for dec1..dec6; the two
        # stride-2 deconv layers perform the 4x total upsampling.
        conv_specs = [
            (in_channels, 256, 3, 1, False),
            (256, 256, 3, 1, False),
            (256, 128, 4, 2, True),
            (128, 128, 3, 1, False),
            (128, 64, 4, 2, True),
            (64, 32, 3, 1, False),
        ]
        for idx, (c_in, c_out, k, s, use_deconv) in enumerate(conv_specs, 1):
            setattr(
                self, f'dec{idx}',
                ConvModule(
                    c_in,
                    c_out,
                    kernel_size=k,
                    stride=s,
                    padding=1,
                    conv_cfg=dict(type='Deconv') if use_deconv else None,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
        # The last layer maps to RGB without norm or activation.
        self.dec7 = ConvModule(
            32, 3, kernel_size=3, stride=1, padding=1, norm_cfg=None,
            act_cfg=None)
        if out_act == 'sigmoid':
            self.output_act = nn.Sigmoid()
        elif out_act == 'clip':
            self.output_act = partial(torch.clamp, min=-1, max=1.0)
        else:
            raise ValueError(
                f'{out_act} activation for output has not be supported.')

    def forward(self, x):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of (n, c, h, w).

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h', w').
        """
        for idx in range(1, 8):
            x = getattr(self, f'dec{idx}')(x)
        return self.output_act(x)
|
class IndexedUpsample(nn.Module):
    """Indexed upsample module.

    Args:
        in_channels (int): Input channels.
        out_channels (int): Output channels.
        kernel_size (int, optional): Kernel size of the convolution layer.
            Defaults to 5.
        norm_cfg (dict, optional): Config dict for normalization layer.
            Defaults to dict(type='BN').
        conv_module (ConvModule | DepthwiseSeparableConvModule, optional):
            Conv module. Defaults to ConvModule.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=5,
                 norm_cfg=dict(type='BN'),
                 conv_module=ConvModule):
        super().__init__()
        self.conv = conv_module(
            in_channels,
            out_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU6'))
        self.init_weights()

    def init_weights(self):
        """Init weights for the module."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                kaiming_init(module, mode='fan_in', nonlinearity='leaky_relu')

    def forward(self, x, shortcut, dec_idx_feat=None):
        """Forward function.

        Args:
            x (Tensor): Input feature map with shape (N, C, H, W).
            shortcut (Tensor): The shortcut connection with shape
                (N, C, H', W').
            dec_idx_feat (Tensor, optional): The decode index feature map
                with shape (N, C, H', W'). Defaults to None.

        Returns:
            Tensor: Output tensor with shape (N, C, H', W').
        """
        if dec_idx_feat is not None:
            assert shortcut.dim() == 4, (
                'shortcut must be tensor with 4 dimensions')
            # Upsample to the shortcut resolution, then gate with the
            # decoder index features.
            upsampled = F.interpolate(x, size=shortcut.shape[2:])
            x = dec_idx_feat * upsampled
        return self.conv(torch.cat((x, shortcut), dim=1))
|
@COMPONENTS.register_module()
class IndexNetDecoder(nn.Module):
    """Decoder for IndexNet.

    Stacks :class:`IndexedUpsample` blocks followed by a prediction head
    that outputs a single-channel alpha matte.

    Args:
        in_channels (int): Input channels of the first decoder layer.
        kernel_size (int, optional): Kernel size of the convolution layers.
            Defaults to 5.
        norm_cfg (dict, optional): Config dict for normalization layer.
            Defaults to dict(type='BN').
        separable_conv (bool, optional): Whether to use depthwise separable
            convolution modules. Defaults to False.
    """

    def __init__(self,
                 in_channels,
                 kernel_size=5,
                 norm_cfg=dict(type='BN'),
                 separable_conv=False):
        super().__init__()
        conv_module = (DepthwiseSeparableConvModule
                       if separable_conv else ConvModule)
        # Each layer receives its own features concatenated with an
        # encoder shortcut, hence the doubled input channels.
        out_channels_list = (96, 64, 32, 24, 16, 32, 32)
        in_channels_list = [
            2 * c for c in (in_channels, 96, 64, 32, 24, 16, 32)
        ]
        self.decoder_layers = nn.ModuleList(
            IndexedUpsample(c_in, c_out, kernel_size, norm_cfg, conv_module)
            for c_in, c_out in zip(in_channels_list, out_channels_list))
        padding = (kernel_size - 1) // 2
        self.pred = nn.Sequential(
            conv_module(
                32,
                1,
                kernel_size,
                padding=padding,
                norm_cfg=norm_cfg,
                act_cfg=dict(type='ReLU6')),
            nn.Conv2d(1, 1, kernel_size, padding=padding, bias=False))

    def init_weights(self):
        """Init weights for the module."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                std = math.sqrt(
                    2.0 / (module.out_channels * module.kernel_size[0]**2))
                normal_init(module, mean=0, std=std)

    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (dict): Output dict of IndexNetEncoder.

        Returns:
            Tensor: Predicted alpha matte of the current batch.
        """
        out = inputs['out']
        # Shortcuts and index features are stored encoder-first; the
        # decoder consumes them in reverse (deepest-first) order.
        stages = zip(self.decoder_layers, reversed(inputs['shortcuts']),
                     reversed(inputs['dec_idx_feat_list']))
        for decode_layer, shortcut, dec_idx_feat in stages:
            out = decode_layer(out, shortcut, dec_idx_feat)
        return self.pred(out)
|
@COMPONENTS.register_module()
class PConvDecoder(nn.Module):
    """Decoder with partial conv.

    About the details for this architecture, pls see:
    Image Inpainting for Irregular Holes Using Partial Convolutions

    Args:
        num_layers (int): The number of convolutional layers. Default: 7.
        interpolation (str): The upsample mode. Default: 'nearest'.
        conv_cfg (dict): Config for convolution module. Default:
            {'type': 'PConv', 'multi_channel': True}.
        norm_cfg (dict): Config for norm layer. Default:
            {'type': 'BN'}.
    """

    def __init__(self,
                 num_layers=7,
                 interpolation='nearest',
                 conv_cfg=dict(type='PConv', multi_channel=True),
                 norm_cfg=dict(type='BN')):
        super().__init__()
        self.num_layers = num_layers
        self.interpolation = interpolation
        # Deep layers (dec5 and above) all map 512+512 -> 512.
        for layer_idx in range(4, num_layers):
            self.add_module(
                f'dec{layer_idx + 1}',
                MaskConvModule(
                    512 + 512,
                    512,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=dict(type='LeakyReLU', negative_slope=0.2)))
        # The shallower layers progressively reduce the channel count.
        shallow_specs = ((4, 512 + 256, 256), (3, 256 + 128, 128),
                         (2, 128 + 64, 64))
        for layer_idx, c_in, c_out in shallow_specs:
            self.add_module(
                f'dec{layer_idx}',
                MaskConvModule(
                    c_in,
                    c_out,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=dict(type='LeakyReLU', negative_slope=0.2)))
        # The final layer outputs RGB without norm or activation.
        self.dec1 = MaskConvModule(
            64 + 3,
            3,
            kernel_size=3,
            stride=1,
            padding=1,
            conv_cfg=conv_cfg,
            norm_cfg=None,
            act_cfg=None)

    def forward(self, input_dict):
        """Forward Function.

        Args:
            input_dict (dict | torch.Tensor): Input dict with middle
                features or torch.Tensor.

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h, w).
        """
        hidden_feats = input_dict['hidden_feats']
        hidden_masks = input_dict['hidden_masks']
        deepest_key = 'h{:d}'.format(self.num_layers)
        feat = hidden_feats[deepest_key]
        mask = hidden_masks[deepest_key]
        for layer_idx in range(self.num_layers, 0, -1):
            enc_key = f'h{layer_idx - 1}'
            # Upsample, concatenate the encoder skip connection, then
            # apply this stage's partial convolution.
            feat = F.interpolate(feat, scale_factor=2, mode=self.interpolation)
            mask = F.interpolate(mask, scale_factor=2, mode=self.interpolation)
            feat = torch.cat([feat, hidden_feats[enc_key]], dim=1)
            mask = torch.cat([mask, hidden_masks[enc_key]], dim=1)
            feat, mask = getattr(self, f'dec{layer_idx}')(feat, mask)
        return feat, mask
|
class MaxUnpool2dop(Function):
    """We warp the `torch.nn.functional.max_unpool2d`
    with an extra `symbolic` method, which is needed while exporting to ONNX.
    Users should not call this function directly.
    """

    @staticmethod
    def forward(ctx, input, indices, kernel_size, stride, padding, output_size):
        """Forward function of MaxUnpool2dop.

        Args:
            input (Tensor): Tensor needed to upsample.
            indices (Tensor): Indices output of the previous MaxPool.
            kernel_size (Tuple): Size of the max pooling window.
            stride (Tuple): Stride of the max pooling window.
            padding (Tuple): Padding that was added to the input.
            output_size (List or Tuple): The shape of output tensor.

        Returns:
            Tensor: Output tensor.
        """
        # Eager execution simply defers to the regular functional op; the
        # custom ``symbolic`` below only matters during ONNX export.
        return F.max_unpool2d(input, indices, kernel_size, stride, padding, output_size)

    @staticmethod
    def symbolic(g, input, indices, kernel_size, stride, padding, output_size):
        # Builds the ONNX graph for MaxUnpool. ONNX's MaxUnpool expects
        # indices flattened over the whole (N, C, H, W) tensor, whereas
        # PyTorch's MaxPool indices are flattened per (H, W) plane only.
        # Most of this graph therefore reconstructs global indices by
        # adding a per-channel and a per-batch offset.
        input_shape = g.op('Shape', input)
        const_0 = g.op('Constant', value_t=torch.tensor(0))
        const_1 = g.op('Constant', value_t=torch.tensor(1))
        batch_size = g.op('Gather', input_shape, const_0, axis_i=0)
        channel = g.op('Gather', input_shape, const_1, axis_i=0)
        # Unpooled output extent: (dim - 1) * stride + kernel_size.
        # NOTE(review): height uses stride[1]/kernel_size[1] while width
        # uses stride[0]/kernel_size[0]; for square kernels/strides (the
        # usual case here) both orders are equivalent, but confirm the
        # index order before exporting non-square pooling.
        height = g.op('Gather', input_shape, g.op('Constant', value_t=torch.tensor(2)), axis_i=0)
        height = g.op('Sub', height, const_1)
        height = g.op('Mul', height, g.op('Constant', value_t=torch.tensor(stride[1])))
        height = g.op('Add', height, g.op('Constant', value_t=torch.tensor(kernel_size[1])))
        width = g.op('Gather', input_shape, g.op('Constant', value_t=torch.tensor(3)), axis_i=0)
        width = g.op('Sub', width, const_1)
        width = g.op('Mul', width, g.op('Constant', value_t=torch.tensor(stride[0])))
        width = g.op('Add', width, g.op('Constant', value_t=torch.tensor(kernel_size[0])))
        # Flattened element counts: H*W per channel plane, C*H*W per
        # batch item.
        channel_step = g.op('Mul', height, width)
        batch_step = g.op('Mul', channel_step, channel)
        # Per-channel offsets, reshaped to (1, C, 1, 1) for broadcasting.
        range_channel = g.op('Range', const_0, channel, const_1)
        range_channel = g.op('Reshape', range_channel, g.op('Constant', value_t=torch.tensor([1, (- 1), 1, 1])))
        range_channel = g.op('Mul', range_channel, channel_step)
        range_channel = g.op('Cast', range_channel, to_i=7)  # 7 = ONNX INT64
        # Per-batch offsets, reshaped to (N, 1, 1, 1) for broadcasting.
        range_batch = g.op('Range', const_0, batch_size, const_1)
        range_batch = g.op('Reshape', range_batch, g.op('Constant', value_t=torch.tensor([(- 1), 1, 1, 1])))
        range_batch = g.op('Mul', range_batch, batch_step)
        range_batch = g.op('Cast', range_batch, to_i=7)  # 7 = ONNX INT64
        # Shift the per-plane indices into globally flattened indices.
        indices = g.op('Add', indices, range_channel)
        indices = g.op('Add', indices, range_batch)
        return g.op('MaxUnpool', input, indices, kernel_shape_i=kernel_size, strides_i=stride)
|
class MaxUnpool2d(_MaxUnpoolNd):
    """This module is modified from Pytorch `MaxUnpool2d` module.

    Args:
        kernel_size (int or tuple): Size of the max pooling window.
        stride (int or tuple): Stride of the max pooling window.
            Default: None (It is set to `kernel_size` by default).
        padding (int or tuple): Padding that is added to the input.
            Default: 0.
    """

    def __init__(self, kernel_size, stride=None, padding=0):
        super().__init__()
        self.kernel_size = _pair(kernel_size)
        # Fall back to the kernel size when no stride is given.
        self.stride = _pair(stride if stride else kernel_size)
        self.padding = _pair(padding)

    def forward(self, input, indices, output_size=None):
        """Forward function of MaxUnpool2d.

        Args:
            input (Tensor): Tensor needed to upsample.
            indices (Tensor): Indices output of the previous MaxPool.
            output_size (List or Tuple): The shape of output tensor.
                Default: None.

        Returns:
            Tensor: Output tensor.
        """
        return MaxUnpool2dop.apply(input, indices, self.kernel_size,
                                   self.stride, self.padding, output_size)
|
@COMPONENTS.register_module()
class PlainDecoder(nn.Module):
    """Simple decoder from Deep Image Matting.

    Args:
        in_channels (int): Channel num of input features.
    """

    def __init__(self, in_channels):
        super().__init__()
        self.deconv6_1 = nn.Conv2d(in_channels, 512, kernel_size=1)
        self.deconv5_1 = nn.Conv2d(512, 512, kernel_size=5, padding=2)
        self.deconv4_1 = nn.Conv2d(512, 256, kernel_size=5, padding=2)
        self.deconv3_1 = nn.Conv2d(256, 128, kernel_size=5, padding=2)
        self.deconv2_1 = nn.Conv2d(128, 64, kernel_size=5, padding=2)
        self.deconv1_1 = nn.Conv2d(64, 64, kernel_size=5, padding=2)
        self.deconv1 = nn.Conv2d(64, 1, kernel_size=5, padding=2)
        self.relu = nn.ReLU(inplace=True)
        # The ONNX-friendly unpool is only swapped in during export.
        self.max_unpool2d_for_onnx = MaxUnpool2d(kernel_size=2, stride=2)
        self.max_unpool2d = nn.MaxUnpool2d(kernel_size=2, stride=2)

    def init_weights(self):
        """Init weights for the module."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                xavier_init(module)

    def forward(self, inputs):
        """Forward function of PlainDecoder.

        Args:
            inputs (dict): Output dictionary of the VGG encoder containing:

                - out (Tensor): Output of the VGG encoder.
                - max_idx_1 (Tensor): Index of the first maxpooling layer
                  in the VGG encoder.
                - max_idx_2 (Tensor): Index of the second maxpooling layer
                  in the VGG encoder.
                - max_idx_3 (Tensor): Index of the third maxpooling layer
                  in the VGG encoder.
                - max_idx_4 (Tensor): Index of the fourth maxpooling layer
                  in the VGG encoder.
                - max_idx_5 (Tensor): Index of the fifth maxpooling layer
                  in the VGG encoder.

        Returns:
            Tensor: Output tensor.
        """
        unpool = (self.max_unpool2d_for_onnx
                  if torch.onnx.is_in_onnx_export() else self.max_unpool2d)
        out = self.relu(self.deconv6_1(inputs['out']))
        # Mirror the VGG encoder: unpool with the matching pooling indices
        # (deepest first), then refine with a conv + ReLU.
        stages = (
            (self.deconv5_1, inputs['max_idx_5']),
            (self.deconv4_1, inputs['max_idx_4']),
            (self.deconv3_1, inputs['max_idx_3']),
            (self.deconv2_1, inputs['max_idx_2']),
            (self.deconv1_1, inputs['max_idx_1']),
        )
        for conv, max_idx in stages:
            out = self.relu(conv(unpool(out, max_idx)))
        raw_alpha = self.deconv1(out)
        return raw_alpha
|
class BasicBlockDec(BasicBlock):
    """Basic residual block for decoder.

    For decoder, we use ConvTranspose2d with kernel_size 4 and padding 1
    for conv1. And the output channel of conv1 is modified from
    ``out_channels`` to ``in_channels``.
    """

    def build_conv1(self, in_channels, out_channels, kernel_size, stride,
                    conv_cfg, norm_cfg, act_cfg, with_spectral_norm):
        """Build conv1 of the block.

        Args:
            in_channels (int): The input channels of the ConvModule.
            out_channels (int): The output channels of the ConvModule.
            kernel_size (int): The kernel size of the ConvModule.
            stride (int): The stride of the ConvModule. If stride is set to
                2, then ``conv_cfg`` will be overwritten as
                ``dict(type='Deconv')`` and ``kernel_size`` will be
                overwritten as 4.
            conv_cfg (dict): The conv config of the ConvModule.
            norm_cfg (dict): The norm config of the ConvModule.
            act_cfg (dict): The activation config of the ConvModule.
            with_spectral_norm (bool): Whether use spectral norm.

        Returns:
            nn.Module: The built ConvModule.
        """
        if stride != 2:
            padding = kernel_size // 2
        else:
            # A stride-2 decoder block upsamples, so swap in a 4x4 deconv.
            conv_cfg = dict(type='Deconv')
            kernel_size = 4
            padding = 1
        # Output channels intentionally stay at ``in_channels``; the
        # channel change is left to conv2.
        return ConvModule(
            in_channels,
            in_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            with_spectral_norm=with_spectral_norm)

    def build_conv2(self, in_channels, out_channels, kernel_size, conv_cfg,
                    norm_cfg, with_spectral_norm):
        """Build conv2 of the block.

        Args:
            in_channels (int): The input channels of the ConvModule.
            out_channels (int): The output channels of the ConvModule.
            kernel_size (int): The kernel size of the ConvModule.
            conv_cfg (dict): The conv config of the ConvModule.
            norm_cfg (dict): The norm config of the ConvModule.
            with_spectral_norm (bool): Whether use spectral norm.

        Returns:
            nn.Module: The built ConvModule.
        """
        return ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            stride=1,
            padding=kernel_size // 2,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None,
            with_spectral_norm=with_spectral_norm)
|
@COMPONENTS.register_module()
class ResNetDec(nn.Module):
    """ResNet decoder for image matting.

    This class is adopted from https://github.com/Yaoyi-Li/GCA-Matting.

    Args:
        block (str): Type of residual block. Currently only `BasicBlockDec`
            is implemented.
        layers (list[int]): Number of layers in each block.
        in_channels (int): Channel num of input features.
        kernel_size (int): Kernel size of the conv layers in the decoder.
        conv_cfg (dict): dictionary to construct convolution layer. If it
            is None, 2d convolution will be applied. Default: None.
        norm_cfg (dict): Config dict for normalization layer. "BN" by
            default.
        act_cfg (dict): Config dict for activation layer, "LeakyReLU" by
            default.
        with_spectral_norm (bool): Whether use spectral norm after conv.
            Default: False.
        late_downsample (bool): Whether to adopt late downsample strategy,
            Default: False.
    """

    def __init__(self,
                 block,
                 layers,
                 in_channels,
                 kernel_size=3,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(
                     type='LeakyReLU', negative_slope=0.2, inplace=True),
                 with_spectral_norm=False,
                 late_downsample=False):
        super().__init__()
        if block == 'BasicBlockDec':
            block = BasicBlockDec
        else:
            raise NotImplementedError(f'{block} is not implemented.')

        self.kernel_size = kernel_size
        self.inplanes = in_channels
        self.midplanes = 64 if late_downsample else 32

        self.layer1 = self._make_layer(block, 256, layers[0], conv_cfg,
                                       norm_cfg, act_cfg, with_spectral_norm)
        self.layer2 = self._make_layer(block, 128, layers[1], conv_cfg,
                                       norm_cfg, act_cfg, with_spectral_norm)
        self.layer3 = self._make_layer(block, 64, layers[2], conv_cfg,
                                       norm_cfg, act_cfg, with_spectral_norm)
        self.layer4 = self._make_layer(block, self.midplanes, layers[3],
                                       conv_cfg, norm_cfg, act_cfg,
                                       with_spectral_norm)

        # Final 2x upsample via deconv, then a plain conv producing the
        # single-channel alpha prediction.
        self.conv1 = ConvModule(
            self.midplanes,
            32,
            4,
            stride=2,
            padding=1,
            conv_cfg=dict(type='Deconv'),
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            with_spectral_norm=with_spectral_norm)
        self.conv2 = ConvModule(
            32,
            1,
            self.kernel_size,
            padding=self.kernel_size // 2,
            act_cfg=None)

    def init_weights(self):
        """Init weights for the module."""
        for m in self.modules():
            if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                # Fix: the previous code passed bare Parameters to mmcv's
                # ``constant_init``, which expects a *module*; its
                # ``hasattr(module, 'weight')`` guard made the calls silent
                # no-ops. Initialize the tensors directly instead.
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-init the last norm layer of every residual branch so each
        # block starts out as (approximately) an identity mapping.
        for m in self.modules():
            if isinstance(m, BasicBlockDec):
                nn.init.constant_(m.conv2.bn.weight, 0)

    def _make_layer(self, block, planes, num_blocks, conv_cfg, norm_cfg,
                    act_cfg, with_spectral_norm):
        """Stack ``num_blocks`` decoder blocks; the first one upsamples 2x.

        Args:
            block (type): Residual block class to instantiate.
            planes (int): Base channel number of the blocks.
            num_blocks (int): Number of blocks in this layer.
            conv_cfg (dict): The conv config of the ConvModule.
            norm_cfg (dict): The norm config of the ConvModule.
            act_cfg (dict): The activation config of the ConvModule.
            with_spectral_norm (bool): Whether use spectral norm.

        Returns:
            nn.Sequential: The stacked blocks.
        """
        # Shortcut path of the first (stride-2) block: nearest upsample
        # followed by a 1x1 conv to match channels.
        upsample = nn.Sequential(
            nn.UpsamplingNearest2d(scale_factor=2),
            ConvModule(
                self.inplanes,
                planes * block.expansion,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=None,
                with_spectral_norm=with_spectral_norm))

        layers = [
            block(
                self.inplanes,
                planes,
                kernel_size=self.kernel_size,
                stride=2,
                interpolation=upsample,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
                with_spectral_norm=with_spectral_norm)
        ]
        self.inplanes = planes * block.expansion
        for _ in range(1, num_blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    kernel_size=self.kernel_size,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg,
                    with_spectral_norm=with_spectral_norm))

        return nn.Sequential(*layers)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (N, C, H, W).

        Returns:
            Tensor: Output tensor.
        """
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.conv1(x)
        x = self.conv2(x)
        return x
|
@COMPONENTS.register_module()
class ResShortcutDec(ResNetDec):
    """ResNet decoder for image matting with shortcut connection.

    ::

        feat1 --------------------------- conv2 --- out
                                       |
        feat2 ---------------------- conv1
                                  |
        feat3 ----------------- layer4
                             |
        feat4 ------------ layer3
                        |
        feat5 ------- layer2
                   |
        out --- layer1

    Args:
        block (str): Type of residual block. Currently only `BasicBlockDec`
            is implemented.
        layers (list[int]): Number of layers in each block.
        in_channels (int): Channel number of input features.
        kernel_size (int): Kernel size of the conv layers in the decoder.
        conv_cfg (dict): Dictionary to construct convolution layer. If it
            is None, 2d convolution will be applied. Default: None.
        norm_cfg (dict): Config dict for normalization layer. "BN" by
            default.
        act_cfg (dict): Config dict for activation layer, "ReLU" by
            default.
        late_downsample (bool): Whether to adopt late downsample strategy,
            Default: False.
    """

    def forward(self, inputs):
        """Forward function of resnet shortcut decoder.

        Args:
            inputs (dict): Output dictionary of the ResNetEnc containing:

                - out (Tensor): Output of the ResNetEnc.
                - feat1 (Tensor): Shortcut connection from input image.
                - feat2 (Tensor): Shortcut connection from conv2 of
                  ResNetEnc.
                - feat3 (Tensor): Shortcut connection from layer1 of
                  ResNetEnc.
                - feat4 (Tensor): Shortcut connection from layer2 of
                  ResNetEnc.
                - feat5 (Tensor): Shortcut connection from layer3 of
                  ResNetEnc.

        Returns:
            Tensor: Output tensor.
        """
        # Each decoder stage adds the shortcut feature of the matching
        # encoder stage after upsampling.
        out = self.layer1(inputs['out']) + inputs['feat5']
        out = self.layer2(out) + inputs['feat4']
        out = self.layer3(out) + inputs['feat3']
        out = self.layer4(out) + inputs['feat2']
        out = self.conv1(out) + inputs['feat1']
        return self.conv2(out)
|
@COMPONENTS.register_module()
class ResGCADecoder(ResShortcutDec):
    """ResNet decoder with shortcut connection and gca module.

    ::

        feat1 ---------------------------------------- conv2 --- out
                                                    |
        feat2 ----------------------------------- conv1
                                               |
        feat3 ------------------------------ layer4
                                          |
        feat4, img_feat -- gca_module - layer3
                        |
        feat5 ------- layer2
                   |
        out --- layer1

    * gca module also requires unknown tensor generated by trimap which is
    ignored in the above graph.

    Args:
        block (str): Type of residual block. Currently only `BasicBlockDec`
            is implemented.
        layers (list[int]): Number of layers in each block.
        in_channels (int): Channel number of input features.
        kernel_size (int): Kernel size of the conv layers in the decoder.
        conv_cfg (dict): Dictionary to construct convolution layer. If it
            is None, 2d convolution will be applied. Default: None.
        norm_cfg (dict): Config dict for normalization layer. "BN" by
            default.
        act_cfg (dict): Config dict for activation layer, "ReLU" by
            default.
        late_downsample (bool): Whether to adopt late downsample strategy,
            Default: False.
    """

    def __init__(self,
                 block,
                 layers,
                 in_channels,
                 kernel_size=3,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(
                     type='LeakyReLU', negative_slope=0.2, inplace=True),
                 with_spectral_norm=False,
                 late_downsample=False):
        super().__init__(block, layers, in_channels, kernel_size, conv_cfg,
                         norm_cfg, act_cfg, with_spectral_norm,
                         late_downsample)
        # Guided contextual attention on 128-channel features.
        self.gca = GCAModule(128, 128)

    def forward(self, inputs):
        """Forward function of resnet shortcut decoder.

        Args:
            inputs (dict): Output dictionary of the ResGCAEncoder
                containing:

                - out (Tensor): Output of the ResGCAEncoder.
                - feat1 (Tensor): Shortcut connection from input image.
                - feat2 (Tensor): Shortcut connection from conv2 of
                  ResGCAEncoder.
                - feat3 (Tensor): Shortcut connection from layer1 of
                  ResGCAEncoder.
                - feat4 (Tensor): Shortcut connection from layer2 of
                  ResGCAEncoder.
                - feat5 (Tensor): Shortcut connection from layer3 of
                  ResGCAEncoder.
                - img_feat (Tensor): Image feature extracted by guidance
                  head.
                - unknown (Tensor): Unknown tensor generated by trimap.

        Returns:
            Tensor: Output tensor.
        """
        out = self.layer1(inputs['out']) + inputs['feat5']
        out = self.layer2(out) + inputs['feat4']
        # Apply guided contextual attention between the guidance image
        # features and the current decoder features, with the trimap's
        # unknown map as the third input.
        out = self.gca(inputs['img_feat'], out, inputs['unknown'])
        out = self.layer3(out) + inputs['feat3']
        out = self.layer4(out) + inputs['feat2']
        out = self.conv1(out) + inputs['feat1']
        return self.conv2(out)
|
@COMPONENTS.register_module()
class AOTEncoder(nn.Module):
    """Encoder used in AOT-GAN model.

    This implementation follows:
    Aggregated Contextual Transformations for High-Resolution Image
    Inpainting

    Args:
        in_channels (int, optional): Channel number of input feature.
            Default: 4.
        mid_channels (int, optional): Channel number of middle feature.
            Default: 64.
        out_channels (int, optional): Channel number of output feature.
            Default: 256.
        act_cfg (dict, optional): Config dict for activation layer,
            "relu" by default.
    """

    def __init__(self,
                 in_channels=4,
                 mid_channels=64,
                 out_channels=256,
                 act_cfg=dict(type='ReLU')):
        super().__init__()
        # Reflection-padded 7x7 stem followed by two stride-2 convs.
        layers = [
            nn.ReflectionPad2d(3),
            ConvModule(
                in_channels,
                mid_channels,
                kernel_size=7,
                stride=1,
                act_cfg=act_cfg),
            ConvModule(
                mid_channels,
                mid_channels * 2,
                kernel_size=4,
                stride=2,
                padding=1,
                act_cfg=act_cfg),
            ConvModule(
                mid_channels * 2,
                out_channels,
                kernel_size=4,
                stride=2,
                padding=1,
                act_cfg=act_cfg),
        ]
        self.encoder = nn.Sequential(*layers)

    def forward(self, x):
        """Forward Function.

        Args:
            x (Tensor): Input tensor with shape of (n, c, h, w).

        Returns:
            Tensor: Output tensor with shape of (n, c, h', w').
        """
        return self.encoder(x)
|
@COMPONENTS.register_module()
class DeepFillEncoder(nn.Module):
    """Encoder used in DeepFill model.

    This implementation follows:
    Generative Image Inpainting with Contextual Attention

    Args:
        in_channels (int): The number of input channels. Default: 5.
        conv_type (str): The type of conv module. In DeepFillv1 model, the
            `conv_type` should be 'conv'. In DeepFillv2 model, the
            `conv_type` should be 'gated_conv'.
        norm_cfg (dict): Config dict to build norm layer. Default: None.
        act_cfg (dict): Config dict for activation layer, "elu" by default.
        encoder_type (str): Type of the encoder. Should be one of
            ['stage1', 'stage2_conv', 'stage2_attention'].
            Default: 'stage1'.
        channel_factor (float): The scale factor for channel size.
            Default: 1.
        kwargs (keyword arguments).
    """
    _conv_type = dict(conv=ConvModule, gated_conv=SimpleGatedConvModule)

    def __init__(self,
                 in_channels=5,
                 conv_type='conv',
                 norm_cfg=None,
                 act_cfg=dict(type='ELU'),
                 encoder_type='stage1',
                 channel_factor=1.0,
                 **kwargs):
        super().__init__()
        conv_module = self._conv_type[conv_type]
        channel_list_dict = dict(
            stage1=[32, 64, 64, 128, 128, 128],
            stage2_conv=[32, 32, 64, 64, 128, 128],
            stage2_attention=[32, 32, 64, 128, 128, 128])
        # Scale every stage's width by ``channel_factor``.
        channel_list = [
            int(ch * channel_factor)
            for ch in channel_list_dict[encoder_type]
        ]
        kernel_sizes = [5, 3, 3, 3, 3, 3]
        strides = [1, 2, 1, 2, 1, 1]

        cur_channels = in_channels
        for i, (ks, stride, out_ch) in enumerate(
                zip(kernel_sizes, strides, channel_list), start=1):
            self.add_module(
                f'enc{i}',
                conv_module(
                    cur_channels,
                    out_ch,
                    kernel_size=ks,
                    stride=stride,
                    padding=(ks - 1) // 2,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg,
                    **kwargs))
            cur_channels = out_ch

    def forward(self, x):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of (n, c, h, w).

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h', w').
        """
        for i in range(1, 7):
            x = getattr(self, f'enc{i}')(x)
        return dict(out=x)
|
@COMPONENTS.register_module()
class GLEncoder(nn.Module):
    """Encoder used in Global&Local model.

    This implementation follows:
    Globally and locally Consistent Image Completion

    Args:
        norm_cfg (dict): Config dict to build norm layer.
        act_cfg (dict): Config dict for activation layer, "relu" by
            default.
    """

    def __init__(self, norm_cfg=None, act_cfg=dict(type='ReLU')):
        super().__init__()
        channels = [64, 128, 128, 256, 256, 256]
        kernel_sizes = [5, 3, 3, 3, 3, 3]
        strides = [1, 2, 1, 2, 1, 1]

        cur_channels = 4
        for i, (out_ch, ks, stride) in enumerate(
                zip(channels, kernel_sizes, strides), start=1):
            self.add_module(
                f'enc{i}',
                ConvModule(
                    cur_channels,
                    out_ch,
                    kernel_size=ks,
                    stride=stride,
                    padding=(ks - 1) // 2,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
            cur_channels = out_ch

    def forward(self, x):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of (n, c, h, w).

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h', w').
        """
        for i in range(1, 7):
            x = getattr(self, f'enc{i}')(x)
        return x
|
def build_index_block(in_channels,
                      out_channels,
                      kernel_size,
                      stride=2,
                      padding=0,
                      groups=1,
                      norm_cfg=dict(type='BN'),
                      use_nonlinear=False,
                      expansion=1):
    """Build a conv block for IndexBlock.

    Args:
        in_channels (int): The input channels of the block.
        out_channels (int): The output channels of the block.
        kernel_size (int): The kernel size of the block.
        stride (int, optional): The stride of the block. Defaults to 2.
        padding (int, optional): The padding of the block. Defaults to 0.
        groups (int, optional): The groups of the block. Defaults to 1.
        norm_cfg (dict, optional): The norm config of the block.
            Defaults to dict(type='BN').
        use_nonlinear (bool, optional): Whether use nonlinearty in the
            block. If true, a ConvModule with kernel size 1 will be
            appended and an ``ReLU6`` nonlinearty will be added to the
            origin ConvModule. Defaults to False.
        expansion (int, optional): Expandsion ratio of the middle channels.
            Effective when ``use_nonlinear`` is true. Defaults to 1.

    Returns:
        nn.Module: The built conv block.
    """
    if not use_nonlinear:
        # Linear variant: a single bias-free conv with no norm/activation.
        return ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=False,
            norm_cfg=None,
            act_cfg=None)

    mid_channels = in_channels * expansion
    # Nonlinear variant: ReLU6 conv followed by a 1x1 linear projection.
    return nn.Sequential(
        ConvModule(
            in_channels,
            mid_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU6')),
        ConvModule(
            mid_channels,
            out_channels,
            1,
            stride=1,
            padding=0,
            groups=groups,
            bias=False,
            norm_cfg=None,
            act_cfg=None))
|
class HolisticIndexBlock(nn.Module):
    """Holistic Index Block.

    From https://arxiv.org/abs/1908.00672.

    Args:
        in_channels (int): Input channels of the holistic index block.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        use_context (bool): Whether to use a larger (4x4) kernel for more
            context. Default: False.
        use_nonlinear (bool): Whether add a non-linear conv layer in the
            index block. Default: False.
    """

    def __init__(self,
                 in_channels,
                 norm_cfg=dict(type='BN'),
                 use_context=False,
                 use_nonlinear=False):
        super().__init__()
        # With context, a 4x4 kernel (padding 1) replaces the 2x2 one.
        kernel_size, padding = (4, 1) if use_context else (2, 0)

        self.index_block = build_index_block(
            in_channels,
            4,
            kernel_size,
            stride=2,
            padding=padding,
            groups=1,
            norm_cfg=norm_cfg,
            use_nonlinear=use_nonlinear,
            expansion=2)
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax(dim=1)
        self.pixel_shuffle = nn.PixelShuffle(2)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input feature map with shape (N, C, H, W).

        Returns:
            tuple(Tensor): Encoder index feature and decoder index feature.
        """
        logits = self.index_block(x)
        # Decoder map: plain sigmoid gate. Encoder map: the same gate
        # normalized over the 4 index channels.
        dec_map = self.sigmoid(logits)
        enc_map = self.softmax(dec_map)
        return self.pixel_shuffle(enc_map), self.pixel_shuffle(dec_map)
|
class DepthwiseIndexBlock(nn.Module):
    """Depthwise index block.

    From https://arxiv.org/abs/1908.00672.

    Args:
        in_channels (int): Input channels of the holistic index block.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        use_context (bool): Whether to use a larger (4x4) kernel for more
            context. Default: False.
        use_nonlinear (bool): Whether add a non-linear conv layer in the
            index blocks. Default: False.
        mode (str): Mode of index block. Should be 'o2o' or 'm2o'. In 'o2o'
            mode, the group of the conv layers is 1; In 'm2o' mode, the
            group of the conv layer is `in_channels`.
    """

    def __init__(self,
                 in_channels,
                 norm_cfg=dict(type='BN'),
                 use_context=False,
                 use_nonlinear=False,
                 mode='o2o'):
        super().__init__()
        # 'o2o' predicts each channel independently (depthwise groups).
        groups = in_channels if mode == 'o2o' else 1
        kernel_size, padding = (4, 1) if use_context else (2, 0)

        # One index-prediction branch per position of the 2x2 window.
        self.index_blocks = nn.ModuleList(
            build_index_block(
                in_channels,
                in_channels,
                kernel_size,
                stride=2,
                padding=padding,
                groups=groups,
                norm_cfg=norm_cfg,
                use_nonlinear=use_nonlinear) for _ in range(4))

        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax(dim=2)
        self.pixel_shuffle = nn.PixelShuffle(2)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input feature map with shape (N, C, H, W).

        Returns:
            tuple(Tensor): Encoder index feature and decoder index feature.
        """
        n, c, h, w = x.shape

        # Stack the four branch outputs along a new "window position" axis.
        stacked = torch.cat(
            [blk(x).unsqueeze(2) for blk in self.index_blocks], dim=2)
        dec_map = self.sigmoid(stacked)
        # Normalize across the 4 window positions for the encoder map.
        enc_map = self.softmax(dec_map)

        dec_map = dec_map.view(n, c * 4, h // 2, w // 2)
        enc_map = enc_map.view(n, c * 4, h // 2, w // 2)
        return self.pixel_shuffle(enc_map), self.pixel_shuffle(dec_map)
|
class InvertedResidual(nn.Module):
    """Inverted residual layer for indexnet encoder.

    It basically is a depthwise separable conv module. If ``expand_ratio``
    is not one, then a conv module of kernel_size 1 will be inserted to
    change the input channels to ``in_channels * expand_ratio``.

    Args:
        in_channels (int): Input channels of the layer.
        out_channels (int): Output channels of the layer.
        stride (int): Stride of the depthwise separable conv module.
        dilation (int): Dilation of the depthwise separable conv module.
        expand_ratio (float): Expand ratio of the input channels of the
            depthwise separable conv module.
        norm_cfg (dict | None): Config dict for normalization layer.
        use_res_connect (bool, optional): Whether use shortcut connection.
            Defaults to False.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 dilation,
                 expand_ratio,
                 norm_cfg,
                 use_res_connect=False):
        super().__init__()
        assert stride in [1, 2], 'stride must 1 or 2'
        self.use_res_connect = use_res_connect
        self.kernel_size = 3
        self.dilation = dilation

        if expand_ratio == 1:
            # No expansion: a single depthwise separable conv.
            self.conv = DepthwiseSeparableConvModule(
                in_channels,
                out_channels,
                3,
                stride=stride,
                dilation=dilation,
                norm_cfg=norm_cfg,
                dw_act_cfg=dict(type='ReLU6'),
                pw_act_cfg=None)
        else:
            # Expand with a 1x1 conv first, then depthwise separable conv.
            hidden_dim = round(in_channels * expand_ratio)
            self.conv = nn.Sequential(
                ConvModule(
                    in_channels,
                    hidden_dim,
                    1,
                    norm_cfg=norm_cfg,
                    act_cfg=dict(type='ReLU6')),
                DepthwiseSeparableConvModule(
                    hidden_dim,
                    out_channels,
                    3,
                    stride=stride,
                    dilation=dilation,
                    norm_cfg=norm_cfg,
                    dw_act_cfg=dict(type='ReLU6'),
                    pw_act_cfg=None))

    def pad(self, inputs, kernel_size, dilation):
        """Pad symmetrically for the effective (dilated) kernel size."""
        effective = (kernel_size - 1) * dilation + 1
        pad_left = (effective - 1) // 2
        pad_right = effective // 2
        return F.pad(inputs, (pad_left, pad_right, pad_left, pad_right))

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input feature map with shape (N, C, H, W).

        Returns:
            Tensor: Output feature map.
        """
        out = self.conv(self.pad(x, self.kernel_size, self.dilation))
        if self.use_res_connect:
            out = out + x
        return out
|
@COMPONENTS.register_module()
class IndexNetEncoder(nn.Module):
    """Encoder for IndexNet.

    Please refer to https://arxiv.org/abs/1908.00672.

    Args:
        in_channels (int, optional): Input channels of the encoder.
        out_stride (int, optional): Output stride of the encoder. For
            example, if `out_stride` is 32, the input feature map or image
            will be downsample to the 1/32 of original size.
            Defaults to 32.
        width_mult (int, optional): Width multiplication factor of channel
            dimension in MobileNetV2. Defaults to 1.
        index_mode (str, optional): Index mode of the index network. It
            must be one of {'holistic', 'o2o', 'm2o'}. If it is set to
            'holistic', then Holistic index network will be used as the
            index network. If it is set to 'o2o' (or 'm2o'), when O2O
            (or M2O) Depthwise index network will be used as the index
            network. Defaults to 'm2o'.
        aspp (bool, optional): Whether use ASPP module to augment output
            feature. Defaults to True.
        norm_cfg (None | dict, optional): Config dict for normalization
            layer. Defaults to dict(type='BN').
        freeze_bn (bool, optional): Whether freeze batch norm layer.
            Defaults to False.
        use_nonlinear (bool, optional): Whether use nonlinearty in index
            network. Refer to the paper for more information.
            Defaults to True.
        use_context (bool, optional): Whether use larger kernel size in
            index network. Refer to the paper for more information.
            Defaults to True.

    Raises:
        ValueError: out_stride must 16 or 32.
        NameError: Supported index_mode are {'holistic', 'o2o', 'm2o'}.
    """

    def __init__(self, in_channels, out_stride=32, width_mult=1, index_mode='m2o', aspp=True, norm_cfg=dict(type='BN'), freeze_bn=False, use_nonlinear=True, use_context=True):
        super().__init__()
        if (out_stride not in [16, 32]):
            raise ValueError(f'out_stride must 16 or 32, got {out_stride}')
        self.out_stride = out_stride
        self.width_mult = width_mult
        # Select which index-network implementation to instantiate later.
        if (index_mode == 'holistic'):
            index_block = HolisticIndexBlock
        elif (index_mode in ('o2o', 'm2o')):
            index_block = partial(DepthwiseIndexBlock, mode=index_mode)
        else:
            raise NameError('Unknown index block mode {}'.format(index_mode))
        # MobileNetV2-style stage settings. Each entry is
        # [expand_ratio, in_channels, out_channels, num_blocks, stride, dilation].
        initial_channels = 32
        inverted_residual_setting = [[1, initial_channels, 16, 1, 1, 1], [6, 16, 24, 2, 2, 1], [6, 24, 32, 3, 2, 1], [6, 32, 64, 4, 2, 1], [6, 64, 96, 3, 1, 1], [6, 96, 160, 3, 2, 1], [6, 160, 320, 1, 1, 1]]
        # Scale all channel widths by the MobileNetV2 width multiplier
        # (mutates the setting lists in place).
        initial_channels = int((initial_channels * width_mult))
        for layer_setting in inverted_residual_setting:
            layer_setting[1] = int((layer_setting[1] * self.width_mult))
            layer_setting[2] = int((layer_setting[2] * self.width_mult))
        # Indices of layers whose outputs are downsampled (and thus get an
        # index block); index 0 is the stem conv built below.
        if (out_stride == 32):
            self.downsampled_layers = [0, 2, 3, 4, 6]
        else:
            # out_stride == 16: keep the last two stages at full resolution
            # by switching them to dilation 2 instead of downsampling.
            self.downsampled_layers = [0, 2, 3, 4]
            inverted_residual_setting[5][5] = 2
            inverted_residual_setting[6][5] = 2
        # Stem conv (layer 0) followed by the seven inverted-residual stages.
        self.layers = nn.ModuleList([ConvModule(in_channels, initial_channels, 3, padding=1, norm_cfg=norm_cfg, act_cfg=dict(type='ReLU6'))])
        for layer_setting in inverted_residual_setting:
            self.layers.append(self._make_layer(layer_setting, norm_cfg))
        if freeze_bn:
            self.freeze_bn()
        # One index block per downsampled layer; its channel count equals
        # that layer's output width (``setting[layer][1]``).
        self.index_layers = nn.ModuleList()
        for layer in self.downsampled_layers:
            self.index_layers.append(index_block(inverted_residual_setting[layer][1], norm_cfg, use_context, use_nonlinear))
        self.avg_pool = nn.AvgPool2d(2, stride=2)
        if aspp:
            # Smaller dilations for the deeper (out_stride 32) feature map.
            dilation = ((2, 4, 8) if (out_stride == 32) else (6, 12, 18))
            self.dconv = ASPP((320 * self.width_mult), 160, mid_channels=int((256 * self.width_mult)), dilations=dilation, norm_cfg=norm_cfg, act_cfg=dict(type='ReLU6'), separable_conv=True)
        else:
            self.dconv = ConvModule((320 * self.width_mult), 160, 1, norm_cfg=norm_cfg, act_cfg=dict(type='ReLU6'))
        self.out_channels = 160

    def _make_layer(self, layer_setting, norm_cfg):
        """Build one MobileNetV2 stage from its setting list.

        Note the stride passed to the blocks is always 1: spatial
        downsampling happens via ``avg_pool`` in ``forward`` instead of
        strided convolutions.
        """
        (expand_ratio, in_channels, out_channels, num_blocks, stride, dilation) = layer_setting
        # The first block of an (originally) stride-2 stage uses a halved
        # dilation, clamped to at least 1.
        dilation0 = (max((dilation // 2), 1) if (stride == 2) else dilation)
        layers = [InvertedResidual(in_channels, out_channels, 1, dilation0, expand_ratio, norm_cfg)]
        in_channels = out_channels
        for _ in range(1, num_blocks):
            layers.append(InvertedResidual(in_channels, out_channels, 1, dilation, expand_ratio, norm_cfg, use_res_connect=True))
        return nn.Sequential(*layers)

    def freeze_bn(self):
        """Set BatchNorm modules in the model to evaluation mode."""
        for m in self.modules():
            if isinstance(m, (nn.BatchNorm2d, SyncBatchNorm)):
                m.eval()

    def init_weights(self, pretrained=None):
        """Init weights for the model.

        Args:
            pretrained (str, optional): Path for pretrained weights. If
                given None, pretrained weights will not be loaded.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    xavier_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input feature map with shape (N, C, H, W).

        Returns:
            dict: Output tensor, shortcut feature and decoder index
                feature.
        """
        dec_idx_feat_list = list()
        shortcuts = list()
        for (i, layer) in enumerate(self.layers):
            x = layer(x)
            if (i in self.downsampled_layers):
                # Gate the feature with the encoder index map, record the
                # shortcut, then downsample with 2x2 average pooling. The
                # factor 4 rescales the pooled, index-gated features
                # (per the IndexNet design; see the paper).
                (enc_idx_feat, dec_idx_feat) = self.index_layers[self.downsampled_layers.index(i)](x)
                x = (enc_idx_feat * x)
                shortcuts.append(x)
                dec_idx_feat_list.append(dec_idx_feat)
                x = (4 * self.avg_pool(x))
            elif (i != 7):
                # Non-downsampled layers (except the last stage) still
                # contribute shortcut features but no decoder index map.
                shortcuts.append(x)
                dec_idx_feat_list.append(None)
        x = self.dconv(x)
        return {'out': x, 'shortcuts': shortcuts, 'dec_idx_feat_list': dec_idx_feat_list}
|
@COMPONENTS.register_module()
class PConvEncoder(nn.Module):
    """Encoder with partial conv.

    About the details for this architecture, pls see:
    Image Inpainting for Irregular Holes Using Partial Convolutions

    Args:
        in_channels (int): The number of input channels. Default: 3.
        num_layers (int): The number of convolutional layers. Default: 7.
        conv_cfg (dict): Config for convolution module. Default:
            {'type': 'PConv', 'multi_channel': True}.
        norm_cfg (dict): Config for norm layer. Default: {'type': 'BN'}.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effective on Batch
            Norm and its variants only.
    """

    def __init__(self,
                 in_channels=3,
                 num_layers=7,
                 conv_cfg=dict(type='PConv', multi_channel=True),
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=False):
        super().__init__()
        self.num_layers = num_layers
        self.norm_eval = norm_eval

        # First layer has no normalization.
        self.enc1 = MaskConvModule(
            in_channels,
            64,
            kernel_size=7,
            stride=2,
            padding=3,
            conv_cfg=conv_cfg,
            norm_cfg=None,
            act_cfg=dict(type='ReLU'))
        self.enc2 = MaskConvModule(
            64,
            128,
            kernel_size=5,
            stride=2,
            padding=2,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'))
        self.enc3 = MaskConvModule(
            128,
            256,
            kernel_size=5,
            stride=2,
            padding=2,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'))
        self.enc4 = MaskConvModule(
            256,
            512,
            kernel_size=3,
            stride=2,
            padding=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'))
        # Remaining layers are identical 512 -> 512 stride-2 convs.
        for i in range(4, num_layers):
            self.add_module(
                f'enc{i + 1}',
                MaskConvModule(
                    512,
                    512,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=dict(type='ReLU')))

    def train(self, mode=True):
        """Set train/eval mode; optionally keep norm layers frozen."""
        super().train(mode)
        if mode and self.norm_eval:
            # Freeze running stats (mean and var) during training.
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()

    def forward(self, x, mask):
        """Forward function for partial conv encoder.

        Args:
            x (torch.Tensor): Masked image with shape (n, c, h, w).
            mask (torch.Tensor): Mask tensor with shape (n, c, h, w).

        Returns:
            dict: Contains the results and middle level features in this
                module. `hidden_feats` contain the middle feature maps and
                `hidden_masks` store updated masks.
        """
        hidden_feats = {'h0': x}
        hidden_masks = {'h0': mask}

        feat, updated_mask = x, mask
        for i in range(1, self.num_layers + 1):
            feat, updated_mask = getattr(self, f'enc{i}')(feat, updated_mask)
            hidden_feats[f'h{i}'] = feat
            hidden_masks[f'h{i}'] = updated_mask

        return dict(
            out=hidden_feats[f'h{self.num_layers}'],
            hidden_feats=hidden_feats,
            hidden_masks=hidden_masks)
|
class BasicBlock(nn.Module):
    """Basic block for ResNet."""
    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 act_cfg=dict(type='ReLU'),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 with_cp=False):
        super().__init__()
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)

        self.activate = build_activation_layer(act_cfg)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(feat):
            # Residual branch: conv-norm-act-conv-norm, then add identity.
            residual = feat
            out = self.activate(self.norm1(self.conv1(feat)))
            out = self.norm2(self.conv2(out))
            if self.downsample is not None:
                residual = self.downsample(feat)
            out += residual
            return out

        # Optionally trade compute for memory with gradient checkpointing.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        return self.activate(out)
|
class Bottleneck(nn.Module):
    """Bottleneck block for ResNet."""
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 act_cfg=dict(type='ReLU'),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 with_cp=False):
        super().__init__()
        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.act_cfg = act_cfg
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # 1x1 reduce conv keeps stride 1; the 3x3 conv carries the stride.
        self.conv1_stride = 1
        self.conv2_stride = stride
        self.with_cp = with_cp

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg,
            planes,
            planes,
            kernel_size=3,
            stride=self.conv2_stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.activate = build_activation_layer(act_cfg)
        self.downsample = downsample

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """nn.Module: normalization layer after the third convolution layer"""
        return getattr(self, self.norm3_name)

    def forward(self, x):
        """Forward function."""
        residual = x if self.downsample is None else self.downsample(x)

        # 1x1 reduce -> 3x3 -> 1x1 expand, each followed by a norm layer.
        out = self.activate(self.norm1(self.conv1(x)))
        out = self.activate(self.norm2(self.conv2(out)))
        out = self.norm3(self.conv3(out))

        out += residual
        return self.activate(out)
|
class ResNet(nn.Module):
    """General ResNet.

    This class is adopted from
    https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/backbones/resnet.py.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Default: 3.
        stem_channels (int): Number of stem channels. Default: 64.
        base_channels (int): Number of base channels of res layer.
            Default: 64.
        num_stages (int): Resnet stages, normally 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv.
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck.
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters.
        act_cfg (dict): Dictionary to construct and config activation layer.
        conv_cfg (dict): Dictionary to construct and config convolution
            layer.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
        multi_grid (Sequence[int]|None): Multi grid dilation rates of last
            stage. Default: None.
        contract_dilation (bool): Whether contract first dilation of each
            layer. Default: False.
        zero_init_residual (bool): Whether to use zero init for last norm
            layer in resblocks to let them behave as identity.
    """

    # Mapping: depth -> (block class, number of blocks in each of 4 stages).
    arch_settings = {18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}
    def __init__(self, depth, in_channels, stem_channels, base_channels, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 2, 4), deep_stem=False, avg_down=False, frozen_stages=(- 1), act_cfg=dict(type='ReLU'), conv_cfg=None, norm_cfg=dict(type='BN'), with_cp=False, multi_grid=None, contract_dilation=False, zero_init_residual=True):
        super(ResNet, self).__init__()
        # ``partial`` binds the per-stage dilation when applying
        # ``_nostride_dilate`` to each residual layer below.
        from functools import partial
        if (depth not in self.arch_settings):
            raise KeyError(f'invalid depth {depth} for resnet')
        (self.block, stage_blocks) = self.arch_settings[depth]
        self.depth = depth
        self.inplanes = stem_channels
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert ((num_stages >= 1) and (num_stages <= 4))
        self.strides = strides
        self.dilations = dilations
        assert (len(strides) == len(dilations) == num_stages)
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.act_cfg = act_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.multi_grid = multi_grid
        self.contract_dilation = contract_dilation
        self.zero_init_residual = zero_init_residual
        self._make_stem_layer(in_channels, stem_channels)
        # NOTE(review): stage widths are hard-coded to 64/128/256/512 instead
        # of being derived from ``base_channels``; likewise ``with_cp``,
        # ``avg_down``, ``multi_grid`` and ``contract_dilation`` are stored
        # but not forwarded to ``_make_layer`` here — confirm intended.
        self.layer1 = self._make_layer(self.block, 64, stage_blocks[0], stride=strides[0])
        self.layer2 = self._make_layer(self.block, 128, stage_blocks[1], stride=strides[1])
        self.layer3 = self._make_layer(self.block, 256, stage_blocks[2], stride=strides[2])
        self.layer4 = self._make_layer(self.block, 512, stage_blocks[3], stride=strides[3])
        # Dilations are applied after construction by rewriting each conv's
        # stride/dilation/padding in place (see ``_nostride_dilate``).
        self.layer1.apply(partial(self._nostride_dilate, dilate=dilations[0]))
        self.layer2.apply(partial(self._nostride_dilate, dilate=dilations[1]))
        self.layer3.apply(partial(self._nostride_dilate, dilate=dilations[2]))
        self.layer4.apply(partial(self._nostride_dilate, dilate=dilations[3]))
        self._freeze_stages()
    def _make_stem_layer(self, in_channels, stem_channels):
        """Make stem layer for ResNet: either three stacked 3x3 convs
        (``deep_stem``) or a single 7x7 conv, each followed by norm + act."""
        if self.deep_stem:
            self.stem = nn.Sequential(build_conv_layer(self.conv_cfg, in_channels, (stem_channels // 2), kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, (stem_channels // 2))[1], build_activation_layer(self.act_cfg), build_conv_layer(self.conv_cfg, (stem_channels // 2), (stem_channels // 2), kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, (stem_channels // 2))[1], build_activation_layer(self.act_cfg), build_conv_layer(self.conv_cfg, (stem_channels // 2), stem_channels, kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, stem_channels)[1], build_activation_layer(self.act_cfg))
        else:
            self.conv1 = build_conv_layer(self.conv_cfg, in_channels, stem_channels, kernel_size=7, stride=2, padding=3, bias=False)
            (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, stem_channels, postfix=1)
            self.add_module(self.norm1_name, norm1)
            self.activate = build_activation_layer(self.act_cfg)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    @property
    def norm1(self):
        """nn.Module: normalization layer after the stem's first convolution
        (only built when ``deep_stem`` is False)."""
        return getattr(self, self.norm1_name)
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Stack ``blocks`` residual blocks; the first one may downsample.

        Note: mutates ``self.inplanes`` so consecutive calls chain channels.
        """
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # 1x1 conv + norm so the identity branch matches shape/stride.
            downsample = nn.Sequential(build_conv_layer(self.conv_cfg, self.inplanes, (planes * block.expansion), stride=stride, kernel_size=1, dilation=dilation, bias=False), build_norm_layer(self.norm_cfg, (planes * block.expansion))[1])
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample=downsample, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, conv_cfg=self.conv_cfg))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, conv_cfg=self.conv_cfg))
        return nn.Sequential(*layers)
    def _nostride_dilate(self, m, dilate):
        """Convert stride-2 convs to stride 1 and dilate 3x3 convs so the
        stage keeps spatial resolution while enlarging its receptive field.

        Applied via ``nn.Module.apply``; edits conv attributes in place.
        """
        classname = m.__class__.__name__
        if ((classname.find('Conv') != (- 1)) and (dilate > 1)):
            if (m.stride == (2, 2)):
                # Was the downsampling conv: drop the stride, halve dilation.
                m.stride = (1, 1)
                if (m.kernel_size == (3, 3)):
                    m.dilation = ((dilate // 2), (dilate // 2))
                    m.padding = ((dilate // 2), (dilate // 2))
            elif (m.kernel_size == (3, 3)):
                m.dilation = (dilate, dilate)
                m.padding = (dilate, dilate)
    def init_weights(self, pretrained=None):
        """Init weights for the model.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, weights are initialized in place (Kaiming for convs,
                constant 1 for norms, and optionally zero for each block's
                last norm). Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if self.zero_init_residual:
                # Zero the last norm of every block so it starts as identity.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')
    def _freeze_stages(self):
        """Freeze stages param and norm stats (stem plus layers 1..frozen_stages)."""
        if (self.frozen_stages >= 0):
            if self.deep_stem:
                self.stem.eval()
                for param in self.stem.parameters():
                    param.requires_grad = False
            else:
                self.norm1.eval()
                for m in [self.conv1, self.norm1]:
                    for param in m.parameters():
                        param.requires_grad = False
        for i in range(1, (self.frozen_stages + 1)):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False
    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (N, C, H, W).

        Returns:
            list[Tensor]: Six feature maps — the raw input, the stem output,
            and the outputs of layer1..layer4.
        """
        conv_out = [x]
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.activate(x)
        conv_out.append(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        conv_out.append(x)
        x = self.layer2(x)
        conv_out.append(x)
        x = self.layer3(x)
        conv_out.append(x)
        x = self.layer4(x)
        conv_out.append(x)
        return conv_out
|
@COMPONENTS.register_module()
class VGG16(nn.Module):
    """Customized VGG16 encoder.

    A 1x1 conv (``conv6``) is added after the original VGG16 conv layers.
    The indices of the max-pooling layers are returned so decoders can
    unpool with them.

    Args:
        in_channels (int): Number of input channels.
        batch_norm (bool, optional): Whether to use ``nn.BatchNorm2d``.
            Default to False.
        aspp (bool, optional): Whether to use an ASPP module after the last
            conv layer. Default to False.
        dilations (list[int], optional): Atrous rates of the ASPP module.
            Default to None.
    """

    def __init__(self, in_channels, batch_norm=False, aspp=False, dilations=None):
        super().__init__()
        self.batch_norm = batch_norm
        self.aspp = aspp
        self.dilations = dilations
        self.layer1 = self._make_layer(in_channels, 64, 2)
        self.layer2 = self._make_layer(64, 128, 2)
        self.layer3 = self._make_layer(128, 256, 3)
        self.layer4 = self._make_layer(256, 512, 3)
        self.layer5 = self._make_layer(512, 512, 3)
        self.conv6 = nn.Conv2d(512, 512, kernel_size=1)
        if self.batch_norm:
            self.bn = nn.BatchNorm2d(512)
        self.relu = nn.ReLU(inplace=True)
        if self.aspp:
            # ``self.aspp`` is deliberately rebound from the bool flag to the
            # actual ASPP module (which halves the output width to 256).
            self.aspp = ASPP(512, dilations=self.dilations)
            self.out_channels = 256
        else:
            self.out_channels = 512

    def _make_layer(self, inplanes, planes, convs_layers):
        """Build one VGG stage: ``convs_layers`` conv(+BN)+ReLU, then pool."""
        stage = []
        channels = inplanes
        for _ in range(convs_layers):
            stage.append(nn.Conv2d(channels, planes, kernel_size=3, padding=1))
            if self.batch_norm:
                stage.append(nn.BatchNorm2d(planes))
            stage.append(nn.ReLU(inplace=True))
            channels = planes
        stage.append(nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True))
        return nn.Sequential(*stage)

    def init_weights(self, pretrained=None):
        """Init weights, either from a checkpoint path or in place."""
        if isinstance(pretrained, str):
            load_checkpoint(self, pretrained, strict=False, logger=get_root_logger())
        elif pretrained is None:
            for module in self.modules():
                if isinstance(module, nn.Conv2d):
                    xavier_init(module)
                elif isinstance(module, nn.BatchNorm2d):
                    constant_init(module, 1)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (N, C, H, W).

        Returns:
            dict: Output tensor under ``'out'`` plus the max-pooling indices
            of each of the five stages (``'max_idx_1'`` .. ``'max_idx_5'``).
        """
        feat = x
        pool_indices = []
        for i in range(1, 6):
            feat, idx = getattr(self, f'layer{i}')(feat)
            pool_indices.append(idx)
        feat = self.conv6(feat)
        if self.batch_norm:
            feat = self.bn(feat)
        feat = self.relu(feat)
        if self.aspp:
            feat = self.aspp(feat)
        result = {'out': feat}
        for i, idx in enumerate(pool_indices, 1):
            result[f'max_idx_{i}'] = idx
        return result
|
@BACKBONES.register_module()
class GLEncoderDecoder(nn.Module):
    """Encoder-decoder used in the Global&Local model.

    This implementation follows:
    Globally and locally Consistent Image Completion

    The architecture of the encoder-decoder is:
    (conv2d x 6) --> (dilated conv2d x 4) --> (conv2d or deconv2d x 7)

    Args:
        encoder (dict): Config dict to encoder.
        decoder (dict): Config dict to build decoder.
        dilation_neck (dict): Config dict to build dilation neck.
    """

    def __init__(self, encoder=dict(type='GLEncoder'), decoder=dict(type='GLDecoder'), dilation_neck=dict(type='GLDilationNeck')):
        super().__init__()
        self.encoder = build_component(encoder)
        self.decoder = build_component(decoder)
        self.dilation_neck = build_component(dilation_neck)
        self.fp16_enabled = False

    @auto_fp16()
    def forward(self, x):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of (n, c, h, w).

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h', w').
        """
        feat = self.encoder(x)
        # Some encoders return a dict; the feature map lives under 'out'.
        if isinstance(feat, dict):
            feat = feat['out']
        return self.decoder(self.dilation_neck(feat))

    def init_weights(self, pretrained=None):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
        """
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError('pretrained must be a str or None')
        logger = get_root_logger()
        load_checkpoint(self, pretrained, strict=False, logger=logger)
|
@COMPONENTS.register_module()
class AOTBlockNeck(nn.Module):
    """Dilation backbone used in the AOT-GAN model.

    This implementation follows:
    Aggregated Contextual Transformations for High-Resolution Image
    Inpainting

    Args:
        in_channels (int, optional): Channel number of input feature.
            Default: 256.
        dilation_rates (Tuple[int], optional): The dilation rates used
            for AOT block. Default: (1, 2, 4, 8).
        num_aotblock (int, optional): Number of AOT blocks. Default: 8.
        act_cfg (dict, optional): Config dict for activation layer,
            "relu" by default.
        kwargs (keyword arguments).
    """

    def __init__(self, in_channels=256, dilation_rates=(1, 2, 4, 8), num_aotblock=8, act_cfg=dict(type='ReLU'), **kwargs):
        super().__init__()
        self.dilation_rates = list(dilation_rates)
        stacked_blocks = [
            AOTBlock(
                in_channels=in_channels,
                dilation_rates=self.dilation_rates,
                act_cfg=act_cfg) for _ in range(num_aotblock)
        ]
        self.model = nn.Sequential(*stacked_blocks)

    def forward(self, x):
        """Run the feature map through the stacked AOT blocks."""
        return self.model(x)
|
class AOTBlock(nn.Module):
    """AOT Block which constitutes the dilation backbone.

    This implementation follows:
    Aggregated Contextual Transformations for High-Resolution Image
    Inpainting

    The AOT Block adopts the split-transformation-merge strategy:
    Splitting: A kernel with 256 output channels is split into four
    64-channel sub-kernels.
    Transforming: Each sub-kernel performs a different transformation with
    a different dilation rate.
    Merging: Sub-kernels with different receptive fields are merged.

    Args:
        in_channels (int, optional): Channel number of input feature.
            Default: 256.
        dilation_rates (Tuple[int]): The dilation rates used for AOT block.
            Default (1, 2, 4, 8).
        act_cfg (dict, optional): Config dict for activation layer,
            "relu" by default.
        kwargs (keyword arguments).
    """

    def __init__(self, in_channels=256, dilation_rates=(1, 2, 4, 8), act_cfg=dict(type='ReLU'), **kwargs):
        super().__init__()
        self.dilation_rates = dilation_rates
        # One dilated branch per rate, each producing in_channels // 4.
        self.blocks = nn.ModuleList()
        for rate in self.dilation_rates:
            self.blocks.append(
                nn.Sequential(
                    nn.ReflectionPad2d(rate),
                    ConvModule(
                        in_channels,
                        in_channels // 4,
                        kernel_size=3,
                        dilation=rate,
                        act_cfg=act_cfg)))
        self.fuse = nn.Sequential(
            nn.ReflectionPad2d(1),
            ConvModule(in_channels, in_channels, 3, dilation=1, act_cfg=None))
        self.gate = nn.Sequential(
            nn.ReflectionPad2d(1),
            ConvModule(in_channels, in_channels, 3, dilation=1, act_cfg=None))

    def normalize(self, x):
        """Standardize ``x`` over its spatial dims and scale the result."""
        mu = x.mean((2, 3), keepdim=True)
        sigma = x.std((2, 3), keepdim=True) + 1e-09
        return 5 * (2 * (x - mu) / sigma - 1)

    def forward(self, x):
        """Aggregate the dilated branches and gate them against the input."""
        branch_feats = [branch(x) for branch in self.blocks]
        aggregated = self.fuse(torch.cat(branch_feats, 1))
        gate = torch.sigmoid(self.normalize(self.gate(x)))
        # Spatial gating: keep input where gate is low, new feature elsewhere.
        return x * (1 - gate) + aggregated * gate
|
@COMPONENTS.register_module()
class ContextualAttentionNeck(nn.Module):
    """Neck with contextual attention module.

    Args:
        in_channels (int): The number of input channels.
        conv_type (str): The type of conv module. In DeepFillv1 model, the
            `conv_type` should be 'conv'. In DeepFillv2 model, the
            `conv_type` should be 'gated_conv'.
        conv_cfg (dict | None): Config of conv module. Default: None.
        norm_cfg (dict | None): Config of norm module. Default: None.
        act_cfg (dict | None): Config of activation layer. Default:
            dict(type='ELU').
        contextual_attention_args (dict): Config of contextual attention
            module. Default: dict(softmax_scale=10.).
        kwargs (keyword arguments).
    """

    _conv_type = dict(conv=ConvModule, gated_conv=SimpleGatedConvModule)

    def __init__(self, in_channels, conv_type='conv', conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ELU'), contextual_attention_args=dict(softmax_scale=10.0), **kwargs):
        super().__init__()
        self.contextual_attention = ContextualAttentionModule(**contextual_attention_args)
        conv_cls = self._conv_type[conv_type]
        shared_kwargs = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, **kwargs)
        self.conv1 = conv_cls(in_channels, in_channels, 3, padding=1, **shared_kwargs)
        self.conv2 = conv_cls(in_channels, in_channels, 3, padding=1, **shared_kwargs)

    def forward(self, x, mask):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of (n, c, h, w).
            mask (torch.Tensor): Input tensor with shape of (n, 1, h, w).

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h', w').
        """
        feat, offset = self.contextual_attention(x, x, mask)
        feat = self.conv2(self.conv1(feat))
        return feat, offset
|
@COMPONENTS.register_module()
class GLDilationNeck(nn.Module):
    """Dilation Backbone used in Global&Local model.

    This implementation follows:
    Globally and locally Consistent Image Completion

    Args:
        in_channels (int): Channel number of input feature.
        conv_type (str): The type of conv module. In DeepFillv1 model, the
            `conv_type` should be 'conv'. In DeepFillv2 model, the
            `conv_type` should be 'gated_conv'.
        norm_cfg (dict): Config dict to build norm layer.
        act_cfg (dict): Config dict for activation layer, "relu" by default.
        kwargs (keyword arguments).
    """

    _conv_type = dict(conv=ConvModule, gated_conv=SimpleGatedConvModule)

    def __init__(self, in_channels=256, conv_type='conv', norm_cfg=None, act_cfg=dict(type='ReLU'), **kwargs):
        super().__init__()
        conv_cls = self._conv_type[conv_type]
        # Four dilated convs with rates 2, 4, 8, 16 enlarge the receptive
        # field while keeping the spatial resolution unchanged.
        dilated_layers = []
        for rate in (2, 4, 8, 16):
            dilated_layers.append(
                conv_cls(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    padding=rate,
                    dilation=rate,
                    stride=1,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg,
                    **kwargs))
        self.dilation_convs = nn.Sequential(*dilated_layers)

    def forward(self, x):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of (n, c, h, w).

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h', w').
        """
        return self.dilation_convs(x)
|
@BACKBONES.register_module()
class PConvEncoderDecoder(nn.Module):
    """Encoder-Decoder with partial conv module.

    Args:
        encoder (dict): Config of the encoder.
        decoder (dict): Config of the decoder.
    """

    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = build_component(encoder)
        self.decoder = build_component(decoder)
        self.fp16_enabled = False

    @auto_fp16()
    def forward(self, x, mask_in):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of (n, c, h, w).
            mask_in (torch.Tensor): Mask tensor with shape of (n, c, h, w).

        Returns:
            tuple(torch.Tensor): Decoded image and the final validity mask.
        """
        encoded = self.encoder(x, mask_in)
        out, final_mask = self.decoder(encoded)
        return out, final_mask

    def init_weights(self, pretrained=None):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
        """
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError('pretrained must be a str or None')
        logger = get_root_logger()
        load_checkpoint(self, pretrained, strict=False, logger=logger)
|
@BACKBONES.register_module()
class SimpleEncoderDecoder(nn.Module):
    """Simple encoder-decoder model from matting.

    Args:
        encoder (dict): Config of the encoder.
        decoder (dict): Config of the decoder.
    """

    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = build_component(encoder)
        # Propagate the encoder's output width into the decoder config when
        # the encoder exposes it, so the two stay consistent.
        if hasattr(self.encoder, 'out_channels'):
            decoder['in_channels'] = self.encoder.out_channels
        self.decoder = build_component(decoder)

    def init_weights(self, pretrained=None):
        """Init weights: the encoder may load ``pretrained``; the decoder
        always initializes from scratch."""
        self.encoder.init_weights(pretrained)
        self.decoder.init_weights()

    def forward(self, *args, **kwargs):
        """Forward function.

        Returns:
            Tensor: The output tensor of the decoder.
        """
        hidden = self.encoder(*args, **kwargs)
        return self.decoder(hidden)
|
@BACKBONES.register_module()
class ResnetGenerator(nn.Module):
    """Construct a Resnet-based generator that consists of residual blocks
    between a few downsampling/upsampling operations.

    Args:
        in_channels (int): Number of channels in input images.
        out_channels (int): Number of channels in output images.
        base_channels (int): Number of filters at the last conv layer.
            Default: 64.
        norm_cfg (dict): Config dict to build norm layer. Default:
            `dict(type='IN')`.
        use_dropout (bool): Whether to use dropout layers. Default: False.
        num_blocks (int): Number of residual blocks. Default: 9.
        padding_mode (str): The name of padding layer in conv layers:
            'reflect' | 'replicate' | 'zeros'. Default: 'reflect'.
        init_cfg (dict): Config dict for initialization.
            `type`: The name of our initialization method. Default: 'normal'.
            `gain`: Scaling factor for normal, xavier and orthogonal.
            Default: 0.02.
    """

    def __init__(self, in_channels, out_channels, base_channels=64, norm_cfg=dict(type='IN'), use_dropout=False, num_blocks=9, padding_mode='reflect', init_cfg=dict(type='normal', gain=0.02)):
        super().__init__()
        assert (num_blocks >= 0), f'Number of residual blocks must be non-negative, but got {num_blocks}.'
        # Fix: error message previously read "butgot" (missing space).
        assert isinstance(norm_cfg, dict), f"'norm_cfg' should be dict, but got {type(norm_cfg)}"
        assert ('type' in norm_cfg), "'norm_cfg' must have key 'type'"
        # InstanceNorm has no affine shift by default, so convs keep a bias.
        use_bias = (norm_cfg['type'] == 'IN')
        model = []
        # Input stem: 7x7 conv at full resolution.
        model += [ConvModule(in_channels=in_channels, out_channels=base_channels, kernel_size=7, padding=3, bias=use_bias, norm_cfg=norm_cfg, padding_mode=padding_mode)]
        # Two stride-2 downsampling convs, doubling channels each time.
        num_down = 2
        for i in range(num_down):
            multiple = (2 ** i)
            model += [ConvModule(in_channels=(base_channels * multiple), out_channels=((base_channels * multiple) * 2), kernel_size=3, stride=2, padding=1, bias=use_bias, norm_cfg=norm_cfg)]
        # Residual trunk at the lowest resolution.
        multiple = (2 ** num_down)
        for _ in range(num_blocks):
            model += [ResidualBlockWithDropout((base_channels * multiple), padding_mode=padding_mode, norm_cfg=norm_cfg, use_dropout=use_dropout)]
        # Two transposed convs mirror the downsampling path.
        for i in range(num_down):
            multiple = (2 ** (num_down - i))
            model += [ConvModule(in_channels=(base_channels * multiple), out_channels=((base_channels * multiple) // 2), kernel_size=3, stride=2, padding=1, bias=use_bias, conv_cfg=dict(type='Deconv', output_padding=1), norm_cfg=norm_cfg)]
        # Output head: 7x7 conv + Tanh to map into image range.
        model += [ConvModule(in_channels=base_channels, out_channels=out_channels, kernel_size=7, padding=3, bias=True, norm_cfg=None, act_cfg=dict(type='Tanh'), padding_mode=padding_mode)]
        self.model = nn.Sequential(*model)
        self.init_type = ('normal' if (init_cfg is None) else init_cfg.get('type', 'normal'))
        self.init_gain = (0.02 if (init_cfg is None) else init_cfg.get('gain', 0.02))

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        return self.model(x)

    def init_weights(self, pretrained=None, strict=True):
        """Initialize weights for the model.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Default: None.
            strict (bool, optional): Whether to allow different params for
                the model and checkpoint. Default: True.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=strict, logger=logger)
        elif (pretrained is None):
            generation_init_weights(self, init_type=self.init_type, init_gain=self.init_gain)
        else:
            raise TypeError(f"'pretrained' must be a str or None. But received {type(pretrained)}.")
|
@BACKBONES.register_module()
class UnetGenerator(nn.Module):
    """Construct the Unet-based generator from the innermost layer to the
    outermost layer, which is a recursive process.

    Args:
        in_channels (int): Number of channels in input images.
        out_channels (int): Number of channels in output images.
        num_down (int): Number of downsamplings in Unet. If `num_down` is 8,
            the image with size 256x256 will become 1x1 at the bottleneck.
            Default: 8.
        base_channels (int): Number of channels at the last conv layer.
            Default: 64.
        norm_cfg (dict): Config dict to build norm layer. Default:
            `dict(type='BN')`.
        use_dropout (bool): Whether to use dropout layers. Default: False.
        init_cfg (dict): Config dict for initialization.
            `type`: The name of our initialization method. Default: 'normal'.
            `gain`: Scaling factor for normal, xavier and orthogonal.
            Default: 0.02.
    """

    def __init__(self, in_channels, out_channels, num_down=8, base_channels=64, norm_cfg=dict(type='BN'), use_dropout=False, init_cfg=dict(type='normal', gain=0.02)):
        super().__init__()
        # Fix: error message previously read "butgot" (missing space).
        assert isinstance(norm_cfg, dict), f"'norm_cfg' should be dict, but got {type(norm_cfg)}"
        assert ('type' in norm_cfg), "'norm_cfg' must have key 'type'"
        # Build from the innermost bottleneck outwards, wrapping each level.
        unet_block = UnetSkipConnectionBlock((base_channels * 8), (base_channels * 8), in_channels=None, submodule=None, norm_cfg=norm_cfg, is_innermost=True)
        # Intermediate 8x-width levels (dropout optional).
        for _ in range((num_down - 5)):
            unet_block = UnetSkipConnectionBlock((base_channels * 8), (base_channels * 8), in_channels=None, submodule=unet_block, norm_cfg=norm_cfg, use_dropout=use_dropout)
        # Widening levels: 4x, 2x, 1x the base width.
        unet_block = UnetSkipConnectionBlock((base_channels * 4), (base_channels * 8), in_channels=None, submodule=unet_block, norm_cfg=norm_cfg)
        unet_block = UnetSkipConnectionBlock((base_channels * 2), (base_channels * 4), in_channels=None, submodule=unet_block, norm_cfg=norm_cfg)
        unet_block = UnetSkipConnectionBlock(base_channels, (base_channels * 2), in_channels=None, submodule=unet_block, norm_cfg=norm_cfg)
        self.model = UnetSkipConnectionBlock(out_channels, base_channels, in_channels=in_channels, submodule=unet_block, is_outermost=True, norm_cfg=norm_cfg)
        self.init_type = ('normal' if (init_cfg is None) else init_cfg.get('type', 'normal'))
        self.init_gain = (0.02 if (init_cfg is None) else init_cfg.get('gain', 0.02))

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        return self.model(x)

    def init_weights(self, pretrained=None, strict=True):
        """Initialize weights for the model.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Default: None.
            strict (bool, optional): Whether to allow different params for
                the model and checkpoint. Default: True.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=strict, logger=logger)
        elif (pretrained is None):
            generation_init_weights(self, init_type=self.init_type, init_gain=self.init_gain)
        else:
            raise TypeError(f"'pretrained' must be a str or None. But received {type(pretrained)}.")
|
@BACKBONES.register_module()
class ARCNN(nn.Module):
    """ARCNN network structure.

    Paper: https://arxiv.org/pdf/1504.06993.pdf

    A four-conv residual network: the stacked convolutions predict a
    correction that is added back onto the input.

    Args:
        in_channels (int): Channel number of inputs. Default: 3.
        mid_channels_1 (int): Channel number of the first intermediate
            features. Default: 64.
        mid_channels_2 (int): Channel number of the second intermediate
            features. Default: 32.
        mid_channels_3 (int): Channel number of the third intermediate
            features. Default: 16.
        out_channels (int): Channel number of outputs. Default: 3.
        in_kernel_size (int): Kernel size of the first convolution.
            Default: 9.
        mid_kernel_size_1 (int): Kernel size of the first intermediate
            convolution. Default: 7.
        mid_kernel_size_2 (int): Kernel size of the second intermediate
            convolution. Default: 1.
        out_kernel_size (int): Kernel size of the last convolution.
            Default: 5.
    """

    def __init__(self, in_channels=3, mid_channels_1=64, mid_channels_2=32, mid_channels_3=16, out_channels=3, in_kernel_size=9, mid_kernel_size_1=7, mid_kernel_size_2=1, out_kernel_size=5):
        super().__init__()
        # Each conv uses 'same' padding so the spatial size never changes.
        layers = [
            nn.Conv2d(in_channels, mid_channels_1, in_kernel_size,
                      padding=in_kernel_size // 2),
            nn.ReLU(),
            nn.Conv2d(mid_channels_1, mid_channels_2, mid_kernel_size_1,
                      padding=mid_kernel_size_1 // 2),
            nn.ReLU(),
            nn.Conv2d(mid_channels_2, mid_channels_3, mid_kernel_size_2,
                      padding=mid_kernel_size_2 // 2),
            nn.ReLU(),
            nn.Conv2d(mid_channels_3, out_channels, out_kernel_size,
                      padding=out_kernel_size // 2),
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Input plus the predicted residual correction.
        """
        return self.network(x) + x

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.
        """
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
        logger = get_root_logger()
        load_checkpoint(self, pretrained, strict=strict, logger=logger)
|
@BACKBONES.register_module()
class CBDNet(nn.Module):
    """CBDNet network structure.

    Two sub-networks: a small fully-convolutional noise-estimation network
    (``self.estimate``) followed by a UNet denoiser (``self.denoise``) that
    takes the image concatenated with the estimated noise map and predicts
    a residual added back onto the input.

    Args:
        in_channels (int): Channel number of inputs. Default: 3.
        estimate_channels (int): Channel number of the intermediate features
            of the noise-estimation subnetwork. Default: 32.
        out_channels (int): Channel number of outputs. Default: 3.
        nlevel_denoise (int): Passed to ``UNet`` as ``nlevel``; presumably
            the number of UNet levels — see ``UNet`` for exact semantics.
            Default: 3.
        nf_base_denoise (int): Passed to ``UNet`` as ``nf_base``; presumably
            the base feature width. Default: 64.
        nf_gr_denoise (int): Passed to ``UNet`` as ``nf_gr``. Default: 2.
        nl_base_denoise (int): Passed to ``UNet`` as ``nl_base``. Default: 1.
        nl_gr_denoise (int): Passed to ``UNet`` as ``nl_gr``. Default: 2.
        down_denoise (str): Passed to ``UNet`` as ``down``; downsampling
            mode. Default: 'avepool2d'.
        up_denoise (str): Passed to ``UNet`` as ``up``; upsampling mode.
            Default: 'transpose2d'.
        reduce_denoise (str): Passed to ``UNet`` as ``reduce``.
            Default: 'add'.
    """
    def __init__(self, in_channels=3, estimate_channels=32, out_channels=3, nlevel_denoise=3, nf_base_denoise=64, nf_gr_denoise=2, nl_base_denoise=1, nl_gr_denoise=2, down_denoise='avepool2d', up_denoise='transpose2d', reduce_denoise='add'):
        super().__init__()
        # Noise estimator: five 3x3 conv+ReLU layers; the trailing ReLU
        # makes the estimated noise map non-negative.
        estimate_list = nn.ModuleList([nn.Conv2d(in_channels=in_channels, out_channels=estimate_channels, kernel_size=3, padding=(3 // 2)), nn.ReLU(inplace=True)])
        for _ in range(3):
            estimate_list += nn.ModuleList([nn.Conv2d(in_channels=estimate_channels, out_channels=estimate_channels, kernel_size=3, padding=(3 // 2)), nn.ReLU(inplace=True)])
        estimate_list += nn.ModuleList([nn.Conv2d(estimate_channels, out_channels, 3, padding=(3 // 2)), nn.ReLU(inplace=True)])
        self.estimate = nn.Sequential(*estimate_list)
        # Denoiser input is the image concatenated with the estimated noise
        # map, hence ``in_channels * 2``.
        self.denoise = UNet(nf_in=(in_channels * 2), nf_out=out_channels, nlevel=nlevel_denoise, nf_base=nf_base_denoise, nf_gr=nf_gr_denoise, nl_base=nl_base_denoise, nl_gr=nl_gr_denoise, down=down_denoise, up=up_denoise, reduce=reduce_denoise, residual=False)
    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Input plus the residual predicted by the denoiser.
        """
        estimated_noise_map = self.estimate(x)
        res = self.denoise(torch.cat([x, estimated_noise_map], dim=1))
        x = (res + x)
        return x
    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=strict, logger=logger)
        elif (pretrained is None):
            pass
        else:
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
|
@BACKBONES.register_module()
class DCAD(nn.Module):
    """DCAD network structure.

    Paper: https://ieeexplore.ieee.org/document/7923714

    Args:
        in_channels (int): Channel number of inputs.
        out_channels (int): Channel number of outputs.
        mid_channels (int): Channel number of intermediate features.
            Default: 64.
        num_blocks (int): Block number in the trunk network. Default: 8.
    """

    def __init__(self, in_channels, out_channels, mid_channels=64,
                 num_blocks=8):
        super().__init__()
        self.conv_first = nn.Conv2d(in_channels, mid_channels, 3, padding=1)
        # Trunk: `num_blocks` repetitions of ReLU followed by a 3x3 conv.
        trunk = []
        for _ in range(num_blocks):
            trunk.append(nn.ReLU())
            trunk.append(nn.Conv2d(mid_channels, mid_channels, 3, padding=1))
        self.body = nn.Sequential(*trunk)
        self.conv_after_body = nn.Sequential(
            nn.ReLU(), nn.Conv2d(mid_channels, out_channels, 3, padding=1))

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        residual = self.conv_after_body(self.body(self.conv_first(x)))
        # Global residual connection.
        return residual + x

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.
        """
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
        logger = get_root_logger()
        load_checkpoint(self, pretrained, strict=strict, logger=logger)
|
class FeedbackBlock(nn.Module):
    """Feedback Block of DIC

    It has a style of:

    ::
        ----- Module ----->
          ^            |
          |____________|

    Args:
        mid_channels (int): Number of channels in the intermediate features.
        num_blocks (int): Number of blocks.
        upscale_factor (int): upscale factor.
        padding (int): Padding of the up/down projection convolutions.
            Default: 2.
        prelu_init (float): `init` of PReLU. Default: 0.2.
    """

    def __init__(self, mid_channels, num_blocks, upscale_factor, padding=2,
                 prelu_init=0.2):
        super().__init__()
        stride = upscale_factor
        kernel_size = upscale_factor + 4
        self.num_blocks = num_blocks
        # The hidden state is bootstrapped from the first input; see forward.
        self.need_reset = True
        self.last_hidden = None
        self.conv_first = nn.Sequential(
            nn.Conv2d(2 * mid_channels, mid_channels, kernel_size=1),
            nn.PReLU(init=prelu_init))
        self.up_blocks = nn.ModuleList()
        self.down_blocks = nn.ModuleList()
        self.lr_blocks = nn.ModuleList()
        self.hr_blocks = nn.ModuleList()
        for idx in range(num_blocks):
            self.up_blocks.append(
                nn.Sequential(
                    nn.ConvTranspose2d(mid_channels, mid_channels,
                                       kernel_size, stride, padding),
                    nn.PReLU(init=prelu_init)))
            self.down_blocks.append(
                nn.Sequential(
                    nn.Conv2d(mid_channels, mid_channels, kernel_size, stride,
                              padding), nn.PReLU(init=prelu_init)))
            if idx > 0:
                # 1x1 fusions of all previously produced LR/HR features.
                self.lr_blocks.append(
                    nn.Sequential(
                        nn.Conv2d(mid_channels * (idx + 1), mid_channels,
                                  kernel_size=1), nn.PReLU(init=prelu_init)))
                self.hr_blocks.append(
                    nn.Sequential(
                        nn.Conv2d(mid_channels * (idx + 1), mid_channels,
                                  kernel_size=1), nn.PReLU(init=prelu_init)))
        self.conv_last = nn.Sequential(
            nn.Conv2d(num_blocks * mid_channels, mid_channels, kernel_size=1),
            nn.PReLU(init=prelu_init))

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        if self.need_reset:
            # First call: seed the hidden state with the input itself.
            self.last_hidden = x
            self.need_reset = False
        feat = self.conv_first(torch.cat((x, self.last_hidden), dim=1))
        lr_features = [feat]
        hr_features = []
        for idx in range(self.num_blocks):
            lr = torch.cat(lr_features, 1)
            if idx > 0:
                lr = self.lr_blocks[idx - 1](lr)
            hr = self.up_blocks[idx](lr)
            hr_features.append(hr)
            hr = torch.cat(hr_features, 1)
            if idx > 0:
                hr = self.hr_blocks[idx - 1](hr)
            lr_features.append(self.down_blocks[idx](hr))
        output = self.conv_last(torch.cat(lr_features[1:], 1))
        # The output becomes the hidden state for the next step.
        self.last_hidden = output
        return output
|
class FeedbackBlockCustom(FeedbackBlock):
    """Custom feedback block, will be used as the first feedback block.

    Unlike the base class it projects the raw input features directly and
    keeps no hidden state across calls.

    Args:
        in_channels (int): Number of channels in the input features.
        mid_channels (int): Number of channels in the intermediate features.
        num_blocks (int): Number of blocks.
        upscale_factor (int): upscale factor.
    """

    def __init__(self, in_channels, mid_channels, num_blocks, upscale_factor):
        super().__init__(mid_channels, num_blocks, upscale_factor)
        # Override the first conv: it consumes `in_channels` input features
        # instead of the concatenation with a hidden state.
        self.conv_first = nn.Sequential(
            nn.Conv2d(in_channels, mid_channels, kernel_size=1),
            nn.PReLU(init=0.2))

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        feat = self.conv_first(x)
        lr_features = [feat]
        hr_features = []
        for idx in range(self.num_blocks):
            lr = torch.cat(lr_features, 1)
            if idx > 0:
                lr = self.lr_blocks[idx - 1](lr)
            hr = self.up_blocks[idx](lr)
            hr_features.append(hr)
            hr = torch.cat(hr_features, 1)
            if idx > 0:
                hr = self.hr_blocks[idx - 1](hr)
            lr_features.append(self.down_blocks[idx](hr))
        return self.conv_last(torch.cat(lr_features[1:], 1))
|
class GroupResBlock(nn.Module):
    """ResBlock with Group Conv.

    Args:
        in_channels (int): Channel number of input features.
        out_channels (int): Channel number of output features.
        mid_channels (int): Channel number of intermediate features.
        groups (int): Number of blocked connections from input to output.
        res_scale (float): Used to scale the residual before addition.
            Default: 1.0.
    """

    def __init__(self, in_channels, out_channels, mid_channels, groups,
                 res_scale=1.0):
        super().__init__()
        self.res = nn.Sequential(
            nn.Conv2d(in_channels, mid_channels, 3, 1, 1, groups=groups),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(mid_channels, out_channels, 3, 1, 1, groups=groups))
        self.res_scale = res_scale

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        # Scale the residual branch before the skip connection.
        scaled = self.res_scale * self.res(x)
        return x + scaled
|
class FeatureHeatmapFusingBlock(nn.Module):
    """ Fusing Feature and Heatmap.

    Args:
        in_channels (int): Number of channels in the input features.
        num_heatmaps (int): Number of heatmap.
        num_blocks (int): Number of blocks.
        mid_channels (int | None): Number of channels in the intermediate
            features. Default: None
    """

    def __init__(self, in_channels, num_heatmaps, num_blocks,
                 mid_channels=None):
        super().__init__()
        self.num_heatmaps = num_heatmaps
        # One feature group per heatmap channel.
        res_block_channel = in_channels * num_heatmaps
        self.mid_channels = (num_heatmaps * in_channels
                             if mid_channels is None else mid_channels)
        self.conv_first = nn.Sequential(
            nn.Conv2d(in_channels, res_block_channel, kernel_size=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True))
        self.body = make_layer(
            GroupResBlock,
            num_blocks,
            in_channels=res_block_channel,
            out_channels=res_block_channel,
            mid_channels=self.mid_channels,
            groups=num_heatmaps)

    def forward(self, feature, heatmap):
        """Forward function.

        Args:
            feature (Tensor): Input feature tensor.
            heatmap (Tensor): Input heatmap tensor.

        Returns:
            Tensor: Forward results.
        """
        assert self.num_heatmaps == heatmap.size(1)
        batch_size = heatmap.size(0)
        # Last two spatial dims of `feature`; only used to restore the shape.
        w, h = feature.shape[-2:]
        feature = self.body(self.conv_first(feature))
        # Soft attention over the heatmap channels.
        attention = nn.functional.softmax(heatmap, dim=1)
        weighted = feature.view(batch_size, self.num_heatmaps, -1, w,
                                h) * attention.unsqueeze(2)
        return weighted.sum(1)
|
class FeedbackBlockHeatmapAttention(FeedbackBlock):
    """Feedback block with HeatmapAttention.

    Identical to ``FeedbackBlock`` except that, right after the first 1x1
    convolution, the features are fused with heatmaps through a
    ``FeatureHeatmapFusingBlock``.

    Args:
        mid_channels (int): Number of channels in the intermediate features.
        num_blocks (int): Number of blocks.
        upscale_factor (int): upscale factor.
        num_heatmaps (int): Number of heatmaps used by the fusion block.
        num_fusion_blocks (int): Number of blocks in the fusion module.
        padding (int): Padding of the up/down projection convolutions.
            Default: 2.
        prelu_init (float): `init` of PReLU. Default: 0.2.
    """
    def __init__(self, mid_channels, num_blocks, upscale_factor, num_heatmaps, num_fusion_blocks, padding=2, prelu_init=0.2):
        super().__init__(mid_channels, num_blocks, upscale_factor, padding=padding, prelu_init=prelu_init)
        self.fusion_block = FeatureHeatmapFusingBlock(mid_channels, num_heatmaps, num_fusion_blocks)
    def forward(self, x, heatmap):
        """Forward function.

        Args:
            x (Tensor): Input feature tensor.
            heatmap (Tensor): Input heatmap tensor.

        Returns:
            Tensor: Forward results.
        """
        if self.need_reset:
            # First call: seed the hidden state with the input itself.
            self.last_hidden = x
            self.need_reset = False
        x = torch.cat((x, self.last_hidden), dim=1)
        x = self.conv_first(x)
        # Fuse features with the heatmap attention before the up/down loop.
        x = self.fusion_block(x, heatmap)
        lr_features = []
        hr_features = []
        lr_features.append(x)
        for idx in range(self.num_blocks):
            lr = torch.cat(lr_features, 1)
            if (idx > 0):
                # 1x1 fusion of all LR features generated so far.
                lr = self.lr_blocks[(idx - 1)](lr)
            hr = self.up_blocks[idx](lr)
            hr_features.append(hr)
            hr = torch.cat(hr_features, 1)
            if (idx > 0):
                # 1x1 fusion of all HR features generated so far.
                hr = self.hr_blocks[(idx - 1)](hr)
            lr = self.down_blocks[idx](hr)
            lr_features.append(lr)
        output = torch.cat(lr_features[1:], 1)
        output = self.conv_last(output)
        # The output becomes the hidden state for the next step.
        self.last_hidden = output
        return output
|
@BACKBONES.register_module()
class DICNet(nn.Module):
    """DIC network structure for face super-resolution.

    Paper: Deep Face Super-Resolution with Iterative Collaboration between
    Attentive Recovery and Landmark Estimation

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels in the output image
        mid_channels (int): Channel number of intermediate features.
            Default: 64
        num_blocks (tuple[int]): Block numbers in the trunk network.
            Default: 6
        hg_mid_channels (int): Channel number of intermediate features
            of HourGlass. Default: 256
        hg_num_keypoints (int): Keypoint number of HourGlass. Default: 68
        num_steps (int): Number of iterative steps. Default: 4
        upscale_factor (int): Upsampling factor. Default: 8
        detach_attention (bool): Detached from the current tensor for heatmap
            or not.
        prelu_init (float): `init` of PReLU. Default: 0.2
        num_heatmaps (int): Number of heatmaps. Default: 5
        num_fusion_blocks (int): Number of fusion blocks. Default: 7
    """
    def __init__(self, in_channels, out_channels, mid_channels, num_blocks=6, hg_mid_channels=256, hg_num_keypoints=68, num_steps=4, upscale_factor=8, detach_attention=False, prelu_init=0.2, num_heatmaps=5, num_fusion_blocks=7):
        super().__init__()
        self.num_steps = num_steps
        self.detach_attention = detach_attention
        # Shallow feature head: conv to 4x channels then PixelShuffle(2).
        self.conv_first = nn.Sequential(nn.Conv2d(in_channels, (mid_channels * 4), 3, 1, 1), nn.PReLU(init=prelu_init), nn.PixelShuffle(2))
        # First iteration uses a plain feedback block (no heatmap available).
        self.first_block = FeedbackBlockCustom(in_channels=mid_channels, mid_channels=mid_channels, num_blocks=num_blocks, upscale_factor=upscale_factor)
        # Later iterations share one heatmap-attention feedback block.
        self.block = FeedbackBlockHeatmapAttention(mid_channels=mid_channels, num_blocks=num_blocks, upscale_factor=upscale_factor, num_heatmaps=num_heatmaps, num_fusion_blocks=num_fusion_blocks)
        # The hidden state is seeded manually in forward(); disable the
        # automatic reset of the shared block.
        self.block.need_reset = False
        self.hour_glass = FeedbackHourglass(mid_channels=hg_mid_channels, num_keypoints=hg_num_keypoints)
        self.conv_last = nn.Sequential(nn.ConvTranspose2d(mid_channels, mid_channels, 8, 4, 2), nn.PReLU(init=prelu_init), nn.Conv2d(mid_channels, out_channels, 3, 1, 1))
    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor.

        Returns:
            tuple:

                sr_outputs (list[Tensor]): SR result of each iterative step.
                heatmap_outputs (list[Tensor]): Heatmap of each iterative
                    step.
        """
        # Bilinear skip branch; the SR output size is fixed to 128x128.
        inter_res = nn.functional.interpolate(x, size=(128, 128), mode='bilinear', align_corners=False)
        x = self.conv_first(x)
        sr_outputs = []
        heatmap_outputs = []
        last_hidden = None
        heatmap = None
        for step in range(self.num_steps):
            if (step == 0):
                # No heatmap yet: run the plain block and seed the hidden
                # state of the shared attention block with its output.
                sr_feature = self.first_block(x)
                self.block.last_hidden = sr_feature
            else:
                # Presumably collapses the hourglass heatmaps to five
                # channels for the attention fusion — TODO confirm against
                # `reduce_to_five_heatmaps`.
                heatmap = reduce_to_five_heatmaps(heatmap, self.detach_attention)
                sr_feature = self.block(x, heatmap)
            sr = self.conv_last(sr_feature)
            sr = torch.add(inter_res, sr)
            # Estimate the landmark heatmaps from the current SR image.
            (heatmap, last_hidden) = self.hour_glass(sr, last_hidden)
            sr_outputs.append(sr)
            heatmap_outputs.append(heatmap)
        return (sr_outputs, heatmap_outputs)
    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=strict, logger=logger)
        elif (pretrained is not None):
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
|
@BACKBONES.register_module()
class DnCNN(nn.Module):
    """DnCNN network structure.

    Args:
        in_channels (int): Channel number of inputs.
        out_channels (int): Channel number of outputs.
        mid_channels (int): Channel number of intermediate features.
            Default: 64.
        num_blocks (int): Block number in the trunk network. Default: 15.
        if_bn (bool): If use BN layer. Default: False.
    """

    def __init__(self, in_channels, out_channels, mid_channels=64,
                 num_blocks=15, if_bn=False):
        super().__init__()
        self.conv_first = nn.Conv2d(in_channels, mid_channels, 3, padding=1)
        layers = []
        for _ in range(num_blocks):
            layers.append(nn.ReLU())
            if if_bn:
                # Bias is disabled because BatchNorm provides its own shift.
                layers.append(
                    nn.Conv2d(mid_channels, mid_channels, 3, padding=1,
                              bias=False))
                layers.append(
                    nn.BatchNorm2d(num_features=mid_channels, momentum=0.9,
                                   eps=0.0001, affine=True))
            else:
                layers.append(
                    nn.Conv2d(mid_channels, mid_channels, 3, padding=1))
        self.body = nn.Sequential(*layers)
        self.conv_after_body = nn.Sequential(
            nn.ReLU(), nn.Conv2d(mid_channels, out_channels, 3, padding=1))

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        residual = self.conv_after_body(self.body(self.conv_first(x)))
        # Global residual connection.
        return residual + x

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.
        """
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
        logger = get_root_logger()
        load_checkpoint(self, pretrained, strict=strict, logger=logger)
|
class DynamicUpsamplingFilter(nn.Module):
    """Dynamic upsampling filter used in DUF.

    Ref: https://github.com/yhjo09/VSR-DUF.
    It only supports input with 3 channels. And it applies the same filters
    to 3 channels.

    Args:
        filter_size (tuple): Filter size of generated filters.
            The shape is (kh, kw). Default: (5, 5).

    Raises:
        TypeError: If ``filter_size`` is not a tuple.
        ValueError: If the length of ``filter_size`` is not 2.
    """

    def __init__(self, filter_size=(5, 5)):
        super().__init__()
        if not isinstance(filter_size, tuple):
            # Fixed: the original f-string printed the *value* glued to the
            # word "type" instead of the actual type of the argument.
            raise TypeError(f'The type of filter_size must be tuple, but got {type(filter_size)}')
        if len(filter_size) != 2:
            raise ValueError(f'The length of filter size must be 2, but got {len(filter_size)}.')
        self.filter_size = filter_size
        # Convert once: np.prod returns a numpy integer.
        filter_prod = int(np.prod(filter_size))
        # Identity kernels of shape (filter_prod, 1, kh, kw): each output
        # channel extracts one position of the (kh, kw) neighborhood.
        expansion_filter = torch.eye(filter_prod).view(
            filter_prod, 1, *filter_size)
        # Repeat for the 3 input channels; used with groups=3 in forward.
        self.expansion_filter = expansion_filter.repeat(3, 1, 1, 1)

    def forward(self, x, filters):
        """Forward function for DynamicUpsamplingFilter.

        Args:
            x (Tensor): Input image with 3 channels. The shape is (n, 3, h, w).
            filters (Tensor): Generated dynamic filters.
                The shape is (n, filter_prod, upsampling_square, h, w).
                filter_prod: prod of filter kernel size, e.g., 1*5*5=25.
                upsampling_square: similar to pixel shuffle,
                upsampling_square = upsampling * upsampling
                e.g., for x 4 upsampling, upsampling_square= 4*4 = 16

        Returns:
            Tensor: Filtered image with shape (n, 3*upsampling_square, h, w)
        """
        n, filter_prod, upsampling_square, h, w = filters.size()
        kh, kw = self.filter_size
        # Expand the input: each output channel holds one shifted copy of the
        # input within the (kh, kw) window; (n, 3*filter_prod, h, w).
        expanded_input = F.conv2d(
            x,
            self.expansion_filter.to(x),
            padding=(kh // 2, kw // 2),
            groups=3)
        # (n, h, w, 3, filter_prod) for batched matmul per spatial position.
        expanded_input = expanded_input.view(n, 3, filter_prod, h,
                                             w).permute(0, 3, 4, 1, 2)
        filters = filters.permute(0, 3, 4, 1, 2)
        # (3, filter_prod) x (filter_prod, upsampling_square) at each pixel.
        out = torch.matmul(expanded_input, filters)
        return out.permute(0, 3, 4, 1, 2).view(n, 3 * upsampling_square, h, w)
|
class UpsampleModule(nn.Sequential):
    """Upsample module used in EDSR.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        mid_channels (int): Channel number of intermediate features.
    """

    def __init__(self, scale, mid_channels):
        layers = []
        if scale & (scale - 1) == 0:
            # Power of two: stack log2(scale) x2 pixel-shuffle steps.
            num_steps = int(math.log(scale, 2))
            layers.extend(
                PixelShufflePack(
                    mid_channels, mid_channels, 2, upsample_kernel=3)
                for _ in range(num_steps))
        elif scale == 3:
            # Single x3 pixel-shuffle step.
            layers.append(
                PixelShufflePack(
                    mid_channels, mid_channels, scale, upsample_kernel=3))
        else:
            raise ValueError(f'scale {scale} is not supported. Supported scales: 2^n and 3.')
        super().__init__(*layers)
|
@BACKBONES.register_module()
class EDSR(nn.Module):
    """EDSR network structure.

    Paper: Enhanced Deep Residual Networks for Single Image Super-Resolution.
    Ref repo: https://github.com/thstkdgus35/EDSR-PyTorch

    Args:
        in_channels (int): Channel number of inputs.
        out_channels (int): Channel number of outputs.
        mid_channels (int): Channel number of intermediate features.
            Default: 64.
        num_blocks (int): Block number in the trunk network. Default: 16.
        upscale_factor (int): Upsampling factor. Support 2^n and 3.
            Default: 4.
        res_scale (float): Used to scale the residual in residual block.
            Default: 1.
        rgb_mean (Sequence[float]): Image mean in RGB orders.
            Default: (0.4488, 0.4371, 0.4040), calculated from DIV2K dataset.
        rgb_std (Sequence[float]): Image std in RGB orders. In EDSR, it uses
            (1.0, 1.0, 1.0). Default: (1.0, 1.0, 1.0).
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels=64,
                 num_blocks=16,
                 upscale_factor=4,
                 res_scale=1,
                 rgb_mean=(0.4488, 0.4371, 0.4040),
                 rgb_std=(1.0, 1.0, 1.0)):
        # Fixed: the defaults were mutable lists; tuples are safe defaults
        # and remain backward compatible (callers may still pass lists).
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.mid_channels = mid_channels
        self.num_blocks = num_blocks
        self.upscale_factor = upscale_factor
        # (1, C, 1, 1) tensors used to normalize/denormalize the image.
        self.mean = torch.Tensor(rgb_mean).view(1, -1, 1, 1)
        self.std = torch.Tensor(rgb_std).view(1, -1, 1, 1)
        self.conv_first = nn.Conv2d(in_channels, mid_channels, 3, padding=1)
        self.body = make_layer(
            ResidualBlockNoBN,
            num_blocks,
            mid_channels=mid_channels,
            res_scale=res_scale)
        self.conv_after_body = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1)
        self.upsample = UpsampleModule(upscale_factor, mid_channels)
        self.conv_last = nn.Conv2d(
            mid_channels, out_channels, 3, 1, 1, bias=True)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        # Move the normalization constants to the input device/dtype.
        self.mean = self.mean.to(x)
        self.std = self.std.to(x)
        x = (x - self.mean) / self.std
        x = self.conv_first(x)
        res = self.conv_after_body(self.body(x))
        res += x
        x = self.conv_last(self.upsample(res))
        # Denormalize back to the image range.
        x = x * self.std + self.mean
        return x

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=strict, logger=logger)
        elif pretrained is None:
            pass
        else:
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
|
class ResidualDenseBlock(nn.Module):
    """Residual Dense Block.

    Used in RRDB block in ESRGAN.

    Args:
        mid_channels (int): Channel number of intermediate features.
        growth_channels (int): Channels for each growth.
    """

    def __init__(self, mid_channels=64, growth_channels=32):
        super().__init__()
        # conv1..conv4 emit `growth_channels`; conv5 maps back to
        # `mid_channels`. Each conv sees the input plus all earlier outputs.
        for i in range(5):
            out_channels = mid_channels if i == 4 else growth_channels
            self.add_module(
                f'conv{i + 1}',
                nn.Conv2d(mid_channels + i * growth_channels, out_channels,
                          3, 1, 1))
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        self.init_weights()

    def init_weights(self):
        """Init weights for ResidualDenseBlock.

        Use smaller std for better stability and performance. We empirically
        use 0.1. See more details in "ESRGAN: Enhanced Super-Resolution
        Generative Adversarial Networks"
        """
        for i in range(5):
            default_init_weights(getattr(self, f'conv{i + 1}'), 0.1)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        # Dense connections: conv1..conv4 consume all features so far.
        feats = [x]
        for i in range(1, 5):
            conv = getattr(self, f'conv{i}')
            feats.append(self.lrelu(conv(torch.cat(feats, 1))))
        out = self.conv5(torch.cat(feats, 1))
        # Residual scaling by 0.2, as in ESRGAN.
        return out * 0.2 + x
|
class RRDB(nn.Module):
    """Residual in Residual Dense Block.

    Used in RRDB-Net in ESRGAN.

    Args:
        mid_channels (int): Channel number of intermediate features.
        growth_channels (int): Channels for each growth.
    """

    def __init__(self, mid_channels, growth_channels=32):
        super().__init__()
        self.rdb1 = ResidualDenseBlock(mid_channels, growth_channels)
        self.rdb2 = ResidualDenseBlock(mid_channels, growth_channels)
        self.rdb3 = ResidualDenseBlock(mid_channels, growth_channels)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        # Chain the three dense blocks.
        out = x
        for rdb in (self.rdb1, self.rdb2, self.rdb3):
            out = rdb(out)
        # Residual scaling by 0.2, as in ESRGAN.
        return out * 0.2 + x
|
@BACKBONES.register_module()
class RRDBNet(nn.Module):
    """Networks consisting of Residual in Residual Dense Block, which is used
    in ESRGAN and Real-ESRGAN.

    ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks.
    Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. # noqa: E501
    Currently, it supports [x1/x2/x4] upsampling scale factor.

    Args:
        in_channels (int): Channel number of inputs.
        out_channels (int): Channel number of outputs.
        mid_channels (int): Channel number of intermediate features.
            Default: 64
        num_blocks (int): Block number in the trunk network. Defaults: 23
        growth_channels (int): Channels for each growth. Default: 32.
        upscale_factor (int): Upsampling factor. Support x1, x2 and x4.
            Default: 4.
    """
    _supported_upscale_factors = [1, 2, 4]

    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels=64,
                 num_blocks=23,
                 growth_channels=32,
                 upscale_factor=4):
        super().__init__()
        if upscale_factor not in self._supported_upscale_factors:
            raise ValueError(f'Unsupported scale factor {upscale_factor}. Currently supported ones are {self._supported_upscale_factors}.')
        # For x1/x2, forward() pixel-unshuffles the input, multiplying the
        # channel number by (4 // upscale_factor) ** 2.
        in_channels = in_channels * ((4 // upscale_factor)**2)
        self.upscale_factor = upscale_factor
        self.conv_first = nn.Conv2d(in_channels, mid_channels, 3, 1, 1)
        self.body = make_layer(
            RRDB,
            num_blocks,
            mid_channels=mid_channels,
            growth_channels=growth_channels)
        self.conv_body = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1)
        self.conv_up1 = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1)
        self.conv_up2 = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1)
        self.conv_hr = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1)
        self.conv_last = nn.Conv2d(mid_channels, out_channels, 3, 1, 1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        if self.upscale_factor in (1, 2):
            feat = pixel_unshuffle(x, scale=4 // self.upscale_factor)
        else:
            feat = x
        feat = self.conv_first(feat)
        feat = feat + self.conv_body(self.body(feat))
        # Two fixed x2 nearest-neighbor upsampling stages.
        for up_conv in (self.conv_up1, self.conv_up2):
            feat = self.lrelu(
                up_conv(F.interpolate(feat, scale_factor=2, mode='nearest')))
        return self.conv_last(self.lrelu(self.conv_hr(feat)))

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=strict, logger=logger)
        elif pretrained is None:
            # Smaller-than-default init scale for the plain convolutions.
            for module in (self.conv_first, self.conv_body, self.conv_up1,
                           self.conv_up2, self.conv_hr, self.conv_last):
                default_init_weights(module, 0.1)
        else:
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
|
@BACKBONES.register_module()
class MSRResNet(nn.Module):
    """Modified SRResNet.

    A compacted version modified from SRResNet in "Photo-Realistic Single
    Image Super-Resolution Using a Generative Adversarial Network".

    It uses residual blocks without BN, similar to EDSR.
    Currently, it supports x2, x3 and x4 upsampling scale factor.

    Args:
        in_channels (int): Channel number of inputs.
        out_channels (int): Channel number of outputs.
        mid_channels (int): Channel number of intermediate features.
            Default: 64.
        num_blocks (int): Block number in the trunk network. Default: 16.
        upscale_factor (int): Upsampling factor. Support x2, x3 and x4.
            Default: 4.
    """
    _supported_upscale_factors = [2, 3, 4]

    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels=64,
                 num_blocks=16,
                 upscale_factor=4):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.mid_channels = mid_channels
        self.num_blocks = num_blocks
        self.upscale_factor = upscale_factor
        self.conv_first = nn.Conv2d(
            in_channels, mid_channels, 3, 1, 1, bias=True)
        self.trunk_net = make_layer(
            ResidualBlockNoBN, num_blocks, mid_channels=mid_channels)
        # x2/x3 use one pixel-shuffle step; x4 stacks two x2 steps.
        if self.upscale_factor in [2, 3]:
            self.upsample1 = PixelShufflePack(
                mid_channels,
                mid_channels,
                self.upscale_factor,
                upsample_kernel=3)
        elif self.upscale_factor == 4:
            self.upsample1 = PixelShufflePack(
                mid_channels, mid_channels, 2, upsample_kernel=3)
            self.upsample2 = PixelShufflePack(
                mid_channels, mid_channels, 2, upsample_kernel=3)
        else:
            raise ValueError(f'Unsupported scale factor {self.upscale_factor}. Currently supported ones are {self._supported_upscale_factors}.')
        self.conv_hr = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1, bias=True)
        self.conv_last = nn.Conv2d(
            mid_channels, out_channels, 3, 1, 1, bias=True)
        self.img_upsampler = nn.Upsample(
            scale_factor=self.upscale_factor,
            mode='bilinear',
            align_corners=False)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        feat = self.lrelu(self.conv_first(x))
        feat = self.trunk_net(feat)
        feat = self.upsample1(feat)
        if self.upscale_factor == 4:
            feat = self.upsample2(feat)
        feat = self.conv_last(self.lrelu(self.conv_hr(feat)))
        # Global skip: add the bilinearly upsampled input image.
        return feat + self.img_upsampler(x)

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=strict, logger=logger)
        elif pretrained is None:
            # Smaller-than-default init scale for the plain convolutions.
            for module in (self.conv_first, self.conv_hr, self.conv_last):
                default_init_weights(module, 0.1)
        else:
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
|
@BACKBONES.register_module()
class SRCNN(nn.Module):
    """SRCNN network structure for image super resolution.

    SRCNN has three conv layers. For each layer, we can define the
    `in_channels`, `out_channels` and `kernel_size`.
    The input image will first be upsampled with a bicubic upsampler, and then
    super-resolved in the HR spatial size.

    Paper: Learning a Deep Convolutional Network for Image Super-Resolution.

    Args:
        channels (tuple[int]): A tuple of channel numbers for each layer
            including channels of input and output . Default: (3, 64, 32, 3).
        kernel_sizes (tuple[int]): A tuple of kernel sizes for each conv layer.
            Default: (9, 1, 5).
        upscale_factor (int): Upsampling factor. Default: 4.
    """

    def __init__(self,
                 channels=(3, 64, 32, 3),
                 kernel_sizes=(9, 1, 5),
                 upscale_factor=4):
        super().__init__()
        assert len(channels) == 4, f'The length of channel tuple should be 4, but got {len(channels)}'
        assert len(kernel_sizes) == 3, f'The length of kernel tuple should be 3, but got {len(kernel_sizes)}'
        self.upscale_factor = upscale_factor
        self.img_upsampler = nn.Upsample(
            scale_factor=self.upscale_factor,
            mode='bicubic',
            align_corners=False)
        # Each conv is padded so the spatial size is preserved.
        self.conv1 = nn.Conv2d(
            channels[0],
            channels[1],
            kernel_size=kernel_sizes[0],
            padding=kernel_sizes[0] // 2)
        self.conv2 = nn.Conv2d(
            channels[1],
            channels[2],
            kernel_size=kernel_sizes[1],
            padding=kernel_sizes[1] // 2)
        self.conv3 = nn.Conv2d(
            channels[2],
            channels[3],
            kernel_size=kernel_sizes[2],
            padding=kernel_sizes[2] // 2)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        # Upsample first, then refine in HR space.
        feat = self.img_upsampler(x)
        feat = self.relu(self.conv1(feat))
        feat = self.relu(self.conv2(feat))
        return self.conv3(feat)

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.
        """
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
        logger = get_root_logger()
        load_checkpoint(self, pretrained, strict=strict, logger=logger)
|
class SFE(nn.Module):
    """Structural Feature Encoder

    Backbone of Texture Transformer Network for Image Super-Resolution.

    Args:
        in_channels (int): Number of channels in the input image
        mid_channels (int): Channel number of intermediate features
        num_blocks (int): Block number in the trunk network
        res_scale (float): Used to scale the residual in residual block.
            Default: 1.
    """

    def __init__(self, in_channels, mid_channels, num_blocks, res_scale):
        super().__init__()
        self.num_blocks = num_blocks
        self.conv_first = _conv3x3_layer(in_channels, mid_channels)
        self.body = make_layer(
            ResidualBlockNoBN,
            num_blocks,
            mid_channels=mid_channels,
            res_scale=res_scale)
        self.conv_last = _conv3x3_layer(mid_channels, mid_channels)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        shallow = F.relu(self.conv_first(x))
        deep = self.conv_last(self.body(shallow))
        # Long skip connection from the first activation.
        return deep + shallow
|
# NOTE(review): removed trailing page-scrape artifact ("Subsets and Splits /
# No community queries yet ...") — dataset-viewer boilerplate, not source code;
# as bare prose it would make this Python file unparseable.