| | import argparse |
| | import glob |
| | import os |
| | from PIL import Image |
| |
|
| | import cv2 |
| | import math |
| | import numpy as np |
| | import os |
| | import os.path as osp |
| | import random |
| | import time |
| | import torch |
| | from tqdm import tqdm |
| |
|
| | from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels |
| | from basicsr.data.transforms import augment |
| | from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor |
| | from basicsr.utils.registry import DATASET_REGISTRY |
| | from torch.utils import data as data |
| | from torchvision.transforms.functional import center_crop |
| | import torchvision.transforms as T |
| | from torchvision.utils import save_image |
| |
|
| | from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt |
| | from basicsr.data.transforms import paired_random_crop |
| | from basicsr.utils import DiffJPEG, USMSharp |
| | from basicsr.utils.img_process_util import filter2D |
| | from basicsr.utils.registry import MODEL_REGISTRY |
| | from collections import OrderedDict |
| | from torch.nn import functional as F |
| |
|
# Default configuration for RealESRGANDataset and the offline pair sampler.
cfg = {
    # Dataset identity and registry type.
    "name": "DF2K+OST",
    "type": "RealESRGANDataset",
    "dataroot_gt": "/home/CORP/hsiang.chen/Project/Datasets/IR/SuperResolution",
    # Meta list files (relative to dataroot_gt); each line's second token is an image path.
    "meta_train": [
        "DIV2K/metas/DIV2K_train_HR.list",
        "Flickr2K/metas/Flickr2K.list",
        "OST/metas/OST.list",
    ],
    "meta_test": ["DIV2K/metas/DIV2K_valid_HR.list"],

    # First degradation stage: random resize / noise / JPEG settings.
    "resize_prob": [0.2, 0.7, 0.1],  # probabilities for up / down / keep
    "resize_range": [0.15, 1.5],
    "gaussian_noise_prob": 0.5,  # else Poisson noise is used
    "noise_range": [1, 30],
    "poisson_scale_range": [0.05, 3],
    "gray_noise_prob": 0.4,
    "jpeg_range": [30, 95],  # JPEG quality range

    # First-stage blur kernel settings.
    "blur_kernel_size": 21,
    "kernel_list": ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'],
    "kernel_prob": [0.45, 0.25, 0.12, 0.03, 0.12, 0.03],
    "sinc_prob": 0.1,  # probability of a sinc kernel instead of a mixed blur kernel
    "blur_sigma": [0.2, 3],
    "betag_range": [0.5, 4],
    "betap_range": [1, 2],

    # Second degradation stage (milder ranges than the first).
    "second_blur_prob": 0.8,
    "resize_prob2": [0.3, 0.4, 0.3],
    "resize_range2": [0.3, 1.2],
    "gaussian_noise_prob2": 0.5,
    "noise_range2": [1, 25],
    "poisson_scale_range2": [0.05, 2.5],
    "gray_noise_prob2": 0.4,
    "jpeg_range2": [30, 95],

    # Second-stage blur kernel settings.
    "blur_kernel_size2": 21,
    "kernel_list2": ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'],
    "kernel_prob2": [0.45, 0.25, 0.12, 0.03, 0.12, 0.03],
    "sinc_prob2": 0.1,
    "blur_sigma2": [0.2, 1.5],
    "betag_range2": [0.5, 4],
    "betap_range2": [1, 2],

    # Probability of applying a final sinc filter after both stages.
    "final_sinc_prob": 0.8,

    # GT crop size and augmentation flags.
    "gt_size": 512,
    "keep_ratio": True,  # keep aspect ratio when pre-resizing GT before the crop
    "use_hflip": True,
    "use_rot": False,

    # Dataloader settings; NOTE(review): not read by this script — presumably
    # consumed by the basicsr training framework. Confirm before removing.
    "use_shuffle": True,
    "num_worker_per_gpu": 5,
    "batch_size_per_gpu": 12,
    "dataset_enlarge_ratio": 1,
    "prefetch_mode": None,
}
| |
|
def set_seed(seed=42):
    """Seed every RNG source (python, numpy, torch CPU and CUDA) for reproducibility."""
    for seeder in (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    ):
        seeder(seed)

    # Trade cuDNN autotuning for deterministic kernel selection.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
| |
|
@DATASET_REGISTRY.register()
class RealESRGANDataset(data.Dataset):
    """Dataset used for Real-ESRGAN model:
    Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.

    It loads gt (Ground-Truth) images, and augments them.
    It also generates blur kernels and sinc kernels for generating low-quality images.
    Note that the low-quality images are processed in tensors on GPUS for faster processing.

    Args:
        opt (dict): Config for train datasets. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            meta_info (str): Path for meta information file.
            io_backend (dict): IO backend type and other kwarg.
            use_hflip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
            Please see more options in the codes.
    """

    def __init__(self, opt, train=True):
        """Build the GT file list from meta files and prepare degradation helpers.

        Args:
            opt (dict): Config dict (see module-level ``cfg`` for the keys read here).
            train (bool): Use ``meta_train`` lists when True, ``meta_test`` otherwise.
        """
        super(RealESRGANDataset, self).__init__()
        self.opt = opt
        self.file_client = None

        # Root folder that the meta-list file paths are resolved against.
        self.data_rt = opt['dataroot_gt']

        self.train = train
        if self.train:
            self.metas = opt['meta_train']
        else:
            self.metas = opt['meta_test']

        # Meta-file lines are space-separated; the second token is the image path.
        self.paths = []
        for meta in self.metas:
            with open(os.path.join(self.data_rt, meta)) as fin:
                self.paths += [line.strip().split(' ')[1] for line in fin]

        # First-stage blur kernel settings.
        self.blur_kernel_size = opt['blur_kernel_size']
        self.kernel_list = opt['kernel_list']
        self.kernel_prob = opt['kernel_prob']
        self.blur_sigma = opt['blur_sigma']
        self.betag_range = opt['betag_range']  # beta for generalized Gaussian kernels
        self.betap_range = opt['betap_range']  # beta for plateau kernels
        self.sinc_prob = opt['sinc_prob']  # probability of a sinc kernel in stage 1

        # Second-stage blur kernel settings.
        self.blur_kernel_size2 = opt['blur_kernel_size2']
        self.kernel_list2 = opt['kernel_list2']
        self.kernel_prob2 = opt['kernel_prob2']
        self.blur_sigma2 = opt['blur_sigma2']
        self.betag_range2 = opt['betag_range2']
        self.betap_range2 = opt['betap_range2']
        self.sinc_prob2 = opt['sinc_prob2']

        # Probability of a final sinc filter applied after both stages.
        self.final_sinc_prob = opt['final_sinc_prob']

        # Candidate kernel sizes: 7, 9, ..., 21 (odd only).
        self.kernel_range = [2 * v + 1 for v in range(3, 11)]
        # 21x21 pulse (identity) kernel: convolving with it is a no-op.
        self.pulse_tensor = torch.zeros(21, 21).float()
        self.pulse_tensor[10, 10] = 1

        # Degradations run on GPU tensors. NOTE(review): this assumes CUDA is
        # available and that __getitem__ runs where CUDA ops are allowed
        # (e.g. num_workers=0) — confirm before using multi-worker loaders.
        self.device = torch.cuda.current_device()
        self.jpeger = DiffJPEG(differentiable=False).to(self.device)  # simulates JPEG compression artifacts
        self.usm_sharpener = USMSharp().to(self.device)  # unsharp-mask sharpening for GT
        self.resize = opt['gt_size']
        self.keep_ratio = opt['keep_ratio']

        # PIL-level augmentations applied before tensor conversion.
        self.crop = T.RandomCrop((self.resize, self.resize))
        self.flip = T.RandomHorizontalFlip()
        self.transform = T.Compose(
            [

                T.ToTensor(),
            ]
        )

    def __getitem__(self, index):
        """Load one GT image, augment it, and synthesize its low-quality pair.

        Returns:
            tuple: ``(lq, hq, gt_path)`` where ``lq``/``hq`` are CHW float
            tensors in [0, 1] and ``gt_path`` is the source image path.
        """
        gt_path = self.paths[index]
        img_gt = Image.open(gt_path).convert("RGB")

        # Pre-resize: either keep aspect ratio so the short side is ~20% larger
        # than the crop size, or squash directly to gt_size x gt_size.
        h, w = img_gt.height, img_gt.width
        if self.keep_ratio:
            ratio = self.resize / min(h, w)
            h_new, w_new = round(h * ratio * 1.2), round(w * ratio * 1.2)
            img_gt = img_gt.resize((w_new, h_new), resample=Image.LANCZOS)
        else:
            img_gt = img_gt.resize((self.resize, self.resize), resample=Image.LANCZOS)

        img_gt = self.crop(img_gt)

        # Horizontal flip only during training.
        if self.train:
            img_gt = self.flip(img_gt)

        img_gt = self.transform(img_gt).to(torch.float32)

        # Sample the two blur kernels and the final sinc kernel.
        kernel, kernel2, sinc_kernel = self.generate_kernel()

        # Run the two-order degradation pipeline (batch of 1).
        lq, hq = self.generate_lr({
            "gt": img_gt.unsqueeze(0),
            "kernel1": kernel,
            "kernel2": kernel2,
            "sinc_kernel": sinc_kernel,
        })

        return lq, hq, gt_path

    def generate_kernel(self, ):
        """Sample the three random kernels used by the degradation pipeline.

        Returns:
            tuple: ``(kernel, kernel2, sinc_kernel)``; the first two are
            zero-padded to 21x21 FloatTensors, the last is either a 21x21 sinc
            kernel or the identity pulse tensor.
        """
        # ----------- kernel for the first degradation stage ----------- #
        kernel_size = random.choice(self.kernel_range)
        if np.random.uniform() < self.opt['sinc_prob']:
            # Sinc kernel (ringing/overshoot artifacts); small kernels use a
            # larger minimum cutoff frequency.
            if kernel_size < 13:
                omega_c = np.random.uniform(np.pi / 3, np.pi)
            else:
                omega_c = np.random.uniform(np.pi / 5, np.pi)
            kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
        else:
            kernel = random_mixed_kernels(
                self.kernel_list,
                self.kernel_prob,
                kernel_size,
                self.blur_sigma,
                self.blur_sigma, [-math.pi, math.pi],
                self.betag_range,
                self.betap_range,
                noise_range=None)
        # Zero-pad to the fixed 21x21 size.
        pad_size = (21 - kernel_size) // 2
        kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
        kernel = torch.FloatTensor(kernel)

        # ----------- kernel for the second degradation stage ----------- #
        kernel_size = random.choice(self.kernel_range)
        if np.random.uniform() < self.opt['sinc_prob2']:
            if kernel_size < 13:
                omega_c = np.random.uniform(np.pi / 3, np.pi)
            else:
                omega_c = np.random.uniform(np.pi / 5, np.pi)
            kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
        else:
            kernel2 = random_mixed_kernels(
                self.kernel_list2,
                self.kernel_prob2,
                kernel_size,
                self.blur_sigma2,
                self.blur_sigma2, [-math.pi, math.pi],
                self.betag_range2,
                self.betap_range2,
                noise_range=None)

        # Zero-pad to the fixed 21x21 size.
        pad_size = (21 - kernel_size) // 2
        kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size)))
        kernel2 = torch.FloatTensor(kernel2)

        # ------------------- the final sinc kernel ------------------- #
        if np.random.uniform() < self.opt['final_sinc_prob']:
            kernel_size = random.choice(self.kernel_range)
            omega_c = np.random.uniform(np.pi / 3, np.pi)
            sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21)
            sinc_kernel = torch.FloatTensor(sinc_kernel)
        else:
            # Identity kernel: effectively skips the final sinc filtering.
            sinc_kernel = self.pulse_tensor
        return kernel, kernel2, sinc_kernel

    def generate_lr(self, data):
        """Accept data from dataloader, and then add two-order degradations to obtain LQ images.

        Args:
            data (dict): Holds ``gt`` (1xCxHxW tensor in [0, 1]) plus
                ``kernel1``, ``kernel2`` and ``sinc_kernel``.

        Returns:
            tuple: ``(lq, hq)`` CHW tensors; ``hq`` is the USM-sharpened GT.
        """
        self.gt = data['gt'].to(self.device)
        # The sharpened GT is the input to the degradation pipeline.
        self.gt_usm = self.usm_sharpener(self.gt)

        self.kernel1 = data['kernel1'].to(self.device)
        self.kernel2 = data['kernel2'].to(self.device)
        self.sinc_kernel = data['sinc_kernel'].to(self.device)

        ori_h, ori_w = self.gt.size()[2:4]

        # -------------------- first degradation stage -------------------- #
        # blur
        out = filter2D(self.gt_usm, self.kernel1)
        # random resize (up / down / keep)
        updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
        if updown_type == 'up':
            scale = np.random.uniform(1, self.opt['resize_range'][1])
        elif updown_type == 'down':
            scale = np.random.uniform(self.opt['resize_range'][0], 1)
        else:
            scale = 1
        mode = random.choice(['area', 'bilinear', 'bicubic'])
        out = F.interpolate(out, scale_factor=scale, mode=mode)
        # add noise (Gaussian or Poisson, possibly gray)
        gray_noise_prob = self.opt['gray_noise_prob']
        if np.random.uniform() < self.opt['gaussian_noise_prob']:
            out = random_add_gaussian_noise_pt(
                out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob)
        else:
            out = random_add_poisson_noise_pt(
                out,
                scale_range=self.opt['poisson_scale_range'],
                gray_prob=gray_noise_prob,
                clip=True,
                rounds=False)
        # JPEG compression (per-sample random quality)
        jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range'])
        out = torch.clamp(out, 0, 1)
        out = self.jpeger(out, quality=jpeg_p)

        # -------------------- second degradation stage -------------------- #
        # blur (applied only with second_blur_prob)
        if np.random.uniform() < self.opt['second_blur_prob']:
            out = filter2D(out, self.kernel2)
        # random resize, relative to the original GT size this time
        updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
        if updown_type == 'up':
            scale = np.random.uniform(1, self.opt['resize_range2'][1])
        elif updown_type == 'down':
            scale = np.random.uniform(self.opt['resize_range2'][0], 1)
        else:
            scale = 1
        mode = random.choice(['area', 'bilinear', 'bicubic'])
        out = F.interpolate(
            out, size=(int(ori_h * scale), int(ori_w * scale)), mode=mode)
        # add noise
        gray_noise_prob = self.opt['gray_noise_prob2']
        if np.random.uniform() < self.opt['gaussian_noise_prob2']:
            out = random_add_gaussian_noise_pt(
                out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob)
        else:
            out = random_add_poisson_noise_pt(
                out,
                scale_range=self.opt['poisson_scale_range2'],
                gray_prob=gray_noise_prob,
                clip=True,
                rounds=False)

        # The final sinc filter and JPEG compression are applied in a random
        # order (the two orderings produce different artifacts):
        #   1. [resize back + sinc filter] then JPEG
        #   2. JPEG then [resize back + sinc filter]
        if np.random.uniform() < 0.5:
            # resize back to the original GT size + final sinc filter
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(out, size=(ori_h, ori_w), mode=mode)
            out = filter2D(out, self.sinc_kernel)
            # JPEG compression
            jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
            out = torch.clamp(out, 0, 1)
            out = self.jpeger(out, quality=jpeg_p)
        else:
            # JPEG compression
            jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
            out = torch.clamp(out, 0, 1)
            out = self.jpeger(out, quality=jpeg_p)
            # resize back to the original GT size + final sinc filter
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(out, size=(ori_h, ori_w), mode=mode)
            out = filter2D(out, self.sinc_kernel)

        # Quantize to 8-bit levels (clamp + round), like a saved image.
        lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.
        lq = lq.contiguous()

        # HQ target is the USM-sharpened GT.
        hq = self.usm_sharpener(self.gt)
        return lq[0], hq[0]

    def __len__(self):
        """Number of GT images listed in the meta files."""
        return len(self.paths)
| |
|
| | def real_esrgan_sampler(): |
| | """ |
| | Generate multi-scale versions for GT images with LANCZOS resampling. |
| | It is now used for DF2K dataset (DIV2K + Flickr 2K) |
| | """ |
| | parser = argparse.ArgumentParser() |
| | parser.add_argument('--num_samples', type=int, default=3, help='train: one to many') |
| | args = parser.parse_args() |
| |
|
| | |
| | dataset = RealESRGANDataset(cfg, train=True) |
| | data_dl = data.DataLoader( |
| | dataset, |
| | batch_size = 1 |
| | ) |
| | print("Train Data:", dataset.data_rt, len(data_dl)) |
| | for number in range(args.num_samples): |
| | print("="*100) |
| | print(f"Generate round {number}...") |
| |
|
| | meta_info = {} |
| | for sample in tqdm(data_dl): |
| | lq, hq, path = sample |
| | |
| | file_name = os.path.basename(path[0]) |
| | gt_folder = os.path.dirname(path[0]) |
| | if "DIV2K_train_HR" in gt_folder or "DIV2K_valid_HR" in gt_folder: |
| | hq_folder = gt_folder.replace("HR", f"pair/SR{number+1}/HR") |
| | lq_folder = gt_folder.replace("HR", f"pair/SR{number+1}/LR") |
| | else: |
| | hq_folder = os.path.join(gt_folder.replace("images", f"images_pair/SR{number+1}"), "HR/") |
| | lq_folder = os.path.join(gt_folder.replace("images", f"images_pair/SR{number+1}"), "LR/") |
| |
|
| | os.makedirs(hq_folder, exist_ok=True) |
| | os.makedirs(lq_folder, exist_ok=True) |
| |
|
| | hq_path = os.path.join(hq_folder, file_name) |
| | lq_path = os.path.join(lq_folder, file_name) |
| |
|
| | save_image(hq[0], hq_path) |
| | save_image(lq[0], lq_path) |
| |
|
| | dset = os.path.relpath(gt_folder, dataset.data_rt).split("/")[0] |
| | if dset not in meta_info: |
| | meta_info[dset] = [(lq_path, hq_path)] |
| | else: |
| | meta_info[dset].append((lq_path, hq_path)) |
| |
|
| | for dset, dlist in meta_info.items(): |
| | with open(os.path.join(dataset.data_rt,'{}/metas/{}_train_SR{}.list'.format(dset, dset, number+1)), 'w') as fp: |
| | for item in dlist: |
| | fp.write('{} {} {}\n'.format(item[0], item[1], None)) |
| | print(os.path.join(dataset.data_rt,'{}/metas/{}_train_SR{}.list'.format(dset, dset, number+1)), len(dlist)) |
| |
|
| |
|
| | |
| | dataset = RealESRGANDataset(cfg, train=False) |
| | data_dl = data.DataLoader( |
| | dataset, |
| | batch_size = 1 |
| | ) |
| | print("Test Data:", dataset.data_rt, len(data_dl)) |
| | print("="*100) |
| | print(f"Generate round {number}...") |
| |
|
| | meta_info = {} |
| | for sample in tqdm(data_dl): |
| | lq, hq, path = sample |
| | |
| | file_name = os.path.basename(path[0]) |
| | gt_folder = os.path.dirname(path[0]) |
| | if "DIV2K_train_HR" in gt_folder or "DIV2K_valid_HR" in gt_folder: |
| | hq_folder = gt_folder.replace("HR", f"pair/SR/HR") |
| | lq_folder = gt_folder.replace("HR", f"pair/SR/LR") |
| | else: |
| | hq_folder = os.path.join(gt_folder.replace("images", f"images_pair/SR"), "HR/") |
| | lq_folder = os.path.join(gt_folder.replace("images", f"images_pair/SR"), "LR/") |
| |
|
| | os.makedirs(hq_folder, exist_ok=True) |
| | os.makedirs(lq_folder, exist_ok=True) |
| |
|
| | hq_path = os.path.join(hq_folder, file_name) |
| | lq_path = os.path.join(lq_folder, file_name) |
| |
|
| | save_image(hq[0], hq_path) |
| | save_image(lq[0], lq_path) |
| |
|
| | dset = os.path.relpath(gt_folder, dataset.data_rt).split("/")[0] |
| | if dset not in meta_info: |
| | meta_info[dset] = [(lq_path, hq_path)] |
| | else: |
| | meta_info[dset].append((lq_path, hq_path)) |
| |
|
| | for dset, dlist in meta_info.items(): |
| | with open(os.path.join(dataset.data_rt,'{}/metas/{}_valid_SR.list'.format(dset, dset)), 'w') as fp: |
| | for item in dlist: |
| | fp.write('{} {} {}\n'.format(item[0], item[1], None)) |
| | print(os.path.join(dataset.data_rt,'{}/metas/{}_valid_SR.list'.format(dset, dset)), len(dlist)) |
| |
|
def simple_multiscale():
    """
    Generate multi-scale versions for GT images with LANCZOS resampling.
    It is now used for DF2K dataset (DIV2K + Flickr 2K)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='DIV2K/DIV2K_train_HR', help='Input folder')
    parser.add_argument('--output', type=str, default='DIV2K/DIV2K_train_multiscale', help='Output folder')
    args = parser.parse_args()
    os.makedirs(args.output, exist_ok=True)

    # Fixed downscale factors, plus one extra scale derived from a target
    # shortest edge of 400 px.
    scale_list = [0.75, 0.5, 1 / 3]
    shortest_edge = 400

    for path in sorted(glob.glob(os.path.join(args.input, '*'))):
        basename = os.path.splitext(os.path.basename(path))[0]

        img = Image.open(path)
        width, height = img.size

        # Down-scale with LANCZOS, then restore the original resolution with
        # NEAREST so every output has the same size as the source image.
        for idx, scale in enumerate(scale_list):
            print(f'\t{scale:.2f}')
            shrunk = img.resize((int(width * scale), int(height * scale)), resample=Image.LANCZOS)
            restored = shrunk.resize((width, height), resample=Image.NEAREST)
            restored.save(os.path.join(args.output, f'{basename}T{idx}.png'))

        # One more version whose shortest edge is fixed at `shortest_edge`.
        if width < height:
            new_w = shortest_edge
            new_h = int(new_w * (height / width))
        else:
            new_h = shortest_edge
            new_w = int(new_h * (width / height))
        shrunk = img.resize((new_w, new_h), resample=Image.LANCZOS)
        restored = shrunk.resize(img.size, resample=Image.NEAREST)
        restored.save(os.path.join(args.output, f'{basename}T{len(scale_list)}.png'))
| |
|
| |
|
if __name__ == '__main__':
    # Fixed seed so kernel and degradation sampling is reproducible across runs.
    set_seed(1229)

    # Generate LQ/HQ training and validation pairs offline.
    real_esrgan_sampler()
| |
|
| | |