# scripts/generate_noise.py
import argparse
import os
import random

import numpy as np
import torch
import torchvision.transforms as T
from PIL import Image
from torch.utils import data as data
from torchvision.utils import save_image
from tqdm import tqdm

from basicsr.utils import DiffJPEG, USMSharp
from basicsr.utils.registry import DATASET_REGISTRY
cfg = {
    # dataset info
    "name": "DF2K+OST",
    "type": "RealESRGANDataset",
    "dataroot_gt": "/home/CORP/hsiang.chen/Project/Datasets/IR/SuperResolution",
    "meta_train": [
        "DIV2K/metas/DIV2K_train_HR.list",
        "Flickr2K/metas/Flickr2K.list",
        # "OST/metas/OST.list",
    ],
    "meta_test": ["DIV2K/metas/DIV2K_valid_HR.list"],
    # the first degradation process
    "resize_prob": [0.2, 0.7, 0.1],  # up, down, keep
    "resize_range": [0.15, 1.5],
    "gaussian_noise_prob": 0.5,
    "noise_range": [1, 30],
    "poisson_scale_range": [0.05, 3],
    "gray_noise_prob": 0.4,
    "jpeg_range": [30, 95],
    "blur_kernel_size": 21,
    "kernel_list": ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'],
    "kernel_prob": [0.45, 0.25, 0.12, 0.03, 0.12, 0.03],
    "sinc_prob": 0.1,
    "blur_sigma": [0.2, 3],
    "betag_range": [0.5, 4],
    "betap_range": [1, 2],
    # the second degradation process
    "second_blur_prob": 0.8,
    "resize_prob2": [0.3, 0.4, 0.3],  # up, down, keep
    "resize_range2": [0.3, 1.2],
    "gaussian_noise_prob2": 0.5,
    "noise_range2": [1, 25],
    "poisson_scale_range2": [0.05, 2.5],
    "gray_noise_prob2": 0.4,
    "jpeg_range2": [30, 95],
    "blur_kernel_size2": 21,
    "kernel_list2": ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'],
    "kernel_prob2": [0.45, 0.25, 0.12, 0.03, 0.12, 0.03],
    "sinc_prob2": 0.1,
    "blur_sigma2": [0.2, 1.5],
    "betag_range2": [0.5, 4],
    "betap_range2": [1, 2],
    "final_sinc_prob": 0.8,
    "gt_size": 512,
    "keep_ratio": True,
    "use_hflip": True,
    "use_rot": False,
    # data loader
    "use_shuffle": True,
    "num_worker_per_gpu": 5,
    "batch_size_per_gpu": 12,
    "dataset_enlarge_ratio": 1,
    "prefetch_mode": None,
}
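
# Note: NoiseDataset below only reads "dataroot_gt", "meta_train"/"meta_test",
# "gt_size" and "keep_ratio" from this config; the degradation keys are carried
# over from the RealESRGAN-style pipeline this script appears to be derived from.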


def set_seed(seed=42):
    """Seed all RNGs and force deterministic cuDNN for reproducible generation."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


@DATASET_REGISTRY.register()
class NoiseDataset(data.Dataset):
    """Dataset for denoising models.

    Synthesizes Poisson-Gaussian noisy/clean image pairs on the fly.
    """
    def __init__(self, opt, train=True, level=None):
        super(NoiseDataset, self).__init__()
        self.opt = opt
        self.data_rt = opt['dataroot_gt']
        # data loading: collect GT image paths from the meta lists
        self.train = train
        if self.train:
            self.metas = opt['meta_train']
        else:
            self.metas = opt['meta_test']
        self.paths = []
        for meta in self.metas:
            with open(os.path.join(self.data_rt, meta)) as fin:
                # each meta line keeps the image path in its second field
                self.paths += [line.strip().split(' ')[1] for line in fin]
        # hyperparameters; fall back to CPU when CUDA is unavailable
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        self.jpeger = DiffJPEG(differentiable=False).to(self.device)  # simulates JPEG compression artifacts (unused here)
        self.usm_sharpener = USMSharp().to(self.device)  # USM sharpening (unused here)
        self.resize = opt['gt_size']
        self.keep_ratio = opt['keep_ratio']
        # transforms
        self.crop = T.RandomCrop((self.resize, self.resize))
        self.flip = T.RandomHorizontalFlip()
        self.transform = T.Compose(
            [
                # T.ToDtype(torch.float32, scale=True),  # only supported on torch 2.x
                T.ToTensor(),
            ]
        )
        # Gaussian noise levels: sigma = {15, 20, 25, 37, 50} / 255
        self.sigma = [0.0588, 0.0784, 0.098, 0.1451, 0.1961]
        if level:
            self.level = [level]
        else:
            self.level = [1, 2, 3, 4, 5]
    def __getitem__(self, index):
        # -------------------------------- Load gt image -------------------------------- #
        gt_path = self.paths[index]
        img_gt = Image.open(gt_path).convert("RGB")
        # -------------------------------- Image processing ----------------------------- #
        # resize so that the random crop below always fits
        h, w = img_gt.height, img_gt.width
        if self.keep_ratio:
            ratio = self.resize / min(h, w)
            h_new, w_new = round(h * ratio * 1.2), round(w * ratio * 1.2)
            img_gt = img_gt.resize((w_new, h_new), resample=Image.LANCZOS)
        else:
            img_gt = img_gt.resize((self.resize, self.resize), resample=Image.LANCZOS)
        # crop
        img_gt = self.crop(img_gt)
        # flip (train only)
        if self.train:
            img_gt = self.flip(img_gt)
        # transform to a float tensor in [0, 1]
        img_gt = self.transform(img_gt).to(torch.float32)
        # -------------------------------- Generate noise ------------------------------- #
        # Poisson (shot) noise: treat each pixel as a photon count with `peak` photons
        peak = 255
        lam = torch.clamp(img_gt, 0, 1) * peak
        counts = torch.poisson(lam)
        img_poisson = torch.clamp(counts / float(peak), 0.0, 1.0)
        # additive Gaussian (read) noise at a randomly chosen sigma level
        level = random.choice(self.level)
        noise = torch.randn(size=img_poisson.size())
        img_poisson_gaussian = torch.clamp(img_poisson + self.sigma[level - 1] * noise, 0., 1.)
        return img_poisson_gaussian, img_gt, gt_path

    def __len__(self):
        return len(self.paths)
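

# A minimal usage sketch (assumes the dataroot and meta lists in `cfg` exist locally):
#
#   dataset = NoiseDataset(cfg, train=False, level=3)
#   lq, hq, path = dataset[0]
#   # lq, hq: 3 x gt_size x gt_size float tensors in [0, 1]; lq = noisy, hq = clean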


def poisson_gaussian_sampler():
    """Generate paired noisy/clean images.

    Currently used for the DF2K dataset (DIV2K + Flickr2K).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--level', type=int, default=None,
                        help='generate a single training noise level (default: levels 1, 3, 5)')
    args = parser.parse_args()
    if args.level:
        levels = [args.level]
    else:
        levels = [1, 3, 5]
    # generate the training dataset
    for number in levels:
        print("=" * 100)
        print(f"Generate Noise Level {number}...")
        dataset = NoiseDataset(cfg, train=True, level=number)
        data_dl = data.DataLoader(dataset, batch_size=1)
        print("Train Data:", dataset.data_rt, len(data_dl))
        meta_info = {}
        for sample in tqdm(data_dl):
            lq, hq, path = sample
            # e.g. <dataroot_gt>/DIV2K/DIV2K_train_HR/0098.png
            file_name = os.path.basename(path[0])
            gt_folder = os.path.dirname(path[0])
            if "DIV2K_train_HR" in gt_folder or "DIV2K_valid_HR" in gt_folder:
                hq_folder = gt_folder.replace("HR", f"pair/Noise_L{number}/HQ")
                lq_folder = gt_folder.replace("HR", f"pair/Noise_L{number}/LQ")
            else:
                hq_folder = os.path.join(gt_folder.replace("images", f"images_pair/Noise_L{number}"), "HQ/")
                lq_folder = os.path.join(gt_folder.replace("images", f"images_pair/Noise_L{number}"), "LQ/")
            os.makedirs(hq_folder, exist_ok=True)
            os.makedirs(lq_folder, exist_ok=True)
            hq_path = os.path.join(hq_folder, file_name)
            lq_path = os.path.join(lq_folder, file_name)
            save_image(hq[0], hq_path)
            save_image(lq[0], lq_path)
            dset = os.path.relpath(gt_folder, dataset.data_rt).split(os.sep)[0]
            if dset not in meta_info:
                meta_info[dset] = [(lq_path, hq_path)]
            else:
                meta_info[dset].append((lq_path, hq_path))
        # write one meta list per source dataset
        for dset, dlist in meta_info.items():
            meta_path = os.path.join(dataset.data_rt, '{}/metas/{}_train_Noise_L{}.list'.format(dset, dset, number))
            with open(meta_path, 'w') as fp:
                for item in dlist:
                    fp.write('{} {} {}\n'.format(item[0], item[1], None))
            print(meta_path, len(dlist))
    # generate the testing dataset (all five levels, sampled randomly per image)
    dataset = NoiseDataset(cfg, train=False)
    data_dl = data.DataLoader(dataset, batch_size=1)
    print("Test Data:", dataset.data_rt, len(data_dl))
    print("=" * 100)
    print("Generate Testing Noise...")
    meta_info = {}
    for sample in tqdm(data_dl):
        lq, hq, path = sample
        # e.g. <dataroot_gt>/DIV2K/DIV2K_valid_HR/<name>.png
        file_name = os.path.basename(path[0])
        gt_folder = os.path.dirname(path[0])
        if "DIV2K_train_HR" in gt_folder or "DIV2K_valid_HR" in gt_folder:
            hq_folder = gt_folder.replace("HR", "pair/Noise/HQ")
            lq_folder = gt_folder.replace("HR", "pair/Noise/LQ")
        else:
            hq_folder = os.path.join(gt_folder.replace("images", "images_pair/Noise"), "HQ/")
            lq_folder = os.path.join(gt_folder.replace("images", "images_pair/Noise"), "LQ/")
        os.makedirs(hq_folder, exist_ok=True)
        os.makedirs(lq_folder, exist_ok=True)
        hq_path = os.path.join(hq_folder, file_name)
        lq_path = os.path.join(lq_folder, file_name)
        save_image(hq[0], hq_path)
        save_image(lq[0], lq_path)
        dset = os.path.relpath(gt_folder, dataset.data_rt).split(os.sep)[0]
        if dset not in meta_info:
            meta_info[dset] = [(lq_path, hq_path)]
        else:
            meta_info[dset].append((lq_path, hq_path))
    for dset, dlist in meta_info.items():
        meta_path = os.path.join(dataset.data_rt, '{}/metas/{}_valid_Noise.list'.format(dset, dset))
        with open(meta_path, 'w') as fp:
            for item in dlist:
                fp.write('{} {} {}\n'.format(item[0], item[1], None))
        print(meta_path, len(dlist))
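

# Each emitted meta line is "<lq_path> <hq_path> None"; a downstream paired-data
# loader is presumably expected to split on whitespace, the same way
# NoiseDataset.__init__ does for the HR lists (it keeps the second field).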


if __name__ == '__main__':
    set_seed(1229)
    # Poisson-Gaussian paired data generation
    poisson_gaussian_sampler()
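
# Example runs (the dataset paths in `cfg` are machine-specific and may need adjusting):
#   python scripts/generate_noise.py            # training levels 1, 3, 5 + validation set
#   python scripts/generate_noise.py --level 3  # a single training level + validation set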