Upload folder using huggingface_hub
- 0_generate_list.py +881 -0
- 1_generate_iqa.py +422 -0
- generate_lowresolution.py +537 -0
- generate_noise.py +300 -0
0_generate_list.py
ADDED
@@ -0,0 +1,881 @@
import os
import shutil
import numpy as np
from pathlib import Path
from PIL import Image         # needed by mat2png in the UDC section below
from scipy.io import loadmat  # needed by mat2png in the UDC section below

base_rt = "/home/CORP/hsiang.chen/Project/Datasets/IR"

# Image-extension filter shared by every section below; same per-extension
# substring test as the original inline conditions.
IMG_EXTS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")

def is_image(name):
    return any(ext in name for ext in IMG_EXTS)

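# Every section below writes a "metas/<split>.list" file: one sample per line,
# three space-separated fields (input path, ground-truth path, extra), with the
# literal string "None" standing in for a missing field. A minimal reader for
# that format (a sketch, not part of the committed script) could look like:
def read_meta_list(path):
    samples = []
    with open(path) as fp:
        for line in fp:
            fields = [None if f == "None" else f for f in line.split()]
            samples.append((fields[0], fields[1]))  # (input, gt)
    return samples
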
"""
Deblur: GoPro, HIDE, RealBlur
"""
# GoPro
# pre-process for GoPro, dataset: https://seungjunnah.github.io/Datasets/gopro
# Seungjun Nah, Tae Hyun Kim, and Kyoung Mu Lee. Deep multi-scale convolutional neural network for dynamic scene deblurring. In CVPR, 2017.
rt = os.path.join(base_rt, "Deblur/GoPro")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['train', 'test']:
    dset_pth = os.path.join(rt, dset)
    list_file = []
    total_ct = 0
    for sample_folder in os.listdir(dset_pth):  # GOPROXXX_XX_XX
        folder_pth = os.path.join(dset_pth, sample_folder)
        blur_folder = os.path.join(folder_pth, 'blur')
        sharp_folder = os.path.join(folder_pth, 'sharp')
        for ct, sample in enumerate(os.listdir(blur_folder)):
            if is_image(sample):
                blur_file = os.path.join(blur_folder, sample)
                sharp_file = os.path.join(sharp_folder, sample)
                list_file.append((blur_file, sharp_file))
                total_ct += 1

    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(dset_pth, total_ct)

# HIDE
# pre-process for HIDE, dataset: https://github.com/joanshen0508/HA_deblur
# Ziyi Shen, Wenguan Wang, Xiankai Lu, Jianbing Shen, Haibin Ling, Tingfa Xu, and Ling Shao. Human-aware motion deblurring. In ICCV, 2019.
# ==============================================
# HIDE/
#   |- train/{image, gt}
#   |- test/{image, gt}
# ==============================================
rt = os.path.join(base_rt, "Deblur/HIDE")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['train', 'test']:
    dset_pth = os.path.join(rt, dset)
    list_file = []
    total_ct = 0
    if dset == 'train':
        blur_folder = dset_pth
        sharp_folder = os.path.join(rt, 'GT')
        for ct, sample in enumerate(os.listdir(blur_folder)):
            if is_image(sample):
                blur_file = os.path.join(blur_folder, sample)
                sharp_file = os.path.join(sharp_folder, sample)
                list_file.append((blur_file, sharp_file))
                total_ct += 1
    else:
        for sample_folder in ['test-close-ups', 'test-long-shot']:
            folder_pth = os.path.join(dset_pth, sample_folder)
            blur_folder = folder_pth
            sharp_folder = os.path.join(rt, 'GT')
            for ct, sample in enumerate(os.listdir(blur_folder)):
                if is_image(sample):
                    blur_file = os.path.join(blur_folder, sample)
                    sharp_file = os.path.join(sharp_folder, sample)
                    list_file.append((blur_file, sharp_file))
                    total_ct += 1

    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(dset_pth, total_ct)

# RealBlur
# pre-process for RealBlur-J/R, dataset: https://github.com/rimchang/RealBlur
# Jaesung Rim, Haeyun Lee, Jucheol Won, and Sunghyun Cho. Real-world blur dataset for learning and benchmarking deblurring algorithms. In ECCV, 2020.
rt = os.path.join(base_rt, "Deblur/")

realblur_j_test_txt = os.path.join(rt, "RealBlur-J_ECC_IMCORR_centroid_itensity_ref/RealBlur_J_test_list.txt")
realblur_r_test_txt = os.path.join(rt, "RealBlur-R_BM3D_ECC_IMCORR_centroid_itensity_ref/RealBlur_R_test_list.txt")

for txt in [realblur_j_test_txt, realblur_r_test_txt]:
    data_list = []
    total_ct = 0
    miss_ct = 0
    with open(txt) as fin:
        for ct, line in enumerate(fin):  # each line: gt image
            line = line.strip().split()
            if len(line) == 1:  # only one path on the line: no gt
                data_list.append([None, os.path.join(rt, line[0])])
                miss_ct += 1
            else:
                data_list.append([os.path.join(rt, line[1]), os.path.join(rt, line[0])])  # image, gt
                total_ct += 1

    set_dict = {realblur_j_test_txt: os.path.join(rt, "RealBlur-J_ECC_IMCORR_centroid_itensity_ref"),
                realblur_r_test_txt: os.path.join(rt, "RealBlur-R_BM3D_ECC_IMCORR_centroid_itensity_ref")}
    meta_folder = os.path.join(set_dict[txt], "metas")
    os.makedirs(meta_folder, exist_ok=True)
    with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
        for item in data_list:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))

    print(os.path.join(meta_folder, 'test.list'), total_ct, miss_ct)

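# Sketch (my own addition, not in the original script): the GoPro/HIDE loops
# assume every blur frame has a same-named sharp frame, so a quick sanity
# check over any of the pair lists built above would be:
def missing_gt(pairs):
    return [gt for _, gt in pairs if gt is not None and not os.path.isfile(gt)]
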
"""
Dehaze: 4kID, NH-Haze, OTS, SOTS
"""
# OTS
# pre-process for OTS, dataset: https://sites.google.com/view/reside-dehaze-datasets/reside-%CE%B2?authuser=0
# Boyi Li, Wenqi Ren, Dengpan Fu, Dacheng Tao, Dan Feng, Wenjun Zeng, and Zhangyang Wang. Benchmarking single-image dehazing and beyond. TIP, 2018.
rt = os.path.join(base_rt, "Dehaze/OTS")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['hazy']:
    dset_pth = os.path.join(rt, dset)
    list_file = []
    total_ct = 0
    # hazy filenames look like 0025_0.8_0.1.jpg
    for sub_folder in os.listdir(dset_pth):
        input_folder = os.path.join(dset_pth, sub_folder)
        gt_folder = os.path.join(rt, 'gt')
        for ct, sample in enumerate(os.listdir(input_folder)):
            if is_image(sample):
                input_file = os.path.join(input_folder, sample)
                gt_file = os.path.join(gt_folder, "%s.jpg" % (sample.split('_')[0]))
                list_file.append((input_file, gt_file))
                total_ct += 1

    set_dict = {"hazy": 'train'}
    with open(os.path.join(meta_folder, '{}.list'.format(set_dict[dset])), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(dset_pth, total_ct)


# SOTS
# pre-process for SOTS, dataset: https://sites.google.com/view/reside-dehaze-datasets/reside-standard
# Boyi Li, Wenqi Ren, Dengpan Fu, Dacheng Tao, Dan Feng, Wenjun Zeng, and Zhangyang Wang. Benchmarking single-image dehazing and beyond. TIP, 2018.
rt = os.path.join(base_rt, "Dehaze/SOTS")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['outdoor']:
    dset_pth = os.path.join(rt, dset)
    list_file = []
    total_ct = 0
    input_folder = os.path.join(dset_pth, 'hazy')
    gt_folder = os.path.join(dset_pth, 'gt')
    for ct, sample in enumerate(os.listdir(input_folder)):
        if is_image(sample):
            input_file = os.path.join(input_folder, sample)
            gt_file = os.path.join(gt_folder, "%s.png" % (sample.split("_")[0]))
            list_file.append((input_file, gt_file))
            total_ct += 1

    set_dict = {"outdoor": 'test'}
    with open(os.path.join(meta_folder, '{}.list'.format(set_dict[dset])), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(input_folder, total_ct)

# 4kID
# pre-process for 4kID, dataset: https://github.com/zzr-idam/4KDehazing
# Ultra-High-Definition Image Dehazing via Multi-Guided Bilateral Learning, CVPR 2021.
rt = os.path.join(base_rt, "Dehaze/4kID")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['4KDehazing', '4KDehazing_test']:  # train, test
    dset_pth = os.path.join(rt, dset)
    list_file = []
    total_ct = 0
    input_folder = os.path.join(dset_pth, 'inputs')
    gt_folder = os.path.join(dset_pth, 'groundtrues')
    for ct, sample in enumerate(os.listdir(input_folder)):
        if is_image(sample):
            input_file = os.path.join(input_folder, sample)
            gt_file = os.path.join(gt_folder, sample)
            list_file.append((input_file, gt_file))
            total_ct += 1

    set_dict = {"4KDehazing": 'train', "4KDehazing_test": 'test'}
    with open(os.path.join(meta_folder, '{}.list'.format(set_dict[dset])), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(dset_pth, total_ct)


# Unann
# pre-process for unannotated hazy images, dataset: https://sites.google.com/view/reside-dehaze-datasets/reside-%CE%B2?authuser=0
rt = os.path.join(base_rt, "Dehaze/UnannotatedHazyImages")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
list_file = []
total_ct = 0
input_folder = os.path.join(rt, "Image")
for ct, sample in enumerate(os.listdir(input_folder)):
    if is_image(sample):
        input_file = os.path.join(input_folder, sample)
        list_file.append((input_file, ""))
        total_ct += 1

with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
    for item in list_file:
        fp.write('{} {} {}\n'.format(item[0], None, None))
print(rt, total_ct)

# NH-Haze
rt = os.path.join(base_rt, "Dehaze/NH-Haze")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
list_file = []
total_ct = 0
input_folder = os.path.join(rt, "images")
for ct, sample in enumerate(os.listdir(input_folder)):
    if is_image(sample):
        input_file = os.path.join(input_folder, sample)
        list_file.append((input_file, ""))
        total_ct += 1

with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
    for item in list_file:
        fp.write('{} {} {}\n'.format(item[0], None, None))
print(rt, total_ct)

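# Worked example of the hazy->clear filename mapping used for OTS and SOTS
# above (RESIDE-style names "<id>_<beta>_<A>.<ext>", as in the 0025_0.8_0.1.jpg
# comment):
assert "%s.jpg" % ("0025_0.8_0.1.jpg".split('_')[0]) == "0025.jpg"
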
"""
Denoise: BSD68, BSD400, CBSD68, Kodak, McMaster, Set12, SIDD, Urban100, WaterlooED
"""
"""
pre-process for the denoise datasets
1. BSD400:
    * David Martin, Charless Fowlkes, Doron Tal, and Jitendra Malik. A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In ICCV, 2001.
    * https://github.com/smartboy110/denoising-datasets/tree/main
2. WED:
    * Kede Ma, Zhengfang Duanmu, Qingbo Wu, Zhou Wang, Hongwei Yong, Hongliang Li, and Lei Zhang. Waterloo exploration database: New challenges for image quality assessment models. IEEE Transactions on Image Processing, 26(2):1004-1016, 2016.
    * https://kedema.org/project/exploration/index.html
3. BSD68: same reference and link as BSD400.
4. CBSD68: same reference and link as BSD400.
5. Urban100:
    * Jia-Bin Huang, Abhishek Singh, and Narendra Ahuja. Single image super-resolution from transformed self-exemplars. In CVPR, 2015.
    * https://github.com/jbhuang0604/SelfExSR
6. Kodak:
    * Rich Franzen. Kodak lossless true color image suite. http://r0k.us/graphics/kodak, 1999.
    * https://www.kaggle.com/datasets/sherylmehta/kodak-dataset
7. McMaster, Set12
"""

rt = os.path.join(base_rt, "Denoise")
dataset = ['BSD68', 'BSD400', 'CBSD68', 'Kodak',
           'McMaster', 'Set12', 'Urban100', 'WaterlooED']

for dset in dataset:
    # list samples
    list_file = []
    total_ct = 0
    dset_pth = os.path.join(rt, dset)
    meta_folder = os.path.join(dset_pth, "metas")
    os.makedirs(meta_folder, exist_ok=True)
    input_folder = os.path.join(rt, '%s/image' % (dset))
    for ct, sample in enumerate(os.listdir(input_folder)):
        if is_image(sample):
            input_file = os.path.join(input_folder, sample)
            list_file.append([input_file])
            total_ct += 1

    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(None, item[0], None))
    print(input_folder, total_ct)


# SIDD
rt = os.path.join(base_rt, "Denoise/SIDD")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['train', 'test']:
    # list samples
    list_file = []
    total_ct = 0
    dset_folder = os.path.join(rt, dset)
    if dset == 'train':
        for folder in os.listdir(dset_folder):
            file_folder = os.path.join(dset_folder, folder)
            for ct, sample in enumerate(os.listdir(file_folder)):
                if "NOISY_SRGB" in sample:
                    input_file = os.path.join(file_folder, sample)
                    gt_file = os.path.join(file_folder, sample.replace("NOISY", "GT"))
                    list_file.append([input_file, gt_file])
                    total_ct += 1
    elif dset == 'test':
        image_folder = os.path.join(dset_folder, 'NOISY')
        gt_folder = os.path.join(dset_folder, "GT")
        for ct, sample in enumerate(os.listdir(image_folder)):
            if is_image(sample):
                input_file = os.path.join(image_folder, sample)
                gt_file = os.path.join(gt_folder, sample.replace("NOISY", "GT"))
                list_file.append([input_file, gt_file])
                total_ct += 1

    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(dset_folder, total_ct)

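# Worked example of the SIDD pairing above: a noisy sRGB frame maps to its
# ground truth by a single token swap (the filename here is illustrative):
assert "NOISY_SRGB_010.PNG".replace("NOISY", "GT") == "GT_SRGB_010.PNG"
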
"""
Derain: LHPRain, Practical, Rain100L, RainDS, RainTrainL, UHD-Rain, RainDrop
"""
# RainDS
rt = os.path.join(base_rt, "Derain/RainDS")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['RainDS_syn', 'RainDS_real']:
    dataset_folder = os.path.join(rt, dset)
    if dset == 'RainDS_syn':
        set_list = ['train', 'test']
    elif dset == 'RainDS_real':
        set_list = ['train_set', 'test_set']
    for subset in set_list:
        input_folder = os.path.join(dataset_folder, subset)
        for raintype in ['rainstreak', 'raindrop', 'rainstreak_raindrop']:
            rain_folder = os.path.join(input_folder, raintype)
            clear_folder = os.path.join(input_folder, 'gt')
            list_file = []
            total_ct = 0
            rain_dict = {'rainstreak_raindrop': 'rd-rain', 'rainstreak': 'rain', 'raindrop': 'rd'}
            # rd-rain-97, rain-97, rd-97 -> norain-97
            # pie-rd-rain-97, pie-rain-97, pie-rd-97 -> pie-norain-97
            for ct, sample in enumerate(os.listdir(rain_folder)):
                if is_image(sample):
                    input_file = os.path.join(rain_folder, sample)
                    gt_file = os.path.join(clear_folder, sample.replace(rain_dict[raintype], "norain"))
                    list_file.append((input_file, gt_file))
                    total_ct += 1

            with open(os.path.join(meta_folder, '{}_{}_{}.list'.format(dset, subset, raintype)), 'w') as fp:
                for item in list_file:
                    fp.write('{} {} {}\n'.format(item[0], item[1], None))
            print(rain_folder, total_ct)

# LHPRain
# pre-process for LHPRain: https://github.com/yunguo224/LHP-Rain
# From Sky to the Ground: A Large-scale Benchmark and Simple Baseline Towards Real Rain Removal (ICCV 2023)
rt = os.path.join(base_rt, "Derain/LHPRain")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['train', 'val', 'test']:
    input_folder = os.path.join(rt, "input/%s" % (dset))
    gt_folder = os.path.join(rt, "gt/%s" % (dset))
    list_file = []
    total_ct = 0
    for ct, sample in enumerate(os.listdir(input_folder)):
        if is_image(sample):
            input_file = os.path.join(input_folder, sample)
            gt_file = os.path.join(gt_folder, sample)
            list_file.append((input_file, gt_file))
            total_ct += 1

    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(input_folder, total_ct)

# Practical
# pre-process for Practical: https://github.com/ZhangXinNan/RainDetectionAndRemoval
# Wenhan Yang, Robby T. Tan, Jiashi Feng, Jiaying Liu, Zongming Guo, and Shuicheng Yan. Deep joint rain detection and removal from a single image. In CVPR, 2017.
rt = os.path.join(base_rt, "Derain/Practical")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
input_folder = os.path.join(rt, "image")
list_file = []
total_ct = 0
for ct, sample in enumerate(os.listdir(input_folder)):
    if is_image(sample):
        input_file = os.path.join(input_folder, sample)
        list_file.append(input_file)
        total_ct += 1

with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
    for item in list_file:
        fp.write('{} {} {}\n'.format(item, None, None))
print(rt, total_ct)


# Rain100L
# pre-process for Rain100L: https://github.com/shangwei5/BRN
# Wenhan Yang, Robby T. Tan, Jiashi Feng, Jiaying Liu, Zongming Guo, and Shuicheng Yan. Deep joint rain detection and removal from a single image. In CVPR, 2017.
# files in the folder: norain-xxx.png, rain-xxx.png, rainregion-xxx.png, rainstreak-xxx.png
rt = os.path.join(base_rt, "Derain/Rain100L")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)

image_folder = os.path.join(rt, 'image')
gt_folder = os.path.join(rt, 'gt')
list_file = []
total_ct = 0
for ct, sample in enumerate(os.listdir(image_folder)):
    if is_image(sample):
        input_file = os.path.join(image_folder, sample)
        gt_file = os.path.join(gt_folder, sample.replace('rain', 'norain'))
        list_file.append((input_file, gt_file))
        total_ct += 1

with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
    for item in list_file:
        fp.write('{} {} {}\n'.format(item[0], item[1], None))
print(rt, total_ct)

# RainTrainL (Rain200L)
# pre-process for RainTrainL (Rain200L): https://github.com/shangwei5/BRN
# same reference: Yang et al. Deep joint rain detection and removal from a single image. In CVPR, 2017.
# files in the folder: norain-xxx.png, rain-xxx.png, rainregion-xxx.png, rainstreak-xxx.png
rt = os.path.join(base_rt, "Derain/RainTrainL")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
image_folder = os.path.join(rt, 'image')
gt_folder = os.path.join(rt, 'gt')
list_file = []
total_ct = 0
for ct, sample in enumerate(os.listdir(image_folder)):
    if is_image(sample):
        input_file = os.path.join(image_folder, sample)
        gt_file = os.path.join(gt_folder, sample.replace('rain', 'norain'))
        list_file.append((input_file, gt_file))
        total_ct += 1

with open(os.path.join(meta_folder, 'train.list'), 'w') as fp:
    for item in list_file:
        fp.write('{} {} {}\n'.format(item[0], item[1], None))
print(rt, total_ct)

# UHD-Rain
# pre-process for UHD-Rain: https://github.com/wlydlut/uhddip
# UHDDIP: Ultra-High-Definition Restoration: New Benchmarks and A Dual Interaction Prior-Driven Solution
rt = os.path.join(base_rt, "Derain/UHD-Rain")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)

for dset in ['training_set', 'testing_set']:  # train, test
    dset_path = os.path.join(rt, dset)
    input_folder = os.path.join(dset_path, 'input')
    gt_folder = os.path.join(dset_path, 'gt')
    list_file = []
    for ct, sample in enumerate(os.listdir(input_folder)):
        input_file = os.path.join(input_folder, sample)
        gt_file = os.path.join(gt_folder, sample)
        list_file.append((input_file, gt_file))

    set_dict = {'training_set': 'train', 'testing_set': 'test'}
    with open(os.path.join(meta_folder, '{}.list'.format(set_dict[dset])), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(rt, ct + 1)

# RainDrop
# pre-process for RainDrop: https://github.com/rui1996/DeRaindrop
# Attentive Generative Adversarial Network for Raindrop Removal from A Single Image (CVPR 2018 Highlight)
rt = os.path.join(base_rt, "Derain/RainDrop")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)

for dset in ['train', 'test_a', 'test_b']:
    dset_path = os.path.join(rt, dset)
    input_folder = os.path.join(dset_path, 'data')
    gt_folder = os.path.join(dset_path, 'gt')
    list_file = []
    for ct, sample in enumerate(os.listdir(input_folder)):
        input_file = os.path.join(input_folder, sample)
        gt_file = os.path.join(gt_folder, sample.replace("rain", "clean"))
        list_file.append((input_file, gt_file))

    with open(os.path.join(meta_folder, 'Raindrop_{}.list'.format(dset)), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(rt, ct + 1)

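# Worked examples of the RainDS prefix rewriting above, taken from the
# rd-rain-97 / pie-rd-97 comments in the loop:
_rd = {'rainstreak_raindrop': 'rd-rain', 'rainstreak': 'rain', 'raindrop': 'rd'}
assert "rd-rain-97.png".replace(_rd['rainstreak_raindrop'], "norain") == "norain-97.png"
assert "pie-rd-97.png".replace(_rd['raindrop'], "norain") == "pie-norain-97.png"
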
"""
Desnow: Snow100k, UHD-Snow
"""
# Snow100k, download: https://pan.baidu.com/s/1Y8fq8qQjC0YK5DTktYPfbQ?pwd=nyop#list/path=/sharelink688030094-540249285017805/snow100k&parentPath=/sharelink688030094-540249285017805
# Training set (50,000 images, 7.8GB), test set (50,000 images, 7.8GB), realistic snowy images (1,329 images, 67MB).
rt = os.path.join(base_rt, "Desnow/Snow100k")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)

# replace spaces in filenames so the space-separated meta format stays parseable
folder = Path(rt)
img_list = list(folder.rglob("*.[jp][pn]g"))
for data in img_list:
    if " " in str(data):
        old = str(data)
        new = old.replace(" ", "_")
        os.rename(old, new)

# training
train_rt = os.path.join(rt, 'all')
train_gt_rt = os.path.join(train_rt, 'gt')
train_img_rt = os.path.join(train_rt, 'synthetic')
list_file = []
total_ct = 0
for ct, sample in enumerate(os.listdir(train_img_rt)):
    if is_image(sample):
        img_ = os.path.join(train_img_rt, sample)
        gt_ = os.path.join(train_gt_rt, sample)
        list_file.append((img_, gt_))
        total_ct += 1

with open(os.path.join(meta_folder, 'train.list'), 'w') as fp:
    for item in list_file:
        fp.write('{} {} {}\n'.format(item[0], item[1], None))
print(train_rt, total_ct)

# synthetic test sets
test_rt = os.path.join(rt, 'media/jdway/GameSSD/overlapping/test')
for dset in os.listdir(test_rt):
    test_folder = os.path.join(test_rt, dset)
    test_gt_rt = os.path.join(test_folder, 'gt')
    test_img_rt = os.path.join(test_folder, 'synthetic')
    list_file = []
    total_ct = 0
    for ct, sample in enumerate(os.listdir(test_img_rt)):
        if is_image(sample):
            img_ = os.path.join(test_img_rt, sample)
            gt_ = os.path.join(test_gt_rt, sample)
            list_file.append((img_, gt_))
            total_ct += 1

    with open(os.path.join(meta_folder, 'test_%s.list' % (dset[-1])), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(test_folder, total_ct)

# real-world test set
test_rt = os.path.join(rt, 'realistic')
list_file = []
total_ct = 0
for ct, sample in enumerate(os.listdir(test_rt)):
    if is_image(sample):
        img_ = os.path.join(test_rt, sample)
        list_file.append(img_)
        total_ct += 1

with open(os.path.join(meta_folder, 'test_realistic.list'), 'w') as fp:
    for item in list_file:
        fp.write('{} {} {}\n'.format(item, None, None))
print(test_rt, total_ct)


# UHD-Snow
# pre-process for UHD-Snow: https://github.com/wlydlut/uhddip
# UHDDIP: Ultra-High-Definition Restoration: New Benchmarks and A Dual Interaction Prior-Driven Solution
rt = os.path.join(base_rt, "Desnow/UHD-Snow")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['training_set', 'testing_set']:  # train, test
    dset_path = os.path.join(rt, dset)
    input_folder = os.path.join(dset_path, 'input')
    gt_folder = os.path.join(dset_path, 'gt')
    list_file = []
    total_ct = 0
    for ct, sample in enumerate(os.listdir(input_folder)):
        if is_image(sample):
            input_file = os.path.join(input_folder, sample)
            gt_file = os.path.join(gt_folder, sample)
            list_file.append((input_file, gt_file))
            total_ct += 1

    set_dict = {'training_set': 'train', 'testing_set': 'test'}
    with open(os.path.join(meta_folder, '{}.list'.format(set_dict[dset])), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(dset_path, total_ct)

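# Note: Path.rglob("*.[jp][pn]g") in the rename pass above only matches
# lower-case extensions, so a file like "IMG 1.JPG" would keep its space.
# A case-insensitive variant (a sketch, not part of the original script):
def images_any_case(root):
    return [p for p in Path(root).rglob("*") if p.suffix.lower() in (".jpg", ".jpeg", ".png")]
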
"""
Lowlight: DICM, LIME, LOL, MEF, NPE, VV
"""
# LOL
# pre-process for LOL: https://github.com/fediory/hvi-cidnet
# Chen Wei, Wenjing Wang, Wenhan Yang, and Jiaying Liu. Deep retinex decomposition for low-light enhancement. In BMVC, 2018.
rt = os.path.join(base_rt, "LowLight/LOL")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['our485', 'eval15']:  # train, test
    dset_path = os.path.join(rt, dset)
    input_folder = os.path.join(dset_path, "low")
    gt_folder = os.path.join(dset_path, "high")
    list_file = []
    total_ct = 0
    for ct, sample in enumerate(os.listdir(input_folder)):
        if is_image(sample):
            input_file = os.path.join(input_folder, sample)
            gt_file = os.path.join(gt_folder, sample)
            list_file.append((input_file, gt_file))
            total_ct += 1

    set_dict = {'our485': 'train', 'eval15': 'test'}
    with open(os.path.join(meta_folder, '{}.list'.format(set_dict[dset])), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(dset_path, total_ct)


# pre-process for the unpaired low-light test sets (1. LOL is handled above)
# 2. DICM:
#    * Chulwoo Lee, Chul Lee, and Chang-Su Kim. Contrast enhancement based on layered difference representation. In ICIP, 2012.
#    * Link: https://github.com/fediory/hvi-cidnet ("You Only Need One Color Space: An Efficient Network for Low-light Image Enhancement")
# 3. MEF:
#    * Kede Ma, Kai Zeng, and Zhou Wang. Perceptual quality assessment for multi-exposure image fusion. TIP, 2015.
#    * Link: same as above.
# 4. NPE:
#    * Shuhang Wang, Jin Zheng, Hai-Miao Hu, and Bo Li. Naturalness preserved enhancement algorithm for non-uniform illumination images. TIP, 2013.
#    * Link: same as above.
# 5. LIME / 6. VV:
#    * Link: same as above.
rt = os.path.join(base_rt, "LowLight")

dataset = ['DICM', 'LIME', 'MEF', 'NPE', 'VV']
for dset in dataset:
    # one-time folder restructuring (already done):
    # dset_pth = "./%s" % (dset)
    # os.rename(dset_pth, 'image')
    # os.makedirs(dset_pth)
    # shutil.move('./image', dset_pth)

    # list samples
    dset_pth = os.path.join(rt, dset)
    list_file = []
    input_folder = os.path.join(dset_pth, 'image')
    meta_folder = os.path.join(dset_pth, "metas")
    os.makedirs(meta_folder, exist_ok=True)
    folder = Path(input_folder)
    img_list = list(folder.rglob("*.[jp][pn]g"))
    for data in img_list:
        if " " in str(data):
            old = str(data)
            new = old.replace(" ", "_")
            os.rename(old, new)

    total_ct = 0
    for ct, sample in enumerate(os.listdir(input_folder)):
        if is_image(sample):
            input_file = os.path.join(input_folder, sample)
            list_file.append(input_file)
            total_ct += 1

    with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item, None, None))
    print(input_folder, total_ct)

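# The space-stripping pass above duplicates the one in the Snow100k section;
# a shared helper is a natural refactor (sketch, same glob and rename semantics):
def strip_spaces(root):
    for p in Path(root).rglob("*.[jp][pn]g"):
        if " " in str(p):
            os.rename(str(p), str(p).replace(" ", "_"))
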
"""
Other: UDC
"""
# pre-process for UDC (TOLED + POLED): https://yzhouas.github.io/projects/UDC/udc.html
# Yuqian Zhou, David Ren, Neil Emerton, Sehoon Lim, and Timothy Large. Image restoration for under-display camera. In CVPR, 2021.
def mat2png(dset_folder, name, rt):
    # dset_folder: '~/UDC/{poled,toled}', name: e.g. 'poled_test_display.mat', rt: save root '~/UDC/'
    udc_key = name.split('.')[0][6:]
    udc_file = os.path.join(dset_folder, name)
    udc_mat = loadmat(udc_file)[udc_key]
    dset, imggt = udc_key.split('_')[0], udc_key.split('_')[1]
    dset_folder = os.path.join(rt, dset)
    os.makedirs(dset_folder, exist_ok=True)
    if imggt == 'display':
        imggt = 'image'
    else:
        imggt = 'gt'
    imggt_folder = os.path.join(dset_folder, imggt)
    os.makedirs(imggt_folder, exist_ok=True)

    # dump each frame of the .mat array as a PNG
    n_im, h, w, c = udc_mat.shape
    for i in range(n_im):
        print(i, end='\r')
        udc = np.reshape(udc_mat[i, :, :, :], (h, w, c))
        sample = Image.fromarray(np.uint8(udc)).convert('RGB')
        sample.save(os.path.join(imggt_folder, '%s_%d.png' % (name.split('_')[0], i)))

rt = os.path.join(base_rt, "Other/UDC")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
# pre-processing (run once to convert the .mat files to PNGs)
poled_folder = os.path.join(rt, 'poled')
poled_sample = ['poled_test_display.mat', 'poled_test_gt.mat',
                'poled_val_display.mat', 'poled_val_gt.mat']
toled_folder = os.path.join(rt, 'toled')
toled_sample = ['toled_test_display.mat', 'toled_test_gt.mat',
                'toled_val_display.mat', 'toled_val_gt.mat']

# for sample_set in poled_sample:
#     print(sample_set)
#     mat2png(poled_folder, sample_set, rt)
#     print()

# for sample_set in toled_sample:
#     print(sample_set)
#     mat2png(toled_folder, sample_set, rt)
#     print()

for dset in ['val', 'test']:
    dset_folder = os.path.join(rt, dset)
    input_folder = os.path.join(dset_folder, 'image')
    gt_folder = os.path.join(dset_folder, 'gt')
    list_file = []
    total_gt = 0
    for ct, sample in enumerate(os.listdir(input_folder)):
        if is_image(sample):
            input_file = os.path.join(input_folder, sample)
            gt_file = os.path.join(gt_folder, sample)
            list_file.append((input_file, gt_file))
            total_gt += 1

    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(dset_folder, total_gt)

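# Worked example of the key/path derivation inside mat2png for
# name = 'poled_test_display.mat':
#   name.split('.')[0][6:]      -> 'test_display' (the variable name inside the .mat)
#   'test_display'.split('_')   -> dset='test', imggt='display' -> subfolder 'image'
# so frame i is saved as '<rt>/test/image/poled_<i>.png'.
assert 'poled_test_display.mat'.split('.')[0][6:] == 'test_display'
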
"""
SR: DIV2K, Flickr2K, OST
"""
# OST
rt = os.path.join(base_rt, "SuperResolution/OST")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
list_file = []
total_ct = 0
image_folder = os.path.join(rt, "images")
for dset in os.listdir(image_folder):  # animal, building, grass, ...
    input_folder = os.path.join(image_folder, dset)
    for ct, sample in enumerate(os.listdir(input_folder)):
        if is_image(sample):
            input_file = os.path.join(input_folder, sample)
            list_file.append((input_file, None))
            total_ct += 1

with open(os.path.join(meta_folder, 'OST_HR.list'), 'w') as fp:
    for item in list_file:
        fp.write('{} {} {}\n'.format(None, item[0], None))
print(rt, total_ct)

# augmented version (SR)
image_folder = os.path.join(rt, "images_pair")
for distortion in os.listdir(image_folder):  # SR1, SR2, SR3
    list_file = []
    total_ct = 0
    pair_folder = os.path.join(image_folder, distortion)
    for dset in os.listdir(pair_folder):  # animal, building, grass, ...
        input_folder = os.path.join(pair_folder, dset)
        if "SR" in distortion:
            hq_folder = os.path.join(input_folder, "HR")
            lq_folder = os.path.join(input_folder, "LR")
        else:
            raise KeyError(f"Unknown {distortion} for augmented OST dataset.")
        for ct, sample in enumerate(os.listdir(hq_folder)):
            if is_image(sample):
                hq_file = os.path.join(hq_folder, sample)
                lq_file = os.path.join(lq_folder, sample)
                list_file.append((lq_file, hq_file))
                total_ct += 1

    with open(os.path.join(meta_folder, f"OST_train_pair_{distortion}.list"), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(pair_folder, total_ct)


# Flickr2K
rt = os.path.join(base_rt, "SuperResolution/Flickr2K")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)

list_file = []
total_ct = 0
for dset in ['images']:
    input_folder = os.path.join(rt, dset)
    for ct, sample in enumerate(os.listdir(input_folder)):
        if is_image(sample):
            input_file = os.path.join(input_folder, sample)
            list_file.append((input_file, None))
            total_ct += 1

with open(os.path.join(meta_folder, 'Flickr2K_HR.list'), 'w') as fp:
    for item in list_file:
        fp.write('{} {} {}\n'.format(None, item[0], None))
print(rt, total_ct)

# augmented version (SR, Noise)
image_folder = os.path.join(rt, "images_pair")
for distortion in os.listdir(image_folder):  # Noise_L1, ..., SR1, ...
    list_file = []
    total_ct = 0
    input_folder = os.path.join(image_folder, distortion)
    if "SR" in distortion:
        hq_folder = os.path.join(input_folder, "HR")
        lq_folder = os.path.join(input_folder, "LR")
    elif "Noise" in distortion:
        hq_folder = os.path.join(input_folder, "HQ")
        lq_folder = os.path.join(input_folder, "LQ")
    else:
        raise KeyError(f"Unknown {distortion} for augmented Flickr2K dataset.")

    for ct, sample in enumerate(os.listdir(hq_folder)):
        if is_image(sample):
            hq_file = os.path.join(hq_folder, sample)
            lq_file = os.path.join(lq_folder, sample)
            list_file.append((lq_file, hq_file))
            total_ct += 1

    with open(os.path.join(meta_folder, f"Flickr2K_train_pair_{distortion}.list"), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(rt, total_ct)


# DIV2K
rt = os.path.join(base_rt, "SuperResolution/DIV2K")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['DIV2K_train_HR', 'DIV2K_valid_HR']:  # train, val
    input_folder = os.path.join(rt, dset)
    list_file = []
    total_ct = 0
    for ct, sample in enumerate(os.listdir(input_folder)):
        print(ct, end='\r')
        if is_image(sample):
            input_file = os.path.join(input_folder, sample)
            list_file.append((input_file, None))
            total_ct += 1

    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(None, item[0], None))
    print(input_folder, total_ct)

# augmented version (SR, Noise)
for dset in ['DIV2K_train_pair', 'DIV2K_valid_pair']:  # train, val
    image_folder = os.path.join(rt, dset)
    for distortion in os.listdir(image_folder):  # Noise, SR
        list_file = []
        total_ct = 0
        input_folder = os.path.join(image_folder, distortion)
        if "SR" in distortion:
            hq_folder = os.path.join(input_folder, "HR")
            lq_folder = os.path.join(input_folder, "LR")
        elif "Noise" in distortion:
            hq_folder = os.path.join(input_folder, "HQ")
            lq_folder = os.path.join(input_folder, "LQ")
        else:
            raise KeyError(f"Unknown {distortion} for augmented DIV2K dataset.")

        for ct, sample in enumerate(os.listdir(hq_folder)):
            print(ct, end='\r')
            if is_image(sample):
                hq_file = os.path.join(hq_folder, sample)
                lq_file = os.path.join(lq_folder, sample)
                list_file.append((lq_file, hq_file))
                total_ct += 1

        with open(os.path.join(meta_folder, f"{dset}_{distortion}.list"), 'w') as fp:
            for item in list_file:
                fp.write('{} {} {}\n'.format(item[0], item[1], None))
        print(input_folder, total_ct)
1_generate_iqa.py
ADDED
@@ -0,0 +1,422 @@
import os
from PIL import Image
from pathlib import Path
import numpy as np
import random
import torchvision.transforms.functional as TF
import torchvision.transforms as transforms
import cv2
import re
import json

# IR dataset dictionary
base_rt = '/home/CORP/hsiang.chen/Project/Datasets/IR'
dataset_dict = {
    # Image Restoration
    ## Super Resolution (3)
    "HR": {
        "DIV2K": {'train': 'SuperResolution/DIV2K/metas/DIV2K_train_HR.list',  # (800, single)
                  'val': 'SuperResolution/DIV2K/metas/DIV2K_valid_HR.list'},   # (100, single)
        "Flickr2K": {'train': 'SuperResolution/Flickr2K/metas/Flickr2K_HR.list'},  # (2650, single)
        "OST": {'train': 'SuperResolution/OST/metas/OST_HR.list'},  # (10324, single)
    },

    "Low Resolution": {
        "DIV2K": {'train1': 'SuperResolution/DIV2K/metas/DIV2K_train_pair_SR1.list',  # (800, pair)
                  'train2': 'SuperResolution/DIV2K/metas/DIV2K_train_pair_SR2.list',  # (800, pair)
                  'train3': 'SuperResolution/DIV2K/metas/DIV2K_train_pair_SR3.list',  # (800, pair)
                  'val': 'SuperResolution/DIV2K/metas/DIV2K_valid_pair_SR.list'},     # (100, pair)
        "Flickr2K": {'train1': 'SuperResolution/Flickr2K/metas/Flickr2K_train_pair_SR1.list',  # (2650, pair)
                     'train2': 'SuperResolution/Flickr2K/metas/Flickr2K_train_pair_SR2.list',  # (2650, pair)
                     'train3': 'SuperResolution/Flickr2K/metas/Flickr2K_train_pair_SR3.list'},  # (2650, pair)
        "OST": {'train1': 'SuperResolution/OST/metas/OST_train_pair_SR1.list',  # (10324, pair)
                'train2': 'SuperResolution/OST/metas/OST_train_pair_SR2.list',  # (10324, pair)
                'train3': 'SuperResolution/OST/metas/OST_train_pair_SR3.list'},  # (10324, pair)
    },

    ## Derain (5)
    "Rain": {
        "RainTrainL": {'train': 'Derain/RainTrainL/metas/train.list'},  # (200, pair)
        "Rain100L": {'test': 'Derain/Rain100L/metas/test.list'},  # (100, pair)
        "LHPRain": {'train': 'Derain/LHPRain/metas/train.list',  # (2100, pair)
                    'val': 'Derain/LHPRain/metas/val.list',      # (600, pair)
                    'test': 'Derain/LHPRain/metas/test.list'},   # (300, pair)
        "UHDRain": {'train': 'Derain/UHD-Rain/metas/train.list',  # (3000, pair)
                    'test': 'Derain/UHD-Rain/metas/test.list'},   # (200, pair)
        "Practical": {'test': 'Derain/Practical/metas/test.list'},  # (15, real)
    },

    ## Deraindrop (2)
    "RainDrop": {
        "RainDrop": {'train': 'Derain/RainDrop/metas/Raindrop_train.list',   # (861, pair)
                     'test_a': 'Derain/RainDrop/metas/Raindrop_test_a.list',  # (58, pair)
                     'test_b': 'Derain/RainDrop/metas/Raindrop_test_b.list'},  # (249, pair)
        "RainDS_syn_rainstreak": {'train': 'Derain/RainDS/metas/RainDS_syn_train_rainstreak.list',  # (1000, pair)
                                  'test': 'Derain/RainDS/metas/RainDS_syn_test_rainstreak.list'},   # (200, pair)
        "RainDS_syn_raindrop": {'train': 'Derain/RainDS/metas/RainDS_syn_train_raindrop.list',  # (1000, pair)
                                'test': 'Derain/RainDS/metas/RainDS_syn_test_raindrop.list'},   # (200, pair)
        "RainDS_syn_rainstreak_raindrop": {'train': 'Derain/RainDS/metas/RainDS_syn_train_rainstreak_raindrop.list',  # (1000, pair)
                                           'test': 'Derain/RainDS/metas/RainDS_syn_test_rainstreak_raindrop.list'},   # (200, pair)
        "RainDS_real_rainstreak": {'train': 'Derain/RainDS/metas/RainDS_real_train_set_rainstreak.list',  # (150, pair)
                                   'test': 'Derain/RainDS/metas/RainDS_real_test_set_rainstreak.list'},   # (98, pair)
        "RainDS_real_raindrop": {'train': 'Derain/RainDS/metas/RainDS_real_train_set_raindrop.list',  # (150, pair)
                                 'test': 'Derain/RainDS/metas/RainDS_real_test_set_raindrop.list'},   # (98, pair)
        "RainDS_real_rainstreak_raindrop": {'train': 'Derain/RainDS/metas/RainDS_real_train_set_rainstreak_raindrop.list',  # (150, pair)
                                            'test': 'Derain/RainDS/metas/RainDS_real_test_set_rainstreak.list'},  # (98, pair)
    },

    ## Dehaze (5)
    "Fog": {
        "SOTS": {'test': 'Dehaze/SOTS/metas/test.list'},  # (500, pair)
        "OTS": {'train': 'Dehaze/OTS/metas/train.list'},  # (72135, pair)
        "4kID": {'train': 'Dehaze/4kID/metas/train.list',  # (15606, pair)
                 'test': 'Dehaze/4kID/metas/test.list'},   # (97, pair)
        "Unann": {'test': 'Dehaze/UnannotatedHazyImages/metas/test.list'},  # (4809, real)
        "NH-Haze": {'test': 'Dehaze/NH-Haze/metas/test.list'},  # (5, real)
    },

    ## Denoise (9)
    "Noise": {
        "BSD400": {'train': 'Denoise/BSD400/metas/BSD400.list'},  # (400, syn)
        "WED": {'train': 'Denoise/WaterlooED/metas/WaterlooED.list'},  # (4744, syn)
        "BSD68": {'test': 'Denoise/BSD68/metas/BSD68.list'},  # (68, syn)
        "Urban": {'test': 'Denoise/Urban100/metas/Urban100.list'},  # (100, syn)
        "CBSD68": {'test': 'Denoise/CBSD68/metas/CBSD68.list'},  # (68, syn)
        "Kodak": {'test': 'Denoise/Kodak/metas/Kodak.list'},  # (24, syn)
        "McMaster": {'test': 'Denoise/McMaster/metas/McMaster.list'},  # (18, syn)
        "Set12": {'test': 'Denoise/Set12/metas/Set12.list'},  # (12, syn)
        "SIDD": {'train': 'Denoise/SIDD/metas/train.list',  # (320, pair)
                 'test': 'Denoise/SIDD/metas/test.list'},   # (1280, pair)

        "DIV2K": {'train1': 'SuperResolution/DIV2K/metas/DIV2K_train_pair_Noise_L1.list',  # (800, pair)
                  'train2': 'SuperResolution/DIV2K/metas/DIV2K_train_pair_Noise_L3.list',  # (800, pair)
                  'train3': 'SuperResolution/DIV2K/metas/DIV2K_train_pair_Noise_L5.list',  # (800, pair)
                  'val': 'SuperResolution/DIV2K/metas/DIV2K_valid_pair_Noise.list'},       # (100, pair)
        "Flickr2K": {'train1': 'SuperResolution/Flickr2K/metas/Flickr2K_train_pair_Noise_L1.list',  # (2650, pair)
                     'train2': 'SuperResolution/Flickr2K/metas/Flickr2K_train_pair_Noise_L3.list',  # (2650, pair)
                     'train3': 'SuperResolution/Flickr2K/metas/Flickr2K_train_pair_Noise_L5.list'},  # (2650, pair)
    },

    ## Desnow (2)
    "Snow": {
        "Snow100k": {'train': 'Desnow/Snow100k/metas/train.list'},  # (50000, syn)
        "Snow100k-S": {'test': 'Desnow/Snow100k/metas/test_S.list'},  # (16611, syn)
        "Snow100k-M": {'test': 'Desnow/Snow100k/metas/test_M.list'},  # (16588, syn)
        "Snow100k-L": {'test': 'Desnow/Snow100k/metas/test_L.list'},  # (16801, syn)
        "Snow100k-R": {'test': 'Desnow/Snow100k/metas/test_realistic.list'},  # (1329, real)
        "UHDSnow": {'train': 'Desnow/UHD-Snow/metas/train.list',  # (3000, pair)
                    'test': 'Desnow/UHD-Snow/metas/test.list'},   # (200, pair)
    },

    ## Deblur (3)
    "Blur": {
        "GoPro": {'train': 'Deblur/GoPro/metas/train.list',  # (2103, pair)
                  'test': 'Deblur/GoPro/metas/test.list'},   # (1111, pair)
        "HIDE": {'train': 'Deblur/HIDE/metas/train.list',  # (6397, pair)
                 'test': 'Deblur/HIDE/metas/test.list'},   # (2025, pair)
        "RealBlur-J": {'test': 'Deblur/RealBlur-J_ECC_IMCORR_centroid_itensity_ref/metas/test.list'},  # (980, pair)
        "RealBlur-R": {'test': 'Deblur/RealBlur-R_BM3D_ECC_IMCORR_centroid_itensity_ref/metas/test.list'},  # (980, pair)
    },

    ## Lowlight (6)
    "Low-light": {
        "LOL": {'train': 'LowLight/LOL/metas/train.list',  # (485, pair)
                'test': 'LowLight/LOL/metas/test.list'},   # (15, pair)
        "DICM": {'test': 'LowLight/DICM/metas/test.list'},  # (69, real)
        "MEF": {'test': 'LowLight/MEF/metas/test.list'},  # (17, real)
        "NPE": {'test': 'LowLight/NPE/metas/test.list'},  # (8, real)
        "LIME": {'test': 'LowLight/LIME/metas/test.list'},  # (10, real)
        "VV": {'test': 'LowLight/VV/metas/test.list'},  # (24, real)
    },

    ## Others (1)
    "Unknown": {
        "UDC": {'val': 'Other/UDC/metas/val.list',   # (60, pair)
                'test': 'Other/UDC/metas/test.list'},  # (60, pair)
    },
}

def IRImageData(listfile):
    # each line: [degradation, clean, label]
    paths = []
    with open(listfile) as fin:
        for line in fin:
            line = line.strip().split()
            if len(line) == 3:
                paths.append(line)
    paths = sorted(paths)
    # Dataset
    LQ_list = []
    HQ_list = []
    for data in paths:
        lq_pth, hq_pth, label = data
        if os.path.isfile(lq_pth):
            LQ_list.append(lq_pth)
        if os.path.isfile(hq_pth):
            HQ_list.append(hq_pth)
    return LQ_list, HQ_list
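# Illustrative note (the concrete paths below are hypothetical): each .list file
# is expected to hold one whitespace-separated triplet per line,
#     <degraded_path> <clean_path> <label>
# e.g. "Derain/Rain100L/rain/001.png Derain/Rain100L/norain/001.png rain"
# Lines with any other field count are skipped by the len(line) == 3 check, and a
# path is kept only if the referenced file actually exists on disk.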

# question dictionary:
question_dict = {
    "Full-Reference": {
        "ONE": [
            "Compared to the reference, what ONE distortion stands out most in the evaluated image?",
            "Determine the leading ONE degradation when comparing the evaluated image to the reference.",
            "Determine the most impactful ONE distortion in the evaluated image compared to the reference.",
            "Highlight the most significant ONE distortion in the evaluated image in comparison to the reference.",
            "Identify the chief ONE degradation in the evaluated image when compared to the reference.",
            "Identify the most notable ONE distortion in the evaluated image's quality when compared to the reference.",
            "In comparison to the reference, what ONE distortion is most prominent in the evaluated image?",
            "What ONE distortion is most apparent in the evaluated image relative to the reference?",
            "What ONE distortion most significantly affects the evaluated image compared to the reference?",
            "What ONE distortion stands out in the evaluated image against the reference?",
            "What critical ONE quality degradation is present in the evaluated image versus the reference?",
        ],
        "TWO": [
            "Compared to the reference, what TWO distortions stand out most in the evaluated image?",
            "Determine the leading TWO degradations when comparing the evaluated image to the reference.",
            "Determine the most impactful TWO distortions in the evaluated image compared to the reference.",
            "Highlight the most significant TWO distortions in the evaluated image in comparison to the reference.",
            "Identify the chief TWO degradations in the evaluated image when compared to the reference.",
            "Identify the most notable TWO distortions in the evaluated image's quality when compared to the reference.",
            "In comparison to the reference, what TWO distortions are most prominent in the evaluated image?",
            "What TWO distortions are most apparent in the evaluated image relative to the reference?",
            "What TWO distortions most significantly affect the evaluated image compared to the reference?",
            "What TWO distortions stand out in the evaluated image against the reference?",
            "What critical TWO quality degradations are present in the evaluated image versus the reference?",
        ],
        "Common": [
            "Compared to the reference, what distortion(s) stand out most in the evaluated image?",
            "Determine the leading degradation(s) when comparing the evaluated image to the reference.",
            "Determine the most impactful distortion(s) in the evaluated image compared to the reference.",
            "Highlight the most significant distortion(s) in the evaluated image in comparison to the reference.",
            "Identify the chief degradation(s) in the evaluated image when compared to the reference.",
            "Identify the most notable distortion(s) in the evaluated image's quality when compared to the reference.",
            "In comparison to the reference, what distortion(s) are most prominent in the evaluated image?",
            "What critical quality degradation(s) are present in the evaluated image versus the reference?",
            "What distortion(s) are most apparent in the evaluated image relative to the reference?",
            "What distortion(s) most significantly affect the evaluated image compared to the reference?",
            "What distortion(s) stand out in the evaluated image against the reference?"
        ]
    },
    "Non-Reference": {
        "ONE": [
            "Determine the leading ONE degradation in the evaluated image.",
            "Determine the most impactful ONE distortion in the evaluated image.",
            "Highlight the most significant ONE distortion in the evaluated image.",
            "Identify the chief ONE degradation in the evaluated image.",
            "Identify the most critical ONE distortion in the evaluated image.",
            "Identify the most notable ONE distortion in the evaluated image's quality.",
            "In terms of image quality, what is the most glaring ONE issue with the evaluated image?",
            "In the evaluated image, what ONE distortion is most detrimental to image quality?",
            "Pinpoint the foremost ONE image quality issue in the evaluated image.",
            "What ONE distortion is most apparent in the evaluated image?",
            "What ONE distortion is most evident in the evaluated image?",
            "What ONE distortion is most prominent in the evaluated image?",
            "What ONE distortion is most prominent when examining the evaluated image?",
            "What ONE distortion most detrimentally affects the overall quality of the evaluated image?",
            "What ONE distortion most notably affects the clarity of the evaluated image?",
            "What ONE distortion most significantly affects the evaluated image?",
            "What ONE distortion stands out in the evaluated image?",
            "What ONE quality degradation is most apparent in the evaluated image?",
            "What critical ONE quality degradation is present in the evaluated image?",
            "What is the foremost ONE distortion affecting the evaluated image's quality?",
            "What is the leading ONE distortion in the evaluated image?",
            "What is the most critical ONE image quality issue in the evaluated image?",
            "What is the most severe ONE degradation observed in the evaluated image?",
            "What is the primary ONE degradation observed in the evaluated image?"
        ],
        "TWO": [
            "Determine the leading TWO degradations in the evaluated image.",
            "Determine the most impactful TWO distortions in the evaluated image.",
            "Highlight the most significant TWO distortions in the evaluated image.",
            "Identify the chief TWO degradations in the evaluated image.",
            "Identify the most critical TWO distortions in the evaluated image.",
            "Identify the most notable TWO distortions in the evaluated image's quality.",
            "In terms of image quality, what are the most glaring TWO issues with the evaluated image?",
            "In the evaluated image, what TWO distortions are most detrimental to image quality?",
            "Pinpoint the foremost TWO image quality issues in the evaluated image.",
            "What TWO distortions are most apparent in the evaluated image?",
            "What TWO distortions are most evident in the evaluated image?",
            "What TWO distortions are most prominent in the evaluated image?",
            "What TWO distortions are most prominent when examining the evaluated image?",
            "What TWO distortions most detrimentally affect the overall quality of the evaluated image?",
            "What TWO distortions most notably affect the clarity of the evaluated image?",
            "What TWO distortions most significantly affect the evaluated image?",
            "What TWO distortions stand out in the evaluated image?",
            "What TWO quality degradations are most apparent in the evaluated image?",
            "What are the foremost TWO distortions affecting the evaluated image's quality?",
            "What are the leading TWO distortions in the evaluated image?",
            "What are the most critical TWO image quality issues in the evaluated image?",
            "What are the most severe TWO degradations observed in the evaluated image?",
            "What are the primary TWO degradations observed in the evaluated image?",
            "What critical TWO quality degradations are present in the evaluated image?",
        ],
        "Common": [
            "Determine the leading degradation(s) in the evaluated image.",
            "Determine the most impactful distortion(s) in the evaluated image.",
            "Highlight the most significant distortion(s) in the evaluated image.",
            "Identify the chief degradation(s) in the evaluated image.",
            "Identify the most critical distortion(s) in the evaluated image.",
            "Identify the most notable distortion(s) in the evaluated image's quality.",
            "In terms of image quality, what are the most glaring issue(s) with the evaluated image?",
            "In the evaluated image, what distortion(s) are most detrimental to image quality?",
            "Pinpoint the foremost image quality issue(s) in the evaluated image.",
            "What are the foremost distortion(s) affecting the evaluated image's quality?",
            "What are the leading distortion(s) in the evaluated image?",
            "What are the most critical image quality issue(s) in the evaluated image?",
            "What are the most severe degradation(s) observed in the evaluated image?",
            "What are the primary degradation(s) observed in the evaluated image?",
            "What critical quality degradation(s) are present in the evaluated image?",
            "What distortion(s) are most apparent in the evaluated image?",
            "What distortion(s) are most evident in the evaluated image?",
            "What distortion(s) are most prominent in the evaluated image?",
            "What distortion(s) are most prominent when examining the evaluated image?",
            "What distortion(s) most detrimentally affect the overall quality of the evaluated image?",
            "What distortion(s) most notably affect the clarity of the evaluated image?",
            "What distortion(s) most significantly affect the evaluated image?",
            "What distortion(s) stand out in the evaluated image?",
            "What quality degradation(s) are most apparent in the evaluated image?"
        ]
    }
}

def question_generate(ref="Full-Reference", degra="Common"):
    option = " Answer the question using a single word or phrase."
    template = random.choice(question_dict[ref]["Common"] + question_dict[ref][degra])
    if random.random() >= 0.4:
        template += option
    return template
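# Illustrative example (actual output depends on the RNG state):
#     question_generate(ref="Non-Reference", degra="ONE")
#     -> "What ONE distortion is most apparent in the evaluated image? Answer the question using a single word or phrase."
# The answer-format suffix is appended with probability 0.6 (random.random() >= 0.4),
# and the "Common" templates are pooled with the count-specific ones before sampling.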

if __name__ == "__main__":
    for degradation, degra_dict in dataset_dict.items():
        for dname, ddict in degra_dict.items():
            for dset, list_path in ddict.items():

                meta_refA = []
                meta_A = []
                meta_syn = []

                # read data list
                paths = []
                list_path = os.path.join(base_rt, list_path)
                with open(list_path) as fin:
                    for line in fin:
                        line = line.strip().split()
                        if len(line) == 3:
                            paths.append(line)
                paths = sorted(paths)

                # Dataset
                LQ_list = []
                HQ_list = []
                for data in paths:
                    lq_pth, hq_pth, label = data

                    if os.path.isfile(hq_pth):
                        HQ_list.append(hq_pth)
                        image_ref = os.path.relpath(hq_pth, base_rt).replace("\\", "/")
                        id = os.path.basename(image_ref)  # at least one of hq/lq is expected to exist, so id is always set
                    else:
                        image_ref = None

                    if os.path.isfile(lq_pth):
                        LQ_list.append(lq_pth)
                        image_A = os.path.relpath(lq_pth, base_rt).replace("\\", "/")
                        id = os.path.basename(image_A)
                    else:
                        image_A = None

                    meta_refA.append({
                        "distortion_class": degradation,
                        "distortion_name": degradation,
                        "severity": 3,
                        "id": id,
                        "image_ref": image_ref,
                        "image_A": image_A,
                        "image_B": None,
                        "task_type": "quality_single_A",
                        "conversations": [
                            {
                                "from": "human",
                                "value": question_generate(ref="Full-Reference", degra="ONE"),
                            },
                            {
                                "from": "gpt",
                                "value": degradation
                            }
                        ],
                    })

                    meta_A.append({
                        "distortion_class": degradation,
                        "distortion_name": degradation,
                        "severity": 3,
                        "id": id,
                        "image_ref": None,
                        "image_A": image_A,
                        "image_B": None,
                        "task_type": "quality_single_A_noref",
                        "conversations": [
                            {
                                "from": "human",
                                "value": question_generate(ref="Non-Reference", degra="ONE"),
                            },
                            {
                                "from": "gpt",
                                "value": degradation
                            }
                        ],
                    })

                    meta_syn.append({
                        "distortion_class": degradation,
                        "distortion_name": degradation,
                        "severity": 3,
                        "id": id,
                        "image_ref": None,
                        "image_A": image_ref,  # clean-only sets: evaluate the clean image itself
                        "image_B": None,
                        "task_type": "quality_single_A_noref",
                        "conversations": [
                            {
                                "from": "human",
                                "value": question_generate(ref="Non-Reference", degra="ONE"),
                            },
                            {
                                "from": "gpt",
                                "value": degradation
                            }
                        ],
                    })

                print(f"[{degradation}]: {dname}-{dset} - LQ[{len(LQ_list)}], HQ[{len(HQ_list)}]")

                if len(LQ_list) > 0 and len(HQ_list) > 0 and len(LQ_list) == len(HQ_list):  # pair
                    meta_refA_pth = list_path.replace(".list", "_iqa_refA_brief.json")
                    meta_A_pth = list_path.replace(".list", "_iqa_A_brief.json")
                    with open(meta_refA_pth, "w") as f:
                        json.dump(meta_refA, f, indent=4)
                    with open(meta_A_pth, "w") as f:
                        json.dump(meta_A, f, indent=4)
                    print(f"Done. Metadata saved to: {meta_refA_pth} and {meta_A_pth}")

                elif len(LQ_list) > 0 and len(HQ_list) == 0:  # real image
                    meta_A_pth = list_path.replace(".list", "_iqa_A_brief.json")
                    with open(meta_A_pth, "w") as f:
                        json.dump(meta_A, f, indent=4)
                    print(f"Done. Metadata saved to: {meta_A_pth}")

                elif len(LQ_list) == 0 and len(HQ_list) > 0:  # syn image
                    meta_refA_pth = list_path.replace(".list", "_iqa_syn_refA_brief.json")
                    meta_syn_pth = list_path.replace(".list", "_iqa_syn_A_brief.json")
                    with open(meta_refA_pth, "w") as f:
                        json.dump(meta_refA, f, indent=4)
                    with open(meta_syn_pth, "w") as f:
                        json.dump(meta_syn, f, indent=4)
                    print(f"Done. Metadata saved to: {meta_refA_pth} and {meta_syn_pth}")

                else:
                    raise KeyError(f"task not matched; please check the dataset list: {list_path}")
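# Output naming, derived from the branches above: paired sets write
# *_iqa_refA_brief.json and *_iqa_A_brief.json next to the source .list file;
# real-only sets (no HQ) write *_iqa_A_brief.json; clean-only (syn) sets write
# *_iqa_syn_refA_brief.json and *_iqa_syn_A_brief.json.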
generate_lowresolution.py
ADDED
|
@@ -0,0 +1,537 @@
import argparse
import glob
import os
from PIL import Image

import cv2
import math
import numpy as np
import os.path as osp
import random
import time
import torch
from tqdm import tqdm

from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
from basicsr.data.transforms import augment
from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
from basicsr.utils.registry import DATASET_REGISTRY
from torch.utils import data as data
from torchvision.transforms.functional import center_crop
import torchvision.transforms as T
from torchvision.utils import save_image

from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt
from basicsr.data.transforms import paired_random_crop
from basicsr.utils import DiffJPEG, USMSharp
from basicsr.utils.img_process_util import filter2D
from basicsr.utils.registry import MODEL_REGISTRY
from collections import OrderedDict
from torch.nn import functional as F

cfg = {
    # dataset info.
    "name": "DF2K+OST",
    "type": "RealESRGANDataset",
    "dataroot_gt": "/home/CORP/hsiang.chen/Project/Datasets/IR/SuperResolution",
    "meta_train": [
        "DIV2K/metas/DIV2K_train_HR.list",
        "Flickr2K/metas/Flickr2K.list",
        "OST/metas/OST.list",
    ],
    "meta_test": ["DIV2K/metas/DIV2K_valid_HR.list"],

    # the first degradation process
    "resize_prob": [0.2, 0.7, 0.1],  # up, down, keep
    "resize_range": [0.15, 1.5],
    "gaussian_noise_prob": 0.5,
    "noise_range": [1, 30],
    "poisson_scale_range": [0.05, 3],
    "gray_noise_prob": 0.4,
    "jpeg_range": [30, 95],

    "blur_kernel_size": 21,
    "kernel_list": ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'],
    "kernel_prob": [0.45, 0.25, 0.12, 0.03, 0.12, 0.03],
    "sinc_prob": 0.1,
    "blur_sigma": [0.2, 3],
    "betag_range": [0.5, 4],
    "betap_range": [1, 2],

    # the second degradation process
    "second_blur_prob": 0.8,
    "resize_prob2": [0.3, 0.4, 0.3],  # up, down, keep
    "resize_range2": [0.3, 1.2],
    "gaussian_noise_prob2": 0.5,
    "noise_range2": [1, 25],
    "poisson_scale_range2": [0.05, 2.5],
    "gray_noise_prob2": 0.4,
    "jpeg_range2": [30, 95],

    "blur_kernel_size2": 21,
    "kernel_list2": ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'],
    "kernel_prob2": [0.45, 0.25, 0.12, 0.03, 0.12, 0.03],
    "sinc_prob2": 0.1,
    "blur_sigma2": [0.2, 1.5],
    "betag_range2": [0.5, 4],
    "betap_range2": [1, 2],

    "final_sinc_prob": 0.8,

    "gt_size": 512,
    "keep_ratio": True,
    "use_hflip": True,
    "use_rot": False,

    # data loader
    "use_shuffle": True,
    "num_worker_per_gpu": 5,
    "batch_size_per_gpu": 12,
    "dataset_enlarge_ratio": 1,
    "prefetch_mode": None,
}
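# Note: the degradation hyperparameters above appear to follow the published
# Real-ESRGAN training configuration (two-stage blur/resize/noise/JPEG plus a
# final sinc filter); "gt_size", "keep_ratio", and the dataset paths are local
# choices for this project.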

def set_seed(seed=42):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

@DATASET_REGISTRY.register()
class RealESRGANDataset(data.Dataset):
    """Dataset used for the Real-ESRGAN model:
    Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.

    It loads gt (Ground-Truth) images and augments them.
    It also generates blur kernels and sinc kernels for generating low-quality images.
    Note that the low-quality images are processed in tensors on GPUs for faster processing.

    Args:
        opt (dict): Config for train datasets. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            meta_info (str): Path for meta information file.
            io_backend (dict): IO backend type and other kwargs.
            use_hflip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
        Please see more options in the code.
    """

    def __init__(self, opt, train=True):
        super(RealESRGANDataset, self).__init__()
        self.opt = opt
        self.file_client = None

        # data root
        self.data_rt = opt['dataroot_gt']

        # dataload
        self.train = train
        if self.train:
            self.metas = opt['meta_train']
        else:
            self.metas = opt['meta_test']

        self.paths = []
        for meta in self.metas:
            with open(os.path.join(self.data_rt, meta)) as fin:
                self.paths += [line.strip().split(' ')[1] for line in fin]

        # Hyperparameters of the degradation
        # blur settings for the first degradation
        self.blur_kernel_size = opt['blur_kernel_size']
        self.kernel_list = opt['kernel_list']
        self.kernel_prob = opt['kernel_prob']  # a list for each kernel probability
        self.blur_sigma = opt['blur_sigma']
        self.betag_range = opt['betag_range']  # betag used in generalized Gaussian blur kernels
        self.betap_range = opt['betap_range']  # betap used in plateau blur kernels
        self.sinc_prob = opt['sinc_prob']  # the probability for sinc filters

        # blur settings for the second degradation
        self.blur_kernel_size2 = opt['blur_kernel_size2']
        self.kernel_list2 = opt['kernel_list2']
        self.kernel_prob2 = opt['kernel_prob2']
        self.blur_sigma2 = opt['blur_sigma2']
        self.betag_range2 = opt['betag_range2']
        self.betap_range2 = opt['betap_range2']
        self.sinc_prob2 = opt['sinc_prob2']

        # a final sinc filter
        self.final_sinc_prob = opt['final_sinc_prob']

        self.kernel_range = [2 * v + 1 for v in range(3, 11)]  # kernel size ranges from 7 to 21
        # TODO: kernel range is now hard-coded, should be in the configure file
        self.pulse_tensor = torch.zeros(21, 21).float()  # convolving with pulse tensor brings no blurry effect
        self.pulse_tensor[10, 10] = 1

        self.device = torch.cuda.current_device()
        self.jpeger = DiffJPEG(differentiable=False).to(self.device)  # simulate JPEG compression artifacts
        self.usm_sharpener = USMSharp().to(self.device)  # do usm sharpening
        self.resize = opt['gt_size']
        self.keep_ratio = opt['keep_ratio']

        # transforms
        self.crop = T.RandomCrop((self.resize, self.resize))
        self.flip = T.RandomHorizontalFlip()
        self.transform = T.Compose(
            [
                # T.ToDtype(torch.float32, scale=True),  # only supported for torch 2.x
                T.ToTensor(),
            ]
        )

    def __getitem__(self, index):
        # -------------------------------- Load gt images -------------------------------- #
        gt_path = self.paths[index]
        img_gt = Image.open(gt_path).convert("RGB")

        # -------------------------------- Image Process --------------------------------
        # resize
        h, w = img_gt.height, img_gt.width
        if self.keep_ratio:
            ratio = self.resize / min(h, w)
            h_new, w_new = round(h * ratio * 1.2), round(w * ratio * 1.2)
            img_gt = img_gt.resize((w_new, h_new), resample=Image.LANCZOS)
        else:
            img_gt = img_gt.resize((self.resize, self.resize), resample=Image.LANCZOS)
        # crop
        img_gt = self.crop(img_gt)
        # flip (only for train)
        if self.train:
            img_gt = self.flip(img_gt)
        # transform to tensor
        img_gt = self.transform(img_gt).to(torch.float32)

        # -------------------------------- Generate Kernels --------------------------------
        kernel, kernel2, sinc_kernel = self.generate_kernel()

        # ------------------------- Generate Low Resolution Sample -------------------------
        lq, hq = self.generate_lr({
            "gt": img_gt.unsqueeze(0),
            "kernel1": kernel,
            "kernel2": kernel2,
            "sinc_kernel": sinc_kernel,
        })

        return lq, hq, gt_path

    def generate_kernel(self):
        # ------------------------ Generate kernels (used in the first degradation) ------------------------ #
        kernel_size = random.choice(self.kernel_range)
        if np.random.uniform() < self.opt['sinc_prob']:
            # this sinc filter setting is for kernels ranging from [7, 21]
            if kernel_size < 13:
                omega_c = np.random.uniform(np.pi / 3, np.pi)
            else:
                omega_c = np.random.uniform(np.pi / 5, np.pi)
            kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
        else:
            kernel = random_mixed_kernels(
                self.kernel_list,
                self.kernel_prob,
                kernel_size,
                self.blur_sigma,
                self.blur_sigma, [-math.pi, math.pi],
                self.betag_range,
                self.betap_range,
                noise_range=None)
        # pad kernel
        pad_size = (21 - kernel_size) // 2
        kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
        kernel = torch.FloatTensor(kernel)

        # ------------------------ Generate kernels (used in the second degradation) ------------------------ #
        kernel_size = random.choice(self.kernel_range)
        if np.random.uniform() < self.opt['sinc_prob2']:
            if kernel_size < 13:
                omega_c = np.random.uniform(np.pi / 3, np.pi)
            else:
                omega_c = np.random.uniform(np.pi / 5, np.pi)
            kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
        else:
            kernel2 = random_mixed_kernels(
                self.kernel_list2,
                self.kernel_prob2,
                kernel_size,
                self.blur_sigma2,
                self.blur_sigma2, [-math.pi, math.pi],
                self.betag_range2,
                self.betap_range2,
                noise_range=None)

        # pad kernel
        pad_size = (21 - kernel_size) // 2
        kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size)))
        kernel2 = torch.FloatTensor(kernel2)

        # ------------------------------------- the final sinc kernel ------------------------------------- #
        if np.random.uniform() < self.opt['final_sinc_prob']:
            kernel_size = random.choice(self.kernel_range)
            omega_c = np.random.uniform(np.pi / 3, np.pi)
            sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21)
            sinc_kernel = torch.FloatTensor(sinc_kernel)
        else:
            sinc_kernel = self.pulse_tensor
        return kernel, kernel2, sinc_kernel
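    # All sampled kernels are zero-padded to 21x21 (pad_size = (21 - kernel_size) // 2
    # per side; kernel_size is always odd), so kernels of different sizes share one
    # tensor shape. The 21x21 pulse tensor acts as an identity kernel whenever the
    # final sinc filter is skipped.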

    def generate_lr(self, data):
        """Accept data from the dataloader, then apply two-order degradations to obtain LQ images."""
        # training data synthesis
        self.gt = data['gt'].to(self.device)
        self.gt_usm = self.usm_sharpener(self.gt)

        self.kernel1 = data['kernel1'].to(self.device)
        self.kernel2 = data['kernel2'].to(self.device)
        self.sinc_kernel = data['sinc_kernel'].to(self.device)

        ori_h, ori_w = self.gt.size()[2:4]

        # ----------------------- The first degradation process ----------------------- #
        # blur
        out = filter2D(self.gt_usm, self.kernel1)
        # random resize
        updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
        if updown_type == 'up':
            scale = np.random.uniform(1, self.opt['resize_range'][1])
        elif updown_type == 'down':
            scale = np.random.uniform(self.opt['resize_range'][0], 1)
        else:
            scale = 1
        mode = random.choice(['area', 'bilinear', 'bicubic'])
        out = F.interpolate(out, scale_factor=scale, mode=mode)
        # add noise
        gray_noise_prob = self.opt['gray_noise_prob']
        if np.random.uniform() < self.opt['gaussian_noise_prob']:
            out = random_add_gaussian_noise_pt(
                out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob)
        else:
            out = random_add_poisson_noise_pt(
                out,
                scale_range=self.opt['poisson_scale_range'],
                gray_prob=gray_noise_prob,
                clip=True,
                rounds=False)
        # JPEG compression
        jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range'])
        out = torch.clamp(out, 0, 1)  # clamp to [0, 1], otherwise the JPEGer will produce unpleasant artifacts
        out = self.jpeger(out, quality=jpeg_p)

        # ----------------------- The second degradation process ----------------------- #
        # blur
        if np.random.uniform() < self.opt['second_blur_prob']:
            out = filter2D(out, self.kernel2)
        # random resize
        updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
        if updown_type == 'up':
            scale = np.random.uniform(1, self.opt['resize_range2'][1])
        elif updown_type == 'down':
            scale = np.random.uniform(self.opt['resize_range2'][0], 1)
        else:
            scale = 1
        mode = random.choice(['area', 'bilinear', 'bicubic'])
        out = F.interpolate(
            out, size=(int(ori_h * scale), int(ori_w * scale)), mode=mode)
        # add noise
        gray_noise_prob = self.opt['gray_noise_prob2']
        if np.random.uniform() < self.opt['gaussian_noise_prob2']:
            out = random_add_gaussian_noise_pt(
                out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob)
        else:
            out = random_add_poisson_noise_pt(
                out,
                scale_range=self.opt['poisson_scale_range2'],
                gray_prob=gray_noise_prob,
                clip=True,
                rounds=False)

        # JPEG compression + the final sinc filter
        # We also need to resize images to the desired sizes. We group [resize back + sinc filter] together
        # as one operation.
        # We consider two orders:
        #   1. [resize back + sinc filter] + JPEG compression
        #   2. JPEG compression + [resize back + sinc filter]
        # Empirically, we find other combinations (sinc + JPEG + resize) will introduce twisted lines.
        if np.random.uniform() < 0.5:
            # resize back + the final sinc filter
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(out, size=(ori_h, ori_w), mode=mode)
            out = filter2D(out, self.sinc_kernel)
            # JPEG compression
            jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
            out = torch.clamp(out, 0, 1)
            out = self.jpeger(out, quality=jpeg_p)
        else:
            # JPEG compression
            jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
            out = torch.clamp(out, 0, 1)
            out = self.jpeger(out, quality=jpeg_p)
            # resize back + the final sinc filter
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(out, size=(ori_h, ori_w), mode=mode)
            out = filter2D(out, self.sinc_kernel)

        # clamp and round
        lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.
        lq = lq.contiguous()  # for the warning: grad and param do not obey the gradient layout contract

        hq = self.usm_sharpener(self.gt)
        return lq[0], hq[0]

    def __len__(self):
        return len(self.paths)

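# Minimal usage sketch (assumes the meta .list files in cfg exist under dataroot_gt
# and a CUDA device is available, since the degradations run on the GPU):
#     set_seed(0)
#     ds = RealESRGANDataset(cfg, train=True)
#     lq, hq, path = ds[0]  # float32 CHW tensors in [0, 1]
# Each __getitem__ call draws fresh random kernels, so the same index yields a
# different degradation each time.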
def real_esrgan_sampler():
    """
    Generate paired LR/HR samples by passing GT images through the Real-ESRGAN
    two-order degradation pipeline, and write the matching meta .list files.
    It is now used for the DF2K dataset (DIV2K + Flickr2K) plus OST.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_samples', type=int, default=3, help='train: one to many')
    args = parser.parse_args()

    # generate training dataset
    dataset = RealESRGANDataset(cfg, train=True)
    data_dl = data.DataLoader(
        dataset,
        batch_size=1
    )
    print("Train Data:", dataset.data_rt, len(data_dl))
    for number in range(args.num_samples):
        print("=" * 100)
        print(f"Generate round {number}...")

        meta_info = {}
        for sample in tqdm(data_dl):
            lq, hq, path = sample
            # e.g. /home/CORP/hsiang.chen/Project/Datasets/IR/SuperResolution/DIV2K/DIV2K_train_HR/0098.png
            file_name = os.path.basename(path[0])
            gt_folder = os.path.dirname(path[0])  # e.g. .../SuperResolution/DIV2K/DIV2K_train_HR/
            if "DIV2K_train_HR" in gt_folder or "DIV2K_valid_HR" in gt_folder:
                hq_folder = gt_folder.replace("HR", f"pair/SR{number+1}/HR")
                lq_folder = gt_folder.replace("HR", f"pair/SR{number+1}/LR")
            else:
                hq_folder = os.path.join(gt_folder.replace("images", f"images_pair/SR{number+1}"), "HR/")
                lq_folder = os.path.join(gt_folder.replace("images", f"images_pair/SR{number+1}"), "LR/")

            os.makedirs(hq_folder, exist_ok=True)
            os.makedirs(lq_folder, exist_ok=True)

            hq_path = os.path.join(hq_folder, file_name)
            lq_path = os.path.join(lq_folder, file_name)

            save_image(hq[0], hq_path)
            save_image(lq[0], lq_path)

            dset = os.path.relpath(gt_folder, dataset.data_rt).split("/")[0]
            if dset not in meta_info:
                meta_info[dset] = [(lq_path, hq_path)]
            else:
                meta_info[dset].append((lq_path, hq_path))

        for dset, dlist in meta_info.items():
            with open(os.path.join(dataset.data_rt, '{}/metas/{}_train_SR{}.list'.format(dset, dset, number + 1)), 'w') as fp:
                for item in dlist:
                    fp.write('{} {} {}\n'.format(item[0], item[1], None))
            print(os.path.join(dataset.data_rt, '{}/metas/{}_train_SR{}.list'.format(dset, dset, number + 1)), len(dlist))


    # generate testing dataset (a single round)
    dataset = RealESRGANDataset(cfg, train=False)
    data_dl = data.DataLoader(
        dataset,
        batch_size=1
    )
    print("Test Data:", dataset.data_rt, len(data_dl))
    print("=" * 100)
    print("Generate test split...")

    meta_info = {}
    for sample in tqdm(data_dl):
        lq, hq, path = sample
        # e.g. /home/CORP/hsiang.chen/Project/Datasets/IR/SuperResolution/DIV2K/DIV2K_train_HR/0098.png
        file_name = os.path.basename(path[0])
        gt_folder = os.path.dirname(path[0])  # e.g. .../SuperResolution/DIV2K/DIV2K_train_HR/
        if "DIV2K_train_HR" in gt_folder or "DIV2K_valid_HR" in gt_folder:
            hq_folder = gt_folder.replace("HR", "pair/SR/HR")
            lq_folder = gt_folder.replace("HR", "pair/SR/LR")
        else:
            hq_folder = os.path.join(gt_folder.replace("images", "images_pair/SR"), "HR/")
            lq_folder = os.path.join(gt_folder.replace("images", "images_pair/SR"), "LR/")

        os.makedirs(hq_folder, exist_ok=True)
        os.makedirs(lq_folder, exist_ok=True)

        hq_path = os.path.join(hq_folder, file_name)
        lq_path = os.path.join(lq_folder, file_name)

        save_image(hq[0], hq_path)
        save_image(lq[0], lq_path)

        dset = os.path.relpath(gt_folder, dataset.data_rt).split("/")[0]
        if dset not in meta_info:
            meta_info[dset] = [(lq_path, hq_path)]
        else:
            meta_info[dset].append((lq_path, hq_path))

    for dset, dlist in meta_info.items():
        with open(os.path.join(dataset.data_rt, '{}/metas/{}_valid_SR.list'.format(dset, dset)), 'w') as fp:
            for item in dlist:
                fp.write('{} {} {}\n'.format(item[0], item[1], None))
        print(os.path.join(dataset.data_rt, '{}/metas/{}_valid_SR.list'.format(dset, dset)), len(dlist))

def simple_multiscale():
    """
    Generate multi-scale versions of GT images with LANCZOS resampling.
    It is now used for the DF2K dataset (DIV2K + Flickr2K).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='DIV2K/DIV2K_train_HR', help='Input folder')
    parser.add_argument('--output', type=str, default='DIV2K/DIV2K_train_multiscale', help='Output folder')
    args = parser.parse_args()
    os.makedirs(args.output, exist_ok=True)

    # For DF2K, we consider the following three scales,
    # plus the smallest image whose shortest edge is 400.
    scale_list = [0.75, 0.5, 1 / 3]
    shortest_edge = 400

    path_list = sorted(glob.glob(os.path.join(args.input, '*')))
    for path in path_list:
        basename = os.path.splitext(os.path.basename(path))[0]

        img = Image.open(path)
        width, height = img.size
        for idx, scale in enumerate(scale_list):
            print(f'\t{scale:.2f}')
            rlt = img.resize((int(width * scale), int(height * scale)), resample=Image.LANCZOS)
            rlt = rlt.resize((width, height), resample=Image.NEAREST)
            rlt.save(os.path.join(args.output, f'{basename}T{idx}.png'))

        # save the smallest version, whose shortest edge is 400
        if width < height:
            ratio = height / width
            width = shortest_edge
            height = int(width * ratio)
        else:
            ratio = width / height
            height = shortest_edge
            width = int(height * ratio)
        rlt = img.resize((int(width), int(height)), resample=Image.LANCZOS)
        rlt = rlt.resize(img.size, resample=Image.NEAREST)
        rlt.save(os.path.join(args.output, f'{basename}T{idx+1}.png'))


if __name__ == '__main__':
    set_seed(1229)
    # simple version
    # simple_multiscale()

    # Real-ESRGAN for data generation
    real_esrgan_sampler()

# python 2_generate_lowresolution.py --num_samples 3
generate_noise.py
ADDED
|
@@ -0,0 +1,300 @@
import argparse
import glob
import os
from PIL import Image

import cv2
import math
import numpy as np
import os.path as osp
import random
import time
import torch
from tqdm import tqdm

from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
from basicsr.data.transforms import augment
from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
from basicsr.utils.registry import DATASET_REGISTRY
from torch.utils import data as data
from torchvision.transforms.functional import center_crop
import torchvision.transforms as T
from torchvision.utils import save_image

from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt
from basicsr.data.transforms import paired_random_crop
from basicsr.utils import DiffJPEG, USMSharp
from basicsr.utils.img_process_util import filter2D
from basicsr.utils.registry import MODEL_REGISTRY
from collections import OrderedDict
from torch.nn import functional as F

cfg = {
    # dataset info.
    "name": "DF2K+OST",
    "type": "RealESRGANDataset",
    "dataroot_gt": "/home/CORP/hsiang.chen/Project/Datasets/IR/SuperResolution",
    "meta_train": [
        "DIV2K/metas/DIV2K_train_HR.list",
        "Flickr2K/metas/Flickr2K.list",
        # "OST/metas/OST.list",
    ],
    "meta_test": ["DIV2K/metas/DIV2K_valid_HR.list"],

    # the first degradation process
    "resize_prob": [0.2, 0.7, 0.1],  # up, down, keep
    "resize_range": [0.15, 1.5],
    "gaussian_noise_prob": 0.5,
    "noise_range": [1, 30],
    "poisson_scale_range": [0.05, 3],
    "gray_noise_prob": 0.4,
    "jpeg_range": [30, 95],

    "blur_kernel_size": 21,
    "kernel_list": ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'],
    "kernel_prob": [0.45, 0.25, 0.12, 0.03, 0.12, 0.03],
    "sinc_prob": 0.1,
    "blur_sigma": [0.2, 3],
    "betag_range": [0.5, 4],
    "betap_range": [1, 2],

    # the second degradation process
    "second_blur_prob": 0.8,
    "resize_prob2": [0.3, 0.4, 0.3],  # up, down, keep
    "resize_range2": [0.3, 1.2],
    "gaussian_noise_prob2": 0.5,
    "noise_range2": [1, 25],
    "poisson_scale_range2": [0.05, 2.5],
    "gray_noise_prob2": 0.4,
    "jpeg_range2": [30, 95],

    "blur_kernel_size2": 21,
    "kernel_list2": ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'],
    "kernel_prob2": [0.45, 0.25, 0.12, 0.03, 0.12, 0.03],
    "sinc_prob2": 0.1,
    "blur_sigma2": [0.2, 1.5],
    "betag_range2": [0.5, 4],
    "betap_range2": [1, 2],

    "final_sinc_prob": 0.8,

    "gt_size": 512,
    "keep_ratio": True,
    "use_hflip": True,
    "use_rot": False,

    # data loader
    "use_shuffle": True,
    "num_worker_per_gpu": 5,
    "batch_size_per_gpu": 12,
    "dataset_enlarge_ratio": 1,
    "prefetch_mode": None,
}
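# Note: this cfg block mirrors the one in generate_lowresolution.py (only the OST
# meta list is disabled here); most of the degradation keys are unused by
# NoiseDataset below and are presumably kept for config compatibility.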

def set_seed(seed=42):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

@DATASET_REGISTRY.register()
class NoiseDataset(data.Dataset):
    """Dataset used for the denoising model:
    synthesizes paired clean/noisy images with Poisson plus Gaussian noise.
    """
    def __init__(self, opt, train=True, level=None):
        super(NoiseDataset, self).__init__()
        self.opt = opt

        # data root
        self.data_rt = opt['dataroot_gt']

        # dataload
        self.train = train
        if self.train:
            self.metas = opt['meta_train']
        else:
            self.metas = opt['meta_test']

        self.paths = []
        for meta in self.metas:
            with open(os.path.join(self.data_rt, meta)) as fin:
                self.paths += [line.strip().split(' ')[1] for line in fin]

        # hyperparameter
        self.device = torch.cuda.current_device()
        self.jpeger = DiffJPEG(differentiable=False).to(self.device)  # simulate JPEG compression artifacts
        self.usm_sharpener = USMSharp().to(self.device)  # do usm sharpening
        self.resize = opt['gt_size']
        self.keep_ratio = opt['keep_ratio']

        # function
        self.crop = T.RandomCrop((self.resize, self.resize))
        self.flip = T.RandomHorizontalFlip()
        self.transform = T.Compose(
            [
                # T.ToDtype(torch.float32, scale=True),  # only supported in torch 2.x
                T.ToTensor(),
            ]
        )

        # noise
        self.sigma = [0.0588, 0.0784, 0.098, 0.1451, 0.1961]  # 5 levels: 15, 20, 25, 37, 50 (on a 0-255 scale)
        if level:
            self.level = [level]
        else:
            self.level = [1, 2, 3, 4, 5]

    def __getitem__(self, index):
        # -------------------------------- Load gt images -------------------------------- #
        gt_path = self.paths[index]
        img_gt = Image.open(gt_path).convert("RGB")

        # -------------------------------- Image Process --------------------------------
        # resize
        h, w = img_gt.height, img_gt.width
        if self.keep_ratio:
            ratio = self.resize / min(h, w)
            h_new, w_new = round(h * ratio * 1.2), round(w * ratio * 1.2)
            img_gt = img_gt.resize((w_new, h_new), resample=Image.LANCZOS)
        else:
            img_gt = img_gt.resize((self.resize, self.resize), resample=Image.LANCZOS)
        # crop
        img_gt = self.crop(img_gt)
        # flip (only for train)
        if self.train:
            img_gt = self.flip(img_gt)
        # transform to tensor
        img_gt = self.transform(img_gt).to(torch.float32)

        # -------------------------------- Generate Noise --------------------------------
        # Poisson noise: treat each pixel as a photon count with mean `peak * intensity`
        peak = 255
        lam = torch.clamp(img_gt, 0, 1) * peak
        counts = torch.poisson(lam)
        img_poisson = torch.clamp(counts / float(peak), 0.0, 1.0)

        # Gaussian noise: additive, with sigma picked from the chosen level(s)
        level = random.choice(self.level)
        noise = torch.randn(size=img_poisson.size())
        img_poisson_gaussian = torch.clamp(img_poisson + self.sigma[level - 1] * noise, 0., 1.)

        return img_poisson_gaussian, img_gt, gt_path

    def __len__(self):
        return len(self.paths)

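# Illustrative check (not called anywhere): the normalized sigmas above map
# back to the 8-bit noise levels named in the comment.
def _demo_sigma_levels():
    sigmas = [0.0588, 0.0784, 0.098, 0.1451, 0.1961]
    return [round(s * 255) for s in sigmas]  # -> [15, 20, 25, 37, 50]
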
def poisson_gaussian_sampler():
    """
    It is currently used for the DF2K dataset (DIV2K + Flickr2K).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--level', type=int, default=None, help='train: one to many')
    args = parser.parse_args()

    if args.level:
        level = [args.level]  # wrap in a list so the loop below iterates once
    else:
        level = [1, 3, 5]

    # generate training dataset
    for number in level:
        print("=" * 100)
        print(f"Generate Noise Level {number}...")

        dataset = NoiseDataset(cfg, train=True, level=number)
        data_dl = data.DataLoader(
            dataset,
            batch_size=1
        )
        print("Train Data:", dataset.data_rt, len(data_dl))
        meta_info = {}
        for sample in tqdm(data_dl):
            lq, hq, path = sample
            # /home/CORP/hsiang.chen/Project/Datasets/IR/SuperResolution/DIV2K/DIV2K_train_HR/0098.png
            file_name = os.path.basename(path[0])
            gt_folder = os.path.dirname(path[0])  # /home/CORP/hsiang.chen/Project/Datasets/IR/SuperResolution/DIV2K/DIV2K_train_HR/
            if "DIV2K_train_HR" in gt_folder or "DIV2K_valid_HR" in gt_folder:
                hq_folder = gt_folder.replace("HR", f"pair/Noise_L{number}/HQ")
                lq_folder = gt_folder.replace("HR", f"pair/Noise_L{number}/LQ")
            else:
                hq_folder = os.path.join(gt_folder.replace("images", f"images_pair/Noise_L{number}"), "HQ/")
                lq_folder = os.path.join(gt_folder.replace("images", f"images_pair/Noise_L{number}"), "LQ/")

            os.makedirs(hq_folder, exist_ok=True)
            os.makedirs(lq_folder, exist_ok=True)

            hq_path = os.path.join(hq_folder, file_name)
            lq_path = os.path.join(lq_folder, file_name)

            save_image(hq[0], hq_path)
            save_image(lq[0], lq_path)

            dset = os.path.relpath(gt_folder, dataset.data_rt).split("/")[0]
            if dset not in meta_info:
                meta_info[dset] = [(lq_path, hq_path)]
            else:
                meta_info[dset].append((lq_path, hq_path))

        for dset, dlist in meta_info.items():
            with open(os.path.join(dataset.data_rt, '{}/metas/{}_train_Noise_L{}.list'.format(dset, dset, number)), 'w') as fp:
                for item in dlist:
                    fp.write('{} {} {}\n'.format(item[0], item[1], None))
            print(os.path.join(dataset.data_rt, '{}/metas/{}_train_Noise_L{}.list'.format(dset, dset, number)), len(dlist))

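    # Each meta line pairs a generated LQ image with its HQ counterpart; the
    # third field is an unused placeholder that is literally written as "None".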
    # generate testing dataset
    dataset = NoiseDataset(cfg, train=False)
    data_dl = data.DataLoader(
        dataset,
        batch_size=1
    )
    print("Test Data:", dataset.data_rt, len(data_dl))
    print("=" * 100)
    print("Generate Testing Noise...")

    meta_info = {}
    for sample in tqdm(data_dl):
        lq, hq, path = sample
        # /home/CORP/hsiang.chen/Project/Datasets/IR/SuperResolution/DIV2K/DIV2K_train_HR/0098.png
        file_name = os.path.basename(path[0])
        gt_folder = os.path.dirname(path[0])  # /home/CORP/hsiang.chen/Project/Datasets/IR/SuperResolution/DIV2K/DIV2K_train_HR/
        if "DIV2K_train_HR" in gt_folder or "DIV2K_valid_HR" in gt_folder:
            hq_folder = gt_folder.replace("HR", "pair/Noise/HQ")
            lq_folder = gt_folder.replace("HR", "pair/Noise/LQ")
        else:
            hq_folder = os.path.join(gt_folder.replace("images", "images_pair/Noise"), "HQ/")
            lq_folder = os.path.join(gt_folder.replace("images", "images_pair/Noise"), "LQ/")

        os.makedirs(hq_folder, exist_ok=True)
        os.makedirs(lq_folder, exist_ok=True)

        hq_path = os.path.join(hq_folder, file_name)
        lq_path = os.path.join(lq_folder, file_name)

        save_image(hq[0], hq_path)
        save_image(lq[0], lq_path)

        dset = os.path.relpath(gt_folder, dataset.data_rt).split("/")[0]
        if dset not in meta_info:
            meta_info[dset] = [(lq_path, hq_path)]
        else:
            meta_info[dset].append((lq_path, hq_path))

    for dset, dlist in meta_info.items():
        with open(os.path.join(dataset.data_rt, '{}/metas/{}_valid_Noise.list'.format(dset, dset)), 'w') as fp:
            for item in dlist:
                fp.write('{} {} {}\n'.format(item[0], item[1], None))
        print(os.path.join(dataset.data_rt, '{}/metas/{}_valid_Noise.list'.format(dset, dset)), len(dlist))

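# Minimal usage sketch (paths are hypothetical placeholders): the keys below
# are the only ones NoiseDataset actually reads from `opt`. Note the
# constructor also expects a CUDA device to be available, since it moves the
# DiffJPEG / USMSharp helpers onto the current GPU.
def _demo_build_noise_dataset():
    demo_opt = {
        'dataroot_gt': '/path/to/datasets',         # hypothetical root
        'meta_train': ['DIV2K/metas/train.list'],   # hypothetical meta files
        'meta_test': ['DIV2K/metas/valid.list'],
        'gt_size': 512,
        'keep_ratio': True,
    }
    return NoiseDataset(demo_opt, train=True, level=3)
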
if __name__ == '__main__':
    set_seed(1229)
    # poisson_gaussian for data generation
    poisson_gaussian_sampler()

# python 3_generate_noise.py