AaronCIH committed on
Commit
d8b338f
·
verified ·
1 Parent(s): 3fb0bb1

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. 0_generate_list.py +905 -0
  2. 1_generate_iqa.py +447 -0
  3. generate_lowresolution.py +537 -0
  4. generate_noise.py +308 -0
0_generate_list.py ADDED
@@ -0,0 +1,905 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os, sys
2
+ import shutil
3
+ import numpy as np
4
+ from pathlib import Path
5
+
# Root of all image-restoration datasets; every section below resolves its
# dataset folders relative to this path.
# FIX: was an f-string with no placeholders (ruff F541) — plain literal.
base_rt = "/home/CORP/hsiang.chen/Project/Datasets/IR"
7
+
8
"""
Deblur: GoPro, HIDE, RealBlur
"""
# GoPro
# pre-process for GoPro, dataset: https://seungjunnah.github.io/Datasets/gopro
# Seungjun Nah, Tae Hyun Kim, and Kyoung Mu Lee. Deep multi-scale convolutional neural network for dynamic scene deblurring. In CVPR, 2017
rt = os.path.join(base_rt, "Deblur/GoPro")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
# Substring tokens used file-wide to recognise image files.
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")
for dset in ['train', 'test']:
    dset_pth = os.path.join(rt, dset)
    list_file = []
    total_ct = 0
    for sample_folder in os.listdir(dset_pth):  # sequence dirs, e.g. GOPRXXXX_XX_XX
        folder_pth = os.path.join(dset_pth, sample_folder)
        blur_folder = os.path.join(folder_pth, 'blur')
        sharp_folder = os.path.join(folder_pth, 'sharp')
        for sample in os.listdir(blur_folder):
            if any(tok in sample for tok in IMG_TOKENS):
                # blur and sharp frames share the same file name
                list_file.append((os.path.join(blur_folder, sample),
                                  os.path.join(sharp_folder, sample)))
                total_ct += 1

    # each line: "<input> <gt> None"
    with open(os.path.join(meta_folder, f"{dset}.list"), 'w') as fp:
        for blur_file, sharp_file in list_file:
            fp.write(f"{blur_file} {sharp_file} {None}\n")
    print(dset_pth, total_ct)
36
+
37
# HIDE
# pre-process for HIDE, dataset: https://github.com/joanshen0508/HA_deblur
# Ziyi Shen, Wenguan Wang, Xiankai Lu, Jianbing Shen, Haibin Ling, Tingfa Xu, and Ling Shao. Human-aware motion deblurring. In ICCV, 2019.
# ==============================================
# HIDE/
#   |- train/{image, gt}
#   |- test/{image, gt}
# ==============================================
rt = os.path.join(base_rt, "Deblur/HIDE")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")


def _collect_hide_pairs(blur_folder, sharp_folder, out):
    """Append a (blur, sharp) path pair for every image file in blur_folder; return the count added."""
    added = 0
    for sample in os.listdir(blur_folder):
        if any(tok in sample for tok in IMG_TOKENS):
            out.append((os.path.join(blur_folder, sample),
                        os.path.join(sharp_folder, sample)))
            added += 1
    return added


for dset in ['train', 'test']:
    dset_pth = os.path.join(rt, dset)
    list_file = []
    total_ct = 0
    if dset == 'train':
        # training blur images sit directly under train/; GTs under HIDE/GT
        total_ct += _collect_hide_pairs(dset_pth, os.path.join(rt, 'GT'), list_file)
    else:
        # test images are split into two shot-type folders; GTs under HIDE/GT
        for sample_folder in ['test-close-ups', 'test-long-shot']:
            total_ct += _collect_hide_pairs(os.path.join(dset_pth, sample_folder),
                                            os.path.join(rt, 'GT'), list_file)

    with open(os.path.join(meta_folder, f"{dset}.list"), 'w') as fp:
        for blur_file, sharp_file in list_file:
            fp.write(f"{blur_file} {sharp_file} {None}\n")
    print(dset_pth, total_ct)
77
+
78
# RealBlur
# pre-process for RealBlur-J,R, dataset: https://github.com/rimchang/RealBlur
# Jaesung Rim, Haeyun Lee, Jucheol Won, and Sunghyun Cho. Real-world blur dataset for learning and benchmarking deblurring algorithms. In ECCV, 2020.
rt = os.path.join(base_rt, "Deblur/")

realblur_j_test_txt = os.path.join(rt, "RealBlur-J_ECC_IMCORR_centroid_itensity_ref/RealBlur_J_test_list.txt")
realblur_r_test_txt = os.path.join(rt, "RealBlur-R_BM3D_ECC_IMCORR_centroid_itensity_ref/RealBlur_R_test_list.txt")

for txt in [realblur_j_test_txt, realblur_r_test_txt]:
    data_list = []
    total_ct = 0
    miss_ct = 0
    with open(txt) as fin:
        # each line: "<gt> <image>"; a single token means no GT is available
        for line in fin:
            parts = line.strip().split()
            if len(parts) == 1:  # no gt
                data_list.append([None, os.path.join(rt, parts[0])])
                miss_ct += 1
            else:
                data_list.append([os.path.join(rt, parts[1]), os.path.join(rt, parts[0])])  # image, gt
                total_ct += 1

    set_dict = {realblur_j_test_txt: os.path.join(rt, "RealBlur-J_ECC_IMCORR_centroid_itensity_ref"),
                realblur_r_test_txt: os.path.join(rt, "RealBlur-R_BM3D_ECC_IMCORR_centroid_itensity_ref")}
    meta_folder = os.path.join(set_dict[txt], "metas")
    os.makedirs(meta_folder, exist_ok=True)
    with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
        for first, second in data_list:
            fp.write(f"{first} {second} {None}\n")

    print(os.path.join(meta_folder, 'test.list'), total_ct, miss_ct)
109
+
110
"""
Dehaze: 4kID, NH-Haze, OTS, SOTS
"""
# OTS
# pre-process for OTS, dataset: https://sites.google.com/view/reside-dehaze-datasets/reside-%CE%B2?authuser=0
# Boyi Li, Wenqi Ren, Dengpan Fu, Dacheng Tao, Dan Feng, Wenjun Zeng, and Zhangyang Wang. Benchmarking single-image dehazing and beyond. TIP, 2018.
rt = os.path.join(base_rt, "Dehaze/OTS")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")
for dset in ['hazy']:
    dset_pth = os.path.join(rt, dset)
    list_file = []
    total_ct = 0
    # hazy files are named like 0025_0.8_0.1.jpg -> GT is 0025.jpg
    for sub_folder in os.listdir(dset_pth):
        input_folder = os.path.join(dset_pth, sub_folder)
        gt_folder = os.path.join(rt, 'gt')
        for sample in os.listdir(input_folder):
            if any(tok in sample for tok in IMG_TOKENS):
                gt_name = f"{sample.split('_')[0]}.jpg"
                list_file.append((os.path.join(input_folder, sample),
                                  os.path.join(gt_folder, gt_name)))
                total_ct += 1

    set_dict = {"hazy": 'train'}
    with open(os.path.join(meta_folder, f"{set_dict[dset]}.list"), 'w') as fp:
        for lq, hq in list_file:
            fp.write(f"{lq} {hq} {None}\n")
    print(dset_pth, total_ct)
139
+
140
+
141
# SOTS
# pre-process for SOTS, dataset: https://sites.google.com/view/reside-dehaze-datasets/reside-standard
# Boyi Li, Wenqi Ren, Dengpan Fu, Dacheng Tao, Dan Feng, Wenjun Zeng, and Zhangyang Wang. Benchmarking single-image dehazing and beyond. TIP, 2018.
rt = os.path.join(base_rt, "Dehaze/SOTS")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")
for dset in ['outdoor']:
    dset_pth = os.path.join(rt, dset)
    list_file = []
    total_ct = 0
    input_folder = os.path.join(dset_pth, 'hazy')
    gt_folder = os.path.join(dset_pth, 'gt')
    for sample in os.listdir(input_folder):
        if any(tok in sample for tok in IMG_TOKENS):
            # hazy name "<id>_...": GT is "<id>.png"
            gt_name = f"{sample.split('_')[0]}.png"
            list_file.append((os.path.join(input_folder, sample),
                              os.path.join(gt_folder, gt_name)))
            total_ct += 1

    set_dict = {"outdoor": 'test'}
    with open(os.path.join(meta_folder, f"{set_dict[dset]}.list"), 'w') as fp:
        for lq, hq in list_file:
            fp.write(f"{lq} {hq} {None}\n")
    print(input_folder, total_ct)
165
+
166
# 4kID
# pre-process for 4kID, dataset: https://github.com/zzr-idam/4KDehazing
# Ultra-High-Definition Image Dehazing via Multi-Guided Bilateral Learning, CVPR21.
rt = os.path.join(base_rt, "Dehaze/4kID")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")
for dset in ['4KDehazing', '4KDehazing_test']:  # train, test
    dset_pth = os.path.join(rt, dset)
    list_file = []
    total_ct = 0
    input_folder = os.path.join(dset_pth, 'inputs')
    gt_folder = os.path.join(dset_pth, 'groundtrues')  # sic: folder name as shipped
    for sample in os.listdir(input_folder):
        if any(tok in sample for tok in IMG_TOKENS):
            list_file.append((os.path.join(input_folder, sample),
                              os.path.join(gt_folder, sample)))
            total_ct += 1

    set_dict = {"4KDehazing": 'train', "4KDehazing_test": 'test'}
    with open(os.path.join(meta_folder, f"{set_dict[dset]}.list"), 'w') as fp:
        for lq, hq in list_file:
            fp.write(f"{lq} {hq} {None}\n")
    print(dset_pth, total_ct)
190
+
191
+
192
# Unann
# pre-process for unann, dataset: https://sites.google.com/view/reside-dehaze-datasets/reside-%CE%B2?authuser=0
rt = os.path.join(base_rt, "Dehaze/UnannotatedHazyImages")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")
list_file = []
total_ct = 0
input_folder = os.path.join(rt, "Image")
for sample in os.listdir(input_folder):
    if any(tok in sample for tok in IMG_TOKENS):
        list_file.append(os.path.join(input_folder, sample))
        total_ct += 1

# unannotated: no GT, so the gt slot is written as None
with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
    for img_path in list_file:
        fp.write(f"{img_path} {None} {None}\n")
print(rt, total_ct)
210
+
211
# NH-Haze
rt = os.path.join(base_rt, "Dehaze/NH-Haze")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")
list_file = []
total_ct = 0
input_folder = os.path.join(rt, "images")
for sample in os.listdir(input_folder):
    if any(tok in sample for tok in IMG_TOKENS):
        list_file.append(os.path.join(input_folder, sample))
        total_ct += 1

# no GT available: gt slot written as None
with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
    for img_path in list_file:
        fp.write(f"{img_path} {None} {None}\n")
print(rt, total_ct)
228
+
229
"""
Denoise: BSD68, BSD400, CBSD68, KodaK, McMaster, Set12, SIDD, Urban100, WaterlooED
"""
"""
pre-process for denoise dataset
1. BSD400:
* David Martin, Charless Fowlkes, Doron Tal, and Jitendra Malik. A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001, volume 2, pages 416-423. IEEE, 2001.
* https://github.com/smartboy110/denoising-datasets/tree/main
2. WED:
* Kede Ma, Zhengfang Duanmu, Qingbo Wu, Zhou Wang, Hongwei Yong, Hongliang Li, and Lei Zhang. Waterloo exploration database: New challenges for image quality assessment models. IEEE Transactions on Image Processing, 26(2):1004-1016, 2016.
* https://kedema.org/project/exploration/index.html
3. BSD68 / 4. CBSD68: same BSD source as above, https://github.com/smartboy110/denoising-datasets/tree/main
5. Urban100:
* Jia-Bin Huang, Abhishek Singh, and Narendra Ahuja. Single image super-resolution from transformed self-exemplars. In CVPR, pages 5197-5206, 2015.
* https://github.com/jbhuang0604/SelfExSR
6. Kodak:
* Rich Franzen. Kodak lossless true color image suite. source: http://r0k.us/graphics/kodak, 4(2), 1999.
* https://www.kaggle.com/datasets/sherylmehta/kodak-dataset
7. McMaster, Set12
"""

# NOTE: `rt` and `dataset` are also read by the pair-data section that follows.
rt = os.path.join(base_rt, "Denoise")
dataset = ['BSD68', 'BSD400', 'CBSD68', 'Kodak',
           'McMaster', 'Set12', 'Urban100', 'WaterlooED']

IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")

# Clean-image-only lists: the path goes into the middle (GT) slot.
for dset in dataset:
    list_file = []
    total_ct = 0
    dset_pth = os.path.join(rt, dset)
    meta_folder = os.path.join(dset_pth, "metas")
    os.makedirs(meta_folder, exist_ok=True)
    input_folder = os.path.join(rt, f"{dset}/image")
    for sample in os.listdir(input_folder):
        if any(tok in sample for tok in IMG_TOKENS):
            list_file.append(os.path.join(input_folder, sample))
            total_ct += 1

    with open(os.path.join(meta_folder, f"{dset}.list"), 'w') as fp:
        for img_path in list_file:
            fp.write(f"{None} {img_path} {None}\n")
    print(input_folder, total_ct)
277
+
278
# for pair data
# Writes one "<dset>_<distortion>.list" of (LQ, HQ) pairs per distortion type.
# BUGFIX(review): originally `list_file`/`total_ct` were reset once per dataset
# while the .list file was written per distortion, so every successive
# distortion list also contained all earlier distortions' pairs. The
# accumulators are now reset inside the distortion loop.
for dset in dataset:
    dset_pth = os.path.join(rt, dset)
    meta_folder = os.path.join(dset_pth, "metas")
    os.makedirs(meta_folder, exist_ok=True)
    input_folder = os.path.join(rt, '%s/image_pair' % (dset))
    for distortion in os.listdir(input_folder):
        image_folder = os.path.join(input_folder, distortion)
        hq_folder = os.path.join(image_folder, "HQ")
        lq_folder = os.path.join(image_folder, "LQ")
        # per-distortion accumulators (see BUGFIX note above)
        list_file = []
        total_ct = 0
        for ct, sample in enumerate(os.listdir(hq_folder)):
            if ".png" in sample or ".jpg" in sample or ".jpeg" in sample or ".bmp" in sample or ".tif" in sample or ".JPG" in sample or ".PNG" in sample:
                hq_file = os.path.join(hq_folder, sample)
                lq_file = os.path.join(lq_folder, sample)  # LQ shares the HQ file name
                list_file.append((lq_file, hq_file))
                total_ct += 1

        with open(os.path.join(meta_folder, f"{dset}_{distortion}.list"), 'w') as fp:
            for item in list_file:
                fp.write('{} {} {}\n'.format(item[0], item[1], None))
        print(input_folder, total_ct)
302
+
303
# SIDD
rt = os.path.join(base_rt, "Denoise/SIDD")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")
for dset in ['train', 'test']:
    list_file = []
    total_ct = 0
    dset_folder = os.path.join(rt, dset)
    if dset == 'train':
        # train/: one folder per scene; noisy/GT sRGB frames share the folder,
        # GT name is the noisy name with NOISY -> GT
        for folder in os.listdir(dset_folder):
            file_folder = os.path.join(dset_folder, folder)
            for sample in os.listdir(file_folder):
                if "NOISY_SRGB" in sample:
                    list_file.append([os.path.join(file_folder, sample),
                                      os.path.join(file_folder, sample.replace("NOISY", "GT"))])
                    total_ct += 1
    elif dset == 'test':
        # test/: flat NOISY/ and GT/ folders
        image_folder = os.path.join(dset_folder, 'NOISY')
        gt_folder = os.path.join(dset_folder, "GT")
        for sample in os.listdir(image_folder):
            if any(tok in sample for tok in IMG_TOKENS):
                list_file.append([os.path.join(image_folder, sample),
                                  os.path.join(gt_folder, sample.replace("NOISY", "GT"))])
                total_ct += 1

    with open(os.path.join(meta_folder, f"{dset}.list"), 'w') as fp:
        for noisy_file, gt_file in list_file:
            fp.write(f"{noisy_file} {gt_file} {None}\n")
    print(dset_folder, total_ct)
335
+
336
+
337
"""
Derain: LHPRain, Practical, Rain100L, RainDS, RainTrianL, UHD-Rain, RainDrop
"""
# RainDS
rt = os.path.join(base_rt, "Derain/RainDS")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")
for dset in ['RainDS_syn', 'RainDS_real']:
    dataset_folder = os.path.join(rt, dset)
    # split folder names differ between the synthetic and real subsets
    set_list = ['train', 'test'] if dset == 'RainDS_syn' else ['train_set', 'test_set']
    for subset in set_list:
        input_folder = os.path.join(dataset_folder, subset)
        for raintype in ['rainstreak', 'raindrop', 'rainstreak_raindrop']:
            rain_folder = os.path.join(input_folder, raintype)
            clear_folder = os.path.join(input_folder, 'gt')
            list_file = []
            total_ct = 0
            # file-name token per rain type; GT replaces it with "norain":
            #   rd-rain-97 / rain-97 / rd-97           -> norain-97
            #   pie-rd-rain-97 / pie-rain-97 / pie-rd-97 -> pie-norain-97
            rain_dict = {'rainstreak_raindrop': 'rd-rain', 'rainstreak': 'rain', 'raindrop': 'rd'}
            for sample in os.listdir(rain_folder):
                if any(tok in sample for tok in IMG_TOKENS):
                    gt_name = sample.replace(rain_dict[raintype], "norain")
                    list_file.append((os.path.join(rain_folder, sample),
                                      os.path.join(clear_folder, gt_name)))
                    total_ct += 1

            with open(os.path.join(meta_folder, f"{dset}_{subset}_{raintype}.list"), 'w') as fp:
                for lq, hq in list_file:
                    fp.write(f"{lq} {hq} {None}\n")
            print(rain_folder, total_ct)
371
+
372
# LHPRain
# pre-process for LHPRain: https://github.com/yunguo224/LHP-Rain
# From Sky to the Ground: A Large-scale Benchmark and Simple Baseline Towards Real Rain Removal (ICCV 2023)
rt = os.path.join(base_rt, "Derain/LHPRain")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")
for dset in ['train', 'val', 'test']:
    input_folder = os.path.join(rt, f"input/{dset}")
    gt_folder = os.path.join(rt, f"gt/{dset}")
    list_file = []
    total_ct = 0
    for sample in os.listdir(input_folder):
        if any(tok in sample for tok in IMG_TOKENS):
            # input and GT share the same file name
            list_file.append((os.path.join(input_folder, sample),
                              os.path.join(gt_folder, sample)))
            total_ct += 1

    with open(os.path.join(meta_folder, f"{dset}.list"), 'w') as fp:
        for lq, hq in list_file:
            fp.write(f"{lq} {hq} {None}\n")
    print(input_folder, total_ct)
394
+
395
# Practical
# pre-process for Practical: https://github.com/ZhangXinNan/RainDetectionAndRemoval
# Wenhan Yang, Robby T Tan, Jiashi Feng, Jiaying Liu, Zongming Guo, and Shuicheng Yan. Deep joint rain detection and removal from a single image. In CVPR, 2017
rt = os.path.join(base_rt, "Derain/Practical")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")
# real-world rainy images only — no ground truth
input_folder = os.path.join(rt, "image")
list_file = []
total_ct = 0
for sample in os.listdir(input_folder):
    if any(tok in sample for tok in IMG_TOKENS):
        list_file.append(os.path.join(input_folder, sample))
        total_ct += 1

with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
    for img_path in list_file:
        fp.write(f"{img_path} {None} {None}\n")
print(rt, total_ct)
415
+
416
+
417
# Rain100L
# pre-process for Rain100L: https://github.com/shangwei5/BRN
# Wenhan Yang, Robby T Tan, Jiashi Feng, Jiaying Liu, Zongming Guo, and Shuicheng Yan. Deep joint rain detection and removal from a single image. In CVPR, pages 1357-1366, 2017.
# files per scene: norain-xxx.png / rain-xxx.png / rainregion-xxx.png / rainstreak-xxx.png
rt = os.path.join(base_rt, "Derain/Rain100L")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")

image_folder = os.path.join(rt, 'image')
gt_folder = os.path.join(rt, 'gt')
list_file = []
total_ct = 0
for sample in os.listdir(image_folder):
    if any(tok in sample for tok in IMG_TOKENS):
        # rain-xxx.png -> norain-xxx.png
        list_file.append((os.path.join(image_folder, sample),
                          os.path.join(gt_folder, sample.replace('rain', 'norain'))))
        total_ct += 1

with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
    for lq, hq in list_file:
        fp.write(f"{lq} {hq} {None}\n")
print(rt, total_ct)
443
+
444
# Rain200L
# pre-process for RainTrainL(Rain200L): https://github.com/shangwei5/BRN
# Yang W, Tan RT, Feng J, Liu J, Guo Z, Yan S. Deep joint rain detection and removal from a single image. In IEEE CVPR 2017.
# files per scene: norain-xxx.png / rain-xxx.png / rainregion-xxx.png / rainstreak-xxx.png
rt = os.path.join(base_rt, "Derain/RainTrainL")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")
image_folder = os.path.join(rt, 'image')
gt_folder = os.path.join(rt, 'gt')
list_file = []
total_ct = 0
for sample in os.listdir(image_folder):
    if any(tok in sample for tok in IMG_TOKENS):
        # rain-xxx.png -> norain-xxx.png
        list_file.append((os.path.join(image_folder, sample),
                          os.path.join(gt_folder, sample.replace('rain', 'norain'))))
        total_ct += 1

with open(os.path.join(meta_folder, 'train.list'), 'w') as fp:
    for lq, hq in list_file:
        fp.write(f"{lq} {hq} {None}\n")
print(rt, total_ct)
469
+
470
# UHD-Rain
# pre-process for UHD-Rain: https://github.com/wlydlut/uhddip
# UHDDIP: Ultra-High-Definition Restoration: New Benchmarks and A Dual Interaction Prior-Driven Solution
rt = os.path.join(base_rt, "Derain/UHD-Rain")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)

for dset in ['training_set', 'testing_set']:  # train, test
    dset_path = os.path.join(rt, dset)
    input_folder = os.path.join(dset_path, 'input')
    gt_folder = os.path.join(dset_path, 'gt')
    list_file = []
    for sample in os.listdir(input_folder):
        # input and GT share the same file name (no extension filter here,
        # matching the original behavior)
        input_file = os.path.join(input_folder, sample)
        gt_file = os.path.join(gt_folder, sample)
        list_file.append((input_file, gt_file))

    set_dict = {'training_set': 'train', 'testing_set': 'test'}
    with open(os.path.join(meta_folder, '{}.list'.format(set_dict[dset])), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    # BUGFIX: originally printed `ct + 1` from the loop variable, which raises
    # NameError when input_folder is empty; len(list_file) is the same count
    # and is always defined.
    print(rt, len(list_file))
492
+
493
# Rain-drop
# pre-process for Rain-drop: https://github.com/rui1996/DeRaindrop
# Attentive Generative Adversarial Network for Raindrop Removal from A Single Image (CVPR'2018 Highlight)
rt = os.path.join(base_rt, "Derain/RainDrop")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)

for dset in ['train', 'test_a', 'test_b']:  # train, test
    dset_path = os.path.join(rt, dset)
    input_folder = os.path.join(dset_path, 'data')
    gt_folder = os.path.join(dset_path, 'gt')
    list_file = []
    for sample in os.listdir(input_folder):
        # GT name replaces the "rain" token with "clean"
        input_file = os.path.join(input_folder, sample)
        gt_file = os.path.join(gt_folder, sample.replace("rain", "clean"))
        list_file.append((input_file, gt_file))

    with open(os.path.join(meta_folder, 'Raindrop_{}.list'.format(dset)), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    # BUGFIX: originally printed `ct + 1` from the loop variable, which raises
    # NameError when input_folder is empty; len(list_file) is equivalent and
    # always defined.
    print(rt, len(list_file))
514
+
515
"""
Desnow: Snow100k, UHD-Snow
"""
# Snow100k: download: https://pan.baidu.com/s/1Y8fq8qQjC0YK5DTktYPfbQ?pwd=nyop#list/path=/sharelink688030094-540249285017805/snow100k&parentPath=/sharelink688030094-540249285017805
# Training set (50,000 images, 7.8GB), Test set (50,000 images, 7.8GB), Realistic snowy images (1,329 images, 67MB).
rt = os.path.join(base_rt, "Desnow/Snow100k")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")

# Normalise file names: replace blanks with underscores so the space-separated
# .list format stays parseable.
# NOTE(review): assumes the renamed target does not already exist — confirm.
for data in list(Path(rt).rglob("*.[jp][pn]g")):
    name = str(data)
    if " " in name:
        os.rename(name, name.replace(" ", "_"))

# training split: all/{synthetic, gt}, paired by file name
train_rt = os.path.join(rt, 'all')
train_gt_rt = os.path.join(train_rt, 'gt')
train_img_rt = os.path.join(train_rt, 'synthetic')
list_file = []
total_ct = 0
for sample in os.listdir(train_img_rt):
    if any(tok in sample for tok in IMG_TOKENS):
        list_file.append((os.path.join(train_img_rt, sample),
                          os.path.join(train_gt_rt, sample)))
        total_ct += 1

with open(os.path.join(meta_folder, 'train.list'), 'w') as fp:
    for img_, gt_ in list_file:
        fp.write(f"{img_} {gt_} {None}\n")
print(train_rt, total_ct)

# synthetic test splits (one folder per severity, as extracted from the archive)
test_rt = os.path.join(rt, 'media/jdway/GameSSD/overlapping/test')
for dset in os.listdir(test_rt):
    test_folder = os.path.join(test_rt, dset)
    test_gt_rt = os.path.join(test_folder, 'gt')
    test_img_rt = os.path.join(test_folder, 'synthetic')
    list_file = []
    total_ct = 0
    for sample in os.listdir(test_img_rt):
        if any(tok in sample for tok in IMG_TOKENS):
            list_file.append((os.path.join(test_img_rt, sample),
                              os.path.join(test_gt_rt, sample)))
            total_ct += 1

    # list name keyed by the last character of the severity folder name
    with open(os.path.join(meta_folder, f"test_{dset[-1]}.list"), 'w') as fp:
        for img_, gt_ in list_file:
            fp.write(f"{img_} {gt_} {None}\n")
    print(test_folder, total_ct)

# real-world testing (no ground truth)
test_rt = os.path.join(rt, 'realistic')
list_file = []
total_ct = 0
for sample in os.listdir(test_rt):
    if any(tok in sample for tok in IMG_TOKENS):
        list_file.append(os.path.join(test_rt, sample))
        total_ct += 1

with open(os.path.join(meta_folder, 'test_realistic.list'), 'w') as fp:
    for img_ in list_file:
        fp.write(f"{img_} {None} {None}\n")
print(test_rt, total_ct)
584
+
585
+
586
# UHD-Snow
# pre-process for UHD-Snow: https://github.com/wlydlut/uhddip
# UHDDIP: Ultra-High-Definition Restoration: New Benchmarks and A Dual Interaction Prior-Driven Solution
rt = os.path.join(base_rt, "Desnow/UHD-Snow")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")
for dset in ['training_set', 'testing_set']:  # train, test
    dset_path = os.path.join(rt, dset)
    input_folder = os.path.join(dset_path, 'input')
    gt_folder = os.path.join(dset_path, 'gt')
    list_file = []
    total_ct = 0
    for sample in os.listdir(input_folder):
        if any(tok in sample for tok in IMG_TOKENS):
            # input and GT share the same file name
            list_file.append((os.path.join(input_folder, sample),
                              os.path.join(gt_folder, sample)))
            total_ct += 1

    set_dict = {'training_set': 'train', 'testing_set': 'test'}
    with open(os.path.join(meta_folder, f"{set_dict[dset]}.list"), 'w') as fp:
        for lq, hq in list_file:
            fp.write(f"{lq} {hq} {None}\n")
    print(dset_path, total_ct)
610
+
611
"""
Lowlight: DICM, LIME, LOL, MEF, NPE, VV
"""
# LOL
# pre-process for LOL: https://github.com/fediory/hvi-cidnet
# Chen Wei, Wenjing Wang, Wenhan Yang, and Jiaying Liu. Deep retinex decomposition for low-light enhancement. In BMVC, 2018
rt = os.path.join(base_rt, "LowLight/LOL")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
IMG_TOKENS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")
for dset in ['our485', 'eval15']:  # train, test
    dset_path = os.path.join(rt, dset)
    input_folder = os.path.join(dset_path, "low")
    gt_folder = os.path.join(dset_path, "high")
    list_file = []
    total_ct = 0
    for sample in os.listdir(input_folder):
        if any(tok in sample for tok in IMG_TOKENS):
            # low and high exposures share the same file name
            list_file.append((os.path.join(input_folder, sample),
                              os.path.join(gt_folder, sample)))
            total_ct += 1

    set_dict = {'our485': 'train', 'eval15': 'test'}
    with open(os.path.join(meta_folder, f"{set_dict[dset]}.list"), 'w') as fp:
        for lq, hq in list_file:
            fp.write(f"{lq} {hq} {None}\n")
    print(dset_path, total_ct)
638
+
639
+
640
# pre-process for unpaired (no ground-truth) low-light datasets
# 2. DICM: Chulwoo Lee, Chul Lee, and Chang-Su Kim. Contrast enhancement based on layered difference representation. In ICIP, 2012.
# 3. MEF: Kede Ma, Kai Zeng, and Zhou Wang. Perceptual quality assessment for multi-exposure image fusion. TIP, 2015.
# 4. NPE: Shuhang Wang, Jin Zheng, Hai-Miao Hu, and Bo Li. Naturalness preserved enhancement algorithm for non-uniform illumination images. TIP, 2013.
# 5. LIME / 6. VV:
# All obtained via Fediory/HVI-CIDNet: "You Only Need One Color Space: An Efficient Network for Low-light Image Enhancement" (github.com)
rt = os.path.join(base_rt, "LowLight")

IMG_EXTS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".tiff")

dataset = ['DICM', 'LIME', 'MEF', 'NPE', 'VV']
for dset in dataset:
    # (one-time folder reshuffle that moved images into '<dset>/image'
    # was done here previously; intentionally left disabled)

    dset_pth = os.path.join(rt, dset)
    input_folder = os.path.join(dset_pth, 'image')
    meta_folder = os.path.join(dset_pth, "metas")
    os.makedirs(meta_folder, exist_ok=True)

    # Normalize file names containing spaces so the space-separated
    # meta-list rows stay parseable. Only the file NAME is rewritten:
    # the old full-path str.replace would also mangle any parent
    # directory that happened to contain a space.
    for data in Path(input_folder).rglob("*.[jp][pn]g"):
        if " " in data.name:
            data.rename(data.with_name(data.name.replace(" ", "_")))

    list_file = []
    for sample in os.listdir(input_folder):
        if sample.lower().endswith(IMG_EXTS):
            list_file.append(os.path.join(input_folder, sample))

    # Unpaired data: only the input column is real; gt and label are None.
    with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item, None, None))
    print(input_folder, len(list_file))
689
+
690
"""
Other: UDC
"""
# pre-process for UDC(TOLED+POLED): https://yzhouas.github.io/projects/UDC/udc.html
# Yuqian Zhou, David Ren, Neil Emerton, Sehoon Lim, and Timothy Large. Image restoration for under-display camera. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9179-9188, 2021.
def mat2png(dset_folder, name, rt):
    """Unpack one released UDC ``.mat`` archive into per-image PNGs.

    Args:
        dset_folder: folder containing the .mat file, e.g. '~/UDC/poled'.
        name: .mat file name, e.g. 'poled_test_display.mat'. Its stem minus
            the 6-char 'poled_'/'toled_' prefix is also the variable key
            stored inside the .mat file (e.g. 'test_display').
        rt: dataset root; images are written to '<rt>/<split>/{image,gt}/'.
    """
    # 'poled_test_display.mat' -> key 'test_display'
    udc_key = name.split('.')[0][6:]
    udc_mat = loadmat(os.path.join(dset_folder, name))[udc_key]

    split, kind = udc_key.split('_')[0], udc_key.split('_')[1]
    # 'display' holds the degraded captures; anything else is ground truth
    kind = 'image' if kind == 'display' else 'gt'
    out_folder = os.path.join(rt, split, kind)
    os.makedirs(out_folder, exist_ok=True)

    # udc_mat is (n_images, H, W, C); each slice is already an (H, W, C)
    # array, so no reshape is needed before conversion.
    n_im = udc_mat.shape[0]
    prefix = name.split('_')[0]  # 'poled' or 'toled'
    for i in range(n_im):
        print(i, end='\r')  # lightweight progress indicator
        img = Image.fromarray(np.uint8(udc_mat[i])).convert('RGB')
        img.save(os.path.join(out_folder, '%s_%d.png' % (prefix, i)))
718
+
719
# Build val/test meta lists for the UDC benchmark (TOLED + POLED).
rt = os.path.join(base_rt, "Other/UDC")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)

# One-off pre-processing: unpack the released .mat archives into PNGs.
# Uncomment the two loops below on the first run only.
poled_folder = os.path.join(rt, 'poled')
poled_sample = ['poled_test_display.mat', 'poled_test_gt.mat',
                'poled_val_display.mat', 'poled_val_gt.mat']
toled_folder = os.path.join(rt, 'toled')
toled_sample = ['toled_test_display.mat', 'toled_test_gt.mat',
                'toled_val_display.mat', 'toled_val_gt.mat']

# for sample_set in poled_sample:
#     print(sample_set)
#     mat2png(poled_folder, sample_set, rt)
#     print()

# for sample_set in toled_sample:
#     print(sample_set)
#     mat2png(toled_folder, sample_set, rt)
#     print()

IMG_EXTS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".tiff")
for dset in ['val', 'test']:  # no training split is released
    dset_folder = os.path.join(rt, dset)
    input_folder = os.path.join(dset_folder, 'image')
    gt_folder = os.path.join(dset_folder, 'gt')
    list_file = []
    for sample in os.listdir(input_folder):
        if sample.lower().endswith(IMG_EXTS):
            list_file.append((os.path.join(input_folder, sample),
                              os.path.join(gt_folder, sample)))

    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(dset_folder, len(list_file))
757
+
758
+
759
+
760
"""
SR: DIV2K, Flickr2K, OST
"""
# OST: HR-only images grouped by category folders. HR-only meta rows are
# "None <hr> None" (the HR path goes in the ground-truth column).
rt = os.path.join(base_rt, "SuperResolution/OST")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)

IMG_EXTS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".tiff")

hr_files = []
image_folder = os.path.join(rt, "images")
for dset in os.listdir(image_folder):  # animal, building, grass, ...
    input_folder = os.path.join(image_folder, dset)
    for sample in os.listdir(input_folder):
        if sample.lower().endswith(IMG_EXTS):
            hr_files.append(os.path.join(input_folder, sample))

with open(os.path.join(meta_folder, 'OST_HR.list'), 'w') as fp:
    for item in hr_files:
        fp.write('{} {} {}\n'.format(None, item, None))
print(rt, len(hr_files))

# aug version (SR): images_pair/<SRx>/<category>/{HR,LR}
image_folder = os.path.join(rt, "images_pair")
for distortion in os.listdir(image_folder):  # SR1, SR2, SR3
    list_file = []
    pair_folder = os.path.join(image_folder, distortion)
    for dset in os.listdir(pair_folder):  # animal, building, grass, ...
        input_folder = os.path.join(pair_folder, dset)
        if "SR" in distortion:
            hq_folder = os.path.join(input_folder, "HR")
            lq_folder = os.path.join(input_folder, "LR")
        else:
            # BUGFIX: the original message lacked the f-prefix and printed
            # the literal text "{distortion}".
            raise KeyError(f"Unknown {distortion} for Augmented OST dataset.")
        for sample in os.listdir(hq_folder):
            if sample.lower().endswith(IMG_EXTS):
                # LQ/HQ pairs share the file name
                list_file.append((os.path.join(lq_folder, sample),
                                  os.path.join(hq_folder, sample)))

    with open(os.path.join(meta_folder, f"OST_train_pair_{distortion}.list"), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(pair_folder, len(list_file))
807
+
808
+
809
# Flickr2K: HR-only list first, then augmented (SR / Noise) pair lists.
rt = os.path.join(base_rt, "SuperResolution/Flickr2K")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)

IMG_EXTS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".tiff")

hr_files = []
for dset in ['images']:
    input_folder = os.path.join(rt, dset)
    for sample in os.listdir(input_folder):
        if sample.lower().endswith(IMG_EXTS):
            hr_files.append(os.path.join(input_folder, sample))

# HR-only rows: "None <hr> None"
with open(os.path.join(meta_folder, 'Flickr2K_HR.list'), 'w') as fp:
    for item in hr_files:
        fp.write('{} {} {}\n'.format(None, item, None))
print(rt, len(hr_files))

# aug version (SR, Noise): the pair sub-folders are named HR/LR for
# super-resolution and HQ/LQ for noise.
image_folder = os.path.join(rt, "images_pair")
for distortion in os.listdir(image_folder):  # Noise_L1, ..., SR1, ...
    list_file = []
    input_folder = os.path.join(image_folder, distortion)
    if "SR" in distortion:
        hq_folder = os.path.join(input_folder, "HR")
        lq_folder = os.path.join(input_folder, "LR")
    elif "Noise" in distortion:
        hq_folder = os.path.join(input_folder, "HQ")
        lq_folder = os.path.join(input_folder, "LQ")
    else:
        # BUGFIX: original raise lacked the f-prefix on the message.
        raise KeyError(f"Unknown {distortion} for Augmented Flickr2K dataset.")

    for sample in os.listdir(hq_folder):
        if sample.lower().endswith(IMG_EXTS):
            list_file.append((os.path.join(lq_folder, sample),
                              os.path.join(hq_folder, sample)))

    with open(os.path.join(meta_folder, f"Flickr2K_train_pair_{distortion}.list"), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    print(rt, len(list_file))
856
+
857
+
858
# DIV2K: HR-only lists for train/valid, then augmented pair lists.
rt = os.path.join(base_rt, "SuperResolution/DIV2K")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)

IMG_EXTS = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".tiff")

for dset in ['DIV2K_train_HR', 'DIV2K_valid_HR']:  # train, test
    input_folder = os.path.join(rt, dset)
    hr_files = []
    for ct, sample in enumerate(os.listdir(input_folder)):
        print(ct, end='\r')  # progress indicator
        if sample.lower().endswith(IMG_EXTS):
            hr_files.append(os.path.join(input_folder, sample))

    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for item in hr_files:
            fp.write('{} {} {}\n'.format(None, item, None))
    print(input_folder, len(hr_files))

# aug version (SR, Noise); fixed the 'imgae_folder' typo of the original.
for dset in ['DIV2K_train_pair', 'DIV2K_valid_pair']:  # train, test
    image_folder = os.path.join(rt, dset)
    for distortion in os.listdir(image_folder):  # Noise, SR.
        list_file = []
        input_folder = os.path.join(image_folder, distortion)
        if "SR" in distortion:
            hq_folder = os.path.join(input_folder, "HR")
            lq_folder = os.path.join(input_folder, "LR")
        elif "Noise" in distortion:
            hq_folder = os.path.join(input_folder, "HQ")
            lq_folder = os.path.join(input_folder, "LQ")
        else:
            # BUGFIX: original raise lacked the f-prefix on the message.
            raise KeyError(f"Unknown {distortion} for Augmented DIV2K dataset.")

        for ct, sample in enumerate(os.listdir(hq_folder)):
            print(ct, end='\r')
            if sample.lower().endswith(IMG_EXTS):
                list_file.append((os.path.join(lq_folder, sample),
                                  os.path.join(hq_folder, sample)))

        with open(os.path.join(meta_folder, f"{dset}_{distortion}.list"), 'w') as fp:
            for item in list_file:
                fp.write('{} {} {}\n'.format(item[0], item[1], None))
        print(input_folder, len(list_file))
1_generate_iqa.py ADDED
@@ -0,0 +1,447 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from PIL import Image
3
+ from pathlib import Path
4
+ import numpy as np
5
+ import random
6
+ import torchvision.transforms.functional as TF
7
+ import torchvision.transforms as transforms
8
+ import cv2
9
+ import re
10
+ import json
11
+
12
# ir dataset dictionary:
# Layout: degradation class -> dataset name -> split key -> meta-list path
# (relative to base_rt). Trailing comments give (sample count, kind) where
# kind is single (HR-only), pair (LQ+HQ), syn (synthetic), or real.
# NOTE(review): f-string below has no placeholders — a plain string would do.
base_rt = f'/home/CORP/hsiang.chen/Project/Datasets/IR'
dataset_dict = {
    # Image Restoration
    ## Super Resolution (3)
    "HR": {
        "DIV2K": {'train': 'SuperResolution/DIV2K/metas/DIV2K_train_HR.list',  # (800, single)
                  'val': 'SuperResolution/DIV2K/metas/DIV2K_valid_HR.list'},  # (100, single)
        "Flickr2K": {'train': 'SuperResolution/Flickr2K/metas/Flickr2K_HR.list'},  # (2650, single)
        "OST": {'train': 'SuperResolution/OST/metas/OST_HR.list'},  # (10324, single)
    },

    "Low Resolution": {
        "DIV2K": {'train1': 'SuperResolution/DIV2K/metas/DIV2K_train_pair_SR1.list',  # (800, pair)
                  'train2': 'SuperResolution/DIV2K/metas/DIV2K_train_pair_SR2.list',  # (800, pair)
                  'train3': 'SuperResolution/DIV2K/metas/DIV2K_train_pair_SR3.list',  # (800, pair)
                  'val': 'SuperResolution/DIV2K/metas/DIV2K_valid_pair_SR.list'},  # (100, pair)
        "Flickr2K": {'train1': 'SuperResolution/Flickr2K/metas/Flickr2K_train_pair_SR1.list',  # (2650, pair)
                     'train2': 'SuperResolution/Flickr2K/metas/Flickr2K_train_pair_SR2.list',  # (2650, pair)
                     'train3': 'SuperResolution/Flickr2K/metas/Flickr2K_train_pair_SR3.list'},  # (2650, pair)
        "OST": {'train1': 'SuperResolution/OST/metas/OST_train_pair_SR1.list',  # (10324, pair)
                'train2': 'SuperResolution/OST/metas/OST_train_pair_SR2.list',  # (10324, pair)
                'train3': 'SuperResolution/OST/metas/OST_train_pair_SR3.list'},  # (10324, pair)
    },

    ## Derain (5)
    "Rain": {
        "RainTrainL": {'train': 'Derain/RainTrainL/metas/train.list'},  # (200, pair)
        "Rain100L": {'test': 'Derain/Rain100L/metas/test.list'},  # (100, pair)
        "LHPRain": {'train': 'Derain/LHPRain/metas/train.list',  # (2100, pair)
                    'val': 'Derain/LHPRain/metas/val.list',  # (600, pair)
                    'test': 'Derain/LHPRain/metas/test.list'},  # (300, pair)
        "UHDRain": {'train': 'Derain/UHD-Rain/metas/train.list',  # (3000, pair)
                    'test': 'Derain/UHD-Rain/metas/test.list'},  # (200, pair)
        "Practical": {'test': 'Derain/Practical/metas/test.list'},  # (15, real)
    },

    ## Deraindrop (2)
    "RainDrop": {
        "RainDrop": {'train': 'Derain/RainDrop/metas/Raindrop_train.list',  # (861, pair)
                     'test_a': 'Derain/RainDrop/metas/Raindrop_test_a.list',  # (58, pair)
                     'test_b': 'Derain/RainDrop/metas/Raindrop_test_b.list'},  # (249, pair)
        "RainDS_syn_rainstreak": {'train': 'Derain/RainDS/metas/RainDS_syn_train_rainstreak.list',  # (1000, pair)
                                  'test': 'Derain/RainDS/metas/RainDS_syn_test_rainstreak.list'},  # (200, pair)
        "RainDS_syn_raindrop": {'train': 'Derain/RainDS/metas/RainDS_syn_train_raindrop.list',  # (1000, pair)
                                'test': 'Derain/RainDS/metas/RainDS_syn_test_raindrop.list'},  # (200, pair)
        "RainDS_syn_rainstreak_raindrop": {'train': 'Derain/RainDS/metas/RainDS_syn_train_rainstreak_raindrop.list',  # (1000, pair)
                                           'test': 'Derain/RainDS/metas/RainDS_syn_test_rainstreak_raindrop.list'},  # (200, pair)
        "RainDS_real_rainstreak": {'train': 'Derain/RainDS/metas/RainDS_real_train_set_rainstreak.list',  # (150, pair)
                                   'test': 'Derain/RainDS/metas/RainDS_real_test_set_rainstreak.list'},  # (98, pair)
        "RainDS_real_raindrop": {'train': 'Derain/RainDS/metas/RainDS_real_train_set_raindrop.list',  # (150, pair)
                                 'test': 'Derain/RainDS/metas/RainDS_real_test_set_raindrop.list'},  # (98, pair)
        # NOTE(review): 'test' below points to ..._rainstreak.list, the same
        # file as RainDS_real_rainstreak's test split — likely a copy-paste
        # slip for ..._rainstreak_raindrop.list; confirm before training.
        "RainDS_real_rainstreak_raindrop": {'train': 'Derain/RainDS/metas/RainDS_real_train_set_rainstreak_raindrop.list',  # (150, pair)
                                            'test': 'Derain/RainDS/metas/RainDS_real_test_set_rainstreak.list'},  # (98, pair)
    },

    ## Dehaze (5)
    "Fog": {
        "SOTS": {'test': 'Dehaze/SOTS/metas/test.list'},  # (500, pair)
        "OTS": {'train': 'Dehaze/OTS/metas/train.list'},  # (72135, pair)
        "4kID": {'train': 'Dehaze/4kID/metas/train.list',  # (15606, pair)
                 'test': 'Dehaze/4kID/metas/test.list'},  # (97, pair)
        "Unann": {'test': 'Dehaze/UnannotatedHazyImages/metas/test.list'},  # (4809, real)
        "NH-Haze": {'test': 'Dehaze/NH-Haze/metas/test.list'},  # (5, real)
    },

    ## Denoise (9)
    # For synthetic noise sets, L1/L3/L5 suffixes denote noise levels.
    "Noise": {
        "BSD400": {'train': 'Denoise/BSD400/metas/BSD400.list',
                   'train1': 'Denoise/BSD400/metas/BSD400_Noise_L1.list',
                   'train2': 'Denoise/BSD400/metas/BSD400_Noise_L3.list',
                   'train3': 'Denoise/BSD400/metas/BSD400_Noise_L5.list',},  # (400, syn)
        "WED": {'train': 'Denoise/WaterlooED/metas/WaterlooED.list',
                'train1': 'Denoise/WaterlooED/metas/WaterlooED_Noise_L1.list',
                'train2': 'Denoise/WaterlooED/metas/WaterlooED_Noise_L3.list',
                'train3': 'Denoise/WaterlooED/metas/WaterlooED_Noise_L5.list',},  # (4744, syn)
        "BSD68": {'test': 'Denoise/BSD68/metas/BSD68.list',
                  'test1': 'Denoise/BSD68/metas/BSD68_Noise_L1.list',
                  'test2': 'Denoise/BSD68/metas/BSD68_Noise_L3.list',
                  'test3': 'Denoise/BSD68/metas/BSD68_Noise_L5.list'},  # (68, syn)
        "Urban": {'test': 'Denoise/Urban100/metas/Urban100.list',
                  'test1': 'Denoise/Urban100/metas/Urban100_Noise_L1.list',
                  'test2': 'Denoise/Urban100/metas/Urban100_Noise_L3.list',
                  'test3': 'Denoise/Urban100/metas/Urban100_Noise_L5.list'},  # (100, syn)
        "CBSD68": {'test': 'Denoise/CBSD68/metas/CBSD68.list',
                   'test1': 'Denoise/CBSD68/metas/CBSD68_Noise_L1.list',
                   'test2': 'Denoise/CBSD68/metas/CBSD68_Noise_L3.list',
                   'test3': 'Denoise/CBSD68/metas/CBSD68_Noise_L5.list',},  # (68, syn)
        "Kodak": {'test': 'Denoise/Kodak/metas/Kodak.list',
                  'test1': 'Denoise/Kodak/metas/Kodak_Noise_L1.list',
                  'test2': 'Denoise/Kodak/metas/Kodak_Noise_L3.list',
                  'test3': 'Denoise/Kodak/metas/Kodak_Noise_L5.list'},  # (24, syn)
        "McMaster": {'test': 'Denoise/McMaster/metas/McMaster.list',
                     'test1': 'Denoise/McMaster/metas/McMaster_Noise_L1.list',
                     'test2': 'Denoise/McMaster/metas/McMaster_Noise_L3.list',
                     'test3': 'Denoise/McMaster/metas/McMaster_Noise_L5.list'},  # (18, syn)
        "Set12": {'test': 'Denoise/Set12/metas/Set12.list',
                  'test1': 'Denoise/Set12/metas/Set12_Noise_L1.list',
                  'test2': 'Denoise/Set12/metas/Set12_Noise_L3.list',
                  'test3': 'Denoise/Set12/metas/Set12_Noise_L5.list',},  # (12, syn)
        "SIDD": {'train': 'Denoise/SIDD/metas/train.list',  # (320, pair)
                 'test': 'Denoise/SIDD/metas/test.list'},  # (1280, pair)

        "DIV2K": {'train1': 'SuperResolution/DIV2K/metas/DIV2K_train_pair_Noise_L1.list',  # (800, pair)
                  'train2': 'SuperResolution/DIV2K/metas/DIV2K_train_pair_Noise_L3.list',  # (800, pair)
                  'train3': 'SuperResolution/DIV2K/metas/DIV2K_train_pair_Noise_L5.list',  # (800, pair)
                  'val': 'SuperResolution/DIV2K/metas/DIV2K_valid_pair_Noise.list'},  # (100, pair)
        "Flickr2K": {'train1': 'SuperResolution/Flickr2K/metas/Flickr2K_train_pair_Noise_L1.list',  # (2650, pair)
                     'train2': 'SuperResolution/Flickr2K/metas/Flickr2K_train_pair_Noise_L3.list',  # (2650, pair)
                     'train3': 'SuperResolution/Flickr2K/metas/Flickr2K_train_pair_Noise_L5.list'},  # (2650, pair)
    },

    ## Desnow (2)
    "Snow": {
        "Snow100k": {'train': 'Desnow/Snow100k/metas/train.list'},  # (50000, syn)
        "Snow100k-S": {'test': 'Desnow/Snow100k/metas/test_S.list'},  # (16611, syn)
        "Snow100k-M": {'test': 'Desnow/Snow100k/metas/test_M.list'},  # (16588, syn)
        "Snow100k-L": {'test': 'Desnow/Snow100k/metas/test_L.list'},  # (16801, syn)
        "Snow100k-R": {'test': 'Desnow/Snow100k/metas/test_realistic.list'},  # (1329, real)
        "UHDSnow": {'train': 'Desnow/UHD-Snow/metas/train.list',  # (3000, pair)
                    'test': 'Desnow/UHD-Snow/metas/test.list'},  # (200, pair)
    },

    ## Deblur (3)
    "Blur": {
        "GoPro": {'train': 'Deblur/GoPro/metas/train.list',  # (2103, pair)
                  'test': 'Deblur/GoPro/metas/test.list'},  # (1111, pair)
        "HIDE": {'train': 'Deblur/HIDE/metas/train.list',  # (6397, pair)
                 'test': 'Deblur/HIDE/metas/test.list'},  # (2025, pair)
        "RealBlur-J": {'test': 'Deblur/RealBlur-J_ECC_IMCORR_centroid_itensity_ref/metas/test.list'},  # (980, pair)
        "RealBlur-R": {'test': 'Deblur/RealBlur-R_BM3D_ECC_IMCORR_centroid_itensity_ref/metas/test.list'},  # (980, pair)
    },

    ## Lowlight (6)
    "Low-light": {
        "LOL": {'train': 'LowLight/LOL/metas/train.list',  # (485, pair)
                'test': 'LowLight/LOL/metas/test.list'},  # (15, pair)
        "DICM": {'test': 'LowLight/DICM/metas/test.list'},  # (69, real)
        "MEF": {'test': 'LowLight/MEF/metas/test.list'},  # (17, real)
        "NPE": {'test': 'LowLight/NPE/metas/test.list'},  # (8, real)
        "LIME": {'test': 'LowLight/LIME/metas/test.list'},  # (10, real)
        "VV": {'test': 'LowLight/VV/metas/test.list'},  # (24, real)
    },

    ## Others (1)
    "Unknown": {
        "UDC": {'val': 'Other/UDC/metas/val.list',  # (60, pair)
                'test': 'Other/UDC/metas/test.list'},  # (60, pair)
    },
}
162
+
163
def IRImageData(listfile):
    """Read a 3-column meta list and return the paths that exist on disk.

    Each row of *listfile* is "<lq> <hq> <label>" (whitespace-separated);
    rows with any other field count are skipped, and valid rows are
    processed in sorted order. A column may hold the literal string
    "None" — such non-existent paths are filtered out by the isfile()
    check. The label column is ignored here.

    Returns:
        (LQ_list, HQ_list): lists of existing low-/high-quality paths.
    """
    with open(listfile) as fin:
        rows = [line.strip().split() for line in fin]
    rows = sorted(row for row in rows if len(row) == 3)

    lq_paths, hq_paths = [], []
    for lq_pth, hq_pth, _label in rows:
        if os.path.isfile(lq_pth):
            lq_paths.append(lq_pth)
        if os.path.isfile(hq_pth):
            hq_paths.append(hq_pth)
    return lq_paths, hq_paths
182
+
183
# question dictionary:
# Prompt templates for IQA question generation, keyed by reference mode
# ("Full-Reference" needs a clean reference image, "Non-Reference" does not)
# and by how many distortions the answer should name ("ONE", "TWO", or the
# count-agnostic "Common" phrasings). question_generate() samples from these.
question_dict = {
    "Full-Reference": {
        "ONE": [
            "Compared to the reference, what ONE distortion stands out most in the evaluated image?",
            "Determine the leading ONE degradation when comparing the evaluated image to the reference.",
            "Determine the most impactful ONE distortion in the evaluated image compared to the reference.",
            "Highlight the most significant ONE distortion in the evaluated image in comparison to the reference.",
            "Identify the chief ONE degradation in the evaluated image when compared to the reference.",
            "Identify the most notable ONE distortion in the evaluated image's quality when compared to the reference.",
            "In comparison to the reference, what ONE distortion is most prominent in the evaluated image?",
            "What ONE distortion is most apparent in the evaluated image relative to the reference?",
            "What ONE distortion most significantly affects the evaluated image compared to the reference?",
            "What ONE distortion stands out in the evaluated image against the reference?",
            "What critical ONE quality degradation is present in the evaluated image versus the reference?",
        ],
        "TWO": [
            "Compared to the reference, what TWO distortions stand out most in the evaluated image?",
            "Determine the leading TWO degradations when comparing the evaluated image to the reference.",
            "Determine the most impactful TWO distortions in the evaluated image compared to the reference.",
            "Highlight the most significant TWO distortions in the evaluated image in comparison to the reference.",
            "Identify the chief TWO degradations in the evaluated image when compared to the reference.",
            "Identify the most notable TWO distortions in the evaluated image's quality when compared to the reference.",
            "In comparison to the reference, what TWO distortions are most prominent in the evaluated image?",
            "What TWO distortions are most apparent in the evaluated image relative to the reference?",
            "What TWO distortions most significantly affect the evaluated image compared to the reference?",
            "What TWO distortions stand out in the evaluated image against the reference?",
            "What critical TWO quality degradations are present in the evaluated image versus the reference?",
        ],
        "Common": [
            "Compared to the reference, what distortion(s) stand out most in the evaluated image?",
            "Determine the leading degradation(s) when comparing the evaluated image to the reference.",
            "Determine the most impactful distortion(s) in the evaluated image compared to the reference.",
            "Highlight the most significant distortion(s) in the evaluated image in comparison to the reference.",
            "Identify the chief degradation(s) in the evaluated image when compared to the reference.",
            "Identify the most notable distortion(s) in the evaluated image's quality when compared to the reference.",
            "In comparison to the reference, what distortion(s) are most prominent in the evaluated image?",
            "What critical quality degradation(s) are present in the evaluated image versus the reference?",
            "What distortion(s) are most apparent in the evaluated image relative to the reference?",
            "What distortion(s) most significantly affect the evaluated image compared to the reference?",
            "What distortion(s) stand out in the evaluated image against the reference?"
        ]
    },
    "Non-Reference": {
        "ONE": [
            "Determine the leading ONE degradation in the evaluated image.",
            "Determine the most impactful ONE distortion in the evaluated image.",
            "Highlight the most significant ONE distortion in the evaluated image.",
            "Identify the chief ONE degradation in the evaluated image.",
            "Identify the most critical ONE distortion in the evaluated image.",
            "Identify the most notable ONE distortion in the evaluated image's quality.",
            "In terms of image quality, what is the most glaring ONE issue with the evaluated image?",
            "In the evaluated image, what ONE distortion is most detrimental to image quality?",
            "Pinpoint the foremost ONE image quality issue in the evaluated image.",
            "What ONE distortion is most apparent in the evaluated image?",
            "What ONE distortion is most evident in the evaluated image?",
            "What ONE distortion is most prominent in the evaluated image?",
            "What ONE distortion is most prominent when examining the evaluated image?",
            "What ONE distortion most detrimentally affects the overall quality of the evaluated image?",
            "What ONE distortion most notably affects the clarity of the evaluated image?",
            "What ONE distortion most significantly affects the evaluated image?",
            "What ONE distortion stands out in the evaluated image?",
            "What ONE quality degradation is most apparent in the evaluated image?",
            "What critical ONE quality degradation is present in the evaluated image?",
            "What is the foremost ONE distortion affecting the evaluated image's quality?",
            "What is the leading ONE distortion in the evaluated image?",
            "What is the most critical ONE image quality issue in the evaluated image?",
            "What is the most severe ONE degradation observed in the evaluated image?",
            "What is the primary ONE degradation observed in the evaluated image?"
        ],
        "TWO": [
            "Determine the leading TWO degradations in the evaluated image.",
            "Determine the most impactful TWO distortions in the evaluated image.",
            "Highlight the most significant TWO distortions in the evaluated image.",
            "Identify the chief TWO degradations in the evaluated image.",
            "Identify the most critical TWO distortions in the evaluated image.",
            "Identify the most notable TWO distortions in the evaluated image's quality.",
            "In terms of image quality, what are the most glaring TWO issues with the evaluated image?",
            "In the evaluated image, what TWO distortions are most detrimental to image quality?",
            "Pinpoint the foremost TWO image quality issues in the evaluated image.",
            "What TWO distortions are most apparent in the evaluated image?",
            "What TWO distortions are most evident in the evaluated image?",
            "What TWO distortions are most prominent in the evaluated image?",
            "What TWO distortions are most prominent when examining the evaluated image?",
            "What TWO distortions most detrimentally affect the overall quality of the evaluated image?",
            "What TWO distortions most notably affect the clarity of the evaluated image?",
            "What TWO distortions most significantly affect the evaluated image?",
            "What TWO distortions stand out in the evaluated image?",
            "What TWO quality degradations are most apparent in the evaluated image?",
            "What are the foremost TWO distortions affecting the evaluated image's quality?",
            "What are the leading TWO distortions in the evaluated image?",
            "What are the most critical TWO image quality issues in the evaluated image?",
            "What are the most severe TWO degradations observed in the evaluated image?",
            "What are the primary TWO degradations observed in the evaluated image?",
            "What critical TWO quality degradations are present in the evaluated image?",
        ],
        "Common": [
            "Determine the leading degradation(s) in the evaluated image.",
            "Determine the most impactful distortion(s) in the evaluated image.",
            "Highlight the most significant distortion(s) in the evaluated image.",
            "Identify the chief degradation(s) in the evaluated image.",
            "Identify the most critical distortion(s) in the evaluated image.",
            "Identify the most notable distortion(s) in the evaluated image's quality.",
            "In terms of image quality, what are the most glaring issue(s) with the evaluated image?",
            "In the evaluated image, what distortion(s) are most detrimental to image quality?",
            "Pinpoint the foremost image quality issue(s) in the evaluated image.",
            "What are the foremost distortion(s) affecting the evaluated image's quality?",
            "What are the leading distortion(s) in the evaluated image?",
            "What are the most critical image quality issue(s) in the evaluated image?",
            "What are the most severe degradation(s) observed in the evaluated image?",
            "What are the primary degradation(s) observed in the evaluated image?",
            "What critical quality degradation(s) are present in the evaluated image?",
            "What distortion(s) are most apparent in the evaluated image?",
            "What distortion(s) are most evident in the evaluated image?",
            "What distortion(s) are most prominent in the evaluated image?",
            "What distortion(s) are most prominent when examining the evaluated image?",
            "What distortion(s) most detrimentally affect the overall quality of the evaluated image?",
            "What distortion(s) most notably affect the clarity of the evaluated image?",
            "What distortion(s) most significantly affect the evaluated image?",
            "What distortion(s) stand out in the evaluated image?",
            "What quality degradation(s) are most apparent in the evaluated image?"
        ]
    }
}
307
+
308
def question_generate(ref="Full-Reference", degra="Common", question_bank=None):
    """Sample one IQA question template for the given reference mode.

    Args:
        ref: Top-level key of the question bank, e.g. "Full-Reference" or
            "Non-Reference".
        degra: Sub-key selecting degradation-count-specific templates
            (e.g. "ONE", "TWO", "Common"). The generic "Common" pool is
            always part of the draw.
        question_bank: Optional mapping with the same layout as the
            module-level ``question_dict``; defaults to ``question_dict``.
            Useful for testing with a small custom bank.

    Returns:
        str: A randomly chosen template; with probability 0.6 the
        single-word-answer instruction is appended.
    """
    bank = question_dict if question_bank is None else question_bank
    # Plain literal: the original used an f-string with no placeholders.
    option = " Answer the question using a single word or phrase."
    pool = bank[ref]["Common"]
    if degra != "Common":  # avoid listing the common pool twice
        pool = pool + bank[ref][degra]
    template = random.choice(pool)
    if random.random() >= 0.4:
        template += option
    return template
314
+
315
if __name__ == "__main__":
    # For every (degradation, dataset, split) triple, read the
    # "<lq_path> <hq_path> <label>" list file and emit brief IQA
    # question/answer metadata in three flavours:
    #   meta_refA : full-reference question (HQ reference + LQ image)
    #   meta_A    : no-reference question on the LQ image
    #   meta_syn  : no-reference question on the HQ image (synthetic-only sets)
    for degradation, degra_dict in dataset_dict.items():
        for dname, ddict in degra_dict.items():
            for dset, list_path in ddict.items():

                meta_refA = []
                meta_A = []
                meta_syn = []

                # read data list: keep only well-formed three-field rows
                paths = []
                list_path = os.path.join(base_rt, list_path)
                with open(list_path) as fin:
                    for line in fin:
                        fields = line.strip().split()
                        if len(fields) == 3:
                            paths.append(fields)
                paths = sorted(paths)

                # Dataset
                LQ_list = []
                HQ_list = []
                for lq_pth, hq_pth, label in paths:
                    # BUG FIX: the original `id` (shadowing the builtin) was
                    # left unbound when neither file exists; default to None.
                    sample_id = None

                    if os.path.isfile(hq_pth):
                        HQ_list.append(hq_pth)
                        image_ref = os.path.relpath(hq_pth, base_rt).replace("\\", "/")
                        sample_id = os.path.basename(image_ref)
                    else:
                        image_ref = None

                    if os.path.isfile(lq_pth):
                        LQ_list.append(lq_pth)
                        image_A = os.path.relpath(lq_pth, base_rt).replace("\\", "/")
                        # LQ basename takes precedence when both files exist
                        sample_id = os.path.basename(image_A)
                    else:
                        image_A = None

                    meta_refA.append({
                        "distortion_class": degradation,
                        "distortion_name": degradation,
                        "severity": 3,  # fixed mid-level severity for all samples
                        "id": sample_id,
                        "image_ref": image_ref,
                        "image_A": image_A,
                        "image_B": None,
                        "task_type": "quality_single_A",
                        "conversations": [
                            {
                                "from": "human",
                                "value": question_generate(ref="Full-Reference", degra="ONE"),
                            },
                            {
                                "from": "gpt",
                                "value": degradation
                            }
                        ],
                    })

                    meta_A.append({
                        "distortion_class": degradation,
                        "distortion_name": degradation,
                        "severity": 3,
                        "id": sample_id,
                        "image_ref": None,
                        "image_A": image_A,
                        "image_B": None,
                        "task_type": "quality_single_A_noref",
                        "conversations": [
                            {
                                "from": "human",
                                "value": question_generate(ref="Non-Reference", degra="ONE"),
                            },
                            {
                                "from": "gpt",
                                "value": degradation
                            }
                        ],
                    })

                    # synthetic variant: the HQ image itself is evaluated
                    meta_syn.append({
                        "distortion_class": degradation,
                        "distortion_name": degradation,
                        "severity": 3,
                        "id": sample_id,
                        "image_ref": None,
                        "image_A": image_ref,
                        "image_B": None,
                        "task_type": "quality_single_A_noref",
                        "conversations": [
                            {
                                "from": "human",
                                "value": question_generate(ref="Non-Reference", degra="ONE"),
                            },
                            {
                                "from": "gpt",
                                "value": degradation
                            }
                        ],
                    })

                if len(LQ_list) > 0 and len(HQ_list) > 0 and len(LQ_list) == len(HQ_list):  # pair
                    meta_refA_pth = list_path.replace(".list", "_iqa_refA_brief.json")
                    meta_A_pth = list_path.replace(".list", "_iqa_A_brief.json")
                    with open(meta_refA_pth, "w") as f:
                        json.dump(meta_refA, f, indent=4)
                    with open(meta_A_pth, "w") as f:
                        json.dump(meta_A, f, indent=4)

                    print(f"[{os.path.relpath(meta_refA_pth, base_rt)}, ], # LQ[{len(LQ_list)}], HQ[{len(HQ_list)}], quality_single_A, {degradation}, {dname}-{dset}")
                    print(f"[{os.path.relpath(meta_A_pth, base_rt)}, ], # LQ[{len(LQ_list)}], HQ[{len(HQ_list)}], quality_single_A_noref, {degradation}, {dname}-{dset}")

                elif len(LQ_list) > 0 and len(HQ_list) == 0:  # real image
                    meta_A_pth = list_path.replace(".list", "_iqa_A_brief.json")
                    with open(meta_A_pth, "w") as f:
                        json.dump(meta_A, f, indent=4)
                    print(f"[{os.path.relpath(meta_A_pth, base_rt)}, ], # LQ[{len(LQ_list)}], HQ[{len(HQ_list)}], quality_single_A_noref, {degradation}, {dname}-{dset}")

                elif len(LQ_list) == 0 and len(HQ_list) > 0:  # syn image
                    meta_refA_pth = list_path.replace(".list", "_iqa_syn_refA_brief.json")
                    meta_syn_pth = list_path.replace(".list", "_iqa_syn_A_brief.json")
                    with open(meta_refA_pth, "w") as f:
                        json.dump(meta_refA, f, indent=4)
                    with open(meta_syn_pth, "w") as f:
                        json.dump(meta_syn, f, indent=4)
                    print(f"[{os.path.relpath(meta_refA_pth, base_rt)}, ], # LQ[{len(LQ_list)}], HQ[{len(HQ_list)}], quality_single_A, {degradation}, {dname}-{dset}")
                    print(f"[{os.path.relpath(meta_syn_pth, base_rt)}, ], # LQ[{len(LQ_list)}], HQ[{len(HQ_list)}], quality_single_A_noref, {degradation}, {dname}-{dset}")

                else:
                    raise KeyError(f"the task is not matched, please check the dataset {list_path}")
446
+
447
+
generate_lowresolution.py ADDED
@@ -0,0 +1,537 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import glob
3
+ import os
4
+ from PIL import Image
5
+
6
+ import cv2
7
+ import math
8
+ import numpy as np
9
+ import os
10
+ import os.path as osp
11
+ import random
12
+ import time
13
+ import torch
14
+ from tqdm import tqdm
15
+
16
+ from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
17
+ from basicsr.data.transforms import augment
18
+ from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
19
+ from basicsr.utils.registry import DATASET_REGISTRY
20
+ from torch.utils import data as data
21
+ from torchvision.transforms.functional import center_crop
22
+ import torchvision.transforms as T
23
+ from torchvision.utils import save_image
24
+
25
+ from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt
26
+ from basicsr.data.transforms import paired_random_crop
27
+ from basicsr.utils import DiffJPEG, USMSharp
28
+ from basicsr.utils.img_process_util import filter2D
29
+ from basicsr.utils.registry import MODEL_REGISTRY
30
+ from collections import OrderedDict
31
+ from torch.nn import functional as F
32
+
33
# Configuration for the Real-ESRGAN two-stage degradation pipeline.
# NOTE(review): `dataroot_gt` is an absolute workstation path — adjust it and
# the meta lists before running elsewhere.
cfg = {
    # dataset info.
    "name": "DF2K+OST",
    "type": "RealESRGANDataset",
    "dataroot_gt": "/home/CORP/hsiang.chen/Project/Datasets/IR/SuperResolution",
    "meta_train": [
        "DIV2K/metas/DIV2K_train_HR.list",
        "Flickr2K/metas/Flickr2K.list",
        "OST/metas/OST.list",
    ],
    "meta_test": ["DIV2K/metas/DIV2K_valid_HR.list"],

    # the first degradation process
    "resize_prob": [0.2, 0.7, 0.1],  # up, down, keep
    "resize_range": [0.15, 1.5],
    "gaussian_noise_prob": 0.5,  # otherwise Poisson noise is applied
    "noise_range": [1, 30],
    "poisson_scale_range": [0.05, 3],
    "gray_noise_prob": 0.4,
    "jpeg_range": [30, 95],  # JPEG quality (lower = stronger artifacts)

    "blur_kernel_size": 21,
    "kernel_list": ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'],
    "kernel_prob": [0.45, 0.25, 0.12, 0.03, 0.12, 0.03],  # one weight per kernel type above
    "sinc_prob": 0.1,
    "blur_sigma": [0.2, 3],
    "betag_range": [0.5, 4],
    "betap_range": [1, 2],

    # the second degradation process
    "second_blur_prob": 0.8,
    "resize_prob2": [0.3, 0.4, 0.3],  # up, down, keep
    "resize_range2": [0.3, 1.2],
    "gaussian_noise_prob2": 0.5,
    "noise_range2": [1, 25],
    "poisson_scale_range2": [0.05, 2.5],
    "gray_noise_prob2": 0.4,
    "jpeg_range2": [30, 95],

    "blur_kernel_size2": 21,
    "kernel_list2": ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'],
    "kernel_prob2": [0.45, 0.25, 0.12, 0.03, 0.12, 0.03],
    "sinc_prob2": 0.1,
    "blur_sigma2": [0.2, 1.5],
    "betag_range2": [0.5, 4],
    "betap_range2": [1, 2],

    "final_sinc_prob": 0.8,

    "gt_size": 512,  # GT crop size (pixels)
    "keep_ratio": True,  # keep aspect ratio when resizing before the crop
    "use_hflip": True,
    "use_rot": False,

    # data loader
    "use_shuffle": True,
    "num_worker_per_gpu": 5,
    "batch_size_per_gpu": 12,
    "dataset_enlarge_ratio": 1,
    "prefetch_mode": None,
}
94
+
95
def set_seed(seed=42):
    """Seed every RNG source (python, numpy, torch CPU and CUDA) and force
    deterministic cuDNN behaviour so data generation is reproducible."""
    for seeder in (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    ):
        seeder(seed)

    # trade cuDNN autotuning for run-to-run determinism
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
104
+
105
@DATASET_REGISTRY.register()
class RealESRGANDataset(data.Dataset):
    """Dataset used for Real-ESRGAN model:
    Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.

    It loads gt (Ground-Truth) images, and augments them.
    It also generates blur kernels and sinc kernels for generating low-quality images.
    Note that the low-quality images are processed in tensors on GPUS for faster processing.

    Args:
        opt (dict): Config for train datasets. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            meta_info (str): Path for meta information file.
            io_backend (dict): IO backend type and other kwarg.
            use_hflip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
            Please see more options in the codes.
        train (bool): Select ``opt['meta_train']`` when True, otherwise
            ``opt['meta_test']``; also enables random horizontal flipping.
    """

    def __init__(self, opt, train=True):
        super(RealESRGANDataset, self).__init__()
        self.opt = opt
        self.file_client = None

        # kernel define
        self.data_rt = opt['dataroot_gt']

        # dataload
        self.train = train
        if self.train:
            self.metas = opt['meta_train']
        else:
            self.metas = opt['meta_test']

        # Meta lists are whitespace-separated; the image path is taken from
        # the second field of each line.
        self.paths = []
        for meta in self.metas:
            with open(os.path.join(self.data_rt, meta)) as fin:
                self.paths += [line.strip().split(' ')[1] for line in fin]

        # Hyperparameter of Degradation
        # blur settings for the first degradation
        self.blur_kernel_size = opt['blur_kernel_size']
        self.kernel_list = opt['kernel_list']
        self.kernel_prob = opt['kernel_prob']  # a list for each kernel probability
        self.blur_sigma = opt['blur_sigma']
        self.betag_range = opt['betag_range']  # betag used in generalized Gaussian blur kernels
        self.betap_range = opt['betap_range']  # betap used in plateau blur kernels
        self.sinc_prob = opt['sinc_prob']  # the probability for sinc filters

        # blur settings for the second degradation
        self.blur_kernel_size2 = opt['blur_kernel_size2']
        self.kernel_list2 = opt['kernel_list2']
        self.kernel_prob2 = opt['kernel_prob2']
        self.blur_sigma2 = opt['blur_sigma2']
        self.betag_range2 = opt['betag_range2']
        self.betap_range2 = opt['betap_range2']
        self.sinc_prob2 = opt['sinc_prob2']

        # a final sinc filter
        self.final_sinc_prob = opt['final_sinc_prob']

        self.kernel_range = [2 * v + 1 for v in range(3, 11)]  # kernel size ranges from 7 to 21
        # TODO: kernel range is now hard-coded, should be in the configure file
        self.pulse_tensor = torch.zeros(21, 21).float()  # convolving with pulse tensor brings no blurry effect
        self.pulse_tensor[10, 10] = 1

        self.device = torch.cuda.current_device()
        self.jpeger = DiffJPEG(differentiable=False).to(self.device)  # simulate JPEG compression artifacts
        self.usm_sharpener = USMSharp().to(self.device)  # do usm sharpening
        self.resize = opt['gt_size']
        self.keep_ratio = opt['keep_ratio']

        # function
        self.crop = T.RandomCrop((self.resize, self.resize))
        self.flip = T.RandomHorizontalFlip()
        self.transform = T.Compose(
            [
                # T.ToDtype(torch.float32, scale=True), # only support for torch 2.++
                T.ToTensor(),
            ]
        )

    def __getitem__(self, index):
        """Load one GT image and return ``(lq, hq, gt_path)``.

        ``lq``/``hq`` are (C, H, W) float tensors in [0, 1]; ``hq`` is the
        USM-sharpened GT crop and ``lq`` its two-stage-degraded counterpart.
        """
        # -------------------------------- Load gt images -------------------------------- #
        gt_path = self.paths[index]
        img_gt = Image.open(gt_path).convert("RGB")

        # -------------------------------- Image Process --------------------------------
        # resize: scale the shorter edge to gt_size (x1.2 margin so the random
        # crop below has room), or squash to a square when keep_ratio is off
        h, w = img_gt.height, img_gt.width
        if self.keep_ratio:
            ratio = self.resize / min(h, w)
            h_new, w_new = round(h * ratio * 1.2), round(w * ratio * 1.2)
            img_gt = img_gt.resize((w_new, h_new), resample=Image.LANCZOS)
        else:
            img_gt = img_gt.resize((self.resize, self.resize), resample=Image.LANCZOS)
        # crop to gt_size x gt_size
        img_gt = self.crop(img_gt)
        # flip (only for train)
        if self.train:
            img_gt = self.flip(img_gt)
        # transform to tensor
        img_gt = self.transform(img_gt).to(torch.float32)

        # -------------------------------- Generate Kernels --------------------------------
        kernel, kernel2, sinc_kernel = self.generate_kernel()

        # ------------------------- Generate Low Resolution Sample -------------------------
        lq, hq = self.generate_lr({
            "gt": img_gt.unsqueeze(0),  # add batch dim: (1, C, H, W)
            "kernel1": kernel,
            "kernel2": kernel2,
            "sinc_kernel": sinc_kernel,
        })

        return lq, hq, gt_path

    def generate_kernel(self, ):
        """Sample the three blur kernels used by :meth:`generate_lr`.

        Returns:
            tuple: ``(kernel, kernel2, sinc_kernel)`` — 21x21 float tensors
            for the first-stage blur, second-stage blur, and the final sinc
            filter (a pulse tensor, i.e. identity, when the sinc is skipped).
        """
        # ------------------------ Generate kernels (used in the first degradation) ------------------------ #
        kernel_size = random.choice(self.kernel_range)
        if np.random.uniform() < self.opt['sinc_prob']:
            # this sinc filter setting is for kernels ranging from [7, 21]
            if kernel_size < 13:
                omega_c = np.random.uniform(np.pi / 3, np.pi)
            else:
                omega_c = np.random.uniform(np.pi / 5, np.pi)
            kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
        else:
            kernel = random_mixed_kernels(
                self.kernel_list,
                self.kernel_prob,
                kernel_size,
                self.blur_sigma,
                self.blur_sigma, [-math.pi, math.pi],
                self.betag_range,
                self.betap_range,
                noise_range=None)
        # pad kernel to the fixed 21x21 size
        pad_size = (21 - kernel_size) // 2
        kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
        kernel = torch.FloatTensor(kernel)

        # ------------------------ Generate kernels (used in the second degradation) ------------------------ #
        kernel_size = random.choice(self.kernel_range)
        if np.random.uniform() < self.opt['sinc_prob2']:
            if kernel_size < 13:
                omega_c = np.random.uniform(np.pi / 3, np.pi)
            else:
                omega_c = np.random.uniform(np.pi / 5, np.pi)
            kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
        else:
            kernel2 = random_mixed_kernels(
                self.kernel_list2,
                self.kernel_prob2,
                kernel_size,
                self.blur_sigma2,
                self.blur_sigma2, [-math.pi, math.pi],
                self.betag_range2,
                self.betap_range2,
                noise_range=None)

        # pad kernel to the fixed 21x21 size
        pad_size = (21 - kernel_size) // 2
        kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size)))
        kernel2 = torch.FloatTensor(kernel2)

        # ------------------------------------- the final sinc kernel ------------------------------------- #
        if np.random.uniform() < self.opt['final_sinc_prob']:
            kernel_size = random.choice(self.kernel_range)
            omega_c = np.random.uniform(np.pi / 3, np.pi)
            sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21)
            sinc_kernel = torch.FloatTensor(sinc_kernel)
        else:
            sinc_kernel = self.pulse_tensor
        return kernel, kernel2, sinc_kernel

    def generate_lr(self, data):
        """Accept data from dataloader, and then add two-order degradations to obtain LQ images.

        Args:
            data (dict): ``gt`` (1, C, H, W) tensor in [0, 1] plus the
                ``kernel1``/``kernel2``/``sinc_kernel`` blur kernels.

        Returns:
            tuple: ``(lq, hq)`` with the batch dimension stripped (C, H, W);
            ``hq`` is the USM-sharpened GT.
        """
        # training data synthesis
        self.gt = data['gt'].to(self.device)
        self.gt_usm = self.usm_sharpener(self.gt)

        self.kernel1 = data['kernel1'].to(self.device)
        self.kernel2 = data['kernel2'].to(self.device)
        self.sinc_kernel = data['sinc_kernel'].to(self.device)

        ori_h, ori_w = self.gt.size()[2:4]

        # ----------------------- The first degradation process ----------------------- #
        # blur
        out = filter2D(self.gt_usm, self.kernel1)
        # random resize
        updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
        if updown_type == 'up':
            scale = np.random.uniform(1, self.opt['resize_range'][1])
        elif updown_type == 'down':
            scale = np.random.uniform(self.opt['resize_range'][0], 1)
        else:
            scale = 1
        mode = random.choice(['area', 'bilinear', 'bicubic'])
        out = F.interpolate(out, scale_factor=scale, mode=mode)
        # add noise (Gaussian with prob. gaussian_noise_prob, else Poisson)
        gray_noise_prob = self.opt['gray_noise_prob']
        if np.random.uniform() < self.opt['gaussian_noise_prob']:
            out = random_add_gaussian_noise_pt(
                out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob)
        else:
            out = random_add_poisson_noise_pt(
                out,
                scale_range=self.opt['poisson_scale_range'],
                gray_prob=gray_noise_prob,
                clip=True,
                rounds=False)
        # JPEG compression
        jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range'])
        out = torch.clamp(out, 0, 1)  # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts
        out = self.jpeger(out, quality=jpeg_p)

        # ----------------------- The second degradation process ----------------------- #
        # blur
        if np.random.uniform() < self.opt['second_blur_prob']:
            out = filter2D(out, self.kernel2)
        # random resize
        updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
        if updown_type == 'up':
            scale = np.random.uniform(1, self.opt['resize_range2'][1])
        elif updown_type == 'down':
            scale = np.random.uniform(self.opt['resize_range2'][0], 1)
        else:
            scale = 1
        mode = random.choice(['area', 'bilinear', 'bicubic'])
        out = F.interpolate(
            out, size=(int(ori_h * scale), int(ori_w * scale)), mode=mode)
        # add noise
        gray_noise_prob = self.opt['gray_noise_prob2']
        if np.random.uniform() < self.opt['gaussian_noise_prob2']:
            out = random_add_gaussian_noise_pt(
                out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob)
        else:
            out = random_add_poisson_noise_pt(
                out,
                scale_range=self.opt['poisson_scale_range2'],
                gray_prob=gray_noise_prob,
                clip=True,
                rounds=False)

        # JPEG compression + the final sinc filter
        # We also need to resize images to desired sizes. We group [resize back + sinc filter] together
        # as one operation.
        # We consider two orders:
        #   1. [resize back + sinc filter] + JPEG compression
        #   2. JPEG compression + [resize back + sinc filter]
        # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines.
        if np.random.uniform() < 0.5:
            # resize back + the final sinc filter
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(out, size=(ori_h, ori_w), mode=mode)
            out = filter2D(out, self.sinc_kernel)
            # JPEG compression
            jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
            out = torch.clamp(out, 0, 1)
            out = self.jpeger(out, quality=jpeg_p)
        else:
            # JPEG compression
            jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
            out = torch.clamp(out, 0, 1)
            out = self.jpeger(out, quality=jpeg_p)
            # resize back + the final sinc filter
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(out, size=(ori_h, ori_w), mode=mode)
            out = filter2D(out, self.sinc_kernel)

        # clamp and round (quantize to 8-bit levels, back to [0, 1])
        lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.
        lq = lq.contiguous()  # for the warning: grad and param do not obey the gradient layout contract

        hq = self.usm_sharpener(self.gt)
        return lq[0], hq[0]

    def __len__(self):
        """Number of GT images gathered from the meta lists."""
        return len(self.paths)
387
+
388
def _derive_pair_folders(gt_folder, variant):
    """Map a GT image folder to its (HQ, LQ) output folders for one variant.

    DIV2K-style ``*_HR`` folders get ``pair/<variant>/{HR,LR}`` spliced in;
    other datasets are assumed to keep images under an ``images`` directory.
    """
    if "DIV2K_train_HR" in gt_folder or "DIV2K_valid_HR" in gt_folder:
        hq_folder = gt_folder.replace("HR", f"pair/{variant}/HR")
        lq_folder = gt_folder.replace("HR", f"pair/{variant}/LR")
    else:
        hq_folder = os.path.join(gt_folder.replace("images", f"images_pair/{variant}"), "HR/")
        lq_folder = os.path.join(gt_folder.replace("images", f"images_pair/{variant}"), "LR/")
    return hq_folder, lq_folder


def _dump_split(data_dl, data_rt, variant, list_name_fmt):
    """Save every (LQ, HQ) pair yielded by *data_dl* and write meta lists.

    Args:
        data_dl: DataLoader yielding ``(lq, hq, gt_path)`` with batch size 1.
        data_rt: Dataset root; meta lists are written below it.
        variant: Output sub-folder tag, e.g. ``"SR1"`` or ``"SR"``.
        list_name_fmt: Format string with a ``{dset}`` placeholder giving the
            per-dataset meta list path relative to *data_rt*.
    """
    meta_info = {}
    for lq, hq, path in tqdm(data_dl):
        file_name = os.path.basename(path[0])
        gt_folder = os.path.dirname(path[0])
        hq_folder, lq_folder = _derive_pair_folders(gt_folder, variant)

        os.makedirs(hq_folder, exist_ok=True)
        os.makedirs(lq_folder, exist_ok=True)

        hq_path = os.path.join(hq_folder, file_name)
        lq_path = os.path.join(lq_folder, file_name)

        save_image(hq[0], hq_path)
        save_image(lq[0], lq_path)

        # group pairs by the top-level dataset directory (e.g. "DIV2K")
        dset = os.path.relpath(gt_folder, data_rt).split("/")[0]
        meta_info.setdefault(dset, []).append((lq_path, hq_path))

    for dset, dlist in meta_info.items():
        meta_path = os.path.join(data_rt, list_name_fmt.format(dset=dset))
        with open(meta_path, 'w') as fp:
            for lq_path, hq_path in dlist:
                # the third column is a placeholder label; downstream readers
                # treat the literal "None" as a missing path
                fp.write('{} {} {}\n'.format(lq_path, hq_path, None))
        print(meta_path, len(dlist))


def real_esrgan_sampler():
    """
    Generate degraded LQ/HQ pairs with the Real-ESRGAN two-stage pipeline.

    Each training round produces one independently degraded variant
    (SR1..SRn) of every GT image; the validation split gets a single
    variant ("SR"). A "<lq> <hq> None" meta list is written per dataset.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_samples', type=int, default=3, help='train: one to many')
    args = parser.parse_args()

    # generate training dataset: one degraded variant per round
    dataset = RealESRGANDataset(cfg, train=True)
    data_dl = data.DataLoader(
        dataset,
        batch_size = 1
    )
    print("Train Data:", dataset.data_rt, len(data_dl))
    for number in range(args.num_samples):
        print("=" * 100)
        print(f"Generate round {number}...")
        _dump_split(data_dl, dataset.data_rt, f"SR{number + 1}",
                    f"{{dset}}/metas/{{dset}}_train_SR{number + 1}.list")

    # generate testing dataset: single variant
    dataset = RealESRGANDataset(cfg, train=False)
    data_dl = data.DataLoader(
        dataset,
        batch_size = 1
    )
    print("Test Data:", dataset.data_rt, len(data_dl))
    print("=" * 100)
    # BUG FIX: the original printed the stale `number` left over from the
    # training loop here, mislabelling the validation pass as a train round.
    print("Generate test split...")
    _dump_split(data_dl, dataset.data_rt, "SR", "{dset}/metas/{dset}_valid_SR.list")
487
def simple_multiscale():
    """
    Generate multi-scale versions for GT images with LANCZOS resampling.
    It is now used for DF2K dataset (DIV2K + Flickr 2K)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='DIV2K/DIV2K_train_HR', help='Input folder')
    parser.add_argument('--output', type=str, default='DIV2K/DIV2K_train_multiscale', help='Output folder')
    args = parser.parse_args()
    os.makedirs(args.output, exist_ok=True)

    # Three fixed scales, plus one extra variant whose shortest edge is 400.
    # Every variant is downscaled with LANCZOS, then blown back up to the
    # original size with NEAREST so the output keeps the GT resolution.
    scale_list = [0.75, 0.5, 1 / 3]
    shortest_edge = 400

    for img_path in sorted(glob.glob(os.path.join(args.input, '*'))):
        stem = os.path.splitext(os.path.basename(img_path))[0]

        img = Image.open(img_path)
        width, height = img.size
        for level, scale in enumerate(scale_list):
            print(f'\t{scale:.2f}')
            shrunk = img.resize((int(width * scale), int(height * scale)), resample=Image.LANCZOS)
            restored = shrunk.resize((width, height), resample=Image.NEAREST)
            restored.save(os.path.join(args.output, f'{stem}T{level}.png'))

        # extra variant: pin the shortest edge to 400 px, keep aspect ratio
        if width < height:
            new_w = shortest_edge
            new_h = int(new_w * (height / width))
        else:
            new_h = shortest_edge
            new_w = int(new_h * (width / height))
        shrunk = img.resize((new_w, new_h), resample=Image.LANCZOS)
        restored = shrunk.resize(img.size, resample=Image.NEAREST)
        restored.save(os.path.join(args.output, f'{stem}T{len(scale_list)}.png'))
527
+
528
+
529
if __name__ == '__main__':
    # Fixed seed so repeated runs regenerate identical degraded pairs.
    set_seed(1229)
    # simple version
    # simple_multiscale()

    # Real-ESRGAN for data generation
    real_esrgan_sampler()

# python 2_generate_lowresolution.py --num_samples 3
generate_noise.py ADDED
@@ -0,0 +1,308 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import glob
3
+ import os
4
+ from PIL import Image
5
+
6
+ import cv2
7
+ import math
8
+ import numpy as np
9
+ import os
10
+ import os.path as osp
11
+ import random
12
+ import time
13
+ import torch
14
+ from tqdm import tqdm
15
+
16
+ from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
17
+ from basicsr.data.transforms import augment
18
+ from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
19
+ from basicsr.utils.registry import DATASET_REGISTRY
20
+ from torch.utils import data as data
21
+ from torchvision.transforms.functional import center_crop
22
+ import torchvision.transforms as T
23
+ from torchvision.utils import save_image
24
+
25
+ from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt
26
+ from basicsr.data.transforms import paired_random_crop
27
+ from basicsr.utils import DiffJPEG, USMSharp
28
+ from basicsr.utils.img_process_util import filter2D
29
+ from basicsr.utils.registry import MODEL_REGISTRY
30
+ from collections import OrderedDict
31
+ from torch.nn import functional as F
32
+
33
# Configuration for synthetic noise generation over the denoise datasets.
# NOTE(review): "name" still says "DF2K+OST" — looks copied from the SR
# config; harmless but misleading.
cfg = {
    # dataset info.
    "name": "DF2K+OST",
    "type": "RealESRGANDataset",
    "dataroot_gt": "/home/CORP/hsiang.chen/Project/Datasets/IR/Denoise",
    "meta_train": [
        "BSD68/metas/BSD68.list",
        "BSD400/metas/BSD400.list",
        "CBSD68/metas/CBSD68.list",
        "Kodak/metas/Kodak.list",
        "McMaster/metas/McMaster.list",
        "Set12/metas/Set12.list",
        "Urban100/metas/Urban100.list",
        "WaterlooED/metas/WaterlooED.list",
    ],
    # NOTE(review): "meta_test" is commented out, but NoiseDataset reads
    # opt['meta_test'] when constructed with train=False — re-enable a valid
    # list before building a test split or it will raise KeyError.
    # "meta_test": ["DIV2K/metas/DIV2K_valid_HR.list"],

    # the first degradation process
    # NOTE(review): the blur/resize/jpeg keys below appear copied from the SR
    # config; the visible NoiseDataset code only reads dataroot_gt,
    # meta_train/meta_test, gt_size and keep_ratio — confirm before pruning.
    "resize_prob": [0.2, 0.7, 0.1],  # up, down, keep
    "resize_range": [0.15, 1.5],
    "gaussian_noise_prob": 0.5,
    "noise_range": [1, 30],
    "poisson_scale_range": [0.05, 3],
    "gray_noise_prob": 0.4,
    "jpeg_range": [30, 95],

    "blur_kernel_size": 21,
    "kernel_list": ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'],
    "kernel_prob": [0.45, 0.25, 0.12, 0.03, 0.12, 0.03],
    "sinc_prob": 0.1,
    "blur_sigma": [0.2, 3],
    "betag_range": [0.5, 4],
    "betap_range": [1, 2],

    # the second degradation process
    "second_blur_prob": 0.8,
    "resize_prob2": [0.3, 0.4, 0.3],  # up, down, keep
    "resize_range2": [0.3, 1.2],
    "gaussian_noise_prob2": 0.5,
    "noise_range2": [1, 25],
    "poisson_scale_range2": [0.05, 2.5],
    "gray_noise_prob2": 0.4,
    "jpeg_range2": [30, 95],

    "blur_kernel_size2": 21,
    "kernel_list2": ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'],
    "kernel_prob2": [0.45, 0.25, 0.12, 0.03, 0.12, 0.03],
    "sinc_prob2": 0.1,
    "blur_sigma2": [0.2, 1.5],
    "betag_range2": [0.5, 4],
    "betap_range2": [1, 2],

    "final_sinc_prob": 0.8,

    "gt_size": 512,  # GT crop size (pixels)
    "keep_ratio": True,  # keep aspect ratio when resizing before the crop
    "use_hflip": True,
    "use_rot": False,

    # data loader
    "use_shuffle": True,
    "num_worker_per_gpu": 5,
    "batch_size_per_gpu": 12,
    "dataset_enlarge_ratio": 1,
    "prefetch_mode": None,
}
99
+
100
+ def set_seed(seed=42):
101
+ random.seed(seed)
102
+ np.random.seed(seed)
103
+ torch.manual_seed(seed)
104
+ torch.cuda.manual_seed(seed)
105
+ torch.cuda.manual_seed_all(seed)
106
+
107
+ torch.backends.cudnn.deterministic = True
108
+ torch.backends.cudnn.benchmark = False
109
+
110
@DATASET_REGISTRY.register()
class NoiseDataset(data.Dataset):
    """Dataset used for Denoise model:
    synthetic Gaussian and Poisson noise dataset.

    Returns (noisy, clean, path) triples: each clean GT image is resized,
    cropped to ``gt_size`` and corrupted with Poisson shot noise followed by
    additive Gaussian noise at one of five sigma levels.
    """
    def __init__(self, opt, train=True, level=None):
        # opt: dict-like config; requires 'dataroot_gt', 'meta_train'/'meta_test',
        #      'gt_size' and 'keep_ratio' keys.
        # train: selects the meta lists and enables horizontal flipping.
        # level: optional single noise level in 1..5; None means sample all five.
        super(NoiseDataset, self).__init__()
        self.opt = opt

        # kernel define
        self.data_rt = opt['dataroot_gt']

        # dataload
        self.train = train
        if self.train:
            self.metas = opt['meta_train']
        else:
            self.metas = opt['meta_test']

        # Each meta file line is space-separated; the GT image path is the
        # second field (index 1).
        self.paths = []
        for meta in self.metas:
            with open(os.path.join(self.data_rt, meta)) as fin:
                self.paths += [line.strip().split(' ')[1] for line in fin]

        # hyperparameter

        # NOTE(review): requires an initialized CUDA context; jpeger and
        # usm_sharpener are built here but never used in __getitem__ below —
        # presumably kept for parity with other degradation datasets; confirm
        # before removing.
        self.device = torch.cuda.current_device()
        self.jpeger = DiffJPEG(differentiable=False).to(self.device)  # simulate JPEG compression artifacts
        self.usm_sharpener = USMSharp().to(self.device)  # do usm sharpening
        self.resize = opt['gt_size']
        self.keep_ratio = opt['keep_ratio']

        # function
        self.crop = T.RandomCrop((self.resize, self.resize))
        self.flip = T.RandomHorizontalFlip()
        self.transform = T.Compose(
            [
                # T.ToDtype(torch.float32, scale=True), # only support for torch 2.++
                T.ToTensor(),
            ]
        )

        # noise
        self.sigma = [0.0588, 0.0784, 0.098, 0.1451, 0.1961]  # 5 levels: 15, 20, 25, 37, 50
        # NOTE: a falsy level (None or 0) selects all five levels.
        if level:
            self.level = [level]
        else:
            self.level = [1,2,3,4,5]

    def __getitem__(self, index):
        # -------------------------------- Load gt images -------------------------------- #
        gt_path = self.paths[index]
        img_gt = Image.open(gt_path).convert("RGB")

        # -------------------------------- Image Process --------------------------------
        # resize
        h, w = img_gt.height, img_gt.width
        if self.keep_ratio:
            # Scale so the shorter side is 1.2x the crop size, leaving slack
            # for the subsequent random crop.
            ratio = self.resize / min(h, w)
            h_new, w_new = round(h * ratio * 1.2), round(w * ratio * 1.2)
            img_gt = img_gt.resize((w_new, h_new), resample=Image.LANCZOS)
        else:
            img_gt = img_gt.resize((self.resize, self.resize), resample=Image.LANCZOS)
        # crop and
        img_gt = self.crop(img_gt)
        # flip (only for train)
        if self.train:
            img_gt = self.flip(img_gt)
        # transform to tensor
        img_gt = self.transform(img_gt).to(torch.float32)

        # -------------------------------- Generate Noise --------------------------------
        # Poisson Noise: treat pixel intensities as photon counts with a peak
        # of 255, sample counts, then renormalize back to [0, 1].
        peak = 255
        lam = torch.clamp(img_gt, 0, 1) * peak
        counts = torch.poisson(lam)
        img_poisson = torch.clamp(counts / float(peak), 0.0, 1.0)

        # Gaussian Noise: additive, sigma picked from the configured level(s);
        # level is 1-based, hence the level-1 index.
        level = random.choice(self.level)
        noise = torch.randn(size=img_poisson.size())
        img_poisson_gaussian = torch.clamp(img_poisson + self.sigma[level-1] * noise, 0., 1.)

        # (noisy LQ, clean HQ, source path)
        return img_poisson_gaussian, img_gt, gt_path

    def __len__(self):
        return len(self.paths)
197
+
198
def poisson_gaussian_sampler():
    """Generate paired noisy/clean training images plus meta list files.

    It is now used for DF2K dataset (DIV2K + Flickr 2K).

    CLI:
        --level N   generate only noise level N; when omitted, levels
                    1, 3 and 5 are generated.

    Side effects: writes HQ/LQ image pairs under per-dataset
    ``.../Noise_L{level}/{HQ,LQ}/`` folders and one
    ``{dset}_train_Noise_L{level}.list`` meta file per dataset.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--level', type=int, default=None, help='train: one to many')
    args = parser.parse_args()

    # BUGFIX: args.level is a single int; the loop below iterates a list of
    # levels, so it must be wrapped (the original `level = args.level` raised
    # "TypeError: 'int' object is not iterable" whenever --level was passed).
    if args.level:
        levels = [args.level]
    else:
        levels = [1, 3, 5]

    # generate training dataset
    for number in levels:
        print("=" * 100)
        print(f"Generate Noise Level {number}...")

        dataset = NoiseDataset(cfg, train=True, level=number)
        data_dl = data.DataLoader(
            dataset,
            batch_size=1
        )
        print("Train Data:", dataset.data_rt, len(data_dl))
        meta_info = {}
        for sample in tqdm(data_dl):
            lq, hq, path = sample
            # e.g. <root>/DIV2K/DIV2K_train_HR/0098.png
            file_name = os.path.basename(path[0])
            gt_folder = os.path.dirname(path[0])
            # Map the GT folder to per-level paired output folders; the
            # directory layout differs per dataset family.
            if "DIV2K_train_HR" in gt_folder or "DIV2K_valid_HR" in gt_folder:
                hq_folder = gt_folder.replace("HR", f"pair/Noise_L{number}/HQ")
                lq_folder = gt_folder.replace("HR", f"pair/Noise_L{number}/LQ")
            elif "Denoise" in gt_folder:
                hq_folder = os.path.join(gt_folder.replace("image", f"image_pair/Noise_L{number}"), "HQ/")
                lq_folder = os.path.join(gt_folder.replace("image", f"image_pair/Noise_L{number}"), "LQ/")
            else:
                hq_folder = os.path.join(gt_folder.replace("images", f"images_pair/Noise_L{number}"), "HQ/")
                lq_folder = os.path.join(gt_folder.replace("images", f"images_pair/Noise_L{number}"), "LQ/")

            os.makedirs(hq_folder, exist_ok=True)
            os.makedirs(lq_folder, exist_ok=True)

            hq_path = os.path.join(hq_folder, file_name)
            lq_path = os.path.join(lq_folder, file_name)

            save_image(hq[0], hq_path)
            save_image(lq[0], lq_path)

            # First path component below the data root identifies the dataset.
            dset = os.path.relpath(gt_folder, dataset.data_rt).split("/")[0]
            meta_info.setdefault(dset, []).append((lq_path, hq_path))

        for dset, dlist in meta_info.items():
            meta_path = os.path.join(
                dataset.data_rt, '{}/metas/{}_train_Noise_L{}.list'.format(dset, dset, number))
            # Robustness: the metas/ directory may not exist yet.
            os.makedirs(os.path.dirname(meta_path), exist_ok=True)
            with open(meta_path, 'w') as fp:
                for item in dlist:
                    # Third field is a literal "None" placeholder, matching
                    # the reader's space-separated three-field format.
                    fp.write('{} {} {}\n'.format(item[0], item[1], None))
            print(meta_path, len(dlist))

    # NOTE: the commented-out test-set generation pass (NoiseDataset with
    # train=False writing *_valid_Noise.list) was removed as dead code; it can
    # be recreated from this loop by passing train=False and dropping the
    # per-level suffix.
302
+
303
if __name__ == '__main__':
    # Fixed seed so the sampled Poisson/Gaussian noise — and therefore the
    # generated dataset — is reproducible across runs.
    set_seed(1229)
    # poisson_gaussian for data generation
    poisson_gaussian_sampler()

    # python 3_generate_noise.py