# PIR_tar/scripts/0_generate_list.py
# (source: AaronCIH, commit c5a80d3 "Update scripts/0_generate_list.py")
# Generates metas/*.list index files for all restoration datasets below.
import os
import shutil
import sys
from pathlib import Path

import numpy as np
from PIL import Image  # used by mat2png() in the UDC section
from scipy.io import loadmat  # used by mat2png() in the UDC section
# Root folder holding every restoration dataset; adjust per machine.
base_rt = "/home/CORP/hsiang.chen/Project/Datasets/IR"
"""
Deblur: GoPro, HIDE, RealBlur
"""
# GoPro
# pre-process for GoPro, dataset: https://seungjunnah.github.io/Datasets/gopro
# Seungjun Nah, Tae Hyun Kim, and Kyoung Mu Lee. Deep multi-scale convolutional neural network for dynamic scene deblurring. In CVPR, 2017
# GoPro deblurring: every frame in <scene>/blur pairs with the identically
# named frame in <scene>/sharp.
rt = os.path.join(base_rt, "Deblur/GoPro")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['train', 'test']:
    dset_pth = os.path.join(rt, dset)
    list_file = []
    total_ct = 0
    for sample_folder in os.listdir(dset_pth):  # scene folders, e.g. GOPRO372_07_00
        folder_pth = os.path.join(dset_pth, sample_folder)
        blur_folder = os.path.join(folder_pth, 'blur')
        sharp_folder = os.path.join(folder_pth, 'sharp')
        for ct, sample in enumerate(os.listdir(blur_folder)):
            if any(ext in sample for ext in
                   (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
                list_file.append((os.path.join(blur_folder, sample),
                                  os.path.join(sharp_folder, sample)))
                total_ct += 1
    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for blur_file, sharp_file in list_file:
            fp.write('{} {} {}\n'.format(blur_file, sharp_file, None))
    print(dset_pth, total_ct)
# HIDE
# pre-process for HIDE, dataset: https://github.com/joanshen0508/HA_deblur
# Ziyi Shen, Wenguan Wang, Xiankai Lu, Jianbing Shen, Haibin Ling, Tingfa Xu, and Ling Shao. Human-aware mo- tion deblurring. In ICCV, 2019.
# ==============================================
# HIDE/
# |- train/{image, gt}
# |- test/{image, gt}
# ==============================================
# HIDE deblurring: blurred images sit directly in train/ (or in the two test
# sub-folders); all sharp ground truth lives in a single GT/ folder.
rt = os.path.join(base_rt, "Deblur/HIDE")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['train', 'test']:
    dset_pth = os.path.join(rt, dset)
    sharp_folder = os.path.join(rt, 'GT')
    if dset == 'train':
        blur_dirs = [dset_pth]
    else:
        blur_dirs = [os.path.join(dset_pth, sub)
                     for sub in ['test-close-ups', 'test-long-shot']]
    list_file = []
    total_ct = 0
    for blur_folder in blur_dirs:
        for ct, sample in enumerate(os.listdir(blur_folder)):
            if any(ext in sample for ext in
                   (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
                list_file.append((os.path.join(blur_folder, sample),
                                  os.path.join(sharp_folder, sample)))
                total_ct += 1
    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for blur_file, sharp_file in list_file:
            fp.write('{} {} {}\n'.format(blur_file, sharp_file, None))
    print(dset_pth, total_ct)
# RealBlur
# pre-process for RealBlur-J,R, dataset: https://github.com/rimchang/RealBlur
# Jaesung Rim, Haeyun Lee, Jucheol Won, and Sunghyun Cho. Real-world blur dataset for learning and benchmarking de- blurring algorithms. In ECCV, 2020.
# RealBlur-J / RealBlur-R: official test lists carry "gt blur" path pairs
# relative to the Deblur root; a single-token line has no ground truth.
rt = os.path.join(base_rt, "Deblur/")
realblur_j_test_txt = os.path.join(rt, "RealBlur-J_ECC_IMCORR_centroid_itensity_ref/RealBlur_J_test_list.txt")
realblur_r_test_txt = os.path.join(rt, "RealBlur-R_BM3D_ECC_IMCORR_centroid_itensity_ref/RealBlur_R_test_list.txt")
set_dict = {realblur_j_test_txt: os.path.join(rt, "RealBlur-J_ECC_IMCORR_centroid_itensity_ref"),
            realblur_r_test_txt: os.path.join(rt, "RealBlur-R_BM3D_ECC_IMCORR_centroid_itensity_ref")}
for txt in [realblur_j_test_txt, realblur_r_test_txt]:
    data_list = []
    total_ct = 0
    miss_ct = 0
    with open(txt) as fin:
        for ct, line in enumerate(fin):
            tokens = line.strip().split()
            if len(tokens) == 1:
                # blur image only, no paired ground truth
                data_list.append([None, os.path.join(rt, tokens[0])])
                miss_ct += 1
            else:
                # tokens are [gt, blur]; stored as [blur, gt]
                data_list.append([os.path.join(rt, tokens[1]), os.path.join(rt, tokens[0])])
                total_ct += 1
    meta_folder = os.path.join(set_dict[txt], "metas")
    os.makedirs(meta_folder, exist_ok=True)
    with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
        for lq, gt in data_list:
            fp.write('{} {} {}\n'.format(lq, gt, None))
    print(os.path.join(meta_folder, 'test.list'), total_ct, miss_ct)
"""
Dehaze: 4kID, NH-Haze, OTS, SOTS
"""
# OTS
# pre-process for OTS, dataset: https://sites.google.com/view/reside-dehaze-datasets/reside-%CE%B2?authuser=0
# Boyi Li, Wenqi Ren, Dengpan Fu, Dacheng Tao, Dan Feng, Wenjun Zeng, and Zhangyang Wang. Benchmarking single- image dehazing and beyond. TIP, 2018.
# OTS (RESIDE-beta outdoor training set): a hazy file like "0025_0.8_0.1.jpg"
# maps to the clear image "gt/0025.jpg" (prefix before the first underscore).
rt = os.path.join(base_rt, "Dehaze/OTS")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
set_dict = {"hazy": 'train'}
for dset in ['hazy']:
    dset_pth = os.path.join(rt, dset)
    gt_folder = os.path.join(rt, 'gt')
    list_file = []
    total_ct = 0
    for sub_folder in os.listdir(dset_pth):
        input_folder = os.path.join(dset_pth, sub_folder)
        for ct, sample in enumerate(os.listdir(input_folder)):
            if any(ext in sample for ext in
                   (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
                gt_name = "%s.jpg" % (sample.split('_')[0])
                list_file.append((os.path.join(input_folder, sample),
                                  os.path.join(gt_folder, gt_name)))
                total_ct += 1
    with open(os.path.join(meta_folder, '{}.list'.format(set_dict[dset])), 'w') as fp:
        for lq, gt in list_file:
            fp.write('{} {} {}\n'.format(lq, gt, None))
    print(dset_pth, total_ct)
# SOTS
# pre-process for SOTS, dataset: https://sites.google.com/view/reside-dehaze-datasets/reside-standard
# Boyi Li, Wenqi Ren, Dengpan Fu, Dacheng Tao, Dan Feng, Wenjun Zeng, and Zhangyang Wang. Benchmarking single- image dehazing and beyond. TIP, 2018.
# SOTS outdoor test split: hazy "<id>_<beta>.png" pairs with "gt/<id>.png".
rt = os.path.join(base_rt, "Dehaze/SOTS")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
set_dict = {"outdoor": 'test'}
for dset in ['outdoor']:
    dset_pth = os.path.join(rt, dset)
    input_folder = os.path.join(dset_pth, 'hazy')
    gt_folder = os.path.join(dset_pth, 'gt')
    list_file = []
    total_ct = 0
    for ct, sample in enumerate(os.listdir(input_folder)):
        if any(ext in sample for ext in
               (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
            list_file.append((os.path.join(input_folder, sample),
                              os.path.join(gt_folder, "%s.png" % (sample.split("_")[0]))))
            total_ct += 1
    with open(os.path.join(meta_folder, '{}.list'.format(set_dict[dset])), 'w') as fp:
        for lq, gt in list_file:
            fp.write('{} {} {}\n'.format(lq, gt, None))
    print(input_folder, total_ct)
# 4kID
# pre-process for 4kID, dataset: https://github.com/zzr-idam/4KDehazing
# Ultra-High-Definition Image Dehazing via Multi-Guided Bilateral Learning, CVPR21.
# 4kID dehazing: inputs/ and groundtrues/ (dataset's own spelling) share file
# names within each split folder.
rt = os.path.join(base_rt, "Dehaze/4kID")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
set_dict = {"4KDehazing": 'train', "4KDehazing_test": 'test'}
for dset in ['4KDehazing', '4KDehazing_test']:
    dset_pth = os.path.join(rt, dset)
    input_folder = os.path.join(dset_pth, 'inputs')
    gt_folder = os.path.join(dset_pth, 'groundtrues')
    list_file = []
    total_ct = 0
    for ct, sample in enumerate(os.listdir(input_folder)):
        if any(ext in sample for ext in
               (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
            list_file.append((os.path.join(input_folder, sample),
                              os.path.join(gt_folder, sample)))
            total_ct += 1
    with open(os.path.join(meta_folder, '{}.list'.format(set_dict[dset])), 'w') as fp:
        for lq, gt in list_file:
            fp.write('{} {} {}\n'.format(lq, gt, None))
    print(dset_pth, total_ct)
# Unann
# pre-process for unann, dataset: https://sites.google.com/view/reside-dehaze-datasets/reside-%CE%B2?authuser=0
# Unannotated real hazy images: no ground truth, only the input column is
# written (the other two columns stay the literal string "None").
rt = os.path.join(base_rt, "Dehaze/UnannotatedHazyImages")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
input_folder = os.path.join(rt, "Image")
list_file = []
total_ct = 0
for ct, sample in enumerate(os.listdir(input_folder)):
    if any(ext in sample for ext in
           (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
        list_file.append((os.path.join(input_folder, sample), ""))
        total_ct += 1
with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
    for img, _ in list_file:
        fp.write('{} {} {}\n'.format(img, None, None))
print(rt, total_ct)
# NH-Haze
# NH-Haze: indexed here as unpaired images (input column only).
rt = os.path.join(base_rt, "Dehaze/NH-Haze")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
input_folder = os.path.join(rt, "images")
list_file = []
total_ct = 0
for ct, sample in enumerate(os.listdir(input_folder)):
    if any(ext in sample for ext in
           (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
        list_file.append((os.path.join(input_folder, sample), ""))
        total_ct += 1
with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
    for img, _ in list_file:
        fp.write('{} {} {}\n'.format(img, None, None))
print(rt, total_ct)
"""
Denoise: BSD68, BSD400, CBSD68, KodaK, McMaster, Set12, SIDD, Urban100, WaterlooED
"""
"""
pre-process for denoise dataset
1. BSD400:
* David Martin, Charless Fowlkes, Doron Tal, and Jitendra Malik. A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001, volume 2, pages 416–423. IEEE, 2001.
* https://github.com/smartboy110/denoising-datasets/tree/main
2. WED:
* Kede Ma, Zhengfang Duanmu, Qingbo Wu, Zhou Wang, Hongwei Yong, Hongliang Li, and Lei Zhang. Waterloo exploration database: New challenges for image quality as- sessment models. IEEE Transactions on Image Processing, 26(2):1004–1016, 2016.
* https://kedema.org/project/exploration/index.html
3. BSD68:
* David Martin, Charless Fowlkes, Doron Tal, and Jitendra Malik. A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001, volume 2, pages 416–423. IEEE, 2001.
* https://github.com/smartboy110/denoising-datasets/tree/main
4. CBSD68:
* David Martin, Charless Fowlkes, Doron Tal, and Jitendra Malik. A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001, volume 2, pages 416–423. IEEE, 2001.
* https://github.com/smartboy110/denoising-datasets/tree/main
5. Urban100:
* Jia-BinHuang,AbhishekSingh,andNarendraAhuja.Single image super-resolution from transformed self-exemplars. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5197–5206, 2015.
* https://github.com/jbhuang0604/SelfExSR
6. Kodak:
* Rich Franzen. Kodak lossless true color image suite. source: http://r0k. us/graphics/kodak, 4(2), 1999.
* https://www.kaggle.com/datasets/sherylmehta/kodak-dataset
7. McMaster, Set12
"""
# Clean-only denoise sets: the image path goes in the GT (middle) column;
# noisy inputs are synthesized later, hence the leading "None" column.
rt = os.path.join(base_rt, "Denoise")
dataset = ['BSD68', 'BSD400', 'CBSD68', 'Kodak',
           'McMaster', 'Set12', 'Urban100', 'WaterlooED']
for dset in dataset:
    dset_pth = os.path.join(rt, dset)
    meta_folder = os.path.join(dset_pth, "metas")
    os.makedirs(meta_folder, exist_ok=True)
    input_folder = os.path.join(rt, '%s/image' % (dset))
    list_file = []
    total_ct = 0
    for ct, sample in enumerate(os.listdir(input_folder)):
        if any(ext in sample for ext in
               (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
            list_file.append([os.path.join(input_folder, sample)])
            total_ct += 1
    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(None, item[0], None))
    print(input_folder, total_ct)
# for pair data
# Paired (LQ, HQ) denoise variants under <dset>/image_pair/<distortion>/{HQ,LQ};
# file names match between the two folders.
for dset in dataset:
    dset_pth = os.path.join(rt, dset)
    meta_folder = os.path.join(dset_pth, "metas")
    os.makedirs(meta_folder, exist_ok=True)
    input_folder = os.path.join(rt, '%s/image_pair' % (dset))
    for distortion in os.listdir(input_folder):
        hq_folder = os.path.join(input_folder, distortion, "HQ")
        lq_folder = os.path.join(input_folder, distortion, "LQ")
        list_file = []
        total_ct = 0
        for ct, sample in enumerate(os.listdir(hq_folder)):
            if any(ext in sample for ext in
                   (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
                list_file.append((os.path.join(lq_folder, sample),
                                  os.path.join(hq_folder, sample)))
                total_ct += 1
        with open(os.path.join(meta_folder, f"{dset}_{distortion}.list"), 'w') as fp:
            for lq, hq in list_file:
                fp.write('{} {} {}\n'.format(lq, hq, None))
        print(input_folder, total_ct)
# SIDD
# SIDD: train has one folder per scene holding NOISY_SRGB_*/GT_SRGB_* pairs;
# test has flat NOISY/ and GT/ folders with matching names.
rt = os.path.join(base_rt, "Denoise/SIDD")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['train', 'test']:
    list_file = []
    total_ct = 0
    dset_folder = os.path.join(rt, dset)
    if dset == 'train':
        for folder in os.listdir(dset_folder):
            file_folder = os.path.join(dset_folder, folder)
            for ct, sample in enumerate(os.listdir(file_folder)):
                if "NOISY_SRGB" in sample:
                    list_file.append([os.path.join(file_folder, sample),
                                      os.path.join(file_folder, sample.replace("NOISY", "GT"))])
                    total_ct += 1
    elif dset == 'test':
        image_folder = os.path.join(dset_folder, 'NOISY')
        gt_folder = os.path.join(dset_folder, "GT")
        for ct, sample in enumerate(os.listdir(image_folder)):
            if any(ext in sample for ext in
                   (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
                list_file.append([os.path.join(image_folder, sample),
                                  os.path.join(gt_folder, sample.replace("NOISY", "GT"))])
                total_ct += 1
    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for lq, gt in list_file:
            fp.write('{} {} {}\n'.format(lq, gt, None))
    print(dset_folder, total_ct)
"""
Derain: LHPRain, Practical, Rain100L, RainDS, RainTrainL, UHD-Rain, RainDrop
"""
# RainDS
# RainDS: three degradation types per split; the rainy-file prefix maps to the
# gt prefix, e.g. "rd-rain-97.png" -> "norain-97.png".
rt = os.path.join(base_rt, "Derain/RainDS")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
rain_dict = {'rainstreak_raindrop': 'rd-rain', 'rainstreak': 'rain', 'raindrop': 'rd'}
for dset in ['RainDS_syn', 'RainDS_real']:
    dataset_folder = os.path.join(rt, dset)
    if dset == 'RainDS_syn':
        set_list = ['train', 'test']
    else:  # RainDS_real uses *_set folder names
        set_list = ['train_set', 'test_set']
    for subset in set_list:
        input_folder = os.path.join(dataset_folder, subset)
        clear_folder = os.path.join(input_folder, 'gt')
        for raintype in ['rainstreak', 'raindrop', 'rainstreak_raindrop']:
            rain_folder = os.path.join(input_folder, raintype)
            list_file = []
            total_ct = 0
            for ct, sample in enumerate(os.listdir(rain_folder)):
                if any(ext in sample for ext in
                       (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
                    gt_name = sample.replace(rain_dict[raintype], "norain")
                    list_file.append((os.path.join(rain_folder, sample),
                                      os.path.join(clear_folder, gt_name)))
                    total_ct += 1
            with open(os.path.join(meta_folder, '{}_{}_{}.list'.format(dset, subset, raintype)), 'w') as fp:
                for lq, gt in list_file:
                    fp.write('{} {} {}\n'.format(lq, gt, None))
            print(rain_folder, total_ct)
# LHPRain
# pre-process for LHPRain: https://github.com/yunguo224/LHP-Rain
# From Sky to the Ground: A Large-scale Benchmark and Simple Baseline Towards Real Rain Removal (ICCV 2023)
# LHP-Rain: parallel input/<split> and gt/<split> folders, matching file names.
rt = os.path.join(base_rt, "Derain/LHPRain")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['train', 'val', 'test']:
    input_folder = os.path.join(rt, "input/%s" % (dset))
    gt_folder = os.path.join(rt, "gt/%s" % (dset))
    list_file = []
    total_ct = 0
    for ct, sample in enumerate(os.listdir(input_folder)):
        if any(ext in sample for ext in
               (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
            list_file.append((os.path.join(input_folder, sample),
                              os.path.join(gt_folder, sample)))
            total_ct += 1
    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for lq, gt in list_file:
            fp.write('{} {} {}\n'.format(lq, gt, None))
    print(input_folder, total_ct)
# Practical
# pre-process for Practical (real-world rainy images): https://github.com/ZhangXinNan/RainDetectionAndRemoval
# WenhanYang,RobbyTTan,JiashiFeng,JiayingLiu,Zong- ming Guo, and Shuicheng Yan. Deep joint rain detection and removal from a single image. In CVPR, 2017
# Practical: real rainy photos without ground truth; input column only.
rt = os.path.join(base_rt, "Derain/Practical")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
input_folder = os.path.join(rt, "image")
list_file = []
total_ct = 0
for ct, sample in enumerate(os.listdir(input_folder)):
    if any(ext in sample for ext in
           (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
        list_file.append(os.path.join(input_folder, sample))
        total_ct += 1
with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
    for item in list_file:
        fp.write('{} {} {}\n'.format(item, None, None))
print(rt, total_ct)
# Rain100L
# pre-process for Rain100L: https://github.com/shangwei5/BRN
# WenhanYang,RobbyTTan,JiashiFeng,JiayingLiu,Zong- ming Guo, and Shuicheng Yan. Deep joint rain detection and removal from a single image. In Proceedings of the IEEE conference on computer vision and pattern recogni- tion, pages 1357–1366, 2017.
# norain-xxx.png
# rain-xxx.png
# rainregion-xxx.png
# rainstreak-xxx.png
# Rain100L test set: "rain-xxx.png" in image/ pairs with "norain-xxx.png" in gt/.
rt = os.path.join(base_rt, "Derain/Rain100L")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
image_folder = os.path.join(rt, 'image')
gt_folder = os.path.join(rt, 'gt')
list_file = []
total_ct = 0
for ct, sample in enumerate(os.listdir(image_folder)):
    if any(ext in sample for ext in
           (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
        list_file.append((os.path.join(image_folder, sample),
                          os.path.join(gt_folder, sample.replace('rain', 'norain'))))
        total_ct += 1
with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
    for lq, gt in list_file:
        fp.write('{} {} {}\n'.format(lq, gt, None))
print(rt, total_ct)
# Rain200L
# pre-process for RainTrainL(Rain200L): https://github.com/shangwei5/BRN
# Yang W, Tan RT, Feng J, Liu J, Guo Z, Yan S. Deep joint rain detection and removal from a single image. In IEEE CVPR 2017.
# norain-xxx.png
# rain-xxx.png
# rainregion-xxx.png
# rainstreak-xxx.png
# RainTrainL (Rain200L train split): same layout/naming as Rain100L.
rt = os.path.join(base_rt, "Derain/RainTrainL")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
image_folder = os.path.join(rt, 'image')
gt_folder = os.path.join(rt, 'gt')
list_file = []
total_ct = 0
for ct, sample in enumerate(os.listdir(image_folder)):
    if any(ext in sample for ext in
           (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
        list_file.append((os.path.join(image_folder, sample),
                          os.path.join(gt_folder, sample.replace('rain', 'norain'))))
        total_ct += 1
with open(os.path.join(meta_folder, 'train.list'), 'w') as fp:
    for lq, gt in list_file:
        fp.write('{} {} {}\n'.format(lq, gt, None))
print(rt, total_ct)
# UHD-Rain
# pre-process for UHD-Rain: https://github.com/wlydlut/uhddip
# UHDDIP: Ultra-High-Definition Restoration: New Benchmarks and A Dual Interaction Prior-Driven Solution
# UHD-Rain: input/ and gt/ share file names within each split folder.
rt = os.path.join(base_rt, "Derain/UHD-Rain")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
set_dict = {'training_set': 'train', 'testing_set': 'test'}
for dset in ['training_set', 'testing_set']:
    dset_path = os.path.join(rt, dset)
    input_folder = os.path.join(dset_path, 'input')
    gt_folder = os.path.join(dset_path, 'gt')
    list_file = []
    for sample in os.listdir(input_folder):
        # Image-extension filter added for consistency with every other
        # section; the original indexed every directory entry unconditionally.
        if ".png" in sample or ".jpg" in sample or ".jpeg" in sample or ".bmp" in sample or ".tif" in sample or ".JPG" in sample or ".PNG" in sample:
            input_file = os.path.join(input_folder, sample)
            gt_file = os.path.join(gt_folder, sample)
            list_file.append((input_file, gt_file))
    with open(os.path.join(meta_folder, '{}.list'.format(set_dict[dset])), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    # len(list_file) replaces the original `ct+1`, which was stale (or
    # undefined) when the folder was empty and counted non-image entries.
    print(rt, len(list_file))
# Rain-drop
# pre-process for Rain-drop: https://github.com/rui1996/DeRaindrop
# Attentive Generative Adversarial Network for Raindrop Removal from A Single Image (CVPR'2018 Highlight)
# RainDrop: rainy images in <split>/data pair with <split>/gt via the
# "rain" -> "clean" file-name substitution.
rt = os.path.join(base_rt, "Derain/RainDrop")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['train', 'test_a', 'test_b']:
    dset_path = os.path.join(rt, dset)
    input_folder = os.path.join(dset_path, 'data')
    gt_folder = os.path.join(dset_path, 'gt')
    list_file = []
    for sample in os.listdir(input_folder):
        # Image-extension filter added for consistency with every other
        # section; the original indexed every directory entry unconditionally.
        if ".png" in sample or ".jpg" in sample or ".jpeg" in sample or ".bmp" in sample or ".tif" in sample or ".JPG" in sample or ".PNG" in sample:
            input_file = os.path.join(input_folder, sample)
            gt_file = os.path.join(gt_folder, sample.replace("rain", "clean"))
            list_file.append((input_file, gt_file))
    with open(os.path.join(meta_folder, 'Raindrop_{}.list'.format(dset)), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item[0], item[1], None))
    # len(list_file) replaces the original `ct+1`, which was stale (or
    # undefined) when the folder was empty and counted non-image entries.
    print(rt, len(list_file))
"""
Desnow: Snow100k, UHD-Snow
"""
# Snow100k: download: https://pan.baidu.com/s/1Y8fq8qQjC0YK5DTktYPfbQ?pwd=nyop#list/path=/sharelink688030094-540249285017805/snow100k&parentPath=/sharelink688030094-540249285017805
# Training set (50,000 images, 7.8GB), Test set (50,000 images, 7.8GB), Realistic snowy images (1,329 images, 67MB).
# Snow100K root; file names containing blanks are normalized to underscores so
# the space-separated .list format stays parseable.
rt = os.path.join(base_rt, "Desnow/Snow100k")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for img_path in list(Path(rt).rglob("*.[jp][pn]g")):
    pth = str(img_path)
    if " " in pth:
        # NOTE(review): replaces blanks anywhere in the path, including parent
        # directories -- assumes only the file name contains spaces; confirm.
        os.rename(pth, pth.replace(" ", "_"))
# # training
# Training pairs: all/synthetic/<name> <-> all/gt/<name>.
train_rt = os.path.join(rt, 'all')
train_gt_rt = os.path.join(train_rt, 'gt')
train_img_rt = os.path.join(train_rt, 'synthetic')
list_file = []
total_ct = 0
for ct, sample in enumerate(os.listdir(train_img_rt)):
    if any(ext in sample for ext in
           (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
        list_file.append((os.path.join(train_img_rt, sample),
                          os.path.join(train_gt_rt, sample)))
        total_ct += 1
with open(os.path.join(meta_folder, 'train.list'), 'w') as fp:
    for img_, gt_ in list_file:
        fp.write('{} {} {}\n'.format(img_, gt_, None))
print(train_rt, total_ct)
# test_syn
# Synthetic test splits sit under an archive-artifact path; one folder per
# snow level, named so its LAST character (S/M/L) tags the output list file.
test_rt = os.path.join(rt, 'media/jdway/GameSSD/overlapping/test')
for dset in os.listdir(test_rt):
    test_folder = os.path.join(test_rt, dset)
    test_gt_rt = os.path.join(test_folder, 'gt')
    test_img_rt = os.path.join(test_folder, 'synthetic')
    list_file = []
    total_ct = 0
    for ct, sample in enumerate(os.listdir(test_img_rt)):
        if any(ext in sample for ext in
               (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
            list_file.append((os.path.join(test_img_rt, sample),
                              os.path.join(test_gt_rt, sample)))
            total_ct += 1
    with open(os.path.join(meta_folder, 'test_%s.list' % (dset[-1])), 'w') as fp:
        for img_, gt_ in list_file:
            fp.write('{} {} {}\n'.format(img_, gt_, None))
    print(test_folder, total_ct)
# real-world testing
# Realistic snowy photos: no ground truth; input column only.
test_rt = os.path.join(rt, 'realistic')
list_file = []
total_ct = 0
for ct, sample in enumerate(os.listdir(test_rt)):
    if any(ext in sample for ext in
           (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
        list_file.append(os.path.join(test_rt, sample))
        total_ct += 1
with open(os.path.join(meta_folder, 'test_realistic.list'), 'w') as fp:
    for item in list_file:
        fp.write('{} {} {}\n'.format(item, None, None))
print(test_rt, total_ct)
# UHD-Snow
# pre-process for UHD-Snow: https://github.com/wlydlut/uhddip
# UHDDIP: Ultra-High-Definition Restoration: New Benchmarks and A Dual Interaction Prior-Driven Solution
# UHD-Snow: input/ and gt/ share file names within each split folder.
rt = os.path.join(base_rt, "Desnow/UHD-Snow")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
set_dict = {'training_set': 'train', 'testing_set': 'test'}
for dset in ['training_set', 'testing_set']:
    dset_path = os.path.join(rt, dset)
    input_folder = os.path.join(dset_path, 'input')
    gt_folder = os.path.join(dset_path, 'gt')
    list_file = []
    total_ct = 0
    for ct, sample in enumerate(os.listdir(input_folder)):
        if any(ext in sample for ext in
               (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
            list_file.append((os.path.join(input_folder, sample),
                              os.path.join(gt_folder, sample)))
            total_ct += 1
    with open(os.path.join(meta_folder, '{}.list'.format(set_dict[dset])), 'w') as fp:
        for lq, gt in list_file:
            fp.write('{} {} {}\n'.format(lq, gt, None))
    print(dset_path, total_ct)
"""
Lowlight: DICM, LIME, LOL, MEF, NPE, VV
"""
# LOL
# pre-process for LOL: https://github.com/fediory/hvi-cidnet
# Chen Wei, Wenjing Wang, Wenhan Yang, and Jiaying Liu. Deep retinex decomposition for low-light enhancement. In BMVC,2018
# LOL: low/ and high/ exposures share file names; our485 is the train split,
# eval15 the test split.
rt = os.path.join(base_rt, "LowLight/LOL")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
set_dict = {'our485': 'train', 'eval15': 'test'}
for dset in ['our485', 'eval15']:
    dset_path = os.path.join(rt, dset)
    input_folder = os.path.join(dset_path, "low")
    gt_folder = os.path.join(dset_path, "high")
    list_file = []
    total_ct = 0
    for ct, sample in enumerate(os.listdir(input_folder)):
        if any(ext in sample for ext in
               (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
            list_file.append((os.path.join(input_folder, sample),
                              os.path.join(gt_folder, sample)))
            total_ct += 1
    with open(os.path.join(meta_folder, '{}.list'.format(set_dict[dset])), 'w') as fp:
        for lq, gt in list_file:
            fp.write('{} {} {}\n'.format(lq, gt, None))
    print(dset_path, total_ct)
# pre-process for low-light dataset
# 2. DICM:
# * Chulwoo Lee, Chul Lee, and Chang-Su Kim. Contrast en- hancement based on layered difference representation. In ICIP, 2012.
# * Link: Fediory/HVI-CIDNet: "You Only Need One Color Space: An Efficient Network for Low-light Image Enhancement" (github.com)
# 3. MEF:
# * Kede Ma, Kai Zeng, and Zhou Wang. Perceptual quality assessment for multi-exposure image fusion. TIP, 2015.
# * Link: Fediory/HVI-CIDNet: "You Only Need One Color Space: An Efficient Network for Low-light Image Enhancement" (github.com)
# 4. NPE:
# * ShuhangWang,JinZheng,Hai-MiaoHu,andBoLi.Naturalness preserved enhancement algorithm for non-uniform illumination images. TIP, 2013.
# * Link: Fediory/HVI-CIDNet: "You Only Need One Color Space: An Efficient Network for Low-light Image Enhancement" (github.com)
# 5. LIME:
# * Link: Fediory/HVI-CIDNet: "You Only Need One Color Space: An Efficient Network for Low-light Image Enhancement" (github.com)
# 6. VV:
# * Link: Fediory/HVI-CIDNet: "You Only Need One Color Space: An Efficient Network for Low-light Image Enhancement" (github.com)
# No-reference low-light benchmarks: unpaired images under <dset>/image.
# Blanks in file names are replaced with underscores so the space-separated
# .list format stays parseable.
rt = os.path.join(base_rt, "LowLight")
dataset = ['DICM', 'LIME', 'MEF', 'NPE', 'VV']
for dset in dataset:
    dset_pth = os.path.join(rt, dset)
    input_folder = os.path.join(dset_pth, 'image')
    meta_folder = os.path.join(dset_pth, "metas")
    os.makedirs(meta_folder, exist_ok=True)
    for img_path in list(Path(input_folder).rglob("*.[jp][pn]g")):
        pth = str(img_path)
        if " " in pth:
            os.rename(pth, pth.replace(" ", "_"))
    list_file = []
    total_ct = 0
    for ct, sample in enumerate(os.listdir(input_folder)):
        if any(ext in sample for ext in
               (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
            list_file.append(os.path.join(input_folder, sample))
            total_ct += 1
    with open(os.path.join(meta_folder, 'test.list'), 'w') as fp:
        for item in list_file:
            fp.write('{} {} {}\n'.format(item, None, None))
    print(input_folder, total_ct)
"""
Other: UDC
"""
# pre-process for UDC(TOLED+POLED): https://yzhouas.github.io/projects/UDC/udc.html
# Yuqian Zhou, David Ren, Neil Emerton, Sehoon Lim, and Timothy Large. Image restoration for under-display camera. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9179–9188, 2021.
def mat2png(dset_folder, name, rt):
    """Unpack one UDC .mat archive into per-frame PNG files.

    dset_folder: folder holding the .mat file, e.g. '~/UDC/poled'
    name:        .mat filename, e.g. 'poled_test_display.mat'; its stem is
                 also the variable key stored inside the archive
    rt:          dataset root; frames are written to '{rt}/{split}/{image|gt}/'
    """
    # Local import: loadmat is not imported at file top and is only needed
    # for this one-off conversion.
    from scipy.io import loadmat
    # 'poled_test_display.mat' -> key 'test_display' (drop 6-char 'poled_'
    # / 'toled_' prefix).
    udc_key = name.split('.')[0][6:]
    udc_mat = loadmat(os.path.join(dset_folder, name))[udc_key]
    split, kind = udc_key.split('_')[0], udc_key.split('_')[1]
    # 'display' frames are the degraded inputs; everything else is ground truth.
    out_folder = os.path.join(rt, split, 'image' if kind == 'display' else 'gt')
    os.makedirs(out_folder, exist_ok=True)
    # Bug fix: removed unused `results = udc_mat.copy()` and the pointless
    # `sample = sample.save(...)` rebinding (Image.save returns None).
    n_im, h, w, c = udc_mat.shape
    for i in range(n_im):
        print(i, end='\r')
        frame = np.reshape(udc_mat[i, :, :, :], (h, w, c))
        # NOTE(review): `Image` (PIL) is not imported anywhere visible in this
        # file — confirm the import exists upstream before running.
        Image.fromarray(np.uint8(frame)).convert('RGB').save(
            os.path.join(out_folder, '%s_%d.png' % (name.split('_')[0], i)))
# UDC (under-display camera): pair each degraded frame with its ground truth.
rt = os.path.join(base_rt, "Other/UDC")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
# One-off .mat -> PNG extraction inputs (conversion already done; calls kept
# commented for reference).
poled_folder = os.path.join(rt, 'poled')
poled_sample = ['poled_test_display.mat', 'poled_test_gt.mat',
                'poled_val_display.mat', 'poled_val_gt.mat']
toled_folder = os.path.join(rt, 'toled')
toled_sample = ['toled_test_display.mat', 'toled_test_gt.mat',
                'toled_val_display.mat', 'toled_val_gt.mat']
# for sample_set in poled_sample:
#     print(sample_set)
#     mat2png(poled_folder, sample_set, rt)
# print()
# for sample_set in toled_sample:
#     print(sample_set)
#     mat2png(toled_folder, sample_set, rt)
# print()
for dset in ['val', 'test']:
    dset_folder = os.path.join(rt, dset)
    input_folder = os.path.join(dset_folder, 'image')
    gt_folder = os.path.join(dset_folder, 'gt')
    pairs = []
    for fname in os.listdir(input_folder):
        # Same substring-based extension filter as the rest of the script.
        if any(tok in fname for tok in
               (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
            pairs.append((os.path.join(input_folder, fname),
                          os.path.join(gt_folder, fname)))
    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for lq, hq in pairs:
            fp.write('{} {} {}\n'.format(lq, hq, None))
    print(dset_folder, len(pairs))
"""
SR: DIV2K, Flickr2K, OST
"""
# OST: gather HR images across the category folders; LQ column stays "None".
rt = os.path.join(base_rt, "SuperResolution/OST")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
list_file = []
total_ct = 0
image_folder = os.path.join(rt, "images")
for dset in os.listdir(image_folder):  # category folders
    input_folder = os.path.join(image_folder, dset)
    for fname in os.listdir(input_folder):
        if any(tok in fname for tok in
               (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
            list_file.append((os.path.join(input_folder, fname), None))
            total_ct += 1
# Line layout is "LQ HR label"; HR-only entries put the path in column 2.
with open(os.path.join(meta_folder, 'OST_HR.list'), 'w') as fp:
    for item in list_file:
        fp.write('{} {} {}\n'.format(None, item[0], None))
print(rt, total_ct)
# OST augmented SR pairs: each distortion folder holds per-category HR/LR crops;
# one meta list is written per distortion.
image_folder = os.path.join(rt, "images_pair")
for distortion in os.listdir(image_folder):  # SR1, SR2, SR3
    list_file = []
    total_ct = 0
    pair_folder = os.path.join(image_folder, distortion)
    for dset in os.listdir(pair_folder):  # animal, building, grass, ...
        input_folder = os.path.join(pair_folder, dset)
        if "SR" in distortion:
            hq_folder = os.path.join(input_folder, "HR")
            lq_folder = os.path.join(input_folder, "LR")
        else:
            # Bug fix: the message lacked its f-prefix, so the literal text
            # "{distortion}" was raised instead of the folder name.
            raise KeyError(f"Unknown {distortion} for Augmented OST dataset.")
        for sample in os.listdir(hq_folder):
            # Case-insensitive suffix match (the old substring test could also
            # hit names like "xxx.png.bak").
            if sample.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.tif', '.tiff')):
                list_file.append((os.path.join(lq_folder, sample),
                                  os.path.join(hq_folder, sample)))
                total_ct += 1
    with open(os.path.join(meta_folder, f"OST_train_pair_{distortion}.list"), 'w') as fp:
        for lq, hq in list_file:
            fp.write('{} {} {}\n'.format(lq, hq, None))
    print(pair_folder, total_ct)
# Flickr2K: HR-only meta list (LQ and label columns written as "None").
rt = os.path.join(base_rt, "SuperResolution/Flickr2K")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
list_file = []
total_ct = 0
for dset in ['images']:
    input_folder = os.path.join(rt, dset)
    for fname in os.listdir(input_folder):
        if any(tok in fname for tok in
               (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
            list_file.append((os.path.join(input_folder, fname), None))
            total_ct += 1
with open(os.path.join(meta_folder, 'Flickr2K_HR.list'), 'w') as fp:
    for item in list_file:
        fp.write('{} {} {}\n'.format(None, item[0], None))
print(rt, total_ct)
# Flickr2K augmented pairs: distortion folders are either SR (HR/LR subdirs)
# or Noise (HQ/LQ subdirs); one meta list per distortion.
image_folder = os.path.join(rt, "images_pair")
for distortion in os.listdir(image_folder):  # Noise_L1, ..., SR1, ...
    list_file = []
    total_ct = 0
    input_folder = os.path.join(image_folder, distortion)
    if "SR" in distortion:
        hq_folder = os.path.join(input_folder, "HR")
        lq_folder = os.path.join(input_folder, "LR")
    elif "Noise" in distortion:
        hq_folder = os.path.join(input_folder, "HQ")
        lq_folder = os.path.join(input_folder, "LQ")
    else:
        # Bug fix: missing f-prefix printed "{distortion}" literally.
        raise KeyError(f"Unknown {distortion} for Augmented Flickr2K dataset.")
    for sample in os.listdir(hq_folder):
        # Case-insensitive suffix match instead of the old substring test.
        if sample.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.tif', '.tiff')):
            list_file.append((os.path.join(lq_folder, sample),
                              os.path.join(hq_folder, sample)))
            total_ct += 1
    with open(os.path.join(meta_folder, f"Flickr2K_train_pair_{distortion}.list"), 'w') as fp:
        for lq, hq in list_file:
            fp.write('{} {} {}\n'.format(lq, hq, None))
    print(rt, total_ct)
# DIV2K: HR-only lists for the train and valid splits.
rt = os.path.join(base_rt, "SuperResolution/DIV2K")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['DIV2K_train_HR', 'DIV2K_valid_HR']:
    input_folder = os.path.join(rt, dset)
    hr_files = []
    for idx, fname in enumerate(os.listdir(input_folder)):
        print(idx, end='\r')  # lightweight progress indicator
        if any(tok in fname for tok in
               (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
            hr_files.append(os.path.join(input_folder, fname))
    with open(os.path.join(meta_folder, '{}.list'.format(dset)), 'w') as fp:
        for hr in hr_files:
            fp.write('{} {} {}\n'.format(None, hr, None))
    print(input_folder, len(hr_files))
# DIV2K augmented pairs (SR and Noise) for both train and valid splits;
# one meta list per (split, distortion).
for dset in ['DIV2K_train_pair', 'DIV2K_valid_pair']:
    image_folder = os.path.join(rt, dset)  # renamed from misspelled 'imgae_folder'
    for distortion in os.listdir(image_folder):  # Noise, SR.
        list_file = []
        total_ct = 0
        input_folder = os.path.join(image_folder, distortion)
        if "SR" in distortion:
            hq_folder = os.path.join(input_folder, "HR")
            lq_folder = os.path.join(input_folder, "LR")
        elif "Noise" in distortion:
            hq_folder = os.path.join(input_folder, "HQ")
            lq_folder = os.path.join(input_folder, "LQ")
        else:
            # Bug fix: missing f-prefix printed "{distortion}" literally.
            raise KeyError(f"Unknown {distortion} for Augmented DIV2K dataset.")
        for ct, sample in enumerate(os.listdir(hq_folder)):
            print(ct, end='\r')
            # Case-insensitive suffix match instead of the old substring test.
            if sample.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.tif', '.tiff')):
                list_file.append((os.path.join(lq_folder, sample),
                                  os.path.join(hq_folder, sample)))
                total_ct += 1
        with open(os.path.join(meta_folder, f"{dset}_{distortion}.list"), 'w') as fp:
            for lq, hq in list_file:
                fp.write('{} {} {}\n'.format(lq, hq, None))
        print(input_folder, total_ct)
# CDD11 (composite degradations): training list records (LQ, GT, distortion tag).
base_rt = f"/home/work/shared-fi-datasets-01/users/hsiang.chen/Project/Datasets/IR"
rt = os.path.join(base_rt, "Composite/CDD11")
meta_folder = os.path.join(rt, "metas")
os.makedirs(meta_folder, exist_ok=True)
for dset in ['CDD-11_train']:  # train
    input_folder = os.path.join(rt, dset)
    clear_folder = os.path.join(input_folder, "clear")
    triplets = []
    for distortion in os.listdir(input_folder):
        if distortion == "clear":
            continue  # "clear" is the ground-truth folder, not a degradation
        distortion_folder = os.path.join(input_folder, distortion)
        for idx, fname in enumerate(os.listdir(distortion_folder)):
            print(idx, end='\r')
            if any(tok in fname for tok in
                   (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
                triplets.append((os.path.join(distortion_folder, fname),
                                 os.path.join(clear_folder, fname),
                                 distortion))
    with open(os.path.join(meta_folder, 'train.list'), 'w') as fp:
        for lq, hq, tag in triplets:
            fp.write('{} {} {}\n'.format(lq, hq, tag))
    print(input_folder, len(triplets))
# CDD11 test split: one meta list per distortion type.
for dset in ['CDD-11_test']:  # test
    input_folder = os.path.join(rt, dset)
    clear_folder = os.path.join(input_folder, "clear")
    for distortion in os.listdir(input_folder):
        if distortion == "clear":
            continue  # ground-truth folder, not a degradation
        distortion_folder = os.path.join(input_folder, distortion)
        pairs = []
        for idx, fname in enumerate(os.listdir(distortion_folder)):
            print(idx, end='\r')
            if any(tok in fname for tok in
                   (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".JPG", ".PNG")):
                pairs.append((os.path.join(distortion_folder, fname),
                              os.path.join(clear_folder, fname)))
        with open(os.path.join(meta_folder, 'test_{}.list'.format(distortion)), 'w') as fp:
            for lq, hq in pairs:
                fp.write('{} {} {}\n'.format(lq, hq, distortion))
        print(distortion_folder, len(pairs))