AlexeyZhigalov committed
Commit 689ccff · verified · 1 Parent(s): 347b486

Upload 6 files

options/finetune_realesrgan_x4plus.yml ADDED
@@ -0,0 +1,188 @@
+ # general settings
+ name: finetune_RealESRGANx4plus_400k
+ model_type: RealESRGANModel
+ scale: 4
+ num_gpu: auto
+ manual_seed: 0
+
+ # ----------------- options for synthesizing training data in RealESRGANModel ----------------- #
+ # USM the ground-truth
+ l1_gt_usm: True
+ percep_gt_usm: True
+ gan_gt_usm: False
+
+ # the first degradation process
+ resize_prob: [0.2, 0.7, 0.1]  # up, down, keep
+ resize_range: [0.15, 1.5]
+ gaussian_noise_prob: 0.5
+ noise_range: [1, 30]
+ poisson_scale_range: [0.05, 3]
+ gray_noise_prob: 0.4
+ jpeg_range: [30, 95]
+
+ # the second degradation process
+ second_blur_prob: 0.8
+ resize_prob2: [0.3, 0.4, 0.3]  # up, down, keep
+ resize_range2: [0.3, 1.2]
+ gaussian_noise_prob2: 0.5
+ noise_range2: [1, 25]
+ poisson_scale_range2: [0.05, 2.5]
+ gray_noise_prob2: 0.4
+ jpeg_range2: [30, 95]
+
+ gt_size: 256
+ queue_size: 180
+
+ # dataset and data loader settings
+ datasets:
+   train:
+     name: DF2K+OST
+     type: RealESRGANDataset
+     dataroot_gt: datasets/DF2K
+     meta_info: datasets/DF2K/meta_info/meta_info_DF2Kmultiscale_cleaned.txt
+     io_backend:
+       type: disk
+
+     blur_kernel_size: 21
+     kernel_list: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso']
+     kernel_prob: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03]
+     sinc_prob: 0.1
+     blur_sigma: [0.2, 3]
+     betag_range: [0.5, 4]
+     betap_range: [1, 2]
+
+     blur_kernel_size2: 21
+     kernel_list2: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso']
+     kernel_prob2: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03]
+     sinc_prob2: 0.1
+     blur_sigma2: [0.2, 1.5]
+     betag_range2: [0.5, 4]
+     betap_range2: [1, 2]
+
+     final_sinc_prob: 0.8
+
+     gt_size: 256
+     use_hflip: True
+     use_rot: False
+
+     # data loader
+     use_shuffle: true
+     num_worker_per_gpu: 5
+     batch_size_per_gpu: 12
+     dataset_enlarge_ratio: 1
+     prefetch_mode: ~
+
+   # Uncomment these for validation
+   # val:
+   #   name: validation
+   #   type: PairedImageDataset
+   #   dataroot_gt: path_to_gt
+   #   dataroot_lq: path_to_lq
+   #   io_backend:
+   #     type: disk
+
+ # network structures
+ network_g:
+   type: RRDBNet
+   num_in_ch: 3
+   num_out_ch: 3
+   num_feat: 64
+   num_block: 23
+   num_grow_ch: 32
+
+ network_d:
+   type: UNetDiscriminatorSN
+   num_in_ch: 3
+   num_feat: 64
+   skip_connection: True
+
+ # path
+ path:
+   # use the pre-trained Real-ESRGAN model
+   pretrain_network_g: experiments/pretrained_models/RealESRGAN_x4plus.pth
+   param_key_g: params_ema
+   strict_load_g: true
+   pretrain_network_d: experiments/pretrained_models/RealESRGAN_x4plus_netD.pth
+   param_key_d: params
+   strict_load_d: true
+   resume_state: ~
+
+ # training settings
+ train:
+   ema_decay: 0.999
+   optim_g:
+     type: Adam
+     lr: !!float 1e-4
+     weight_decay: 0
+     betas: [0.9, 0.99]
+   optim_d:
+     type: Adam
+     lr: !!float 1e-4
+     weight_decay: 0
+     betas: [0.9, 0.99]
+
+   scheduler:
+     type: MultiStepLR
+     milestones: [400000]
+     gamma: 0.5
+
+   total_iter: 400000
+   warmup_iter: -1  # no warm up
+
+   # losses
+   pixel_opt:
+     type: L1Loss
+     loss_weight: 1.0
+     reduction: mean
+   # perceptual loss (content and style losses)
+   perceptual_opt:
+     type: PerceptualLoss
+     layer_weights:
+       # before relu
+       'conv1_2': 0.1
+       'conv2_2': 0.1
+       'conv3_4': 1
+       'conv4_4': 1
+       'conv5_4': 1
+     vgg_type: vgg19
+     use_input_norm: true
+     perceptual_weight: !!float 1.0
+     style_weight: 0
+     range_norm: false
+     criterion: l1
+   # gan loss
+   gan_opt:
+     type: GANLoss
+     gan_type: vanilla
+     real_label_val: 1.0
+     fake_label_val: 0.0
+     loss_weight: !!float 1e-1
+
+   net_d_iters: 1
+   net_d_init_iters: 0
+
+ # Uncomment these for validation
+ # validation settings
+ # val:
+ #   val_freq: !!float 5e3
+ #   save_img: True
+
+ #   metrics:
+ #     psnr: # metric name
+ #       type: calculate_psnr
+ #       crop_border: 4
+ #       test_y_channel: false
+
+ # logging settings
+ logger:
+   print_freq: 100
+   save_checkpoint_freq: !!float 5e3
+   use_tb_logger: true
+   wandb:
+     project: ~
+     resume_id: ~
+
+ # dist training settings
+ dist_params:
+   backend: nccl
+   port: 29500
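
A quick way to sanity-check this config before training is to load it with PyYAML and confirm the `!!float` tags resolve to numbers. A minimal sketch, assuming PyYAML is installed and the file sits at the path above (BasicSR uses its own ordered-YAML loader internally, so this is only a spot check):

import yaml

# Load the finetune config and spot-check a few resolved values.
with open('options/finetune_realesrgan_x4plus.yml') as f:
    opt = yaml.safe_load(f)

assert opt['scale'] == 4 and opt['model_type'] == 'RealESRGANModel'
assert isinstance(opt['train']['optim_g']['lr'], float)  # !!float 1e-4 -> 0.0001
assert isinstance(opt['logger']['save_checkpoint_freq'], float)  # !!float 5e3 -> 5000.0
print(opt['datasets']['train']['meta_info'])

In the Real-ESRGAN repository this kind of file is normally passed straight to the trainer, e.g. `python realesrgan/train.py -opt options/finetune_realesrgan_x4plus.yml --auto_resume`.
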
options/train_realesrnet_x4plus.yml ADDED
@@ -0,0 +1,144 @@
+ # general settings
+ name: train_RealESRNetx4plus_1000k_B12G4
+ model_type: RealESRNetModel
+ scale: 4
+ num_gpu: auto  # auto: can infer from your visible devices automatically. official: 4 GPUs
+ manual_seed: 0
+
+ # ----------------- options for synthesizing training data in RealESRNetModel ----------------- #
+ gt_usm: True  # USM the ground-truth
+
+ # the first degradation process
+ resize_prob: [0.2, 0.7, 0.1]  # up, down, keep
+ resize_range: [0.15, 1.5]
+ gaussian_noise_prob: 0.5
+ noise_range: [1, 30]
+ poisson_scale_range: [0.05, 3]
+ gray_noise_prob: 0.4
+ jpeg_range: [30, 95]
+
+ # the second degradation process
+ second_blur_prob: 0.8
+ resize_prob2: [0.3, 0.4, 0.3]  # up, down, keep
+ resize_range2: [0.3, 1.2]
+ gaussian_noise_prob2: 0.5
+ noise_range2: [1, 25]
+ poisson_scale_range2: [0.05, 2.5]
+ gray_noise_prob2: 0.4
+ jpeg_range2: [30, 95]
+
+ gt_size: 256
+ queue_size: 180
+
+ # dataset and data loader settings
+ datasets:
+   train:
+     name: DF2K+OST
+     type: RealESRGANDataset
+     dataroot_gt: datasets/DF2K/DF2K_multiscale_sub
+     meta_info: datasets/DF2K/meta_info/meta_info_DF2Kmultiscale_${TILE_SIZE}.txt
+     io_backend:
+       type: disk
+
+     blur_kernel_size: 21
+     kernel_list: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso']
+     kernel_prob: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03]
+     sinc_prob: 0.1
+     blur_sigma: [0.2, 3]
+     betag_range: [0.5, 4]
+     betap_range: [1, 2]
+
+     blur_kernel_size2: 21
+     kernel_list2: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso']
+     kernel_prob2: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03]
+     sinc_prob2: 0.1
+     blur_sigma2: [0.2, 1.5]
+     betag_range2: [0.5, 4]
+     betap_range2: [1, 2]
+
+     final_sinc_prob: 0.8
+
+     gt_size: 256
+     use_hflip: True
+     use_rot: False
+
+     # data loader
+     use_shuffle: true
+     num_worker_per_gpu: 5
+     batch_size_per_gpu: 12
+     dataset_enlarge_ratio: 1
+     prefetch_mode: ~
+
+   # Uncomment these for validation
+   # val:
+   #   name: validation
+   #   type: PairedImageDataset
+   #   dataroot_gt: path_to_gt
+   #   dataroot_lq: path_to_lq
+   #   io_backend:
+   #     type: disk
+
+ # network structures
+ network_g:
+   type: RRDBNet
+   num_in_ch: 3
+   num_out_ch: 3
+   num_feat: 64
+   num_block: 23
+   num_grow_ch: 32
+
+ # path
+ path:
+   pretrain_network_g: experiments/pretrained_models/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth
+   param_key_g: params_ema
+   strict_load_g: true
+   resume_state: ~
+
+ # training settings
+ train:
+   ema_decay: 0.999
+   optim_g:
+     type: Adam
+     lr: !!float 2e-4
+     weight_decay: 0
+     betas: [0.9, 0.99]
+
+   scheduler:
+     type: MultiStepLR
+     milestones: [1000000]
+     gamma: 0.5
+
+   total_iter: 1000000
+   warmup_iter: -1  # no warm up
+
+   # losses
+   pixel_opt:
+     type: L1Loss
+     loss_weight: 1.0
+     reduction: mean
+
+ # Uncomment these for validation
+ # validation settings
+ # val:
+ #   val_freq: !!float 5e3
+ #   save_img: True
+
+ #   metrics:
+ #     psnr: # metric name
+ #       type: calculate_psnr
+ #       crop_border: 4
+ #       test_y_channel: false
+
+ # logging settings
+ logger:
+   print_freq: 100
+   save_checkpoint_freq: !!float 5e3
+   use_tb_logger: true
+   wandb:
+     project: ~
+     resume_id: ~
+
+ # dist training settings
+ dist_params:
+   backend: nccl
+   port: 29500
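
Note the `${TILE_SIZE}` placeholder in the `meta_info` path above: YAML will not expand it, so it has to be substituted before the config reaches the dataset. A hedged sketch of one way to do this, assuming `TILE_SIZE` is exported as an environment variable by the launch script:

import os
import yaml

with open('options/train_realesrnet_x4plus.yml') as f:
    opt = yaml.safe_load(f)

# os.path.expandvars resolves ${VAR}-style placeholders from the environment;
# it leaves the placeholder untouched if the variable is unset.
meta = os.path.expandvars(opt['datasets']['train']['meta_info'])
if '${' in meta:
    raise ValueError(f'Unresolved placeholder in meta_info path: {meta}')
opt['datasets']['train']['meta_info'] = meta
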
scripts/extract_subimages_fixed.py ADDED
@@ -0,0 +1,160 @@
+ import argparse
+ import cv2
+ import logging
+ import numpy as np
+ import os
+ import sys
+ from basicsr.utils import scandir
+ from multiprocessing import Pool
+ from os import path as osp
+ from tqdm import tqdm
+
+ # Set up logging so that per-image failures are recorded
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+
+ def main(args):
+     """A multi-process tool to crop large images into sub-images for faster IO.
+
+     opt (dict): Configuration dict. It contains:
+         n_thread (int): Number of worker processes.
+         compression_level (int): cv2.IMWRITE_PNG_COMPRESSION, from 0 to 9. A higher value means a smaller file size
+             and a longer compression time. Use 0 for faster CPU decompression. Default: 3, same as in cv2.
+         input_folder (str): Path to the input folder.
+         save_folder (str): Path to the save folder.
+         crop_size (int): Crop size.
+         step (int): Step of the overlapped sliding window.
+         thresh_size (int): Threshold size. Patches whose size is lower than thresh_size will be dropped.
+
+     Usage:
+         Run this script once per folder.
+         Typically there are a GT folder and an LQ folder to be processed for the DIV2K dataset.
+         After processing, each sub-folder should contain the same number of sub-images.
+         Remember to modify the opt configuration according to your settings.
+     """
+     opt = {}
+     opt['n_thread'] = args.n_thread
+     opt['compression_level'] = args.compression_level
+     opt['input_folder'] = args.input
+     opt['save_folder'] = args.output
+     opt['crop_size'] = args.crop_size
+     opt['step'] = args.step
+     opt['thresh_size'] = args.thresh_size
+     extract_subimages(opt)
+
+
+ def extract_subimages(opt):
+     """Crop images to sub-images.
+
+     Args:
+         opt (dict): Configuration dict. It contains:
+             input_folder (str): Path to the input folder.
+             save_folder (str): Path to the save folder.
+             n_thread (int): Number of worker processes.
+     """
+     input_folder = opt['input_folder']
+     save_folder = opt['save_folder']
+     if not osp.exists(save_folder):
+         os.makedirs(save_folder)
+         print(f'mkdir {save_folder} ...')
+     else:
+         print(f'Folder {save_folder} already exists. Exit.')
+         sys.exit(1)
+
+     # scan all images
+     img_list = list(scandir(input_folder, full_path=True))
+
+     pbar = tqdm(total=len(img_list), unit='image', desc='Extract')
+     pool = Pool(opt['n_thread'])
+     for path in img_list:
+         pool.apply_async(
+             worker,
+             args=(path, opt),
+             callback=lambda arg: pbar.update(1),
+             # bind the current path as a default argument; a bare `path` inside the
+             # lambda would be resolved late and always point at the last list item
+             error_callback=lambda err, p=path: logger.error(f'Error processing {p}: {err}'))
+     pool.close()
+     pool.join()
+     pbar.close()
+     print('All processes done.')
+
+
+ def worker(path, opt):
+     """Worker for each process.
+
+     Args:
+         path (str): Image path.
+         opt (dict): Configuration dict. It contains:
+             crop_size (int): Crop size.
+             step (int): Step of the overlapped sliding window.
+             thresh_size (int): Threshold size. Patches whose size is lower than thresh_size will be dropped.
+             save_folder (str): Path to the save folder.
+             compression_level (int): for cv2.IMWRITE_PNG_COMPRESSION.
+
+     Returns:
+         process_info (str): Process information displayed in the progress bar.
+     """
+     crop_size = opt['crop_size']
+     step = opt['step']
+     thresh_size = opt['thresh_size']
+     img_name, extension = osp.splitext(osp.basename(path))
+
+     # remove the x2, x3, x4 and x8 in the filename for DIV2K
+     img_name = img_name.replace('x2', '').replace('x3', '').replace('x4', '').replace('x8', '')
+
+     # Try to read the image; if anything goes wrong, skip it instead of crashing the pool
+     try:
+         img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
+
+         # Check that the image was actually decoded
+         if img is None:
+             logger.warning(f'Could not read image: {path}')
+             return f'Skipped {img_name} (could not read)'
+
+         h, w = img.shape[0:2]
+
+         # Check the minimum image size
+         if h < crop_size or w < crop_size:
+             logger.warning(f'Image {path} is smaller than crop size: ({h}, {w}) < {crop_size}')
+             return f'Skipped {img_name} (too small)'
+
+         h_space = np.arange(0, h - crop_size + 1, step)
+         if h - (h_space[-1] + crop_size) > thresh_size:
+             h_space = np.append(h_space, h - crop_size)
+         w_space = np.arange(0, w - crop_size + 1, step)
+         if w - (w_space[-1] + crop_size) > thresh_size:
+             w_space = np.append(w_space, w - crop_size)
+
+         index = 0
+         for x in h_space:
+             for y in w_space:
+                 index += 1
+                 cropped_img = img[x:x + crop_size, y:y + crop_size, ...]
+                 cropped_img = np.ascontiguousarray(cropped_img)
+                 cv2.imwrite(
+                     osp.join(opt['save_folder'], f'{img_name}_s{index:03d}{extension}'), cropped_img,
+                     [cv2.IMWRITE_PNG_COMPRESSION, opt['compression_level']])
+         process_info = f'Processing {img_name} ...'
+         return process_info
+     except Exception as e:
+         logger.error(f'Error processing image {path}: {e}')
+         return f'Error processing {img_name}: {str(e)}'
+
+
+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder')
+     parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_HR_sub', help='Output folder')
+     parser.add_argument('--crop_size', type=int, default=480, help='Crop size')
+     parser.add_argument('--step', type=int, default=240, help='Step of the overlapped sliding window')
+     parser.add_argument(
+         '--thresh_size',
+         type=int,
+         default=0,
+         help='Threshold size. Patches whose size is lower than thresh_size will be dropped.')
+     parser.add_argument('--n_thread', type=int, default=20, help='Number of worker processes.')
+     parser.add_argument('--compression_level', type=int, default=3, help='Compression level')
+     args = parser.parse_args()
+
+     main(args)
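
The h_space/w_space construction above places a crop every `step` pixels and appends one extra flush-right/bottom position whenever the remainder exceeds `thresh_size`. A small sketch of the same arithmetic (image size illustrative), handy for estimating output volume before launching the pool:

import numpy as np

def count_patches(h, w, crop_size=480, step=240, thresh_size=0):
    """Mirrors the h_space/w_space logic in worker(); assumes h, w >= crop_size."""
    def positions(length):
        space = np.arange(0, length - crop_size + 1, step)
        if length - (space[-1] + crop_size) > thresh_size:
            space = np.append(space, length - crop_size)
        return space
    return len(positions(h)) * len(positions(w))

print(count_patches(1356, 2040))  # 5 x 8 = 40 sub-images with the defaults
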
scripts/filter_images_by_size.py ADDED
@@ -0,0 +1,71 @@
+ import argparse
+ import os
+ import sys
+ from PIL import Image
+
+
+ def filter_images(input_meta_file, output_meta_file=None, image_root_dir=None, min_size=1024):
+     """Filters a meta_info file to keep only images with dimensions greater than or equal to min_size."""
+     if output_meta_file is None:
+         base, ext = os.path.splitext(input_meta_file)
+         output_meta_file = f"{base}_{min_size}{ext}"
+
+     if image_root_dir is None:
+         image_root_dir = os.path.dirname(input_meta_file)
+
+     kept_count = 0
+     skipped_count = 0
+
+     print(f"Starting to filter images from: {input_meta_file}")
+     print(f"Minimum required size: {min_size}x{min_size}")
+
+     try:
+         with open(input_meta_file, 'r') as infile, open(output_meta_file, 'w') as outfile:
+             for line in infile:
+                 # The first part of the line is the relative image path
+                 parts = line.strip().split()
+                 if not parts:
+                     continue
+
+                 relative_img_path = parts[0]
+                 # The meta file seems to be in the DF2K folder, so paths are relative to that
+                 full_img_path = os.path.join(image_root_dir, relative_img_path)
+
+                 try:
+                     with Image.open(full_img_path) as img:
+                         width, height = img.size
+                         if width >= min_size and height >= min_size:
+                             outfile.write(line)
+                             kept_count += 1
+                         else:
+                             skipped_count += 1
+                 except FileNotFoundError:
+                     print(f"Warning: Image file not found, skipping: {full_img_path}")
+                     skipped_count += 1
+                 except Exception as e:
+                     print(f"Warning: Could not read image {full_img_path}, skipping. Error: {e}")
+                     skipped_count += 1
+     except FileNotFoundError:
+         print(f"Error: Input meta file not found at {input_meta_file}")
+         sys.exit(1)
+
+     print("\nFiltering complete.")
+     print(f"Output file created: {output_meta_file}")
+     print(f"Images kept: {kept_count}")
+     print(f"Images skipped: {skipped_count}")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description="Filter images in a meta info file by minimum size.")
+     parser.add_argument('input_meta_file', type=str,
+                         help='Path to the input meta info file.')
+     parser.add_argument('--output_meta_file', type=str, default=None,
+                         help='Path to the output meta info file. If not provided, it will be generated based on the input file name.')
+     parser.add_argument('--image_root_dir', type=str, default=None,
+                         help='Root directory for the image paths in the meta file. If not provided, the directory of the input file is used.')
+     parser.add_argument('--min_size', type=int, default=1024,
+                         help='Minimum required size (width and height) for images to be kept.')
+     args = parser.parse_args()
+     filter_images(args.input_meta_file, args.output_meta_file, args.image_root_dir, args.min_size)
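
Because the meta file is just one relative image path per line (the format written by generate_meta_info.py below), the filter can also be called directly from Python. A hypothetical invocation, assuming the repository root is on the import path and the DF2K layout used in the configs above:

from scripts.filter_images_by_size import filter_images

# Keeps only GT entries of at least 1024x1024; with no output path given,
# the result lands next to the input as meta_info_DF2Kmultiscale_1024.txt.
filter_images(
    input_meta_file='datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt',
    min_size=1024)
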
scripts/generate_meta_info.py ADDED
@@ -0,0 +1,58 @@
+ import argparse
+ import cv2
+ import glob
+ import os
+
+
+ def main(args):
+     with open(args.meta_info, 'w') as txt_file:
+         for folder, root in zip(args.input, args.root):
+             img_paths = sorted(glob.glob(os.path.join(folder, '*')))
+             for img_path in img_paths:
+                 status = True
+                 if args.check:
+                     # read the image once as a check, since some images may be corrupted
+                     img = None  # ensure `img` is defined even if cv2.imread raises
+                     try:
+                         img = cv2.imread(img_path)
+                     except (IOError, OSError) as error:
+                         print(f'Read {img_path} error: {error}')
+                         status = False
+                     if img is None:
+                         status = False
+                         print(f'Img is None: {img_path}')
+                 if status:
+                     # get the relative path
+                     img_name = os.path.relpath(img_path, root)
+                     print(img_name)
+                     txt_file.write(f'{img_name}\n')
+
+
+ if __name__ == '__main__':
+     """Generate a meta info txt file for Ground-Truth images only.
+
+     It can also merge meta info from several folders into one txt file.
+     """
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         '--input',
+         nargs='+',
+         default=['datasets/DF2K/DF2K_HR', 'datasets/DF2K/DF2K_multiscale'],
+         help='Input folder, can be a list')
+     parser.add_argument(
+         '--root',
+         nargs='+',
+         default=['datasets/DF2K', 'datasets/DF2K'],
+         help='Folder root, should have the same length as the input folders')
+     parser.add_argument(
+         '--meta_info',
+         type=str,
+         default='datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt',
+         help='txt path for meta info')
+     parser.add_argument('--check', action='store_true', help='Read each image to check whether it is valid')
+     args = parser.parse_args()
+
+     assert len(args.input) == len(args.root), ('Input folder and folder root should have the same length, but got '
+                                                f'{len(args.input)} and {len(args.root)}.')
+     os.makedirs(os.path.dirname(args.meta_info), exist_ok=True)
+
+     main(args)
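
For reference, each written line is the image path relative to the corresponding `--root`, so entries from both default input folders end up in one file. A tiny sketch of that behaviour (file names illustrative):

import os

for img_path in ('datasets/DF2K/DF2K_HR/0001.png', 'datasets/DF2K/DF2K_multiscale/0001T0.png'):
    print(os.path.relpath(img_path, 'datasets/DF2K'))
# -> DF2K_HR/0001.png
# -> DF2K_multiscale/0001T0.png
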
scripts/generate_multiscale_DF2K.py ADDED
@@ -0,0 +1,49 @@
+ import argparse
+ import glob
+ import os
+ from PIL import Image
+
+
+ def main(args):
+     # For DF2K, we consider the following three scales,
+     # plus a smallest version whose shortest edge is args.min_size (400 by default)
+     scale_list = [0.75, 0.5, 1 / 3]
+     shortest_edge = args.min_size
+
+     path_list = sorted(glob.glob(os.path.join(args.input, '*')))
+     for path in path_list:
+         print(path)
+         basename = os.path.splitext(os.path.basename(path))[0]
+
+         img = Image.open(path)
+         width, height = img.size
+         for idx, scale in enumerate(scale_list):
+             print(f'\t{scale:.2f}')
+             rlt = img.resize((int(width * scale), int(height * scale)), resample=Image.LANCZOS)
+             rlt.save(os.path.join(args.output, f'{basename}T{idx}.png'))
+
+         # save the smallest version, rescaled so that its shortest edge equals shortest_edge
+         if width < height:
+             ratio = height / width
+             width = shortest_edge
+             height = int(width * ratio)
+         else:
+             ratio = width / height
+             height = shortest_edge
+             width = int(height * ratio)
+         rlt = img.resize((int(width), int(height)), resample=Image.LANCZOS)
+         rlt.save(os.path.join(args.output, f'{basename}T{idx + 1}.png'))
+
+
+ if __name__ == '__main__':
+     """Generate multi-scale versions of GT images with LANCZOS resampling.
+
+     It is currently used for the DF2K dataset (DIV2K + Flickr2K).
+     """
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder')
+     parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_multiscale', help='Output folder')
+     parser.add_argument('--min_size', type=int, default=400, help='Shortest edge of the smallest generated version')
+     args = parser.parse_args()
+
+     os.makedirs(args.output, exist_ok=True)
+     main(args)
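
Each source image therefore yields four rescaled copies, T0 through T3. A quick sketch of the resulting sizes for a hypothetical 2040x1356 input with the default min_size of 400:

width, height = 2040, 1356  # illustrative landscape input

for idx, scale in enumerate([0.75, 0.5, 1 / 3]):
    print(f'T{idx}: {int(width * scale)}x{int(height * scale)}')
# T0: 1530x1017, T1: 1020x678, T2: 680x452

ratio = width / height  # width >= height here, so the height becomes the shortest edge
print(f'T3: {int(400 * ratio)}x400')  # T3: 601x400
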