|
|
import argparse |
|
|
import cv2 |
|
|
import numpy as np |
|
|
import os |
|
|
from concurrent.futures import ThreadPoolExecutor, as_completed |
|
|
from basicsr.utils import scandir |
|
|
from os import path as osp |
|
|
from tqdm import tqdm |
|
|
import logging |
|
|
|
|
|
|
|
|
# Module-wide logging: INFO level so that per-image warnings/errors emitted
# by the worker threads are visible on the console.
logging.basicConfig(level=logging.INFO)


logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
def worker(path, opt):
    """Crop one image into overlapping sub-image patches (thread worker).

    Args:
        path (str): Image path.
        opt (dict): Configuration dict. It contains:
            crop_size (int): Crop size.
            step (int): Step for overlapped sliding window.
            thresh_size (int): Threshold size. Patches whose size is lower
                than thresh_size will be dropped.
            save_folder (str): Path to save folder.
            compression_level (int): for cv2.IMWRITE_PNG_COMPRESSION.

    Returns:
        tuple: (status, img_name, count, message) - status can be
            'processed', 'skipped', 'error', or 'too_small'.
    """
    crop_size = opt['crop_size']
    step = opt['step']
    thresh_size = opt['thresh_size']
    save_folder = opt['save_folder']
    img_name, extension = osp.splitext(osp.basename(path))

    # Strip common scale suffixes (e.g. "0001x4" -> "0001") so GT and LQ
    # patches of the same source image share a base name.
    img_name = img_name.replace('x2', '').replace('x3', '').replace('x4', '').replace('x8', '')

    try:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if img is None:
            # cv2.imread returns None (no exception) on unreadable files.
            logger.warning("Could not read image: %s", path)
            return ('error', img_name, 0, f"Could not read image: {path}")

        h, w = img.shape[0:2]
        if h < crop_size or w < crop_size:
            logger.warning("Image %s is smaller than crop size: (%s, %s) < %s", path, h, w, crop_size)
            return ('too_small', img_name, 0, f"Image too small: ({h}, {w}) < {crop_size}")

        # Top-left corners of the sliding window; append one extra window
        # flush with the border when the leftover strip exceeds thresh_size,
        # so image edges are not dropped.
        h_space = np.arange(0, h - crop_size + 1, step)
        if h - (h_space[-1] + crop_size) > thresh_size:
            h_space = np.append(h_space, h - crop_size)
        w_space = np.arange(0, w - crop_size + 1, step)
        if w - (w_space[-1] + crop_size) > thresh_size:
            w_space = np.append(w_space, w - crop_size)

        saved_count = 0
        skipped_count = 0
        index = 0
        for x in h_space:
            for y in w_space:
                index += 1
                output_path = osp.join(save_folder, f'{img_name}_s{index:03d}{extension}')
                # Resume support: never overwrite patches from a previous run.
                if osp.exists(output_path):
                    skipped_count += 1
                    continue
                cropped_img = img[x:x + crop_size, y:y + crop_size, ...]
                cropped_img = np.ascontiguousarray(cropped_img)
                # Bug fix: cv2.imwrite reports failure (bad path, full disk,
                # unsupported format) by returning False instead of raising,
                # so the result must be checked explicitly.
                ok = cv2.imwrite(
                    output_path, cropped_img,
                    [cv2.IMWRITE_PNG_COMPRESSION, opt['compression_level']])
                if not ok:
                    logger.error("Failed to write patch: %s", output_path)
                    return ('error', img_name, saved_count, f"Failed to write patch: {output_path}")
                saved_count += 1

        total_patches = saved_count + skipped_count
        if saved_count == 0 and skipped_count > 0:
            return ('skipped', img_name, total_patches, f"All {total_patches} patches already exist")
        return ('processed', img_name, total_patches, f"Saved {saved_count}, skipped {skipped_count}")

    except Exception as e:
        # Broad catch is intentional: one bad image must not kill the whole
        # thread pool; the failure is reported back to the aggregator.
        logger.error("Error processing image %s: %s", path, e)
        return ('error', img_name, 0, str(e))
|
|
|
|
|
|
|
|
def extract_subimages(opt):
    """Crop images to subimages using a thread pool.

    Args:
        opt (dict): Configuration dict. It contains:
            input_folder (str): Path to the input folder.
            save_folder (str): Path to save folder.
            n_thread (int): Thread number.
            Plus the per-image keys consumed by ``worker`` (crop_size, step,
            thresh_size, compression_level).
    """
    input_folder = opt['input_folder']
    save_folder = opt['save_folder']
    if not osp.exists(save_folder):
        # exist_ok=True guards against a race where another process creates
        # the folder between the exists() check and makedirs().
        os.makedirs(save_folder, exist_ok=True)
        print(f'mkdir {save_folder} ...')
    else:
        print(f'Папка {save_folder} уже существует. Продолжаем обработку...')

    img_list = list(scandir(input_folder, full_path=True))
    if not img_list:
        print('Изображения не найдены')
        return

    # Aggregated counters, updated only in this thread (as_completed loop),
    # so no locking is needed.
    processed = 0
    skipped = 0
    errors = 0
    too_small = 0
    total_patches = 0

    def _postfix():
        # Single source of truth for the progress-bar counters; the original
        # duplicated this dict verbatim in two branches.
        return {
            'обработано': processed,
            'пропущено': skipped,
            'маленьких': too_small,
            'ошибок': errors,
            'патчей': total_patches
        }

    with ThreadPoolExecutor(max_workers=opt['n_thread']) as executor:
        # Map each future back to its source path for error reporting.
        futures = {
            executor.submit(worker, path, opt): path
            for path in img_list
        }

        with tqdm(total=len(img_list), desc='Извлечение подизображений', unit='img') as pbar:
            for future in as_completed(futures):
                try:
                    status, img_name, count, message = future.result()
                    if status == 'skipped':
                        skipped += 1
                        total_patches += count
                    elif status == 'processed':
                        processed += 1
                        total_patches += count
                    elif status == 'too_small':
                        too_small += 1
                    else:
                        errors += 1
                        tqdm.write(f'Ошибка: {img_name} - {message}')
                    pbar.set_postfix(_postfix())
                except Exception as e:
                    # worker() catches its own exceptions, so this covers
                    # failures raised by the future machinery itself.
                    path = futures[future]
                    errors += 1
                    tqdm.write(f'Ошибка при обработке {path}: {e}')
                    pbar.set_postfix(_postfix())
                finally:
                    pbar.update(1)

    print(f'Все процессы завершены. Обработано: {processed}, пропущено: {skipped}, '
          f'маленьких: {too_small}, ошибок: {errors}, всего патчей: {total_patches}')
|
|
|
|
|
|
|
|
def main(args):
    """A multi-thread tool to crop large images to sub-images for faster IO.

    Builds the configuration dict consumed by ``extract_subimages``:
        n_thread (int): Thread number.
        compression_level (int): CV_IMWRITE_PNG_COMPRESSION from 0 to 9.
            A higher value means a smaller size and longer compression time.
            Use 0 for faster CPU decompression. Default: 3, same in cv2.
        input_folder (str): Path to the input folder.
        save_folder (str): Path to save folder.
        crop_size (int): Crop size.
        step (int): Step for overlapped sliding window.
        thresh_size (int): Threshold size. Patches whose size is lower than
            thresh_size will be dropped.

    Usage:
        Run this script once per folder. Typically there are GT and LQ
        folders to process for the DIV2K dataset; after processing, each
        sub_folder should contain the same number of subimages. Adjust the
        opt configuration to match your settings.
    """
    opt = {
        'n_thread': args.n_thread,
        'compression_level': args.compression_level,
        'input_folder': args.input,
        'save_folder': args.output,
        'crop_size': args.crop_size,
        'step': args.step,
        'thresh_size': args.thresh_size,
    }
    extract_subimages(opt)
|
|
|
|
|
|
|
|
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder')
    parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_HR_sub', help='Output folder')
    parser.add_argument('--crop_size', type=int, default=480, help='Crop size')
    parser.add_argument('--step', type=int, default=240, help='Step for overlapped sliding window')
    parser.add_argument(
        '--thresh_size',
        type=int,
        default=0,
        help='Threshold size. Patches whose size is lower than thresh_size will be dropped.')
    parser.add_argument('--n_thread', type=int, default=None, help='Thread number (default: CPU count)')
    parser.add_argument('--compression_level', type=int, default=3, help='Compression level')
    args = parser.parse_args()

    # Default the worker count to the machine's CPU count. `os` is already
    # imported at module level, so the branch-local `import multiprocessing`
    # is unnecessary; os.cpu_count() may return None on exotic platforms,
    # hence the fallback to a single thread.
    if args.n_thread is None:
        args.n_thread = os.cpu_count() or 1

    main(args)
|
|
|
|
|
|