|
|
""" |
|
|
ref: https://github.com/robustsam/RobustSAM/blob/main/data/augment.py |
|
|
Source Dataset: |
|
|
BAPPS: (one to one, 150K) |
|
|
/group-volume/Human-Action-Analysis/datasets/IQA_datasets/DataDepictQA/BAPPS/images/twoafc_train/train/cnn/ref # 38120 |
|
|
/group-volume/Human-Action-Analysis/datasets/IQA_datasets/DataDepictQA/BAPPS/images/twoafc_train/train/mix/ref # 56640 |
|
|
/group-volume/Human-Action-Analysis/datasets/IQA_datasets/DataDepictQA/BAPPS/images/twoafc_train/train/traditional/ref # 56640 |
|
|
|
|
|
PIPAL: (one to one, 50) |
|
|
/home/CORP/hsiang.chen/Project/Datasets/DataDepictQA/PIPAL/metas/train_refAB_mix_pipal_70k.json |
|
|
|
|
|
KADID10K: (one to one, need to find clear sample from json, estimated 70) |
|
|
/home/CORP/hsiang.chen/Project/Datasets/DataDepictQA/KADID10K/metas/train_refAB_mix_kadid_30k.json |
|
|
|
|
|
KADID700K: (one to one, 140k) |
|
|
|
|
|
DetailDescriptionLAMM/metas/detailed_description_49k.json (one to one, 57895 images, 58k) |
|
|
|
|
|
Annotation Format: |
|
|
data = [ |
|
|
{ |
|
|
"distortion_class": "saturate_strengthen", |
|
|
"distortion_name": "saturate_strengthen_YCrCb", |
|
|
"severity": 5, |
|
|
"id": "121-cc-774921_0", |
|
|
"image_ref": "KADIS700K/ref_imgs_s224/121-cc-774921.png", |
|
|
"image_A": "KADIS700K/refA_sd_brief/dist_imgs/121-cc-774921_0.png", |
|
|
"image_B": None, |
|
|
"task_type": "quality_single_A", |
|
|
"conversations": [ |
|
|
{ |
|
|
"from": "human", |
|
|
"value": "What critical ONE quality degradation is present in the evaluated image versus the reference?", |
|
|
}, |
|
|
{ |
|
|
"from": "gpt", |
|
|
"value": "The critical ONE quality degradation presented is overly high saturation." |
|
|
} |
|
|
] |
|
|
} |
|
|
] |
|
|
""" |
|
|
|
|
|
import json |
|
|
import cv2 |
|
|
from glob import glob |
|
|
import os |
|
|
import numpy as np |
|
|
from PIL import Image, ImageDraw |
|
|
import matplotlib.pyplot as plt |
|
|
import albumentations as A |
|
|
import imgaug.augmenters as iaa |
|
|
import random |
|
|
import argparse |
|
|
from tqdm import tqdm |
|
|
from datetime import datetime |
|
|
from pathlib import Path |
|
|
|
|
|
import torchvision.transforms as T |
|
|
import torchvision.transforms.functional as TF |
|
|
import torchvision.transforms as transforms |
|
|
|
|
|
|
|
|
# Question templates used to build the "human" turn of each annotation
# conversation. Top-level keys select the reference mode ("Full-Reference"
# vs "Non-Reference"); inner keys select how many distortions the question
# asks about ("ONE"/"TWO"), plus a "Common" pool of count-agnostic phrasings
# that question_generate always mixes in.
question_dict = {
    "Full-Reference": {
        # Ask for the single most salient distortion vs. the reference.
        "ONE": [
            "Compared to the reference, what ONE distortion stands out most in the evaluated image?",
            "Determine the leading ONE degradation when comparing the evaluated image to the reference.",
            "Determine the most impactful ONE distortion in the evaluated image compared to the reference.",
            "Highlight the most significant ONE distortion in the evaluated image in comparison to the reference.",
            "Identify the chief ONE degradation in the evaluated image when compared to the reference.",
            "Identify the most notable ONE distortion in the evaluated image's quality when compared to the reference.",
            "In comparison to the reference, what ONE distortion is most prominent in the evaluated image?",
            "What ONE distortion is most apparent in the evaluated image relative to the reference?",
            "What ONE distortion most significantly affects the evaluated image compared to the reference?",
            "What ONE distortion stands out in the evaluated image against the reference?",
            "What critical ONE quality degradation is present in the evaluated image versus the reference?",
        ],
        # Ask for the two most salient distortions vs. the reference.
        "TWO": [
            "Compared to the reference, what TWO distortions stand out most in the evaluated image?",
            "Determine the leading TWO degradations when comparing the evaluated image to the reference.",
            "Determine the most impactful TWO distortions in the evaluated image compared to the reference.",
            "Highlight the most significant TWO distortions in the evaluated image in comparison to the reference.",
            "Identify the chief TWO degradations in the evaluated image when compared to the reference.",
            "Identify the most notable TWO distortions in the evaluated image's quality when compared to the reference.",
            "In comparison to the reference, what TWO distortions are most prominent in the evaluated image?",
            "What TWO distortions are most apparent in the evaluated image relative to the reference?",
            "What TWO distortions most significantly affect the evaluated image compared to the reference?",
            "What TWO distortions stand out in the evaluated image against the reference?",
            "What critical TWO quality degradations are present in the evaluated image versus the reference?",
        ],
        # Count-agnostic phrasings ("distortion(s)") usable for any answer count.
        "Common": [
            "Compared to the reference, what distortion(s) stand out most in the evaluated image?",
            "Determine the leading degradation(s) when comparing the evaluated image to the reference.",
            "Determine the most impactful distortion(s) in the evaluated image compared to the reference.",
            "Highlight the most significant distortion(s) in the evaluated image in comparison to the reference.",
            "Identify the chief degradation(s) in the evaluated image when compared to the reference.",
            "Identify the most notable distortion(s) in the evaluated image's quality when compared to the reference.",
            "In comparison to the reference, what distortion(s) are most prominent in the evaluated image?",
            "What critical quality degradation(s) are present in the evaluated image versus the reference?",
            "What distortion(s) are most apparent in the evaluated image relative to the reference?",
            "What distortion(s) most significantly affect the evaluated image compared to the reference?",
            "What distortion(s) stand out in the evaluated image against the reference?"
        ]
    },
    "Non-Reference": {
        # Single-distortion questions with no reference image.
        "ONE": [
            "Determine the leading ONE degradation in the evaluated image.",
            "Determine the most impactful ONE distortion in the evaluated image.",
            "Highlight the most significant ONE distortion in the evaluated image.",
            "Identify the chief ONE degradation in the evaluated image.",
            "Identify the most critical ONE distortion in the evaluated image.",
            "Identify the most notable ONE distortion in the evaluated image's quality.",
            "In terms of image quality, what is the most glaring ONE issue with the evaluated image?",
            "In the evaluated image, what ONE distortion is most detrimental to image quality?",
            "Pinpoint the foremost ONE image quality issue in the evaluated image.",
            "What ONE distortion is most apparent in the evaluated image?",
            "What ONE distortion is most evident in the evaluated image?",
            "What ONE distortion is most prominent in the evaluated image?",
            "What ONE distortion is most prominent when examining the evaluated image?",
            "What ONE distortion most detrimentally affects the overall quality of the evaluated image?",
            "What ONE distortion most notably affects the clarity of the evaluated image?",
            "What ONE distortion most significantly affects the evaluated image?",
            "What ONE distortion stands out in the evaluated image?",
            "What ONE quality degradation is most apparent in the evaluated image?",
            "What critical ONE quality degradation is present in the evaluated image?",
            "What is the foremost ONE distortion affecting the evaluated image's quality?",
            "What is the leading ONE distortion in the evaluated image?",
            "What is the most critical ONE image quality issue in the evaluated image?",
            "What is the most severe ONE degradation observed in the evaluated image?",
            "What is the primary ONE degradation observed in the evaluated image?"
        ],
        # Two-distortion questions with no reference image.
        "TWO": [
            "Determine the leading TWO degradations in the evaluated image.",
            "Determine the most impactful TWO distortions in the evaluated image.",
            "Highlight the most significant TWO distortions in the evaluated image.",
            "Identify the chief TWO degradations in the evaluated image.",
            "Identify the most critical TWO distortions in the evaluated image.",
            "Identify the most notable TWO distortions in the evaluated image's quality.",
            "In terms of image quality, what are the most glaring TWO issues with the evaluated image?",
            "In the evaluated image, what TWO distortions are most detrimental to image quality?",
            "Pinpoint the foremost TWO image quality issues in the evaluated image.",
            "What TWO distortions are most apparent in the evaluated image?",
            "What TWO distortions are most evident in the evaluated image?",
            "What TWO distortions are most prominent in the evaluated image?",
            "What TWO distortions are most prominent when examining the evaluated image?",
            "What TWO distortions most detrimentally affect the overall quality of the evaluated image?",
            "What TWO distortions most notably affect the clarity of the evaluated image?",
            "What TWO distortions most significantly affect the evaluated image?",
            "What TWO distortions stand out in the evaluated image?",
            "What TWO quality degradations are most apparent in the evaluated image?",
            "What are the foremost TWO distortions affecting the evaluated image's quality?",
            "What are the leading TWO distortions in the evaluated image?",
            "What are the most critical TWO image quality issues in the evaluated image?",
            "What are the most severe TWO degradations observed in the evaluated image?",
            "What are the primary TWO degradations observed in the evaluated image?",
            "What critical TWO quality degradations are present in the evaluated image?",
        ],
        # Count-agnostic no-reference phrasings.
        "Common": [
            "Determine the leading degradation(s) in the evaluated image.",
            "Determine the most impactful distortion(s) in the evaluated image.",
            "Highlight the most significant distortion(s) in the evaluated image.",
            "Identify the chief degradation(s) in the evaluated image.",
            "Identify the most critical distortion(s) in the evaluated image.",
            "Identify the most notable distortion(s) in the evaluated image's quality.",
            "In terms of image quality, what are the most glaring issue(s) with the evaluated image?",
            "In the evaluated image, what distortion(s) are most detrimental to image quality?",
            "Pinpoint the foremost image quality issue(s) in the evaluated image.",
            "What are the foremost distortion(s) affecting the evaluated image's quality?",
            "What are the leading distortion(s) in the evaluated image?",
            "What are the most critical image quality issue(s) in the evaluated image?",
            "What are the most severe degradation(s) observed in the evaluated image?",
            "What are the primary degradation(s) observed in the evaluated image?",
            "What critical quality degradation(s) are present in the evaluated image?",
            "What distortion(s) are most apparent in the evaluated image?",
            "What distortion(s) are most evident in the evaluated image?",
            "What distortion(s) are most prominent in the evaluated image?",
            "What distortion(s) are most prominent when examining the evaluated image?",
            "What distortion(s) most detrimentally affect the overall quality of the evaluated image?",
            "What distortion(s) most notably affect the clarity of the evaluated image?",
            "What distortion(s) most significantly affect the evaluated image?",
            "What distortion(s) stand out in the evaluated image?",
            "What quality degradation(s) are most apparent in the evaluated image?"
        ]
    }
}
|
|
|
|
|
def question_generate(ref="Full-Reference", degra="Common"):
    """Sample a question prompt for one annotation conversation.

    Args:
        ref: reference mode, a top-level key of ``question_dict``
            ("Full-Reference" or "Non-Reference").
        degra: which count-specific pool to mix in ("ONE", "TWO" or
            "Common"); the "Common" pool is always eligible as well.

    Returns:
        A question string; with probability 0.6 a short-answer instruction
        is appended.
    """
    # Plain string literal — the original used an f-string with no
    # placeholders, which is a no-op and misleading.
    option = " Answer the question using a single word or phrase."
    # Draw uniformly from the combined Common + requested pool.
    template = random.choice(question_dict[ref]["Common"] + question_dict[ref][degra])
    if random.random() >= 0.4:
        template += option
    return template
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def lerp(a, b, t):
    """Linear interpolation: returns a at t == 0 and b at t == 1."""
    delta = b - a
    return a + delta * t
|
|
|
|
|
def choice_with_level(options_low, options_high, t):
    """Interpolate a (lo, hi) interval between two option ranges by t,
    then draw a uniform random sample from the interpolated interval."""
    lo = options_low[0] + (options_high[0] - options_low[0]) * t
    hi = options_low[1] + (options_high[1] - options_low[1]) * t
    return random.uniform(lo, hi)
|
|
|
|
|
def int_range_with_level(rng_low, rng_high, t):
    """Interpolate an integer (low, high) range by t and draw a randint.

    Both endpoints are rounded to the nearest int; if rounding ever makes
    the upper bound fall below the lower one, the draw collapses to `low`.
    """
    bounds = [
        int(round(rng_low[i] + (rng_high[i] - rng_low[i]) * t))
        for i in range(2)
    ]
    lo, hi = bounds
    return random.randint(lo, max(lo, hi))
|
|
|
|
|
def tuple_range_with_level(rng_low, rng_high, t):
    """Interpolate both endpoints of a (lo, hi) range tuple by t."""
    first = rng_low[0] + (rng_high[0] - rng_low[0]) * t
    second = rng_low[1] + (rng_high[1] - rng_low[1]) * t
    return (first, second)
|
|
|
|
|
|
|
|
def level_to_t(level):
    """Map a severity level in 1..5 onto t in [0, 1].

    Non-integer input is truncated; out-of-range levels are clamped.
    """
    clamped = min(5, max(1, int(level)))
    return (clamped - 1) / 4.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def snow(image, level):
    """Overlay a snowy-landscape brightening pass plus falling snowflakes.

    Args:
        image: HxWxC uint8 RGB image (numpy array).
        level: severity in {1..5}; any other value raises KeyError from the
            per-level lookup tables below.

    Returns:
        (img, params): the degraded uint8 image and a dict of the parameter
        ranges passed to the augmenters.

    Reference notes kept from the original implementation:
        original: A.RandomSnow(brightness_coeff=1.0, snow_point_lower=0.3, snow_point_upper=0.7, p=1),
        iaa.Snowflakes(density=0.35, flake_size=(0.6, 0.8), speed=(0.01, 0.015), angle=0)

        snow_point_lower, snow_point_upper: the cover rate of the snow
        density = (0.005, 0.075)
        density_uniformity = (0.3, 0.9)
        flake_size = (0.2, 0.7)  # ratio to image
        flake_size_uniformity = (0.4, 0.8)
        speed = (0.007, 0.03)

        # earlier level-interpolated variant, kept for reference:
        # t = level_to_t(level)  # 1-5 to 0-1
        # brightness coefficient fixed at 1.0; snow density/size/speed grow with level
        # snow_point_lower = lerp(0.1, 0.6, t)   # 0.1, 0.225, 0.35, 0.475, 0.6
        # snow_point_upper = lerp(0.3, 0.9, t)   # 0.3, 0.45, 0.6, 0.75, 0.9
        # flake_size = tuple_range_with_level((0.2, 0.4), (0.7, 1.0), t)
        # density = lerp(0.05, 0.6, t)
        # speed = tuple_range_with_level((0.005, 0.01), (0.02, 0.04), t)
    """
    # Per-level ranges for the snowy-landscape pass: higher level means a
    # stronger lightness multiplier and a higher threshold (more pixels whitened).
    brightness_coeff_level = {1: (1.1, 1.15), 2: (1.15, 1.2), 3:(1.2, 1.25), 4:(1.3, 1.35), 5:(1.35, 1.4)}
    lightness_threshold_level = {1: (60, 80), 2: (80, 100), 3:(100, 115), 4:(115, 130), 5:(130, 140)}
    brightness = brightness_coeff_level[level]
    lightness_threshold = lightness_threshold_level[level]

    # First pass: brighten the scene so it reads as snow-covered ground.
    aug1 = iaa.FastSnowyLandscape(
        lightness_threshold=lightness_threshold,
        lightness_multiplier=brightness
    )
    img = aug1.augment_image(image)

    # Second pass: overlay falling snowflakes; only the density scales with
    # level, flake size and speed are fixed ranges.
    density_level = {1: (0.005, 0.015), 2: (0.015, 0.030), 3: (0.030, 0.045), 4: (0.045, 0.060), 5: (0.060, 0.075)}
    density = density_level[level]
    flake_size = (0.2, 0.5)
    speed = (0.007, 0.03)
    aug = iaa.Snowflakes(density=density, density_uniformity=0.95,
                         flake_size=flake_size, speed=speed, angle=0)
    img = aug.augment_image(img)

    # Record the (range) parameters handed to the augmenters, not the exact
    # values imgaug sampled internally.
    params = {
        "lightness_threshold": lightness_threshold,
        "brightness": brightness,
        "flake_size": flake_size,
        "density": density,
        "speed": speed
    }
    return img, params
|
|
|
|
|
|
|
|
def fog(image, level):
    """Blend an imgaug-generated fog layer over an RGB image.

    Args:
        image: HxWxC uint8 RGB image (numpy array).
        level: severity in {1..5}; any other value raises KeyError.

    Returns:
        (img, params): fogged uint8 image and {"alpha": blend weight used}.

    Historical notes kept from the original implementation:
        original: fog_coef_lower = 0.1, fog_coef_upper = 0.2, alpha_coef = 0.08
        t = level_to_t(level)
        fog_coef_lower = lerp(0.05, 0.4, t)
        fog_coef_upper = lerp(0.12, 0.7, t)
        alpha_coef = lerp(0.02, 0.15, t)
    """
    # Per-level blend-weight ranges. NOTE(review): only the upper bound
    # ratio[level][1] is ever used below — the lower bound looks like it was
    # intended for a random.uniform draw that never got added; confirm intent.
    ratio = {1: (0.05, 0.2), 2: (0.2, 0.4), 3:(0.4, 0.6), 4:(0.6, 0.8), 5:(0.8, 1.0)}
    aug = iaa.Fog()
    fog_map = aug.augment_image(image)
    # Convex combination of source and fog layer; the float result stays in
    # [0, 255], so the uint8 cast below cannot overflow.
    img = image * (1-ratio[level][1]) + fog_map * ratio[level][1]
    img = img.astype(np.uint8)

    params = {
        "alpha": ratio[level][1]
    }
    return img, params
|
|
|
|
|
def rain(image, level):
    """Render rain streaks over an RGB image, scaling parameters to image size.

    Args:
        image: HxWxC uint8 RGB image (numpy array).
        level: severity in {1..5}; any other value raises KeyError.

    Returns:
        (img, params): rained uint8 image and the parameters actually used.

    Historical notes kept from the original implementation:
        original: iaa.Rain(drop_size=(0.40, 0.50), speed=(0.05, 0.1))
        official: drop_size = (0.01, 0.02), speed = (0.04, 0.2)

        t = level_to_t(level)
        drop_size = tuple_range_with_level((0.20, 0.35), (0.45, 0.65), t)
        speed = tuple_range_with_level((0.02, 0.06), (0.08, 0.15), t)

        nb_iterations: 1-3 DENSITY (1,1), (1,2), (2,2), (2,3), (3,3)
        drop_size: coarseness of a raindrop (relative to image scale), (0.01, 0.01)
        speed: length of the raindrop (larger -> longer but thinner streaks)
    """
    H,W,C = image.shape

    # nb_iterations = number of stacked rain layers; density grows with level.
    nb_iterations_level = {1: (1,1), 2: (1,2), 3: (2,2), 4: (2, 3), 5: (3, 3)}
    # NOTE(review): levels 2-4 share the same drop_size range — confirm intended.
    drop_size_level = {1: (0.01, 0.01), 2: (0.01, 0.02), 3: (0.01, 0.02), 4: (0.01, 0.02), 5: (0.02, 0.03)}

    # Scale drop size with image width; the ranges above were tuned at W=256.
    drop_rate = W / 256
    drop_size = list(np.array(drop_size_level[level]) * drop_rate)

    # Scale speed inversely with image height; the range was tuned at H=192.
    speed_level = (0.04, 0.2)
    speed_rate = H / 192
    speed = list(np.array(speed_level) / speed_rate)

    aug = iaa.Rain(nb_iterations=nb_iterations_level[level],
                   drop_size=drop_size,
                   speed=speed)
    img = aug.augment_image(image)

    params = {"nb_iterations": nb_iterations_level[level], "drop_size": drop_size, "speed": speed}
    return img, params
|
|
|
|
|
# Dispatch table: distortion class name -> degradation function (image, level) -> (img, params).
DEG_FUNCS = {'Snow': snow, 'Fog': fog, 'Rain': rain}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def apply_degradation(case, image, level, seed=None):
    """Apply the degradation named by `case` ('Snow'/'Fog'/'Rain') at `level`.

    When `seed` is given, the global `random` and NumPy RNGs are reseeded
    first so the sample can be reproduced.
    NOTE(review): some imgaug versions keep their own RNG state — confirm
    that np.random.seed alone makes the augmenters deterministic.
    """
    if seed is not None:
        random.seed(seed)
        np.random.seed(seed)
    return DEG_FUNCS[case](image, level)
|
|
|
|
|
def read_rgb(path):
    """Load the image at `path` as an RGB numpy array.

    Raises:
        FileNotFoundError: if OpenCV cannot read the file.
    """
    bgr = cv2.imread(path, cv2.IMREAD_COLOR)
    if bgr is None:
        raise FileNotFoundError(path)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
|
|
|
|
|
def save_bgr(path, rgb):
    """Write an RGB numpy array to `path` (converted to BGR for cv2.imwrite)."""
    cv2.imwrite(path, cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR))
|
|
|
|
|
def main(): |
|
|
parser = argparse.ArgumentParser() |
|
|
parser.add_argument("--rt", type=str, default="/home/CORP/hsiang.chen/Project/Datasets/DataDepictQA") |
|
|
parser.add_argument("--save_rt", type=str, default="/home/CORP/hsiang.chen/Project/Datasets/DataDepictQA") |
|
|
parser.add_argument("--image_folder", type=str, default=None) |
|
|
parser.add_argument("--meta_folder", type=str, default=None) |
|
|
parser.add_argument("--case", type=str, default="Random", help="'Snow', 'Fog', 'Rain' or 'Random'") |
|
|
parser.add_argument("--n_per_image", type=int, default=1, help="how many degraded samples per clear image") |
|
|
parser.add_argument("--levels", type=str, default="1-5", help="level range, e.g. '3-5' or '2,3,5'") |
|
|
parser.add_argument("--seed", type=int, default=42) |
|
|
args = parser.parse_args() |
|
|
|
|
|
|
|
|
level_tokens = [s.strip() for s in args.levels.split(",")] |
|
|
levels = [] |
|
|
for tok in level_tokens: |
|
|
if "-" in tok: |
|
|
a, b = tok.split("-") |
|
|
a, b = int(a), int(b) |
|
|
levels.extend(list(range(min(a, b), max(a, b) + 1))) |
|
|
else: |
|
|
levels.append(int(tok)) |
|
|
levels = sorted(set([L for L in levels if 1 <= L <= 5])) |
|
|
|
|
|
|
|
|
clear_dir = args.image_folder |
|
|
jpg_list = glob("%s/*.jpg"%clear_dir) |
|
|
png_list = glob("%s/*.png"%clear_dir) |
|
|
bmp_list = glob("%s/*.bmp"%clear_dir) |
|
|
print(len(jpg_list), len(png_list), len(bmp_list)) |
|
|
image_list = sorted(jpg_list + png_list + bmp_list) |
|
|
|
|
|
if len(image_list) == 0: |
|
|
print(f"No images found under {clear_dir}") |
|
|
return |
|
|
|
|
|
all_cases = list(DEG_FUNCS.keys()) |
|
|
|
|
|
|
|
|
clear_folder = Path(clear_dir) |
|
|
distortion_dir = clear_folder.with_name(clear_folder.name + "_weather") |
|
|
meta_refA_path = os.path.join(args.meta_folder, "train_refA_weather_brief.json") |
|
|
meta_A_path = os.path.join(args.meta_folder, "train_A_weather_brief.json") |
|
|
|
|
|
|
|
|
distortion_dir = os.path.join(args.save_rt, os.path.relpath(distortion_dir, args.rt)) |
|
|
os.makedirs(distortion_dir, exist_ok=True) |
|
|
meta_refA_path = os.path.join(args.save_rt, os.path.relpath(meta_refA_path, args.rt)) |
|
|
os.makedirs(os.path.dirname(meta_refA_path), exist_ok=True) |
|
|
meta_A_path = os.path.join(args.save_rt, os.path.relpath(meta_A_path, args.rt)) |
|
|
os.makedirs(os.path.dirname(meta_A_path), exist_ok=True) |
|
|
|
|
|
print("="*100) |
|
|
print(f"Found {len(image_list)} clear images from {args.image_folder}") |
|
|
print(f"Save images in {distortion_dir}, and save annotation in {meta_A_path}, {meta_refA_path}") |
|
|
print(f"Cases: {'ALL (random)' if args.case=='random' else args.case}, Levels: {levels}, n_per_image={args.n_per_image}, n_per_image={args.n_per_image}, seed={args.seed}") |
|
|
print("="*100) |
|
|
|
|
|
g_seed = args.seed |
|
|
random.seed(g_seed) |
|
|
np.random.seed(g_seed) |
|
|
|
|
|
meta_refA = [] |
|
|
meta_A = [] |
|
|
|
|
|
for path in tqdm(image_list): |
|
|
base = os.path.basename(path) |
|
|
rgb = read_rgb(path) |
|
|
|
|
|
save_folder = os.path.join(distortion_dir, os.path.relpath(os.path.dirname(path), clear_dir)) |
|
|
os.makedirs(save_folder, exist_ok=True) |
|
|
|
|
|
for k in range(args.n_per_image): |
|
|
|
|
|
if args.case == "Random": |
|
|
case = random.choice(all_cases) |
|
|
else: |
|
|
if args.case not in DEG_FUNCS: |
|
|
raise ValueError(f"Unknown case: {args.case}") |
|
|
case = args.case |
|
|
level = random.choice(levels) |
|
|
|
|
|
|
|
|
|
|
|
local_seed = (hash((base, k, case, level)) ^ g_seed) & 0xFFFFFFFF |
|
|
out_img, params = apply_degradation(case, rgb, level, seed=local_seed) |
|
|
|
|
|
|
|
|
stem, ext = os.path.splitext(base) |
|
|
out_name = f"{stem}_{case}_L{level}_{k}{ext}" |
|
|
out_path = os.path.join(save_folder, out_name) |
|
|
save_bgr(out_path, out_img) |
|
|
|
|
|
|
|
|
meta_refA.append({ |
|
|
"distortion_class": case, |
|
|
"distortion_name": case, |
|
|
"severity": level, |
|
|
"id": f"{stem}_{k}", |
|
|
"image_ref": os.path.relpath(path, args.rt).replace("\\", "/"), |
|
|
"image_A": os.path.relpath(out_path, args.save_rt).replace("\\", "/"), |
|
|
"image_B": None, |
|
|
"task_type": "quality_single_A", |
|
|
"conversations": [ |
|
|
{ |
|
|
"from": "human", |
|
|
"value": question_generate(ref="Full-Reference", degra="ONE"), |
|
|
}, |
|
|
{ |
|
|
"from": "gpt", |
|
|
"value": case |
|
|
} |
|
|
], |
|
|
"params": params |
|
|
}) |
|
|
|
|
|
meta_A.append({ |
|
|
"distortion_class": case, |
|
|
"distortion_name": case, |
|
|
"severity": level, |
|
|
"id": f"{stem}_{k}", |
|
|
"image_ref": None, |
|
|
"image_A": os.path.relpath(out_path, args.save_rt).replace("\\", "/"), |
|
|
"image_B": None, |
|
|
"task_type": "quality_single_A_noref", |
|
|
"conversations": [ |
|
|
{ |
|
|
"from": "human", |
|
|
"value": question_generate(ref="Non-Reference", degra="ONE"), |
|
|
}, |
|
|
{ |
|
|
"from": "gpt", |
|
|
"value": case |
|
|
} |
|
|
], |
|
|
"params": params |
|
|
}) |
|
|
|
|
|
with open(meta_refA_path, "w") as f: |
|
|
json.dump(meta_refA, f, indent=4) |
|
|
|
|
|
with open(meta_A_path, "w") as f: |
|
|
json.dump(meta_A, f, indent=4) |
|
|
|
|
|
print(f"Done. Metadata saved to: {meta_refA_path}, {meta_A_path}") |
|
|
|
|
|
# Script entry point: generate degraded images and metadata per CLI arguments.
if __name__ == "__main__":
    main()
|
|
|