# UIR-2.5M-Reference / dataset.py
# Uploaded by Legitking4pf ("Update dataset.py", commit 66b9c5d, verified).
import json
import os

from datasets import (
    ClassLabel,
    DatasetInfo,
    Features,
    GeneratorBasedBuilder,
    Image,
    Split,
    SplitGenerator,
)
from huggingface_hub import hf_hub_download
class UIR25MReference(GeneratorBasedBuilder):
    """UIR-2.5M Reference dataset loader with metadata mapping.

    Yields paired (low_quality, high_quality) image examples described by a
    per-split ``metadata.json`` file, and can fetch MaskDCPT pretrained
    checkpoints from the Hugging Face Hub into a local directory.
    """

    VERSION = "1.0.0"

    # Local directory the pretrained checkpoints are downloaded into.
    # NOTE: the directory itself is created lazily in
    # download_pretrained_models() rather than at class-definition time,
    # so merely importing this module has no filesystem side effects.
    PRETRAINED_DIR = os.path.expanduser("./pretrained_models")

    # Checkpoint filenames hosted under pretrained_models/ in PRETRAINED_REPO.
    PRETRAINED_MODELS = [
        "nafnet_maskdcpt_12d.pth",
        "nafnet_maskdcpt_5d.pth",
        "nafnet_maskdcpt_mixed.pth",
        "promptir_maskdcpt_5d.pth",
        "restormer_maskdcpt_5d.pth",
        "swinir_maskdcpt_5d.pth",
    ]
    PRETRAINED_REPO = "Jiakui/MaskDCPT"

    def download_pretrained_models(self):
        """Download all pretrained models to the local directory.

        Returns:
            list[str]: local filesystem paths of the checkpoint files.
        """
        os.makedirs(self.PRETRAINED_DIR, exist_ok=True)
        downloaded_files = []
        for filename in self.PRETRAINED_MODELS:
            # hf_hub_download(..., local_dir=...) preserves the repo subpath,
            # so the file lands under PRETRAINED_DIR/pretrained_models/ —
            # check that path, not PRETRAINED_DIR/<name>, or the cache check
            # never hits. (NOTE(review): confirm against the repo layout.)
            target_path = os.path.join(
                self.PRETRAINED_DIR, "pretrained_models", filename
            )
            if not os.path.exists(target_path):
                # BUG FIX: these f-strings previously contained the literal
                # text "(unknown)" instead of the {filename} placeholder,
                # which also made the Hub filename argument invalid.
                print(f"Downloading pretrained model: {filename}")
                # Use the returned path as the authoritative local location.
                target_path = hf_hub_download(
                    repo_id=self.PRETRAINED_REPO,
                    filename=f"pretrained_models/{filename}",
                    local_dir=self.PRETRAINED_DIR,
                )
            else:
                print(f"{filename} already exists, skipping download.")
            downloaded_files.append(target_path)
        return downloaded_files

    def _info(self):
        """Return the dataset metadata and feature schema."""
        # BUG FIX: task_categories= and language= are not DatasetInfo fields
        # and raise TypeError; they belong in the dataset card's YAML header.
        return DatasetInfo(
            description=(
                "UIR-2.5M Reference: Universal Image Restoration paired "
                "dataset schema."
            ),
            homepage="https://huggingface.co/datasets/Legitking4pf/UIR-2.5M-Reference",
            license="mit",
            features=Features({
                "low_quality": Image(),
                "high_quality": Image(),
                "degradation_type": ClassLabel(names=[
                    "noise", "blur", "compression", "haze", "low_light",
                    "degradation_5", "degradation_6", "degradation_7",
                    "degradation_8", "degradation_9",
                    "degradation_10", "degradation_11", "degradation_12",
                    "degradation_13", "degradation_14", "degradation_15",
                    "degradation_16", "degradation_17", "degradation_18", "degradation_19"
                ])
            }),
        )

    def _split_generators(self, dl_manager):
        """Detect train/test/validation splits automatically.

        Expects the downloaded data to contain directories named
        ``train``, ``test`` and/or ``validation``; only splits whose
        directory exists are generated.
        """
        # NOTE(review): this URL points at a git repository, not an archive;
        # download_and_extract may not yield the expected layout — confirm.
        data_dir = dl_manager.download_and_extract(
            "https://github.com/MILab-PKU/MaskDCPT.git"
        )
        splits = []
        for split_name in ["train", "test", "validation"]:
            split_path = os.path.join(data_dir, split_name)
            if os.path.exists(split_path):
                splits.append(
                    # BUG FIX: was self.SplitGenerator — builders have no
                    # such attribute; use datasets.SplitGenerator instead.
                    SplitGenerator(
                        name=getattr(Split, split_name.upper()),
                        gen_kwargs={"images_dir": split_path},
                    )
                )
        return splits

    def _generate_examples(self, images_dir):
        """Yield (index, example) pairs using the metadata JSON mapping.

        Args:
            images_dir: split directory containing ``low_quality/``,
                ``high_quality/`` and ``metadata.json``.

        Raises:
            FileNotFoundError: if ``metadata.json`` is missing.
        """
        low_dir = os.path.join(images_dir, "low_quality")
        high_dir = os.path.join(images_dir, "high_quality")
        metadata_file = os.path.join(images_dir, "metadata.json")
        if not os.path.exists(metadata_file):
            raise FileNotFoundError(f"Metadata file not found: {metadata_file}")
        with open(metadata_file, "r") as f:
            metadata = json.load(f)
        for idx, sample in enumerate(metadata):
            low_fname = sample["low_quality"]
            high_fname = sample["high_quality"]
            degradation_type = sample["degradation_type"]
            low_path = os.path.join(low_dir, low_fname)
            high_path = os.path.join(high_dir, high_fname)
            # Skip (rather than fail on) pairs whose files are missing, so a
            # partially-populated split still yields its valid examples.
            if os.path.exists(low_path) and os.path.exists(high_path):
                yield idx, {
                    "low_quality": low_path,
                    "high_quality": high_path,
                    "degradation_type": degradation_type,
                }
            else:
                print(f"Skipping missing files: {low_fname} or {high_fname}")