"""
Custom Hugging Face dataset loader for the INR-benchmark repository.
* Place this file in the root of the dataset repo alongside README.md.
* Use `load_dataset("username/INR-benchmark", "spheres", split="1234", trust_remote_code=True)`.
* Each example yields only the **file path** to keep memory/lightweight; users can `np.load` or `cv2.imread` themselves.
Supported configs
├── `div2k` – 10 RGB PNG images (HR or ×4 LR)
├── `ct` – single chest CT slice (PNG)
├── `spheres` – generated sparse-sphere .npy grids for 5 seeds
├── `bandlimited` – band-limited white-noise .npy grids for 5 seeds
├── `sierpinski` – 9 depth levels of Sierpinski triangle .npy
└── `star_target` – 1 synthetic star-resolution target .npy
The loader intentionally returns **file paths** so that 2-D PNGs and 2-/3-D NPYs
coexist without coercing them into a single Arrow schema.
"""
import glob
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

_HOMEPAGE = "https://huggingface.co/datasets/etoilekim/INR-benchmark"
_LICENSE = "CC-BY-4.0"
# Mapping: config name → list of (split name, glob pattern) pairs.
_CONFIG_MAP: Dict[str, List[Tuple[str, str]]] = {
    # One split per image, named after the DIV2K image id.
    "div2k": [(name, f"DIV2K/{name}.png") for name in
              ["0064", "0007", "0010", "0029", "0063", "0072", "0079", "0088", "0093", "0131"]],
    "ct": [("ct", "chest.png")],
    "spheres": [(seed, f"SparseSphereSignal/{seed}/*.npy")
                for seed in ["1234", "2024", "5678", "7618", "7890"]],
    "bandlimited": [(seed, f"BandlimitedSignal/{seed}/*.npy")
                    for seed in ["1234", "2024", "5678", "7618", "7890"]],
    # One split per recursion depth (0–8).
    "sierpinski": [(str(i), f"sierpinski_triangle/*{i}.npy") for i in range(9)],
    "star_target": [("star_target", "star_resolution_target.npy")],
}
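
# For example, _CONFIG_MAP["spheres"][0] expands to
# ("1234", "SparseSphereSignal/1234/*.npy"): the split name is the seed and
# the pattern globs every grid generated under that seed.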


class INRBenchmark(datasets.GeneratorBasedBuilder):
    """GeneratorBasedBuilder with one config per logical subset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=cfg, version=datasets.Version("1.0.0"))
        for cfg in _CONFIG_MAP
    ]
    DEFAULT_CONFIG_NAME = "div2k"
    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(
            homepage=_HOMEPAGE,
            license=_LICENSE,
            description=(
                "INR-benchmark: collection of synthetic & real signals "
                "for implicit neural representation research."
            ),
            features=datasets.Features({
                "file_path": datasets.Value("string"),
            }),
        )
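
    # Every config shares this single-field schema: a yielded row is just
    # {"file_path": "<path to a .png or .npy file>"}.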
    def _split_generators(self, dl_manager: datasets.download.DownloadManager):
        cfg_name = self.config.name
        if cfg_name not in _CONFIG_MAP:
            raise ValueError(f"Unknown config: {cfg_name}")
        # Resolve the local repo root (no remote download; the data files live
        # alongside this script in the repo).
        base_dir = Path(dl_manager.download_and_extract("."))
        splits = []
        for split_name, pattern in _CONFIG_MAP[cfg_name]:
            # Keep it as a glob pattern; actual resolution happens in
            # _generate_examples. Pass a str so gen_kwargs stay serializable.
            abs_pattern = str(base_dir / pattern)
            splits.append(
                datasets.SplitGenerator(name=split_name, gen_kwargs={"glob_pattern": abs_pattern})
            )
        return splits
    def _generate_examples(self, glob_pattern: str):
        """Yields (index, {"file_path": ...}) for each matched file."""
        # glob.glob handles absolute patterns, which Path().glob would reject.
        files = sorted(Path(p) for p in glob.glob(glob_pattern))
        if not files:
            # Allow a single-file pattern without a wildcard.
            if Path(glob_pattern).exists():
                files = [Path(glob_pattern)]
            else:
                raise FileNotFoundError(f"No files matched pattern: {glob_pattern}")
        for idx, path in enumerate(files):
            # Yield the path as-is; making it relative to the cwd fails when
            # the resolved files live outside the working directory.
            yield idx, {"file_path": str(path)}
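

# Minimal local smoke test (a sketch; assumes this file sits in a local clone
# of the dataset repo so the relative patterns in _CONFIG_MAP resolve, and that
# your `datasets` version still supports script-based loaders):
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset(__file__, "star_target", split="star_target",
                      trust_remote_code=True)
    print(ds[0]["file_path"])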