#!/usr/bin/env python3
# cuebench/scripts/build_viewer_files.py
# Provenance (from repository listing): commit d212377, "Use hf URLs for images", by Ishwar B.
"""Utilities to convert the raw metadata dumps into viewer-friendly JSONL files."""
from __future__ import annotations
import json
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Dict, Iterator, MutableMapping
# Repository root: one level above this scripts/ directory.
ROOT = Path(__file__).resolve().parents[1]
# Input dumps live in raw/; viewer-ready JSONL is written under data/.
RAW_DIR = ROOT / "raw"
OUTPUT_DIR = ROOT / "data"
# Hugging Face dataset repo used to build hf:// image URLs for the viewer.
HF_DATASET_ID = "ishwarbb23/cuebench"
HF_IMAGE_PREFIX = f"hf://datasets/{HF_DATASET_ID}/"
# One viewer config per raw metadata dump; key doubles as the output subdirectory name.
CONFIG_SOURCES: Dict[str, Path] = {
"clue": RAW_DIR / "clue_metadata.jsonl",
"mep": RAW_DIR / "mep_metadata.jsonl",
}
@dataclass
class BuildStats:
    """Summary numbers for one built split, surfaced in README/stats.json.

    Attributes:
        num_examples: count of JSONL records written.
        num_bytes: size of the output file on disk.
        source_path: input path, relative to the repository root.
        output_path: output path, relative to the repository root.
    """

    num_examples: int
    num_bytes: int
    source_path: str
    output_path: str

    def as_dict(self) -> Dict[str, object]:
        """Return a plain-dict view suitable for JSON serialization."""
        payload = asdict(self)
        return payload
def _normalize_record(record: MutableMapping[str, object]) -> MutableMapping[str, object]:
    """Add the columns expected by the README and dataset viewer.

    Mutates *record* in place and also returns it. Ensures the presence of
    ``image_id``, ``observed_classes`` (plus the legacy ``detected_classes``
    alias), ``target_classes``, ``image_path``, and an hf:// ``image`` URL.
    """
    # Prefer the aligned id; otherwise fall back to an existing image_id,
    # and as a last resort synthesize one from sequence name + frame number.
    resolved_id = record.get("aligned_id") or record.get("image_id")
    if resolved_id is None:
        sequence = record.get("seq_name", "seq")
        frame_no = record.get("frame_count", 0)
        resolved_id = f"{sequence}.{int(frame_no):05d}"
    record["image_id"] = resolved_id

    # observed_classes wins; fall back to the older detected_classes column.
    classes = record.get("observed_classes") or record.get("detected_classes") or []
    record["observed_classes"] = classes
    # Preserve the detected_classes alias so legacy tooling keeps working.
    record.setdefault("detected_classes", classes)
    record["target_classes"] = record.get("target_classes", [])

    raw_path = record.get("image_path")
    record["image_path"] = raw_path
    # Point the viewer at the image inside the HF dataset repo, if we have a path.
    record["image"] = f"{HF_IMAGE_PREFIX}{Path(raw_path)}" if raw_path else None
    return record
def _iter_records(path: Path) -> Iterator[MutableMapping[str, object]]:
with path.open("r", encoding="utf-8") as src:
for line in src:
if not line.strip():
continue
yield json.loads(line)
def build_split(config_name: str, source_path: Path, output_path: Path) -> BuildStats:
    """Normalize every record in *source_path* and write JSONL to *output_path*.

    *config_name* is accepted for symmetry with the caller but not used here.
    Returns a BuildStats summary with paths relative to the repo root.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    written = 0
    with output_path.open("w", encoding="utf-8") as sink:
        for raw_record in _iter_records(source_path):
            sink.write(json.dumps(_normalize_record(raw_record), ensure_ascii=False))
            sink.write("\n")
            written += 1
    return BuildStats(
        num_examples=written,
        num_bytes=output_path.stat().st_size,
        source_path=str(source_path.relative_to(ROOT)),
        output_path=str(output_path.relative_to(ROOT)),
    )
def main() -> None:
    """Build every configured split, then dump a combined stats.json summary."""
    summaries: Dict[str, Dict[str, object]] = {}
    for name, raw_path in CONFIG_SOURCES.items():
        # Fail loudly up front rather than producing a partial data/ tree.
        if not raw_path.exists():
            raise FileNotFoundError(f"Missing source file for {name}: {raw_path}")
        destination = OUTPUT_DIR / name / "train.jsonl"
        result = build_split(name, raw_path, destination)
        summaries[name] = result.as_dict()
        print(
            f"[{name}] wrote {result.num_examples} examples -> {result.output_path} "
            f"({result.num_bytes} bytes)."
        )
    stats_file = OUTPUT_DIR / "stats.json"
    with stats_file.open("w", encoding="utf-8") as out:
        json.dump(summaries, out, indent=2)
        out.write("\n")  # trailing newline keeps the file POSIX-friendly
    print(f"Wrote summary stats to {stats_file.relative_to(ROOT)}")
if __name__ == "__main__":
main()