| |
|
|
| from __future__ import annotations |
|
|
| import argparse |
| import json |
| import re |
| import unicodedata |
| from collections import defaultdict |
| from pathlib import Path |
|
|
| import pyarrow as pa |
| import pyarrow.parquet as pq |
|
|
|
|
# Characters that are unsafe in cross-platform filenames: path separators,
# Windows-reserved punctuation, and control whitespace (newline/CR/tab).
INVALID_FILENAME_CHARS = r'[/\\:*?"<>|\n\r\t]'
|
|
|
|
def normalize_text(value: str) -> str:
    """Return *value* coerced to str, stripped, and NFC-normalized.

    NFC normalization makes visually-identical Unicode strings compare
    equal (e.g. precomposed vs. combining accents), which keeps lookups
    by text content stable.
    """
    stripped = str(value).strip()
    return unicodedata.normalize("NFC", stripped)
|
|
|
|
def safe_filename(name: str, max_len: int = 180) -> str:
    """Sanitize *name* for use as a cross-platform filename.

    Filesystem-hostile characters become underscores, whitespace runs
    collapse to single spaces, an empty result falls back to "untitled",
    and the final string is capped at *max_len* characters.
    """
    cleaned = normalize_text(name)
    cleaned = re.sub(INVALID_FILENAME_CHARS, "_", cleaned)
    cleaned = re.sub(r"\s+", " ", cleaned).strip()
    if not cleaned:
        cleaned = "untitled"
    # Truncate after the fallback so even "untitled" respects a tiny max_len.
    if len(cleaned) > max_len:
        cleaned = cleaned[:max_len].rstrip()
    return cleaned
|
|
|
|
def load_prompt_index(prompts_dir: Path) -> dict[str, dict[str, dict[str, object]]]:
    """Build a {category -> {filename_key -> record}} index of prompt files.

    Each ``<category>.json`` under *prompts_dir* is expected to hold a list
    of objects with ``content`` and ``prompt`` keys.  The filename-safe form
    of ``content`` becomes the lookup key so records can be matched against
    video files on disk later.

    Returns:
        A plain dict (not a defaultdict) so callers that index a missing
        category get a KeyError instead of silently creating an empty entry.

    Raises:
        ValueError: if two records in the same category collapse to the same
            filename key — the later record would silently shadow the earlier
            one, so this is treated as a data error.
    """
    prompt_index: dict[str, dict[str, dict[str, object]]] = defaultdict(dict)
    duplicates: list[tuple[str, str]] = []

    for prompt_file in sorted(prompts_dir.glob("*.json")):
        category = prompt_file.stem
        records = json.loads(prompt_file.read_text(encoding="utf-8"))
        for prompt_id, record in enumerate(records):
            content = normalize_text(record["content"])
            key = safe_filename(content)
            if key in prompt_index[category]:
                duplicates.append((category, key))
            prompt_index[category][key] = {
                "content": content,
                "prompt": normalize_text(record["prompt"]),
                "prompt_id": prompt_id,
            }

    if duplicates:
        shown = duplicates[:20]
        duplicate_lines = "\n".join(f"- {category}: {key}" for category, key in shown)
        # Say how many were omitted instead of silently truncating the report.
        remaining = len(duplicates) - len(shown)
        if remaining:
            duplicate_lines += f"\n... and {remaining} more"
        raise ValueError(f"Duplicate prompt filenames detected:\n{duplicate_lines}")

    return dict(prompt_index)
|
|
|
|
def build_rows(repo_root: Path) -> list[dict[str, object]]:
    """Collect one metadata row per ``<model>/<category>/*.mp4`` under *repo_root*.

    Rows that match a prompt record (by normalized filename) carry the
    prompt's content/prompt/prompt_id; unmatched rows fall back to the
    filename as content with null prompt fields.
    """
    prompt_index = load_prompt_index(repo_root / "prompts")
    rows: list[dict[str, object]] = []

    for video_path in sorted(repo_root.glob("*/*/*.mp4")):
        # Path layout is <repo_root>/<model>/<category>/<file>.mp4.
        model = video_path.parts[-3]
        category = video_path.parts[-2]
        stem = normalize_text(video_path.stem)
        record = prompt_index.get(category, {}).get(stem)
        matched = record is not None

        row: dict[str, object] = {
            "file_name": video_path.relative_to(repo_root).as_posix(),
            "model": model,
            "category": category,
            "content": record["content"] if matched else stem,
            "prompt": record["prompt"] if matched else None,
            "prompt_id": record["prompt_id"] if matched else None,
            "matched_prompt": matched,
        }
        rows.append(row)

    return rows
|
|
|
|
def write_parquet(rows: list[dict[str, object]], output_path: Path) -> None:
    """Serialize *rows* (list of uniform dicts) to a Parquet file at *output_path*."""
    pq.write_table(pa.Table.from_pylist(rows), output_path)
|
|
|
|
def main() -> None:
    """CLI entry point: scan the dataset repo and emit metadata.parquet."""
    parser = argparse.ArgumentParser(
        description="Build metadata.parquet for Hugging Face Dataset Viewer."
    )
    parser.add_argument(
        "--repo-root",
        type=Path,
        # Default to the repository root, assuming this script lives in a
        # first-level subdirectory (e.g. scripts/).
        default=Path(__file__).resolve().parents[1],
        help="Path to the dataset repository root.",
    )
    parser.add_argument(
        "--output",
        type=Path,
        default=None,
        help="Output parquet path. Defaults to <repo-root>/metadata.parquet.",
    )
    args = parser.parse_args()

    repo_root = args.repo_root.resolve()
    if args.output is None:
        output_path = repo_root / "metadata.parquet"
    else:
        output_path = args.output.resolve()

    rows = build_rows(repo_root)
    if not rows:
        # An empty dataset almost certainly means a wrong --repo-root.
        raise ValueError(f"No MP4 files found under {repo_root}")

    write_parquet(rows, output_path)

    matched = sum(bool(row["matched_prompt"]) for row in rows)
    print(f"Wrote {len(rows)} rows to {output_path}")
    print(f"Matched prompts: {matched}/{len(rows)}")
|
|
|
|
# Run only when executed as a script; importing the module has no side effects.
if __name__ == "__main__":
    main()
|
|