File size: 3,891 Bytes
07a7354
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
#!/usr/bin/env python3

from __future__ import annotations

import argparse
import json
import re
import unicodedata
from collections import defaultdict
from pathlib import Path

import pyarrow as pa
import pyarrow.parquet as pq


# Characters that are unsafe in filenames across platforms: path separators,
# Windows-reserved punctuation, and control whitespace. Each match is replaced
# with "_" when sanitizing prompt content into a filename key.
INVALID_FILENAME_CHARS = r'[/\\:*?"<>|\n\r\t]'


def normalize_text(value: str) -> str:
    """Coerce *value* to str, trim surrounding whitespace, and NFC-normalize.

    NFC normalization ensures that visually identical Unicode strings
    (e.g. precomposed vs. decomposed accents) compare equal as dict keys.
    """
    trimmed = str(value).strip()
    return unicodedata.normalize("NFC", trimmed)


def safe_filename(name: str, max_len: int = 180) -> str:
    """Sanitize *name* into a key safe to use as a filename.

    The input is NFC-normalized and stripped, invalid filename characters
    (path separators, Windows-reserved punctuation, control whitespace) are
    replaced with underscores, runs of whitespace are collapsed to single
    spaces, and the result is capped at *max_len* characters.

    Returns "untitled" when nothing printable remains.
    """
    cleaned = unicodedata.normalize("NFC", str(name).strip())
    # Replace invalid characters first so embedded newlines/tabs become "_"
    # rather than being collapsed away by the whitespace pass below.
    cleaned = re.sub(r'[/\\:*?"<>|\n\r\t]', "_", cleaned)
    cleaned = re.sub(r"\s+", " ", cleaned).strip()
    if not cleaned:
        cleaned = "untitled"
    if len(cleaned) > max_len:
        # Truncation may expose a trailing space; drop it.
        cleaned = cleaned[:max_len].rstrip()
    return cleaned


def load_prompt_index(prompts_dir: Path) -> dict[str, dict[str, dict[str, object]]]:
    """Index prompt records by category and sanitized-content key.

    Each ``*.json`` file under *prompts_dir* is one category (named after the
    file stem) containing a list of records with ``content`` and ``prompt``
    fields. The lookup key is the record's normalized content passed through
    ``safe_filename``, so it matches the stem of a video file named after
    that content.

    Raises:
        ValueError: if two records within one category collapse to the same
            sanitized key (listing at most the first 20 collisions).
    """
    index: dict[str, dict[str, dict[str, object]]] = defaultdict(dict)
    collisions: list[tuple[str, str]] = []

    for path in sorted(prompts_dir.glob("*.json")):
        category = path.stem
        entries = json.loads(path.read_text(encoding="utf-8"))
        for position, entry in enumerate(entries):
            text = normalize_text(entry["content"])
            key = safe_filename(text)
            if key in index[category]:
                collisions.append((category, key))
            # Later records overwrite earlier ones on collision; the error
            # below still surfaces the clash before any caller uses the index.
            index[category][key] = {
                "content": text,
                "prompt": normalize_text(entry["prompt"]),
                "prompt_id": position,
            }

    if collisions:
        listing = "\n".join(f"- {category}: {key}" for category, key in collisions[:20])
        raise ValueError(f"Duplicate prompt filenames detected:\n{listing}")

    return index


def build_rows(repo_root: Path) -> list[dict[str, object]]:
    """Collect one metadata row per video at ``<repo_root>/<model>/<category>/*.mp4``.

    Rows for videos whose stem matches a prompt record carry that record's
    content, prompt text, and id; unmatched videos fall back to the stem as
    content with ``prompt``/``prompt_id`` set to None. ``matched_prompt``
    records which case applied.
    """
    index = load_prompt_index(repo_root / "prompts")

    rows: list[dict[str, object]] = []
    for mp4 in sorted(repo_root.glob("*/*/*.mp4")):
        # Path layout is <repo_root>/<model>/<category>/<file>.mp4.
        model, category = mp4.parts[-3], mp4.parts[-2]
        stem = normalize_text(mp4.stem)
        record = index.get(category, {}).get(stem)
        matched = record is not None

        rows.append(
            {
                "file_name": mp4.relative_to(repo_root).as_posix(),
                "model": model,
                "category": category,
                "content": record["content"] if matched else stem,
                "prompt": record["prompt"] if matched else None,
                "prompt_id": record["prompt_id"] if matched else None,
                "matched_prompt": matched,
            }
        )

    return rows


def write_parquet(rows: list[dict[str, object]], output_path: Path) -> None:
    """Serialize *rows* (a list of uniform dicts) to a Parquet file at *output_path*."""
    pq.write_table(pa.Table.from_pylist(rows), output_path)


def main() -> None:
    """CLI entry point: scan the dataset repo for MP4s and emit metadata.parquet.

    Raises:
        ValueError: if no MP4 files are found under the repo root.
    """
    parser = argparse.ArgumentParser(
        description="Build metadata.parquet for Hugging Face Dataset Viewer."
    )
    parser.add_argument(
        "--repo-root",
        type=Path,
        default=Path(__file__).resolve().parents[1],
        help="Path to the dataset repository root.",
    )
    parser.add_argument(
        "--output",
        type=Path,
        default=None,
        help="Output parquet path. Defaults to <repo-root>/metadata.parquet.",
    )
    args = parser.parse_args()

    root = args.repo_root.resolve()
    if args.output:
        destination = args.output.resolve()
    else:
        destination = root / "metadata.parquet"

    rows = build_rows(root)
    if not rows:
        raise ValueError(f"No MP4 files found under {root}")

    write_parquet(rows, destination)

    # matched_prompt is a bool per row, so summing counts the True values.
    matched_count = sum(row["matched_prompt"] for row in rows)
    print(f"Wrote {len(rows)} rows to {destination}")
    print(f"Matched prompts: {matched_count}/{len(rows)}")


# Run only when executed as a script, so the module stays importable.
if __name__ == "__main__":
    main()