# temp_dataset/metadb_code/export_speakervid_jsonl.py
# Uploaded by xingzhaohu (initial upload, commit 4b3a024, verified)
from __future__ import annotations
import argparse
import json
import sys
import time
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Tuple
from speakervid_metadb import SpeakerVidMetaDB
def _maybe_parse_json_text(value: Any) -> Any:
if value is None:
return None
if not isinstance(value, str):
return value
s = value.strip()
if not s:
return value
if s[0] not in "{[":
return value
try:
return json.loads(s)
except Exception:
return value
def _format_rate(rows: int, seconds: float) -> str:
if seconds <= 0:
return "?"
return f"{rows / seconds:,.0f} rows/s"
def _render_bar(done: int, total: int, width: int = 28) -> str:
if total <= 0:
return "[" + ("#" * (width // 2)) + (" " * (width - width // 2)) + "]"
frac = min(max(done / total, 0.0), 1.0)
filled = int(round(frac * width))
return "[" + ("#" * filled) + (" " * (width - filled)) + "]"
def main() -> int:
    """CLI entry point: stream filtered SpeakerVid rows from Parquet to JSONL.

    Builds a DuckDB WHERE clause from --where plus the optional Sync-c bounds,
    counts the matching rows for progress reporting, then fetches rows in
    batches and writes one JSON object per line. Progress is printed to stderr
    so stdout stays clean. Returns 0 on success.
    """
    parser = argparse.ArgumentParser(
        description="Export filtered SpeakerVid rows to JSONL with progress."
    )
    parser.add_argument(
        "--parquet-glob",
        type=str,
        default="/mnt/nfs/datasets/SpeakerVid-5M/merged_anno/extracted_parquet/*.parquet",
        help="Parquet glob path.",
    )
    parser.add_argument(
        "--output",
        type=Path,
        required=True,
        help="Output JSONL path.",
    )
    parser.add_argument(
        "--where",
        type=str,
        default=None,
        help="Optional extra DuckDB WHERE fragment (e.g. 'is_talking=1 AND conf>5').",
    )
    parser.add_argument(
        "--sync-key",
        type=str,
        default="0",
        help='Sync dict key for Sync-c (default: "0").',
    )
    parser.add_argument(
        "--sync-c-min",
        type=float,
        default=None,
        help="Optional Sync-c lower bound (inclusive).",
    )
    parser.add_argument(
        "--sync-c-max",
        type=float,
        default=None,
        help="Optional Sync-c upper bound (inclusive).",
    )
    parser.add_argument(
        "--include-json-path",
        action="store_true",
        help="Include computed json_path in each row (requires filename=true).",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=5000,
        help="Fetch rows in batches to control memory.",
    )
    args = parser.parse_args()

    if args.batch_size <= 0:
        # fetchmany(0) returns [] immediately, which would silently produce
        # an empty export instead of failing loudly.
        parser.error("--batch-size must be a positive integer")

    db = SpeakerVidMetaDB(
        parquet_glob=args.parquet_glob,
        enable_filename=args.include_json_path,
    )

    sync_where = None
    if args.sync_c_min is not None or args.sync_c_max is not None:
        sync_where = db.where_sync_c(
            sync_c_min=args.sync_c_min,
            sync_c_max=args.sync_c_max,
            sync_key=args.sync_key,
        )
    combined_where = db.where_and(args.where, sync_where)

    select_cols = "*"
    if args.include_json_path:
        select_cols = f"*, {db.json_path_expr()}"

    total = db.count(where=combined_where)
    args.output.parent.mkdir(parents=True, exist_ok=True)
    out_path = args.output

    # Only append the WHERE clause when there is one: interpolating a falsy
    # combined clause would emit invalid SQL such as "... WHERE None".
    sql = f"SELECT {select_cols} FROM {db.view_name}"
    if combined_where:
        sql += f" WHERE {combined_where}"
    cur = db.con.execute(sql)
    colnames = [d[0] for d in cur.description]

    done = 0
    t0 = time.time()
    last_print = 0.0
    with out_path.open("w", encoding="utf-8") as f:
        while True:
            rows = cur.fetchmany(args.batch_size)
            if not rows:
                break
            for row in rows:
                item: Dict[str, Any] = dict(zip(colnames, row))
                # Make sync/extras actual JSON objects in the exported JSONL.
                if "sync" in item:
                    item["sync"] = _maybe_parse_json_text(item["sync"])
                if "extras" in item:
                    item["extras"] = _maybe_parse_json_text(item["extras"])
                f.write(json.dumps(item, ensure_ascii=False) + "\n")
            done += len(rows)
            now = time.time()
            # Throttle progress redraws to at most twice per second.
            if now - last_print >= 0.5:
                bar = _render_bar(done, total)
                rate = _format_rate(done, now - t0)
                # Guard the percentage: count() and the actual fetch can
                # disagree (e.g. files changed under the glob), so total may
                # be 0 while rows were still returned.
                pct = done / total * 100 if total > 0 else 100.0
                msg = f"\r{bar} {done}/{total} ({pct:5.1f}%) {rate}"
                print(msg, end="", file=sys.stderr, flush=True)
                last_print = now
    bar = _render_bar(done, total)
    rate = _format_rate(done, time.time() - t0)
    print(f"\r{bar} {done}/{total} (100.0%) {rate}", file=sys.stderr, flush=True)
    print(f"\n[OK] Wrote {done} rows -> {out_path}", file=sys.stderr, flush=True)
    return 0
if __name__ == "__main__":
raise SystemExit(main())