# legco-speech / scripts/dataset_summary.py
# Hugging Face page header (not code): uploaded by laubonghaudoi,
# commit 4f81a3b — "Super squash: clean up history after removing
# duplicate folders".
"""Compute summary statistics for the raw and segmented subsets.
Usage:
python -m scripts.dataset_summary
python -m scripts.dataset_summary --json
python -m scripts.dataset_summary --char-mode nowhitespace
python -m scripts.dataset_summary --no-progress
"""
from __future__ import annotations
import argparse
import csv
import json
import re
import sys
from array import array
from pathlib import Path
# Repository root: this file lives in <repo>/scripts/, so go up two levels.
REPO_ROOT = Path(__file__).resolve().parent.parent
# Supported subtitle character-counting modes (see _count_chars).
CHAR_MODES = ("raw", "strip", "nowhitespace")
# Matches any run of whitespace; used by the "nowhitespace" counting mode.
WS_RE = re.compile(r"\s+")
def _increase_csv_field_limit() -> None:
"""Raise CSV field size limit to handle long transcripts safely."""
max_int = sys.maxsize
while True:
try:
csv.field_size_limit(max_int)
return
except OverflowError:
max_int = int(max_int / 10)
def _count_chars(text: str, mode: str) -> int:
if mode == "raw":
return len(text)
if mode == "strip":
return len(text.strip())
if mode == "nowhitespace":
return len(WS_RE.sub("", text))
raise ValueError(f"Unsupported char mode: {mode}")
def _median(values: array, numpy) -> float | None:
if not values:
return None
if numpy is not None:
if values.typecode in ("d", "f"):
dtype = numpy.float64
else:
dtype = numpy.uint64
arr = numpy.frombuffer(values, dtype=dtype)
return float(numpy.median(arr))
values_list = list(values)
values_list.sort()
mid = len(values_list) // 2
if len(values_list) % 2 == 1:
return float(values_list[mid])
return float((values_list[mid - 1] + values_list[mid]) / 2)
def compute_raw(raw_metadata: Path, char_mode: str, numpy, progress=None, task_id=None):
    """Summarize the raw subset from its metadata.csv.

    Streams the CSV row by row, accumulating duration and subtitle-character
    statistics, and returns them as a dict.  Rows with an empty or
    unparsable ``duration_seconds`` are tallied in ``missing_duration_rows``;
    rows whose ``transcription`` column is absent entirely are tallied in
    ``missing_transcription_rows``.  When *progress*/*task_id* are given,
    the progress bar is refreshed every 500 rows and once at the end.
    """
    _increase_csv_field_limit()
    seen_durations = array("d")
    seen_char_counts = array("Q")
    duration_sum = 0.0
    chars_sum = 0
    rows = 0
    no_duration = 0
    no_text = 0
    track = progress is not None and task_id is not None
    with raw_metadata.open("r", encoding="utf-8", newline="") as handle:
        for row in csv.DictReader(handle):
            rows += 1
            if track and rows % 500 == 0:
                progress.update(task_id, completed=rows)
            raw_value = (row.get("duration_seconds") or "").strip()
            parsed = None
            if raw_value:
                try:
                    parsed = float(raw_value)
                except ValueError:
                    parsed = None
            if parsed is None:
                no_duration += 1
            else:
                seen_durations.append(parsed)
                duration_sum += parsed
            text = row.get("transcription")
            if text is None:
                no_text += 1
                continue
            n_chars = _count_chars(text, char_mode)
            seen_char_counts.append(n_chars)
            chars_sum += n_chars
    if track:
        progress.update(task_id, completed=rows)
    n_durations = len(seen_durations)
    n_char_rows = len(seen_char_counts)
    return {
        "files": rows,
        "total_duration_seconds": duration_sum,
        "avg_duration_seconds": (duration_sum / n_durations) if n_durations else None,
        "median_duration_seconds": _median(seen_durations, numpy),
        "total_subtitle_chars": chars_sum,
        "avg_subtitle_chars": (chars_sum / n_char_rows) if n_char_rows else None,
        "median_subtitle_chars": _median(seen_char_counts, numpy),
        "missing_duration_rows": no_duration,
        "missing_transcription_rows": no_text,
    }
def compute_segmented(segmented_dir: Path, char_mode: str, numpy, batch_size: int,
                      progress=None, task_id=None):
    """Summarize the segmented subset by streaming its parquet shards.

    Reads only the ``duration`` and ``text`` columns through a pyarrow
    dataset scanner, accumulating totals, per-row values for medians, and
    counts of null rows, and returns a dict of summary statistics.

    Raises:
        RuntimeError: if pyarrow cannot be imported.
    """
    # pyarrow is an optional dependency; fail with an actionable message.
    try:
        import pyarrow.dataset as ds
    except ImportError as exc:
        raise RuntimeError(
            "pyarrow is required to read segmented parquet shards. "
            "Install with: pip install pyarrow (or uv sync --extra hf)."
        ) from exc
    durations = array("d")  # float64 duration of every non-null row
    char_counts = array("Q")  # uint64 character count of every non-null text
    total_duration = 0.0
    total_chars = 0
    row_count = 0
    missing_duration = 0
    missing_text = 0
    dataset = ds.dataset(segmented_dir, format="parquet")
    # Project only the two needed columns to keep I/O and memory bounded.
    scanner = dataset.scanner(columns=["duration", "text"], batch_size=batch_size)
    for batch in scanner.to_batches():
        row_count += batch.num_rows
        if progress is not None and task_id is not None:
            progress.update(task_id, completed=row_count)
        duration_arr = batch.column(0)
        if duration_arr.null_count:
            # Slow path: nulls present, so inspect values one by one.
            for value in duration_arr.to_pylist():
                if value is None:
                    missing_duration += 1
                    continue
                durations.append(float(value))
                total_duration += float(value)
        else:
            if numpy is not None:
                # Fast path: bulk-convert the column and sum in numpy.
                values = duration_arr.to_numpy(zero_copy_only=False)
                durations.extend(values)
                total_duration += float(values.sum())
            else:
                for value in duration_arr.to_pylist():
                    durations.append(float(value))
                    total_duration += float(value)
        text_arr = batch.column(1)
        if text_arr.null_count:
            # Null texts are counted separately rather than as zero chars.
            for value in text_arr.to_pylist():
                if value is None:
                    missing_text += 1
                    continue
                char_count = _count_chars(value, char_mode)
                char_counts.append(char_count)
                total_chars += char_count
        else:
            for value in text_arr.to_pylist():
                char_count = _count_chars(value, char_mode)
                char_counts.append(char_count)
                total_chars += char_count
    duration_count = len(durations)
    char_count_n = len(char_counts)
    avg_duration = (total_duration / duration_count) if duration_count else None
    avg_chars = (total_chars / char_count_n) if char_count_n else None
    return {
        "segments": row_count,
        "total_duration_seconds": total_duration,
        "avg_duration_seconds": avg_duration,
        "median_duration_seconds": _median(durations, numpy),
        "total_subtitle_chars": total_chars,
        "avg_subtitle_chars": avg_chars,
        "median_subtitle_chars": _median(char_counts, numpy),
        "missing_duration_rows": missing_duration,
        "missing_text_rows": missing_text,
    }
def _format_seconds(seconds: float | None) -> str:
if seconds is None:
return "n/a"
hours = seconds / 3600
return f"{seconds:.2f} ({hours:.2f} hours)"
def _format_number(value: float | int | None) -> str:
if value is None:
return "n/a"
if isinstance(value, float):
return f"{value:.2f}"
return str(value)
def _print_subset(title: str, stats: dict, count_key: str, missing_text_key: str) -> None:
    """Print one subset's summary block in the human-readable report.

    The raw and segmented subsets share every line except the row-count key
    ("files" vs "segments") and the missing-text key, so both reports are
    produced by this single helper instead of two duplicated print blocks.
    """
    print(title)
    print(f"- {count_key}: {stats[count_key]}")
    print(f"- total_duration_seconds: {_format_seconds(stats['total_duration_seconds'])}")
    print(f"- avg_duration_seconds: {_format_number(stats['avg_duration_seconds'])}")
    print(f"- median_duration_seconds: {_format_number(stats['median_duration_seconds'])}")
    print(f"- total_subtitle_chars: {_format_number(stats['total_subtitle_chars'])}")
    print(f"- avg_subtitle_chars: {_format_number(stats['avg_subtitle_chars'])}")
    print(f"- median_subtitle_chars: {_format_number(stats['median_subtitle_chars'])}")
    # Missing-row counters are reported only when at least one is non-zero.
    if stats["missing_duration_rows"] or stats[missing_text_key]:
        print(f"- missing_duration_rows: {stats['missing_duration_rows']}")
        print(f"- {missing_text_key}: {stats[missing_text_key]}")


def main() -> int:
    """CLI entry point: compute and print raw + segmented summary stats.

    Returns a process exit code: 0 on success, 2 when an input path is
    missing or the pyarrow dependency is unavailable.
    """
    parser = argparse.ArgumentParser(description="Compute dataset summary statistics")
    parser.add_argument(
        "--raw-metadata",
        type=Path,
        default=REPO_ROOT / "raw" / "metadata.csv",
        help="Path to raw metadata.csv (default: %(default)s)",
    )
    parser.add_argument(
        "--segmented-dir",
        type=Path,
        default=REPO_ROOT / "segmented",
        help="Directory with segmented parquet shards (default: %(default)s)",
    )
    parser.add_argument(
        "--char-mode",
        choices=CHAR_MODES,
        default="nowhitespace",
        help="How to count subtitle characters: raw, strip, or nowhitespace (default: %(default)s)",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=10000,
        help="Batch size when scanning segmented parquet shards (default: %(default)s)",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        help="Print machine-readable JSON only",
    )
    parser.add_argument(
        "--no-progress",
        action="store_true",
        help="Disable rich progress output",
    )
    args = parser.parse_args()
    # numpy speeds up medians and parquet column sums but stays optional.
    try:
        import numpy as np  # pyarrow depends on numpy, but keep optional
    except ImportError:
        np = None
    # Validate inputs up front so failures are fast and clearly attributed.
    if not args.raw_metadata.exists():
        print(f"Missing raw metadata: {args.raw_metadata}", file=sys.stderr)
        return 2
    if not args.segmented_dir.exists():
        print(f"Missing segmented directory: {args.segmented_dir}", file=sys.stderr)
        return 2
    # Progress output would corrupt --json output, so suppress it there too.
    show_progress = (not args.json) and (not args.no_progress)
    raw_stats = None
    segmented_stats = None
    if show_progress:
        from rich.progress import Progress, SpinnerColumn, TextColumn, TimeElapsedColumn
        progress = Progress(
            SpinnerColumn(),
            TextColumn("[bold blue]{task.description}"),
            TextColumn("{task.completed} rows"),
            TimeElapsedColumn(),
        )
        with progress:
            raw_task = progress.add_task("Raw subset", total=None)
            raw_stats = compute_raw(args.raw_metadata, args.char_mode, np,
                                    progress=progress, task_id=raw_task)
            seg_task = progress.add_task("Segmented subset", total=None)
            try:
                segmented_stats = compute_segmented(
                    args.segmented_dir,
                    args.char_mode,
                    np,
                    args.batch_size,
                    progress=progress,
                    task_id=seg_task,
                )
            except RuntimeError as exc:
                # Stop the live display before writing the error to stderr.
                progress.stop()
                print(str(exc), file=sys.stderr)
                return 2
    else:
        raw_stats = compute_raw(args.raw_metadata, args.char_mode, np)
        try:
            segmented_stats = compute_segmented(args.segmented_dir, args.char_mode, np, args.batch_size)
        except RuntimeError as exc:
            print(str(exc), file=sys.stderr)
            return 2
    payload = {
        "char_mode": args.char_mode,
        "raw": raw_stats,
        "segmented": segmented_stats,
    }
    if args.json:
        print(json.dumps(payload, ensure_ascii=False, indent=2))
        return 0
    print(f"Dataset summary (char_mode={args.char_mode})")
    print("")
    _print_subset("Raw subset", raw_stats, "files", "missing_transcription_rows")
    print("")
    _print_subset("Segmented subset", segmented_stats, "segments", "missing_text_rows")
    return 0
if __name__ == "__main__":
    # Script entry point: propagate main()'s return code as the exit status.
    sys.exit(main())