| | """Segment full meeting audio into sentence-level clips and write Parquet shards. |
| | |
| | For each meeting in metadata.csv, this script: |
| | 1. Parses the SRT file to get (start, end, text) segments. |
| | 2. Uses ffmpeg to extract each segment from the opus file (stream copy, no re-encoding). |
| | 3. Batches segments and writes Parquet shards with embedded audio bytes. |
| | |
| | Usage: |
| | python -m scripts.segment_audio [--workers N] [--shard-size N] [--out-dir DIR] |
| | """ |
| |
|
| | import argparse |
| | import collections |
| | import csv |
| | import itertools |
| | import re |
| | import subprocess |
| | from concurrent.futures import FIRST_COMPLETED, ProcessPoolExecutor, wait |
| | from dataclasses import dataclass |
| | from pathlib import Path |
| |
|
| | from rich.console import Console |
| | from rich.progress import ( |
| | BarColumn, |
| | MofNCompleteColumn, |
| | Progress, |
| | SpinnerColumn, |
| | TextColumn, |
| | TimeElapsedColumn, |
| | TimeRemainingColumn, |
| | ) |
| |
|
# Repository root, resolved relative to this file (assumes this script lives
# one directory below the root, e.g. scripts/ — TODO confirm against layout).
REPO_ROOT = Path(__file__).resolve().parent.parent
# Default number of segments written per Parquet shard.
DEFAULT_SHARD_SIZE = 5000
# Default number of parallel ffmpeg/worker processes.
DEFAULT_WORKERS = 4

# Single shared rich console used for all status output in this module.
console = Console()
| |
|
| |
|
@dataclass
class SrtSegment:
    """One timed subtitle cue parsed from an SRT file."""

    index: int  # cue number taken from the SRT block header line
    start_seconds: float  # cue start, in seconds from the start of the audio
    end_seconds: float  # cue end, in seconds from the start of the audio
    text: str  # cue text with internal newlines collapsed to single spaces
| |
|
| |
|
| | def _ts_to_seconds(ts: str) -> float: |
| | """Convert SRT timestamp (HH:MM:SS,mmm) to seconds.""" |
| | h, m, rest = ts.split(":") |
| | s, ms = rest.split(",") |
| | return int(h) * 3600 + int(m) * 60 + int(s) + int(ms) / 1000 |
| |
|
| |
|
def parse_srt(srt_path: Path) -> list[SrtSegment]:
    """Parse an SRT file into a list of timed segments.

    Malformed blocks (missing lines, non-numeric index, bad timestamp,
    empty text, or non-positive duration) are silently skipped. A missing
    file yields an empty list.
    """
    try:
        raw = srt_path.read_text(encoding="utf-8")
    except FileNotFoundError:
        return []

    parsed: list[SrtSegment] = []

    # SRT blocks are separated by one or more blank lines.
    for block in re.split(r"\n\s*\n", raw.strip()):
        lines = block.strip().split("\n")
        # A valid block needs at least: index line, timing line, one text line.
        if len(lines) < 3:
            continue

        try:
            cue_index = int(lines[0].strip())
        except ValueError:
            continue

        timing = re.match(
            r"(\d{2}:\d{2}:\d{2},\d{3})\s*-->\s*(\d{2}:\d{2}:\d{2},\d{3})",
            lines[1].strip(),
        )
        if timing is None:
            continue

        start = _ts_to_seconds(timing.group(1))
        end = _ts_to_seconds(timing.group(2))
        # Collapse multi-line cue text into one space-joined string.
        text = " ".join(part.strip() for part in lines[2:] if part.strip())
        if not text or end <= start:
            continue

        parsed.append(
            SrtSegment(index=cue_index, start_seconds=start, end_seconds=end, text=text)
        )

    return parsed
| |
|
| |
|
| | def extract_segment_audio(opus_path: Path, start: float, duration: float) -> bytes | None: |
| | """Extract a segment from an opus file using ffmpeg stream copy. |
| | |
| | Returns the raw OGG/Opus bytes, or None on failure. |
| | """ |
| | cmd = [ |
| | "ffmpeg", |
| | "-v", "error", |
| | "-ss", f"{start:.3f}", |
| | "-i", str(opus_path), |
| | "-t", f"{duration:.3f}", |
| | "-c", "copy", |
| | "-f", "ogg", |
| | "pipe:1", |
| | ] |
| | try: |
| | result = subprocess.run(cmd, capture_output=True, timeout=30) |
| | if result.returncode != 0: |
| | return None |
| | if len(result.stdout) < 100: |
| | return None |
| | return result.stdout |
| | except (subprocess.TimeoutExpired, OSError): |
| | return None |
| |
|
| |
|
def process_meeting(row: dict) -> list[dict]:
    """Process a single meeting: parse SRT, extract all audio segments.

    Returns one dict per successfully extracted segment; segments whose
    audio extraction fails are dropped. A missing audio file or empty/
    missing SRT yields an empty list.
    """
    video_id = row["id"]
    opus_path = REPO_ROOT / row["audio"]
    srt_path = REPO_ROOT / row["subtitles"]

    if not opus_path.exists():
        return []

    cues = parse_srt(srt_path)
    if not cues:
        return []

    records: list[dict] = []
    for cue in cues:
        length = cue.end_seconds - cue.start_seconds
        clip = extract_segment_audio(opus_path, cue.start_seconds, length)
        if clip is None:
            # Extraction failed for this cue; skip it rather than abort.
            continue

        records.append({
            "video_id": video_id,
            "segment_id": cue.index,
            "audio": {"bytes": clip, "path": f"{video_id}_{cue.index:05d}.opus"},
            "text": cue.text,
            "start_time": round(cue.start_seconds, 3),
            "end_time": round(cue.end_seconds, 3),
            "duration": round(length, 3),
        })

    return records
| |
|
| |
|
def write_shard(segments: list[dict], shard_idx: int, out_dir: Path) -> Path:
    """Write a list of segment dicts as a Parquet shard with Audio feature.

    Returns the path of the shard that was written. Assumes ``segments``
    is non-empty (the schema keys are taken from the first element).
    """
    # Imported lazily so worker processes never pay the datasets import cost.
    from datasets import Audio, Dataset, Features, Value

    features = Features({
        "video_id": Value("string"),
        "segment_id": Value("int32"),
        "audio": Audio(),
        "text": Value("string"),
        "start_time": Value("float64"),
        "end_time": Value("float64"),
        "duration": Value("float64"),
    })

    # Pivot the row-oriented dicts into column lists for Dataset.from_dict.
    columns = {key: [seg[key] for seg in segments] for key in segments[0]}
    ds = Dataset.from_dict(columns, features=features)

    shard_path = out_dir / f"train-{shard_idx:05d}.parquet"
    ds.to_parquet(shard_path)
    return shard_path
| |
|
| |
|
def _flush_buffer(buffer: collections.deque, shard_size: int, shard_idx: int,
                  out_dir: Path, *, force: bool = False) -> int:
    """Drain *buffer* into Parquet shards.

    Writes a shard whenever at least ``shard_size`` segments are queued;
    with ``force=True`` it also writes a final, possibly short, shard from
    whatever remains. Returns the next unused shard index.
    """
    while len(buffer) >= shard_size or (force and buffer):
        n = min(len(buffer), shard_size)
        batch = [buffer.popleft() for _ in range(n)]
        shard_path = write_shard(batch, shard_idx, out_dir)
        console.print(f" Wrote shard {shard_idx} ({n} segments) -> {shard_path.name}")
        # Release the batch promptly — it holds the embedded audio bytes.
        del batch
        shard_idx += 1
    return shard_idx
| |
|
| |
|
def main() -> None:
    """CLI entry point: segment every meeting in metadata.csv into Parquet shards.

    Keeps at most ``workers * 2`` meetings submitted at once so that the
    memory held by pending results stays bounded, and flushes full shards
    incrementally as segments accumulate.
    """
    parser = argparse.ArgumentParser(description="Segment audio and build Parquet shards")
    parser.add_argument("--workers", type=int, default=DEFAULT_WORKERS,
                        help="Number of parallel workers (default: %(default)s)")
    parser.add_argument("--shard-size", type=int, default=DEFAULT_SHARD_SIZE,
                        help="Segments per Parquet shard (default: %(default)s)")
    parser.add_argument("--out-dir", type=Path, default=REPO_ROOT / "segmented",
                        help="Output directory for Parquet shards")
    args = parser.parse_args()

    args.out_dir.mkdir(parents=True, exist_ok=True)

    # Load the full meeting manifest up front; each row must provide
    # "id", "audio", and "subtitles" columns (consumed by process_meeting).
    src = REPO_ROOT / "metadata.csv"
    with open(src, encoding="utf-8", newline="") as f:
        reader = csv.DictReader(f)
        rows = list(reader)

    console.print(f"Processing {len(rows)} meetings with {args.workers} workers")
    console.print(f"Shard size: {args.shard_size} segments")
    console.print(f"Output: {args.out_dir}")

    buffer: collections.deque[dict] = collections.deque()  # segments awaiting a full shard
    shard_idx = 0
    total_segments = 0
    errors = 0
    meetings_done = 0
    # Submission window: at most this many meetings in flight at a time,
    # which bounds the audio bytes held in un-collected futures.
    max_in_flight = args.workers * 2

    progress = Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        BarColumn(),
        MofNCompleteColumn(),
        TimeElapsedColumn(),
        TimeRemainingColumn(),
        console=console,
    )

    with progress:
        task = progress.add_task("Meetings processed", total=len(rows))
        rows_iter = iter(rows)

        with ProcessPoolExecutor(max_workers=args.workers) as pool:
            active: dict = {}  # future -> video_id, for error reporting

            # Prime the pool with the first window of meetings.
            for row in itertools.islice(rows_iter, max_in_flight):
                f = pool.submit(process_meeting, row)
                active[f] = row["id"]

            while active:
                # Block until at least one in-flight meeting finishes.
                done, _ = wait(active, return_when=FIRST_COMPLETED)

                for future in done:
                    video_id = active.pop(future)
                    try:
                        segments = future.result()
                        buffer.extend(segments)
                        total_segments += len(segments)
                        del segments
                    except Exception as e:
                        errors += 1
                        console.print(f"[red]Error processing {video_id}: {e}[/red]")

                    # HACK: clear the future's cached result via a CPython
                    # private attribute so the (potentially large) embedded
                    # audio bytes can be garbage-collected immediately rather
                    # than when the future object itself is collected.
                    future._result = None

                    meetings_done += 1
                    progress.advance(task)

                    # Backfill the window with the next meeting, if any remain.
                    row = next(rows_iter, None)
                    if row is not None:
                        f = pool.submit(process_meeting, row)
                        active[f] = row["id"]

                # Write out any complete shards accumulated so far.
                shard_idx = _flush_buffer(
                    buffer, args.shard_size, shard_idx, args.out_dir
                )

    # Final flush: emit whatever is left as a (possibly short) last shard.
    shard_idx = _flush_buffer(
        buffer, args.shard_size, shard_idx, args.out_dir, force=True
    )

    # NOTE(review): f-string below has no placeholders (ruff F541) — harmless.
    console.print(f"\n[bold green]Done![/bold green]")
    console.print(f" Total segments: {total_segments}")
    console.print(f" Total shards: {shard_idx}")
    console.print(f" Errors: {errors}")
| |
|
| |
|
| | if __name__ == "__main__": |
| | main() |
| |
|