|
|
|
|
|
""" |
|
|
Processes the DeepMind LibriSpeech-Long dataset into Parquet files |
|
|
for Hugging Face Hub compatibility. |
|
|
|
|
|
This script converts FLAC audio files to WAV format in-memory, |
|
|
gathers metadata, and saves the data into Parquet files, one for each split. |
|
|
It also generates the necessary YAML front-matter for the README.md file |
|
|
on the Hugging Face Hub, ensuring the dataset is correctly displayed, |
|
|
especially in the Data Studio. |
|
|
|
|
|
Example Usage: |
|
|
python process.py \ |
|
|
/path/to/source/librispeech-long \ |
|
|
/path/to/your/cloned-hf-repo |
|
|
|
|
|
To process only a small subset for testing: |
|
|
python process.py \ |
|
|
/path/to/source/librispeech-long \ |
|
|
/path/to/your/cloned-hf-repo \ |
|
|
--limit-speakers |
|
|
""" |
|
|
import argparse |
|
|
import re |
|
|
import shutil |
|
|
import subprocess |
|
|
from dataclasses import dataclass |
|
|
from pathlib import Path |
|
|
from typing import Dict, List, Optional, Tuple |
|
|
|
|
|
import datasets |
|
|
from datasets import Audio, Dataset, Features, Value |
|
|
from tqdm import tqdm |
|
|
|
|
|
|
|
|
|
|
|
# Keep Parquet row groups small so the Hub's Data Studio can seek/stream long
# audio rows without materializing huge row groups in memory.
datasets.config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 20

# Config name used both in the README YAML front-matter and as the output dir.
CONFIG_NAME = "librispeech_long"

# Subdirectory of the target repo where the Parquet shards are written.
OUTPUT_SUBDIR = CONFIG_NAME

# Maps source directory names (hyphenated) to Hub split names (underscored;
# Hub split names cannot contain hyphens).
SPLIT_MAP: Dict[str, str] = {
    "dev-clean": "dev_clean",
    "dev-other": "dev_other",
    "test-clean": "test_clean",
    "test-other": "test_other",
}

# Value stored in every row's "dataset" column.
DATASET_NAME = "librispeech-long"

# Shard filename pattern; this script always writes exactly one shard per split.
FILENAME_TEMPLATE = "{split}-00000-of-00001.parquet"

# Target audio format for the in-memory FLAC->WAV conversion:
# 16 kHz, mono, 16-bit signed little-endian PCM.
TARGET_SR = 16000
TARGET_CHANNELS = 1
TARGET_CODEC = "pcm_s16le"
|
|
|
|
|
|
|
|
|
|
|
@dataclass
class Row:
    """Represents a single row in the dataset."""

    # Complete WAV file bytes (header + PCM), produced in-memory by ffmpeg.
    audio_bytes: bytes
    # Source dataset label for the "dataset" column (set to DATASET_NAME).
    dataset: str
    # Transcript read from the utterance's sibling .txt file.
    text: str
    # Utterance identifier, e.g. "1272-128104-0000" (speaker-session-utt).
    id: str
    # Duration in seconds of the original FLAC, as reported by ffprobe.
    audio_length_s: float
|
|
|
|
|
|
|
|
|
|
|
def check_ffmpeg() -> None:
    """Verify that 'ffmpeg' and 'ffprobe' are installed and on PATH.

    Raises:
        RuntimeError: if either binary cannot be located.
    """

    def _ensure(bin_name: str) -> None:
        # Running `<tool> -version` both locates the binary and confirms it runs.
        try:
            subprocess.run(
                [bin_name, "-version"],
                check=True,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )
        except FileNotFoundError:
            raise RuntimeError(
                f"'{bin_name}' not found on PATH. Please install FFmpeg and retry."
            )

    _ensure("ffmpeg")
    _ensure("ffprobe")
|
|
|
|
|
def cleanup_outputs(target_dir: Path, clean: bool) -> None:
    """Remove previously generated Parquet output and stray temp directories.

    Args:
        target_dir: Root of the cloned Hugging Face dataset repository.
        clean: When False this function is a no-op (driven by --no-clean).
    """
    if not clean:
        return

    output_path = target_dir / OUTPUT_SUBDIR
    if output_path.exists():
        print(f"[INFO] Cleaning up old output directory: {output_path}")
        shutil.rmtree(output_path)

    # Best-effort removal of temp dirs left behind by interrupted writes.
    # ignore_errors=True already suppresses all rmtree failures, so the old
    # try/except-pass wrapper around this call was redundant and has been removed.
    for leftover in target_dir.glob(".tmp_write_*"):
        shutil.rmtree(leftover, ignore_errors=True)
|
|
|
|
|
def find_first_speaker_dir(split_dir: Path) -> Optional[Path]:
    """Return the alphabetically first speaker subdirectory of *split_dir*.

    Returns None when *split_dir* is missing or contains no subdirectories.
    """
    if not split_dir.is_dir():
        return None
    # Entries sort lexicographically; the first directory hit is the minimum.
    for candidate in sorted(split_dir.iterdir()):
        if candidate.is_dir():
            return candidate
    return None
|
|
|
|
|
def collect_flac_pairs(root: Path) -> List[Tuple[Path, Path]]:
    """Pair each *.flac under *root* (recursively) with its sibling .txt transcript.

    FLAC files without a matching transcript are silently skipped.
    """
    return [
        (flac, flac.with_suffix(".txt"))
        for flac in sorted(root.rglob("*.flac"))
        if flac.with_suffix(".txt").exists()
    ]
|
|
|
|
|
def ffmpeg_flac_to_wav_bytes(flac_path: Path) -> bytes:
    """Transcode *flac_path* to WAV bytes via ffmpeg, streamed through stdout.

    Output format is governed by the module constants: TARGET_SR Hz,
    TARGET_CHANNELS channel(s), TARGET_CODEC sample encoding.
    """
    command = ["ffmpeg", "-v", "error", "-i", str(flac_path)]
    command += ["-ac", str(TARGET_CHANNELS), "-ar", str(TARGET_SR)]
    command += ["-f", "wav", "-acodec", TARGET_CODEC, "pipe:1"]
    # check=True raises CalledProcessError on ffmpeg failure; stdout is the WAV.
    completed = subprocess.run(command, check=True, capture_output=True)
    return completed.stdout
|
|
|
|
|
def ffprobe_duration_seconds(audio_path: Path) -> float:
    """Return the container-reported duration of *audio_path* in seconds.

    Falls back to 0.0 when ffprobe's output cannot be parsed as a float
    (e.g. empty or "N/A" output).
    """
    completed = subprocess.run(
        [
            "ffprobe", "-v", "error", "-show_entries", "format=duration",
            "-of", "default=noprint_wrappers=1:nokey=1", str(audio_path),
        ],
        check=True,
        capture_output=True,
        text=True,
    )
    raw = completed.stdout.strip()
    try:
        return float(raw)
    except (ValueError, TypeError):
        return 0.0
|
|
|
|
|
def make_id_from_path(flac_path: Path, split_dir: Path) -> str:
    """Derive a LibriSpeech-style utterance ID, e.g. '1272-128104-0000'.

    Expects paths shaped like <split_dir>/<speaker>/<session>/<file>.flac;
    shallower paths fall back to the hyphenated filename stem.
    """
    parts = flac_path.relative_to(split_dir).parts
    if len(parts) < 3:
        return flac_path.stem.replace("_", "-")

    speaker, session = parts[0], parts[1]
    stem = Path(parts[-1]).stem

    # Stems like "1272_0000" keep only the utterance number after the underscore;
    # anything else is hyphenated wholesale.
    numeric = re.match(r"^\d+_(\d+)$", stem)
    suffix = numeric.group(1) if numeric else stem.replace('_', '-')
    return f"{speaker}-{session}-{suffix}"
|
|
|
|
|
def read_text(txt_path: Path) -> str:
    """Return the transcript at *txt_path*, stripped of surrounding whitespace."""
    with txt_path.open(encoding="utf-8") as handle:
        return handle.read().strip()
|
|
|
|
|
def rows_for_split(source_split_path: Path, limit_speakers: bool) -> List[Row]:
    """Build one Row per (flac, txt) pair found under *source_split_path*.

    When *limit_speakers* is set, only the first speaker directory is
    processed (quick smoke-test mode). The result is sorted by utterance id.
    """
    if not source_split_path.exists():
        print(f"[WARN] Source split directory not found: {source_split_path}")
        return []

    if limit_speakers:
        spk_dir = find_first_speaker_dir(source_split_path)
        if spk_dir is None:
            print(f"[WARN] No speaker directories in {source_split_path}")
            return []
        print(f"[INFO] Using speaker subset for {source_split_path.name}: {spk_dir.name}")
        speaker_roots = [spk_dir]
    else:
        speaker_roots = [entry for entry in source_split_path.iterdir() if entry.is_dir()]

    file_pairs: List[Tuple[Path, Path]] = []
    for speaker_root in speaker_roots:
        file_pairs.extend(collect_flac_pairs(speaker_root))

    progress = tqdm(file_pairs, desc=f"{source_split_path.name}: converting", unit="file")
    rows = [
        Row(
            audio_bytes=ffmpeg_flac_to_wav_bytes(flac_path),
            dataset=DATASET_NAME,
            text=read_text(txt_path),
            id=make_id_from_path(flac_path, source_split_path),
            audio_length_s=ffprobe_duration_seconds(flac_path),
        )
        for flac_path, txt_path in progress
    ]
    return sorted(rows, key=lambda row: row.id)
|
|
|
|
|
def build_parquet_dataset(rows: List[Row]) -> Dataset:
    """
    Builds a Hugging Face Dataset with the correct Audio feature type.
    This ensures the Parquet file has the right schema (a STRUCT for audio)
    for the Hugging Face Data Studio to interpret it correctly.
    """
    # decode=False keeps the raw WAV bytes instead of decoding to arrays.
    schema = Features({
        "audio": Audio(sampling_rate=TARGET_SR, decode=False),
        "dataset": Value("string"),
        "text": Value("string"),
        "id": Value("string"),
        "audio_length_s": Value("float64"),
    })

    records = []
    for row in rows:
        records.append({
            # path=None: the audio exists only in memory, not on disk.
            "audio": {"bytes": row.audio_bytes, "path": None},
            "dataset": row.dataset,
            "text": row.text,
            "id": row.id,
            "audio_length_s": row.audio_length_s,
        })

    return Dataset.from_list(records, features=schema)
|
|
|
|
|
|
|
|
def write_split(ds: Dataset, out_path: Path) -> Tuple[int, int]:
    """Write *ds* to *out_path* as Parquet; return (row count, file size in bytes)."""
    out_path.parent.mkdir(parents=True, exist_ok=True)
    # Replace any stale shard from a previous run before writing.
    if out_path.exists():
        out_path.unlink()
    ds.to_parquet(str(out_path))
    size_on_disk = out_path.stat().st_size
    return ds.num_rows, size_on_disk
|
|
|
|
|
def format_yaml_block(stats: Dict[str, Dict[str, float]]) -> str:
    """Generate the YAML front-matter for the README.md file.

    Args:
        stats: Mapping of hub split name -> {"num_examples": ..., "num_bytes": ...}.

    Returns:
        The complete '---'-delimited YAML block as a single string.
    """
    download_size = sum(int(v["num_bytes"]) for v in stats.values())

    lines = [
        "---",
        "license: cc-by-4.0",
        "dataset_info:",
        f"- config_name: {CONFIG_NAME}",
        "  features:",
        "  - name: audio",
        "    dtype:",
        "      audio:",
        f"        sampling_rate: {TARGET_SR}",
        "  - name: dataset",
        "    dtype: string",
        "  - name: text",
        "    dtype: string",
        "  - name: id",
        "    dtype: string",
        "  - name: audio_length_s",
        "    dtype: float64",
        "  splits:",
    ]

    for hub_split, vals in sorted(stats.items()):
        lines.append(f"  - name: {hub_split}")
        # BUG FIX: num_bytes was previously emitted via float(), producing
        # values like "12345.0". The Hub's dataset_info schema expects an
        # integer here, consistent with the int download_size/dataset_size below.
        lines.append(f"    num_bytes: {int(vals['num_bytes'])}")
        lines.append(f"    num_examples: {int(vals['num_examples'])}")

    # Data is stored uncompressed-in-repo, so download size == dataset size.
    lines.extend([
        f"  download_size: {download_size}",
        f"  dataset_size: {download_size}",
        "configs:",
        f"- config_name: {CONFIG_NAME}",
        "  data_files:",
    ])

    for hub_split in sorted(stats.keys()):
        path_pattern = f"{OUTPUT_SUBDIR}/{hub_split}-*"
        lines.append(f"  - split: {hub_split}")
        lines.append(f"    path: {path_pattern}")

    lines.append("---")
    return "\n".join(lines)
|
|
|
|
|
def main() -> None:
    """Main function to run the data processing pipeline.

    Parses CLI args, converts each split to Parquet under
    <target_repo_root>/<OUTPUT_SUBDIR>/, and prints README YAML front-matter.
    """
    parser = argparse.ArgumentParser(
        description="Process LibriSpeech-Long dataset for Hugging Face Hub."
    )
    parser.add_argument(
        "source_root", type=Path, help="Path to the source data directory (e.g., '.../librispeech-long')."
    )
    parser.add_argument(
        "target_repo_root", type=Path, help="Path to your cloned Hugging Face dataset repository."
    )
    parser.add_argument(
        "--limit-speakers", action="store_true", help="Only process the first speaker per split for quick testing."
    )
    # store_false: passing --no-clean sets args.clean to False (default True).
    parser.add_argument(
        "--no-clean", dest="clean", action="store_false", help="Do not clean up old output files before running."
    )
    args = parser.parse_args()

    # Fail fast if FFmpeg tooling is missing, then (optionally) clear old outputs.
    check_ffmpeg()
    cleanup_outputs(args.target_repo_root, args.clean)

    out_dir = args.target_repo_root / OUTPUT_SUBDIR
    out_dir.mkdir(parents=True, exist_ok=True)

    # Per-split stats (num_examples / num_bytes), collected for the README YAML.
    all_stats: Dict[str, Dict[str, float]] = {}

    for src_split, hub_split in SPLIT_MAP.items():
        source_split_path = args.source_root / src_split
        rows = rows_for_split(source_split_path, args.limit_speakers)
        if not rows:
            print(f"[WARN] No rows found for split '{src_split}', skipping.")
            continue

        ds = build_parquet_dataset(rows)
        out_name = FILENAME_TEMPLATE.format(split=hub_split)
        out_path = out_dir / out_name

        print(f"[INFO] Writing {hub_split} -> {out_path}")
        num_examples, num_bytes = write_split(ds, out_path)
        all_stats[hub_split] = {
            "num_examples": num_examples,
            "num_bytes": num_bytes,
        }
        print(f"[INFO] {hub_split}: examples={num_examples}, bytes={num_bytes}")

    # The YAML is printed (not written) so the user can review it before
    # pasting it into README.md.
    print("\n" + "="*60)
    print("=== Paste this YAML at the top of your README.md ===")
    print("="*60)
    print(format_yaml_block(all_stats))
    print("="*60)
|
|
|
if __name__ == "__main__": |
|
|
main() |
|
|
|