#!/usr/bin/env python3
"""
Convert non-redistributable datasets to RAON-TTS-Pool WebDataset format.
GigaSpeech and SPGISpeech cannot be included directly in RAON-TTS-Pool due to
license restrictions. Users who have agreed to the license on HuggingFace can
run this script to automatically download and convert them into the same
WebDataset tar + metadata parquet format.
Prerequisites
-------------
1. Accept the dataset license on HuggingFace:
- GigaSpeech: https://huggingface.co/datasets/speechcolab/gigaspeech
- SPGISpeech: https://huggingface.co/datasets/kensho/spgispeech
2. Set your HuggingFace token:
export HF_TOKEN=hf_your_token_here
Supported datasets
------------------
gigaspeech : GigaSpeech (HuggingFace: speechcolab/gigaspeech)
License: requires signing a license agreement
Subsets: xs (10h), s (250h), m (1000h), l (2500h), xl (10000h)
spgispeech : SPGISpeech (HuggingFace: kensho/spgispeech)
License: non-commercial use only
Output layout (per dataset)
----------------------------
<output_dir>/
{prefix}-000000.tar # WebDataset shard (~10 GB each)
{prefix}-000001.tar
...
metadata_pool.parquet # all samples (sample_key, text, duration, shard_name)
metadata_core.parquet # quality-filtered subset (requires --core_json)
Each tar contains pairs:
{sample_key}.opus 16 kHz mono Opus 64 kbps
{sample_key}.json {"text": "...", "duration": 4.2, "source": "..."}
Usage
-----
# GigaSpeech xl subset (requires HF_TOKEN with license agreement)
python prepare_nonredist_datasets.py gigaspeech \\
--output_dir ./GigaSpeech \\
--gigaspeech_subset xl
# GigaSpeech from local HF snapshot
python prepare_nonredist_datasets.py gigaspeech \\
--source_dir /path/to/gigaspeech_local \\
--output_dir ./GigaSpeech \\
--gigaspeech_subset xl
# SPGISpeech (requires HF_TOKEN with license agreement)
python prepare_nonredist_datasets.py spgispeech \\
--output_dir ./SPGISpeech
# SPGISpeech from local HF snapshot
python prepare_nonredist_datasets.py spgispeech \\
--source_dir /path/to/spgispeech_local \\
--output_dir ./SPGISpeech
# With core parquet (requires pool_indices JSON from RAON-TTS-Pool maintainers)
python prepare_nonredist_datasets.py gigaspeech \\
--output_dir ./GigaSpeech \\
--core_json /path/to/pool_indices_filter_remove_15pct_combined.json \\
--pool_offset <offset>
# Parquet-only (tars already built)
python prepare_nonredist_datasets.py gigaspeech \\
--output_dir ./GigaSpeech \\
--parquet_only
Notes
-----
- Audio is always re-encoded to 16 kHz mono Opus 64 kbps via ffmpeg.
- Samples longer than --max_duration (default 30s) are skipped.
- Parallel ffmpeg encoding uses --num_workers (default 16) threads.
- Resume: existing complete shards (>= 10 GB) are skipped automatically.
- Requires: pip install "datasets<4.0" soundfile pyarrow numpy tqdm
- datasets>=4.0 dropped soundfile audio decoding; use datasets<4.0 (e.g. 3.5.0).
"""
import os
import io
import sys
import json
import tarfile
import argparse
import subprocess
import collections
from concurrent.futures import ThreadPoolExecutor
from tqdm import tqdm
try:
import soundfile as sf
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
from datasets import load_from_disk, load_dataset
except ImportError as e:
print(f"Missing dependency: {e}")
print('Install: pip install "datasets<4.0" soundfile pyarrow numpy tqdm')
sys.exit(1)
# ── Dataset configuration ─────────────────────────────────────────────────────
# Per-dataset conversion settings, keyed by the CLI "dataset" choice.
# Field meanings (consumed by build_tars() / process_sample() / main()):
#   hf_repo      : HuggingFace Hub repo id to download from
#   prefix       : tar shard filename prefix ("{prefix}-NNNNNN.tar")
#   source_label : value written to each sample's "source" metadata field
#   pool_offset  : base for pool_idx numbering (CLI --pool_offset overrides)
#   text_col     : preferred transcript column; build_tars() falls back to
#                  common alternatives ("text", "transcript", ...) if absent
#   audio_col    : HF Audio feature column name
#   key_col      : stable per-sample id column, or None to derive a key
#   splits       : NOTE(review): declared but apparently unread here —
#                  main() uses --split instead; confirm before relying on it
#   description  : human-readable label printed at startup
DATASET_CONFIGS = {
    "gigaspeech": {
        "hf_repo": "speechcolab/gigaspeech",
        "prefix": "gs",
        "source_label": "gigaspeech",
        "pool_offset": None,  # set via --pool_offset if needed
        "text_col": "text",
        "audio_col": "audio",
        "key_col": "segment_id",  # use as sample_key if available
        "splits": ["train"],
        "description": "GigaSpeech (speechcolab/gigaspeech)",
    },
    "spgispeech": {
        "hf_repo": "kensho/spgispeech",
        "prefix": "sp",
        "source_label": "spgispeech",
        "pool_offset": None,  # set via --pool_offset if needed
        "text_col": "transcript",  # fallback: "text"
        "audio_col": "audio",
        "key_col": None,
        "splits": ["train"],
        "description": "SPGISpeech (kensho/spgispeech, non-commercial)",
    },
}
# Tunable defaults; each is overridable from the CLI.
SHARD_SIZE_DEFAULT = 10.0  # GB per tar shard before rolling over
MAX_DURATION_DEFAULT = 30.0  # seconds; longer samples are skipped
NUM_WORKERS_DEFAULT = 16  # parallel ffmpeg encoder threads
# ── Audio conversion ──────────────────────────────────────────────────────────
def audio_feature_to_opus_16k_bytes(audio_feature):
    """
    Convert a HuggingFace Audio feature dict to 16 kHz mono Opus bytes.

    audio_feature: {"array": np.ndarray, "sampling_rate": int, "path": str|None}

    The decoded samples are first serialized to an in-memory 16-bit PCM WAV,
    which is then piped through ffmpeg for resampling and Opus encoding.
    Raises RuntimeError (with the first 300 chars of stderr) on ffmpeg failure.
    """
    wav_buf = io.BytesIO()
    sf.write(wav_buf, audio_feature["array"], audio_feature["sampling_rate"],
             format="WAV", subtype="PCM_16")
    cmd = [
        "ffmpeg", "-y",
        "-f", "wav", "-i", "pipe:0",
        "-ar", "16000", "-ac", "1",
        "-c:a", "libopus", "-b:a", "64k",
        "-f", "ogg", "pipe:1",
        "-loglevel", "error",
    ]
    proc = subprocess.run(cmd, input=wav_buf.getvalue(), capture_output=True)
    if proc.returncode != 0:
        raise RuntimeError(proc.stderr.decode()[:300])
    return proc.stdout
def file_path_to_opus_16k_bytes(audio_path):
    """Convert any audio file to 16 kHz mono Opus bytes via ffmpeg.

    Fast path: a ``.opus`` file that soundfile reports as 16 kHz is returned
    verbatim, skipping the lossy re-encode.  NOTE(review): libsndfile
    typically reports Ogg/Opus at 48 kHz, so this shortcut may rarely fire —
    confirm if the re-encode cost matters.
    Raises RuntimeError (first 300 chars of stderr) on ffmpeg failure.
    """
    if audio_path.endswith(".opus"):
        try:
            if sf.info(audio_path).samplerate == 16000:
                with open(audio_path, "rb") as fh:
                    return fh.read()
        except Exception:
            # Unreadable/odd header: fall through to a full ffmpeg re-encode.
            pass
    proc = subprocess.run(
        ["ffmpeg", "-y", "-i", audio_path,
         "-ar", "16000", "-ac", "1",
         "-c:a", "libopus", "-b:a", "64k",
         "-f", "ogg", "pipe:1",
         "-loglevel", "error"],
        capture_output=True,
    )
    if proc.returncode != 0:
        raise RuntimeError(proc.stderr.decode()[:300])
    return proc.stdout
# ── Sample key ────────────────────────────────────────────────────────────────
def make_sample_key(row, cfg, idx):
    """Generate a stable sample key from row data.

    Preference order: the dataset's id column (cfg["key_col"]), then the
    basename (without extension) of the audio path, then a synthetic
    "{source_label}-{idx:08d}" fallback.
    """
    id_col = cfg.get("key_col")
    if id_col and row.get(id_col):
        return str(row[id_col])
    audio_ref = row.get(cfg["audio_col"]) or row.get("audio_path")
    if isinstance(audio_ref, dict):
        path = audio_ref.get("path")
    elif isinstance(audio_ref, str):
        path = audio_ref
    else:
        path = None
    if path:
        stem, _ext = os.path.splitext(os.path.basename(path))
        return stem
    return f"{cfg['source_label']}-{idx:08d}"
# ── Per-sample worker ─────────────────────────────────────────────────────────
def process_sample(args_tuple):
    """
    Encode one dataset row for the WebDataset shard.

    args_tuple: (idx, row, cfg, pool_offset, max_duration)
    Returns (sample_key, audio_bytes, meta_json_bytes), or None when the
    sample is skipped (missing/unusable audio, too long, or encode failure).
    """
    idx, row, cfg, pool_offset, max_duration = args_tuple
    audio = row.get(cfg["audio_col"])
    # Fallback: local Arrow datasets may use "audio_path" instead of "audio"
    if audio is None:
        audio = row.get("audio_path")
    transcript = row.get(cfg["_resolved_text_col"], "") or ""
    duration = row.get("duration")
    try:
        if isinstance(audio, dict) and "array" in audio:
            # Decoded HF Audio feature: derive duration from the array if absent.
            if duration is None:
                duration = len(audio["array"]) / audio["sampling_rate"]
            if duration > max_duration:
                return None
            audio_bytes = audio_feature_to_opus_16k_bytes(audio)
        elif isinstance(audio, str):
            # Plain file path: probe duration via soundfile if absent.
            if not os.path.exists(audio):
                return None
            if duration is None:
                duration = sf.info(audio).duration
            if duration > max_duration:
                return None
            audio_bytes = file_path_to_opus_16k_bytes(audio)
        else:
            return None
    except Exception as e:
        print(f" skip idx={idx}: {e}", flush=True)
        return None
    meta = {
        "text": transcript,
        "duration": round(float(duration), 4),
        "source": cfg["source_label"],
    }
    if pool_offset is not None:
        meta["pool_idx"] = pool_offset + idx
    return (make_sample_key(row, cfg, idx), audio_bytes,
            json.dumps(meta).encode("utf-8"))
# ── Tar writer ────────────────────────────────────────────────────────────────
class TarWriter:
    """Streams (opus, json) sample pairs into size-capped WebDataset tar shards.

    A new shard is opened whenever the next audio payload would push the
    current shard past ``shard_size_limit`` (payload bytes, not tar overhead).
    """

    def __init__(self, output_dir, prefix, shard_size_limit, shard_idx_start=0):
        self.output_dir = output_dir
        self.prefix = prefix
        self.shard_size_limit = shard_size_limit
        self.shard_idx = shard_idx_start  # numeric suffix of the next shard
        self.shard_count = 0              # shards fully written and closed
        self.sample_count = 0             # samples written across all shards
        self.tar = None                   # currently-open tarfile, if any
        self.current_size = 0             # payload bytes in the open shard

    def _open_new_shard(self):
        # Open "{prefix}-NNNNNN.tar" for writing and reset the size counter.
        shard_name = f"{self.prefix}-{self.shard_idx:06d}.tar"
        self.tar = tarfile.open(os.path.join(self.output_dir, shard_name), "w")
        self.current_size = 0
        print(f" → {shard_name}", flush=True)

    def _finish_shard(self):
        # Close the open shard and advance both counters.
        self.tar.close()
        self.shard_count += 1
        self.shard_idx += 1

    def _add_member(self, member_name, payload):
        # Append one in-memory file to the open tar.
        info = tarfile.TarInfo(name=member_name)
        info.size = len(payload)
        self.tar.addfile(info, io.BytesIO(payload))

    def write(self, sample_key, audio_bytes, meta_bytes):
        """Append one sample ({key}.opus + {key}.json), rolling shards as needed."""
        rollover = (self.tar is None
                    or self.current_size + len(audio_bytes) > self.shard_size_limit)
        if rollover:
            if self.tar is not None:
                self._finish_shard()
            self._open_new_shard()
        self._add_member(f"{sample_key}.opus", audio_bytes)
        self._add_member(f"{sample_key}.json", meta_bytes)
        self.current_size += len(audio_bytes) + len(meta_bytes)
        self.sample_count += 1

    def close(self):
        """Flush and close the final shard, if one is open."""
        if self.tar is not None:
            self._finish_shard()
# ── Existing shard detection (resume) ────────────────────────────────────────
def count_complete_shards(output_dir, prefix, shard_size_limit):
    """Return (n_complete, next_shard_idx) for resume support.

    A shard is "complete" when its file size has reached shard_size_limit;
    partial shards are ignored (and will be rewritten by the caller).
    """
    if not os.path.isdir(output_dir):
        return 0, 0
    marker = f"{prefix}-"
    complete = []
    for entry in os.listdir(output_dir):
        if not (entry.startswith(marker) and entry.endswith(".tar")):
            continue
        try:
            shard_no = int(entry[len(marker):-4])
        except ValueError:
            continue  # unrelated file that merely matches the pattern
        try:
            if os.path.getsize(os.path.join(output_dir, entry)) >= shard_size_limit:
                complete.append(shard_no)
        except OSError:
            continue  # file vanished or unreadable — treat as incomplete
    if complete:
        return len(complete), max(complete) + 1
    return 0, 0
# ── Parquet generation ────────────────────────────────────────────────────────
def build_parquets(output_dir, prefix, core_json=None):
    """Scan tars in output_dir and write metadata_pool.parquet + metadata_core.parquet.

    NOTE(review): ``prefix`` is accepted for call-site symmetry but is not
    used to filter — every ``*.tar`` in ``output_dir`` is scanned.
    Without ``core_json`` (or for samples lacking a ``pool_idx``), the core
    table mirrors the pool table.
    """
    tar_names = sorted(n for n in os.listdir(output_dir) if n.endswith(".tar"))
    if not tar_names:
        print("No tar files found — skipping parquet generation.", flush=True)
        return
    print(f"\nBuilding parquets from {len(tar_names)} tars ...", flush=True)
    core_set = None
    if core_json:
        print(f"Loading core indices: {core_json}", flush=True)
        with open(core_json) as fh:
            core_set = set(json.load(fh))
        print(f" Core set: {len(core_set):,} indices", flush=True)
    columns = ["sample_key", "text", "duration", "shard_name"]
    schema = pa.schema([
        pa.field("sample_key", pa.string()),
        pa.field("text", pa.string()),
        pa.field("duration", pa.float64()),
        pa.field("shard_name", pa.string()),
    ])
    pool_rows = {c: [] for c in columns}
    core_rows = {c: [] for c in columns}
    for tar_name in tqdm(tar_names, desc="Scanning tars"):
        try:
            with tarfile.open(os.path.join(output_dir, tar_name), "r") as tf:
                members = {m.name: m for m in tf.getmembers()}
                for member_name in members:
                    if not member_name.endswith(".opus"):
                        continue
                    key = os.path.splitext(member_name)[0]
                    json_member = members.get(f"{key}.json")
                    if json_member is None:
                        continue  # orphan audio without metadata sidecar
                    meta = json.loads(tf.extractfile(json_member).read())
                    record = {
                        "sample_key": key,
                        "text": meta.get("text", ""),
                        "duration": float(meta.get("duration", 0.0)),
                        "shard_name": tar_name,
                    }
                    for c in columns:
                        pool_rows[c].append(record[c])
                    pool_idx = meta.get("pool_idx")
                    # Core membership: filtered only when both a core set and
                    # a pool_idx exist; otherwise the sample is kept.
                    in_core = True
                    if core_set is not None and pool_idx is not None:
                        in_core = pool_idx in core_set
                    if in_core:
                        for c in columns:
                            core_rows[c].append(record[c])
        except Exception as e:
            print(f" skip {tar_name}: {e}")
    pool_path = os.path.join(output_dir, "metadata_pool.parquet")
    core_path = os.path.join(output_dir, "metadata_core.parquet")
    pq.write_table(pa.table(pool_rows, schema=schema), pool_path)
    pq.write_table(pa.table(core_rows, schema=schema), core_path)
    n_pool = len(pool_rows["sample_key"])
    n_core = len(core_rows["sample_key"])
    h_pool = sum(pool_rows["duration"]) / 3600
    h_core = sum(core_rows["duration"]) / 3600
    print(f" pool: {n_pool:,} samples, {h_pool:.0f}h → {pool_path}")
    print(f" core: {n_core:,} samples, {h_core:.0f}h → {core_path}")
    if core_set is None:
        print(" (core = pool; no --core_json provided)")
# ── Main build ────────────────────────────────────────────────────────────────
def build_tars(ds, cfg, args):
    """Iterate dataset ``ds`` and stream encoded samples into WebDataset tars.

    Encoding runs on a thread pool with a bounded in-flight queue so results
    are written to shards in dataset order while ffmpeg work overlaps.
    """
    shard_size_limit = int(args.shard_size_gb * 1024 ** 3)
    pool_offset = cfg.get("pool_offset") if args.pool_offset is None else args.pool_offset
    os.makedirs(args.output_dir, exist_ok=True)
    # Resolve the transcript column: configured name first, then fallbacks.
    col_names = ds.column_names
    text_col = cfg["text_col"]
    if text_col not in col_names:
        for candidate in ("text", "transcript", "sentence",
                          "normalized_text", "transcription"):
            if candidate in col_names:
                text_col = candidate
                break
        else:
            raise ValueError(f"No text column found. Available: {col_names}")
    cfg = dict(cfg, _resolved_text_col=text_col)
    print(f"Columns: audio={cfg['audio_col']!r} text={text_col!r}", flush=True)
    # Resume: keep complete shards and continue numbering after them.
    n_complete, shard_idx_start = count_complete_shards(
        args.output_dir, cfg["prefix"], shard_size_limit
    )
    # Samples already written: estimated from the pool parquet when present;
    # otherwise 0 (duplicate sample_keys in new tars would just overwrite).
    skip_samples = 0
    if n_complete > 0:
        pool_parquet = os.path.join(args.output_dir, "metadata_pool.parquet")
        if os.path.exists(pool_parquet):
            skip_samples = len(pq.read_table(pool_parquet))
        print(f"Resume: {n_complete} complete shards, shard_idx_start={shard_idx_start}, "
              f"skipping {skip_samples:,} samples", flush=True)
    print(f"Dataset size: {len(ds):,} samples", flush=True)
    writer = TarWriter(args.output_dir, cfg["prefix"], shard_size_limit, shard_idx_start)
    max_in_flight = args.num_workers * 2
    with ThreadPoolExecutor(max_workers=args.num_workers) as executor:
        in_flight = collections.deque()
        pbar = tqdm(total=len(ds) - skip_samples, desc=cfg["source_label"])

        def drain_one():
            # Pop the oldest future (FIFO keeps shard order deterministic).
            encoded = in_flight.popleft().result()
            if encoded is not None:
                writer.write(*encoded)
            pbar.update(1)

        for i in range(len(ds)):
            if i < skip_samples:
                continue
            in_flight.append(executor.submit(
                process_sample, (i, ds[i], cfg, pool_offset, args.max_duration)
            ))
            while len(in_flight) >= max_in_flight:
                drain_one()
        while in_flight:
            drain_one()
        pbar.close()
    writer.close()
    print(f"\nTar build done: {writer.shard_count} shards, {writer.sample_count:,} samples",
          flush=True)
# ── Argument parsing ──────────────────────────────────────────────────────────
def get_args():
    """Define and parse the command-line interface."""
    parser = argparse.ArgumentParser(
        description="Convert non-redistributable datasets to RAON-TTS-Pool format.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,  # reuse the module docstring as extended help text
    )
    # Dataset selection and I/O paths
    parser.add_argument("dataset", choices=list(DATASET_CONFIGS.keys()),
                        help="Dataset to convert")
    parser.add_argument("--output_dir", required=True,
                        help="Output directory for tar shards and parquet files")
    parser.add_argument("--source_dir", default=None,
                        help="Local HF dataset snapshot path (default: download from Hub)")
    # Per-dataset subset / split choices
    parser.add_argument("--gigaspeech_subset", default="xl",
                        choices=["xs", "s", "m", "l", "xl"],
                        help="GigaSpeech subset size (default: xl)")
    parser.add_argument("--spgispeech_subset", default="L",
                        choices=["L", "M", "S", "dev", "test"],
                        help="SPGISpeech subset size (default: L)")
    parser.add_argument("--split", default="train",
                        help="Dataset split (default: train)")
    # Conversion tuning
    parser.add_argument("--shard_size_gb", type=float, default=SHARD_SIZE_DEFAULT)
    parser.add_argument("--max_duration", type=float, default=MAX_DURATION_DEFAULT)
    parser.add_argument("--num_workers", type=int, default=NUM_WORKERS_DEFAULT)
    # Pool indexing / core split / rebuild mode
    parser.add_argument("--pool_offset", type=int, default=None,
                        help="pool_idx base offset for this dataset (optional)")
    parser.add_argument("--core_json", default=None,
                        help="pool_indices_filter_remove_15pct_combined.json for core split")
    parser.add_argument("--parquet_only", action="store_true",
                        help="Skip tar build; only (re)generate parquet files")
    return parser.parse_args()
# ── Entry point ───────────────────────────────────────────────────────────────
def main():
    """CLI entry point: load the dataset, build tar shards, then parquets."""
    args = get_args()
    cfg = DATASET_CONFIGS[args.dataset]
    print(f"=== RAON-TTS-Pool: {cfg['description']} ===", flush=True)
    print(f"Output: {args.output_dir}", flush=True)
    if not args.parquet_only:
        if args.source_dir:
            # Local Arrow snapshot (Dataset.save_to_disk layout).
            print(f"Loading from local path: {args.source_dir}", flush=True)
            ds = load_from_disk(args.source_dir)
        elif cfg["hf_repo"]:
            # Hub download; gated repos require HF_TOKEN + accepted license.
            print(f"Downloading from HuggingFace Hub: {cfg['hf_repo']}", flush=True)
            load_kwargs = {"split": args.split}
            subset_by_dataset = {
                "gigaspeech": args.gigaspeech_subset,
                "spgispeech": args.spgispeech_subset,
            }
            if args.dataset in subset_by_dataset:
                load_kwargs["name"] = subset_by_dataset[args.dataset]
            ds = load_dataset(cfg["hf_repo"], **load_kwargs)
        else:
            print(f"ERROR: {args.dataset} has no public HF Hub repo. "
                  f"Please provide --source_dir.", file=sys.stderr)
            sys.exit(1)
        build_tars(ds, cfg, args)
    # Parquets are always (re)generated from whatever tars exist on disk.
    build_parquets(args.output_dir, cfg["prefix"], args.core_json)
    print("\n=== Done ===", flush=True)
    print(f"Output directory: {args.output_dir}")
if __name__ == "__main__":
main()