# cloudflare-speech-dataset-augmented / cloudflare-speech-dataset-augmented.py
# Author: Muraddshi — commit 3e91046 (verified): "Update cloudflare-speech-dataset-augmented.py"
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
HuggingFace datasets loader for Cloudflare R2 zipped speech datasets.
Supports optional, load-time controlled augmentations:
load_dataset(..., aug_enable=True, apply_augmentations="train", noise="25", reverb="25", ...)
Features:
- apply_augmentations: "train" | "validation" | "test" | "all" | "none"
- augmentation field per example:
- "none" -> augmentation disabled for this split
- "clean" -> enabled but nothing applied (or assets missing / errors)
- "noise", "reverb", "channel", "bitcrush" or stacked like "channel+reverb+noise"
- optional stacked augmentation mode:
- aug_mode: "single" (default) | "stacked"
- max_aug_stack: int (default 2)
- optional combo control (works for both single and stacked):
- allowed_combos: e.g. "clean,noise,channel+noise,reverb+noise"
* if set, ONLY these combos are sampled deterministically
* weights use product(p_aug) for combo; clean weight uses remaining mass (1 - sum(p))
- safer audio handling:
- sanitize NaN/Inf -> 0 after each step (no mid-chain level pumping)
- prevent clipping via peak normalization + hard clip to [-1, 1] ONCE at the end
- keep dtype float32
"""
from __future__ import annotations
import datetime
import hashlib
import json
import os
import random
import re
import zipfile
from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Set, Tuple
import datasets
import librosa
import numpy as np
import s3fs
from audiomentations import (
AddBackgroundNoise,
ApplyImpulseResponse,
BitCrush,
Compose,
HighPassFilter,
LowPassFilter,
)
from pydantic import BaseModel, Field
from pydub import AudioSegment
from tqdm import tqdm
import pysrt
class Sample(BaseModel):
    """Metadata for one audio sample stored inside a dataset zip.

    Populated either from a ``manifest.jsona`` entry or synthesized from an
    SRT subtitle cue paired with a wav file (see the builder's loaders).
    """

    id: Optional[str | int] = None
    group: Optional[str] = None
    # Name of the wav entry inside the zip; falls back to wav_file_name.
    zip_entry_name: Optional[str] = None
    sample_rate: Optional[int] = None
    sample_count: Optional[int] = None
    seq_no: Optional[int] = None
    format: Optional[str] = None
    speaker: Optional[str] = None
    timestamp: Optional[str] = None
    # "class" is a Python keyword, so the manifest key is mapped via an alias.
    class_: Optional[str] = Field(default=None, alias="class")
    text: Optional[str] = None
    # Slice boundaries in milliseconds (set for SRT-derived samples only).
    start_ts: Optional[int] = None
    end_ts: Optional[int] = None
    wav_file_name: Optional[str] = None
# All audio is resampled to this rate when loaded with librosa.
TARGET_SR = 22050
# Per-config R2 location, optional split fractions (fractions of zip FILES,
# not of samples) and optional duration filters in seconds.
_CONFIGS: Dict[str, Dict[str, Any]] = {
    "liepa": {"bucket": "asr-training-data", "directory": "liepa", "test_size": 0.05, "val_size": 0.05},
    "liepa2": {
        "bucket": "asr-training-data",
        "directory": "liepa2",
        "test_size": 0.05,
        "val_size": 0.05,
        "max_audio_length": 10,
        "min_audio_length": 0.5,
    },
    "liepa2_gcm_pcm": {
        "bucket": "asr-training-data",
        "directory": "liepa2_gcm_pcm",
        "test_size": 0.05,
        "val_size": 0.05,
        "max_audio_length": 10,
        "min_audio_length": 0.5,
    },
    "liepa_gcm_pcm": {
        "bucket": "asr-training-data",
        "directory": "liepa_gcm_pcm",
        "test_size": 0.05,
        "val_size": 0.05,
    },
    "liepa_alaw_gcm_pcm": {
        "bucket": "asr-training-data",
        "directory": "liepa_alaw_gcm_pcm",
        "test_size": 0.05,
        "val_size": 0.05,
    },
    # No test/val sizes: these configs produce a single train split.
    "liepa2_alaw_gcm_pcm_small": {
        "bucket": "asr-training-data",
        "directory": "liepa2_alaw_gcm_pcm_small",
        "max_audio_length": 10,
        "min_audio_length": 0.5
    },
    "kmp_pcm": {"bucket": "asr-training-data", "directory": "kmp_pcm"},
}
def _to_prob(x: Any) -> float:
if x is None:
return 0.0
if isinstance(x, bool):
return 1.0 if x else 0.0
if isinstance(x, str):
s = x.strip().lower().replace("%", "")
if not s:
return 0.0
v = float(s)
return v / 100.0 if v > 1.0 else v
v = float(x)
return v / 100.0 if v > 1.0 else v
def _build_channel(cfg: "AaiLabsDatasetConfig") -> Compose:
    """Build the telephone-like band-limiting chain: high-pass, then low-pass."""
    filters = [
        HighPassFilter(min_cutoff_freq=cfg.hp_min_cutoff, max_cutoff_freq=cfg.hp_max_cutoff, p=1.0),
        LowPassFilter(min_cutoff_freq=cfg.lp_min_cutoff, max_cutoff_freq=cfg.lp_max_cutoff, p=1.0),
    ]
    return Compose(filters)
def _deterministic_seed(base_seed: int, key: str) -> int:
h = hashlib.sha256(f"{base_seed}|{key}".encode("utf-8")).digest()
return int.from_bytes(h[:4], "little", signed=False)
def _rng_for(base_seed: int, key: str) -> random.Random:
    """Return a private RNG deterministically seeded for (base_seed, key)."""
    seed = _deterministic_seed(base_seed, key)
    return random.Random(seed)
def _sanitize_numeric(x: np.ndarray) -> np.ndarray:
x = np.asarray(x)
if x.dtype != np.float32:
x = x.astype(np.float32, copy=False)
if not np.isfinite(x).all():
x = np.nan_to_num(x, nan=0.0, posinf=0.0, neginf=0.0).astype(np.float32, copy=False)
return x
def _finalize_audio(x: np.ndarray, eps: float = 1e-8) -> np.ndarray:
    """Sanitize, peak-normalize if needed, and hard-clip to [-1, 1] as float32.

    Normalization only triggers when the peak exceeds 1.0, so quiet audio is
    left untouched; the final clip guards against residual overshoot.
    """
    audio = _sanitize_numeric(x)
    peak = float(np.max(np.abs(audio))) if audio.size else 0.0
    if peak > 1.0:
        audio = audio / (peak + eps)
    return np.clip(audio, -1.0, 1.0).astype(np.float32, copy=False)
# Correct and safe application order for stacked augs:
# channel (bandlimit) -> reverb (room) -> noise (environment) -> bitcrush (codec)
_AUG_ORDER = ["channel", "reverb", "noise", "bitcrush"]
# Fast membership checks when validating user-provided combo tokens.
_AUG_SET = set(_AUG_ORDER)
def _canonical_combo(parts: List[str]) -> str:
    """Normalize a list of augmentation names into a canonical combo string.

    Empty strings and "clean" are dropped, duplicates collapse, and the
    survivors are ordered per _AUG_ORDER and joined with "+". Returns
    "clean" when nothing (recognized) remains.
    """
    wanted = {p for p in parts if p and p != "clean"}
    ordered = [name for name in _AUG_ORDER if name in wanted]
    return "+".join(ordered) if ordered else "clean"
def _parse_combo_list(s: Optional[str]) -> Optional[List[str]]:
    """Parse a user string like "clean,noise,channel+noise" into canonical combos.

    Tokens are split on commas, pipes, semicolons and whitespace; "none"
    tokens are skipped, unrecognized parts are dropped, and duplicates are
    removed while keeping first-seen order. Returns None for empty/absent
    input so callers can distinguish "not configured".
    """
    if s is None:
        return None
    text = str(s).strip()
    if not text:
        return None
    result: List[str] = []
    seen: Set[str] = set()
    for token in re.split(r"[,\|\;\n\r\t ]+", text):
        token = token.strip().lower()
        if not token or token == "none":
            continue
        parts = [p.strip().lower() for p in token.split("+") if p.strip()]
        parts = [p for p in parts if p in _AUG_SET or p == "clean"]
        combo = _canonical_combo(parts)
        if combo not in seen:
            result.append(combo)
            seen.add(combo)
    return result or None
def _choose_single_from_probs(rng: random.Random, probs: Dict[str, float]) -> str:
u = rng.random()
cum = 0.0
for name, p in probs.items():
cum += float(p)
if u < cum:
return name
return "clean"
def _choose_stack_independent(rng: random.Random, probs: Dict[str, float], max_stack: int) -> List[str]:
    """Independently include each augmentation with its own probability.

    Draws one uniform per name in canonical _AUG_ORDER (so capping never
    depends on dict order), then truncates to max_stack. Returns ["clean"]
    when nothing is selected or max_stack < 1.
    """
    if max_stack < 1:
        return ["clean"]
    picked = [name for name in _AUG_ORDER if rng.random() < float(probs.get(name, 0.0))]
    return picked[:max_stack] if picked else ["clean"]
def _weighted_choice(rng: random.Random, items: List[str], weights: List[float]) -> str:
total = float(sum(weights))
if total <= 0.0:
return items[int(rng.random() * len(items))]
r = rng.random() * total
cum = 0.0
for it, w in zip(items, weights):
cum += float(w)
if r <= cum:
return it
return items[-1]
def _combo_weight(combo: str, probs: Dict[str, float]) -> float:
# weight ~ product of included probs; "clean" weight ~ remaining mass
if combo == "clean":
s = float(sum(float(p) for p in probs.values()))
return max(0.0, 1.0 - s)
w = 1.0
for part in combo.split("+"):
if part in probs:
w *= max(0.0, float(probs[part]))
else:
return 0.0
return w
class AaiLabsDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig carrying R2 location, split sizes and augmentation knobs.

    NOTE(review): kwargs are consumed via ``pop()`` so that only unknown keys
    reach ``datasets.BuilderConfig``; the pop order matters (e.g. "noise" must
    be popped before the "p_noise" default is computed from it).
    """

    def __init__(self, name: str, **kwargs):
        self.name = name
        # Optional cap on how many zip files are loaded (None = all).
        self.length = kwargs.pop("length", None)
        self.bucket = kwargs.pop("bucket", None)
        self.directory = kwargs.pop("directory", None)
        # Duration filters in seconds.
        self.max_audio_length = kwargs.pop("max_audio_length", 10)
        self.min_audio_length = kwargs.pop("min_audio_length", 0.5)
        # Fractions of zip FILES assigned to the test/validation splits.
        self.test_size = kwargs.pop("test_size", None)
        self.val_size = kwargs.pop("val_size", None)
        # Augmentation master switch and which split(s) it applies to.
        self.aug_enable: bool = bool(kwargs.pop("aug_enable", False))
        self.apply_augmentations: Literal["train", "validation", "test", "all", "none"] = kwargs.pop(
            "apply_augmentations", "none"
        )
        self.aug_seed: int = int(kwargs.pop("aug_seed", 42))
        self.aug_mode: Literal["single", "stacked"] = kwargs.pop("aug_mode", "single")
        self.max_aug_stack: int = int(kwargs.pop("max_aug_stack", 2))
        self.allowed_combos_raw: Optional[str] = kwargs.pop("allowed_combos", None)
        # Short-form knobs ("25", "25%", 0.25, True) converted to probabilities;
        # the explicit p_* kwargs take precedence over the short forms.
        noise = kwargs.pop("noise", None)
        reverb = kwargs.pop("reverb", None)
        channel = kwargs.pop("channel", None)
        bitcrush = kwargs.pop("bitcrush", None)
        self.p_noise = float(kwargs.pop("p_noise", _to_prob(noise) if noise is not None else 0.0))
        self.p_reverb = float(kwargs.pop("p_reverb", _to_prob(reverb) if reverb is not None else 0.0))
        self.p_channel = float(kwargs.pop("p_channel", _to_prob(channel) if channel is not None else 0.0))
        self.p_bitcrush = float(kwargs.pop("p_bitcrush", _to_prob(bitcrush) if bitcrush is not None else 0.0))
        # Background-noise SNR range (dB) for AddBackgroundNoise.
        self.min_snr_db = float(kwargs.pop("min_snr_db", 22.0))
        self.max_snr_db = float(kwargs.pop("max_snr_db", 35.0))
        # Band-limiting cutoff ranges (Hz) for the "channel" augmentation.
        self.hp_min_cutoff = int(kwargs.pop("hp_min_cutoff", 80))
        self.hp_max_cutoff = int(kwargs.pop("hp_max_cutoff", 250))
        self.lp_min_cutoff = int(kwargs.pop("lp_min_cutoff", 2800))
        self.lp_max_cutoff = int(kwargs.pop("lp_max_cutoff", 4500))
        # Bit-depth range for the "bitcrush" augmentation.
        self.min_bit_depth = int(kwargs.pop("min_bit_depth", 10))
        self.max_bit_depth = int(kwargs.pop("max_bit_depth", 14))
        # R2 prefixes holding downloadable augmentation assets (wav files).
        self.noise_prefix = str(kwargs.pop("noise_prefix", "asr-training-data/augmentation/noise"))
        self.ir_prefix = str(kwargs.pop("ir_prefix", "asr-training-data/augmentation/ir"))
        # In "single" mode the per-aug probabilities form part of one CDF,
        # so their sum may not exceed 1 (small epsilon for float noise).
        s = self.p_noise + self.p_reverb + self.p_channel + self.p_bitcrush
        if s > 1.0 + 1e-9:
            raise ValueError(f"Aug probs must sum <= 1.0, got {s:.3f}")
        super().__init__(name=name, **kwargs)
        self.allowed_combos: Optional[List[str]] = _parse_combo_list(self.allowed_combos_raw)
class CloudflareSpeechDatasetAugmented(datasets.GeneratorBasedBuilder):
    """HF dataset builder that reads zipped speech corpora from Cloudflare R2.

    Zips are discovered under ``<bucket>/<directory>/*.zip``, cached locally,
    parsed either via a ``manifest.jsona`` or via paired ``.srt``/``.wav``
    files, split deterministically into train/validation/test at zip
    granularity, and optionally augmented at generation time (see the module
    docstring for the augmentation knobs).
    """

    BUILDER_CONFIGS = [
        AaiLabsDatasetConfig(
            name=n,
            length=_CONFIGS[n].get("length"),
            bucket=_CONFIGS[n].get("bucket"),
            directory=_CONFIGS[n].get("directory"),
            max_audio_length=_CONFIGS[n].get("max_audio_length", 10),
            min_audio_length=_CONFIGS[n].get("min_audio_length", 0.5),
            test_size=_CONFIGS[n].get("test_size"),
            val_size=_CONFIGS[n].get("val_size"),
        )
        for n in _CONFIGS.keys()
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Declare the example schema: audio + transcript + metadata + aug tag."""
        features = datasets.Features(
            {
                "audio": datasets.Audio(),
                "text": datasets.Value("string"),
                "id": datasets.Value("string"),
                "speaker": datasets.Value("string"),
                "timestamp": datasets.Value("string"),
                "class": datasets.Value("string"),
                "augmentation": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _get_r2_fs(self) -> s3fs.S3FileSystem:
        """Build an S3-compatible filesystem client for Cloudflare R2.

        Requires R2_ACCOUNT_ID, R2_ACCESS_KEY_ID and R2_SECRET_ACCESS_KEY in
        the environment; R2_ENDPOINT may override the derived endpoint URL.
        Raises KeyError if a required variable is missing.
        """
        account_id = os.environ["R2_ACCOUNT_ID"]
        endpoint = os.environ.get("R2_ENDPOINT", f"https://{account_id}.r2.cloudflarestorage.com")
        return s3fs.S3FileSystem(
            key=os.environ["R2_ACCESS_KEY_ID"],
            secret=os.environ["R2_SECRET_ACCESS_KEY"],
            client_kwargs={"endpoint_url": endpoint, "region_name": "auto"},
        )

    def _clean_text(self, text: str) -> str:
        """Lowercase *text* and strip everything except spaces and Lithuanian letters."""
        if text is None:
            return ""
        chars_to_ignore = r"[^ aąbcčdeėęfghiįyjklmnoprsštuųūvzž]"
        return re.sub(chars_to_ignore, "", str(text), flags=re.IGNORECASE).lower().strip()

    def __convert_to_ms(self, time_obj: datetime.time) -> int:
        """Convert a ``datetime.time`` (as produced by pysrt cues) to milliseconds."""
        return int(
            time_obj.hour * 3600 * 1000
            + time_obj.minute * 60 * 1000
            + time_obj.second * 1000
            + time_obj.microsecond // 1000
        )

    def __get_manifest_samples(self, path: str) -> List[Sample]:
        """Load Sample entries from a zip's ``manifest.jsona``.

        The manifest is parsed with three fallbacks: whole-file JSON, then
        JSON-lines, then a regex repair that turns back-to-back objects into
        a JSON array. Entries with empty cleaned text or no zip entry name
        are dropped; when duration limits are configured, samples outside
        [min_audio_length, max_audio_length] seconds are filtered out.
        """
        with zipfile.ZipFile(path, "r") as zip_file:
            raw_manifest = zip_file.read("manifest.jsona").decode("utf-8")
            raw_manifest = raw_manifest.replace("\ufeff", "").strip()
            manifest_entries: Optional[List[dict]] = None
            # Strategy 1: the whole file is one JSON document (list or object).
            try:
                parsed = json.loads(raw_manifest)
                manifest_entries = parsed if isinstance(parsed, list) else [parsed]
            except Exception:
                manifest_entries = None
            # Strategy 2: JSON-lines; unparseable lines are skipped.
            if manifest_entries is None:
                entries: List[dict] = []
                for line in raw_manifest.splitlines():
                    line = line.strip()
                    if not line:
                        continue
                    try:
                        entries.append(json.loads(line))
                    except Exception:
                        continue
                if entries:
                    manifest_entries = entries
            # Strategy 3: repair "}{"-concatenated objects into a JSON array.
            if manifest_entries is None:
                fixed = "[\n" + re.sub(r"}\s*{", "},\n{", raw_manifest) + "\n]"
                manifest_entries = json.loads(fixed)
            samples: List[Sample] = []
            for entry in manifest_entries:
                try:
                    # Some manifests store seq_no as a (possibly BOM-prefixed) string.
                    if "seq_no" in entry and entry["seq_no"] is not None and isinstance(entry["seq_no"], str):
                        entry["seq_no"] = int(entry["seq_no"].lstrip("\ufeff").strip())
                    sample = Sample.model_validate(entry)
                    if sample.zip_entry_name is None and sample.wav_file_name:
                        sample.zip_entry_name = sample.wav_file_name
                    clean = self._clean_text(sample.text)
                    if not clean:
                        continue
                    sample.text = clean
                    if not sample.zip_entry_name:
                        continue
                    samples.append(sample)
                except Exception:
                    # Skip malformed manifest entries rather than failing the whole zip.
                    continue
            if self.config.max_audio_length:
                min_len = float(self.config.min_audio_length)
                max_len = float(self.config.max_audio_length)
                filtered: List[Sample] = []
                for s in samples:
                    # Duration can only be derived when both fields are present.
                    if not (s.sample_rate and s.sample_count):
                        continue
                    dur = s.sample_count / s.sample_rate
                    if min_len <= dur <= max_len:
                        filtered.append(s)
                samples = filtered
            return samples

    def __get_srt_samples(self, path: str) -> List[Sample]:
        """Build Samples from paired ``.srt``/``.wav`` entries inside a zip.

        Each subtitle cue becomes one Sample describing a slice of its wav
        file; cues containing "*", with empty cleaned text, or longer than
        max_audio_length are skipped.
        """
        with zipfile.ZipFile(path, "r") as zip_file:
            names = zip_file.namelist()
            srt_files = [n for n in names if n.endswith(".srt")]
            wav_files = {n for n in names if n.endswith(".wav")}
            samples: List[Sample] = []
            for srt_file in srt_files:
                wav_file = srt_file.replace(".srt", ".wav")
                if wav_file not in wav_files:
                    continue
                with zip_file.open(srt_file) as f:
                    subtitles = pysrt.from_string(f.read().decode("utf-8"))
                # The wav is decoded here only to learn its frame rate.
                with zip_file.open(wav_file) as f:
                    audio = AudioSegment.from_wav(BytesIO(f.read()))
                sample_rate = audio.frame_rate
                for subtitle in subtitles:
                    if "*" in subtitle.text:
                        continue
                    start_ms = self.__convert_to_ms(subtitle.start.to_time())
                    end_ms = self.__convert_to_ms(subtitle.end.to_time())
                    clean_text = self._clean_text(subtitle.text)
                    if not clean_text:
                        continue
                    if self.config.max_audio_length and (end_ms - start_ms) / 1000 > self.config.max_audio_length:
                        continue
                    samples.append(
                        Sample(
                            zip_entry_name=wav_file,
                            sample_rate=sample_rate,
                            sample_count=int((end_ms - start_ms) / 1000 * sample_rate),
                            seq_no=int(str(subtitle.index).lstrip("\ufeff").strip()),
                            text=clean_text,
                            start_ts=start_ms,
                            end_ts=end_ms,
                            wav_file_name=wav_file,
                        )
                    )
            return samples

    def __get_samples(self, path: str) -> List[Sample]:
        """Dispatch to manifest- or SRT-based loading depending on zip contents."""
        with zipfile.ZipFile(path, "r") as zip_file:
            if "manifest.jsona" in zip_file.namelist():
                return self.__get_manifest_samples(path)
            return self.__get_srt_samples(path)

    def _download_aug_assets(self, dl_manager: datasets.DownloadManager) -> Tuple[Optional[str], Optional[str]]:
        """Mirror noise/IR wav assets from R2 into the local datasets cache.

        Returns (noise_dir, ir_dir); either element is None when no wavs are
        available for that asset type, which disables the matching
        augmentation in _generate_examples.
        """
        fs = self._get_r2_fs()
        cache_root = (
            dl_manager.download_config.cache_dir
            or os.environ.get("HF_DATASETS_CACHE")
            or datasets.config.HF_DATASETS_CACHE
        )
        cache_dir = Path(cache_root)
        noise_dir = cache_dir / "augmentation_assets/noise"
        ir_dir = cache_dir / "augmentation_assets/ir"
        noise_dir.mkdir(parents=True, exist_ok=True)
        ir_dir.mkdir(parents=True, exist_ok=True)
        noise_keys = sorted(fs.glob(f"{self.config.noise_prefix}/*.wav"))
        ir_keys = sorted(fs.glob(f"{self.config.ir_prefix}/*.wav"))
        # Only fetch files that are not cached yet.
        for k in noise_keys:
            dst = noise_dir / Path(k).name
            if not dst.exists():
                fs.get(k, str(dst))
        for k in ir_keys:
            dst = ir_dir / Path(k).name
            if not dst.exists():
                fs.get(k, str(dst))
        noise_ok = any(noise_dir.glob("*.wav"))
        ir_ok = any(ir_dir.glob("*.wav"))
        return (str(noise_dir) if noise_ok else None), (str(ir_dir) if ir_ok else None)

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download and cache all zips, then split them into train/val/test.

        Splitting happens at zip-file granularity with a fixed shuffle seed
        (42), so splits are deterministic across runs. When neither
        test_size nor val_size is configured, everything goes into a single
        train split.
        """
        self._dl_manager = dl_manager  # used in _generate_examples
        fs = self._get_r2_fs()
        bucket = self.config.bucket
        prefix = self.config.directory.strip("/")
        pattern = f"{bucket}/{prefix}/*.zip"
        files = sorted(fs.glob(pattern))
        if getattr(self.config, "length", None):
            files = files[: self.config.length]
        dataset_entries: List[Dict[str, Any]] = []
        cache_root = (
            dl_manager.download_config.cache_dir
            or os.environ.get("HF_DATASETS_CACHE")
            or datasets.config.HF_DATASETS_CACHE
        )
        cache_dir = Path(cache_root)
        for key in tqdm(files):
            local_path = cache_dir / key
            local_path.parent.mkdir(parents=True, exist_ok=True)
            # Re-download when missing or when the cached file is not a valid zip.
            if not (local_path.exists() and zipfile.is_zipfile(local_path)):
                fs.get(key, str(local_path))
            file_samples = self.__get_samples(str(local_path))
            if file_samples:
                dataset_entries.append({"path": str(local_path), "samples": file_samples})
        random.seed(42)
        random.shuffle(dataset_entries)
        if not self.config.test_size and not self.config.val_size:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "paths": [e["path"] for e in dataset_entries],
                        "samples_list": [e["samples"] for e in dataset_entries],
                        "split_name": "train",
                    },
                )
            ]
        # NOTE(review): configs in _CONFIGS set test_size and val_size together;
        # a config providing only one of them would hit float(None) below — confirm.
        total_count = len(dataset_entries)
        test_size = int(float(self.config.test_size) * total_count)
        val_size = int(float(self.config.val_size) * total_count)
        test_entries = dataset_entries[:test_size]
        val_entries = dataset_entries[test_size : test_size + val_size]
        train_entries = dataset_entries[test_size + val_size :]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "paths": [e["path"] for e in train_entries],
                    "samples_list": [e["samples"] for e in train_entries],
                    "split_name": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "paths": [e["path"] for e in val_entries],
                    "samples_list": [e["samples"] for e in val_entries],
                    "split_name": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "paths": [e["path"] for e in test_entries],
                    "samples_list": [e["samples"] for e in test_entries],
                    "split_name": "test",
                },
            ),
        ]

    def _pick_target_combo(self, rng: random.Random, probs: Dict[str, float]) -> str:
        """Choose the augmentation combo for one example.

        When allowed_combos is configured, sampling is restricted to that
        whitelist weighted by _combo_weight; otherwise "single" mode draws at
        most one augmentation and "stacked" mode draws each independently.
        """
        if self.config.allowed_combos:
            combos = self.config.allowed_combos
            weights = [_combo_weight(c, probs) for c in combos]
            return _weighted_choice(rng, combos, weights)
        if self.config.aug_mode == "single":
            c = _choose_single_from_probs(rng, probs)
            return "clean" if c == "clean" else _canonical_combo([c])
        chosen = _choose_stack_independent(rng, probs, self.config.max_aug_stack)
        return _canonical_combo(chosen)

    def _generate_examples(self, paths: List[str], samples_list: List[List[Sample]], split_name: str):
        """Yield (index, example) pairs, applying augmentations when enabled.

        The "augmentation" field reports what actually happened: "none" when
        augmentation is disabled for this split, "clean" when enabled but
        nothing was applied (or every attempted step failed), otherwise a
        "+"-joined list of the applied steps in _AUG_ORDER.
        """
        do_aug = bool(self.config.aug_enable) and (
            self.config.apply_augmentations == split_name or self.config.apply_augmentations == "all"
        )
        noise_dir: Optional[str] = None
        ir_dir: Optional[str] = None
        if do_aug:
            noise_dir, ir_dir = self._download_aug_assets(self._dl_manager)
        # Transforms are created with p=1.0; whether each runs is decided
        # per-example by _pick_target_combo, not by the transform itself.
        aug_noise = (
            AddBackgroundNoise(
                sounds_path=noise_dir,
                min_snr_db=self.config.min_snr_db,
                max_snr_db=self.config.max_snr_db,
                noise_rms="relative",
                p=1.0,
            )
            if (do_aug and noise_dir)
            else None
        )
        aug_reverb = ApplyImpulseResponse(ir_path=ir_dir, p=1.0) if (do_aug and ir_dir) else None
        aug_channel = _build_channel(self.config) if do_aug else None
        aug_bitcrush = (
            BitCrush(min_bit_depth=self.config.min_bit_depth, max_bit_depth=self.config.max_bit_depth, p=1.0)
            if do_aug
            else None
        )
        # Keep dict order stable for single-mode CDF sampling
        probs = {
            "noise": float(self.config.p_noise),
            "reverb": float(self.config.p_reverb),
            "channel": float(self.config.p_channel),
            "bitcrush": float(self.config.p_bitcrush),
        }
        i = 0
        for path, samples in zip(paths, samples_list):
            with zipfile.ZipFile(path, "r") as zip_file:
                for sample in samples:
                    entry_name = sample.zip_entry_name or sample.wav_file_name
                    if not entry_name:
                        continue
                    try:
                        contents = zip_file.read(entry_name)
                    except KeyError:
                        # Entry listed in the manifest but missing from the zip.
                        continue
                    # SRT-derived samples carry slice boundaries in milliseconds.
                    if sample.start_ts is not None and sample.end_ts is not None:
                        audio = AudioSegment.from_file(BytesIO(contents), format="wav")
                        audio_chunk = audio[sample.start_ts : sample.end_ts]
                        contents = audio_chunk.export(format="wav").read()
                    audio_data, sr = librosa.load(BytesIO(contents), sr=TARGET_SR, mono=True)
                    audio_data = _sanitize_numeric(audio_data)
                    aug_applied = "none"
                    if do_aug:
                        # Per-sample RNG keyed on the sample id keeps augmentation
                        # deterministic and independent of iteration order.
                        sid = str(sample.id) if sample.id is not None else f"idx_{i}"
                        rng = _rng_for(self.config.aug_seed, sid)
                        target = self._pick_target_combo(rng, probs)
                        applied: List[str] = []
                        if target != "clean":
                            want = set(target.split("+"))
                            for aug in _AUG_ORDER:
                                if aug not in want:
                                    continue
                                try:
                                    if aug == "channel" and aug_channel is not None:
                                        audio_data = aug_channel(samples=audio_data, sample_rate=sr)
                                    elif aug == "reverb" and aug_reverb is not None:
                                        audio_data = aug_reverb(samples=audio_data, sample_rate=sr)
                                    elif aug == "noise" and aug_noise is not None:
                                        audio_data = aug_noise(samples=audio_data, sample_rate=sr)
                                    elif aug == "bitcrush" and aug_bitcrush is not None:
                                        audio_data = aug_bitcrush(samples=audio_data, sample_rate=sr)
                                    else:
                                        continue
                                    applied.append(aug)
                                    # Re-sanitize after every step so one transform's
                                    # NaN/Inf output cannot poison the next.
                                    audio_data = _sanitize_numeric(audio_data)
                                except Exception:
                                    # A failing step is skipped; the example stays usable.
                                    continue
                        aug_applied = "+".join(applied) if applied else "clean"
                    audio_data = _finalize_audio(audio_data)
                    yield i, {
                        "audio": {"array": audio_data, "sampling_rate": sr},
                        "text": sample.text or "",
                        "id": str(sample.id) if sample.id is not None else "",
                        "speaker": sample.speaker or "",
                        "timestamp": sample.timestamp or "",
                        "class": sample.class_ or "",
                        "augmentation": aug_applied,
                    }
                    i += 1
# Module-level alias so the builder can be discovered under a generic name.
Dataset = CloudflareSpeechDatasetAugmented