# french-education-speech / segment_vad.py
# Initial commit: French Education Speech Corpus (Phase 1) — mathisescriva (5d2c0a9)
#!/usr/bin/env python3
import argparse
import math
from pathlib import Path
from typing import List, Tuple
import numpy as np
import soundfile as sf
import webrtcvad
from tqdm import tqdm
def read_audio(path: Path) -> Tuple[np.ndarray, int]:
    """Load an audio file as mono float32.

    Returns a ``(samples, sample_rate)`` pair; multi-channel input is
    downmixed to mono by averaging the channels.
    """
    samples, sample_rate = sf.read(str(path))
    if samples.ndim == 2:
        # Average across channels to collapse stereo (or more) to mono.
        samples = samples.mean(axis=1)
    return samples.astype(np.float32), sample_rate
def write_audio(path: Path, data: np.ndarray, sr: int) -> None:
    """Write *data* to *path* at sample rate *sr*, creating parent dirs."""
    target_dir = path.parent
    target_dir.mkdir(parents=True, exist_ok=True)
    sf.write(str(path), data, sr)
def frame_generator(audio: np.ndarray, sample_rate: int, frame_ms: int) -> List[np.ndarray]:
    """Split *audio* into consecutive frames of *frame_ms* milliseconds.

    The final frame is zero-padded so every returned frame holds exactly
    the same number of samples (fixed-size frames are required downstream
    by the WebRTC VAD).
    """
    spf = int(sample_rate * frame_ms / 1000)  # samples per frame
    total = len(audio)
    frames: List[np.ndarray] = []
    offset = 0
    while offset < total:
        chunk = audio[offset:offset + spf]
        if chunk.shape[0] < spf:
            # Pad the trailing partial frame with zeros of the same dtype.
            filler = np.zeros(spf - chunk.shape[0], dtype=audio.dtype)
            chunk = np.concatenate([chunk, filler], axis=0)
        frames.append(chunk)
        offset += spf
    return frames
def vad_segments(audio: np.ndarray, sample_rate: int, aggressiveness: int = 2, frame_ms: int = 30,
                 min_speech_ms: int = 300, max_silence_ms: int = 300) -> List[Tuple[int, int]]:
    """Detect speech regions with WebRTC VAD.

    Parameters:
        audio: mono float32 samples in [-1, 1].
        sample_rate: sampling rate in Hz (webrtcvad supports 8/16/32/48 kHz
            — TODO confirm callers only pass 16 kHz, as process_file does).
        aggressiveness: webrtcvad mode, 0 (least) to 3 (most aggressive).
        frame_ms: VAD frame size; must be 10, 20 or 30 ms.
        min_speech_ms: segments shorter than this are discarded.
        max_silence_ms: silence run that closes an open segment.

    Returns a list of (start_sample, end_sample) tuples. End indices may
    extend slightly past len(audio) because the last frame is zero-padded;
    numpy slicing tolerates this.
    """
    assert frame_ms in (10, 20, 30), "WebRTC VAD supports 10/20/30 ms"
    vad = webrtcvad.Vad(aggressiveness)
    frames = frame_generator(audio, sample_rate, frame_ms)
    is_speech = []
    for frame in frames:
        # float32 [-1, 1] -> little-endian 16-bit PCM bytes, as webrtcvad expects.
        int16 = np.clip(frame * 32768.0, -32768, 32767).astype(np.int16).tobytes()
        is_speech.append(vad.is_speech(int16, sample_rate))
    ms_per_frame = frame_ms
    segments = []
    start = None
    run_silence = 0
    for i, speech in enumerate(is_speech):
        if speech:
            if start is None:
                start = i
            run_silence = 0
        else:
            if start is not None:
                run_silence += ms_per_frame
                if run_silence >= max_silence_ms:
                    # Close the segment at the first silent frame; keep it
                    # only if it meets the minimum speech duration.
                    end = i
                    duration_ms = (end - start) * ms_per_frame
                    if duration_ms >= min_speech_ms:
                        segments.append((start, end))
                    start = None
                    run_silence = 0
    # Flush a segment still open at end of audio. Bug fix: apply the same
    # min_speech_ms filter as interior segments — previously an arbitrarily
    # short trailing blip was always kept.
    if start is not None:
        end = len(is_speech)
        if (end - start) * ms_per_frame >= min_speech_ms:
            segments.append((start, end))
    # Convert frame indices to sample indices.
    spf = int(sample_rate * frame_ms / 1000)
    sample_segments = [(s * spf, e * spf) for s, e in segments]
    return sample_segments
def hard_cap_segments(segments: List[Tuple[int, int]], max_seconds: float, sample_rate: int) -> List[Tuple[int, int]]:
    """Enforce a maximum segment length, splitting longer segments.

    Segments at or under ``max_seconds`` pass through unchanged; longer
    ones are chopped into consecutive chunks of at most ``max_seconds``.
    Any chunk of 0.2 s or less (a trailing remainder) is discarded.
    """
    limit = int(max_seconds * sample_rate)
    min_keep = int(0.2 * sample_rate)  # drop ultra-short remainders
    result: List[Tuple[int, int]] = []
    for begin, end in segments:
        if end - begin <= limit:
            result.append((begin, end))
            continue
        for chunk_start in range(begin, end, limit):
            chunk_end = min(chunk_start + limit, end)
            if chunk_end - chunk_start > min_keep:
                result.append((chunk_start, chunk_end))
    return result
def process_file(in_path: Path, out_dir: Path, max_seconds: float) -> int:
    """Segment one WAV file into VAD-delimited chunks under *out_dir*.

    Only 16 kHz inputs are processed (the corpus is pre-normalized); any
    other sample rate is silently skipped. Returns the number of segment
    files written.
    """
    audio, sr = read_audio(in_path)
    if sr != 16000:
        # Expect normalized inputs; skip others
        return 0
    spans = hard_cap_segments(
        vad_segments(audio, sr, aggressiveness=2, frame_ms=30, min_speech_ms=250, max_silence_ms=300),
        max_seconds,
        sr,
    )
    for idx, (begin, end) in enumerate(spans):
        write_audio(out_dir / f"{in_path.stem}_seg{idx:04d}.wav", audio[begin:end], sr)
    return len(spans)
def main() -> None:
    """CLI entry point: VAD-segment every WAV under --indir.

    Bug fix: the original parsed --outdir into ``outroot`` and never used
    it — segments were always written next to their input file. Outputs
    are now mirrored under --outdir, preserving each file's directory
    structure relative to --indir. When --outdir equals --indir the
    behavior is identical to before.
    """
    parser = argparse.ArgumentParser(description="VAD-based segmentation to <= max seconds, expects 16kHz mono WAV inputs.")
    parser.add_argument("--indir", required=True, help="Input audio root")
    parser.add_argument("--outdir", required=True, help="Output audio root (can be same as input subfolders)")
    parser.add_argument("--max-seconds", type=float, default=30.0, help="Maximum segment length in seconds")
    args = parser.parse_args()
    inroot = Path(args.indir)
    outroot = Path(args.outdir)
    # Prefer pre-normalized *.16k.wav files; fall back to every .wav.
    wavs = list(inroot.rglob("*.16k.wav"))
    if not wavs:
        wavs = list(inroot.rglob("*.wav"))
    total_segments = 0
    for f in tqdm(wavs, desc="Segmenting"):
        # Mirror the input's relative directory under the output root.
        out_dir = outroot / f.parent.relative_to(inroot)
        total_segments += process_file(f, out_dir, args.max_seconds)
    print(f"Total segments written: {total_segments}")
if __name__ == "__main__":
    main()