# sound_events / normalizer_audios.py
# SevKod: audio trimming and adding up to normalizer (commit 47e5a47)
#!/usr/bin/env python3
"""
All-in-one audio processing pipeline for sound event files.
Pipeline: 1) Normalize -> 2) Trim silence -> 3) Verify cuts
1. NORMALIZE – scale every file to a target RMS (or peak) level in dBFS
so all events share a consistent loudness.
2. TRIM – remove leading / trailing silence using an energy-based detector
with a configurable safety margin.
3. VERIFY – check every trimmed file for corruption, suspiciously short
duration, or abrupt start/end (which would indicate a bad cut).
Usage examples
--------------
# Full pipeline with defaults (in-place, -20 dBFS RMS):
python normalizer_audios.py
# Normalize to -25 dBFS, wider trim margin:
python normalizer_audios.py --target-db -25 --margin-ms 50
# Peak-normalize instead of RMS:
python normalizer_audios.py --mode peak --target-db -1
# Preview everything without writing:
python normalizer_audios.py --dry-run
# Skip normalization (trim + verify only):
python normalizer_audios.py --skip-normalize
# Skip trimming (normalize + verify only):
python normalizer_audios.py --skip-trim
"""
import argparse
import json
import logging
import math
import os
import struct
import sys
import wave
from pathlib import Path
import numpy as np
import soundfile as sf
# Console logging shared by all pipeline steps: timestamped, single-line records.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(levelname)s | %(message)s",
    datefmt="%H:%M:%S",
)
log = logging.getLogger(__name__)
# Default audio root: the "sound_audios" folder next to this script.
AUDIO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "sound_audios")
# ─────────────────────────────────────────────────────────────────────────────
# Shared I/O (stdlib wave – used by trimmer & verifier)
# ─────────────────────────────────────────────────────────────────────────────
def _wav_fmt(sampwidth):
if sampwidth == 1:
return "B"
elif sampwidth == 2:
return "h"
elif sampwidth == 4:
return "i"
raise ValueError(f"Unsupported sample width: {sampwidth}")
def read_wav_stdlib(filepath):
    """Read a WAV file with the stdlib *wave* module.

    Returns (samples_list, sample_rate, sampwidth, nchannels); samples
    are interleaved integers, little-endian as stored on disk.
    """
    with wave.open(filepath, "rb") as wf:
        nch = wf.getnchannels()
        width = wf.getsampwidth()
        rate = wf.getframerate()
        frames = wf.getnframes()
        payload = wf.readframes(frames)
        code = _wav_fmt(width)
        unpacked = struct.unpack(f"<{frames * nch}{code}", payload)
        return list(unpacked), rate, width, nch
def write_wav_stdlib(filepath, samples, sr, sampwidth, nchannels):
    """Write interleaved integer samples to *filepath* as a PCM WAV."""
    code = _wav_fmt(sampwidth)
    payload = struct.pack(f"<{len(samples)}{code}", *samples)
    with wave.open(filepath, "wb") as out:
        out.setnchannels(nchannels)
        out.setsampwidth(sampwidth)
        out.setframerate(sr)
        out.writeframes(payload)
def collect_wav_files(audio_dir):
    """Return a sorted list of all .wav paths under *audio_dir* (recursive).

    Matching is case-insensitive on the extension; the result is sorted
    by full path.
    """
    matches = (
        os.path.join(root, name)
        for root, _dirs, names in os.walk(audio_dir)
        for name in sorted(names)
        if name.lower().endswith(".wav")
    )
    return sorted(matches)
# ═════════════════════════════════════════════════════════════════════════════
# STEP 1 – NORMALIZE
# ═════════════════════════════════════════════════════════════════════════════
def rms_db(signal: np.ndarray) -> float:
    """Return the RMS level of *signal* in dBFS.

    An empty or all-zero signal has no measurable level, so -inf is
    returned. The explicit empty guard avoids np.mean of an empty array,
    which would propagate NaN (with a RuntimeWarning) into the result.
    """
    if signal.size == 0:
        return -np.inf
    rms = np.sqrt(np.mean(signal.astype(np.float64) ** 2))
    return -np.inf if rms == 0 else 20 * np.log10(rms)
def peak_db(signal: np.ndarray) -> float:
    """Return the peak absolute level of *signal* in dBFS.

    Returns -inf for empty or silent input; without the empty guard
    np.max on a zero-length array raises ValueError.
    """
    if signal.size == 0:
        return -np.inf
    peak = np.max(np.abs(signal.astype(np.float64)))
    return -np.inf if peak == 0 else 20 * np.log10(peak)
def _apply_gain(signal: np.ndarray, current_db: float, target_db: float) -> np.ndarray:
if current_db == -np.inf:
return signal
gain_linear = 10 ** ((target_db - current_db) / 20)
out = signal.astype(np.float64) * gain_linear
peak = np.max(np.abs(out))
if peak > 1.0:
log.warning(" Clipping detected (peak %.2f dB), applying limiter.", 20 * np.log10(peak))
out /= peak
return out
def step_normalize(audio_dir, target_db, mode, dry_run):
    """Normalize all .wav files in *audio_dir* in-place.

    audio_dir : root folder, scanned recursively for .wav files.
    target_db : target level in dBFS.
    mode      : "rms" or "peak" -- which measurement is matched to target_db.
    dry_run   : when True, measure and log only; nothing is written.

    Side effects (unless dry_run): each .wav is rewritten as 16-bit PCM,
    and a normalization_stats.json summary is written into *audio_dir*.
    """
    print("\n" + "=" * 70)
    print(" STEP 1 / 3 β€” NORMALIZE")
    print("=" * 70)
    input_dir = Path(audio_dir).resolve()
    # Measurement used for gain computation; peak level is always tracked
    # separately for the clipping statistics.
    measure_fn = rms_db if mode == "rms" else peak_db
    mode_label = "RMS" if mode == "rms" else "Peak"
    wav_files = sorted(input_dir.rglob("*.wav"))
    if not wav_files:
        log.error("No .wav files found under %s", input_dir)
        return
    log.info("Found %d .wav files", len(wav_files))
    log.info("Mode: %s | Target: %.1f dBFS | dry_run=%s", mode_label, target_db, dry_run)
    stats = []
    n_clipped = 0
    for wav_path in wav_files:
        rel = wav_path.relative_to(input_dir)
        signal, sr = sf.read(wav_path, dtype="float64")
        # Measure on a mono mix so stereo files get a single level figure.
        mono = signal.mean(axis=1) if signal.ndim == 2 else signal
        orig_db = measure_fn(mono)
        orig_peak = peak_db(mono)
        entry = {
            "file": str(rel),
            "sr": sr,
            "duration_s": round(len(mono) / sr, 3),
            f"original_{mode}_db": round(orig_db, 2),
            "original_peak_db": round(orig_peak, 2),
        }
        if orig_db == -np.inf:
            # NOTE(review): -inf levels are serialized by json.dump as
            # "-Infinity", which is not strict JSON -- confirm downstream
            # consumers of the stats file tolerate it.
            log.warning(" Skipping silent file: %s", rel)
            entry["status"] = "skipped_silent"
            stats.append(entry)
            continue
        # Gain is applied to the original (possibly multi-channel) signal.
        normed = _apply_gain(signal, orig_db, target_db)
        new_mono = normed if normed.ndim == 1 else normed.mean(axis=1)
        new_db = measure_fn(new_mono)
        new_peak = peak_db(new_mono)
        if new_peak >= 0.0:
            # At or above 0 dBFS after scaling: the limiter kicked in.
            n_clipped += 1
        entry[f"new_{mode}_db"] = round(new_db, 2)
        entry["new_peak_db"] = round(new_peak, 2)
        entry["gain_db"] = round(target_db - orig_db, 2)
        entry["status"] = "ok"
        stats.append(entry)
        log.info(
            "%-55s %s: %+6.1f -> %+6.1f dBFS (gain %+.1f dB)",
            str(rel), mode_label, orig_db, new_db, target_db - orig_db,
        )
        if not dry_run:
            # Overwrite in place as 16-bit PCM.
            sf.write(str(wav_path), normed, sr, subtype="PCM_16")
    # Summary
    orig_levels = [s[f"original_{mode}_db"] for s in stats if s["status"] == "ok"]
    new_levels = [s[f"new_{mode}_db"] for s in stats if s["status"] == "ok"]
    print("-" * 70)
    log.info("Normalized %d / %d files", len(orig_levels), len(wav_files))
    if orig_levels:
        log.info(
            "Original %s β€” min: %.1f max: %.1f mean: %.1f std: %.1f dBFS",
            mode_label, np.min(orig_levels), np.max(orig_levels),
            np.mean(orig_levels), np.std(orig_levels),
        )
        log.info(
            "After norm %s β€” min: %.1f max: %.1f mean: %.1f std: %.1f dBFS",
            mode_label, np.min(new_levels), np.max(new_levels),
            np.mean(new_levels), np.std(new_levels),
        )
    if n_clipped:
        log.warning("%d file(s) required limiting to avoid clipping.", n_clipped)
    if not dry_run:
        stats_path = os.path.join(audio_dir, "normalization_stats.json")
        with open(stats_path, "w") as f:
            json.dump(
                {"mode": mode, "target_db": target_db,
                 "n_files": len(wav_files), "n_processed": len(orig_levels),
                 "n_clipped": n_clipped, "files": stats},
                f, indent=2,
            )
        log.info("Stats written to %s", stats_path)
# ═════════════════════════════════════════════════════════════════════════════
# STEP 2 – TRIM SILENCE
# ═════════════════════════════════════════════════════════════════════════════
def _to_mono_abs(samples, nchannels, nframes):
if nchannels == 1:
return [abs(s) for s in samples]
mono = []
for i in range(nframes):
val = sum(abs(samples[i * nchannels + ch]) for ch in range(nchannels))
mono.append(val // nchannels)
return mono
def trim_silence(samples, sr, nchannels, threshold_ratio=0.02,
                 margin_ms=30, frame_ms=10):
    """Trim leading/trailing silence from interleaved integer *samples*.

    samples         : interleaved integer samples (as from read_wav_stdlib).
    sr              : sample rate in Hz.
    nchannels       : number of interleaved channels.
    threshold_ratio : frames whose RMS exceeds this fraction of the peak
                      magnitude count as "loud".
    margin_ms       : cushion kept on both sides of the loud region.
    frame_ms        : analysis frame length (hop is half a frame).

    Returns (trimmed_samples, original_ms, new_ms). Completely silent
    input is returned unchanged.
    """
    nframes = len(samples) // nchannels
    # Mono magnitude envelope (self-contained so the function has no
    # external dependencies beyond math).
    if nchannels == 1:
        envelope = [abs(v) for v in samples]
    else:
        envelope = [
            sum(abs(samples[i * nchannels + c]) for c in range(nchannels)) // nchannels
            for i in range(nframes)
        ]
    duration_ms = nframes / sr * 1000
    peak = max(envelope) if envelope else 0
    if peak == 0:
        return samples, duration_ms, duration_ms
    threshold = peak * threshold_ratio
    frame_len = max(1, int(sr * frame_ms / 1000))
    hop = max(1, frame_len // 2)

    def _is_loud(pos):
        # RMS of one analysis frame against the relative threshold.
        window = envelope[pos:pos + frame_len]
        return math.sqrt(sum(v * v for v in window) / frame_len) > threshold

    first_loud = last_loud = None
    pos = 0
    while pos + frame_len <= nframes:
        if _is_loud(pos):
            if first_loud is None:
                first_loud = pos
            last_loud = pos + frame_len
        pos += hop
    # BUG FIX: the hop-based scan can leave up to hop-1 trailing samples
    # outside every window, so loud audio at the very end could be
    # mistaken for silence and cut. Also test the end-aligned frame.
    tail_pos = nframes - frame_len
    if tail_pos > 0 and tail_pos % hop != 0 and _is_loud(tail_pos):
        if first_loud is None:
            first_loud = tail_pos
        last_loud = nframes
    if first_loud is None:
        return samples, duration_ms, duration_ms
    pad = int(sr * margin_ms / 1000)
    start = max(0, first_loud - pad)
    end = min(nframes, last_loud + pad)
    trimmed = samples[start * nchannels:end * nchannels]
    return trimmed, duration_ms, (end - start) / sr * 1000
def step_trim(audio_dir, threshold_ratio, margin_ms, dry_run):
    """Trim leading/trailing silence from every .wav in *audio_dir*.

    Files are rewritten in place unless *dry_run* is set. Unreadable
    files are logged and skipped rather than aborting the batch.
    """
    print("\n" + "=" * 70)
    print(" STEP 2 / 3 β€” TRIM SILENCE")
    print("=" * 70)
    wav_files = collect_wav_files(audio_dir)
    if not wav_files:
        log.error("No .wav files found in %s", audio_dir)
        return
    n_total = len(wav_files)
    log.info("Found %d .wav files", n_total)
    log.info("Settings: threshold=%.3f, margin=%gms, dry_run=%s",
             threshold_ratio, margin_ms, dry_run)
    print("-" * 70)
    saved_ms = 0
    n_trimmed = 0
    for idx, path in enumerate(wav_files, 1):
        rel = os.path.relpath(path, audio_dir)
        try:
            samples, sr, sw, nch = read_wav_stdlib(path)
        except Exception as exc:
            log.error("[%d/%d] ERROR reading %s: %s", idx, n_total, rel, exc)
            continue
        trimmed, before_ms, after_ms = trim_silence(
            samples, sr, nch, threshold_ratio, margin_ms)
        cut = before_ms - after_ms
        if cut <= 1:
            # Below 1 ms of removable silence: leave the file untouched.
            log.info("[%d/%d] OK %s: %dms (no silence)",
                     idx, n_total, rel, int(before_ms))
            continue
        n_trimmed += 1
        saved_ms += cut
        tag = "DRY-RUN" if dry_run else "TRIMMED"
        log.info("[%d/%d] %s %s: %dms -> %dms (cut %dms)",
                 idx, n_total, tag, rel,
                 int(before_ms), int(after_ms), int(cut))
        if not dry_run:
            write_wav_stdlib(path, trimmed, sr, sw, nch)
    print("-" * 70)
    log.info("Trimmed %d/%d files, saved %.1fs total.",
             n_trimmed, n_total, saved_ms / 1000)
# ═════════════════════════════════════════════════════════════════════════════
# STEP 3 – VERIFY CUTS
# ═════════════════════════════════════════════════════════════════════════════
# Thresholds for the verification pass (step 3).
MIN_DURATION_MS = 50          # files shorter than this are flagged as suspiciously short
EDGE_CHECK_MS = 5             # window inspected at each end of the file
EDGE_AMPLITUDE_RATIO = 0.5    # edge level above this fraction of peak => abrupt cut
def _check_one_file(filepath, audio_dir):
    """Return (duration_ms, [warnings]) for one trimmed .wav file.

    Checks: readability, non-empty, minimum duration, non-silence, and
    abrupt start/end (edge level vs. peak). *audio_dir* is accepted for
    signature compatibility with the caller but not used by the checks.
    """
    try:
        samples, sr, sw, nch = read_wav_stdlib(filepath)
    except Exception as exc:
        return 0, [f"CORRUPT: {exc}"]
    issues = []
    nframes = len(samples) // nch
    if nframes == 0:
        return 0, ["EMPTY: 0 frames"]
    dur = nframes / sr * 1000
    if dur < MIN_DURATION_MS:
        issues.append(f"VERY SHORT: {dur:.0f}ms (< {MIN_DURATION_MS}ms)")
    mono = _to_mono_abs(samples, nch, nframes)
    peak = max(mono) if mono else 0
    if peak == 0:
        issues.append("SILENT: completely silent")
        return dur, issues
    edge_n = max(1, int(sr * EDGE_CHECK_MS / 1000))
    # High level right at either edge suggests the cut landed mid-sound.
    for name, word, segment in (("START", "first", mono[:edge_n]),
                                ("END", "last", mono[-edge_n:])):
        loudest = max(segment)
        if loudest > peak * EDGE_AMPLITUDE_RATIO:
            issues.append(
                f"ABRUPT {name}: {word} {EDGE_CHECK_MS}ms at "
                f"{loudest/peak*100:.0f}% of peak")
    return dur, issues
def step_verify(audio_dir):
    """Verify all trimmed .wav files for quality issues.

    Logs per-file results, then prints a summary plus a manual-review
    list of any flagged files. Read-only: nothing is modified.
    """
    print("\n" + "=" * 70)
    print(" STEP 3 / 3 β€” VERIFY CUTS")
    print("=" * 70)
    wav_files = collect_wav_files(audio_dir)
    if not wav_files:
        log.error("No .wav files found in %s", audio_dir)
        return
    n_total = len(wav_files)
    log.info("Verifying %d .wav files", n_total)
    print("-" * 70)
    problems = []
    elapsed_ms = 0
    for idx, path in enumerate(wav_files, 1):
        rel = os.path.relpath(path, audio_dir)
        dur, warns = _check_one_file(path, audio_dir)
        elapsed_ms += dur
        if not warns:
            log.info("[%d/%d] OK %s (%dms)", idx, n_total, rel, int(dur))
            continue
        problems.append((rel, dur, warns))
        log.warning("[%d/%d] WARNING %s (%dms)", idx, n_total, rel, int(dur))
        for msg in warns:
            log.warning(" -> %s", msg)
    print("=" * 70)
    log.info("Total files: %d | Duration: %.1fs", n_total, elapsed_ms / 1000)
    log.info("Files OK: %d | Flagged: %d", n_total - len(problems), len(problems))
    if not problems:
        print("\nAll files look good!")
        return
    print("\n--- FLAGGED FILES (review manually) ---")
    for rel, dur, warns in problems:
        print(f"\n {rel} ({dur:.0f}ms):")
        for msg in warns:
            print(f" - {msg}")
# ═════════════════════════════════════════════════════════════════════════════
# MAIN
# ═════════════════════════════════════════════════════════════════════════════
def main():
    """Parse CLI options and run the normalize -> trim -> verify pipeline."""
    ap = argparse.ArgumentParser(
        description="Audio pipeline: Normalize -> Trim silence -> Verify cuts",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # General options
    ap.add_argument(
        "--audio-dir", type=str, default=AUDIO_DIR,
        help="Root directory containing event sub-folders with .wav files "
             f"(default: {AUDIO_DIR})",
    )
    ap.add_argument(
        "--dry-run", action="store_true",
        help="Preview all steps without writing any files.",
    )
    # Step 1 options
    g_norm = ap.add_argument_group("Normalization (step 1)")
    g_norm.add_argument("--skip-normalize", action="store_true",
                        help="Skip the normalization step.")
    g_norm.add_argument("--target-db", type=float, default=-20.0,
                        help="Target level in dBFS (default: -20).")
    g_norm.add_argument("--mode", choices=["rms", "peak"], default="rms",
                        help="Normalization mode (default: rms).")
    # Step 2 options
    g_trim = ap.add_argument_group("Trimming (step 2)")
    g_trim.add_argument("--skip-trim", action="store_true",
                        help="Skip the silence trimming step.")
    g_trim.add_argument("--threshold", type=float, default=0.02,
                        help="Silence threshold as fraction of peak (default: 0.02).")
    g_trim.add_argument("--margin-ms", type=float, default=30,
                        help="Margin in ms to keep around the sound (default: 30).")
    # Step 3 options
    g_ver = ap.add_argument_group("Verification (step 3)")
    g_ver.add_argument("--skip-verify", action="store_true",
                       help="Skip the verification step.")
    args = ap.parse_args()
    audio_dir = args.audio_dir
    log.info("Audio directory: %s", audio_dir)
    if args.skip_normalize:
        log.info("Skipping normalization (--skip-normalize).")
    else:
        step_normalize(audio_dir, args.target_db, args.mode, args.dry_run)
    if args.skip_trim:
        log.info("Skipping trimming (--skip-trim).")
    else:
        step_trim(audio_dir, args.threshold, args.margin_ms, args.dry_run)
    if args.skip_verify:
        log.info("Skipping verification (--skip-verify).")
    else:
        step_verify(audio_dir)
    print("\n" + "=" * 70)
    log.info("Pipeline complete.")
    print("=" * 70)


if __name__ == "__main__":
    main()