# audio-mastering-suite / visualization.py
# Source: Hugging Face upload by AnimalMonk ("Upload folder using
# huggingface_hub", revision d3d48b7, verified).
"""Before/after waveform and spectrum comparison plots."""
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
def _to_mono(audio):
"""Collapse to mono for plotting."""
if audio.ndim > 1 and audio.shape[1] > 1:
return audio.mean(axis=1)
return audio.ravel()
def _downsample_for_plot(signal, time, max_points=500_000):
"""Reduce sample count so matplotlib stays responsive."""
if len(signal) > max_points:
step = len(signal) // max_points
return signal[::step], time[::step]
return signal, time
def plot_waveform_comparison(original, mastered, sample_rate):
    """Create a stacked before/after waveform plot.

    Both panels share one time axis derived from the *original* length,
    so samples line up visually even if lengths differ slightly.

    Returns a matplotlib Figure.
    """
    fig, (ax_top, ax_bottom) = plt.subplots(2, 1, figsize=(8, 4), sharex=True)
    seconds = len(original) / sample_rate
    panels = (
        (ax_top, original, "Original", "#4a90d9"),
        (ax_bottom, mastered, "Mastered", "#d94a4a"),
    )
    for ax, audio, title, color in panels:
        t_axis = np.linspace(0, seconds, len(audio))
        trace = _to_mono(audio)
        trace, t_axis = _downsample_for_plot(trace, t_axis)
        ax.plot(t_axis, trace, color=color, linewidth=0.3)
        ax.set_ylabel("Amplitude")
        ax.set_title(title)
        ax.set_ylim(-1.05, 1.05)
    ax_bottom.set_xlabel("Time (seconds)")
    plt.tight_layout()
    return fig
def plot_spectrum_comparison(original, mastered, sample_rate):
    """Create a frequency spectrum comparison with shape-normalized overlay
    and a difference trace showing the processing's spectral impact.

    The mastered spectrum is level-aligned to the original so the plot
    compares spectral *shape*, not overall loudness (LUFS stats handle that).

    Returns a matplotlib Figure.
    """
    fig, (ax_spec, ax_diff) = plt.subplots(
        2, 1, figsize=(8, 5), height_ratios=[3, 1], sharex=True,
    )
    orig_mono = _to_mono(original)
    mast_mono = _to_mono(mastered)
    n_fft = 8192

    def avg_spectrum(signal, n_fft, sr):
        """Welch-style averaged magnitude spectrum in dB (≤ 100 windows)."""
        # Zero-pad inputs shorter than one FFT frame; previously the
        # truncated slice crashed on a shape mismatch against the window.
        if len(signal) < n_fft:
            signal = np.pad(signal, (0, n_fft - len(signal)))
        hop = n_fft // 2  # 50% overlap
        n_windows = max(1, (len(signal) - n_fft) // hop)
        taper = np.hanning(n_fft)  # hoisted: invariant across windows
        spectra = []
        for i in range(min(n_windows, 100)):  # cap the work on long files
            start = i * hop
            frame = signal[start : start + n_fft] * taper
            spectra.append(np.abs(np.fft.rfft(frame)))
        avg = np.mean(spectra, axis=0)
        freqs = np.fft.rfftfreq(n_fft, 1.0 / sr)
        avg_db = 20.0 * np.log10(avg + 1e-10)  # epsilon avoids log(0)
        return freqs, avg_db

    freqs_o, spec_o = avg_spectrum(orig_mono, n_fft, sample_rate)
    freqs_m, spec_m = avg_spectrum(mast_mono, n_fft, sample_rate)
    # --- Level-align mastered to original (remove overall loudness diff) ---
    # Use only the passband (100 Hz – 10 kHz) for alignment so the HPF/LPF
    # roll-offs at the extremes don't skew the offset.
    passband = (freqs_o >= 100) & (freqs_o <= 10000)
    level_offset = np.mean(spec_o[passband]) - np.mean(spec_m[passband])
    spec_m_aligned = spec_m + level_offset
    # --- Top: overlaid spectra (shape comparison) ---
    ax_spec.plot(freqs_o, spec_o, color="#4a90d9", alpha=0.7, linewidth=1,
                 label="Original")
    ax_spec.plot(freqs_m, spec_m_aligned, color="#d94a4a", alpha=0.7,
                 linewidth=1, label="Mastered (level-aligned)")
    ax_spec.set_ylabel("Magnitude (dB)")
    ax_spec.set_title("Spectral Shape Comparison")
    ax_spec.legend(loc="upper right", fontsize=8)
    ax_spec.grid(True, alpha=0.3)
    # --- Bottom: difference (mastered − original) ---
    diff = spec_m_aligned - spec_o
    ax_diff.plot(freqs_o, diff, color="#2ca02c", linewidth=1)
    ax_diff.axhline(0, color="gray", linewidth=0.5, linestyle="--")
    ax_diff.set_ylabel("Δ dB")
    ax_diff.set_xlabel("Frequency")
    ax_diff.set_title("Processing Difference (Mastered − Original)", fontsize=9)
    ax_diff.set_ylim(-6, 6)
    ax_diff.grid(True, alpha=0.3)
    # Shared x-axis settings. The lower x-limit is 20 Hz, so a 10 Hz tick
    # would never render — ticks start at 100 Hz.
    ax_diff.set_xscale("log")
    ax_diff.set_xlim(20, sample_rate / 2)
    ax_diff.set_xticks([100, 1000, 10000])
    ax_diff.set_xticklabels(["100 Hz", "1 kHz", "10 kHz"])
    plt.tight_layout()
    return fig