---
license: cc-by-nc-sa-4.0
task_categories:
- audio-to-audio
language:
- en
tags:
- SFZ
- SFZ-Instruments
- Instruments
- MIDI
- MIDI-Instruments
pretty_name: sfzinstruments
size_categories:
- n<1K
---
# SFZ Instruments
A selection of open-source musical instruments that use the SFZ format.
## Attribution

All SFZ instruments were sourced from [sfzinstruments](https://github.com/sfzinstruments).

## How to use (Linux/Ubuntu)
1) Unzip the dataset into some folder
2) Setup environment
!sudo apt install libsndfile1 ffmpeg libjack-jackd2-dev -y
!pip install soundfile numpy scipy tqdm
3) Build and install sfizz
!sudo apt install cmake g++ git libsndfile1-dev libjack-jackd2-dev \
libsamplerate0-dev libboost-dev libzstd-dev \
libcurl4-openssl-dev libx11-dev -y
!git clone https://github.com/sfztools/sfizz.git
%cd sfizz
!mkdir build
%cd build
!cmake .. -DCMAKE_BUILD_TYPE=Release
!make -j$(nproc)
!sudo make install
!sudo ldconfig
4) Use the following Python script to render your MIDIs
#!/usr/bin/env python3
"""
render_and_mix.py
Render MIDI+SFZ pairs using sfizz_render, mix them, apply basic mastering (limiter + normalize),
and optionally run ffmpeg loudness normalization.
Dependencies:
- sfizz_render (system binary)
- libsndfile (system)
- Python packages: soundfile, numpy, scipy, tqdm
- Optional: ffmpeg (for LUFS normalization)
"""
import os
import shutil
import subprocess
import tempfile
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import Dict, Optional
import numpy as np
import soundfile as sf
from scipy.signal import fftconvolve
from tqdm import tqdm
# ---------- Utility audio functions ----------
def db_to_linear(db: float) -> float:
    """Convert a decibel (dBFS) value to a linear amplitude multiplier."""
    exponent = db / 20.0
    return 10.0 ** exponent
def linear_to_db(x: float) -> float:
    """Convert a linear amplitude to decibels, flooring at 1e-12 to avoid log(0)."""
    clipped = np.maximum(x, 1e-12)
    return 20.0 * np.log10(clipped)
def apply_pan(stereo: np.ndarray, pan: float) -> np.ndarray:
    """Apply an equal-power pan to a stereo buffer, in place.

    pan ranges from -1.0 (hard left) to +1.0 (hard right); 0.0 is center.
    The input array is modified and also returned for chaining.
    """
    angle = (pan + 1) * (np.pi / 4)
    stereo[:, 0] *= np.cos(angle)
    stereo[:, 1] *= np.sin(angle)
    return stereo
def ensure_stereo(arr: np.ndarray) -> np.ndarray:
    """Coerce an audio array to exactly two channels.

    Mono (1-D or single-column) input is duplicated into both channels;
    arrays with more than two channels are truncated to the first two.
    """
    if arr.ndim == 1:
        # duplicate the mono signal into two identical channels
        return np.stack((arr, arr), axis=-1)
    n_channels = arr.shape[1]
    if n_channels == 1:
        return np.repeat(arr, 2, axis=1)
    # keep only the first two channels
    return arr[:, :2]
def soft_limiter(signal: np.ndarray, threshold: float = 0.98, release: float = 0.01, sample_rate: int = 48000) -> np.ndarray:
    """Tanh-style soft clip bounding the output magnitude below `threshold`.

    NOTE(review): `release` and `sample_rate` are currently unused — kept
    for interface compatibility with callers that pass them.
    """
    inv_threshold = 1.0 / threshold
    return np.tanh(signal * inv_threshold) / inv_threshold
def normalize_peak(signal: np.ndarray, target_dbfs: float = -1.0) -> np.ndarray:
    """Scale the signal so its absolute peak sits at `target_dbfs`.

    Silent (all-zero) input is returned unchanged to avoid division by zero.
    """
    peak = np.max(np.abs(signal))
    if peak <= 0:
        return signal
    # dB -> linear conversion inlined: 10^(dB/20)
    target_lin = 10.0 ** (target_dbfs / 20.0)
    return signal * (target_lin / peak)
# ---------- sfizz_render wrapper ----------
def find_sfizz_render() -> Optional[str]:
    """Locate the sfizz_render executable on PATH, trying known aliases.

    Returns the absolute path of the first match, or None if absent.
    """
    candidates = ("sfizz_render", "sfizz-render", "sfizz_render.exe")
    for candidate in candidates:
        found = shutil.which(candidate)
        if found:
            return found
    return None
def render_with_sfizz(sfizz_bin: str, midi_path: str, sfz_path: str, out_wav: str,
                      sample_rate: int = 48000, quality: int = 3, polyphony: int = 256,
                      use_eot: bool = True, verbose: bool = False) -> None:
    """Render one MIDI file through one SFZ instrument via sfizz_render.

    Raises subprocess.CalledProcessError if the renderer exits non-zero.
    """
    args = [sfizz_bin,
            "--midi", str(midi_path),
            "--sfz", str(sfz_path),
            "--wav", str(out_wav),
            "--samplerate", str(sample_rate),
            "--quality", str(quality),
            "--polyphony", str(polyphony)]
    # optional flags are appended in the same order as before
    for flag, enabled in (("--use-eot", use_eot), ("--verbose", verbose)):
        if enabled:
            args.append(flag)
    subprocess.run(args, check=True)
# ---------- Main render and mix function ----------
def render_and_mix(
    midi_sfz_map: Dict[str, str],
    out_path: str,
    *,
    sample_rate: int = 48000,
    quality: int = 3,
    polyphony: int = 256,
    track_options: Optional[Dict[str, Dict]] = None,
    normalize_lufs: Optional[float] = None,
    use_eot: bool = True,
    workers: int = 2,
    verbose: bool = False
) -> None:
    """
    Render each MIDI->SFZ pair with sfizz_render, mix the stems, apply a
    soft limiter and peak normalization, and write the final WAV to out_path.

    Parameters
    ----------
    midi_sfz_map : mapping of MIDI file path -> SFZ instrument path.
    out_path : destination WAV file path.
    sample_rate, quality, polyphony, use_eot, verbose : forwarded to sfizz_render.
    track_options : optional per-MIDI dict with "gain_db" and "pan" keys.
    normalize_lufs : if given, run a single-pass ffmpeg loudnorm at this LUFS target.
    workers : number of concurrent render jobs.

    Raises
    ------
    FileNotFoundError : sfizz_render/ffmpeg missing, or an input file missing.
    subprocess.CalledProcessError : renderer or ffmpeg exited non-zero.
    """
    sfizz_bin = find_sfizz_render()
    if not sfizz_bin:
        raise FileNotFoundError("sfizz_render binary not found in PATH. Install sfizz-render first.")
    tmpdir = Path(tempfile.mkdtemp(prefix="sfizz_render_"))
    # FIX: the temp dir must be removed even when rendering/mixing raises,
    # otherwise every failed run leaks a directory full of WAV stems.
    try:
        rendered_files = {}
        # Render all stems in parallel.
        with ThreadPoolExecutor(max_workers=workers) as ex:
            futures = {}
            for midi, sfz in midi_sfz_map.items():
                midi_p = Path(midi)
                sfz_p = Path(sfz)
                if not midi_p.exists():
                    raise FileNotFoundError(f"MIDI file not found: {midi}")
                if not sfz_p.exists():
                    raise FileNotFoundError(f"SFZ file not found: {sfz}")
                out_wav = tmpdir / (midi_p.stem + "_" + sfz_p.stem + ".wav")
                futures[ex.submit(render_with_sfizz, sfizz_bin, str(midi_p), str(sfz_p), str(out_wav),
                                  sample_rate, quality, polyphony, use_eot, verbose)] = (midi, str(out_wav))
            for fut in tqdm(as_completed(futures), total=len(futures), desc="Rendering"):
                midi_key, wav_path = futures[fut]
                fut.result()  # re-raises any render failure
                rendered_files[midi_key] = wav_path
        # Load stems, resampling and widening to stereo as needed.
        tracks = []
        max_len = 0
        for midi_key, wav_path in rendered_files.items():
            data, sr = sf.read(wav_path, always_2d=True)
            if sr != sample_rate:
                # sfizz_render should already honor --samplerate; this is a safety net.
                from scipy.signal import resample
                new_len = int(np.ceil(data.shape[0] * (sample_rate / sr)))
                data = resample(data, new_len, axis=0)
            data = ensure_stereo(data)
            tracks.append((midi_key, data))
            max_len = max(max_len, data.shape[0])
        # Mix with per-track gain/pan into a single stereo buffer.
        mix = np.zeros((max_len, 2), dtype=np.float32)
        for midi_key, data in tracks:
            opts = (track_options or {}).get(midi_key, {})
            gain_lin = db_to_linear(float(opts.get("gain_db", 0.0)))
            pan = float(opts.get("pan", 0.0))
            # zero-pad shorter stems so all tracks align to max_len
            pad_len = max_len - data.shape[0]
            if pad_len > 0:
                data = np.vstack([data, np.zeros((pad_len, 2), dtype=data.dtype)])
            data = data.astype(np.float32) * gain_lin
            data = apply_pan(data, pan)
            mix += data
        # Guard against NaN/Inf from pathological stems, then master.
        mix = np.nan_to_num(mix, nan=0.0, posinf=0.0, neginf=0.0)
        mix = soft_limiter(mix, threshold=0.98, sample_rate=sample_rate)
        mix = normalize_peak(mix, target_dbfs=-1.0)
        # Write the mastered mix to a temp file before final placement.
        intermediate = tmpdir / "mixed_intermediate.wav"
        sf.write(str(intermediate), mix, samplerate=sample_rate, subtype="PCM_24")
        final_out = Path(out_path)
        if normalize_lufs is not None:
            ffmpeg = shutil.which("ffmpeg")
            if not ffmpeg:
                raise FileNotFoundError("ffmpeg not found but normalize_lufs requested.")
            # Single-pass loudnorm approximation; two-pass would be more accurate.
            cmd = [
                ffmpeg, "-y", "-i", str(intermediate),
                "-af", f"loudnorm=I={normalize_lufs}:TP=-1.5:LRA=11",
                "-ar", str(sample_rate),
                "-ac", "2",
                "-c:a", "pcm_s24le",
                str(final_out)
            ]
            subprocess.run(cmd, check=True)
        else:
            # no loudness pass requested: just move the mix into place
            shutil.move(str(intermediate), str(final_out))
    finally:
        # Best-effort cleanup; must never mask an in-flight exception.
        shutil.rmtree(tmpdir, ignore_errors=True)
# ---------- Example usage ----------
if __name__ == "__main__":
    # Example mapping: two MIDI files each with their SFZ instrument
    mapping = {
        "midi/drums.mid": "sfz/drumkit.sfz",
        "midi/piano.mid": "sfz/grand_piano.sfz",
    }
    # Per-track mixing options, keyed by the same MIDI paths as `mapping`:
    # "gain_db" is a dB offset applied before panning; "pan" is -1..+1.
    track_opts = {
        "midi/drums.mid": {"gain_db": -1.5, "pan": 0.0},
        "midi/piano.mid": {"gain_db": -3.0, "pan": -0.1},
    }
    # Render both stems, mix them, and loudness-normalize to -14 LUFS
    # (requires ffmpeg on PATH because normalize_lufs is set).
    render_and_mix(mapping, "final_mix.wav", sample_rate=48000, quality=3, polyphony=256,
                   track_options=track_opts, normalize_lufs=-14.0, use_eot=True, workers=2, verbose=False)