projectlosangeles committed on
Commit
fa45593
·
verified ·
1 Parent(s): 0cb6753

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +261 -0
README.md CHANGED
@@ -26,5 +26,266 @@ size_categories:
26
 
27
  ***
28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  ### Project Los Angeles
30
  ### Tegridy Code 2026
 
26
 
27
  ***
28
 
29
+ ## How to use (Linux/Ubuntu)
30
+
31
+ ### 1) Unzip the dataset into some folder
32
+
33
+ ### 2) Setup environment
34
+
35
+ ```sh
36
+ sudo apt install libsndfile1 ffmpeg libjack-jackd2-dev -y
37
+ pip install soundfile numpy scipy tqdm
38
+ ```
39
+
40
+ ### 3) Build and install [sfizz](https://github.com/sfztools/sfizz)
41
+
42
+ ```sh
43
+ sudo apt install cmake g++ git libsndfile1-dev libjack-jackd2-dev \
44
+ libsamplerate0-dev libboost-dev libzstd-dev \
45
+ libcurl4-openssl-dev libx11-dev -y
46
+
47
+ git clone https://github.com/sfztools/sfizz.git
48
+ cd sfizz
49
+ mkdir build
50
+ cd build
51
+ cmake .. -DCMAKE_BUILD_TYPE=Release
52
+ make -j$(nproc)
53
+ sudo make install
54
+ sudo ldconfig
55
+ ```
56
+
57
+ ### 4) Use the following python script to render your MIDIs
58
+
59
+ ```python
60
+ #!/usr/bin/env python3
61
+ """
62
+ render_and_mix.py
63
+
64
+ Render MIDI+SFZ pairs using sfizz_render, mix them, apply basic mastering (limiter + normalize),
65
+ and optionally run ffmpeg loudness normalization.
66
+
67
+ Dependencies:
68
+ - sfizz_render (system binary)
69
+ - libsndfile (system)
70
+ - Python packages: soundfile, numpy, scipy, tqdm
71
+ - Optional: ffmpeg (for LUFS normalization)
72
+ """
73
+
74
+ import os
75
+ import shutil
76
+ import subprocess
77
+ import tempfile
78
+ from concurrent.futures import ThreadPoolExecutor, as_completed
79
+ from pathlib import Path
80
+ from typing import Dict, Optional
81
+
82
+ import numpy as np
83
+ import soundfile as sf
84
+ from scipy.signal import fftconvolve
85
+ from tqdm import tqdm
86
+
87
+ # ---------- Utility audio functions ----------
88
+
89
def db_to_linear(db: float) -> float:
    """Convert a decibel gain value to a linear amplitude factor."""
    exponent = db / 20.0
    return 10.0 ** exponent
91
+
92
def linear_to_db(x: float) -> float:
    """Convert a linear amplitude to decibels, flooring at 1e-12 to avoid log(0)."""
    floored = np.maximum(x, 1e-12)
    return 20.0 * np.log10(floored)
94
+
95
def apply_pan(stereo: np.ndarray, pan: float) -> np.ndarray:
    """Apply equal-power panning to a stereo buffer, in place.

    *pan* ranges from -1.0 (hard left) through 0.0 (center, ~-3 dB per
    channel) to +1.0 (hard right). The buffer is modified in place and
    also returned for convenience.
    """
    angle = (pan + 1) * (np.pi / 4)
    stereo[:, 0] *= np.cos(angle)
    stereo[:, 1] *= np.sin(angle)
    return stereo
102
+
103
def ensure_stereo(arr: np.ndarray) -> np.ndarray:
    """Coerce an audio buffer to exactly two channels.

    Mono input (1-D or single-column) is duplicated into both channels;
    anything wider than stereo is truncated to its first two channels.
    """
    if arr.ndim == 1:
        return np.column_stack((arr, arr))
    if arr.shape[1] == 1:
        return np.hstack((arr, arr))
    return arr[:, :2]
109
+
110
def soft_limiter(signal: np.ndarray, threshold: float = 0.98, release: float = 0.01, sample_rate: int = 48000) -> np.ndarray:
    """Tanh soft-clip *signal* so output magnitude stays below *threshold*.

    Computes ``threshold * tanh(signal / threshold)``: nearly transparent at
    small amplitudes, saturating smoothly toward +/-threshold for loud peaks.

    Args:
        signal: Audio samples (any shape); the input array is not modified.
        threshold: Saturation ceiling as a linear amplitude (0 < threshold <= 1).
        release: Unused; kept for interface compatibility — the clipper is
            stateless, so there is no release smoothing to apply.
        sample_rate: Unused; kept for interface compatibility.

    Returns:
        A new array with the same shape as *signal*.
    """
    # The original copied `signal` first, but np.tanh already allocates a
    # fresh output array, so that copy was pure overhead with no effect.
    scale = 1.0 / threshold
    return np.tanh(signal * scale) / scale
117
+
118
def normalize_peak(signal: np.ndarray, target_dbfs: float = -1.0) -> np.ndarray:
    """Scale *signal* so its absolute peak sits at *target_dbfs* dBFS.

    Silent input (all-zero) is returned unchanged to avoid division by zero.
    Returns a new, scaled array otherwise.
    """
    peak = np.max(np.abs(signal))
    if peak <= 0:
        return signal
    # 10 ** (dB / 20) converts the dBFS target into a linear amplitude
    # (same arithmetic the module's db_to_linear helper performs).
    target_lin = 10.0 ** (target_dbfs / 20.0)
    return signal * (target_lin / peak)
125
+
126
+ # ---------- sfizz_render wrapper ----------
127
+
128
def find_sfizz_render() -> Optional[str]:
    """Locate the sfizz_render executable on PATH, trying known aliases.

    Returns the path of the first alias that resolves, or None if the
    binary is not installed.
    """
    candidates = ("sfizz_render", "sfizz-render", "sfizz_render.exe")
    hits = (shutil.which(candidate) for candidate in candidates)
    return next((found for found in hits if found), None)
135
+
136
def render_with_sfizz(sfizz_bin: str, midi_path: str, sfz_path: str, out_wav: str,
                      sample_rate: int = 48000, quality: int = 3, polyphony: int = 256,
                      use_eot: bool = True, verbose: bool = False) -> None:
    """Invoke the sfizz_render CLI to render one MIDI file with one SFZ instrument.

    Writes the rendered audio to *out_wav*. Raises
    subprocess.CalledProcessError if sfizz_render exits non-zero.
    """
    cmd = [sfizz_bin]
    cmd += ["--midi", str(midi_path)]
    cmd += ["--sfz", str(sfz_path)]
    cmd += ["--wav", str(out_wav)]
    cmd += ["--samplerate", str(sample_rate)]
    cmd += ["--quality", str(quality)]
    cmd += ["--polyphony", str(polyphony)]
    if use_eot:
        cmd += ["--use-eot"]
    if verbose:
        cmd += ["--verbose"]
    # check=True raises on a non-zero exit status so callers see failures.
    subprocess.run(cmd, check=True)
154
+
155
+ # ---------- Main render and mix function ----------
156
+
157
def render_and_mix(
    midi_sfz_map: Dict[str, str],
    out_path: str,
    *,
    sample_rate: int = 48000,
    quality: int = 3,
    polyphony: int = 256,
    track_options: Optional[Dict[str, Dict]] = None,
    normalize_lufs: Optional[float] = None,
    use_eot: bool = True,
    workers: int = 2,
    verbose: bool = False
) -> None:
    """
    Render each MIDI->SFZ pair, mix, post-process, and write final WAV to out_path.

    Args:
        midi_sfz_map: Maps a MIDI file path to the SFZ instrument file used
            to render it. Each pair becomes one track in the mix.
        out_path: Destination path for the final mixed WAV file.
        sample_rate: Target sample rate in Hz; tracks rendered at another
            rate are resampled to match.
        quality: Passed to sfizz_render's --quality flag.
        polyphony: Passed to sfizz_render's --polyphony flag.
        track_options: Optional per-MIDI-path dict; recognized keys are
            "gain_db" (float, default 0.0) and "pan" (-1..+1, default 0.0).
        normalize_lufs: If not None, run ffmpeg's loudnorm filter to this
            integrated-loudness target instead of a plain move.
        use_eot: Passes --use-eot to sfizz_render (presumably stops at the
            MIDI end-of-track marker — confirm against sfizz_render docs).
        workers: Number of parallel render threads.
        verbose: Passes --verbose to sfizz_render.

    Raises:
        FileNotFoundError: If sfizz_render is not on PATH, any input
            MIDI/SFZ file is missing, or ffmpeg is requested but absent.
        subprocess.CalledProcessError: If sfizz_render or ffmpeg fails.
    """
    sfizz_bin = find_sfizz_render()
    if not sfizz_bin:
        raise FileNotFoundError("sfizz_render binary not found in PATH. Install sfizz-render first.")

    # Scratch directory for per-track renders and the pre-loudnorm mix.
    tmpdir = Path(tempfile.mkdtemp(prefix="sfizz_render_"))
    rendered_files = {}  # MIDI path -> rendered WAV path

    # Render in parallel: sfizz_render is an external process, so threads
    # (not processes) are enough to overlap the work.
    with ThreadPoolExecutor(max_workers=workers) as ex:
        futures = {}
        for midi, sfz in midi_sfz_map.items():
            midi_p = Path(midi)
            sfz_p = Path(sfz)
            # Validate inputs up front so a typo fails before any rendering.
            if not midi_p.exists():
                raise FileNotFoundError(f"MIDI file not found: {midi}")
            if not sfz_p.exists():
                raise FileNotFoundError(f"SFZ file not found: {sfz}")
            out_wav = tmpdir / (midi_p.stem + "_" + sfz_p.stem + ".wav")
            futures[ex.submit(render_with_sfizz, sfizz_bin, str(midi_p), str(sfz_p), str(out_wav),
                              sample_rate, quality, polyphony, use_eot, verbose)] = (midi, str(out_wav))

        # Wait and collect
        for fut in tqdm(as_completed(futures), total=len(futures), desc="Rendering"):
            midi_key, wav_path = futures[fut]
            fut.result()  # will raise if render failed
            rendered_files[midi_key] = wav_path

    # Load and align: read every rendered track, resample to the target
    # rate if needed, force stereo, and record the longest track length.
    tracks = []
    max_len = 0
    for midi_key, wav_path in rendered_files.items():
        data, sr = sf.read(wav_path, always_2d=True)
        if sr != sample_rate:
            # resample if needed (simple linear resample)
            import math
            ratio = sample_rate / sr
            new_len = int(math.ceil(data.shape[0] * ratio))
            # use scipy.signal.resample for decent quality
            from scipy.signal import resample
            data = resample(data, new_len, axis=0)
        data = ensure_stereo(data)
        tracks.append((midi_key, data))
        if data.shape[0] > max_len:
            max_len = data.shape[0]

    # Prepare final mix buffer sized to the longest track.
    mix = np.zeros((max_len, 2), dtype=np.float32)

    # Apply per-track options and mix (sum) into the buffer.
    for midi_key, data in tracks:
        opts = (track_options or {}).get(midi_key, {})
        gain_db = float(opts.get("gain_db", 0.0))
        pan = float(opts.get("pan", 0.0))
        gain_lin = db_to_linear(gain_db)
        # pad to max_len with silence so all tracks align sample-for-sample
        pad_len = max_len - data.shape[0]
        if pad_len > 0:
            data = np.vstack([data, np.zeros((pad_len, 2), dtype=data.dtype)])
        data = data.astype(np.float32) * gain_lin
        data = apply_pan(data, pan)
        mix[:data.shape[0], :] += data

    # Basic safety: prevent NaNs/Infs from propagating into the output file.
    mix = np.nan_to_num(mix, nan=0.0, posinf=0.0, neginf=0.0)

    # Apply soft limiter then peak-normalize to -1 dBFS.
    mix = soft_limiter(mix, threshold=0.98, sample_rate=sample_rate)
    mix = normalize_peak(mix, target_dbfs=-1.0)

    # Write intermediate 24-bit file; ffmpeg (if used) reads from it.
    intermediate = tmpdir / "mixed_intermediate.wav"
    sf.write(str(intermediate), mix, samplerate=sample_rate, subtype="PCM_24")

    # Optional LUFS normalization via ffmpeg loudnorm
    final_out = Path(out_path)
    if normalize_lufs is not None:
        ffmpeg = shutil.which("ffmpeg")
        if not ffmpeg:
            raise FileNotFoundError("ffmpeg not found but normalize_lufs requested.")
        # two-pass loudnorm recommended; here we do a single-pass approximate target
        cmd = [
            ffmpeg, "-y", "-i", str(intermediate),
            "-af", f"loudnorm=I={normalize_lufs}:TP=-1.5:LRA=11",
            "-ar", str(sample_rate),
            "-ac", "2",
            "-c:a", "pcm_s24le",
            str(final_out)
        ]
        subprocess.run(cmd, check=True)
    else:
        # move intermediate to final
        shutil.move(str(intermediate), str(final_out))

    # cleanup: best-effort removal of the scratch dir; NOTE(review) — if an
    # earlier step raises, tmpdir is leaked (no try/finally around the body).
    try:
        shutil.rmtree(tmpdir)
    except Exception:
        pass
271
+
272
+ # ---------- Example usage ----------
273
if __name__ == "__main__":
    # Demo: render two MIDI parts, each with its own SFZ instrument, into one mix.
    demo_pairs = {
        "midi/drums.mid": "sfz/drumkit.sfz",
        "midi/piano.mid": "sfz/grand_piano.sfz",
    }
    # Per-track mix settings: both attenuated slightly, piano nudged left.
    demo_track_options = {
        "midi/drums.mid": {"gain_db": -1.5, "pan": 0.0},
        "midi/piano.mid": {"gain_db": -3.0, "pan": -0.1},
    }
    render_and_mix(
        demo_pairs,
        "final_mix.wav",
        sample_rate=48000,
        quality=3,
        polyphony=256,
        track_options=demo_track_options,
        normalize_lufs=-14.0,
        use_eot=True,
        workers=2,
        verbose=False,
    )
285
+
286
+ ```
287
+
288
+ ***
289
+
290
  ### Project Los Angeles
291
  ### Tegridy Code 2026