"""
Audio Engine for Harmonic Catalyst

Synthesizes browser-playable WAV audio from MIDI note data.
Pure Python — numpy + wave, no external audio libraries required.
"""
import io
import wave

import numpy as np


class AudioEngine:
    """Renders MIDI note lists to 16-bit mono WAV bytes via additive synthesis.

    Two fixed "voices" are used: a warm left-hand (bass) voice and a
    brighter right-hand (chord) voice, each a sum of exponentially
    damped sine harmonics. All methods are static; the class is a
    namespace, not a stateful object.
    """

    # Output sample rate in Hz (mono, 16-bit PCM).
    SAMPLE_RATE = 44100

    # Voice specs: (amplitude, decay rate /s, ((harmonic multiple, relative level), ...)).
    # LH: warm, soft, slower decay — bass character.
    _LH_VOICE = (0.55, 1.8, ((1, 1.0), (2, 0.25)))
    # RH: brighter, more harmonic content, slightly faster decay — chord character.
    _RH_VOICE = (1.0, 2.2, ((1, 1.0), (2, 0.35), (3, 0.15)))

    @staticmethod
    def _note_to_freq(midi_note):
        """Convert a MIDI note number to frequency in Hz (A4 = note 69 = 440 Hz)."""
        return 440.0 * (2 ** ((midi_note - 69) / 12))

    @staticmethod
    def _render_voice(notes, t, amp, decay, harmonics):
        """Render one voice: damped sine harmonics summed over *notes*.

        Args:
            notes: iterable of MIDI note numbers.
            t: time-axis sample array (seconds).
            amp: overall envelope amplitude.
            decay: exponential decay rate (1/s).
            harmonics: ((multiple, relative level), ...) partials to add.

        Returns:
            float array, same shape as t.
        """
        out = np.zeros_like(t)
        env = amp * np.exp(-decay * t)  # shared envelope for every note in this voice
        for note in notes:
            freq = AudioEngine._note_to_freq(note)
            for mult, level in harmonics:
                out += env * level * np.sin(2 * np.pi * mult * freq * t)
        return out

    @staticmethod
    def _synthesize(lh_notes, rh_notes, duration_sec, sample_rate=SAMPLE_RATE):
        """Mix LH and RH note lists into one float waveform of *duration_sec*.

        Returns an un-normalized float array of length
        int(sample_rate * duration_sec); empty note lists yield silence.
        """
        n = int(sample_rate * duration_sec)
        t = np.linspace(0, duration_sec, n, endpoint=False)
        audio = AudioEngine._render_voice(lh_notes, t, *AudioEngine._LH_VOICE)
        audio += AudioEngine._render_voice(rh_notes, t, *AudioEngine._RH_VOICE)
        return audio

    @staticmethod
    def _to_wav_bytes(audio, sample_rate=SAMPLE_RATE):
        """Normalize *audio* and encode it as 16-bit mono WAV bytes.

        Peak-normalizes to 0.8 full scale (headroom against clipping);
        all-zero or empty input is passed through as silence.
        """
        # Guard audio.size: np.max on an empty array raises ValueError.
        peak = np.max(np.abs(audio)) if audio.size else 0.0
        if peak > 0:
            audio = audio / peak * 0.8
        pcm = (audio * 32767).astype(np.int16)
        buf = io.BytesIO()
        with wave.open(buf, 'wb') as wf:
            wf.setnchannels(1)   # mono
            wf.setsampwidth(2)   # 16-bit samples
            wf.setframerate(sample_rate)
            wf.writeframes(pcm.tobytes())
        return buf.getvalue()

    @staticmethod
    def chord_to_wav(lh_notes, rh_notes, beats=4, bpm=120):
        """WAV bytes for a single chord at given beat duration and tempo."""
        duration_sec = (beats / bpm) * 60
        # Cap at 4 seconds for per-chord preview to keep it snappy
        duration_sec = min(duration_sec, 4.0)
        audio = AudioEngine._synthesize(lh_notes, rh_notes, duration_sec)
        return AudioEngine._to_wav_bytes(audio)

    @staticmethod
    def progression_to_wav(progression_data, bpm=120):
        """WAV bytes for a full progression — all chords concatenated in sequence.

        Args:
            progression_data: iterable of dicts with optional keys
                'lh' / 'rh' (MIDI note lists) and 'beats' (default 4).
            bpm: tempo used to convert beats to seconds.

        Returns:
            WAV bytes; an empty progression yields one second of silence.
        """
        sample_rate = AudioEngine.SAMPLE_RATE
        segments = []
        for chord in progression_data:
            beats = chord.get('beats', 4)
            duration_sec = (beats / bpm) * 60
            seg = AudioEngine._synthesize(
                chord.get('lh', []), chord.get('rh', []), duration_sec, sample_rate
            )
            # Small silence gap between chords (20ms)
            gap = np.zeros(int(sample_rate * 0.02))
            segments.append(np.concatenate([seg, gap]))
        full = np.concatenate(segments) if segments else np.zeros(sample_rate)
        return AudioEngine._to_wav_bytes(full, sample_rate)