Spaces:
Running
Running
File size: 4,796 Bytes
3bb804c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 |
"""
Frequency to MIDI Node - Converts a raw frequency signal into quantized
MIDI note number and velocity based on the 12-tone equal temperament scale.
Place this file in the 'nodes' folder
"""
import numpy as np
from PyQt6 import QtGui
import cv2
import math
import __main__
BaseNode = __main__.BaseNode
PA_INSTANCE = getattr(__main__, "PA_INSTANCE", None)
QtGui = __main__.QtGui
# Reference Frequency: A4 = 440 Hz (MIDI note 69), the 12-TET tuning standard.
A4_FREQ = 440.0
A4_MIDI = 69
# MIDI Note formula: N = 69 + 12 * log2(f / 440) — used by FreqToMidiNode below.
class FreqToMidiNode(BaseNode):
    """Converts a raw frequency signal into a quantized MIDI note number and
    velocity based on the 12-tone equal temperament scale (A4 = 440 Hz = MIDI 69).

    Inputs:
        frequency_in: normalized signal, mapped from [-1, 1] to 50-2000 Hz.
        amplitude_in: signal whose magnitude becomes the note velocity.
    Outputs:
        midi_note: nearest MIDI note number (0-127), gated by velocity.
        velocity: normalized velocity in [0.0, 1.0].
    """
    NODE_CATEGORY = "Transform"
    NODE_COLOR = QtGui.QColor(150, 50, 200)  # Musical Purple

    def __init__(self, midi_offset=0):
        super().__init__()
        self.node_title = "Freq to MIDI"
        self.inputs = {
            'frequency_in': 'signal',
            'amplitude_in': 'signal'
        }
        self.outputs = {
            'midi_note': 'signal',
            'velocity': 'signal'
        }
        self.midi_offset = int(midi_offset)  # Shifts the output keyboard range
        self.output_note = 0.0       # Last quantized note (float, for the signal graph)
        self.output_velocity = 0.0   # Last normalized velocity [0.0, 1.0]

    def _freq_to_midi(self, frequency):
        """Convert a frequency in Hz to the nearest integer MIDI note number.

        Returns 0 (off note) for non-positive or non-finite frequencies;
        otherwise the offset-shifted note clamped to the MIDI range [0, 127].
        """
        if frequency <= 0:
            return 0  # Off note
        try:
            # N = 69 + 12 * log2(f / 440)
            midi_note_float = A4_MIDI + 12 * math.log2(frequency / A4_FREQ)
            # Round to the nearest integer note, then apply the keyboard offset.
            midi_note = int(round(midi_note_float)) + self.midi_offset
        except (ValueError, OverflowError):
            # NaN/inf inputs (e.g. from a misbehaving upstream node) land here.
            return 0
        # Clamp to the valid MIDI range [0, 127]; return a plain int
        # (np.clip would leak an np.int64 into the signal graph).
        return max(0, min(127, midi_note))

    def step(self):
        """Read both inputs and refresh the cached note/velocity outputs."""
        freq_in = self.get_blended_input('frequency_in', 'sum')
        amp_in = self.get_blended_input('amplitude_in', 'sum')

        # Map the normalized input signal [-1, 1] to an audible range
        # of [50, 2000] Hz before quantizing to a note.
        if freq_in is not None:
            target_freq = (freq_in + 1.0) / 2.0 * 1950.0 + 50.0
            self.output_note = float(self._freq_to_midi(target_freq))

        # Map the amplitude signal to a normalized velocity [0.0, 1.0];
        # abs() treats negative excursions as volume.
        if amp_in is not None:
            self.output_velocity = float(np.clip(np.abs(amp_in), 0.0, 1.0))
        else:
            self.output_velocity = 0.0

    def get_output(self, port_name):
        """Return the current value for the requested output port."""
        if port_name == 'midi_note':
            # Gate the note: only emit when velocity exceeds the threshold.
            return self.output_note if self.output_velocity > 0.05 else 0.0
        if port_name == 'velocity':
            return self.output_velocity
        return None

    def get_display_image(self):
        """Render a small preview image showing the active note name and number."""
        w, h = 96, 48
        img = np.zeros((h, w, 3), dtype=np.uint8)
        note = int(self.output_note)
        # Note 0 is a C; MIDI convention puts note 60 at C4 (hence the -1).
        note_name_map = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
        note_name = note_name_map[note % 12]
        octave = note // 12 - 1

        vel_norm = self.output_velocity
        color_val = int(vel_norm * 255)
        if vel_norm > 0.05:
            # Active key: red/magenta fill for sharps, grayscale for naturals.
            is_sharp = '#' in note_name
            fill_color = (255, 0, color_val) if is_sharp else (color_val, color_val, color_val)
            text_color = (255, 255, 255) if is_sharp else (0, 0, 0)
            cv2.rectangle(img, (0, 0), (w, h), fill_color, -1)
        else:
            text_color = (100, 100, 100)

        # Draw the note label and the raw MIDI number.
        label = f"{note_name}{octave}"
        cv2.putText(img, label, (w//4, h//2), cv2.FONT_HERSHEY_SIMPLEX, 0.6, text_color, 2, cv2.LINE_AA)
        cv2.putText(img, f"MIDI: {note}", (w//4, h//2 + 18), cv2.FONT_HERSHEY_SIMPLEX, 0.4, text_color, 1, cv2.LINE_AA)
        img = np.ascontiguousarray(img)
        # .copy() detaches the QImage from the numpy buffer, which is only a
        # local and would be garbage-collected after this method returns,
        # leaving the QImage pointing at freed memory.
        return QtGui.QImage(img.data, w, h, 3*w, QtGui.QImage.Format.Format_BGR888).copy()

    def get_config_options(self):
        """Return the user-configurable options for this node."""
        return [
            ("Keyboard Offset (semitones)", "midi_offset", self.midi_offset, None),
        ]