# Source: PerceptionLabPortable / app / nodes / audiobuffernode.py
# Uploaded via huggingface_hub (Aluode, commit 3bb804c verified)
import numpy as np
from PyQt6 import QtGui
import __main__
BaseNode = __main__.BaseNode
class AudioBufferNode(BaseNode):
    """Rolling buffer of the most recent scalar signal samples.

    Each step() shifts one incoming sample into a fixed-size FIFO buffer
    (oldest at index 0, newest at index -1) and publishes a snapshot on
    the 'buffer' output port, typed as 'spectrum' for downstream nodes.
    """
    NODE_CATEGORY = "Audio"
    NODE_COLOR = QtGui.QColor(50, 140, 255)

    def __init__(self, buffer_size=2048):
        """
        Args:
            buffer_size: number of samples retained in the rolling buffer.
        """
        super().__init__()
        self.node_title = "Audio Buffer"
        self.inputs = {
            'signal': 'signal'
        }
        self.outputs = {
            'buffer': 'spectrum'
        }
        self.buffer_size = buffer_size
        # Rolling sample buffer; float32 keeps memory modest at large sizes.
        self.buf = np.zeros(self.buffer_size, dtype=np.float32)
        # Storage for node outputs
        self.outputs_data = {}

    def step(self):
        """Consume one sample from the 'signal' input and append it."""
        # Read incoming signal (presumably a scalar; 'sum' blends multiple
        # upstream connections — TODO confirm against BaseNode).
        x = self.get_blended_input('signal', 'sum')
        if x is None:
            # No upstream connection / no data this tick: keep last output.
            return
        # Shift left by one and place the newest sample at the end.
        self.buf[:-1] = self.buf[1:]
        self.buf[-1] = float(x)
        # Publish a copy so downstream nodes cannot mutate our state.
        self.outputs_data['buffer'] = self.buf.copy()

    def get_output(self, port_name):
        """Return the last published value for *port_name*, or None."""
        return self.outputs_data.get(port_name, None)

    def get_display_image(self):
        """
        Renders the buffer as a simple waveform image, helpful for debugging.
        """
        import cv2
        disp = np.zeros((80, 256), dtype=np.uint8)
        # Resample the buffer down to the 256-pixel display width.
        idx = np.linspace(0, len(self.buf) - 1, 256).astype(int)
        # Map samples onto rows; clip so out-of-range amplitudes stay
        # inside the 80-row image instead of producing wild coordinates.
        sig = np.clip(self.buf[idx] * 35 + 40, 0, 79).astype(int)
        for i in range(1, 256):
            # int(): recent OpenCV releases reject numpy integer scalars.
            cv2.line(disp, (i - 1, int(sig[i - 1])), (i, int(sig[i])), 255, 1)
        # BUG FIX: QImage does not take ownership of the numpy buffer, and
        # `disp` is freed when this function returns, leaving the caller
        # with an image over dangling memory. .copy() makes the QImage own
        # a deep copy of the pixel data.
        return QtGui.QImage(
            disp.data,
            disp.shape[1],
            disp.shape[0],
            disp.shape[1],
            QtGui.QImage.Format.Format_Grayscale8
        ).copy()