import gc

import numpy as np
import pyarrow as pa
import sounddevice as sd
import torch
import whisper
from dora import Node
from pynput import keyboard
from pynput.keyboard import Events, Key

# Whisper "base" checkpoint, loaded once at startup (downloads on first run).
model = whisper.load_model("base")

# Microphone capture settings: Whisper expects 16 kHz mono audio, and its
# transcription window is at most 30 s long.
SAMPLE_RATE = 16000
MAX_DURATION = 30

policy_init = True  # NOTE(review): never read in this file — confirm before removing
audio_data = None  # most recent sd.rec() buffer, or None before any recording
node = Node()
|
def _transcribe_and_send(output_id, metadata):
    """Stop the recording, transcribe the captured audio, and publish the text.

    Shared by the alt_r ("text_llm") and ctrl_r ("text_policy") release
    handlers. Returns without sending when no recording was ever started.

    Args:
        output_id: dora output channel to publish the transcript on.
        metadata: metadata dict of the triggering dora event, forwarded as-is.
    """
    global audio_data
    sd.stop()
    if audio_data is None:
        return
    # int16 PCM -> float32 in [-1, 1), the input range Whisper expects.
    audio = audio_data.ravel().astype(np.float32) / 32768.0
    audio = whisper.pad_or_trim(audio)
    result = model.transcribe(audio, language="en")
    node.send_output(output_id, pa.array([result["text"]]), metadata)
    # Drop the consumed buffer so a spurious second release without a new
    # key press cannot re-publish stale audio.
    audio_data = None
    # Release transcription scratch memory between utterances.
    gc.collect()
    torch.cuda.empty_cache()


# Push-to-talk loop: pressing right-Alt or right-Ctrl starts a recording;
# releasing right-Alt routes the transcript to the LLM channel, releasing
# right-Ctrl routes it to the policy channel.
for dora_event in node:
    if dora_event["type"] != "INPUT":
        continue

    # Poll the keyboard for at most 1 s per dora tick.
    with keyboard.Events() as events:
        event = events.get(1.0)
        if event is None:
            continue

        if isinstance(event, Events.Press) and event.key in (Key.alt_r, Key.ctrl_r):
            # Start a non-blocking capture; it is stopped on key release
            # (or truncated at MAX_DURATION seconds).
            audio_data = sd.rec(
                int(SAMPLE_RATE * MAX_DURATION),
                samplerate=SAMPLE_RATE,
                channels=1,
                dtype=np.int16,
                blocking=False,
            )
        elif isinstance(event, Events.Release) and event.key == Key.alt_r:
            _transcribe_and_send("text_llm", dora_event["metadata"])
        elif isinstance(event, Events.Release) and event.key == Key.ctrl_r:
            _transcribe_and_send("text_policy", dora_event["metadata"])