Commit ffc2aa4
Parent(s): 3f09bdf

Use whisper instead of keyboard

Files changed:
- graphs/dataflow_robot_vlm.yml +6 -21
- operators/idefics2_op.py +17 -47
- operators/keyboard_op.py +0 -65
- operators/microphone_op.py +0 -32
- operators/robot.py +32 -17
- operators/vlm_op.py +0 -273
- operators/whisper_op copy.py +0 -25
- operators/whisper_op.py +30 -5
graphs/dataflow_robot_vlm.yml
CHANGED

@@ -6,8 +6,7 @@ nodes:
     inputs:
       image: webcam/image
       assistant_message: vlm/assistant_message
-
-      user_message: keyboard/submitted
+      user_message: whisper/text
 
   - id: vlm
     operator:
@@ -16,7 +15,7 @@ nodes:
       image:
         source: webcam/image
         queue_size: 1
-      instruction: keyboard/submitted
+      instruction: whisper/text
       control_reply: robot/control_reply
     outputs:
       - assistant_message
@@ -28,7 +27,8 @@ nodes:
     conda_env: robomaster
     inputs:
       tick: dora/timer/millis/750
-      control: keyboard/submitted
+      control: whisper/text
+      assistant_message: vlm/assistant_message
     outputs:
       - control_reply
 
@@ -38,25 +38,10 @@ nodes:
     outputs:
       - image
 
-  - id: keyboard
-    custom:
-      source: ../operators/keyboard_op.py
-      outputs:
-        - buffer
-        - submitted
-
   - id: whisper
     operator:
       python: ../operators/whisper_op.py
     inputs:
-      audio: microphone/audio
-    outputs:
-      - text
-
-  - id: microphone
-    operator:
-      python: ../operators/microphone_op.py
-    inputs:
-      record: keyboard/submitted
+      audio: dora/timer/millis/500
     outputs:
-      - audio
+      - text
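Net effect of the rewiring: the single whisper/text stream replaces keyboard/submitted everywhere, feeding user_message on the first node, instruction on vlm, and control on robot, while the whisper node itself is polled by a 500 ms timer instead of a dedicated microphone node. As a hedged sketch (illustrative, not part of this commit), a Dora operator dispatches on dora_event["id"], which is how one upstream output can serve differently named inputs on several nodes:

import pyarrow as pa
from dora import DoraStatus


class Operator:
    """Illustrative consumer: a node sees upstream streams under its own input names."""

    def on_event(self, dora_event, send_output) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            if dora_event["id"] == "control":
                # On the robot node, whisper/text arrives under the input name "control".
                print("heard:", dora_event["value"][0].as_py(), flush=True)
            elif dora_event["id"] == "tick":
                # dora/timer/millis/750 delivers periodic tick events.
                send_output("control_reply", pa.array([0]), dora_event["metadata"])
        return DoraStatus.CONTINUE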
operators/idefics2_op.py
CHANGED

@@ -27,9 +27,9 @@ def speak(text):
 
 class Operator:
     def __init__(self):
-        self.completed = True
         self.instruction = "What is in the image?"
         self.last_message = ""
+        self.image = None
 
     def on_event(
         self,
@@ -38,54 +38,24 @@ class Operator:
     ) -> DoraStatus:
         if dora_event["type"] == "INPUT":
             if dora_event["id"] == "image":
-                if self.completed:
-                    image = (
-                        dora_event["value"]
-                        .to_numpy()
-                        .reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
-                        .copy()
-                    )
-                    cv2.imshow("frame2", image)
-                    if cv2.waitKey(1) & 0xFF == ord("q"):
-                        return DoraStatus.CONTINUE
-                    output = ask_vlm(image, self.instruction)
-                    cv2.putText(
-                        image,
-                        output,
-                        (20, 14 + 15 * 25),
-                        FONT,
-                        0.5,
-                        (190, 250, 0),
-                        2,
-                    )
-
-                    if self.last_message != output:
-                        speak(output)
-                    print("response: ", output, flush=True)
-                    send_output(
-                        "assistant_message",
-                        pa.array([output]),
-                        dora_event["metadata"],
-                    )
-
-                    # stream.feed(output)
-
-                    # stream.play()
-                    self.last_message = output
-                    self.completed = False
-                else:
-                    print("Command not complete", flush=True)
+                self.image = (
+                    dora_event["value"]
+                    .to_numpy()
+                    .reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
+                )
             elif dora_event["id"] == "instruction":
                 self.instruction = dora_event["value"][0].as_py()
                 print("instructions: ", self.instruction, flush=True)
-
-
-
-
-
-
-
-
-
+
+                if self.image is not None:
+                    output = ask_vlm(self.image, self.instruction)
+                    speak(output)
+                    print("response: ", output, flush=True)
+                    send_output(
+                        "assistant_message",
+                        pa.array([output]),
+                        dora_event["metadata"],
                     )
+
+                    self.last_message = output
         return DoraStatus.CONTINUE
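With the diff above, inference moves from the image branch to the instruction branch: incoming frames only refresh self.image, and the VLM runs once per spoken instruction against the latest cached frame. A hedged smoke test of that ordering (hypothetical harness, not in the repo; CAMERA_HEIGHT, CAMERA_WIDTH, ask_vlm, and speak are the module's own names):

import numpy as np
import pyarrow as pa

# Hypothetical harness: cache one frame, then let an instruction trigger
# ask_vlm exactly once. `print` stands in for send_output and receives
# ("assistant_message", <pa.array>, <metadata>).
op = Operator()
frame = np.zeros(CAMERA_HEIGHT * CAMERA_WIDTH * 3, dtype=np.uint8)
op.on_event(
    {"type": "INPUT", "id": "image", "value": pa.array(frame), "metadata": {}},
    print,
)
op.on_event(
    {
        "type": "INPUT",
        "id": "instruction",
        "value": pa.array(["describe the scene"]),
        "metadata": {},
    },
    print,
)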
operators/keyboard_op.py
DELETED

@@ -1,65 +0,0 @@
-from pynput import keyboard
-from pynput.keyboard import Key, Events
-import pyarrow as pa
-from dora import Node
-
-
-node = Node()
-buffer_text = ""
-ctrl = False
-submitted_text = []
-cursor = 0
-
-NODE_TOPIC = ["record", "send", "ask", "change"]
-
-with keyboard.Events() as events:
-    while True:
-        dora_event = node.next(0.01)
-        if (
-            dora_event is not None
-            and dora_event["type"] == "INPUT"
-            and dora_event["id"] == "recording"
-        ):
-            buffer_text += dora_event["value"][0].as_py()
-            node.send_output("buffer", pa.array([buffer_text]))
-            continue
-
-        event = events.get(1.0)
-        if event is not None and isinstance(event, Events.Press):
-            if hasattr(event.key, "char"):
-                cursor = 0
-                buffer_text += event.key.char
-                node.send_output("buffer", pa.array([buffer_text]))
-            else:
-                if event.key == Key.backspace:
-                    buffer_text = buffer_text[:-1]
-                    node.send_output("buffer", pa.array([buffer_text]))
-                elif event.key == Key.esc:
-                    buffer_text = ""
-                    node.send_output("buffer", pa.array([buffer_text]))
-                elif event.key == Key.enter:
-                    node.send_output("submitted", pa.array([buffer_text]))
-                    first_word = buffer_text.split(" ")[0]
-                    if first_word in NODE_TOPIC:
-                        node.send_output(first_word, pa.array([buffer_text]))
-                    submitted_text.append(buffer_text)
-                    buffer_text = ""
-                    node.send_output("buffer", pa.array([buffer_text]))
-                elif event.key == Key.ctrl:
-                    ctrl = True
-                elif event.key == Key.space:
-                    buffer_text += " "
-                    node.send_output("buffer", pa.array([buffer_text]))
-                elif event.key == Key.up:
-                    if len(submitted_text) > 0:
-                        cursor = max(cursor - 1, -len(submitted_text))
-                        buffer_text = submitted_text[cursor]
-                        node.send_output("buffer", pa.array([buffer_text]))
-                elif event.key == Key.down:
-                    if len(submitted_text) > 0:
-                        cursor = min(cursor + 1, 0)
-                        buffer_text = submitted_text[cursor]
-                        node.send_output("buffer", pa.array([buffer_text]))
-        elif event is not None and isinstance(event, Events.Release):
-            if event.key == Key.ctrl:
-                ctrl = False
operators/microphone_op.py
DELETED

@@ -1,32 +0,0 @@
-import numpy as np
-import pyarrow as pa
-import sounddevice as sd
-
-from dora import DoraStatus
-
-SAMPLE_RATE = 16000
-MAX_DURATION = 5
-
-
-class Operator:
-    """
-    Microphone operator that records the audio
-    """
-
-    def on_event(
-        self,
-        dora_event,
-        send_output,
-    ) -> DoraStatus:
-        if dora_event["type"] == "INPUT":
-            audio_data = sd.rec(
-                int(SAMPLE_RATE * MAX_DURATION),
-                samplerate=SAMPLE_RATE,
-                channels=1,
-                dtype=np.int16,
-                blocking=True,
-            )
-
-            audio_data = audio_data.ravel().astype(np.float32) / 32768.0
-            send_output("audio", pa.array(audio_data), dora_event["metadata"])
-        return DoraStatus.CONTINUE
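The capture logic itself survives the deletion: the same blocking sd.rec call and int16-to-float32 normalization reappear inside operators/whisper_op.py below. The division by 32768.0 maps the int16 range onto [-1.0, 1.0), the floating-point scale Whisper's transcribe() expects:

import numpy as np

# int16 spans [-32768, 32767]; dividing by 32768.0 rescales to [-1.0, 1.0)
# without clipping, which is the input range Whisper works with.
samples = np.array([-32768, 0, 32767], dtype=np.int16)
print(samples.astype(np.float32) / 32768.0)  # approx [-1.0, 0.0, 0.99997]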
operators/robot.py
CHANGED

@@ -12,50 +12,50 @@ CONN = "ap"
 class Command(Enum):
     NOD_YES = [
         {"action": "gimbal", "value": [20.0, 0.0]},
-        {"action": "gimbal", "value": [
+        {"action": "gimbal", "value": [0.0, 0.0]},
     ]
     NOD_NO = [
-        {"action": "gimbal", "value": [
-        {"action": "gimbal", "value": [
-        {"action": "gimbal", "value": [-5.0, 0.0]},
+        {"action": "gimbal", "value": [0, -55.0]},
+        {"action": "gimbal", "value": [0.0, 0.0]},
     ]
     FORWARD = [
         {
             "action": "control",
-            "value": [
+            "value": [2.0, 0.0, 0.0, 0.8, 0],
         }
     ]
     BACKWARD = [
+        {"action": "gimbal", "value": [0, -180.0]},
         {
             "action": "control",
-            "value": [-
+            "value": [-2.0, 0, 180.0, 0.8, 50],
         },
     ]
     LEFT = [
-        {"action": "gimbal", "value": [
+        {"action": "gimbal", "value": [0, -90.0]},
         {
             "action": "control",
-            "value": [0.
+            "value": [0.0, -1.0, 90.0, 0.6, 50],
         },
     ]
     SLIGHT_LEFT = [
-        {"action": "gimbal", "value": [
+        {"action": "gimbal", "value": [0.0, -30.0]},
         {
             "action": "control",
-            "value": [
+            "value": [1.0, -0.5, 30.0, 0.6, 50],
        },
     ]
     RIGHT = [
-        {"action": "gimbal", "value": [
+        {"action": "gimbal", "value": [0.0, 90.0]},
         {
-            "value": [0.
+            "value": [0.0, 1.0, -90.0, 0.6, 50],
             "action": "control",
         },
     ]
     SLIGHT_RIGHT = [
-        {"action": "gimbal", "value": [
+        {"action": "gimbal", "value": [0.0, 30.0]},
         {
-            "value": [
+            "value": [1.0, 0.5, -30.0, 0.6, 50],
             "action": "control",
         },
     ]
@@ -124,20 +124,35 @@ class Operator:
             raw_command = dora_event["value"][0].as_py()
             print(raw_command, flush=True)
             self.last_control = raw_command
-            if "
-                cmd = Command.
+            if "slight right" in raw_command:
+                cmd = Command.BACKWARD
+            elif "slight left" in raw_command:
+                cmd = Command.BACKWARD
             elif "right" in raw_command:
                 cmd = Command.RIGHT
             elif "left" in raw_command:
                 cmd = Command.LEFT
             elif "forward" in raw_command:
                 cmd = Command.FORWARD
-            elif "
+            elif "backward" in raw_command:
                 cmd = Command.BACKWARD
             else:
                 cmd = Command.UNKNOWN
             if len(self.backlog) == 0:
                 self.backlog += cmd.value
                 self.execute_backlog()
+        elif dora_event["id"] == "assistant_message":
+            raw_command = dora_event["value"][0].as_py()
+            print(raw_command, flush=True)
+            self.last_control = raw_command
+            if "No, " in raw_command:
+                cmd = Command.NOD_NO
+            elif "Yes, " in raw_command:
+                cmd = Command.NOD_YES
+            else:
+                cmd = Command.UNKNOWN
+            if len(self.backlog) == 0:
+                self.backlog += cmd.value
+                self.execute_backlog()
 
         return DoraStatus.CONTINUE
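The ordering of the new branches matters: "slight right" contains "right", so the more specific phrases must be matched before their substrings. A condensed, illustrative mirror of the committed chain (returning names instead of Command members):

# Most-specific substrings first; otherwise "slight right" would be
# captured by the plain "right" branch. Note the commit routes both
# "slight" phrases to BACKWARD even though SLIGHT_LEFT and SLIGHT_RIGHT
# exist in the Command enum.
def route(raw_command: str) -> str:
    if "slight right" in raw_command:
        return "BACKWARD"
    elif "slight left" in raw_command:
        return "BACKWARD"
    elif "right" in raw_command:
        return "RIGHT"
    elif "left" in raw_command:
        return "LEFT"
    elif "forward" in raw_command:
        return "FORWARD"
    elif "backward" in raw_command:
        return "BACKWARD"
    return "UNKNOWN"


print(route("turn slight right"))  # BACKWARD, not RIGHT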
operators/vlm_op.py
DELETED

@@ -1,273 +0,0 @@
-from dora import DoraStatus
-import pylcs
-import os
-import pyarrow as pa
-from transformers import AutoModelForCausalLM, AutoTokenizer
-import json
-
-import re
-import time
-
-import torch
-import requests
-
-from io import BytesIO
-from PIL import Image
-from transformers import AutoModelForCausalLM, AutoProcessor
-
-from transformers.image_utils import (
-    to_numpy_array,
-    PILImageResampling,
-    ChannelDimension,
-)
-from transformers.image_transforms import resize, to_channel_dimension_format
-
-API_TOKEN = os.getenv("HF_TOKEN")
-
-DEVICE = torch.device("cuda")
-PROCESSOR = AutoProcessor.from_pretrained(
-    "HuggingFaceM4/tr_272_bis_opt_step_15000_merge",
-    token=API_TOKEN,
-)
-MODEL = AutoModelForCausalLM.from_pretrained(
-    "HuggingFaceM4/tr_272_bis_opt_step_15000_merge",
-    token=API_TOKEN,
-    trust_remote_code=True,
-    torch_dtype=torch.bfloat16,
-).to(DEVICE)
-image_seq_len = MODEL.config.perceiver_config.resampler_n_latents
-BOS_TOKEN = PROCESSOR.tokenizer.bos_token
-BAD_WORDS_IDS = PROCESSOR.tokenizer(
-    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
-).input_ids
-
-
-CHATGPT = True
-MODEL_NAME_OR_PATH = "TheBloke/deepseek-coder-6.7B-instruct-GPTQ"
-
-MESSAGE_SENDER_TEMPLATE = """
-### Instruction
-You're a json expert. Format your response as a json with a topic and a data field in a ```json block. No explaination needed. No code needed.
-The schema for those json are:
-- forward
-- backward
-- left
-- right
-
-The response should look like this:
-```json
-
-[
-    {{ "topic": "control", "data": "forward" }},
-]
-```
-
-{user_message}
-
-### Response:
-"""
-
-model = AutoModelForCausalLM.from_pretrained(
-    MODEL_NAME_OR_PATH,
-    device_map="auto",
-    trust_remote_code=True,
-    revision="main",
-)
-
-
-tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True)
-
-
-def extract_json_code_blocks(text):
-    """
-    Extracts json code blocks from the given text that are enclosed in triple backticks with a json language identifier.
-
-    Parameters:
-    - text: A string that may contain one or more json code blocks.
-
-    Returns:
-    - A list of strings, where each string is a block of json code extracted from the text.
-    """
-    pattern = r"```json\n(.*?)\n```"
-    matches = re.findall(pattern, text, re.DOTALL)
-    if len(matches) == 0:
-        pattern = r"```json\n(.*?)(?:\n```|$)"
-        matches = re.findall(pattern, text, re.DOTALL)
-        if len(matches) == 0:
-            return [text]
-
-    return matches
-
-
-from openai import OpenAI
-import os
-
-import base64
-import requests
-
-API_TOKEN = os.getenv("HF_TOKEN")
-
-
-# Function to encode the image
-def encode_image(image_path):
-    with open(image_path, "rb") as image_file:
-        return base64.b64encode(image_file.read()).decode("utf-8")
-
-
-def understand_image(image_path):
-
-    # Getting the base64 string
-    base64_image = encode_image(image_path)
-
-    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
-
-    payload = {
-        "model": "gpt-4-vision-preview",
-        "messages": [
-            {
-                "role": "user",
-                "content": [
-                    {
-                        "type": "text",
-                        "text": "What’s in this image? Describe it in a short sentence",
-                    },
-                    {
-                        "type": "image_url",
-                        "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
-                    },
-                ],
-            }
-        ],
-        "max_tokens": 300,
-    }
-
-    response = requests.post(
-        "https://api.openai.com/v1/chat/completions", headers=headers, json=payload
-    )
-
-    print(response.json()["choices"][0]["message"]["content"])
-
-
-class Operator:
-
-    def on_event(
-        self,
-        dora_event,
-        send_output,
-    ) -> DoraStatus:
-        if dora_event["type"] == "INPUT" and dora_event["id"] == "message_sender":
-            user_message = dora_event["value"][0].as_py()
-            output = self.ask_llm(
-                MESSAGE_SENDER_TEMPLATE.format(user_message=user_message)
-            )
-            outputs = extract_json_code_blocks(output)[0]
-            print("response: ", output, flush=True)
-            try:
-                outputs = json.loads(outputs)
-                if not isinstance(outputs, list):
-                    outputs = [outputs]
-                for output in outputs:
-                    if not isinstance(output["data"], list):
-                        output["data"] = [output["data"]]
-
-                    if output["topic"] in ["led", "blaster"]:
-                        send_output(
-                            output["topic"],
-                            pa.array(output["data"]),
-                            dora_event["metadata"],
-                        )
-
-                        send_output(
-                            "assistant_message",
-                            pa.array([f"sent: {output}"]),
-                            dora_event["metadata"],
-                        )
-                    else:
-                        send_output(
-                            "assistant_message",
-                            pa.array(
-                                [f"Could not send as topic was not available: {output}"]
-                            ),
-                            dora_event["metadata"],
-                        )
-            except:
-                send_output(
-                    "assistant_message",
-                    pa.array([f"Could not parse json: {outputs}"]),
-                    dora_event["metadata"],
-                )
-            # if data is not iterable, put data in a list
-            return DoraStatus.CONTINUE
-
-    def ask_llm(self, prompt):
-
-        # Generate output
-        # prompt = PROMPT_TEMPLATE.format(system_message=system_message, prompt=prompt))
-        input = tokenizer(prompt, return_tensors="pt")
-        input_ids = input.input_ids.cuda()
-
-        # add attention mask here
-        attention_mask = input["attention_mask"]
-
-        output = model.generate(
-            inputs=input_ids,
-            temperature=0.7,
-            do_sample=True,
-            top_p=0.95,
-            top_k=40,
-            max_new_tokens=512,
-            attention_mask=attention_mask,
-            eos_token_id=tokenizer.eos_token_id,
-        )
-        # Get the tokens from the output, decode them, print them
-
-        # Get text between im_start and im_end
-        return tokenizer.decode(output[0], skip_special_tokens=True)[len(prompt) :]
-
-    def ask_chatgpt(self, prompt):
-        from openai import OpenAI
-
-        client = OpenAI()
-        print("---asking chatgpt: ", prompt, flush=True)
-        response = client.chat.completions.create(
-            model="gpt-4-turbo-preview",
-            messages=[
-                {"role": "system", "content": "You are a helpful assistant."},
-                {"role": "user", "content": prompt},
-            ],
-        )
-        answer = response.choices[0].message.content
-
-        print("Done", flush=True)
-        return answer
-
-
-if __name__ == "__main__":
-    op = Operator()
-
-    # Path to the current file
-    current_file_path = __file__
-
-    # Directory of the current file
-    current_directory = os.path.dirname(current_file_path)
-
-    path = current_directory + "/planning_op.py"
-    with open(path, "r", encoding="utf8") as f:
-        raw = f.read()
-
-    op.on_event(
-        {
-            "type": "INPUT",
-            "id": "code_modifier",
-            "value": pa.array(
-                [
-                    {
-                        "path": path,
-                        "user_message": "change planning to make gimbal follow bounding box ",
-                    },
-                ]
-            ),
-            "metadata": [],
-        },
-        print,
-    )
operators/whisper_op copy.py
DELETED

@@ -1,25 +0,0 @@
-import pyarrow as pa
-import whisper
-
-from dora import DoraStatus
-
-
-model = whisper.load_model("base")
-
-
-class Operator:
-    """
-    Transforming Speech to Text using OpenAI Whisper model
-    """
-
-    def on_event(
-        self,
-        dora_event,
-        send_output,
-    ) -> DoraStatus:
-        if dora_event["type"] == "INPUT":
-            audio = dora_event["value"].to_numpy()
-            audio = whisper.pad_or_trim(audio)
-            result = model.transcribe(audio, language="en")
-            send_output("text", pa.array([result["text"]]), dora_event["metadata"])
-        return DoraStatus.CONTINUE
operators/whisper_op.py
CHANGED

@@ -1,11 +1,18 @@
 import pyarrow as pa
 import whisper
-
+from pynput import keyboard
+from pynput.keyboard import Key
 from dora import DoraStatus
 
+import numpy as np
+import pyarrow as pa
+import sounddevice as sd
 
 model = whisper.load_model("base")
 
+SAMPLE_RATE = 16000
+MAX_DURATION = 5
+
 
 class Operator:
     """
@@ -18,8 +25,26 @@ class Operator:
         send_output,
     ) -> DoraStatus:
         if dora_event["type"] == "INPUT":
-            audio = dora_event["value"].to_numpy()
-            audio = whisper.pad_or_trim(audio)
-            result = model.transcribe(audio, language="en")
-            send_output("text", pa.array([result["text"]]), dora_event["metadata"])
+            ## Check for keyboard event
+            with keyboard.Events() as events:
+                event = events.get(1.0)
+                if event is not None and event.key == Key.up:
+
+                    ## Microphone
+                    audio_data = sd.rec(
+                        int(SAMPLE_RATE * MAX_DURATION),
+                        samplerate=SAMPLE_RATE,
+                        channels=1,
+                        dtype=np.int16,
+                        blocking=True,
+                    )
+
+                    audio = audio_data.ravel().astype(np.float32) / 32768.0
+
+                    ## Speech to text
+                    audio = whisper.pad_or_trim(audio)
+                    result = model.transcribe(audio, language="en")
+                    send_output(
+                        "text", pa.array([result["text"]]), dora_event["metadata"]
+                    )
        return DoraStatus.CONTINUE
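The operator is now push-to-talk: each timer-driven input event opens a one-second window, and an Up-arrow press inside it records five seconds of audio, which is then padded or trimmed and transcribed. A hedged way to try it outside the graph (hypothetical harness; needs a working microphone, a keyboard listener backend for pynput, and the "base" Whisper weights):

import pyarrow as pa

# Hypothetical manual test: one fake INPUT event gives one chance to press
# the Up arrow; on a press, 5 s are recorded and the transcript printed.
op = Operator()
op.on_event(
    {"type": "INPUT", "id": "audio", "value": None, "metadata": {}},
    lambda out_id, value, metadata: print(out_id, value[0].as_py()),
)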