Spaces:
Running
Running
Deploy new version
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- README.md +4 -4
- pyproject.toml +4 -4
- reachys_brain/__init__.py +4 -0
- reachys_brain/__pycache__/__init__.cpython-311.pyc +0 -0
- reachys_brain/__pycache__/__init__.cpython-312.pyc +0 -0
- reachys_brain/__pycache__/animation_coordinator.cpython-311.pyc +0 -0
- reachys_brain/__pycache__/animation_coordinator.cpython-312.pyc +0 -0
- reachys_brain/__pycache__/app_tools.cpython-312.pyc +0 -0
- reachys_brain/__pycache__/audio_capture.cpython-312.pyc +0 -0
- reachys_brain/__pycache__/audio_playback.cpython-312.pyc +0 -0
- reachys_brain/__pycache__/database.cpython-312.pyc +0 -0
- reachys_brain/__pycache__/models.cpython-312.pyc +0 -0
- reachys_brain/__pycache__/motion_service.cpython-311.pyc +0 -0
- reachys_brain/__pycache__/motion_service.cpython-312.pyc +0 -0
- reachys_brain/__pycache__/openai_realtime.cpython-312.pyc +0 -0
- reachys_brain/__pycache__/server.cpython-312.pyc +0 -0
- reachys_brain/__pycache__/tts_service.cpython-311.pyc +0 -0
- reachys_brain/__pycache__/tts_service.cpython-312.pyc +0 -0
- reachys_brain/__pycache__/voice_manager.cpython-311.pyc +0 -0
- reachys_brain/__pycache__/voice_manager.cpython-312.pyc +0 -0
- reachys_brain/animation_coordinator.py +543 -0
- reachys_brain/app_tools.py +892 -0
- reachys_brain/audio_capture.py +375 -0
- reachys_brain/audio_playback.py +332 -0
- reachys_brain/config.py +100 -0
- reachys_brain/daemon_health_monitor.py +414 -0
- reachys_brain/database.py +1620 -0
- reachys_brain/idle_movement_service.py +447 -0
- reachys_brain/main.py +147 -0
- reachys_brain/models.py +537 -0
- reachys_brain/motion_service.py +214 -0
- reachys_brain/openai_realtime.py +1123 -0
- reachys_brain/routes/__init__.py +42 -0
- reachys_brain/routes/__pycache__/__init__.cpython-312.pyc +0 -0
- reachys_brain/routes/__pycache__/animations.cpython-312.pyc +0 -0
- reachys_brain/routes/__pycache__/audio_stream_manager.cpython-312.pyc +0 -0
- reachys_brain/routes/__pycache__/conversation.cpython-312.pyc +0 -0
- reachys_brain/routes/__pycache__/conversation_messages.cpython-312.pyc +0 -0
- reachys_brain/routes/__pycache__/openai_config.cpython-312.pyc +0 -0
- reachys_brain/routes/__pycache__/speech.cpython-312.pyc +0 -0
- reachys_brain/routes/__pycache__/voice.cpython-312.pyc +0 -0
- reachys_brain/routes/animation_manager.py +190 -0
- reachys_brain/routes/animations.py +578 -0
- reachys_brain/routes/apps.py +246 -0
- reachys_brain/routes/audio_manager.py +157 -0
- reachys_brain/routes/audio_stream_manager.py +151 -0
- reachys_brain/routes/broadcast_manager.py +105 -0
- reachys_brain/routes/conversation.py +1132 -0
- reachys_brain/routes/conversation_messages.py +190 -0
- reachys_brain/routes/conversation_services.py +213 -0
README.md
CHANGED
|
@@ -6,7 +6,7 @@ colorTo: yellow
|
|
| 6 |
sdk: static
|
| 7 |
pinned: false
|
| 8 |
short_description: iOS voice assistant with TTS, animations & vision
|
| 9 |
-
thumbnail: https://huggingface.co/spaces/robertkeus/
|
| 10 |
tags:
|
| 11 |
- reachy_mini
|
| 12 |
- reachy_mini_python_app
|
|
@@ -39,7 +39,7 @@ A Reachy Mini app that provides an HTTP API for iOS voice assistant integration.
|
|
| 39 |
|
| 40 |
```bash
|
| 41 |
# Navigate to the app directory
|
| 42 |
-
cd
|
| 43 |
|
| 44 |
# Install in development mode
|
| 45 |
pip install -e .
|
|
@@ -64,7 +64,7 @@ The TTS and voice conversation features require an OpenAI API key. You can confi
|
|
| 64 |
|
| 65 |
2. Open your browser to [http://127.0.0.1:8000/](http://127.0.0.1:8000/)
|
| 66 |
|
| 67 |
-
3. Select "Reachy
|
| 68 |
|
| 69 |
4. The HTTP server will start on port 8080
|
| 70 |
|
|
@@ -74,7 +74,7 @@ The TTS and voice conversation features require an OpenAI API key. You can confi
|
|
| 74 |
|
| 75 |
```bash
|
| 76 |
# Run without Reachy connected
|
| 77 |
-
|
| 78 |
```
|
| 79 |
|
| 80 |
## API Endpoints
|
|
|
|
| 6 |
sdk: static
|
| 7 |
pinned: false
|
| 8 |
short_description: iOS voice assistant with TTS, animations & vision
|
| 9 |
+
thumbnail: https://huggingface.co/spaces/robertkeus/reachys_brain/resolve/main/logo.png
|
| 10 |
tags:
|
| 11 |
- reachy_mini
|
| 12 |
- reachy_mini_python_app
|
|
|
|
| 39 |
|
| 40 |
```bash
|
| 41 |
# Navigate to the app directory
|
| 42 |
+
cd reachys_brain
|
| 43 |
|
| 44 |
# Install in development mode
|
| 45 |
pip install -e .
|
|
|
|
| 64 |
|
| 65 |
2. Open your browser to [http://127.0.0.1:8000/](http://127.0.0.1:8000/)
|
| 66 |
|
| 67 |
+
3. Select "Reachy's Brain" from the installed applications
|
| 68 |
|
| 69 |
4. The HTTP server will start on port 8080
|
| 70 |
|
|
|
|
| 74 |
|
| 75 |
```bash
|
| 76 |
# Run without Reachy connected
|
| 77 |
+
reachys-brain
|
| 78 |
```
|
| 79 |
|
| 80 |
## API Endpoints
|
pyproject.toml
CHANGED
|
@@ -3,9 +3,9 @@ requires = ["setuptools>=61.0"]
|
|
| 3 |
build-backend = "setuptools.build_meta"
|
| 4 |
|
| 5 |
[project]
|
| 6 |
-
name = "
|
| 7 |
version = "0.1.0"
|
| 8 |
-
description = "
|
| 9 |
readme = "README.md"
|
| 10 |
requires-python = ">=3.10"
|
| 11 |
dependencies = [
|
|
@@ -25,8 +25,8 @@ dependencies = [
|
|
| 25 |
]
|
| 26 |
|
| 27 |
[project.entry-points."reachy_mini_apps"]
|
| 28 |
-
reachys_brain = "
|
| 29 |
|
| 30 |
[project.scripts]
|
| 31 |
-
|
| 32 |
|
|
|
|
| 3 |
build-backend = "setuptools.build_meta"
|
| 4 |
|
| 5 |
[project]
|
| 6 |
+
name = "reachys_brain"
|
| 7 |
version = "0.1.0"
|
| 8 |
+
description = "Reachy's Brain - Voice assistant and AI integration for Reachy Mini"
|
| 9 |
readme = "README.md"
|
| 10 |
requires-python = ">=3.10"
|
| 11 |
dependencies = [
|
|
|
|
| 25 |
]
|
| 26 |
|
| 27 |
[project.entry-points."reachy_mini_apps"]
|
| 28 |
+
reachys_brain = "reachys_brain.main:ReachyIosBridge"
|
| 29 |
|
| 30 |
[project.scripts]
|
| 31 |
+
reachys-brain = "reachys_brain.server:run_server"
|
| 32 |
|
reachys_brain/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Reachy iOS Bridge - HTTP server for voice assistant integration."""
|
| 2 |
+
|
| 3 |
+
__version__ = "0.1.0"
|
| 4 |
+
|
reachys_brain/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (309 Bytes). View file
|
|
|
reachys_brain/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (300 Bytes). View file
|
|
|
reachys_brain/__pycache__/animation_coordinator.cpython-311.pyc
ADDED
|
Binary file (20 kB). View file
|
|
|
reachys_brain/__pycache__/animation_coordinator.cpython-312.pyc
ADDED
|
Binary file (18.5 kB). View file
|
|
|
reachys_brain/__pycache__/app_tools.cpython-312.pyc
ADDED
|
Binary file (16 kB). View file
|
|
|
reachys_brain/__pycache__/audio_capture.cpython-312.pyc
ADDED
|
Binary file (15 kB). View file
|
|
|
reachys_brain/__pycache__/audio_playback.cpython-312.pyc
ADDED
|
Binary file (14.6 kB). View file
|
|
|
reachys_brain/__pycache__/database.cpython-312.pyc
ADDED
|
Binary file (75.6 kB). View file
|
|
|
reachys_brain/__pycache__/models.cpython-312.pyc
ADDED
|
Binary file (21 kB). View file
|
|
|
reachys_brain/__pycache__/motion_service.cpython-311.pyc
ADDED
|
Binary file (10.2 kB). View file
|
|
|
reachys_brain/__pycache__/motion_service.cpython-312.pyc
ADDED
|
Binary file (12.8 kB). View file
|
|
|
reachys_brain/__pycache__/openai_realtime.cpython-312.pyc
ADDED
|
Binary file (23.9 kB). View file
|
|
|
reachys_brain/__pycache__/server.cpython-312.pyc
ADDED
|
Binary file (11.9 kB). View file
|
|
|
reachys_brain/__pycache__/tts_service.cpython-311.pyc
ADDED
|
Binary file (26.5 kB). View file
|
|
|
reachys_brain/__pycache__/tts_service.cpython-312.pyc
ADDED
|
Binary file (24.4 kB). View file
|
|
|
reachys_brain/__pycache__/voice_manager.cpython-311.pyc
ADDED
|
Binary file (15.7 kB). View file
|
|
|
reachys_brain/__pycache__/voice_manager.cpython-312.pyc
ADDED
|
Binary file (13.9 kB). View file
|
|
|
reachys_brain/animation_coordinator.py
ADDED
|
@@ -0,0 +1,543 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Animation coordinator for speech.
|
| 2 |
+
|
| 3 |
+
Plays a single emotion animation BEFORE speech starts, based on
|
| 4 |
+
sentiment analysis of the text. Animations have sounds, so they
|
| 5 |
+
must complete before TTS begins.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import asyncio
|
| 9 |
+
import logging
|
| 10 |
+
import random
|
| 11 |
+
from dataclasses import dataclass
|
| 12 |
+
from typing import Optional
|
| 13 |
+
|
| 14 |
+
from .motion_service import MotionService
|
| 15 |
+
from .routes.audio_stream_manager import ConversationTimings
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@dataclass
|
| 21 |
+
class AnimationResult:
|
| 22 |
+
"""Result of playing a pre-speech animation."""
|
| 23 |
+
|
| 24 |
+
played: bool # Whether an animation was played
|
| 25 |
+
sentiment: str # Detected sentiment
|
| 26 |
+
animation: str # Animation name (empty if not played)
|
| 27 |
+
duration: float # Duration in seconds (0 if not played)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# Emotion animations grouped by sentiment/context
|
| 31 |
+
# These are played ONCE before speech, based on text analysis
|
| 32 |
+
|
| 33 |
+
GREETING_EMOTIONS = [
|
| 34 |
+
"welcoming1", "welcoming2", "cheerful1", "attentive1"
|
| 35 |
+
]
|
| 36 |
+
|
| 37 |
+
HAPPY_EMOTIONS = [
|
| 38 |
+
"cheerful1", "enthusiastic1", "enthusiastic2", "grateful1",
|
| 39 |
+
"proud1", "success1", "loving1"
|
| 40 |
+
]
|
| 41 |
+
|
| 42 |
+
SAD_EMOTIONS = [
|
| 43 |
+
"sad1", "sad2", "understanding1", "calming1", "lonely1"
|
| 44 |
+
]
|
| 45 |
+
|
| 46 |
+
EXCITED_EMOTIONS = [
|
| 47 |
+
"amazed1", "surprised1", "surprised2", "enthusiastic1"
|
| 48 |
+
]
|
| 49 |
+
|
| 50 |
+
QUESTION_EMOTIONS = [
|
| 51 |
+
"inquiring1", "inquiring2", "inquiring3", "curious1", "attentive1"
|
| 52 |
+
]
|
| 53 |
+
|
| 54 |
+
THINKING_EMOTIONS = [
|
| 55 |
+
"thoughtful1", "thoughtful2", "uncertain1", "confused1"
|
| 56 |
+
]
|
| 57 |
+
|
| 58 |
+
APOLOGETIC_EMOTIONS = [
|
| 59 |
+
"oops1", "oops2", "understanding1", "calming1"
|
| 60 |
+
]
|
| 61 |
+
|
| 62 |
+
HELPFUL_EMOTIONS = [
|
| 63 |
+
"helpful1", "helpful2", "attentive1", "welcoming1"
|
| 64 |
+
]
|
| 65 |
+
|
| 66 |
+
NEUTRAL_EMOTIONS = [
|
| 67 |
+
"attentive1", "attentive2", "serenity1"
|
| 68 |
+
]
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
# Multilingual keyword sets for sentiment detection
|
| 72 |
+
|
| 73 |
+
GREETING_KEYWORDS = {
|
| 74 |
+
"en": {"hello", "hi", "hey", "welcome", "greetings", "good morning", "good afternoon", "good evening"},
|
| 75 |
+
"nl": {"hallo", "hoi", "hey", "welkom", "goedemorgen", "goedemiddag", "goedenavond", "dag"},
|
| 76 |
+
"de": {"hallo", "hi", "hey", "willkommen", "guten morgen", "guten tag", "guten abend"},
|
| 77 |
+
"fr": {"bonjour", "salut", "bienvenue", "bonsoir", "coucou"},
|
| 78 |
+
"es": {"hola", "buenos días", "buenas tardes", "buenas noches", "bienvenido"},
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
HAPPY_KEYWORDS = {
|
| 82 |
+
"en": {"happy", "glad", "great", "wonderful", "amazing", "excellent", "love", "fantastic",
|
| 83 |
+
"awesome", "delighted", "pleased", "excited", "joy", "thank", "thanks", "grateful"},
|
| 84 |
+
"nl": {"blij", "gelukkig", "geweldig", "fantastisch", "prachtig", "leuk", "fijn", "super",
|
| 85 |
+
"bedankt", "dank", "graag", "mooi", "heerlijk", "top"},
|
| 86 |
+
"de": {"glücklich", "froh", "toll", "wunderbar", "fantastisch", "super", "danke", "freude",
|
| 87 |
+
"großartig", "herrlich", "schön"},
|
| 88 |
+
"fr": {"heureux", "content", "merveilleux", "fantastique", "excellent", "merci", "génial",
|
| 89 |
+
"super", "magnifique", "formidable"},
|
| 90 |
+
"es": {"feliz", "contento", "maravilloso", "fantástico", "excelente", "gracias", "genial",
|
| 91 |
+
"increíble", "estupendo"},
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
SAD_KEYWORDS = {
|
| 95 |
+
"en": {"sorry", "sad", "unfortunately", "regret", "apologies", "difficult", "hard",
|
| 96 |
+
"disappointed", "upset", "bad news"},
|
| 97 |
+
"nl": {"sorry", "helaas", "jammer", "verdrietig", "spijt", "moeilijk", "teleurgesteld"},
|
| 98 |
+
"de": {"leider", "traurig", "entschuldigung", "bedaure", "schwierig", "schade"},
|
| 99 |
+
"fr": {"désolé", "malheureusement", "triste", "regret", "difficile", "dommage"},
|
| 100 |
+
"es": {"lo siento", "triste", "lamentablemente", "difícil", "desafortunadamente"},
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
EXCITED_KEYWORDS = {
|
| 104 |
+
"en": {"wow", "amazing", "incredible", "awesome", "fantastic", "exciting", "unbelievable",
|
| 105 |
+
"wonderful", "brilliant"},
|
| 106 |
+
"nl": {"wauw", "geweldig", "ongelooflijk", "fantastisch", "super", "gaaf", "cool"},
|
| 107 |
+
"de": {"wow", "unglaublich", "fantastisch", "toll", "wahnsinn", "krass"},
|
| 108 |
+
"fr": {"wow", "incroyable", "fantastique", "génial", "extraordinaire"},
|
| 109 |
+
"es": {"guau", "increíble", "fantástico", "asombroso", "maravilloso"},
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
QUESTION_KEYWORDS = {
|
| 113 |
+
"en": {"what", "how", "why", "when", "where", "who", "which", "could", "would", "can", "?"},
|
| 114 |
+
"nl": {"wat", "hoe", "waarom", "wanneer", "waar", "wie", "welke", "kun", "zou", "?"},
|
| 115 |
+
"de": {"was", "wie", "warum", "wann", "wo", "wer", "welche", "können", "würden", "?"},
|
| 116 |
+
"fr": {"quoi", "comment", "pourquoi", "quand", "où", "qui", "quel", "pouvez", "?"},
|
| 117 |
+
"es": {"qué", "cómo", "por qué", "cuándo", "dónde", "quién", "cuál", "puede", "?"},
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
THINKING_KEYWORDS = {
|
| 121 |
+
"en": {"think", "believe", "maybe", "perhaps", "probably", "might", "consider", "wonder",
|
| 122 |
+
"hmm", "let me", "i think"},
|
| 123 |
+
"nl": {"denk", "misschien", "wellicht", "waarschijnlijk", "hmm", "even kijken", "geloof"},
|
| 124 |
+
"de": {"denke", "vielleicht", "wahrscheinlich", "hmm", "glaube", "möglicherweise"},
|
| 125 |
+
"fr": {"pense", "peut-être", "probablement", "hmm", "crois", "je pense"},
|
| 126 |
+
"es": {"creo", "quizás", "probablemente", "hmm", "pienso", "tal vez"},
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
APOLOGETIC_KEYWORDS = {
|
| 130 |
+
"en": {"sorry", "apologize", "apologies", "my mistake", "oops", "my bad", "excuse me"},
|
| 131 |
+
"nl": {"sorry", "excuses", "mijn fout", "oeps", "pardon"},
|
| 132 |
+
"de": {"entschuldigung", "tut mir leid", "verzeihung", "mein fehler", "ups"},
|
| 133 |
+
"fr": {"désolé", "pardon", "excusez", "mes excuses", "oups"},
|
| 134 |
+
"es": {"perdón", "disculpa", "lo siento", "mis disculpas"},
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
HELPFUL_KEYWORDS = {
|
| 138 |
+
"en": {"help", "assist", "here is", "let me", "i can", "sure", "of course", "certainly",
|
| 139 |
+
"absolutely", "no problem", "happy to"},
|
| 140 |
+
"nl": {"help", "hier is", "ik kan", "natuurlijk", "zeker", "geen probleem", "graag"},
|
| 141 |
+
"de": {"hilfe", "hier ist", "ich kann", "natürlich", "sicher", "kein problem", "gerne"},
|
| 142 |
+
"fr": {"aide", "voici", "je peux", "bien sûr", "certainement", "pas de problème"},
|
| 143 |
+
"es": {"ayuda", "aquí está", "puedo", "por supuesto", "claro", "sin problema"},
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class AnimationCoordinator:
|
| 148 |
+
"""Coordinates emotion animations before speech.
|
| 149 |
+
|
| 150 |
+
Analyzes text sentiment and plays ONE appropriate emotion animation
|
| 151 |
+
before speech begins. Animations have their own sounds, so they
|
| 152 |
+
must complete before TTS starts.
|
| 153 |
+
|
| 154 |
+
Custom apps can override the default animation mappings by calling
|
| 155 |
+
set_custom_animations() with their own emotion-to-animation mapping.
|
| 156 |
+
"""
|
| 157 |
+
|
| 158 |
+
def __init__(self, motion_service: MotionService):
|
| 159 |
+
"""Initialize the coordinator.
|
| 160 |
+
|
| 161 |
+
Args:
|
| 162 |
+
motion_service: Service for playing animations.
|
| 163 |
+
"""
|
| 164 |
+
self._motion = motion_service
|
| 165 |
+
self._language = "en"
|
| 166 |
+
self._is_animating = False
|
| 167 |
+
self._last_emotion: Optional[str] = None
|
| 168 |
+
|
| 169 |
+
# Custom animation overrides from active app
|
| 170 |
+
# Maps sentiment (e.g., "happy") to animation name (e.g., "dance1")
|
| 171 |
+
self._custom_animations: dict[str, str] = {}
|
| 172 |
+
|
| 173 |
+
logger.info(f"AnimationCoordinator initialized (language: {self._language})")
|
| 174 |
+
|
| 175 |
+
def set_custom_animations(self, animations: dict[str, str]) -> None:
|
| 176 |
+
"""Set custom animation mappings for the active app.
|
| 177 |
+
|
| 178 |
+
Args:
|
| 179 |
+
animations: Dict mapping sentiment names to animation names.
|
| 180 |
+
e.g., {"happy": "cheerful1", "sad": "calming1"}
|
| 181 |
+
"""
|
| 182 |
+
self._custom_animations = animations.copy() if animations else {}
|
| 183 |
+
if animations:
|
| 184 |
+
logger.info(f"Custom animations set: {list(animations.keys())}")
|
| 185 |
+
else:
|
| 186 |
+
logger.info("Custom animations cleared - using defaults")
|
| 187 |
+
|
| 188 |
+
def clear_custom_animations(self) -> None:
|
| 189 |
+
"""Clear custom animation mappings, reverting to defaults."""
|
| 190 |
+
self._custom_animations = {}
|
| 191 |
+
logger.info("Custom animations cleared - using defaults")
|
| 192 |
+
|
| 193 |
+
def _normalize_language(self, language: str) -> str:
|
| 194 |
+
"""Normalize language code to base language."""
|
| 195 |
+
if not language:
|
| 196 |
+
return "en"
|
| 197 |
+
# Extract base language (e.g., "nl_NL" -> "nl")
|
| 198 |
+
base = language.split("_")[0].split("-")[0].lower()
|
| 199 |
+
# Return base if we have keywords for it, else default to English
|
| 200 |
+
if base in HAPPY_KEYWORDS:
|
| 201 |
+
return base
|
| 202 |
+
return "en"
|
| 203 |
+
|
| 204 |
+
def set_language(self, language: str) -> None:
|
| 205 |
+
"""Set the language for sentiment analysis.
|
| 206 |
+
|
| 207 |
+
Args:
|
| 208 |
+
language: Language code (e.g., "en", "nl", "nl_NL").
|
| 209 |
+
"""
|
| 210 |
+
old_lang = self._language
|
| 211 |
+
self._language = self._normalize_language(language)
|
| 212 |
+
if old_lang != self._language:
|
| 213 |
+
logger.info(f"AnimationCoordinator language changed: {old_lang} -> {self._language}")
|
| 214 |
+
|
| 215 |
+
@property
|
| 216 |
+
def language(self) -> str:
|
| 217 |
+
"""Get the current language code."""
|
| 218 |
+
return self._language
|
| 219 |
+
|
| 220 |
+
@property
|
| 221 |
+
def is_animating(self) -> bool:
|
| 222 |
+
"""Check if currently playing an animation."""
|
| 223 |
+
return self._is_animating
|
| 224 |
+
|
| 225 |
+
def _get_keywords(self, keyword_dict: dict) -> set:
|
| 226 |
+
"""Get keywords for current language with English fallback."""
|
| 227 |
+
return keyword_dict.get(self._language, keyword_dict.get("en", set()))
|
| 228 |
+
|
| 229 |
+
def _analyze_sentiment(self, text: str) -> str:
|
| 230 |
+
"""Analyze text and return the best emotion category.
|
| 231 |
+
|
| 232 |
+
Args:
|
| 233 |
+
text: The text to analyze.
|
| 234 |
+
|
| 235 |
+
Returns:
|
| 236 |
+
Emotion category: "greeting", "happy", "sad", "excited",
|
| 237 |
+
"question", "thinking", "apologetic", "helpful", or "neutral".
|
| 238 |
+
"""
|
| 239 |
+
text_lower = text.lower()
|
| 240 |
+
words = set(text_lower.split())
|
| 241 |
+
|
| 242 |
+
# Check for question marks first (high priority)
|
| 243 |
+
if "?" in text:
|
| 244 |
+
return "question"
|
| 245 |
+
|
| 246 |
+
# Check each category
|
| 247 |
+
greeting_kw = self._get_keywords(GREETING_KEYWORDS)
|
| 248 |
+
if words & greeting_kw or any(kw in text_lower for kw in greeting_kw):
|
| 249 |
+
return "greeting"
|
| 250 |
+
|
| 251 |
+
apologetic_kw = self._get_keywords(APOLOGETIC_KEYWORDS)
|
| 252 |
+
if words & apologetic_kw or any(kw in text_lower for kw in apologetic_kw):
|
| 253 |
+
return "apologetic"
|
| 254 |
+
|
| 255 |
+
excited_kw = self._get_keywords(EXCITED_KEYWORDS)
|
| 256 |
+
if words & excited_kw or any(kw in text_lower for kw in excited_kw):
|
| 257 |
+
# Check for exclamation mark to confirm excitement
|
| 258 |
+
if "!" in text:
|
| 259 |
+
return "excited"
|
| 260 |
+
|
| 261 |
+
happy_kw = self._get_keywords(HAPPY_KEYWORDS)
|
| 262 |
+
if words & happy_kw or any(kw in text_lower for kw in happy_kw):
|
| 263 |
+
return "happy"
|
| 264 |
+
|
| 265 |
+
sad_kw = self._get_keywords(SAD_KEYWORDS)
|
| 266 |
+
if words & sad_kw or any(kw in text_lower for kw in sad_kw):
|
| 267 |
+
return "sad"
|
| 268 |
+
|
| 269 |
+
# Check helpful BEFORE thinking (both share "let me")
|
| 270 |
+
helpful_kw = self._get_keywords(HELPFUL_KEYWORDS)
|
| 271 |
+
if words & helpful_kw or any(kw in text_lower for kw in helpful_kw):
|
| 272 |
+
return "helpful"
|
| 273 |
+
|
| 274 |
+
thinking_kw = self._get_keywords(THINKING_KEYWORDS)
|
| 275 |
+
if words & thinking_kw or any(kw in text_lower for kw in thinking_kw):
|
| 276 |
+
return "thinking"
|
| 277 |
+
|
| 278 |
+
question_kw = self._get_keywords(QUESTION_KEYWORDS)
|
| 279 |
+
if words & question_kw:
|
| 280 |
+
return "question"
|
| 281 |
+
|
| 282 |
+
return "neutral"
|
| 283 |
+
|
| 284 |
+
def _get_animation_chance(self, sentiment: str) -> float:
|
| 285 |
+
"""Get the probability of playing an animation for this sentiment.
|
| 286 |
+
|
| 287 |
+
Args:
|
| 288 |
+
sentiment: The sentiment category.
|
| 289 |
+
|
| 290 |
+
Returns:
|
| 291 |
+
Probability between 0.0 and 1.0.
|
| 292 |
+
"""
|
| 293 |
+
if sentiment == "greeting":
|
| 294 |
+
return ConversationTimings.PRE_SPEECH_CHANCE_GREETING
|
| 295 |
+
elif sentiment in ("excited", "happy", "sad", "apologetic"):
|
| 296 |
+
return ConversationTimings.PRE_SPEECH_CHANCE_STRONG
|
| 297 |
+
elif sentiment in ("question", "thinking"):
|
| 298 |
+
return ConversationTimings.PRE_SPEECH_CHANCE_THINKING
|
| 299 |
+
else:
|
| 300 |
+
return ConversationTimings.PRE_SPEECH_CHANCE_NEUTRAL
|
| 301 |
+
|
| 302 |
+
def _get_animation_duration(self, sentiment: str) -> float:
|
| 303 |
+
"""Get the animation duration for this sentiment.
|
| 304 |
+
|
| 305 |
+
Args:
|
| 306 |
+
sentiment: The sentiment category.
|
| 307 |
+
|
| 308 |
+
Returns:
|
| 309 |
+
Duration in seconds.
|
| 310 |
+
"""
|
| 311 |
+
if sentiment == "greeting":
|
| 312 |
+
return ConversationTimings.PRE_SPEECH_DURATION_GREETING
|
| 313 |
+
elif sentiment in ("excited", "happy", "sad", "apologetic"):
|
| 314 |
+
return ConversationTimings.PRE_SPEECH_DURATION_STRONG
|
| 315 |
+
else:
|
| 316 |
+
return ConversationTimings.PRE_SPEECH_DURATION_DEFAULT
|
| 317 |
+
|
| 318 |
+
def _get_emotion_for_sentiment(self, sentiment: str) -> str:
|
| 319 |
+
"""Get an emotion animation for the given sentiment.
|
| 320 |
+
|
| 321 |
+
Checks custom animations first, then falls back to defaults.
|
| 322 |
+
|
| 323 |
+
Args:
|
| 324 |
+
sentiment: The sentiment category.
|
| 325 |
+
|
| 326 |
+
Returns:
|
| 327 |
+
An emotion animation name.
|
| 328 |
+
"""
|
| 329 |
+
# Check for custom animation override first
|
| 330 |
+
if sentiment in self._custom_animations:
|
| 331 |
+
return self._custom_animations[sentiment]
|
| 332 |
+
|
| 333 |
+
# Map some sentiments to custom animation keys if they exist
|
| 334 |
+
# Custom apps might use simpler keys like "happy" instead of "excited"
|
| 335 |
+
sentiment_to_custom_key = {
|
| 336 |
+
"excited": "surprised",
|
| 337 |
+
"apologetic": "sad",
|
| 338 |
+
"helpful": "happy",
|
| 339 |
+
"question": "thinking",
|
| 340 |
+
}
|
| 341 |
+
custom_key = sentiment_to_custom_key.get(sentiment, sentiment)
|
| 342 |
+
if custom_key in self._custom_animations:
|
| 343 |
+
return self._custom_animations[custom_key]
|
| 344 |
+
|
| 345 |
+
# Fall back to default emotion animations
|
| 346 |
+
emotion_map = {
|
| 347 |
+
"greeting": GREETING_EMOTIONS,
|
| 348 |
+
"happy": HAPPY_EMOTIONS,
|
| 349 |
+
"sad": SAD_EMOTIONS,
|
| 350 |
+
"excited": EXCITED_EMOTIONS,
|
| 351 |
+
"question": QUESTION_EMOTIONS,
|
| 352 |
+
"thinking": THINKING_EMOTIONS,
|
| 353 |
+
"apologetic": APOLOGETIC_EMOTIONS,
|
| 354 |
+
"helpful": HELPFUL_EMOTIONS,
|
| 355 |
+
"neutral": NEUTRAL_EMOTIONS,
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
emotions = emotion_map.get(sentiment, NEUTRAL_EMOTIONS)
|
| 359 |
+
|
| 360 |
+
# Try to avoid repeating the same emotion twice in a row
|
| 361 |
+
if len(emotions) > 1 and self._last_emotion in emotions:
|
| 362 |
+
emotions = [e for e in emotions if e != self._last_emotion]
|
| 363 |
+
|
| 364 |
+
emotion = random.choice(emotions)
|
| 365 |
+
self._last_emotion = emotion
|
| 366 |
+
return emotion
|
| 367 |
+
|
| 368 |
+
async def play_emotion_for_text(self, text: str) -> AnimationResult:
|
| 369 |
+
"""Analyze text and maybe play an appropriate emotion animation.
|
| 370 |
+
|
| 371 |
+
This should be called BEFORE speech starts. Uses probability-based
|
| 372 |
+
selection: strong emotions play more often, neutral less often.
|
| 373 |
+
|
| 374 |
+
Args:
|
| 375 |
+
text: The text that will be spoken.
|
| 376 |
+
|
| 377 |
+
Returns:
|
| 378 |
+
AnimationResult with details of what happened.
|
| 379 |
+
"""
|
| 380 |
+
# Analyze sentiment first (always do this for the result)
|
| 381 |
+
sentiment = self._analyze_sentiment(text)
|
| 382 |
+
|
| 383 |
+
if self._is_animating:
|
| 384 |
+
logger.warning("Already animating, skipping")
|
| 385 |
+
return AnimationResult(
|
| 386 |
+
played=False,
|
| 387 |
+
sentiment=sentiment,
|
| 388 |
+
animation="",
|
| 389 |
+
duration=0.0
|
| 390 |
+
)
|
| 391 |
+
|
| 392 |
+
# Check if we should play based on probability
|
| 393 |
+
chance = self._get_animation_chance(sentiment)
|
| 394 |
+
if random.random() > chance:
|
| 395 |
+
logger.debug(f"Skipping animation for '{sentiment}' (chance: {chance:.0%})")
|
| 396 |
+
return AnimationResult(
|
| 397 |
+
played=False,
|
| 398 |
+
sentiment=sentiment,
|
| 399 |
+
animation="",
|
| 400 |
+
duration=0.0
|
| 401 |
+
)
|
| 402 |
+
|
| 403 |
+
self._is_animating = True
|
| 404 |
+
|
| 405 |
+
try:
|
| 406 |
+
emotion = self._get_emotion_for_sentiment(sentiment)
|
| 407 |
+
duration = self._get_animation_duration(sentiment)
|
| 408 |
+
|
| 409 |
+
logger.info(f"Playing emotion '{emotion}' for sentiment '{sentiment}' ({duration}s)")
|
| 410 |
+
|
| 411 |
+
# Play the animation
|
| 412 |
+
success = await self._motion.play_async(
|
| 413 |
+
animation=emotion,
|
| 414 |
+
duration=duration
|
| 415 |
+
)
|
| 416 |
+
|
| 417 |
+
if success:
|
| 418 |
+
# Wait for animation to complete before returning
|
| 419 |
+
await asyncio.sleep(duration)
|
| 420 |
+
logger.info(f"Emotion animation '{emotion}' completed")
|
| 421 |
+
return AnimationResult(
|
| 422 |
+
played=True,
|
| 423 |
+
sentiment=sentiment,
|
| 424 |
+
animation=emotion,
|
| 425 |
+
duration=duration
|
| 426 |
+
)
|
| 427 |
+
else:
|
| 428 |
+
logger.warning(f"Failed to play emotion '{emotion}'")
|
| 429 |
+
return AnimationResult(
|
| 430 |
+
played=False,
|
| 431 |
+
sentiment=sentiment,
|
| 432 |
+
animation=emotion,
|
| 433 |
+
duration=0.0
|
| 434 |
+
)
|
| 435 |
+
|
| 436 |
+
except Exception as e:
|
| 437 |
+
logger.error(f"Error playing emotion animation: {e}")
|
| 438 |
+
return AnimationResult(
|
| 439 |
+
played=False,
|
| 440 |
+
sentiment=sentiment,
|
| 441 |
+
animation="",
|
| 442 |
+
duration=0.0
|
| 443 |
+
)
|
| 444 |
+
finally:
|
| 445 |
+
self._is_animating = False
|
| 446 |
+
|
| 447 |
+
def get_emotion_for_text_sync(self, text: str) -> tuple[str, str]:
|
| 448 |
+
"""Synchronously analyze text and return sentiment and animation.
|
| 449 |
+
|
| 450 |
+
Use this if you need to know the emotion without playing it.
|
| 451 |
+
|
| 452 |
+
Args:
|
| 453 |
+
text: The text to analyze.
|
| 454 |
+
|
| 455 |
+
Returns:
|
| 456 |
+
Tuple of (sentiment, animation_name).
|
| 457 |
+
"""
|
| 458 |
+
sentiment = self._analyze_sentiment(text)
|
| 459 |
+
animation = self._get_emotion_for_sentiment(sentiment)
|
| 460 |
+
return sentiment, animation
|
| 461 |
+
|
| 462 |
+
def analyze_sentiment(self, text: str) -> str:
|
| 463 |
+
"""Public method to analyze text sentiment.
|
| 464 |
+
|
| 465 |
+
Args:
|
| 466 |
+
text: The text to analyze.
|
| 467 |
+
|
| 468 |
+
Returns:
|
| 469 |
+
Sentiment category string.
|
| 470 |
+
"""
|
| 471 |
+
return self._analyze_sentiment(text)
|
| 472 |
+
|
| 473 |
+
async def play_animation(self, animation_name: str, duration: float = 2.0) -> bool:
|
| 474 |
+
"""Play a specific animation by name.
|
| 475 |
+
|
| 476 |
+
Args:
|
| 477 |
+
animation_name: The name of the animation to play.
|
| 478 |
+
duration: Duration in seconds.
|
| 479 |
+
|
| 480 |
+
Returns:
|
| 481 |
+
True if animation was played successfully.
|
| 482 |
+
"""
|
| 483 |
+
if self._is_animating:
|
| 484 |
+
logger.warning("Already animating, skipping")
|
| 485 |
+
return False
|
| 486 |
+
|
| 487 |
+
self._is_animating = True
|
| 488 |
+
|
| 489 |
+
try:
|
| 490 |
+
logger.info(f"Playing animation '{animation_name}' ({duration}s)")
|
| 491 |
+
|
| 492 |
+
success = await self._motion.play_async(
|
| 493 |
+
animation=animation_name,
|
| 494 |
+
duration=duration
|
| 495 |
+
)
|
| 496 |
+
|
| 497 |
+
if success:
|
| 498 |
+
await asyncio.sleep(duration)
|
| 499 |
+
logger.info(f"Animation '{animation_name}' completed")
|
| 500 |
+
return True
|
| 501 |
+
else:
|
| 502 |
+
logger.warning(f"Failed to play animation '{animation_name}'")
|
| 503 |
+
return False
|
| 504 |
+
|
| 505 |
+
except Exception as e:
|
| 506 |
+
logger.error(f"Error playing animation: {e}")
|
| 507 |
+
return False
|
| 508 |
+
finally:
|
| 509 |
+
self._is_animating = False
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
# Singleton instance for global access.
# Remains None until set_animation_coordinator() is called or
# get_animation_coordinator() lazily adopts the coordinator created by
# routes.speech.
_animation_coordinator_instance: Optional["AnimationCoordinator"] = None
|
| 514 |
+
|
| 515 |
+
|
| 516 |
+
def get_animation_coordinator() -> Optional["AnimationCoordinator"]:
    """Get the global AnimationCoordinator instance.

    Returns the global coordinator set by speech.py, or None if not available.
    """
    global _animation_coordinator_instance

    # Lazy fallback: adopt the coordinator created by the speech module's
    # set_services() if nothing was registered here yet.
    if _animation_coordinator_instance is None:
        try:
            from .routes.speech import animation_coordinator as speech_coordinator
        except ImportError as e:
            logger.debug(f"Could not get coordinator from speech: {e}")
        else:
            if speech_coordinator:
                _animation_coordinator_instance = speech_coordinator
                logger.info("Using AnimationCoordinator from speech module")

    return _animation_coordinator_instance
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
def set_animation_coordinator(coordinator: "AnimationCoordinator") -> None:
    """Set the global AnimationCoordinator instance.

    Called by speech.py set_services to share the same instance.

    Args:
        coordinator: The coordinator that all future get_animation_coordinator()
            calls should return.
    """
    global _animation_coordinator_instance
    _animation_coordinator_instance = coordinator
    logger.info("Global AnimationCoordinator instance set")
|
reachys_brain/app_tools.py
ADDED
|
@@ -0,0 +1,892 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Voice-activated app management tools for OpenAI Realtime API.
|
| 2 |
+
|
| 3 |
+
Provides tool functions for creating, activating, and managing custom apps
|
| 4 |
+
via voice commands. These tools are called by OpenAI's function calling feature.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import asyncio
|
| 8 |
+
import json
|
| 9 |
+
import logging
|
| 10 |
+
import os
|
| 11 |
+
import uuid
|
| 12 |
+
from datetime import datetime
|
| 13 |
+
from difflib import SequenceMatcher
|
| 14 |
+
from typing import Callable, Optional
|
| 15 |
+
|
| 16 |
+
import aiohttp
|
| 17 |
+
import httpx
|
| 18 |
+
|
| 19 |
+
from .database import get_database
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# Tool definitions for OpenAI Realtime API
# Each entry follows the Realtime API function-tool shape:
# {"type": "function", "name": ..., "description": ..., "parameters": <JSON Schema>}.
# The descriptions double as model instructions (e.g. which tools require a
# spoken confirmation from the user before being invoked).
APP_TOOLS = [
    {
        "type": "function",
        "name": "create_custom_app",
        "description": (
            "Create a new custom assistant app with a specific personality. "
            "IMPORTANT: Always ask the user for confirmation before calling this tool. "
            "After confirmation, this creates the app and automatically activates it. "
            "The app will have a detailed AI-generated personality based on the description."
        ),
        "parameters": {
            "type": "object",
            "properties": {
                "name": {
                    "type": "string",
                    "description": "A short, catchy name for the app (e.g., 'English Tutor', 'Cooking Helper')"
                },
                "description": {
                    "type": "string",
                    "description": (
                        "A brief description of what the app should do and its personality "
                        "(e.g., 'A patient English tutor that helps with grammar and vocabulary')"
                    )
                },
                "icon_color": {
                    "type": "string",
                    "enum": ["blue", "purple", "pink", "red", "orange", "yellow", "green", "teal", "indigo"],
                    "description": "The color for the app's icon"
                }
            },
            "required": ["name", "description"]
        }
    },
    {
        "type": "function",
        "name": "activate_custom_app",
        "description": (
            "Activate an existing custom app by name. "
            "IMPORTANT: Always ask the user for confirmation before calling this tool. "
            "Use list_custom_apps first if you're not sure which apps are available. "
            "Fuzzy matching is used - you don't need the exact name."
        ),
        "parameters": {
            "type": "object",
            "properties": {
                "app_name": {
                    "type": "string",
                    "description": "The name (or partial name) of the app to activate"
                }
            },
            "required": ["app_name"]
        }
    },
    {
        "type": "function",
        "name": "deactivate_app",
        "description": (
            "Deactivate the current custom app and return to the default Reachy personality. "
            "IMPORTANT: Always ask the user for confirmation before calling this tool. "
            "Call this when the user wants to stop using a custom app."
        ),
        "parameters": {
            "type": "object",
            "properties": {},
            "required": []
        }
    },
    {
        "type": "function",
        "name": "list_custom_apps",
        "description": (
            "List all available custom apps. "
            "Use this to show the user what apps they have created. "
            "No confirmation needed for this tool."
        ),
        "parameters": {
            "type": "object",
            "properties": {},
            "required": []
        }
    },
    {
        "type": "function",
        "name": "wake_up",
        "description": (
            "Wake up the robot by enabling motors. "
            "Use this when the user says 'wake up', 'turn on', or similar. "
            "No confirmation needed - just do it and announce."
        ),
        "parameters": {
            "type": "object",
            "properties": {},
            "required": []
        }
    },
    {
        "type": "function",
        "name": "go_to_sleep",
        "description": (
            "Put the robot to sleep by disabling motors. "
            "Use this when the user says 'go to sleep', 'sleep', 'turn off', or similar. "
            "No confirmation needed - just do it and announce."
        ),
        "parameters": {
            "type": "object",
            "properties": {},
            "required": []
        }
    },
    {
        "type": "function",
        "name": "remember_user_name",
        "description": (
            "Store the user's name for personalized greetings. "
            "Use this when the user tells you their name (e.g., 'My name is John', 'I'm Sarah', 'Call me Mike'). "
            "IMPORTANT: Only use the first name or nickname the user prefers. "
            "This makes future greetings more personal."
        ),
        "parameters": {
            "type": "object",
            "properties": {
                "name": {
                    "type": "string",
                    "description": "The user's name or preferred nickname to remember"
                }
            },
            "required": ["name"]
        }
    },
    {
        "type": "function",
        "name": "get_user_name",
        "description": (
            "Retrieve the user's stored name. "
            "Use this to check if you already know the user's name. "
            "Returns the stored name or null if not set."
        ),
        "parameters": {
            "type": "object",
            "properties": {},
            "required": []
        }
    },
    {
        "type": "function",
        "name": "remember_preferred_country",
        "description": (
            "Store the user's preferred country for timezone and localization. "
            "Use this when the user tells you their country (e.g., 'I live in the Netherlands', "
            "'I'm from Germany', 'My country is Japan'). "
            "This is used to show correct local time and personalize content like news."
        ),
        "parameters": {
            "type": "object",
            "properties": {
                "country": {
                    "type": "string",
                    "description": "The user's country name (e.g., 'Netherlands', 'Germany', 'United States')"
                }
            },
            "required": ["country"]
        }
    },
    {
        "type": "function",
        "name": "get_preferred_country",
        "description": (
            "Retrieve the user's stored preferred country. "
            "Use this to check if you already know the user's country. "
            "Returns the stored country or null if not set."
        ),
        "parameters": {
            "type": "object",
            "properties": {},
            "required": []
        }
    }
]
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
# Country to timezone mapping for common countries
# Uses primary/capital timezone for countries with multiple zones
# Keys are lowercase country names (including common aliases such as
# "holland", "uk", "usa"); values are IANA timezone identifiers.
COUNTRY_TIMEZONE_MAP = {
    # Europe
    "netherlands": "Europe/Amsterdam",
    "the netherlands": "Europe/Amsterdam",
    "holland": "Europe/Amsterdam",
    "germany": "Europe/Berlin",
    "france": "Europe/Paris",
    "united kingdom": "Europe/London",
    "uk": "Europe/London",
    "england": "Europe/London",
    "scotland": "Europe/London",
    "wales": "Europe/London",
    "ireland": "Europe/Dublin",
    "spain": "Europe/Madrid",
    "italy": "Europe/Rome",
    "portugal": "Europe/Lisbon",
    "belgium": "Europe/Brussels",
    "austria": "Europe/Vienna",
    "switzerland": "Europe/Zurich",
    "sweden": "Europe/Stockholm",
    "norway": "Europe/Oslo",
    "denmark": "Europe/Copenhagen",
    "finland": "Europe/Helsinki",
    "poland": "Europe/Warsaw",
    "czech republic": "Europe/Prague",
    "czechia": "Europe/Prague",
    "greece": "Europe/Athens",
    "turkey": "Europe/Istanbul",
    "russia": "Europe/Moscow",
    "ukraine": "Europe/Kyiv",

    # North America
    "united states": "America/New_York",
    "usa": "America/New_York",
    "us": "America/New_York",
    "america": "America/New_York",
    "canada": "America/Toronto",
    "mexico": "America/Mexico_City",

    # South America
    "brazil": "America/Sao_Paulo",
    "argentina": "America/Buenos_Aires",
    "chile": "America/Santiago",
    "colombia": "America/Bogota",
    "peru": "America/Lima",

    # Asia
    "japan": "Asia/Tokyo",
    "china": "Asia/Shanghai",
    "south korea": "Asia/Seoul",
    "korea": "Asia/Seoul",
    "india": "Asia/Kolkata",
    "indonesia": "Asia/Jakarta",
    "thailand": "Asia/Bangkok",
    "vietnam": "Asia/Ho_Chi_Minh",
    "philippines": "Asia/Manila",
    "malaysia": "Asia/Kuala_Lumpur",
    "singapore": "Asia/Singapore",
    "hong kong": "Asia/Hong_Kong",
    "taiwan": "Asia/Taipei",
    "pakistan": "Asia/Karachi",
    "bangladesh": "Asia/Dhaka",
    "israel": "Asia/Jerusalem",
    "united arab emirates": "Asia/Dubai",
    "uae": "Asia/Dubai",
    "saudi arabia": "Asia/Riyadh",

    # Oceania
    "australia": "Australia/Sydney",
    "new zealand": "Pacific/Auckland",

    # Africa
    "south africa": "Africa/Johannesburg",
    "egypt": "Africa/Cairo",
    "nigeria": "Africa/Lagos",
    "kenya": "Africa/Nairobi",
    "morocco": "Africa/Casablanca",
}
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
def get_timezone_for_country(country: str) -> str | None:
    """Get the timezone for a country name.

    Args:
        country: Country name (case-insensitive).

    Returns:
        IANA timezone string or None if country not found.
    """
    # Normalize to the lowercase, trimmed form used as map keys.
    key = country.lower().strip() if country else ""
    if not key:
        return None
    return COUNTRY_TIMEZONE_MAP.get(key)
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
async def generate_system_prompt(description: str, name: str, api_key: str) -> str:
    """Generate a detailed system prompt from a brief description using OpenAI Chat API.

    Args:
        description: Brief description of what the app should do.
        name: Name of the app being created.
        api_key: OpenAI API key for the Chat API call.

    Returns:
        A detailed system prompt for the AI personality.

    Raises:
        RuntimeError: If the Chat API responds with a non-200 status.
    """
    # Meta-prompt instructing the model to author the app's persona prompt.
    meta_prompt = f"""Create a detailed system prompt for an AI robot assistant named Reachy with this role: {name} - {description}

The prompt should:
- Define the assistant's personality and character traits
- Specify communication style (formal/casual, verbose/concise)
- Outline areas of expertise and knowledge
- Include behavior guidelines and boundaries
- Mention that responses should be conversational and suitable for text-to-speech
- Keep responses typically 1-3 sentences unless more detail is needed
- Remember Reachy is a physical robot that can express emotions

Keep the prompt under 400 words. Output ONLY the system prompt text, nothing else."""

    endpoint = "https://api.openai.com/v1/chat/completions"
    request_headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }
    request_body = {
        "model": "gpt-4o-mini",
        "messages": [{"role": "user", "content": meta_prompt}],
        "max_tokens": 600,
        "temperature": 0.7
    }

    async with aiohttp.ClientSession() as session:
        async with session.post(endpoint, headers=request_headers, json=request_body) as response:
            if response.status != 200:
                error_text = await response.text()
                logger.error(f"OpenAI Chat API error: {error_text}")
                raise RuntimeError(f"Failed to generate system prompt: {response.status}")

            data = await response.json()
            return data["choices"][0]["message"]["content"].strip()
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def find_best_matching_app(apps: list[dict], query: str) -> Optional[dict]:
    """Find the app whose name best matches *query* using fuzzy matching.

    Args:
        apps: List of app dictionaries from the database.
        query: The search query (partial or full app name).

    Returns:
        The best matching app, or None if no good match found.
    """
    if not apps:
        return None

    needle = query.lower().strip()

    def score_for(candidate_name: str) -> float:
        # Substring containment (either direction) outranks pure similarity.
        if needle in candidate_name or candidate_name in needle:
            return 0.9
        return SequenceMatcher(None, needle, candidate_name).ratio()

    winner: Optional[dict] = None
    winner_score = 0.0
    for candidate in apps:
        candidate_name = candidate["name"].lower()
        # An exact (case-insensitive) name match short-circuits the search.
        if candidate_name == needle:
            return candidate
        current = score_for(candidate_name)
        if current > winner_score:
            winner_score = current
            winner = candidate

    # Require at least 50% similarity before accepting a fuzzy result.
    return winner if winner_score >= 0.5 else None
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
class AppToolsHandler:
|
| 392 |
+
"""Handles execution of app management tools called by OpenAI."""
|
| 393 |
+
|
| 394 |
+
    # Type aliases for callback signatures.
    # OnPersonalityChange: (system_prompt, enabled_tools) -> None
    # OnAppChange: (event payload dict) -> None
    OnPersonalityChange = Callable[[Optional[str], Optional[list[str]]], None]
    OnAppChange = Callable[[dict], None]
|
| 397 |
+
|
| 398 |
+
    def __init__(self):
        """Initialize the tools handler.

        Both callbacks default to None; the owning service assigns them after
        construction. No app is active until created or activated via a tool.
        """
        # Currently active custom app record, or None for the default persona.
        self._active_app: Optional[dict] = None

        # Callback to update the OpenAI service's personality
        # Signature: (system_prompt: Optional[str], enabled_tools: Optional[list[str]]) -> None
        self.on_personality_change: Optional["AppToolsHandler.OnPersonalityChange"] = None
        # Callback to notify iOS of app changes
        # Signature: (data: dict) -> None
        self.on_app_change: Optional["AppToolsHandler.OnAppChange"] = None
|
| 408 |
+
|
| 409 |
+
    @property
    def active_app(self) -> Optional[dict]:
        """Get the currently active custom app.

        Returns:
            The active app's record dict, or None when no custom app is active.
        """
        return self._active_app
|
| 413 |
+
|
| 414 |
+
async def execute_tool(self, tool_name: str, arguments: dict) -> dict:
|
| 415 |
+
"""Execute a tool and return the result.
|
| 416 |
+
|
| 417 |
+
Args:
|
| 418 |
+
tool_name: Name of the tool to execute.
|
| 419 |
+
arguments: Tool arguments from OpenAI.
|
| 420 |
+
|
| 421 |
+
Returns:
|
| 422 |
+
Tool result dictionary with 'success' and relevant data.
|
| 423 |
+
"""
|
| 424 |
+
logger.info(f"🔧 Executing tool: {tool_name} with args: {arguments}")
|
| 425 |
+
|
| 426 |
+
try:
|
| 427 |
+
if tool_name == "create_custom_app":
|
| 428 |
+
return await self._create_app(
|
| 429 |
+
name=arguments.get("name", ""),
|
| 430 |
+
description=arguments.get("description", ""),
|
| 431 |
+
icon_color=arguments.get("icon_color", "blue")
|
| 432 |
+
)
|
| 433 |
+
|
| 434 |
+
elif tool_name == "activate_custom_app":
|
| 435 |
+
return await self._activate_app(
|
| 436 |
+
app_name=arguments.get("app_name", "")
|
| 437 |
+
)
|
| 438 |
+
|
| 439 |
+
elif tool_name == "deactivate_app":
|
| 440 |
+
return await self._deactivate_app()
|
| 441 |
+
|
| 442 |
+
elif tool_name == "list_custom_apps":
|
| 443 |
+
return await self._list_apps()
|
| 444 |
+
|
| 445 |
+
elif tool_name == "wake_up":
|
| 446 |
+
return await self._wake_up()
|
| 447 |
+
|
| 448 |
+
elif tool_name == "go_to_sleep":
|
| 449 |
+
return await self._go_to_sleep()
|
| 450 |
+
|
| 451 |
+
elif tool_name == "remember_user_name":
|
| 452 |
+
return await self._remember_user_name(
|
| 453 |
+
name=arguments.get("name", "")
|
| 454 |
+
)
|
| 455 |
+
|
| 456 |
+
elif tool_name == "get_user_name":
|
| 457 |
+
return await self._get_user_name()
|
| 458 |
+
|
| 459 |
+
elif tool_name == "remember_preferred_country":
|
| 460 |
+
return await self._remember_preferred_country(
|
| 461 |
+
country=arguments.get("country", "")
|
| 462 |
+
)
|
| 463 |
+
|
| 464 |
+
elif tool_name == "get_preferred_country":
|
| 465 |
+
return await self._get_preferred_country()
|
| 466 |
+
|
| 467 |
+
else:
|
| 468 |
+
return {"success": False, "error": f"Unknown tool: {tool_name}"}
|
| 469 |
+
|
| 470 |
+
except Exception as e:
|
| 471 |
+
logger.error(f"Tool execution error: {e}", exc_info=True)
|
| 472 |
+
return {"success": False, "error": str(e)}
|
| 473 |
+
|
| 474 |
+
    async def _create_app(self, name: str, description: str, icon_color: str = "blue") -> dict:
        """Create a new custom app and activate it.

        Generates a persona prompt via the Chat API, persists the app record,
        marks it active, and fires both notification callbacks.

        Args:
            name: Display name for the app.
            description: Brief description for prompt generation.
            icon_color: Icon background color.

        Returns:
            Result dictionary with success status and app info.
        """
        if not name or not description:
            return {"success": False, "error": "Name and description are required"}

        # Get API key for prompt generation
        api_key = os.environ.get("OPENAI_API_KEY")
        if not api_key:
            return {"success": False, "error": "OpenAI API key not available"}

        try:
            # Generate detailed system prompt
            logger.info(f"🧠 Generating system prompt for: {name}")
            system_prompt = await generate_system_prompt(description, name, api_key)

            # Create app data
            app_id = str(uuid.uuid4())
            # NOTE(review): datetime.utcnow() is deprecated (3.12+) and yields a
            # naive timestamp; datetime.now(timezone.utc) would be preferred if
            # consumers can tolerate the "+00:00" suffix — confirm before changing.
            now = datetime.utcnow().isoformat()

            app_data = {
                "id": app_id,
                "name": name,
                "description": description,
                "system_prompt": system_prompt,
                "voice_id": "",  # Use default voice
                "emotion_animations": {},  # Use default animations
                "icon_color": icon_color,
                "created_at": now,
            }

            # Save to database
            db = get_database()
            created_app = await db.create_app(app_data)

            logger.info(f"✅ Created app: {name} ({app_id})")

            # Activate the new app
            self._active_app = created_app

            # Notify personality change with enabled tools
            # (empty tool list is normalized to None, meaning "defaults").
            if self.on_personality_change:
                enabled_tools = created_app.get("enabled_tools", [])
                self.on_personality_change(system_prompt, enabled_tools if enabled_tools else None)

            # Notify iOS
            if self.on_app_change:
                await self.on_app_change({
                    "type": "app_activated",
                    "app": created_app
                })

            return {
                "success": True,
                "message": f"Created and activated '{name}'",
                "app_name": name,
                "app_id": app_id
            }

        except Exception as e:
            logger.error(f"Failed to create app: {e}", exc_info=True)
            return {"success": False, "error": str(e)}
|
| 544 |
+
|
| 545 |
+
    async def _activate_app(self, app_name: str) -> dict:
        """Activate an existing app by name.

        Looks the app up with fuzzy matching, marks it active, and fires both
        notification callbacks.

        Args:
            app_name: Name or partial name of the app to activate.

        Returns:
            Result dictionary with success status.
        """
        if not app_name:
            return {"success": False, "error": "App name is required"}

        try:
            # Get all apps from database
            db = get_database()
            apps = await db.get_all_apps()

            if not apps:
                return {"success": False, "error": "No custom apps found. Create one first!"}

            # Find best matching app (exact > substring > similarity >= 0.5)
            app = find_best_matching_app(apps, app_name)

            if not app:
                # Include the available names so the model can suggest options.
                app_names = [a["name"] for a in apps]
                return {
                    "success": False,
                    "error": f"No app found matching '{app_name}'",
                    "available_apps": app_names
                }

            # Activate the app
            self._active_app = app

            # Notify personality change with enabled tools
            # (empty tool list is normalized to None, meaning "defaults").
            if self.on_personality_change:
                enabled_tools = app.get("enabled_tools", [])
                self.on_personality_change(
                    app["system_prompt"],
                    enabled_tools if enabled_tools else None
                )

            # Notify iOS
            if self.on_app_change:
                await self.on_app_change({
                    "type": "app_activated",
                    "app": app
                })

            logger.info(f"✅ Activated app: {app['name']}")

            return {
                "success": True,
                "message": f"Activated '{app['name']}'",
                "app_name": app["name"]
            }

        except Exception as e:
            logger.error(f"Failed to activate app: {e}", exc_info=True)
            return {"success": False, "error": str(e)}
|
| 605 |
+
|
| 606 |
+
async def _deactivate_app(self) -> dict:
|
| 607 |
+
"""Deactivate the current app and return to default personality.
|
| 608 |
+
|
| 609 |
+
Returns:
|
| 610 |
+
Result dictionary with success status.
|
| 611 |
+
"""
|
| 612 |
+
if not self._active_app:
|
| 613 |
+
return {
|
| 614 |
+
"success": True,
|
| 615 |
+
"message": "Already using default Reachy personality"
|
| 616 |
+
}
|
| 617 |
+
|
| 618 |
+
app_name = self._active_app["name"]
|
| 619 |
+
self._active_app = None
|
| 620 |
+
|
| 621 |
+
# Clear personality (return to default)
|
| 622 |
+
if self.on_personality_change:
|
| 623 |
+
self.on_personality_change(None) # None signals default
|
| 624 |
+
|
| 625 |
+
# Notify iOS
|
| 626 |
+
if self.on_app_change:
|
| 627 |
+
await self.on_app_change({
|
| 628 |
+
"type": "app_deactivated"
|
| 629 |
+
})
|
| 630 |
+
|
| 631 |
+
logger.info(f"✅ Deactivated app: {app_name}")
|
| 632 |
+
|
| 633 |
+
return {
|
| 634 |
+
"success": True,
|
| 635 |
+
"message": f"Deactivated '{app_name}'. Now using default Reachy personality."
|
| 636 |
+
}
|
| 637 |
+
|
| 638 |
+
async def _list_apps(self) -> dict:
|
| 639 |
+
"""List all available custom apps.
|
| 640 |
+
|
| 641 |
+
Returns:
|
| 642 |
+
Result dictionary with list of apps.
|
| 643 |
+
"""
|
| 644 |
+
try:
|
| 645 |
+
db = get_database()
|
| 646 |
+
apps = await db.get_all_apps()
|
| 647 |
+
|
| 648 |
+
if not apps:
|
| 649 |
+
return {
|
| 650 |
+
"success": True,
|
| 651 |
+
"message": "No custom apps found. You can create one by asking me!",
|
| 652 |
+
"apps": [],
|
| 653 |
+
"count": 0
|
| 654 |
+
}
|
| 655 |
+
|
| 656 |
+
# Simplify app data for the response
|
| 657 |
+
app_list = [
|
| 658 |
+
{
|
| 659 |
+
"name": app["name"],
|
| 660 |
+
"description": app["description"],
|
| 661 |
+
"is_active": self._active_app and self._active_app["id"] == app["id"]
|
| 662 |
+
}
|
| 663 |
+
for app in apps
|
| 664 |
+
]
|
| 665 |
+
|
| 666 |
+
return {
|
| 667 |
+
"success": True,
|
| 668 |
+
"apps": app_list,
|
| 669 |
+
"count": len(apps),
|
| 670 |
+
"active_app": self._active_app["name"] if self._active_app else None
|
| 671 |
+
}
|
| 672 |
+
|
| 673 |
+
except Exception as e:
|
| 674 |
+
logger.error(f"Failed to list apps: {e}", exc_info=True)
|
| 675 |
+
return {"success": False, "error": str(e)}
|
| 676 |
+
|
| 677 |
+
async def _wake_up(self) -> dict:
|
| 678 |
+
"""Wake up the robot by enabling motors.
|
| 679 |
+
|
| 680 |
+
Returns:
|
| 681 |
+
Result dictionary with success status.
|
| 682 |
+
"""
|
| 683 |
+
daemon_url = "http://localhost:8000"
|
| 684 |
+
|
| 685 |
+
try:
|
| 686 |
+
async with httpx.AsyncClient(timeout=30.0) as client:
|
| 687 |
+
response = await client.post(f"{daemon_url}/api/motors/set_mode/enabled")
|
| 688 |
+
response.raise_for_status()
|
| 689 |
+
result = response.json()
|
| 690 |
+
status_msg = result.get("status", "Motors enabled")
|
| 691 |
+
|
| 692 |
+
logger.info(f"🌅 Robot waking up: {status_msg}")
|
| 693 |
+
|
| 694 |
+
return {
|
| 695 |
+
"success": True,
|
| 696 |
+
"message": "I'm awake! Motors are now enabled.",
|
| 697 |
+
"status": status_msg
|
| 698 |
+
}
|
| 699 |
+
|
| 700 |
+
except httpx.HTTPStatusError as e:
|
| 701 |
+
try:
|
| 702 |
+
error_detail = e.response.json().get("detail", str(e))
|
| 703 |
+
except Exception:
|
| 704 |
+
error_detail = str(e)
|
| 705 |
+
logger.error(f"Failed to wake up: {error_detail}")
|
| 706 |
+
return {"success": False, "error": error_detail}
|
| 707 |
+
|
| 708 |
+
except Exception as e:
|
| 709 |
+
logger.error(f"Failed to wake up: {e}", exc_info=True)
|
| 710 |
+
return {"success": False, "error": str(e)}
|
| 711 |
+
|
| 712 |
+
async def _go_to_sleep(self) -> dict:
|
| 713 |
+
"""Put the robot to sleep by disabling motors.
|
| 714 |
+
|
| 715 |
+
Returns:
|
| 716 |
+
Result dictionary with success status.
|
| 717 |
+
"""
|
| 718 |
+
daemon_url = "http://localhost:8000"
|
| 719 |
+
|
| 720 |
+
try:
|
| 721 |
+
async with httpx.AsyncClient(timeout=30.0) as client:
|
| 722 |
+
response = await client.post(f"{daemon_url}/api/motors/set_mode/disabled")
|
| 723 |
+
response.raise_for_status()
|
| 724 |
+
result = response.json()
|
| 725 |
+
status_msg = result.get("status", "Motors disabled")
|
| 726 |
+
|
| 727 |
+
logger.info(f"🌙 Robot going to sleep: {status_msg}")
|
| 728 |
+
|
| 729 |
+
return {
|
| 730 |
+
"success": True,
|
| 731 |
+
"message": "Going to sleep now. Motors are disabled. Goodnight!",
|
| 732 |
+
"status": status_msg
|
| 733 |
+
}
|
| 734 |
+
|
| 735 |
+
except httpx.HTTPStatusError as e:
|
| 736 |
+
try:
|
| 737 |
+
error_detail = e.response.json().get("detail", str(e))
|
| 738 |
+
except Exception:
|
| 739 |
+
error_detail = str(e)
|
| 740 |
+
logger.error(f"Failed to go to sleep: {error_detail}")
|
| 741 |
+
return {"success": False, "error": error_detail}
|
| 742 |
+
|
| 743 |
+
except Exception as e:
|
| 744 |
+
logger.error(f"Failed to go to sleep: {e}", exc_info=True)
|
| 745 |
+
return {"success": False, "error": str(e)}
|
| 746 |
+
|
| 747 |
+
async def _remember_user_name(self, name: str) -> dict:
|
| 748 |
+
"""Store the user's name for personalized interactions.
|
| 749 |
+
|
| 750 |
+
Args:
|
| 751 |
+
name: The user's name to remember.
|
| 752 |
+
|
| 753 |
+
Returns:
|
| 754 |
+
Result dictionary with success status.
|
| 755 |
+
"""
|
| 756 |
+
if not name or not name.strip():
|
| 757 |
+
return {"success": False, "error": "Name cannot be empty"}
|
| 758 |
+
|
| 759 |
+
# Clean up the name (capitalize first letter, trim whitespace)
|
| 760 |
+
clean_name = name.strip().title()
|
| 761 |
+
|
| 762 |
+
try:
|
| 763 |
+
db = get_database()
|
| 764 |
+
await db.set_user_setting("user_name", clean_name)
|
| 765 |
+
|
| 766 |
+
logger.info(f"👤 Remembered user name: {clean_name}")
|
| 767 |
+
|
| 768 |
+
return {
|
| 769 |
+
"success": True,
|
| 770 |
+
"message": f"Nice to meet you, {clean_name}! I'll remember your name.",
|
| 771 |
+
"name": clean_name
|
| 772 |
+
}
|
| 773 |
+
|
| 774 |
+
except Exception as e:
|
| 775 |
+
logger.error(f"Failed to remember user name: {e}", exc_info=True)
|
| 776 |
+
return {"success": False, "error": str(e)}
|
| 777 |
+
|
| 778 |
+
async def _get_user_name(self) -> dict:
|
| 779 |
+
"""Retrieve the stored user name.
|
| 780 |
+
|
| 781 |
+
Returns:
|
| 782 |
+
Result dictionary with the user's name if stored.
|
| 783 |
+
"""
|
| 784 |
+
try:
|
| 785 |
+
db = get_database()
|
| 786 |
+
name = await db.get_user_setting("user_name")
|
| 787 |
+
|
| 788 |
+
if name:
|
| 789 |
+
logger.info(f"👤 Retrieved user name: {name}")
|
| 790 |
+
return {
|
| 791 |
+
"success": True,
|
| 792 |
+
"name": name,
|
| 793 |
+
"message": f"The user's name is {name}"
|
| 794 |
+
}
|
| 795 |
+
else:
|
| 796 |
+
return {
|
| 797 |
+
"success": True,
|
| 798 |
+
"name": None,
|
| 799 |
+
"message": "I don't know the user's name yet"
|
| 800 |
+
}
|
| 801 |
+
|
| 802 |
+
except Exception as e:
|
| 803 |
+
logger.error(f"Failed to get user name: {e}", exc_info=True)
|
| 804 |
+
return {"success": False, "error": str(e)}
|
| 805 |
+
|
| 806 |
+
async def _remember_preferred_country(self, country: str) -> dict:
|
| 807 |
+
"""Store the user's preferred country for timezone and localization.
|
| 808 |
+
|
| 809 |
+
Args:
|
| 810 |
+
country: The user's country to remember.
|
| 811 |
+
|
| 812 |
+
Returns:
|
| 813 |
+
Result dictionary with success status.
|
| 814 |
+
"""
|
| 815 |
+
if not country or not country.strip():
|
| 816 |
+
return {"success": False, "error": "Country cannot be empty"}
|
| 817 |
+
|
| 818 |
+
# Clean up the country name (capitalize properly)
|
| 819 |
+
clean_country = country.strip().title()
|
| 820 |
+
|
| 821 |
+
# Check if we have a timezone mapping for this country
|
| 822 |
+
timezone = get_timezone_for_country(country)
|
| 823 |
+
|
| 824 |
+
try:
|
| 825 |
+
db = get_database()
|
| 826 |
+
await db.set_user_setting("preferred_country", clean_country)
|
| 827 |
+
|
| 828 |
+
logger.info(f"🌍 Remembered preferred country: {clean_country}")
|
| 829 |
+
|
| 830 |
+
response = {
|
| 831 |
+
"success": True,
|
| 832 |
+
"message": f"Got it! I'll remember that you're in {clean_country}.",
|
| 833 |
+
"country": clean_country
|
| 834 |
+
}
|
| 835 |
+
|
| 836 |
+
if timezone:
|
| 837 |
+
response["timezone"] = timezone
|
| 838 |
+
response["message"] += f" I'll use {timezone} for showing you the correct local time."
|
| 839 |
+
|
| 840 |
+
return response
|
| 841 |
+
|
| 842 |
+
except Exception as e:
|
| 843 |
+
logger.error(f"Failed to remember preferred country: {e}", exc_info=True)
|
| 844 |
+
return {"success": False, "error": str(e)}
|
| 845 |
+
|
| 846 |
+
async def _get_preferred_country(self) -> dict:
|
| 847 |
+
"""Retrieve the stored preferred country.
|
| 848 |
+
|
| 849 |
+
Returns:
|
| 850 |
+
Result dictionary with the user's country if stored.
|
| 851 |
+
"""
|
| 852 |
+
try:
|
| 853 |
+
db = get_database()
|
| 854 |
+
country = await db.get_user_setting("preferred_country")
|
| 855 |
+
|
| 856 |
+
if country:
|
| 857 |
+
timezone = get_timezone_for_country(country)
|
| 858 |
+
logger.info(f"🌍 Retrieved preferred country: {country}")
|
| 859 |
+
return {
|
| 860 |
+
"success": True,
|
| 861 |
+
"country": country,
|
| 862 |
+
"timezone": timezone,
|
| 863 |
+
"message": f"The user's preferred country is {country}"
|
| 864 |
+
}
|
| 865 |
+
else:
|
| 866 |
+
return {
|
| 867 |
+
"success": True,
|
| 868 |
+
"country": None,
|
| 869 |
+
"timezone": None,
|
| 870 |
+
"message": "I don't know the user's country yet"
|
| 871 |
+
}
|
| 872 |
+
|
| 873 |
+
except Exception as e:
|
| 874 |
+
logger.error(f"Failed to get preferred country: {e}", exc_info=True)
|
| 875 |
+
return {"success": False, "error": str(e)}
|
| 876 |
+
|
| 877 |
+
|
| 878 |
+
# Module-level singleton; created lazily by get_tools_handler().
_tools_handler: Optional[AppToolsHandler] = None


def get_tools_handler() -> AppToolsHandler:
    """Return the process-wide AppToolsHandler, creating it on first use.

    Returns:
        The AppToolsHandler singleton.
    """
    global _tools_handler
    if _tools_handler is None:
        _tools_handler = AppToolsHandler()
    return _tools_handler
|
| 892 |
+
|
reachys_brain/audio_capture.py
ADDED
|
@@ -0,0 +1,375 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Audio capture service for microphone input on Reachy.
|
| 2 |
+
|
| 3 |
+
Supports both real hardware (robot mode) and mock mode (cloud deployments).
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import asyncio
|
| 7 |
+
import logging
|
| 8 |
+
import threading
|
| 9 |
+
from typing import Callable, Optional
|
| 10 |
+
|
| 11 |
+
from .config import get_config
|
| 12 |
+
|
| 13 |
+
try:
|
| 14 |
+
import numpy as np
|
| 15 |
+
NUMPY_AVAILABLE = True
|
| 16 |
+
except ImportError:
|
| 17 |
+
NUMPY_AVAILABLE = False
|
| 18 |
+
np = None # type: ignore
|
| 19 |
+
|
| 20 |
+
try:
|
| 21 |
+
import sounddevice as sd
|
| 22 |
+
SOUNDDEVICE_AVAILABLE = True
|
| 23 |
+
except ImportError:
|
| 24 |
+
SOUNDDEVICE_AVAILABLE = False
|
| 25 |
+
sd = None # type: ignore
|
| 26 |
+
|
| 27 |
+
logger = logging.getLogger(__name__)
|
| 28 |
+
|
| 29 |
+
# OpenAI Realtime expects 24kHz mono PCM16
|
| 30 |
+
TARGET_SAMPLE_RATE = 24000
|
| 31 |
+
TARGET_CHANNELS = 1
|
| 32 |
+
CHUNK_DURATION_MS = 100 # Send audio in 100ms chunks
|
| 33 |
+
CHUNK_SAMPLES = int(TARGET_SAMPLE_RATE * CHUNK_DURATION_MS / 1000)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class AudioCaptureService:
    """Service for capturing audio from the microphone.

    Captures audio from the default input device, resamples to 24kHz mono PCM16,
    and streams chunks via a callback.

    In cloud mode, this service is disabled and returns mock data.
    """

    def __init__(self) -> None:
        """Initialize the audio capture service."""
        config = get_config()

        # Check if audio capture is enabled
        self._enabled = config.enable_audio_capture and SOUNDDEVICE_AVAILABLE and NUMPY_AVAILABLE

        if not self._enabled:
            if config.is_cloud_mode:
                logger.info("Audio capture disabled (cloud mode)")
            elif not SOUNDDEVICE_AVAILABLE:
                logger.warning("sounddevice not available - audio capture disabled")
            elif not NUMPY_AVAILABLE:
                logger.warning("numpy not available - audio capture disabled")

        # sounddevice.InputStream while capturing; None when idle.
        self._stream = None
        self._is_capturing = False
        self._is_paused = False

        # Callback for audio chunks
        self.on_audio_chunk: Optional[Callable[[bytes], None]] = None

        # Audio buffer for collecting samples
        # NOTE(review): np.array is called unconditionally, so construction
        # raises AttributeError when numpy is missing even though _enabled
        # guards that case above — confirm numpy is a hard dependency.
        self._buffer = np.array([], dtype=np.int16)
        self._lock = threading.Lock()

        # Detect available devices
        self._input_device: Optional[int] = None
        self._native_sample_rate: int = TARGET_SAMPLE_RATE

        if SOUNDDEVICE_AVAILABLE:
            self._detect_input_device()

    def _detect_input_device(self) -> None:
        """Detect the default input device and its sample rate.

        Preference order: the Reachy microphone ("reachymini_audio_src"),
        then the system default input, then any device with input channels.
        Leaves _input_device as None when nothing usable is found.
        """
        try:
            devices = sd.query_devices()

            # Log all available devices for debugging
            logger.info(f"🔍 Found {len(devices)} audio devices:")
            for i, device in enumerate(devices):
                if device.get("max_input_channels", 0) > 0:
                    logger.info(
                        f" [{i}] INPUT: {device.get('name', 'Unknown')} "
                        f"(channels: {device.get('max_input_channels')}, "
                        f"rate: {device.get('default_samplerate')}Hz)"
                    )

            # First, look for reachymini_audio_src specifically
            for i, device in enumerate(devices):
                if "reachymini_audio_src" in device.get("name", ""):
                    self._input_device = i
                    self._native_sample_rate = int(device.get("default_samplerate", 16000))
                    logger.info(
                        f"✅ Using Reachy microphone: {device.get('name', 'Unknown')} "
                        f"at {self._native_sample_rate}Hz"
                    )
                    return

            logger.warning("⚠️ Reachy microphone (reachymini_audio_src) not found, trying fallbacks...")

            # Fallback to default input device
            default_input = sd.default.device[0]

            if default_input is not None and default_input >= 0:
                device_info = devices[default_input]
                self._input_device = default_input
                self._native_sample_rate = int(device_info.get("default_samplerate", 16000))
                logger.info(
                    f"Using fallback input device: {device_info.get('name', 'Unknown')} "
                    f"at {self._native_sample_rate}Hz"
                )
            else:
                # Try to find any input device
                for i, device in enumerate(devices):
                    if device.get("max_input_channels", 0) > 0:
                        self._input_device = i
                        self._native_sample_rate = int(device.get("default_samplerate", 16000))
                        logger.info(
                            f"Using fallback input device: {device.get('name', 'Unknown')} "
                            f"at {self._native_sample_rate}Hz"
                        )
                        break
                else:
                    # for/else: no input-capable device was found at all.
                    logger.error("❌ No input device found! Microphone will not work.")

        except Exception as e:
            logger.error(f"❌ Error detecting input device: {e}", exc_info=True)

    @property
    def is_available(self) -> bool:
        """Check if audio capture is available."""
        return SOUNDDEVICE_AVAILABLE and self._input_device is not None

    @property
    def is_capturing(self) -> bool:
        """Check if currently capturing audio."""
        return self._is_capturing

    @property
    def is_paused(self) -> bool:
        """Check if capture is paused."""
        return self._is_paused

    def _resample(self, audio: np.ndarray, orig_rate: int, target_rate: int) -> np.ndarray:
        """Resample audio to target sample rate.

        Uses simple linear interpolation for speed.
        For production, consider using scipy.signal.resample or librosa.

        Args:
            audio: Input audio samples.
            orig_rate: Original sample rate.
            target_rate: Target sample rate.

        Returns:
            Resampled audio samples.
        """
        if orig_rate == target_rate:
            return audio

        # Calculate number of output samples
        duration = len(audio) / orig_rate
        num_samples = int(duration * target_rate)

        # Linear interpolation
        x_orig = np.linspace(0, 1, len(audio))
        x_new = np.linspace(0, 1, num_samples)
        resampled = np.interp(x_new, x_orig, audio.astype(np.float32))

        return resampled.astype(np.int16)

    def _audio_callback(self, indata: np.ndarray, frames: int, time_info, status) -> None:
        """Callback for audio stream.

        Called by sounddevice when audio data is available.
        Runs on sounddevice's internal audio thread, not the main thread,
        hence the lock around buffer mutations.
        """
        if status:
            logger.warning(f"Audio status: {status}")

        if self._is_paused or not self.on_audio_chunk:
            return

        try:
            # Convert to int16 if needed
            if indata.dtype == np.float32:
                audio = (indata * 32767).astype(np.int16)
            else:
                audio = indata.astype(np.int16)

            # Convert to mono if stereo
            if len(audio.shape) > 1 and audio.shape[1] > 1:
                audio = audio.mean(axis=1).astype(np.int16)
            else:
                audio = audio.flatten()

            # Resample to target rate if needed
            if self._native_sample_rate != TARGET_SAMPLE_RATE:
                audio = self._resample(audio, self._native_sample_rate, TARGET_SAMPLE_RATE)

            # Add to buffer
            with self._lock:
                self._buffer = np.concatenate([self._buffer, audio])

                # Send chunks when we have enough samples
                # NOTE(review): on_audio_chunk fires while _lock is held —
                # confirm the callback never re-enters this service or blocks.
                while len(self._buffer) >= CHUNK_SAMPLES:
                    chunk = self._buffer[:CHUNK_SAMPLES]
                    self._buffer = self._buffer[CHUNK_SAMPLES:]

                    # Convert to bytes and send
                    chunk_bytes = chunk.tobytes()
                    self.on_audio_chunk(chunk_bytes)

        except Exception as e:
            logger.error(f"Error in audio callback: {e}")

    def start_capture(self) -> bool:
        """Start capturing audio from the microphone.

        Returns:
            True if capture started successfully.
        """
        if not self.is_available:
            logger.error("❌ Audio capture not available - no input device detected")
            return False

        if self._is_capturing:
            logger.warning("Already capturing audio")
            return True

        try:
            # Calculate block size for native sample rate
            block_size = int(self._native_sample_rate * CHUNK_DURATION_MS / 1000)

            logger.info(
                f"🎙️ Opening audio stream: device={self._input_device}, "
                f"rate={self._native_sample_rate}Hz, blocksize={block_size}"
            )

            self._stream = sd.InputStream(
                device=self._input_device,
                channels=1,
                samplerate=self._native_sample_rate,
                dtype=np.int16,
                blocksize=block_size,
                callback=self._audio_callback,
            )

            # NOTE(review): buffer reset here is not under _lock; safe only
            # because the stream (and its callback thread) isn't running yet.
            self._buffer = np.array([], dtype=np.int16)
            self._stream.start()
            self._is_capturing = True
            self._is_paused = False

            logger.info("✅ Audio capture started successfully")
            return True

        except Exception as e:
            logger.error(f"❌ Failed to start audio capture: {e}", exc_info=True)
            return False

    def stop_capture(self) -> None:
        """Stop capturing audio and discard any buffered samples."""
        if not self._is_capturing:
            return

        try:
            if self._stream:
                self._stream.stop()
                self._stream.close()
                self._stream = None

            self._is_capturing = False
            self._is_paused = False
            self._buffer = np.array([], dtype=np.int16)

            logger.info("Stopped audio capture")

        except Exception as e:
            logger.error(f"Error stopping audio capture: {e}")

    def pause_capture(self) -> None:
        """Pause audio capture (stop sending chunks).

        Use this when the robot is speaking to prevent feedback.
        """
        self._is_paused = True
        logger.debug("Paused audio capture")

    def resume_capture(self) -> None:
        """Resume audio capture after pausing."""
        self._is_paused = False
        # Clear buffer to avoid sending stale audio
        with self._lock:
            self._buffer = np.array([], dtype=np.int16)
        logger.debug("Resumed audio capture")

    def cleanup(self) -> None:
        """Clean up resources."""
        self.stop_capture()
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
class AsyncAudioCaptureService:
    """Async wrapper for AudioCaptureService.

    Provides async methods and uses an asyncio queue for audio chunks.
    Chunks arrive on sounddevice's callback thread and are marshalled onto
    the event loop via call_soon_threadsafe.
    """

    def __init__(self) -> None:
        """Initialize the async audio capture service."""
        self._capture = AudioCaptureService()
        self._audio_queue: asyncio.Queue[bytes] = asyncio.Queue()
        self._loop: Optional[asyncio.AbstractEventLoop] = None

        # Wire up callback
        self._capture.on_audio_chunk = self._on_chunk

    def _on_chunk(self, chunk: bytes) -> None:
        """Handle audio chunk from capture service.

        Runs on the capture thread; put_nowait must be scheduled onto the
        event loop thread-safely rather than called directly.
        """
        if self._loop:
            self._loop.call_soon_threadsafe(
                self._audio_queue.put_nowait, chunk
            )

    @property
    def is_available(self) -> bool:
        """Check if audio capture is available."""
        return self._capture.is_available

    @property
    def is_capturing(self) -> bool:
        """Check if currently capturing."""
        return self._capture.is_capturing

    async def start_capture(self) -> bool:
        """Start capturing audio.

        Returns:
            True if capture started successfully.
        """
        # Fix: get_running_loop() is the correct, non-deprecated way to grab
        # the loop from inside a coroutine; get_event_loop() is deprecated in
        # this context and can return a different loop than the running one.
        self._loop = asyncio.get_running_loop()
        return self._capture.start_capture()

    def stop_capture(self) -> None:
        """Stop capturing audio."""
        self._capture.stop_capture()

    def pause_capture(self) -> None:
        """Pause audio capture."""
        self._capture.pause_capture()

    def resume_capture(self) -> None:
        """Resume audio capture."""
        self._capture.resume_capture()

    async def get_audio_chunk(self, timeout: float = 0.5) -> Optional[bytes]:
        """Get the next audio chunk.

        Args:
            timeout: Maximum time to wait for a chunk.

        Returns:
            Audio chunk bytes, or None if timeout.
        """
        try:
            return await asyncio.wait_for(
                self._audio_queue.get(),
                timeout=timeout,
            )
        except asyncio.TimeoutError:
            return None

    def cleanup(self) -> None:
        """Clean up resources."""
        self._capture.cleanup()
|
| 375 |
+
|
reachys_brain/audio_playback.py
ADDED
|
@@ -0,0 +1,332 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Audio playback service for OpenAI Realtime audio output on Reachy."""
|
| 2 |
+
|
| 3 |
+
import asyncio
|
| 4 |
+
import logging
|
| 5 |
+
import shutil
|
| 6 |
+
import subprocess
|
| 7 |
+
import threading
|
| 8 |
+
from queue import Empty, Queue
|
| 9 |
+
from typing import Optional
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
# OpenAI Realtime outputs 24kHz mono PCM16
|
| 14 |
+
SAMPLE_RATE = 24000
|
| 15 |
+
CHANNELS = 1
|
| 16 |
+
SAMPLE_WIDTH = 2 # 16-bit = 2 bytes
|
| 17 |
+
|
| 18 |
+
# Reachy audio device
|
| 19 |
+
AUDIO_DEVICE = "plug:reachymini_audio_sink"
|
| 20 |
+
|
| 21 |
+
# Playback timing
|
| 22 |
+
STOP_DELAY_SECONDS = 0.3 # Delay before stopping to allow buffer to drain
|
| 23 |
+
|
| 24 |
+
# Kids mode pitch shift (in cents, 400 = ~1/3 octave higher)
|
| 25 |
+
KIDS_MODE_PITCH_CENTS = 400
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Global kids mode state (toggled via set_kids_mode).
_kids_mode_enabled = False


def is_kids_mode_enabled() -> bool:
    """Report whether kids mode (pitch-shifted playback) is currently on."""
    return _kids_mode_enabled
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def set_kids_mode(enabled: bool) -> None:
    """Enable or disable kids mode (pitch shifting).

    Args:
        enabled: True to enable kids mode, False to disable.
    """
    global _kids_mode_enabled
    _kids_mode_enabled = enabled
    state = "enabled" if enabled else "disabled"
    logger.info(f"🧒 Kids mode {state}")
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class AudioPlaybackService:
|
| 49 |
+
"""Service for playing PCM16 audio from OpenAI on Reachy's speaker.
|
| 50 |
+
|
| 51 |
+
Uses aplay for low-latency streaming playback. Audio chunks are
|
| 52 |
+
queued and played in order to maintain continuous playback.
|
| 53 |
+
"""
|
| 54 |
+
|
| 55 |
+
def __init__(self) -> None:
|
| 56 |
+
"""Initialize the audio playback service."""
|
| 57 |
+
self._audio_queue: Queue[bytes] = Queue()
|
| 58 |
+
self._is_playing = False
|
| 59 |
+
self._playback_thread: Optional[threading.Thread] = None
|
| 60 |
+
self._process: Optional[subprocess.Popen] = None
|
| 61 |
+
self._stop_event = threading.Event()
|
| 62 |
+
|
| 63 |
+
@property
|
| 64 |
+
def is_playing(self) -> bool:
|
| 65 |
+
"""Check if currently playing audio."""
|
| 66 |
+
return self._is_playing
|
| 67 |
+
|
| 68 |
+
def start_playback(self) -> None:
|
| 69 |
+
"""Start the audio playback pipeline.
|
| 70 |
+
|
| 71 |
+
Creates an aplay process ready to receive audio data.
|
| 72 |
+
"""
|
| 73 |
+
if self._is_playing:
|
| 74 |
+
logger.warning("Playback already started")
|
| 75 |
+
return
|
| 76 |
+
|
| 77 |
+
self._stop_event.clear()
|
| 78 |
+
self._is_playing = True
|
| 79 |
+
|
| 80 |
+
# Start playback thread
|
| 81 |
+
self._playback_thread = threading.Thread(
|
| 82 |
+
target=self._playback_loop,
|
| 83 |
+
daemon=True,
|
| 84 |
+
)
|
| 85 |
+
self._playback_thread.start()
|
| 86 |
+
|
| 87 |
+
logger.info("Started audio playback pipeline")
|
| 88 |
+
|
| 89 |
+
def stop_playback(self) -> None:
|
| 90 |
+
"""Stop the audio playback pipeline."""
|
| 91 |
+
if not self._is_playing:
|
| 92 |
+
return
|
| 93 |
+
|
| 94 |
+
self._stop_event.set()
|
| 95 |
+
self._is_playing = False
|
| 96 |
+
|
| 97 |
+
# Clear the queue
|
| 98 |
+
while not self._audio_queue.empty():
|
| 99 |
+
try:
|
| 100 |
+
self._audio_queue.get_nowait()
|
| 101 |
+
except Empty:
|
| 102 |
+
break
|
| 103 |
+
|
| 104 |
+
# Wait for thread to finish
|
| 105 |
+
if self._playback_thread and self._playback_thread.is_alive():
|
| 106 |
+
self._playback_thread.join(timeout=2.0)
|
| 107 |
+
|
| 108 |
+
self._playback_thread = None
|
| 109 |
+
|
| 110 |
+
logger.info("Stopped audio playback pipeline")
|
| 111 |
+
|
| 112 |
+
def enqueue_audio(self, audio_data: bytes) -> None:
|
| 113 |
+
"""Add audio data to the playback queue.
|
| 114 |
+
|
| 115 |
+
Args:
|
| 116 |
+
audio_data: PCM16 audio data at 24kHz mono.
|
| 117 |
+
"""
|
| 118 |
+
if not self._is_playing:
|
| 119 |
+
logger.warning("Playback not started, dropping audio")
|
| 120 |
+
return
|
| 121 |
+
|
| 122 |
+
self._audio_queue.put(audio_data)
|
| 123 |
+
|
| 124 |
+
def clear_queue(self) -> None:
|
| 125 |
+
"""Clear any pending audio in the queue."""
|
| 126 |
+
while not self._audio_queue.empty():
|
| 127 |
+
try:
|
| 128 |
+
self._audio_queue.get_nowait()
|
| 129 |
+
except Empty:
|
| 130 |
+
break
|
| 131 |
+
|
| 132 |
+
def _playback_loop(self) -> None:
    """Main playback loop running in a separate thread.

    Continuously reads from the queue and writes to aplay.
    Uses sox for pitch shifting when kids mode is enabled.

    Pipeline shapes:
      * normal:    queue -> aplay stdin
      * kids mode: queue -> sox stdin -> sox stdout -> aplay stdin
    The loop exits when self._stop_event is set or on any write error;
    the finally block always tears down both child processes.
    """
    sox_process: Optional[subprocess.Popen] = None

    try:
        # Check if kids mode is enabled and sox is available
        use_pitch_shift = _kids_mode_enabled and shutil.which("sox") is not None

        if _kids_mode_enabled and not shutil.which("sox"):
            logger.warning("🧒 Kids mode enabled but sox not found, using normal playback")

        # aplay command for final output
        aplay_cmd = [
            "aplay",
            "-r", str(SAMPLE_RATE),
            "-f", "S16_LE",  # 16-bit signed little-endian
            "-t", "raw",
            "-c", str(CHANNELS),
            "-D", AUDIO_DEVICE,
            "-q",  # Quiet mode
        ]

        if use_pitch_shift:
            # sox command for pitch shifting
            # Input: raw PCM16 24kHz mono from stdin
            # Output: raw PCM16 24kHz mono to stdout (piped to aplay)
            sox_cmd = [
                "sox",
                "-t", "raw",  # Input format: raw
                "-r", str(SAMPLE_RATE),  # Input sample rate
                "-e", "signed",  # Input encoding: signed
                "-b", "16",  # Input bits: 16
                "-c", str(CHANNELS),  # Input channels: mono
                "-",  # Input from stdin
                "-t", "raw",  # Output format: raw
                "-",  # Output to stdout
                "pitch", str(KIDS_MODE_PITCH_CENTS),  # Pitch shift up
            ]

            # Start sox process (reads from stdin, outputs to stdout)
            sox_process = subprocess.Popen(
                sox_cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )

            # Start aplay process (reads from sox stdout)
            self._process = subprocess.Popen(
                aplay_cmd,
                stdin=sox_process.stdout,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )

            # Close sox stdout in parent so aplay gets EOF when sox closes
            if sox_process.stdout:
                sox_process.stdout.close()

            logger.info(f"🧒 Started kids mode playback with pitch shift +{KIDS_MODE_PITCH_CENTS} cents")
        else:
            # Normal playback without pitch shifting
            self._process = subprocess.Popen(
                aplay_cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            logger.debug(f"Started aplay process: {' '.join(aplay_cmd)}")

        # Determine which process stdin to write to
        # (sox's stdin in kids mode, aplay's stdin otherwise)
        write_stdin = sox_process.stdin if sox_process else self._process.stdin

        while not self._stop_event.is_set():
            try:
                # Get audio from queue with timeout
                # (0.1s timeout keeps the loop responsive to _stop_event)
                audio_data = self._audio_queue.get(timeout=0.1)

                # Write to sox (if pitch shifting) or aplay directly
                if write_stdin:
                    write_stdin.write(audio_data)
                    write_stdin.flush()

            except Empty:
                # No audio available, continue waiting
                continue
            except BrokenPipeError:
                # NOTE(review): despite "restarting..." in the message, this
                # loop only exits; nothing here restarts playback - a caller
                # must call start_playback() again.
                logger.error("Audio pipe broken, restarting...")
                break
            except Exception as e:
                logger.error(f"Error in playback loop: {e}")
                break

    except Exception as e:
        logger.error(f"Error starting audio playback: {e}")

    finally:
        # Clean up sox process if used
        # (closing stdin first lets sox flush any buffered audio to aplay)
        if sox_process:
            try:
                if sox_process.stdin:
                    sox_process.stdin.close()
                sox_process.terminate()
                sox_process.wait(timeout=1.0)
            except Exception as e:
                logger.error(f"Error cleaning up sox: {e}")
                try:
                    sox_process.kill()
                except Exception:
                    pass

        # Clean up aplay process
        if self._process:
            try:
                if self._process.stdin:
                    self._process.stdin.close()
                self._process.terminate()
                self._process.wait(timeout=1.0)
            except Exception as e:
                logger.error(f"Error cleaning up aplay: {e}")
                try:
                    self._process.kill()
                except Exception:
                    pass
            self._process = None
|
| 261 |
+
|
| 262 |
+
def cleanup(self) -> None:
    """Release all playback resources by stopping the pipeline."""
    self.stop_playback()
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
class StreamingAudioPlayer:
    """High-level audio player for streaming OpenAI audio.

    Wraps AudioPlaybackService with a simple start/stop/chunk API and
    tracks whether a stream is currently active.
    """

    def __init__(self) -> None:
        """Initialize the streaming player."""
        self._is_stream_active = False
        self._playback = AudioPlaybackService()

    @property
    def is_playing(self) -> bool:
        """True while an audio stream is active."""
        return self._is_stream_active

    def start_stream(self) -> None:
        """Begin a new audio stream.

        Call this when OpenAI starts sending audio. Idempotent while a
        stream is already active.
        """
        if self._is_stream_active:
            return
        self._playback.start_playback()
        self._is_stream_active = True
        logger.info("Started audio stream")

    def stop_stream(self) -> None:
        """End the current audio stream.

        Call this when OpenAI stops sending audio. Shutdown is deferred
        by STOP_DELAY_SECONDS so buffered audio can finish playing.
        """
        if not self._is_stream_active:
            return
        threading.Timer(STOP_DELAY_SECONDS, self._stop_delayed).start()

    def _stop_delayed(self) -> None:
        """Timer callback: stop playback once the buffer has drained."""
        self._playback.stop_playback()
        self._is_stream_active = False
        logger.info("Stopped audio stream")

    def play_chunk(self, audio_data: bytes) -> None:
        """Enqueue one audio chunk, starting the stream if needed.

        Args:
            audio_data: PCM16 audio data at 24kHz mono.
        """
        if not self._is_stream_active:
            self.start_stream()
        self._playback.enqueue_audio(audio_data)

    def cancel(self) -> None:
        """Abort playback immediately: flush the queue and stop the pipeline."""
        self._playback.clear_queue()
        self._playback.stop_playback()
        self._is_stream_active = False
        logger.info("Cancelled audio stream")

    def cleanup(self) -> None:
        """Release the underlying playback resources."""
        self._playback.cleanup()
|
| 332 |
+
|
reachys_brain/config.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Configuration module for Reachy iOS Bridge.
|
| 2 |
+
|
| 3 |
+
Centralizes all configuration options and supports both robot and cloud deployments.
|
| 4 |
+
Environment variables control the behavior.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
from dataclasses import dataclass
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@dataclass
|
| 12 |
+
class Config:
|
| 13 |
+
"""Application configuration loaded from environment variables."""
|
| 14 |
+
|
| 15 |
+
# Server settings
|
| 16 |
+
host: str = "0.0.0.0"
|
| 17 |
+
port: int = 8080
|
| 18 |
+
|
| 19 |
+
# Deployment mode
|
| 20 |
+
# "robot" - Full functionality with Reachy hardware
|
| 21 |
+
# "cloud" - API-only mode without hardware dependencies (for HF Spaces)
|
| 22 |
+
mode: str = "cloud"
|
| 23 |
+
|
| 24 |
+
# Reachy daemon URL (only used in robot mode)
|
| 25 |
+
daemon_url: str = "http://localhost:8000"
|
| 26 |
+
|
| 27 |
+
# Audio device names (only used in robot mode)
|
| 28 |
+
audio_input_device: str = "reachymini_audio_src"
|
| 29 |
+
audio_output_device: str = "plug:reachymini_audio_sink"
|
| 30 |
+
|
| 31 |
+
# Database path (None = use default ~/.reachy/reachy_bridge.db)
|
| 32 |
+
database_path: str | None = None
|
| 33 |
+
|
| 34 |
+
# OpenAI settings
|
| 35 |
+
openai_api_key: str | None = None
|
| 36 |
+
|
| 37 |
+
# Feature flags
|
| 38 |
+
enable_audio_capture: bool = False
|
| 39 |
+
enable_audio_playback: bool = False
|
| 40 |
+
enable_motion: bool = False
|
| 41 |
+
|
| 42 |
+
@classmethod
|
| 43 |
+
def from_env(cls) -> "Config":
|
| 44 |
+
"""Load configuration from environment variables."""
|
| 45 |
+
mode = os.environ.get("REACHY_MODE", "cloud").lower()
|
| 46 |
+
is_robot_mode = mode == "robot"
|
| 47 |
+
|
| 48 |
+
return cls(
|
| 49 |
+
host=os.environ.get("HOST", "0.0.0.0"),
|
| 50 |
+
port=int(os.environ.get("PORT", "8080")),
|
| 51 |
+
mode=mode,
|
| 52 |
+
daemon_url=os.environ.get("REACHY_DAEMON_URL", "http://localhost:8000"),
|
| 53 |
+
audio_input_device=os.environ.get(
|
| 54 |
+
"AUDIO_INPUT_DEVICE", "reachymini_audio_src"
|
| 55 |
+
),
|
| 56 |
+
audio_output_device=os.environ.get(
|
| 57 |
+
"AUDIO_OUTPUT_DEVICE", "plug:reachymini_audio_sink"
|
| 58 |
+
),
|
| 59 |
+
database_path=os.environ.get("DATABASE_PATH"),
|
| 60 |
+
openai_api_key=os.environ.get("OPENAI_API_KEY"),
|
| 61 |
+
# Features are enabled only in robot mode by default
|
| 62 |
+
enable_audio_capture=is_robot_mode
|
| 63 |
+
or os.environ.get("ENABLE_AUDIO_CAPTURE", "").lower() == "true",
|
| 64 |
+
enable_audio_playback=is_robot_mode
|
| 65 |
+
or os.environ.get("ENABLE_AUDIO_PLAYBACK", "").lower() == "true",
|
| 66 |
+
enable_motion=is_robot_mode
|
| 67 |
+
or os.environ.get("ENABLE_MOTION", "").lower() == "true",
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
@property
|
| 71 |
+
def is_robot_mode(self) -> bool:
|
| 72 |
+
"""Check if running in robot mode with hardware."""
|
| 73 |
+
return self.mode == "robot"
|
| 74 |
+
|
| 75 |
+
@property
|
| 76 |
+
def is_cloud_mode(self) -> bool:
|
| 77 |
+
"""Check if running in cloud/API-only mode."""
|
| 78 |
+
return self.mode == "cloud"
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
# Global configuration instance
_config: Config | None = None


def get_config() -> Config:
    """Return the process-wide Config, loading it from the environment once."""
    global _config
    if _config is None:
        _config = Config.from_env()
    return _config


def reset_config() -> None:
    """Drop the cached Config so the next get_config() reloads it (for tests)."""
    global _config
    _config = None
|
| 100 |
+
|
reachys_brain/daemon_health_monitor.py
ADDED
|
@@ -0,0 +1,414 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Daemon health monitor with auto-recovery for channel closed errors.
|
| 2 |
+
|
| 3 |
+
This module monitors the Reachy daemon status and automatically
|
| 4 |
+
recovers from critical errors like "channel closed" which occur
|
| 5 |
+
when the motor controller communication fails.
|
| 6 |
+
|
| 7 |
+
Recovery Strategy:
|
| 8 |
+
1. Light recovery (~5s): Uses /api/daemon/restart to restart just the backend
|
| 9 |
+
2. Full recovery (~15-20s): Restarts the reachy-mini-daemon systemd service
|
| 10 |
+
|
| 11 |
+
Light recovery is tried first and works for most channel errors.
|
| 12 |
+
Full recovery is used as a fallback when light recovery fails.
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
import asyncio
|
| 16 |
+
import logging
|
| 17 |
+
import subprocess
|
| 18 |
+
from datetime import datetime, timedelta
|
| 19 |
+
from enum import Enum
|
| 20 |
+
from typing import Callable, Optional
|
| 21 |
+
|
| 22 |
+
import httpx
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger(__name__)
|
| 25 |
+
|
| 26 |
+
# Daemon API URL
|
| 27 |
+
DAEMON_URL = "http://localhost:8000"
|
| 28 |
+
|
| 29 |
+
# Health check interval in seconds
|
| 30 |
+
HEALTH_CHECK_INTERVAL = 5.0
|
| 31 |
+
|
| 32 |
+
# Minimum time between recovery attempts (prevent rapid restart loops)
|
| 33 |
+
MIN_RECOVERY_INTERVAL = timedelta(seconds=30)
|
| 34 |
+
|
| 35 |
+
# Maximum consecutive failures before giving up
|
| 36 |
+
MAX_RECOVERY_ATTEMPTS = 3
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class DaemonState(str, Enum):
    """Daemon health states.

    Inherits from str so values compare and serialize as plain strings.
    """
    HEALTHY = "healthy"  # daemon reported state "running" with no errors
    ERROR = "error"  # daemon or backend reported an error
    RECOVERING = "recovering"  # an automatic recovery attempt is in progress
    NOT_INITIALIZED = "not_initialized"  # daemon reported "not_initialized"
    UNKNOWN = "unknown"  # daemon unreachable or reported an unrecognized state
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class DaemonHealthMonitor:
    """Monitors daemon health and auto-recovers from errors.

    Runs a background asyncio task that polls the daemon status endpoint
    every HEALTH_CHECK_INTERVAL seconds. When it detects a critical
    motor-communication error (e.g. "channel closed"), it triggers a
    recovery sequence: a light daemon restart first, then a full
    systemd service restart as a fallback.

    Callbacks (all optional, set by the owner after construction):
        on_state_change(state, error): fired whenever state/error changes.
        on_recovery_started(): fired when a recovery sequence begins.
        on_recovery_completed(success, message): fired when it ends.
    """

    def __init__(self) -> None:
        """Initialize the health monitor (does not start polling)."""
        self._running = False
        self._task: Optional[asyncio.Task] = None
        self._state = DaemonState.UNKNOWN
        self._last_error: Optional[str] = None
        self._last_recovery_attempt: Optional[datetime] = None
        self._consecutive_failures = 0
        self._recovery_in_progress = False

        # Callbacks for state changes
        self.on_state_change: Optional[Callable[[DaemonState, Optional[str]], None]] = None
        self.on_recovery_started: Optional[Callable[[], None]] = None
        self.on_recovery_completed: Optional[Callable[[bool, str], None]] = None

    @property
    def state(self) -> DaemonState:
        """Current daemon state."""
        return self._state

    @property
    def last_error(self) -> Optional[str]:
        """Last error message from daemon."""
        return self._last_error

    @property
    def is_recovering(self) -> bool:
        """Check if recovery is in progress."""
        return self._recovery_in_progress

    def start(self) -> None:
        """Start the background polling task (idempotent)."""
        if self._running:
            return

        self._running = True
        self._task = asyncio.create_task(self._monitor_loop())
        logger.info("🏥 Daemon health monitor started")

    def stop(self) -> None:
        """Cancel the background polling task."""
        self._running = False
        if self._task:
            self._task.cancel()
            self._task = None
        logger.info("🏥 Daemon health monitor stopped")

    async def _monitor_loop(self) -> None:
        """Main monitoring loop: poll health until stopped or cancelled."""
        while self._running:
            try:
                await self._check_health()
            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Error in health monitor: {e}")

            await asyncio.sleep(HEALTH_CHECK_INTERVAL)

    async def _check_health(self) -> None:
        """Query daemon status once; update state and trigger recovery if needed."""
        # Don't poll while a recovery sequence is actively mutating the daemon.
        if self._recovery_in_progress:
            return

        try:
            async with httpx.AsyncClient(timeout=5.0) as client:
                response = await client.get(f"{DAEMON_URL}/api/daemon/status")
                response.raise_for_status()
                status = response.json()
        except httpx.ConnectError:
            self._set_state(DaemonState.UNKNOWN, "Cannot connect to daemon")
            # Daemon is completely offline - this is a critical error
            await self._handle_critical_error("daemon offline - cannot connect")
            return
        except Exception as e:
            self._set_state(DaemonState.UNKNOWN, str(e))
            return

        # Errors can be reported at daemon level or inside backend_status.
        error = status.get("error")
        state = status.get("state", "unknown")
        backend_error = None
        if status.get("backend_status"):
            backend_error = status["backend_status"].get("error")

        # Determine health state
        if state == "not_initialized":
            self._set_state(DaemonState.NOT_INITIALIZED, None)
            self._consecutive_failures = 0
        elif error or backend_error:
            error_msg = error or backend_error
            self._set_state(DaemonState.ERROR, error_msg)

            # Only auto-recover for known-critical errors
            if self._is_critical_error(error_msg):
                await self._handle_critical_error(error_msg)
        elif state == "running":
            self._set_state(DaemonState.HEALTHY, None)
            self._consecutive_failures = 0
        else:
            self._set_state(DaemonState.UNKNOWN, f"Unknown state: {state}")

    def _set_state(self, state: DaemonState, error: Optional[str]) -> None:
        """Update state/error and notify on_state_change when either changed."""
        if state != self._state or error != self._last_error:
            old_state = self._state
            self._state = state
            self._last_error = error

            if state == DaemonState.ERROR:
                logger.warning(f"🔴 Daemon health: {state.value} - {error}")
            elif state == DaemonState.HEALTHY and old_state == DaemonState.ERROR:
                logger.info("🟢 Daemon health: recovered!")

            if self.on_state_change:
                self.on_state_change(state, error)

    def _is_critical_error(self, error: str) -> bool:
        """Check if an error requires automatic recovery (case-insensitive substring match)."""
        critical_errors = [
            "channel closed",
            "backend not running",
            "control loop crashed",
            "motor communication failed",
            "daemon offline",  # Daemon completely unreachable
        ]
        error_lower = error.lower()
        return any(crit in error_lower for crit in critical_errors)

    async def _handle_critical_error(self, error: str) -> None:
        """Rate-limited entry point that kicks off recovery for a critical error."""
        # Give up after too many consecutive failed attempts
        if self._consecutive_failures >= MAX_RECOVERY_ATTEMPTS:
            logger.error(
                f"❌ Max recovery attempts ({MAX_RECOVERY_ATTEMPTS}) exceeded. "
                "Manual intervention required."
            )
            return

        # Enforce a minimum interval between recovery attempts
        if self._last_recovery_attempt:
            time_since_last = datetime.now() - self._last_recovery_attempt
            if time_since_last < MIN_RECOVERY_INTERVAL:
                remaining = (MIN_RECOVERY_INTERVAL - time_since_last).seconds
                logger.debug(f"Waiting {remaining}s before next recovery attempt")
                return

        logger.warning(f"🔧 Critical error detected: {error}")
        logger.info("🔄 Starting automatic recovery...")

        self._consecutive_failures += 1
        await self.trigger_recovery()

    def _fail_recovery(self, message: str, *, exc_info: bool = False) -> tuple[bool, str]:
        """Log a recovery failure, notify on_recovery_completed, return (False, message).

        Centralizing this guarantees the completion callback always fires,
        which the original failure paths inside trigger_recovery skipped.
        """
        logger.error(message, exc_info=exc_info)
        if self.on_recovery_completed:
            self.on_recovery_completed(False, message)
        return False, message

    async def trigger_light_recovery(self) -> tuple[bool, str]:
        """Try a light recovery using the daemon's built-in restart.

        This is faster than a full systemd restart and works for many
        channel errors. It uses /api/daemon/restart which restarts
        just the backend process without restarting the entire service,
        then polls the status endpoint until the backend is healthy.

        Returns:
            Tuple of (success, message).
        """
        logger.info("🔄 Attempting light recovery via daemon restart...")

        try:
            async with httpx.AsyncClient(timeout=30.0) as client:
                # Try the daemon's built-in restart endpoint
                response = await client.post(f"{DAEMON_URL}/api/daemon/restart")

                if response.status_code not in (200, 202):
                    logger.warning(f"Light recovery failed: HTTP {response.status_code}")
                    return False, f"Daemon restart returned {response.status_code}"

                # Wait for restart to complete
                logger.info("⏳ Waiting for daemon to restart...")
                await asyncio.sleep(5)

                # Poll until backend is ready
                for attempt in range(10):
                    try:
                        async with httpx.AsyncClient(timeout=3.0) as poll_client:
                            response = await poll_client.get(f"{DAEMON_URL}/api/daemon/status")
                            if response.status_code == 200:
                                status = response.json()
                                state = status.get("state", "unknown")
                                error = status.get("error")
                                # `or {}` guards against backend_status being null
                                backend_error = (status.get("backend_status") or {}).get("error")

                                if state == "running" and not error and not backend_error:
                                    logger.info("✅ Light recovery successful!")
                                    return True, "Recovered via daemon restart"
                                elif error or backend_error:
                                    # Error persists, light recovery didn't work
                                    logger.warning(f"Error persists after light recovery: {error or backend_error}")
                                    return False, f"Error persists: {error or backend_error}"
                    except Exception as e:
                        logger.debug(f"Polling attempt {attempt + 1}: {e}")
                    await asyncio.sleep(1)

                return False, "Daemon did not become ready after restart"

        except httpx.ConnectError:
            logger.warning("Cannot connect to daemon for light recovery")
            return False, "Cannot connect to daemon"
        except Exception as e:
            logger.warning(f"Light recovery failed: {e}")
            return False, str(e)

    async def trigger_recovery(self, force: bool = False, skip_light: bool = False) -> tuple[bool, str]:
        """Trigger daemon recovery, trying light recovery first.

        Recovery strategy:
        1. Light recovery (daemon restart) - fast, ~5 seconds
        2. If that fails, full recovery (systemd restart) - slower, ~15-20 seconds

        Args:
            force: If True, bypass the "already in progress" check.
            skip_light: If True, skip light recovery and go straight to full.

        Returns:
            Tuple of (success, message). on_recovery_completed is invoked
            with the same outcome before this method returns (except for
            the early "already in progress" return).
        """
        if self._recovery_in_progress and not force:
            return False, "Recovery already in progress"

        self._recovery_in_progress = True
        self._last_recovery_attempt = datetime.now()
        self._set_state(DaemonState.RECOVERING, "Recovery in progress")

        if self.on_recovery_started:
            self.on_recovery_started()

        try:
            # Step 1: Try light recovery first (unless skipped)
            if not skip_light:
                success, message = await self.trigger_light_recovery()
                if success:
                    self._consecutive_failures = 0
                    if self.on_recovery_completed:
                        self.on_recovery_completed(True, message)
                    return True, message
                logger.info("Light recovery failed, trying full recovery...")

            # Step 2: Full recovery - restart the systemd service.
            # subprocess.run is blocking, so run it in the default executor.
            logger.info("📍 Step 1/3: Restarting reachy-mini-daemon service...")
            result = await asyncio.get_event_loop().run_in_executor(
                None,
                lambda: subprocess.run(
                    ["sudo", "systemctl", "restart", "reachy-mini-daemon"],
                    capture_output=True,
                    text=True,
                    timeout=30,
                ),
            )

            if result.returncode != 0:
                # BUGFIX: notify on_recovery_completed on this failure path too.
                return self._fail_recovery(f"Failed to restart service: {result.stderr}")

            # Step 3: Wait for service to be ready
            logger.info("📍 Step 2/3: Waiting for daemon to initialize...")
            await asyncio.sleep(5)

            # Poll until daemon is responsive
            for _ in range(10):
                try:
                    async with httpx.AsyncClient(timeout=3.0) as client:
                        response = await client.get(f"{DAEMON_URL}/api/daemon/status")
                        if response.status_code == 200:
                            break
                except Exception:
                    pass
                await asyncio.sleep(1)

            # Step 4: Start the daemon backend
            logger.info("📍 Step 3/3: Starting daemon backend...")
            async with httpx.AsyncClient(timeout=30.0) as client:
                response = await client.post(
                    f"{DAEMON_URL}/api/daemon/start",
                    params={"wake_up": "true"},
                )

            if response.status_code not in (200, 409):  # 409 = already running
                return self._fail_recovery(f"Failed to start daemon: {response.text}")

            # Wait for backend to be fully ready
            await asyncio.sleep(3)

            # Verify recovery was successful
            async with httpx.AsyncClient(timeout=5.0) as client:
                response = await client.get(f"{DAEMON_URL}/api/daemon/status")
                status = response.json()

            # `or {}` guards against backend_status being null
            residual = status.get("error") or (status.get("backend_status") or {}).get("error")
            if residual:
                return self._fail_recovery(f"Recovery incomplete: {residual}")

            logger.info("✅ Full recovery completed successfully!")
            self._consecutive_failures = 0

            if self.on_recovery_completed:
                self.on_recovery_completed(True, "Recovery completed successfully")

            return True, "Motor controllers recovered successfully"

        except asyncio.TimeoutError:
            return self._fail_recovery("Recovery timed out")

        except Exception as e:
            return self._fail_recovery(f"Recovery failed: {str(e)}", exc_info=True)

        finally:
            self._recovery_in_progress = False
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
# Global instance
_health_monitor: Optional[DaemonHealthMonitor] = None


def get_health_monitor() -> DaemonHealthMonitor:
    """Return the process-wide health monitor, creating it lazily."""
    global _health_monitor
    if _health_monitor is None:
        _health_monitor = DaemonHealthMonitor()
    return _health_monitor


def start_health_monitor() -> DaemonHealthMonitor:
    """Convenience wrapper: fetch the global monitor and start it."""
    mon = get_health_monitor()
    mon.start()
    return mon


def stop_health_monitor() -> None:
    """Stop the global health monitor if one was ever created."""
    global _health_monitor
    if _health_monitor:
        _health_monitor.stop()
|
| 414 |
+
|
reachys_brain/database.py
ADDED
|
@@ -0,0 +1,1620 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""SQLite database service for persistent storage.
|
| 2 |
+
|
| 3 |
+
Provides async database operations for storing custom apps and other data.
|
| 4 |
+
Uses aiosqlite for non-blocking database access.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import json
|
| 8 |
+
import logging
|
| 9 |
+
from datetime import datetime
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
from typing import Optional
|
| 12 |
+
|
| 13 |
+
import aiosqlite
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
# Database location - stored in user's home directory
|
| 18 |
+
DATABASE_DIR = Path.home() / ".reachy"
|
| 19 |
+
DATABASE_PATH = DATABASE_DIR / "reachy_bridge.db"
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class DatabaseService:
|
| 23 |
+
"""Async SQLite database service for the Reachy iOS Bridge."""
|
| 24 |
+
|
| 25 |
+
def __init__(self, db_path: Optional[Path] = None):
|
| 26 |
+
"""Initialize the database service.
|
| 27 |
+
|
| 28 |
+
Args:
|
| 29 |
+
db_path: Optional custom database path. Defaults to ~/.reachy/reachy_bridge.db
|
| 30 |
+
"""
|
| 31 |
+
self.db_path = db_path or DATABASE_PATH
|
| 32 |
+
self._connection: Optional[aiosqlite.Connection] = None
|
| 33 |
+
|
| 34 |
+
async def initialize(self) -> None:
|
| 35 |
+
"""Initialize the database and create tables if needed."""
|
| 36 |
+
# Ensure directory exists
|
| 37 |
+
self.db_path.parent.mkdir(parents=True, exist_ok=True)
|
| 38 |
+
|
| 39 |
+
logger.info(f"Initializing database at {self.db_path}")
|
| 40 |
+
|
| 41 |
+
async with aiosqlite.connect(self.db_path) as db:
|
| 42 |
+
await self._create_tables(db)
|
| 43 |
+
await db.commit()
|
| 44 |
+
|
| 45 |
+
logger.info("Database initialized successfully")
|
| 46 |
+
|
| 47 |
+
    async def _create_tables(self, db: aiosqlite.Connection) -> None:
        """Create database tables if they don't exist.

        Idempotent: all DDL uses IF NOT EXISTS (or INSERT OR IGNORE), and
        column migrations are applied for databases created by older
        versions. The caller (initialize) is responsible for committing,
        though the migration helpers commit their own ALTER statements.
        """
        # Custom apps: per-app persona config; JSON payloads are stored as
        # TEXT (emotion_animations is a JSON object, enabled_tools a JSON list).
        await db.execute("""
            CREATE TABLE IF NOT EXISTS custom_apps (
                id TEXT PRIMARY KEY,
                name TEXT NOT NULL,
                description TEXT DEFAULT '',
                system_prompt TEXT NOT NULL,
                voice_id TEXT DEFAULT '',
                emotion_animations TEXT DEFAULT '{}',
                icon_color TEXT DEFAULT 'blue',
                enabled_tools TEXT DEFAULT '[]',
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        # Index for faster queries by creation date
        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_custom_apps_created
            ON custom_apps(created_at)
        """)

        # Migration: Add enabled_tools column if it doesn't exist
        await self._migrate_add_enabled_tools(db)

        # Custom animations table for recorded joystick animations.
        # start_pose/keyframes are JSON TEXT; audio_data presumably holds a
        # base64-encoded clip (see create_animation) — confirm with callers.
        await db.execute("""
            CREATE TABLE IF NOT EXISTS custom_animations (
                id TEXT PRIMARY KEY,
                name TEXT NOT NULL,
                description TEXT DEFAULT '',
                duration_ms INTEGER NOT NULL,
                start_pose TEXT NOT NULL,
                keyframes TEXT NOT NULL,
                audio_data TEXT DEFAULT NULL,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        # Migration: Add audio_data column if it doesn't exist
        await self._migrate_add_audio_data(db)

        # Index for faster queries by creation date
        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_custom_animations_created
            ON custom_animations(created_at)
        """)

        # User settings table for personalization (name, preferences, etc.)
        # Simple key/value store.
        await db.execute("""
            CREATE TABLE IF NOT EXISTS user_settings (
                key TEXT PRIMARY KEY,
                value TEXT NOT NULL,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        # Websites table for generated websites
        await db.execute("""
            CREATE TABLE IF NOT EXISTS websites (
                id TEXT PRIMARY KEY,
                title TEXT NOT NULL,
                description TEXT DEFAULT '',
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        # Index for faster queries by creation date
        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_websites_created
            ON websites(created_at)
        """)

        # Mental notes table for AI-created notes
        await db.execute("""
            CREATE TABLE IF NOT EXISTS mental_notes (
                id TEXT PRIMARY KEY,
                title TEXT NOT NULL,
                content TEXT NOT NULL,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        # Index for faster queries by creation date
        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_mental_notes_created
            ON mental_notes(created_at)
        """)

        # Meetings table for meeting transcriptions.
        # action_items is JSON TEXT; status defaults to 'recording'.
        await db.execute("""
            CREATE TABLE IF NOT EXISTS meetings (
                id TEXT PRIMARY KEY,
                title TEXT NOT NULL,
                transcript TEXT DEFAULT '',
                action_items TEXT DEFAULT '[]',
                summary TEXT DEFAULT '',
                duration_seconds INTEGER DEFAULT 0,
                status TEXT DEFAULT 'recording',
                started_at TIMESTAMP,
                ended_at TIMESTAMP,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        # Index for faster queries by creation date
        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_meetings_created
            ON meetings(created_at)
        """)

        # Index for faster queries by status
        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_meetings_status
            ON meetings(status)
        """)

        # Scheduled messages table for scheduled iMessage/WhatsApp messages
        await db.execute("""
            CREATE TABLE IF NOT EXISTS scheduled_messages (
                id TEXT PRIMARY KEY,
                recipient_name TEXT NOT NULL,
                recipient_phone TEXT NOT NULL,
                message_content TEXT NOT NULL,
                scheduled_time TIMESTAMP NOT NULL,
                platform TEXT NOT NULL,
                status TEXT DEFAULT 'pending',
                notification_id TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        # Index for faster queries by scheduled time
        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_scheduled_messages_time
            ON scheduled_messages(scheduled_time)
        """)

        # Index for faster queries by status
        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_scheduled_messages_status
            ON scheduled_messages(status)
        """)

        # TamaReachy pet game state table — single-row table (id fixed to 1)
        # holding the virtual pet's stat meters (0-100 scales, presumably).
        await db.execute("""
            CREATE TABLE IF NOT EXISTS tamareachy_state (
                id INTEGER PRIMARY KEY DEFAULT 1,
                enabled INTEGER DEFAULT 0,
                hunger INTEGER DEFAULT 100,
                thirst INTEGER DEFAULT 100,
                happiness INTEGER DEFAULT 100,
                energy INTEGER DEFAULT 100,
                boredom INTEGER DEFAULT 100,
                social INTEGER DEFAULT 100,
                health INTEGER DEFAULT 100,
                cleanliness INTEGER DEFAULT 100,
                last_interaction TIMESTAMP,
                last_decay_check TIMESTAMP,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        # Ensure there's exactly one row for TamaReachy state
        await db.execute("""
            INSERT OR IGNORE INTO tamareachy_state (id) VALUES (1)
        """)

        logger.debug("Database tables created/verified")
|
| 223 |
+
|
| 224 |
+
async def _migrate_add_enabled_tools(self, db: aiosqlite.Connection) -> None:
|
| 225 |
+
"""Add enabled_tools column to existing databases."""
|
| 226 |
+
try:
|
| 227 |
+
# Check if column exists
|
| 228 |
+
cursor = await db.execute("PRAGMA table_info(custom_apps)")
|
| 229 |
+
columns = await cursor.fetchall()
|
| 230 |
+
column_names = [col[1] for col in columns]
|
| 231 |
+
|
| 232 |
+
if "enabled_tools" not in column_names:
|
| 233 |
+
logger.info("Migrating database: adding enabled_tools column")
|
| 234 |
+
await db.execute(
|
| 235 |
+
"ALTER TABLE custom_apps ADD COLUMN enabled_tools TEXT DEFAULT '[]'"
|
| 236 |
+
)
|
| 237 |
+
await db.commit()
|
| 238 |
+
logger.info("Migration complete: enabled_tools column added")
|
| 239 |
+
except Exception as e:
|
| 240 |
+
logger.warning(f"Migration check failed (may be OK): {e}")
|
| 241 |
+
|
| 242 |
+
async def _migrate_add_audio_data(self, db: aiosqlite.Connection) -> None:
|
| 243 |
+
"""Add audio_data column to custom_animations table."""
|
| 244 |
+
try:
|
| 245 |
+
# Check if column exists
|
| 246 |
+
cursor = await db.execute("PRAGMA table_info(custom_animations)")
|
| 247 |
+
columns = await cursor.fetchall()
|
| 248 |
+
column_names = [col[1] for col in columns]
|
| 249 |
+
|
| 250 |
+
if "audio_data" not in column_names:
|
| 251 |
+
logger.info("Migrating database: adding audio_data column to animations")
|
| 252 |
+
await db.execute(
|
| 253 |
+
"ALTER TABLE custom_animations ADD COLUMN audio_data TEXT DEFAULT NULL"
|
| 254 |
+
)
|
| 255 |
+
await db.commit()
|
| 256 |
+
logger.info("Migration complete: audio_data column added to animations")
|
| 257 |
+
except Exception as e:
|
| 258 |
+
logger.warning(f"Animation audio migration check failed (may be OK): {e}")
|
| 259 |
+
|
| 260 |
+
# =========================================================================
|
| 261 |
+
# Custom Apps CRUD Operations
|
| 262 |
+
# =========================================================================
|
| 263 |
+
|
| 264 |
+
async def get_all_apps(self) -> list[dict]:
|
| 265 |
+
"""Get all custom apps from the database.
|
| 266 |
+
|
| 267 |
+
Returns:
|
| 268 |
+
List of custom app dictionaries.
|
| 269 |
+
"""
|
| 270 |
+
async with aiosqlite.connect(self.db_path) as db:
|
| 271 |
+
db.row_factory = aiosqlite.Row
|
| 272 |
+
cursor = await db.execute(
|
| 273 |
+
"SELECT * FROM custom_apps ORDER BY created_at DESC"
|
| 274 |
+
)
|
| 275 |
+
rows = await cursor.fetchall()
|
| 276 |
+
return [self._row_to_app(row) for row in rows]
|
| 277 |
+
|
| 278 |
+
async def get_app(self, app_id: str) -> Optional[dict]:
|
| 279 |
+
"""Get a single custom app by ID.
|
| 280 |
+
|
| 281 |
+
Args:
|
| 282 |
+
app_id: The UUID of the app.
|
| 283 |
+
|
| 284 |
+
Returns:
|
| 285 |
+
The app dictionary, or None if not found.
|
| 286 |
+
"""
|
| 287 |
+
async with aiosqlite.connect(self.db_path) as db:
|
| 288 |
+
db.row_factory = aiosqlite.Row
|
| 289 |
+
cursor = await db.execute(
|
| 290 |
+
"SELECT * FROM custom_apps WHERE id = ?", (app_id,)
|
| 291 |
+
)
|
| 292 |
+
row = await cursor.fetchone()
|
| 293 |
+
return self._row_to_app(row) if row else None
|
| 294 |
+
|
| 295 |
+
    async def create_app(self, app_data: dict) -> dict:
        """Create a new custom app.

        Args:
            app_data: Dictionary containing app data. Required keys: 'id',
                'name', 'system_prompt'; other keys fall back to defaults
                that mirror the table schema.

        Returns:
            The created app dictionary (re-read from the database).
        """
        # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
        # kept as-is here so the stored timestamp format stays unchanged.
        now = datetime.utcnow().isoformat()
        # Nested structures are JSON-encoded for the TEXT columns.
        emotion_animations = json.dumps(app_data.get("emotion_animations", {}))
        enabled_tools = json.dumps(app_data.get("enabled_tools", []))

        # Ensure created_at is always set (use provided value or current time)
        created_at = app_data.get("created_at") or now

        async with aiosqlite.connect(self.db_path) as db:
            await db.execute(
                """
                INSERT INTO custom_apps
                (id, name, description, system_prompt, voice_id,
                 emotion_animations, icon_color, enabled_tools, created_at, updated_at)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """,
                (
                    app_data["id"],
                    app_data["name"],
                    app_data.get("description", ""),
                    app_data["system_prompt"],
                    app_data.get("voice_id", ""),
                    emotion_animations,
                    app_data.get("icon_color", "blue"),
                    enabled_tools,
                    created_at,
                    now,
                ),
            )
            await db.commit()

        logger.info(f"Created custom app: {app_data['name']} ({app_data['id']})")
        # Round-trip through get_app so the caller sees the canonical
        # (decoded) representation.
        return await self.get_app(app_data["id"])
|
| 336 |
+
|
| 337 |
+
    async def update_app(self, app_id: str, app_data: dict) -> Optional[dict]:
        """Update an existing custom app.

        Supports partial updates: any field missing from app_data keeps its
        current value.

        Args:
            app_id: The UUID of the app to update.
            app_data: Dictionary containing updated app data.

        Returns:
            The updated app dictionary, or None if not found.
        """
        # Read-before-write both detects a missing row and supplies the
        # fallback values for fields the caller did not send.
        existing = await self.get_app(app_id)
        if not existing:
            return None

        now = datetime.utcnow().isoformat()

        # Handle emotion_animations - use existing if not provided.
        # (Existing values were presumably JSON-decoded by _row_to_app,
        # hence the re-encode — confirm against that helper.)
        if "emotion_animations" in app_data:
            emotion_animations = json.dumps(app_data["emotion_animations"])
        else:
            emotion_animations = json.dumps(existing["emotion_animations"])

        # Handle enabled_tools - use existing if not provided
        if "enabled_tools" in app_data:
            enabled_tools = json.dumps(app_data["enabled_tools"])
        else:
            enabled_tools = json.dumps(existing["enabled_tools"])

        async with aiosqlite.connect(self.db_path) as db:
            await db.execute(
                """
                UPDATE custom_apps SET
                    name = ?,
                    description = ?,
                    system_prompt = ?,
                    voice_id = ?,
                    emotion_animations = ?,
                    icon_color = ?,
                    enabled_tools = ?,
                    updated_at = ?
                WHERE id = ?
                """,
                (
                    app_data.get("name", existing["name"]),
                    app_data.get("description", existing["description"]),
                    app_data.get("system_prompt", existing["system_prompt"]),
                    app_data.get("voice_id", existing["voice_id"]),
                    emotion_animations,
                    app_data.get("icon_color", existing["icon_color"]),
                    enabled_tools,
                    now,
                    app_id,
                ),
            )
            await db.commit()

        logger.info(f"Updated custom app: {app_id}")
        return await self.get_app(app_id)
|
| 395 |
+
|
| 396 |
+
async def delete_app(self, app_id: str) -> bool:
|
| 397 |
+
"""Delete a custom app.
|
| 398 |
+
|
| 399 |
+
Args:
|
| 400 |
+
app_id: The UUID of the app to delete.
|
| 401 |
+
|
| 402 |
+
Returns:
|
| 403 |
+
True if deleted, False if not found.
|
| 404 |
+
"""
|
| 405 |
+
async with aiosqlite.connect(self.db_path) as db:
|
| 406 |
+
cursor = await db.execute(
|
| 407 |
+
"DELETE FROM custom_apps WHERE id = ?", (app_id,)
|
| 408 |
+
)
|
| 409 |
+
await db.commit()
|
| 410 |
+
deleted = cursor.rowcount > 0
|
| 411 |
+
|
| 412 |
+
if deleted:
|
| 413 |
+
logger.info(f"Deleted custom app: {app_id}")
|
| 414 |
+
return deleted
|
| 415 |
+
|
| 416 |
+
    async def sync_apps(self, apps: list[dict]) -> list[dict]:
        """Bulk sync apps from iOS client.

        This replaces all existing apps with the provided list.
        Used for initial sync or full restore.

        The DELETE and all INSERTs share one connection and are committed
        together at the end, so a mid-loop failure should roll back when
        the connection closes uncommitted.

        Args:
            apps: List of app dictionaries to sync.

        Returns:
            List of all apps after sync.
        """
        async with aiosqlite.connect(self.db_path) as db:
            # Clear existing apps
            await db.execute("DELETE FROM custom_apps")

            # Insert all new apps
            # NOTE(review): datetime.utcnow() is deprecated since 3.12;
            # kept for timestamp-format stability.
            now = datetime.utcnow().isoformat()
            for app in apps:
                # JSON-encode nested structures for the TEXT columns.
                emotion_animations = json.dumps(app.get("emotion_animations", {}))
                enabled_tools = json.dumps(app.get("enabled_tools", []))
                # Ensure created_at is always set
                created_at = app.get("created_at") or now
                await db.execute(
                    """
                    INSERT INTO custom_apps
                    (id, name, description, system_prompt, voice_id,
                     emotion_animations, icon_color, enabled_tools, created_at, updated_at)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                    """,
                    (
                        app["id"],
                        app["name"],
                        app.get("description", ""),
                        app["system_prompt"],
                        app.get("voice_id", ""),
                        emotion_animations,
                        app.get("icon_color", "blue"),
                        enabled_tools,
                        created_at,
                        now,
                    ),
                )

            await db.commit()

        logger.info(f"Synced {len(apps)} custom apps")
        return await self.get_all_apps()
|
| 464 |
+
|
| 465 |
+
# =========================================================================
|
| 466 |
+
# Custom Animations CRUD Operations
|
| 467 |
+
# =========================================================================
|
| 468 |
+
|
| 469 |
+
async def get_all_animations(self) -> list[dict]:
|
| 470 |
+
"""Get all custom animations from the database.
|
| 471 |
+
|
| 472 |
+
Returns:
|
| 473 |
+
List of custom animation dictionaries.
|
| 474 |
+
"""
|
| 475 |
+
async with aiosqlite.connect(self.db_path) as db:
|
| 476 |
+
db.row_factory = aiosqlite.Row
|
| 477 |
+
cursor = await db.execute(
|
| 478 |
+
"SELECT * FROM custom_animations ORDER BY created_at DESC"
|
| 479 |
+
)
|
| 480 |
+
rows = await cursor.fetchall()
|
| 481 |
+
return [self._row_to_animation(row) for row in rows]
|
| 482 |
+
|
| 483 |
+
async def get_animation(self, animation_id: str) -> Optional[dict]:
|
| 484 |
+
"""Get a single custom animation by ID.
|
| 485 |
+
|
| 486 |
+
Args:
|
| 487 |
+
animation_id: The UUID of the animation.
|
| 488 |
+
|
| 489 |
+
Returns:
|
| 490 |
+
The animation dictionary, or None if not found.
|
| 491 |
+
"""
|
| 492 |
+
async with aiosqlite.connect(self.db_path) as db:
|
| 493 |
+
db.row_factory = aiosqlite.Row
|
| 494 |
+
cursor = await db.execute(
|
| 495 |
+
"SELECT * FROM custom_animations WHERE id = ?", (animation_id,)
|
| 496 |
+
)
|
| 497 |
+
row = await cursor.fetchone()
|
| 498 |
+
return self._row_to_animation(row) if row else None
|
| 499 |
+
|
| 500 |
+
    async def create_animation(self, animation_data: dict) -> dict:
        """Create a new custom animation.

        Args:
            animation_data: Dictionary containing animation data. Required
                keys: 'id', 'name', 'duration_ms'; start_pose/keyframes
                default to empty structures.

        Returns:
            The created animation dictionary (re-read from the database).
        """
        # NOTE(review): datetime.utcnow() is deprecated since 3.12; kept for
        # timestamp-format stability.
        now = datetime.utcnow().isoformat()
        # Pose and keyframe structures are JSON-encoded for TEXT storage.
        start_pose = json.dumps(animation_data.get("start_pose", {}))
        keyframes = json.dumps(animation_data.get("keyframes", []))
        audio_data = animation_data.get("audio_data")  # Base64 string or None

        # Ensure created_at is always set
        created_at = animation_data.get("created_at") or now

        async with aiosqlite.connect(self.db_path) as db:
            await db.execute(
                """
                INSERT INTO custom_animations
                (id, name, description, duration_ms, start_pose, keyframes,
                 audio_data, created_at, updated_at)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
                """,
                (
                    animation_data["id"],
                    animation_data["name"],
                    animation_data.get("description", ""),
                    animation_data["duration_ms"],
                    start_pose,
                    keyframes,
                    audio_data,
                    created_at,
                    now,
                ),
            )
            await db.commit()

        logger.info(f"Created custom animation: {animation_data['name']} ({animation_data['id']})")
        # Round-trip through get_animation for the canonical representation.
        return await self.get_animation(animation_data["id"])
|
| 541 |
+
|
| 542 |
+
    async def update_animation(self, animation_id: str, animation_data: dict) -> Optional[dict]:
        """Update an existing custom animation.

        Supports partial updates: any field missing from animation_data
        keeps its current value.

        Args:
            animation_id: The UUID of the animation to update.
            animation_data: Dictionary containing updated animation data.

        Returns:
            The updated animation dictionary, or None if not found.
        """
        # Read-before-write: detects a missing row and supplies fallbacks.
        existing = await self.get_animation(animation_id)
        if not existing:
            return None

        now = datetime.utcnow().isoformat()

        # Handle start_pose - use existing if not provided
        # (existing values were presumably decoded by _row_to_animation,
        # hence the re-encode — confirm against that helper).
        if "start_pose" in animation_data:
            start_pose = json.dumps(animation_data["start_pose"])
        else:
            start_pose = json.dumps(existing["start_pose"])

        # Handle keyframes - use existing if not provided
        if "keyframes" in animation_data:
            keyframes = json.dumps(animation_data["keyframes"])
        else:
            keyframes = json.dumps(existing["keyframes"])

        # Handle audio_data - use existing if not provided.
        # Membership test (not .get) so an explicit None clears the audio.
        if "audio_data" in animation_data:
            audio_data = animation_data["audio_data"]
        else:
            audio_data = existing.get("audio_data")

        async with aiosqlite.connect(self.db_path) as db:
            await db.execute(
                """
                UPDATE custom_animations SET
                    name = ?,
                    description = ?,
                    duration_ms = ?,
                    start_pose = ?,
                    keyframes = ?,
                    audio_data = ?,
                    updated_at = ?
                WHERE id = ?
                """,
                (
                    animation_data.get("name", existing["name"]),
                    animation_data.get("description", existing["description"]),
                    animation_data.get("duration_ms", existing["duration_ms"]),
                    start_pose,
                    keyframes,
                    audio_data,
                    now,
                    animation_id,
                ),
            )
            await db.commit()

        logger.info(f"Updated custom animation: {animation_id}")
        return await self.get_animation(animation_id)
|
| 604 |
+
|
| 605 |
+
async def delete_animation(self, animation_id: str) -> bool:
    """Remove a custom animation row by its UUID.

    Args:
        animation_id: The UUID of the animation to delete.

    Returns:
        True if a row was removed, False if no such animation existed.
    """
    async with aiosqlite.connect(self.db_path) as db:
        result = await db.execute(
            "DELETE FROM custom_animations WHERE id = ?", (animation_id,)
        )
        await db.commit()
        # rowcount tells us whether the DELETE actually matched a row.
        if result.rowcount > 0:
            logger.info(f"Deleted custom animation: {animation_id}")
            return True
    return False
|
| 624 |
+
|
| 625 |
+
# =========================================================================
|
| 626 |
+
# User Settings Operations
|
| 627 |
+
# =========================================================================
|
| 628 |
+
|
| 629 |
+
async def get_user_setting(self, key: str) -> Optional[str]:
    """Look up a single user setting value.

    Args:
        key: The setting key (e.g., 'user_name').

    Returns:
        The stored value string, or None when the key is absent.
    """
    async with aiosqlite.connect(self.db_path) as db:
        cursor = await db.execute(
            "SELECT value FROM user_settings WHERE key = ?", (key,)
        )
        record = await cursor.fetchone()
    return None if record is None else record[0]
|
| 644 |
+
|
| 645 |
+
async def set_user_setting(self, key: str, value: str) -> bool:
    """Store a user setting, overwriting any existing value.

    INSERT OR REPLACE handles both the new-key and existing-key cases
    in a single statement.

    Args:
        key: The setting key.
        value: The setting value.

    Returns:
        True if successful.
    """
    timestamp = datetime.utcnow().isoformat()
    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            INSERT OR REPLACE INTO user_settings (key, value, updated_at)
            VALUES (?, ?, ?)
            """,
            (key, value, timestamp),
        )
        await db.commit()

    logger.info(f"Set user setting: {key}")
    return True
|
| 670 |
+
|
| 671 |
+
async def delete_user_setting(self, key: str) -> bool:
    """Remove a user setting row.

    Args:
        key: The setting key to delete.

    Returns:
        True if a row was removed, False if the key did not exist.
    """
    async with aiosqlite.connect(self.db_path) as db:
        result = await db.execute(
            "DELETE FROM user_settings WHERE key = ?", (key,)
        )
        await db.commit()
        removed = result.rowcount > 0

    if removed:
        logger.info(f"Deleted user setting: {key}")
    return removed
|
| 690 |
+
|
| 691 |
+
async def get_all_user_settings(self) -> dict[str, str]:
    """Return every user setting as a key -> value mapping.

    Returns:
        Dictionary of key-value pairs (empty when no settings exist).
    """
    async with aiosqlite.connect(self.db_path) as db:
        cursor = await db.execute("SELECT key, value FROM user_settings")
        pairs = await cursor.fetchall()
    # Each row is a (key, value) 2-tuple, so dict() consumes them directly.
    return dict(pairs)
|
| 701 |
+
|
| 702 |
+
# =========================================================================
|
| 703 |
+
# Websites CRUD Operations
|
| 704 |
+
# =========================================================================
|
| 705 |
+
|
| 706 |
+
async def get_all_websites(self) -> list[dict]:
    """Fetch every saved website, newest first.

    Returns:
        List of website dictionaries ordered by creation time (descending).
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cursor = await db.execute(
            "SELECT * FROM websites ORDER BY created_at DESC"
        )
        records = await cursor.fetchall()
    return [self._row_to_website(record) for record in records]
|
| 719 |
+
|
| 720 |
+
async def get_website(self, website_id: str) -> Optional[dict]:
    """Fetch one website record by its ID.

    Args:
        website_id: The ID of the website.

    Returns:
        The website dictionary, or None when no row matches.
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cursor = await db.execute(
            "SELECT * FROM websites WHERE id = ?", (website_id,)
        )
        record = await cursor.fetchone()
    return None if record is None else self._row_to_website(record)
|
| 736 |
+
|
| 737 |
+
async def create_website(self, website_data: dict) -> dict:
    """Insert a new website record and return the stored row.

    Args:
        website_data: Dictionary containing website data (must include 'id', 'title').

    Returns:
        The created website dictionary as re-read from the database.
    """
    now = datetime.utcnow().isoformat()
    # Caller may supply its own created_at; otherwise stamp with "now".
    row_values = (
        website_data["id"],
        website_data["title"],
        website_data.get("description", ""),
        website_data.get("created_at") or now,
        now,
    )

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            INSERT INTO websites (id, title, description, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?)
            """,
            row_values,
        )
        await db.commit()

    logger.info(f"Created website: {website_data['title']} ({website_data['id']})")
    return await self.get_website(website_data["id"])
|
| 767 |
+
|
| 768 |
+
async def update_website(self, website_id: str, website_data: dict) -> Optional[dict]:
    """Apply a partial update to a website record.

    Fields missing from ``website_data`` keep their stored values.

    Args:
        website_id: The ID of the website to update.
        website_data: Dictionary containing updated website data.

    Returns:
        The updated website dictionary, or None if no such website exists.
    """
    current = await self.get_website(website_id)
    if current is None:
        return None

    now = datetime.utcnow().isoformat()

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            UPDATE websites SET
                title = ?,
                description = ?,
                updated_at = ?
            WHERE id = ?
            """,
            (
                website_data.get("title", current["title"]),
                website_data.get("description", current["description"]),
                now,
                website_id,
            ),
        )
        await db.commit()

    logger.info(f"Updated website: {website_id}")
    return await self.get_website(website_id)
|
| 804 |
+
|
| 805 |
+
async def delete_website(self, website_id: str) -> bool:
    """Remove a website record.

    Args:
        website_id: The ID of the website to delete.

    Returns:
        True if a row was removed, False if the website did not exist.
    """
    async with aiosqlite.connect(self.db_path) as db:
        result = await db.execute(
            "DELETE FROM websites WHERE id = ?", (website_id,)
        )
        await db.commit()
        if result.rowcount > 0:
            logger.info(f"Deleted website: {website_id}")
            return True
    return False
|
| 824 |
+
|
| 825 |
+
# =========================================================================
|
| 826 |
+
# Mental Notes CRUD Operations
|
| 827 |
+
# =========================================================================
|
| 828 |
+
|
| 829 |
+
async def get_all_notes(self) -> list[dict]:
    """Fetch every mental note, newest first.

    Returns:
        List of mental note dictionaries ordered by creation time (descending).
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cursor = await db.execute(
            "SELECT * FROM mental_notes ORDER BY created_at DESC"
        )
        records = await cursor.fetchall()
    return [self._row_to_note(record) for record in records]
|
| 842 |
+
|
| 843 |
+
async def get_note(self, note_id: str) -> Optional[dict]:
    """Fetch one mental note by its ID.

    Args:
        note_id: The ID of the note.

    Returns:
        The note dictionary, or None when no row matches.
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cursor = await db.execute(
            "SELECT * FROM mental_notes WHERE id = ?", (note_id,)
        )
        record = await cursor.fetchone()
    return None if record is None else self._row_to_note(record)
|
| 859 |
+
|
| 860 |
+
async def create_note(self, note_data: dict) -> dict:
    """Insert a new mental note and return the stored row.

    Args:
        note_data: Dictionary containing note data (must include 'id', 'title', 'content').

    Returns:
        The created note dictionary as re-read from the database.
    """
    now = datetime.utcnow().isoformat()
    row_values = (
        note_data["id"],
        note_data["title"],
        note_data["content"],
        # Respect a caller-supplied creation timestamp when present.
        note_data.get("created_at") or now,
        now,
    )

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            INSERT INTO mental_notes (id, title, content, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?)
            """,
            row_values,
        )
        await db.commit()

    logger.info(f"Created mental note: {note_data['title']} ({note_data['id']})")
    return await self.get_note(note_data["id"])
|
| 890 |
+
|
| 891 |
+
async def update_note(self, note_id: str, note_data: dict) -> Optional[dict]:
    """Apply a partial update to a mental note.

    Fields missing from ``note_data`` keep their stored values.

    Args:
        note_id: The ID of the note to update.
        note_data: Dictionary containing updated note data.

    Returns:
        The updated note dictionary, or None if no such note exists.
    """
    current = await self.get_note(note_id)
    if current is None:
        return None

    now = datetime.utcnow().isoformat()

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            UPDATE mental_notes SET
                title = ?,
                content = ?,
                updated_at = ?
            WHERE id = ?
            """,
            (
                note_data.get("title", current["title"]),
                note_data.get("content", current["content"]),
                now,
                note_id,
            ),
        )
        await db.commit()

    logger.info(f"Updated mental note: {note_id}")
    return await self.get_note(note_id)
|
| 927 |
+
|
| 928 |
+
async def delete_note(self, note_id: str) -> bool:
    """Remove a mental note.

    Args:
        note_id: The ID of the note to delete.

    Returns:
        True if a row was removed, False if the note did not exist.
    """
    async with aiosqlite.connect(self.db_path) as db:
        result = await db.execute(
            "DELETE FROM mental_notes WHERE id = ?", (note_id,)
        )
        await db.commit()
        removed = result.rowcount > 0

    if removed:
        logger.info(f"Deleted mental note: {note_id}")
    return removed
|
| 947 |
+
|
| 948 |
+
# =========================================================================
|
| 949 |
+
# Meetings CRUD Operations
|
| 950 |
+
# =========================================================================
|
| 951 |
+
|
| 952 |
+
async def get_all_meetings(self) -> list[dict]:
    """Fetch every meeting, newest first.

    Returns:
        List of meeting dictionaries ordered by creation time (descending).
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cursor = await db.execute(
            "SELECT * FROM meetings ORDER BY created_at DESC"
        )
        records = await cursor.fetchall()
    return [self._row_to_meeting(record) for record in records]
|
| 965 |
+
|
| 966 |
+
async def get_meeting(self, meeting_id: str) -> Optional[dict]:
    """Fetch one meeting by its ID.

    Args:
        meeting_id: The ID of the meeting.

    Returns:
        The meeting dictionary, or None when no row matches.
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cursor = await db.execute(
            "SELECT * FROM meetings WHERE id = ?", (meeting_id,)
        )
        record = await cursor.fetchone()
    return None if record is None else self._row_to_meeting(record)
|
| 982 |
+
|
| 983 |
+
async def get_active_meeting(self) -> Optional[dict]:
    """Get the currently active (recording) meeting.

    Returns:
        The active meeting dictionary, or None if no meeting is recording.
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        # A bare LIMIT 1 returns an arbitrary row if more than one meeting
        # is ever left in the 'recording' state (e.g. after a crash), so
        # order by start time to deterministically pick the most recent one.
        cursor = await db.execute(
            "SELECT * FROM meetings WHERE status = 'recording' "
            "ORDER BY started_at DESC LIMIT 1"
        )
        row = await cursor.fetchone()
        return self._row_to_meeting(row) if row else None
|
| 996 |
+
|
| 997 |
+
async def create_meeting(self, meeting_data: dict) -> dict:
    """Insert a new meeting row and return the stored record.

    Args:
        meeting_data: Dictionary containing meeting data (must include 'id', 'title').

    Returns:
        The created meeting dictionary as re-read from the database.
    """
    now = datetime.utcnow().isoformat()
    row_values = (
        meeting_data["id"],
        meeting_data["title"],
        meeting_data.get("transcript", ""),
        # action_items is stored as a JSON-encoded list.
        json.dumps(meeting_data.get("action_items", [])),
        meeting_data.get("summary", ""),
        meeting_data.get("duration_seconds", 0),
        # New meetings begin in the 'recording' state unless told otherwise.
        meeting_data.get("status", "recording"),
        meeting_data.get("started_at") or now,
        meeting_data.get("ended_at"),
        now,
        now,
    )

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            INSERT INTO meetings
            (id, title, transcript, action_items, summary, duration_seconds,
            status, started_at, ended_at, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """,
            row_values,
        )
        await db.commit()

    logger.info(f"Created meeting: {meeting_data['title']} ({meeting_data['id']})")
    return await self.get_meeting(meeting_data["id"])
|
| 1036 |
+
|
| 1037 |
+
async def update_meeting(self, meeting_id: str, meeting_data: dict) -> Optional[dict]:
    """Update an existing meeting.

    Partial update: any field absent from ``meeting_data`` keeps its
    currently stored value (read back via ``get_meeting`` first).

    Args:
        meeting_id: The ID of the meeting to update.
        meeting_data: Dictionary containing updated meeting data.

    Returns:
        The updated meeting dictionary, or None if not found.
    """
    # Read-before-write both detects a missing row (-> None) and supplies
    # fallback values for every omitted field below.
    existing = await self.get_meeting(meeting_id)
    if not existing:
        return None

    now = datetime.utcnow().isoformat()

    # Handle action_items - use existing if not provided
    # (existing["action_items"] presumably arrives JSON-decoded from the
    # row-conversion helper, so it is re-serialized either way — verify
    # against _row_to_meeting.)
    if "action_items" in meeting_data:
        action_items = json.dumps(meeting_data["action_items"])
    else:
        action_items = json.dumps(existing["action_items"])

    async with aiosqlite.connect(self.db_path) as db:
        # NOTE: the parameter tuple order must match the SET-clause column
        # order exactly; keep both in sync when adding fields.
        await db.execute(
            """
            UPDATE meetings SET
                title = ?,
                transcript = ?,
                action_items = ?,
                summary = ?,
                duration_seconds = ?,
                status = ?,
                started_at = ?,
                ended_at = ?,
                updated_at = ?
            WHERE id = ?
            """,
            (
                meeting_data.get("title", existing["title"]),
                meeting_data.get("transcript", existing["transcript"]),
                action_items,
                meeting_data.get("summary", existing["summary"]),
                meeting_data.get("duration_seconds", existing["duration_seconds"]),
                meeting_data.get("status", existing["status"]),
                meeting_data.get("started_at", existing["started_at"]),
                meeting_data.get("ended_at", existing["ended_at"]),
                now,
                meeting_id,
            ),
        )
        await db.commit()

    logger.info(f"Updated meeting: {meeting_id}")
    # Re-read so the caller gets the canonical stored representation.
    return await self.get_meeting(meeting_id)
|
| 1091 |
+
|
| 1092 |
+
async def append_to_meeting_transcript(
    self, meeting_id: str, text: str
) -> Optional[dict]:
    """Append text to an existing meeting's transcript.

    The concatenation happens inside the UPDATE statement itself, so the
    append is atomic. The previous implementation read the transcript into
    Python, concatenated, and wrote it back — two concurrent appends could
    interleave and one would silently overwrite the other.

    Args:
        meeting_id: The ID of the meeting.
        text: The text to append to the transcript.

    Returns:
        The updated meeting dictionary, or None if not found.
    """
    existing = await self.get_meeting(meeting_id)
    if not existing:
        return None

    now = datetime.utcnow().isoformat()

    async with aiosqlite.connect(self.db_path) as db:
        # COALESCE guards the SQL '||' operator against a NULL transcript
        # (NULL || x is NULL), matching the old ''-based behavior.
        await db.execute(
            """
            UPDATE meetings SET
                transcript = COALESCE(transcript, '') || ?,
                updated_at = ?
            WHERE id = ?
            """,
            (text, now, meeting_id),
        )
        await db.commit()

    logger.debug(f"Appended to meeting transcript: {meeting_id}")
    return await self.get_meeting(meeting_id)
|
| 1125 |
+
|
| 1126 |
+
async def delete_meeting(self, meeting_id: str) -> bool:
    """Remove a meeting row.

    Args:
        meeting_id: The ID of the meeting to delete.

    Returns:
        True if a row was removed, False if the meeting did not exist.
    """
    async with aiosqlite.connect(self.db_path) as db:
        result = await db.execute(
            "DELETE FROM meetings WHERE id = ?", (meeting_id,)
        )
        await db.commit()
        if result.rowcount > 0:
            logger.info(f"Deleted meeting: {meeting_id}")
            return True
    return False
|
| 1145 |
+
|
| 1146 |
+
# =========================================================================
|
| 1147 |
+
# Scheduled Messages CRUD Operations
|
| 1148 |
+
# =========================================================================
|
| 1149 |
+
|
| 1150 |
+
async def get_all_scheduled_messages(self, status: Optional[str] = None) -> list[dict]:
    """Fetch scheduled messages, soonest first, optionally filtered by status.

    Args:
        status: Optional filter by status ('pending', 'sent', 'cancelled').

    Returns:
        List of scheduled message dictionaries ordered by scheduled_time.
    """
    # Build the query incrementally so both branches share one code path.
    sql = "SELECT * FROM scheduled_messages"
    params: tuple = ()
    if status:
        sql += " WHERE status = ?"
        params = (status,)
    sql += " ORDER BY scheduled_time ASC"

    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cursor = await db.execute(sql, params)
        records = await cursor.fetchall()
    return [self._row_to_scheduled_message(record) for record in records]
|
| 1172 |
+
|
| 1173 |
+
async def get_scheduled_message(self, message_id: str) -> Optional[dict]:
    """Fetch one scheduled message by its ID.

    Args:
        message_id: The ID of the scheduled message.

    Returns:
        The scheduled message dictionary, or None when no row matches.
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cursor = await db.execute(
            "SELECT * FROM scheduled_messages WHERE id = ?", (message_id,)
        )
        record = await cursor.fetchone()
    return None if record is None else self._row_to_scheduled_message(record)
|
| 1189 |
+
|
| 1190 |
+
async def get_pending_scheduled_messages(self) -> list[dict]:
    """Convenience wrapper: scheduled messages still awaiting delivery.

    Returns:
        List of scheduled message dictionaries with status 'pending',
        ordered by scheduled time.
    """
    return await self.get_all_scheduled_messages(status="pending")
|
| 1197 |
+
|
| 1198 |
+
async def create_scheduled_message(self, message_data: dict) -> dict:
    """Insert a new scheduled message and return the stored row.

    Args:
        message_data: Dictionary containing message data.
            Required: id, recipient_name, recipient_phone, message_content,
                scheduled_time, platform
            Optional: status (defaults to 'pending'), notification_id

    Returns:
        The created scheduled message dictionary.
    """
    now = datetime.utcnow().isoformat()
    row_values = (
        message_data["id"],
        message_data["recipient_name"],
        message_data["recipient_phone"],
        message_data["message_content"],
        message_data["scheduled_time"],
        message_data["platform"],
        # Messages start life as 'pending' unless the caller overrides.
        message_data.get("status", "pending"),
        message_data.get("notification_id"),
        now,
        now,
    )

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            INSERT INTO scheduled_messages
            (id, recipient_name, recipient_phone, message_content,
            scheduled_time, platform, status, notification_id, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """,
            row_values,
        )
        await db.commit()

    logger.info(f"Created scheduled message: {message_data['id']} for {message_data['recipient_name']}")
    return await self.get_scheduled_message(message_data["id"])
|
| 1237 |
+
|
| 1238 |
+
async def update_scheduled_message(
    self, message_id: str, message_data: dict
) -> Optional[dict]:
    """Update an existing scheduled message.

    Partial update: any field absent from ``message_data`` keeps its
    currently stored value.

    Args:
        message_id: The ID of the scheduled message to update.
        message_data: Dictionary containing updated message data.

    Returns:
        The updated scheduled message dictionary, or None if not found.
    """
    # Read-before-write both detects a missing row (-> None) and supplies
    # fallback values for every omitted field below.
    existing = await self.get_scheduled_message(message_id)
    if not existing:
        return None

    now = datetime.utcnow().isoformat()

    async with aiosqlite.connect(self.db_path) as db:
        # NOTE: the parameter tuple order must match the SET-clause column
        # order exactly; keep both in sync when adding fields.
        await db.execute(
            """
            UPDATE scheduled_messages SET
                recipient_name = ?,
                recipient_phone = ?,
                message_content = ?,
                scheduled_time = ?,
                platform = ?,
                status = ?,
                notification_id = ?,
                updated_at = ?
            WHERE id = ?
            """,
            (
                message_data.get("recipient_name", existing["recipient_name"]),
                message_data.get("recipient_phone", existing["recipient_phone"]),
                message_data.get("message_content", existing["message_content"]),
                message_data.get("scheduled_time", existing["scheduled_time"]),
                message_data.get("platform", existing["platform"]),
                message_data.get("status", existing["status"]),
                message_data.get("notification_id", existing["notification_id"]),
                now,
                message_id,
            ),
        )
        await db.commit()

    logger.info(f"Updated scheduled message: {message_id}")
    # Re-read so the caller gets the canonical stored representation.
    return await self.get_scheduled_message(message_id)
|
| 1286 |
+
|
| 1287 |
+
async def update_scheduled_message_status(
    self, message_id: str, status: str
) -> Optional[dict]:
    """Convenience wrapper: change only a scheduled message's status.

    Args:
        message_id: The ID of the scheduled message.
        status: The new status ('pending', 'sent', 'cancelled').

    Returns:
        The updated scheduled message dictionary, or None if not found.
    """
    # Delegate to the general updater with a single-field payload.
    return await self.update_scheduled_message(message_id, {"status": status})
|
| 1300 |
+
|
| 1301 |
+
async def delete_scheduled_message(self, message_id: str) -> bool:
    """Remove a scheduled message row.

    Args:
        message_id: The ID of the scheduled message to delete.

    Returns:
        True if a row was removed, False if the message did not exist.
    """
    async with aiosqlite.connect(self.db_path) as db:
        result = await db.execute(
            "DELETE FROM scheduled_messages WHERE id = ?", (message_id,)
        )
        await db.commit()
        removed = result.rowcount > 0

    if removed:
        logger.info(f"Deleted scheduled message: {message_id}")
    return removed
|
| 1320 |
+
|
| 1321 |
+
# =========================================================================
|
| 1322 |
+
# TamaReachy Operations
|
| 1323 |
+
# =========================================================================
|
| 1324 |
+
|
| 1325 |
+
async def get_tamareachy_state(self) -> dict:
    """Fetch the singleton TamaReachy state row (id = 1).

    Returns:
        TamaReachy state dictionary; when no row exists yet, an in-memory
        default is returned (disabled, all stats at 100, no timestamps).
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cursor = await db.execute(
            "SELECT * FROM tamareachy_state WHERE id = 1"
        )
        record = await cursor.fetchone()

    if record is not None:
        return self._row_to_tamareachy(record)

    # No persisted state yet: synthesize the default.
    state = {"enabled": False}
    for stat in ("hunger", "thirst", "happiness", "energy",
                 "boredom", "social", "health", "cleanliness"):
        state[stat] = 100
    state["last_interaction"] = None
    state["last_decay_check"] = None
    return state
|
| 1353 |
+
|
| 1354 |
+
async def update_tamareachy_state(self, state_data: dict) -> dict:
    """Update TamaReachy state.

    Partial update: any field absent from ``state_data`` keeps the value
    currently reported by ``get_tamareachy_state``.

    Args:
        state_data: Dictionary containing fields to update.

    Returns:
        The updated TamaReachy state dictionary.
    """
    # get_tamareachy_state falls back to an in-memory default when the
    # singleton row (id = 1) is missing; in that case the UPDATE below
    # matches zero rows and this call is a silent no-op that returns the
    # defaults — NOTE(review): confirm the row is seeded at schema init.
    existing = await self.get_tamareachy_state()
    now = datetime.utcnow().isoformat()

    async with aiosqlite.connect(self.db_path) as db:
        # NOTE: the parameter tuple order must match the SET-clause column
        # order exactly; keep both in sync when adding fields.
        await db.execute(
            """
            UPDATE tamareachy_state SET
                enabled = ?,
                hunger = ?,
                thirst = ?,
                happiness = ?,
                energy = ?,
                boredom = ?,
                social = ?,
                health = ?,
                cleanliness = ?,
                last_interaction = ?,
                last_decay_check = ?,
                updated_at = ?
            WHERE id = 1
            """,
            (
                # enabled is stored as an INTEGER 0/1 flag.
                1 if state_data.get("enabled", existing["enabled"]) else 0,
                state_data.get("hunger", existing["hunger"]),
                state_data.get("thirst", existing["thirst"]),
                state_data.get("happiness", existing["happiness"]),
                state_data.get("energy", existing["energy"]),
                state_data.get("boredom", existing["boredom"]),
                state_data.get("social", existing["social"]),
                state_data.get("health", existing["health"]),
                state_data.get("cleanliness", existing["cleanliness"]),
                state_data.get("last_interaction", existing["last_interaction"]),
                state_data.get("last_decay_check", existing["last_decay_check"]),
                now,
            ),
        )
        await db.commit()

    logger.info("Updated TamaReachy state")
    # Re-read so the caller gets the canonical stored representation.
    return await self.get_tamareachy_state()
|
| 1403 |
+
|
| 1404 |
+
async def reset_tamareachy_stats(self) -> dict:
    """Restore every TamaReachy stat to its full value of 100.

    The 'enabled' flag is deliberately left untouched.

    Returns:
        The reset TamaReachy state dictionary.
    """
    now = datetime.utcnow().isoformat()
    payload = {
        stat: 100
        for stat in ("hunger", "thirst", "happiness", "energy",
                     "boredom", "social", "health", "cleanliness")
    }
    # Resetting also counts as an interaction / decay checkpoint.
    payload["last_interaction"] = now
    payload["last_decay_check"] = now
    return await self.update_tamareachy_state(payload)
|
| 1423 |
+
|
| 1424 |
+
def _row_to_tamareachy(self, row: aiosqlite.Row) -> dict:
|
| 1425 |
+
"""Convert a database row to a TamaReachy state dictionary.
|
| 1426 |
+
|
| 1427 |
+
Args:
|
| 1428 |
+
row: The database row.
|
| 1429 |
+
|
| 1430 |
+
Returns:
|
| 1431 |
+
TamaReachy state dictionary with proper types.
|
| 1432 |
+
"""
|
| 1433 |
+
return {
|
| 1434 |
+
"enabled": bool(row["enabled"]),
|
| 1435 |
+
"hunger": row["hunger"],
|
| 1436 |
+
"thirst": row["thirst"],
|
| 1437 |
+
"happiness": row["happiness"],
|
| 1438 |
+
"energy": row["energy"],
|
| 1439 |
+
"boredom": row["boredom"],
|
| 1440 |
+
"social": row["social"],
|
| 1441 |
+
"health": row["health"],
|
| 1442 |
+
"cleanliness": row["cleanliness"],
|
| 1443 |
+
"last_interaction": row["last_interaction"],
|
| 1444 |
+
"last_decay_check": row["last_decay_check"],
|
| 1445 |
+
"created_at": row["created_at"],
|
| 1446 |
+
"updated_at": row["updated_at"],
|
| 1447 |
+
}
|
| 1448 |
+
|
| 1449 |
+
# =========================================================================
|
| 1450 |
+
# Helper Methods
|
| 1451 |
+
# =========================================================================
|
| 1452 |
+
|
| 1453 |
+
def _row_to_animation(self, row: aiosqlite.Row) -> dict:
|
| 1454 |
+
"""Convert a database row to an animation dictionary.
|
| 1455 |
+
|
| 1456 |
+
Args:
|
| 1457 |
+
row: The database row.
|
| 1458 |
+
|
| 1459 |
+
Returns:
|
| 1460 |
+
Animation dictionary with proper types.
|
| 1461 |
+
"""
|
| 1462 |
+
# Handle audio_data - may not exist in older databases
|
| 1463 |
+
audio_data = row["audio_data"] if "audio_data" in row.keys() else None
|
| 1464 |
+
|
| 1465 |
+
return {
|
| 1466 |
+
"id": row["id"],
|
| 1467 |
+
"name": row["name"],
|
| 1468 |
+
"description": row["description"],
|
| 1469 |
+
"duration_ms": row["duration_ms"],
|
| 1470 |
+
"start_pose": json.loads(row["start_pose"]),
|
| 1471 |
+
"keyframes": json.loads(row["keyframes"]),
|
| 1472 |
+
"audio_data": audio_data,
|
| 1473 |
+
"created_at": row["created_at"],
|
| 1474 |
+
"updated_at": row["updated_at"],
|
| 1475 |
+
}
|
| 1476 |
+
|
| 1477 |
+
def _row_to_app(self, row: aiosqlite.Row) -> dict:
|
| 1478 |
+
"""Convert a database row to an app dictionary.
|
| 1479 |
+
|
| 1480 |
+
Args:
|
| 1481 |
+
row: The database row.
|
| 1482 |
+
|
| 1483 |
+
Returns:
|
| 1484 |
+
App dictionary with proper types.
|
| 1485 |
+
"""
|
| 1486 |
+
# Handle enabled_tools - may not exist in older databases
|
| 1487 |
+
enabled_tools_raw = row["enabled_tools"] if "enabled_tools" in row.keys() else "[]"
|
| 1488 |
+
|
| 1489 |
+
return {
|
| 1490 |
+
"id": row["id"],
|
| 1491 |
+
"name": row["name"],
|
| 1492 |
+
"description": row["description"],
|
| 1493 |
+
"system_prompt": row["system_prompt"],
|
| 1494 |
+
"voice_id": row["voice_id"],
|
| 1495 |
+
"emotion_animations": json.loads(row["emotion_animations"]),
|
| 1496 |
+
"icon_color": row["icon_color"],
|
| 1497 |
+
"enabled_tools": json.loads(enabled_tools_raw) if enabled_tools_raw else [],
|
| 1498 |
+
"created_at": row["created_at"],
|
| 1499 |
+
"updated_at": row["updated_at"],
|
| 1500 |
+
}
|
| 1501 |
+
|
| 1502 |
+
def _row_to_website(self, row: aiosqlite.Row) -> dict:
|
| 1503 |
+
"""Convert a database row to a website dictionary.
|
| 1504 |
+
|
| 1505 |
+
Args:
|
| 1506 |
+
row: The database row.
|
| 1507 |
+
|
| 1508 |
+
Returns:
|
| 1509 |
+
Website dictionary with proper types.
|
| 1510 |
+
"""
|
| 1511 |
+
return {
|
| 1512 |
+
"id": row["id"],
|
| 1513 |
+
"title": row["title"],
|
| 1514 |
+
"description": row["description"],
|
| 1515 |
+
"created_at": row["created_at"],
|
| 1516 |
+
"updated_at": row["updated_at"],
|
| 1517 |
+
}
|
| 1518 |
+
|
| 1519 |
+
def _row_to_note(self, row: aiosqlite.Row) -> dict:
|
| 1520 |
+
"""Convert a database row to a mental note dictionary.
|
| 1521 |
+
|
| 1522 |
+
Args:
|
| 1523 |
+
row: The database row.
|
| 1524 |
+
|
| 1525 |
+
Returns:
|
| 1526 |
+
Mental note dictionary with proper types.
|
| 1527 |
+
"""
|
| 1528 |
+
return {
|
| 1529 |
+
"id": row["id"],
|
| 1530 |
+
"title": row["title"],
|
| 1531 |
+
"content": row["content"],
|
| 1532 |
+
"created_at": row["created_at"],
|
| 1533 |
+
"updated_at": row["updated_at"],
|
| 1534 |
+
}
|
| 1535 |
+
|
| 1536 |
+
def _row_to_meeting(self, row: aiosqlite.Row) -> dict:
|
| 1537 |
+
"""Convert a database row to a meeting dictionary.
|
| 1538 |
+
|
| 1539 |
+
Args:
|
| 1540 |
+
row: The database row.
|
| 1541 |
+
|
| 1542 |
+
Returns:
|
| 1543 |
+
Meeting dictionary with proper types.
|
| 1544 |
+
"""
|
| 1545 |
+
action_items_raw = row["action_items"] if row["action_items"] else "[]"
|
| 1546 |
+
return {
|
| 1547 |
+
"id": row["id"],
|
| 1548 |
+
"title": row["title"],
|
| 1549 |
+
"transcript": row["transcript"] or "",
|
| 1550 |
+
"action_items": json.loads(action_items_raw),
|
| 1551 |
+
"summary": row["summary"] or "",
|
| 1552 |
+
"duration_seconds": row["duration_seconds"] or 0,
|
| 1553 |
+
"status": row["status"] or "recording",
|
| 1554 |
+
"started_at": row["started_at"],
|
| 1555 |
+
"ended_at": row["ended_at"],
|
| 1556 |
+
"created_at": row["created_at"],
|
| 1557 |
+
"updated_at": row["updated_at"],
|
| 1558 |
+
}
|
| 1559 |
+
|
| 1560 |
+
def _row_to_scheduled_message(self, row: aiosqlite.Row) -> dict:
|
| 1561 |
+
"""Convert a database row to a scheduled message dictionary.
|
| 1562 |
+
|
| 1563 |
+
Args:
|
| 1564 |
+
row: The database row.
|
| 1565 |
+
|
| 1566 |
+
Returns:
|
| 1567 |
+
Scheduled message dictionary with proper types.
|
| 1568 |
+
"""
|
| 1569 |
+
return {
|
| 1570 |
+
"id": row["id"],
|
| 1571 |
+
"recipient_name": row["recipient_name"],
|
| 1572 |
+
"recipient_phone": row["recipient_phone"],
|
| 1573 |
+
"message_content": row["message_content"],
|
| 1574 |
+
"scheduled_time": row["scheduled_time"],
|
| 1575 |
+
"platform": row["platform"],
|
| 1576 |
+
"status": row["status"] or "pending",
|
| 1577 |
+
"notification_id": row["notification_id"],
|
| 1578 |
+
"created_at": row["created_at"],
|
| 1579 |
+
"updated_at": row["updated_at"],
|
| 1580 |
+
}
|
| 1581 |
+
|
| 1582 |
+
async def close(self) -> None:
|
| 1583 |
+
"""Close database connection (if using persistent connection)."""
|
| 1584 |
+
if self._connection:
|
| 1585 |
+
await self._connection.close()
|
| 1586 |
+
self._connection = None
|
| 1587 |
+
|
| 1588 |
+
|
| 1589 |
+
# Global database service instance
|
| 1590 |
+
db_service: Optional[DatabaseService] = None
|
| 1591 |
+
|
| 1592 |
+
|
| 1593 |
+
def get_database() -> DatabaseService:
|
| 1594 |
+
"""Get the global database service instance.
|
| 1595 |
+
|
| 1596 |
+
Returns:
|
| 1597 |
+
The database service.
|
| 1598 |
+
|
| 1599 |
+
Raises:
|
| 1600 |
+
RuntimeError: If database not initialized.
|
| 1601 |
+
"""
|
| 1602 |
+
if db_service is None:
|
| 1603 |
+
raise RuntimeError("Database not initialized. Call init_database() first.")
|
| 1604 |
+
return db_service
|
| 1605 |
+
|
| 1606 |
+
|
| 1607 |
+
async def init_database(db_path: Optional[Path] = None) -> DatabaseService:
|
| 1608 |
+
"""Initialize the global database service.
|
| 1609 |
+
|
| 1610 |
+
Args:
|
| 1611 |
+
db_path: Optional custom database path.
|
| 1612 |
+
|
| 1613 |
+
Returns:
|
| 1614 |
+
The initialized database service.
|
| 1615 |
+
"""
|
| 1616 |
+
global db_service
|
| 1617 |
+
db_service = DatabaseService(db_path)
|
| 1618 |
+
await db_service.initialize()
|
| 1619 |
+
return db_service
|
| 1620 |
+
|
reachys_brain/idle_movement_service.py
ADDED
|
@@ -0,0 +1,447 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Idle movement service for subtle, life-like movements when Reachy is inactive.
|
| 2 |
+
|
| 3 |
+
Makes smooth, random micro-movements at random intervals to give Reachy
|
| 4 |
+
a more alive, natural presence when not engaged in conversation or animation.
|
| 5 |
+
Includes subtle head movements and antenna twitches.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import asyncio
|
| 9 |
+
import logging
|
| 10 |
+
import math
|
| 11 |
+
import random
|
| 12 |
+
import time
|
| 13 |
+
from typing import Optional, Callable
|
| 14 |
+
|
| 15 |
+
import httpx
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Movement amplitude settings (in degrees) - very subtle for idle
|
| 21 |
+
IDLE_AMPLITUDE = {
|
| 22 |
+
"head_roll": 4.0, # Gentle side tilts
|
| 23 |
+
"head_pitch": 3.0, # Slight nods
|
| 24 |
+
"head_yaw": 6.0, # Small turns
|
| 25 |
+
"antenna_left": 15.0, # Antenna range
|
| 26 |
+
"antenna_right": 15.0,
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
# Timing settings
|
| 30 |
+
IDLE_UPDATE_RATE = 20 # Hz - smooth but not excessive
|
| 31 |
+
IDLE_INTERVAL_RANGE = (5.0, 12.0) # Seconds between movements - subtle but noticeable
|
| 32 |
+
MOVEMENT_DURATION_RANGE = (1.5, 3.5) # How long each movement takes
|
| 33 |
+
ANTENNA_TWITCH_CHANCE = 0.3 # Chance of antenna movement with head movement
|
| 34 |
+
|
| 35 |
+
# Base position (neutral)
|
| 36 |
+
NEUTRAL_POSITION = {
|
| 37 |
+
"head_roll": 0.0,
|
| 38 |
+
"head_pitch": 0.0,
|
| 39 |
+
"head_yaw": 0.0,
|
| 40 |
+
"antenna_left": 0.0,
|
| 41 |
+
"antenna_right": 0.0,
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def _ease_in_out(t: float) -> float:
|
| 46 |
+
"""Smooth easing function for natural movement."""
|
| 47 |
+
return 0.5 * (1 - math.cos(math.pi * t))
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _degrees_to_radians(degrees: float) -> float:
|
| 51 |
+
"""Convert degrees to radians."""
|
| 52 |
+
return degrees * math.pi / 180.0
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class IdleMovement:
|
| 56 |
+
"""Represents a single idle movement to perform."""
|
| 57 |
+
|
| 58 |
+
def __init__(self):
|
| 59 |
+
"""Generate a random idle movement."""
|
| 60 |
+
# Generate random head position within limits
|
| 61 |
+
self.head_roll = random.uniform(
|
| 62 |
+
-IDLE_AMPLITUDE["head_roll"],
|
| 63 |
+
IDLE_AMPLITUDE["head_roll"]
|
| 64 |
+
)
|
| 65 |
+
self.head_pitch = random.uniform(
|
| 66 |
+
-IDLE_AMPLITUDE["head_pitch"],
|
| 67 |
+
IDLE_AMPLITUDE["head_pitch"]
|
| 68 |
+
)
|
| 69 |
+
self.head_yaw = random.uniform(
|
| 70 |
+
-IDLE_AMPLITUDE["head_yaw"],
|
| 71 |
+
IDLE_AMPLITUDE["head_yaw"]
|
| 72 |
+
)
|
| 73 |
+
|
| 74 |
+
# Sometimes move antennas too
|
| 75 |
+
if random.random() < ANTENNA_TWITCH_CHANCE:
|
| 76 |
+
self.antenna_left = random.uniform(
|
| 77 |
+
-IDLE_AMPLITUDE["antenna_left"],
|
| 78 |
+
IDLE_AMPLITUDE["antenna_left"]
|
| 79 |
+
)
|
| 80 |
+
self.antenna_right = random.uniform(
|
| 81 |
+
-IDLE_AMPLITUDE["antenna_right"],
|
| 82 |
+
IDLE_AMPLITUDE["antenna_right"]
|
| 83 |
+
)
|
| 84 |
+
else:
|
| 85 |
+
self.antenna_left = 0.0
|
| 86 |
+
self.antenna_right = 0.0
|
| 87 |
+
|
| 88 |
+
# Movement duration
|
| 89 |
+
self.duration = random.uniform(*MOVEMENT_DURATION_RANGE)
|
| 90 |
+
|
| 91 |
+
def to_dict(self) -> dict:
|
| 92 |
+
"""Convert to position dict."""
|
| 93 |
+
return {
|
| 94 |
+
"head_roll": self.head_roll,
|
| 95 |
+
"head_pitch": self.head_pitch,
|
| 96 |
+
"head_yaw": self.head_yaw,
|
| 97 |
+
"antenna_left": self.antenna_left,
|
| 98 |
+
"antenna_right": self.antenna_right,
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
class AntennaOnlyMovement:
|
| 103 |
+
"""Represents an antenna-only twitch movement."""
|
| 104 |
+
|
| 105 |
+
def __init__(self, current_head_pos: dict):
|
| 106 |
+
"""Generate a random antenna movement while keeping head still.
|
| 107 |
+
|
| 108 |
+
Args:
|
| 109 |
+
current_head_pos: Current head position to maintain.
|
| 110 |
+
"""
|
| 111 |
+
# Keep head position stable
|
| 112 |
+
self.head_roll = current_head_pos.get("head_roll", 0.0)
|
| 113 |
+
self.head_pitch = current_head_pos.get("head_pitch", 0.0)
|
| 114 |
+
self.head_yaw = current_head_pos.get("head_yaw", 0.0)
|
| 115 |
+
|
| 116 |
+
# Random antenna positions - sometimes asymmetric for personality
|
| 117 |
+
if random.random() < 0.5:
|
| 118 |
+
# Symmetric movement
|
| 119 |
+
angle = random.uniform(-15.0, 15.0)
|
| 120 |
+
self.antenna_left = angle
|
| 121 |
+
self.antenna_right = angle
|
| 122 |
+
else:
|
| 123 |
+
# Asymmetric - one antenna moves more
|
| 124 |
+
self.antenna_left = random.uniform(-15.0, 15.0)
|
| 125 |
+
self.antenna_right = random.uniform(-15.0, 15.0)
|
| 126 |
+
|
| 127 |
+
# Quick antenna movements
|
| 128 |
+
self.duration = random.uniform(0.5, 1.2)
|
| 129 |
+
|
| 130 |
+
def to_dict(self) -> dict:
|
| 131 |
+
"""Convert to position dict."""
|
| 132 |
+
return {
|
| 133 |
+
"head_roll": self.head_roll,
|
| 134 |
+
"head_pitch": self.head_pitch,
|
| 135 |
+
"head_yaw": self.head_yaw,
|
| 136 |
+
"antenna_left": self.antenna_left,
|
| 137 |
+
"antenna_right": self.antenna_right,
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
class IdleMovementService:
|
| 142 |
+
"""Service for subtle, life-like idle movements.
|
| 143 |
+
|
| 144 |
+
Runs in the background and periodically moves Reachy's head and
|
| 145 |
+
antennas to give a more alive, natural presence when idle.
|
| 146 |
+
|
| 147 |
+
Automatically pauses when:
|
| 148 |
+
- Reachy is speaking
|
| 149 |
+
- An animation is playing
|
| 150 |
+
- A conversation is active
|
| 151 |
+
"""
|
| 152 |
+
|
| 153 |
+
def __init__(self, daemon_url: str = "http://localhost:8000"):
|
| 154 |
+
"""Initialize the idle movement service.
|
| 155 |
+
|
| 156 |
+
Args:
|
| 157 |
+
daemon_url: URL of the Reachy daemon API.
|
| 158 |
+
"""
|
| 159 |
+
self._daemon_url = daemon_url
|
| 160 |
+
self._is_running = False
|
| 161 |
+
self._is_paused = False
|
| 162 |
+
self._idle_task: Optional[asyncio.Task] = None
|
| 163 |
+
|
| 164 |
+
# Current and target positions
|
| 165 |
+
self._current_pos = NEUTRAL_POSITION.copy()
|
| 166 |
+
self._target_pos = NEUTRAL_POSITION.copy()
|
| 167 |
+
self._start_pos = NEUTRAL_POSITION.copy()
|
| 168 |
+
|
| 169 |
+
# Movement timing
|
| 170 |
+
self._move_start_time = 0.0
|
| 171 |
+
self._move_duration = 1.0
|
| 172 |
+
|
| 173 |
+
# Activity check callback
|
| 174 |
+
self._is_busy_callback: Optional[Callable[[], bool]] = None
|
| 175 |
+
|
| 176 |
+
# HTTP client
|
| 177 |
+
self._client: Optional[httpx.AsyncClient] = None
|
| 178 |
+
|
| 179 |
+
# Error rate limiting
|
| 180 |
+
self._last_error_time = 0.0
|
| 181 |
+
|
| 182 |
+
logger.info(f"IdleMovementService initialized (daemon: {daemon_url})")
|
| 183 |
+
|
| 184 |
+
def set_busy_callback(self, callback: Callable[[], bool]) -> None:
|
| 185 |
+
"""Set callback to check if robot is busy.
|
| 186 |
+
|
| 187 |
+
Args:
|
| 188 |
+
callback: Function that returns True if robot is busy.
|
| 189 |
+
"""
|
| 190 |
+
self._is_busy_callback = callback
|
| 191 |
+
|
| 192 |
+
@property
|
| 193 |
+
def is_running(self) -> bool:
|
| 194 |
+
"""Check if idle movement service is running."""
|
| 195 |
+
return self._is_running
|
| 196 |
+
|
| 197 |
+
@property
|
| 198 |
+
def is_paused(self) -> bool:
|
| 199 |
+
"""Check if idle movements are currently paused."""
|
| 200 |
+
return self._is_paused
|
| 201 |
+
|
| 202 |
+
def pause(self) -> None:
|
| 203 |
+
"""Pause idle movements (e.g., during conversation)."""
|
| 204 |
+
if not self._is_paused:
|
| 205 |
+
self._is_paused = True
|
| 206 |
+
logger.debug("Idle movements paused")
|
| 207 |
+
|
| 208 |
+
def resume(self) -> None:
|
| 209 |
+
"""Resume idle movements."""
|
| 210 |
+
if self._is_paused:
|
| 211 |
+
self._is_paused = False
|
| 212 |
+
logger.debug("Idle movements resumed")
|
| 213 |
+
|
| 214 |
+
async def _get_client(self) -> httpx.AsyncClient:
|
| 215 |
+
"""Get or create the async HTTP client."""
|
| 216 |
+
if self._client is None or self._client.is_closed:
|
| 217 |
+
self._client = httpx.AsyncClient(
|
| 218 |
+
base_url=self._daemon_url,
|
| 219 |
+
timeout=2.0,
|
| 220 |
+
)
|
| 221 |
+
return self._client
|
| 222 |
+
|
| 223 |
+
async def _goto_pose(
|
| 224 |
+
self,
|
| 225 |
+
head_roll: float,
|
| 226 |
+
head_pitch: float,
|
| 227 |
+
head_yaw: float,
|
| 228 |
+
antenna_left: float,
|
| 229 |
+
antenna_right: float,
|
| 230 |
+
duration: float,
|
| 231 |
+
) -> bool:
|
| 232 |
+
"""Send a goto command to smoothly move to target pose.
|
| 233 |
+
|
| 234 |
+
Args:
|
| 235 |
+
head_roll: Head tilt in degrees.
|
| 236 |
+
head_pitch: Head nod in degrees.
|
| 237 |
+
head_yaw: Head turn in degrees.
|
| 238 |
+
antenna_left: Left antenna angle in degrees.
|
| 239 |
+
antenna_right: Right antenna angle in degrees.
|
| 240 |
+
duration: Movement duration in seconds.
|
| 241 |
+
|
| 242 |
+
Returns:
|
| 243 |
+
True if successful.
|
| 244 |
+
"""
|
| 245 |
+
try:
|
| 246 |
+
client = await self._get_client()
|
| 247 |
+
|
| 248 |
+
payload = {
|
| 249 |
+
"head_pose": {
|
| 250 |
+
"x": 0.0,
|
| 251 |
+
"y": 0.0,
|
| 252 |
+
"z": 0.0,
|
| 253 |
+
"roll": _degrees_to_radians(head_roll),
|
| 254 |
+
"pitch": _degrees_to_radians(head_pitch),
|
| 255 |
+
"yaw": _degrees_to_radians(head_yaw),
|
| 256 |
+
},
|
| 257 |
+
"body_yaw": 0.0,
|
| 258 |
+
"antennas": [
|
| 259 |
+
_degrees_to_radians(antenna_left),
|
| 260 |
+
_degrees_to_radians(antenna_right),
|
| 261 |
+
],
|
| 262 |
+
"duration": duration,
|
| 263 |
+
"interpolation": "minjerk",
|
| 264 |
+
}
|
| 265 |
+
|
| 266 |
+
response = await client.post(
|
| 267 |
+
"/api/move/goto",
|
| 268 |
+
json=payload,
|
| 269 |
+
)
|
| 270 |
+
response.raise_for_status()
|
| 271 |
+
return True
|
| 272 |
+
|
| 273 |
+
except Exception as e:
|
| 274 |
+
# Rate-limit error logging
|
| 275 |
+
now = time.time()
|
| 276 |
+
if now - self._last_error_time > 10.0:
|
| 277 |
+
logger.warning(f"Idle movement: Failed to goto pose: {e}")
|
| 278 |
+
self._last_error_time = now
|
| 279 |
+
return False
|
| 280 |
+
|
| 281 |
+
def _interpolate_position(self) -> dict:
|
| 282 |
+
"""Calculate current interpolated position."""
|
| 283 |
+
elapsed = time.time() - self._move_start_time
|
| 284 |
+
t = min(1.0, elapsed / self._move_duration)
|
| 285 |
+
|
| 286 |
+
# Apply easing for smooth movement
|
| 287 |
+
eased_t = _ease_in_out(t)
|
| 288 |
+
|
| 289 |
+
result = {}
|
| 290 |
+
for key in NEUTRAL_POSITION.keys():
|
| 291 |
+
start = self._start_pos.get(key, 0.0)
|
| 292 |
+
target = self._target_pos.get(key, 0.0)
|
| 293 |
+
result[key] = start + (target - start) * eased_t
|
| 294 |
+
|
| 295 |
+
return result
|
| 296 |
+
|
| 297 |
+
def _is_movement_complete(self) -> bool:
|
| 298 |
+
"""Check if current movement is complete."""
|
| 299 |
+
elapsed = time.time() - self._move_start_time
|
| 300 |
+
return elapsed >= self._move_duration
|
| 301 |
+
|
| 302 |
+
def _start_movement(self, target: dict, duration: float) -> None:
|
| 303 |
+
"""Start a new movement toward target position.
|
| 304 |
+
|
| 305 |
+
Args:
|
| 306 |
+
target: Target position dict.
|
| 307 |
+
duration: Movement duration in seconds.
|
| 308 |
+
"""
|
| 309 |
+
self._start_pos = self._current_pos.copy()
|
| 310 |
+
self._target_pos = target.copy()
|
| 311 |
+
self._move_start_time = time.time()
|
| 312 |
+
self._move_duration = duration
|
| 313 |
+
|
| 314 |
+
def _is_robot_busy(self) -> bool:
|
| 315 |
+
"""Check if robot is currently busy (speaking, animating, etc.)."""
|
| 316 |
+
if self._is_busy_callback:
|
| 317 |
+
return self._is_busy_callback()
|
| 318 |
+
return False
|
| 319 |
+
|
| 320 |
+
async def _idle_loop(self) -> None:
|
| 321 |
+
"""Main idle movement loop."""
|
| 322 |
+
logger.info("🌙 Idle movement service started")
|
| 323 |
+
|
| 324 |
+
next_movement_time = time.time() + random.uniform(3.0, 8.0) # Start with a shorter initial wait
|
| 325 |
+
|
| 326 |
+
try:
|
| 327 |
+
while self._is_running:
|
| 328 |
+
now = time.time()
|
| 329 |
+
|
| 330 |
+
# Check if we should be idle
|
| 331 |
+
if self._is_paused or self._is_robot_busy():
|
| 332 |
+
# Wait and check again
|
| 333 |
+
await asyncio.sleep(0.5)
|
| 334 |
+
# Reset movement timing when resuming
|
| 335 |
+
next_movement_time = time.time() + random.uniform(2.0, 5.0)
|
| 336 |
+
continue
|
| 337 |
+
|
| 338 |
+
# Check if it's time for a new movement
|
| 339 |
+
if now >= next_movement_time:
|
| 340 |
+
# Decide what kind of movement
|
| 341 |
+
if random.random() < 0.3:
|
| 342 |
+
# Antenna-only twitch
|
| 343 |
+
movement = AntennaOnlyMovement(self._current_pos)
|
| 344 |
+
logger.debug(f"🎭 Antenna twitch (duration: {movement.duration:.1f}s)")
|
| 345 |
+
else:
|
| 346 |
+
# Full idle movement
|
| 347 |
+
movement = IdleMovement()
|
| 348 |
+
logger.debug(f"🎭 Idle movement: roll={movement.head_roll:.1f}° pitch={movement.head_pitch:.1f}° yaw={movement.head_yaw:.1f}° (duration: {movement.duration:.1f}s)")
|
| 349 |
+
|
| 350 |
+
# Send the goto command - daemon handles interpolation
|
| 351 |
+
target = movement.to_dict()
|
| 352 |
+
success = await self._goto_pose(
|
| 353 |
+
head_roll=target["head_roll"],
|
| 354 |
+
head_pitch=target["head_pitch"],
|
| 355 |
+
head_yaw=target["head_yaw"],
|
| 356 |
+
antenna_left=target["antenna_left"],
|
| 357 |
+
antenna_right=target["antenna_right"],
|
| 358 |
+
duration=movement.duration,
|
| 359 |
+
)
|
| 360 |
+
|
| 361 |
+
if success:
|
| 362 |
+
# Update our tracked position
|
| 363 |
+
self._current_pos = target.copy()
|
| 364 |
+
# Wait for movement to complete
|
| 365 |
+
await asyncio.sleep(movement.duration + 0.2)
|
| 366 |
+
|
| 367 |
+
# Schedule next movement
|
| 368 |
+
next_movement_time = time.time() + random.uniform(*IDLE_INTERVAL_RANGE)
|
| 369 |
+
logger.debug(f"Next idle movement in {next_movement_time - time.time():.1f}s")
|
| 370 |
+
else:
|
| 371 |
+
# Wait a bit before checking again
|
| 372 |
+
await asyncio.sleep(0.5)
|
| 373 |
+
|
| 374 |
+
except asyncio.CancelledError:
|
| 375 |
+
logger.debug("Idle movement loop cancelled")
|
| 376 |
+
except Exception as e:
|
| 377 |
+
logger.error(f"Error in idle movement loop: {e}")
|
| 378 |
+
finally:
|
| 379 |
+
# Return to neutral position on stop
|
| 380 |
+
await self._return_to_neutral()
|
| 381 |
+
logger.info("🌙 Idle movement service stopped")
|
| 382 |
+
|
| 383 |
+
async def _return_to_neutral(self) -> None:
|
| 384 |
+
"""Smoothly return to neutral position."""
|
| 385 |
+
await self._goto_pose(
|
| 386 |
+
head_roll=0.0,
|
| 387 |
+
head_pitch=0.0,
|
| 388 |
+
head_yaw=0.0,
|
| 389 |
+
antenna_left=0.0,
|
| 390 |
+
antenna_right=0.0,
|
| 391 |
+
duration=1.5,
|
| 392 |
+
)
|
| 393 |
+
self._current_pos = NEUTRAL_POSITION.copy()
|
| 394 |
+
await asyncio.sleep(1.5)
|
| 395 |
+
|
| 396 |
+
async def start(self) -> None:
|
| 397 |
+
"""Start the idle movement service."""
|
| 398 |
+
if self._is_running:
|
| 399 |
+
logger.debug("Idle movement service already running")
|
| 400 |
+
return
|
| 401 |
+
|
| 402 |
+
self._is_running = True
|
| 403 |
+
self._is_paused = False
|
| 404 |
+
self._idle_task = asyncio.create_task(self._idle_loop())
|
| 405 |
+
logger.info("🌙 Idle movement service starting...")
|
| 406 |
+
|
| 407 |
+
async def stop(self) -> None:
|
| 408 |
+
"""Stop the idle movement service."""
|
| 409 |
+
if not self._is_running:
|
| 410 |
+
return
|
| 411 |
+
|
| 412 |
+
self._is_running = False
|
| 413 |
+
|
| 414 |
+
if self._idle_task:
|
| 415 |
+
self._idle_task.cancel()
|
| 416 |
+
try:
|
| 417 |
+
await self._idle_task
|
| 418 |
+
except asyncio.CancelledError:
|
| 419 |
+
pass
|
| 420 |
+
self._idle_task = None
|
| 421 |
+
|
| 422 |
+
async def close(self) -> None:
|
| 423 |
+
"""Clean up resources."""
|
| 424 |
+
await self.stop()
|
| 425 |
+
|
| 426 |
+
if self._client and not self._client.is_closed:
|
| 427 |
+
await self._client.aclose()
|
| 428 |
+
self._client = None
|
| 429 |
+
|
| 430 |
+
logger.info("IdleMovementService closed")
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
# Global singleton instance
|
| 434 |
+
_idle_service: Optional[IdleMovementService] = None
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
def get_idle_movement_service() -> Optional[IdleMovementService]:
|
| 438 |
+
"""Get the global idle movement service instance."""
|
| 439 |
+
return _idle_service
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
def set_idle_movement_service(service: IdleMovementService) -> None:
|
| 443 |
+
"""Set the global idle movement service instance."""
|
| 444 |
+
global _idle_service
|
| 445 |
+
_idle_service = service
|
| 446 |
+
logger.info("Global IdleMovementService instance set")
|
| 447 |
+
|
reachys_brain/main.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Main entry point for the Reachy iOS Bridge app."""
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
import signal
|
| 5 |
+
import sys
|
| 6 |
+
import threading
|
| 7 |
+
from typing import Any
|
| 8 |
+
|
| 9 |
+
import uvicorn
|
| 10 |
+
from reachy_mini import ReachyMini, ReachyMiniApp
|
| 11 |
+
|
| 12 |
+
from .server import app, set_head_controller, set_reachy_connected
|
| 13 |
+
|
| 14 |
+
# Configure logging
|
| 15 |
+
logging.basicConfig(
|
| 16 |
+
level=logging.INFO,
|
| 17 |
+
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
|
| 18 |
+
)
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class ReachyIosBridge(ReachyMiniApp):
|
| 23 |
+
"""iOS Bridge app for Reachy Mini.
|
| 24 |
+
|
| 25 |
+
Runs a FastAPI HTTP server that allows an iOS device to:
|
| 26 |
+
- Send text for text-to-speech
|
| 27 |
+
- Control head animations
|
| 28 |
+
- Query robot status
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
# Web interface for configuration (optional)
|
| 32 |
+
custom_app_url: str | None = None
|
| 33 |
+
|
| 34 |
+
def __init__(self) -> None:
|
| 35 |
+
"""Initialize the iOS Bridge app."""
|
| 36 |
+
super().__init__()
|
| 37 |
+
self._server_thread: threading.Thread | None = None
|
| 38 |
+
self._host = "0.0.0.0"
|
| 39 |
+
self._port = 8080
|
| 40 |
+
|
| 41 |
+
def run(self, reachy_mini: ReachyMini | None, stop_event: threading.Event) -> None:
|
| 42 |
+
"""Run the iOS Bridge server.
|
| 43 |
+
|
| 44 |
+
Args:
|
| 45 |
+
reachy_mini: The connected ReachyMini instance (may be None if not connected).
|
| 46 |
+
stop_event: Event to signal when the app should stop.
|
| 47 |
+
"""
|
| 48 |
+
logger.info("Starting Reachy iOS Bridge...")
|
| 49 |
+
logger.info(f"Server will be available at http://{self._host}:{self._port}")
|
| 50 |
+
|
| 51 |
+
# Register Reachy with the server (only if we have a connection)
|
| 52 |
+
if reachy_mini is not None:
|
| 53 |
+
set_reachy_connected(True)
|
| 54 |
+
set_head_controller(reachy_mini)
|
| 55 |
+
logger.info("Reachy Mini connected - motion enabled")
|
| 56 |
+
else:
|
| 57 |
+
set_reachy_connected(False)
|
| 58 |
+
logger.warning("Reachy Mini not connected - motion disabled")
|
| 59 |
+
|
| 60 |
+
# Configure uvicorn
|
| 61 |
+
config = uvicorn.Config(
|
| 62 |
+
app=app,
|
| 63 |
+
host=self._host,
|
| 64 |
+
port=self._port,
|
| 65 |
+
log_level="info",
|
| 66 |
+
access_log=True,
|
| 67 |
+
)
|
| 68 |
+
server = uvicorn.Server(config)
|
| 69 |
+
|
| 70 |
+
# Run server in a thread so we can check stop_event
|
| 71 |
+
self._server_thread = threading.Thread(
|
| 72 |
+
target=server.run,
|
| 73 |
+
daemon=True,
|
| 74 |
+
)
|
| 75 |
+
self._server_thread.start()
|
| 76 |
+
|
| 77 |
+
logger.info(f"iOS Bridge server started on port {self._port}")
|
| 78 |
+
logger.info("Waiting for iOS connections...")
|
| 79 |
+
|
| 80 |
+
# Wait for stop event
|
| 81 |
+
try:
|
| 82 |
+
while not stop_event.is_set():
|
| 83 |
+
stop_event.wait(timeout=0.5)
|
| 84 |
+
except KeyboardInterrupt:
|
| 85 |
+
logger.info("Keyboard interrupt received")
|
| 86 |
+
|
| 87 |
+
# Cleanup
|
| 88 |
+
logger.info("Stopping iOS Bridge server...")
|
| 89 |
+
set_reachy_connected(False)
|
| 90 |
+
|
| 91 |
+
# Signal server to stop
|
| 92 |
+
server.should_exit = True
|
| 93 |
+
|
| 94 |
+
if self._server_thread and self._server_thread.is_alive():
|
| 95 |
+
self._server_thread.join(timeout=2.0)
|
| 96 |
+
|
| 97 |
+
logger.info("iOS Bridge stopped")
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def _connect_reachy(timeout: float = 5.0):
    """Best-effort connection to Reachy Mini, bounded by *timeout* seconds.

    Returns:
        The connected ReachyMini instance, or None if the connection failed
        or timed out (motion is then disabled).
    """
    import concurrent.futures

    # BUG FIX: the original used `with ThreadPoolExecutor() as executor:`.
    # The context manager's __exit__ calls shutdown(wait=True), which blocks
    # until ReachyMini() returns — silently defeating the timeout whenever the
    # connection attempt hangs. Create the executor explicitly and shut it
    # down without waiting instead.
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    try:
        logger.info("Attempting to connect to Reachy Mini...")
        future = executor.submit(ReachyMini)
        try:
            reachy = future.result(timeout=timeout)
            logger.info("Connected to Reachy Mini!")
            return reachy
        except concurrent.futures.TimeoutError:
            logger.warning("Timeout connecting to Reachy Mini - motion will be disabled")
        except Exception as e:
            logger.warning(f"Could not connect to Reachy Mini: {e} - motion will be disabled")
    except Exception as e:
        logger.warning(f"Error during Reachy connection: {e}")
    finally:
        # Don't wait for a hung connection attempt. NOTE(review): a thread
        # still blocked inside ReachyMini() may delay interpreter exit, but
        # main() itself proceeds immediately.
        executor.shutdown(wait=False)
    return None


def main() -> None:
    """Run the iOS Bridge when called as a module by the daemon.

    Creates a ReachyMini instance (best-effort, with a timeout) and runs the
    bridge app until SIGINT/SIGTERM sets the stop event.
    """
    logger.info("Starting Reachy iOS Bridge as module...")

    # Event used by the signal handlers to request a graceful shutdown.
    stop_event = threading.Event()

    def signal_handler(signum, frame):
        logger.info(f"Received signal {signum}, stopping...")
        stop_event.set()

    # Register signal handlers for graceful shutdown.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # Try to connect to Reachy Mini, but don't block startup if it fails.
    reachy_mini = _connect_reachy(timeout=5.0)

    # Create and run the bridge app; exit non-zero on unrecoverable errors.
    try:
        bridge = ReachyIosBridge()
        bridge.run(reachy_mini, stop_event)
    except Exception as e:
        logger.error(f"Error running bridge: {e}")
        sys.exit(1)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
# Module entry point: run the bridge directly (normally launched by the daemon).
if __name__ == "__main__":
    main()
|
| 147 |
+
|
reachys_brain/models.py
ADDED
|
@@ -0,0 +1,537 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Pydantic models for API requests and responses."""
|
| 2 |
+
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
from enum import Enum
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
from pydantic import BaseModel, Field
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class Emotion(str, Enum):
    """Supported emotions for speech and motion.

    Subclasses str so members serialize directly as JSON strings.
    """

    NEUTRAL = "neutral"
    HAPPY = "happy"
    SAD = "sad"
    SURPRISED = "surprised"
    THINKING = "thinking"
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class SpeakRequest(BaseModel):
    """Request body for the /speak endpoint.

    Carries the text to synthesize (1-2000 chars) and the delivery emotion.
    """

    text: str = Field(..., min_length=1, max_length=2000, description="Text to speak")
    emotion: Emotion = Field(default=Emotion.NEUTRAL, description="Emotion for speech delivery")

    # Example shown in the generated OpenAPI docs.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {
                    "text": "Hello! How can I help you today?",
                    "emotion": "happy"
                }
            ]
        }
    }
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class MotionRequest(BaseModel):
    """Request body for the /motion endpoint."""

    animation: str = Field(..., description="Animation name to play")
    # Duration clamped to 0.1-10.0 seconds by validation.
    duration: float = Field(default=2.0, ge=0.1, le=10.0, description="Duration in seconds")

    # Example shown in the generated OpenAPI docs.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {
                    "animation": "nod",
                    "duration": 1.5
                }
            ]
        }
    }
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class RobotStatus(BaseModel):
    """Response model for robot status."""

    connected: bool = Field(..., description="Whether Reachy is connected")
    speaking: bool = Field(..., description="Whether Reachy is currently speaking")
    animation_playing: bool = Field(..., description="Whether an animation is playing")
    # None when the hardware does not report a battery level.
    battery_level: float | None = Field(None, description="Battery level if available")
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class SpeakResponse(BaseModel):
    """Response model for speak requests."""

    success: bool  # True when speech was triggered
    message: str  # Human-readable status or error detail
    duration_seconds: float | None = None  # Speech duration when known
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class MotionResponse(BaseModel):
    """Response model for motion requests."""

    success: bool  # True when the animation was triggered
    message: str  # Human-readable status or error detail
    animation: str | None = None  # Name of the animation that was played
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class StopResponse(BaseModel):
    """Response model for stop requests."""

    success: bool  # True when the stop was accepted
    message: str  # Human-readable status or error detail
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class PowerAction(str, Enum):
    """Power control actions for the robot.

    Subclasses str so members serialize directly as JSON strings.
    """

    WAKE = "wake"
    SLEEP = "sleep"
    REBOOT = "reboot"
    REFLASH_MOTORS = "reflash_motors"
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class PowerControlResponse(BaseModel):
    """Response model for power control requests."""

    success: bool  # True when the action was accepted
    message: str  # Human-readable status or error detail
    action: PowerAction  # The action that was requested
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
class ReflashMotorsResponse(BaseModel):
    """Response model for motor reflash operation."""

    success: bool  # True when the reflash completed
    message: str  # Human-readable status or error detail
    motors_reflashed: int = 0  # Number of motors that were reflashed
    details: Optional[str] = None  # Extra diagnostic text, if any
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# Voice/Language Models
|
| 115 |
+
|
| 116 |
+
class Voice(BaseModel):
    """A TTS voice option."""

    id: str = Field(..., description="Voice identifier (e.g., 'en-us', 'fr', 'de')")
    name: str = Field(..., description="Human-readable voice name")
    language: str = Field(..., description="Language code")
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
class VoiceRequest(BaseModel):
    """Request body for setting the active TTS voice."""

    voice_id: str = Field(..., description="Voice identifier to use")

    # Example shown in the generated OpenAPI docs.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {"voice_id": "en-us"}
            ]
        }
    }
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
class VoiceResponse(BaseModel):
    """Response model for voice operations."""

    success: bool  # True when the operation succeeded
    current_voice: str  # Voice id now in effect
    message: str | None = None  # Optional status/error detail
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
class VoicesListResponse(BaseModel):
    """Response model for listing available voices."""

    voices: list[Voice]  # All selectable voices
    current_voice: str  # Voice id currently in effect
    preferred_language: str | None = None  # User's preferred language code
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
class SupportedLanguage(BaseModel):
    """A supported language option."""

    code: str = Field(..., description="Language code (e.g., 'en', 'nl', 'de')")
    name: str = Field(..., description="Human-readable language name")
    flag: str = Field(..., description="Flag emoji for the language")
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class PreferredLanguageResponse(BaseModel):
    """Response model for preferred language operations.

    success is None for plain reads; supported_languages is populated only
    when the endpoint returns the selectable options.
    """

    success: bool | None = None
    preferred_language: str
    message: str | None = None
    supported_languages: list[SupportedLanguage] | None = None
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
# Joystick Control Models
|
| 172 |
+
|
| 173 |
+
class HeadPoseRequest(BaseModel):
    """Request body for setting the head pose (degrees, validated ranges)."""

    roll: float = Field(default=0.0, ge=-45.0, le=45.0, description="Head roll in degrees")
    pitch: float = Field(default=0.0, ge=-40.0, le=30.0, description="Head pitch in degrees (negative=up)")
    yaw: float = Field(default=0.0, ge=-60.0, le=60.0, description="Head yaw in degrees")

    # Example shown in the generated OpenAPI docs.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {"roll": 0.0, "pitch": -10.0, "yaw": 15.0}
            ]
        }
    }
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
class HeadPoseResponse(BaseModel):
    """Response model for head pose operations."""

    success: bool
    message: str
    # Echo of the applied pose; None when the operation failed.
    roll: float | None = None
    pitch: float | None = None
    yaw: float | None = None
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
class BodyYawRequest(BaseModel):
    """Request body for setting body yaw rotation."""

    yaw: float = Field(..., ge=-180.0, le=180.0, description="Body yaw in degrees")

    # Example shown in the generated OpenAPI docs.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {"yaw": 45.0}
            ]
        }
    }
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
class BodyYawResponse(BaseModel):
    """Response model for body yaw operations."""

    success: bool
    message: str
    yaw: float | None = None  # Echo of the applied yaw; None on failure
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
class AntennaRequest(BaseModel):
    """Request body for setting antenna positions (degrees, +/-45)."""

    left: float = Field(..., ge=-45.0, le=45.0, description="Left antenna angle in degrees")
    right: float = Field(..., ge=-45.0, le=45.0, description="Right antenna angle in degrees")

    # Example shown in the generated OpenAPI docs.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {"left": 15.0, "right": -15.0}
            ]
        }
    }
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
class AntennaResponse(BaseModel):
    """Response model for antenna operations."""

    success: bool
    message: str
    # Echo of the applied angles; None when the operation failed.
    left: float | None = None
    right: float | None = None
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
class JoystickStateResponse(BaseModel):
    """Combined state response for all joystick controls (all angles in degrees)."""

    success: bool
    head_roll: float
    head_pitch: float
    head_yaw: float
    body_yaw: float
    antenna_left: float
    antenna_right: float
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
# =============================================================================
|
| 258 |
+
# Custom Apps Models
|
| 259 |
+
# =============================================================================
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
class CustomAppIconColor(str, Enum):
    """Available icon colors for custom apps.

    Subclasses str so members serialize directly as JSON strings.
    """

    BLUE = "blue"
    PURPLE = "purple"
    PINK = "pink"
    RED = "red"
    ORANGE = "orange"
    YELLOW = "yellow"
    GREEN = "green"
    TEAL = "teal"
    INDIGO = "indigo"
    GRAY = "gray"
    BLACK = "black"
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
class CustomAppBase(BaseModel):
    """Base model for custom app data (fields shared by create/update/read)."""

    name: str = Field(..., min_length=1, max_length=100, description="App display name")
    description: str = Field(default="", max_length=500, description="App description")
    system_prompt: str = Field(..., min_length=1, description="AI system prompt")
    voice_id: str = Field(default="", description="OpenAI voice ID")
    # default_factory avoids a shared mutable default across instances.
    emotion_animations: dict[str, str] = Field(
        default_factory=dict,
        description="Mapping of emotion names to animation names"
    )
    icon_color: CustomAppIconColor = Field(
        default=CustomAppIconColor.BLUE,
        description="Icon background color"
    )
    enabled_tools: list[str] = Field(
        default_factory=list,
        description="List of enabled tool IDs (e.g., 'weather', 'web_search')"
    )
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
class CustomAppCreate(CustomAppBase):
    """Request model for creating a new custom app.

    The client (iOS) supplies the UUID; created_at is optional.
    """

    id: str = Field(..., description="UUID for the app (generated by iOS)")
    created_at: Optional[str] = Field(None, description="ISO timestamp (optional)")

    # Example shown in the generated OpenAPI docs.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {
                    "id": "550e8400-e29b-41d4-a716-446655440000",
                    "name": "Chef Assistant",
                    "description": "A helpful cooking companion",
                    "system_prompt": "You are a friendly chef...",
                    "voice_id": "alloy",
                    "emotion_animations": {
                        "happy": "cheerful1",
                        "thinking": "thoughtful1"
                    },
                    "icon_color": "orange"
                }
            ]
        }
    }
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
class CustomAppUpdate(BaseModel):
    """Request model for updating an existing custom app.

    All fields are optional; only the fields present are changed.
    """

    name: Optional[str] = Field(None, min_length=1, max_length=100)
    description: Optional[str] = Field(None, max_length=500)
    system_prompt: Optional[str] = Field(None, min_length=1)
    voice_id: Optional[str] = None
    emotion_animations: Optional[dict[str, str]] = None
    icon_color: Optional[CustomAppIconColor] = None
    enabled_tools: Optional[list[str]] = None
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
class CustomApp(CustomAppBase):
    """Full custom app model (response model)."""

    id: str = Field(..., description="UUID for the app")
    created_at: str = Field(..., description="ISO timestamp of creation")
    updated_at: str = Field(..., description="ISO timestamp of last update")

    # Allow construction from ORM/attribute objects (Pydantic v2).
    model_config = {
        "from_attributes": True
    }
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
class CustomAppResponse(BaseModel):
    """Response wrapper for single app operations."""

    success: bool
    app: Optional[CustomApp] = None  # None when the operation failed
    message: Optional[str] = None  # Optional status/error detail
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
class CustomAppsListResponse(BaseModel):
    """Response model for listing all custom apps."""

    success: bool
    apps: list[CustomApp]
    count: int  # Number of apps returned
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
class CustomAppsSyncRequest(BaseModel):
    """Request model for bulk syncing apps from iOS."""

    apps: list[CustomAppCreate] = Field(..., description="List of apps to sync")
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
class CustomAppsSyncResponse(BaseModel):
    """Response model for sync operation."""

    success: bool
    synced_count: int  # Number of apps written
    apps: list[CustomApp]  # Resulting app records
    message: Optional[str] = None  # Optional status/error detail
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
# =============================================================================
|
| 381 |
+
# Tool Models
|
| 382 |
+
# =============================================================================
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
class ToolInfo(BaseModel):
    """Information about an available tool."""

    id: str = Field(..., description="Unique tool identifier")
    name: str = Field(..., description="Display name for the tool")
    description: str = Field(..., description="Brief description of what the tool does")
    icon: str = Field(..., description="SF Symbol icon name for iOS")
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
class ToolsListResponse(BaseModel):
    """Response model for listing available tools."""

    success: bool
    tools: list[ToolInfo]
    count: int  # Number of tools returned
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
# =============================================================================
|
| 403 |
+
# Custom Animation Models
|
| 404 |
+
# =============================================================================
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
class AnimationKeyframe(BaseModel):
    """A single keyframe in a recorded animation (angles in degrees)."""

    timestamp: float = Field(..., ge=0.0, description="Time offset in milliseconds from animation start")
    head_pitch: float = Field(default=0.0, description="Head pitch in degrees")
    head_yaw: float = Field(default=0.0, description="Head yaw in degrees")
    head_roll: float = Field(default=0.0, description="Head roll in degrees")
    body_yaw: float = Field(default=0.0, description="Body yaw in degrees")
    antenna_left: float = Field(default=0.0, description="Left antenna angle in degrees")
    antenna_right: float = Field(default=0.0, description="Right antenna angle in degrees")
|
| 417 |
+
|
| 418 |
+
|
| 419 |
+
class AnimationPose(BaseModel):
    """A pose capturing all robot positions (used for start/end poses).

    Same fields as AnimationKeyframe minus the timestamp.
    """

    head_pitch: float = Field(default=0.0, description="Head pitch in degrees")
    head_yaw: float = Field(default=0.0, description="Head yaw in degrees")
    head_roll: float = Field(default=0.0, description="Head roll in degrees")
    body_yaw: float = Field(default=0.0, description="Body yaw in degrees")
    antenna_left: float = Field(default=0.0, description="Left antenna angle in degrees")
    antenna_right: float = Field(default=0.0, description="Right antenna angle in degrees")
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
class CustomAnimationBase(BaseModel):
    """Base model for custom animation data."""

    name: str = Field(..., min_length=1, max_length=100, description="Animation display name")
    description: str = Field(default="", max_length=500, description="Animation description")
    duration_ms: int = Field(..., ge=100, description="Total animation duration in milliseconds")
    start_pose: AnimationPose = Field(..., description="Initial robot pose before animation")
    keyframes: list[AnimationKeyframe] = Field(..., description="List of recorded keyframes")
    audio_data: Optional[str] = Field(
        default=None,
        description="Base64-encoded audio data (M4A format) to play with animation"
    )
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
class CustomAnimationCreate(CustomAnimationBase):
    """Request model for creating a new custom animation.

    The client (iOS) supplies the UUID; created_at is optional.
    """

    id: str = Field(..., description="UUID for the animation (generated by iOS)")
    created_at: Optional[str] = Field(None, description="ISO timestamp (optional)")

    # Example shown in the generated OpenAPI docs.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {
                    "id": "550e8400-e29b-41d4-a716-446655440000",
                    "name": "Wave Hello",
                    "description": "A friendly greeting wave",
                    "duration_ms": 3500,
                    "start_pose": {
                        "head_pitch": 0.0,
                        "head_yaw": 0.0,
                        "head_roll": 0.0,
                        "body_yaw": 0.0,
                        "antenna_left": 0.0,
                        "antenna_right": 0.0
                    },
                    "keyframes": [
                        {
                            "timestamp": 0.0,
                            "head_pitch": 0.0,
                            "head_yaw": 10.0,
                            "head_roll": 0.0,
                            "body_yaw": 0.0,
                            "antenna_left": 20.0,
                            "antenna_right": 20.0
                        }
                    ]
                }
            ]
        }
    }
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
class CustomAnimationUpdate(BaseModel):
    """Request model for updating an existing custom animation.

    All fields are optional; only the fields present are changed.
    """

    name: Optional[str] = Field(None, min_length=1, max_length=100)
    description: Optional[str] = Field(None, max_length=500)
    duration_ms: Optional[int] = Field(None, ge=100)
    start_pose: Optional[AnimationPose] = None
    keyframes: Optional[list[AnimationKeyframe]] = None
    audio_data: Optional[str] = Field(
        None,
        description="Base64-encoded audio data (M4A format) to play with animation"
    )
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
class CustomAnimation(CustomAnimationBase):
    """Full custom animation model (response model)."""

    id: str = Field(..., description="UUID for the animation")
    created_at: str = Field(..., description="ISO timestamp of creation")
    updated_at: str = Field(..., description="ISO timestamp of last update")

    # Allow construction from ORM/attribute objects (Pydantic v2).
    model_config = {
        "from_attributes": True
    }
|
| 507 |
+
|
| 508 |
+
|
| 509 |
+
class CustomAnimationResponse(BaseModel):
    """Response wrapper for single animation operations."""

    success: bool
    animation: Optional[CustomAnimation] = None  # None when the operation failed
    message: Optional[str] = None  # Optional status/error detail
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
class CustomAnimationsListResponse(BaseModel):
    """Response model for listing all custom animations."""

    success: bool
    animations: list[CustomAnimation]
    count: int  # Number of animations returned
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
class AnimationPlayRequest(BaseModel):
    """Request model for playing a custom animation."""

    # 1.0 = recorded speed; validated to 0.1-3.0x.
    speed: float = Field(default=1.0, ge=0.1, le=3.0, description="Playback speed multiplier")
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
class AnimationPlayResponse(BaseModel):
    """Response model for animation playback."""

    success: bool
    message: str
    animation_id: Optional[str] = None  # UUID of the animation played
    duration_ms: Optional[int] = None  # Playback duration, if known
|
reachys_brain/motion_service.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Motion service using Reachy daemon's HTTP API for animations."""
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
from typing import Optional
|
| 5 |
+
|
| 6 |
+
import httpx
|
| 7 |
+
|
| 8 |
+
logger = logging.getLogger(__name__)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Dance move names from reachy-mini-dances-library, as accepted by the
# daemon's recorded-move playback endpoint.
DANCES = [
    "side_glance_flick", "jackson_square", "side_peekaboo", "groovy_sway_and_roll",
    "chin_lead", "side_to_side_sway", "neck_recoil", "head_tilt_roll", "simple_nod",
    "uh_huh_tilt", "interwoven_spirals", "pendulum_swing", "chicken_peck", "yeah_nod",
    "stumble_and_recover", "dizzy_spin", "grid_snap", "polyrhythm_combo",
]
|
| 18 |
+
|
| 19 |
+
# Emotion animation names from reachy-mini-emotions-library, as accepted by
# the daemon's recorded-move playback endpoint.
EMOTIONS = [
    "fear1", "surprised1", "rage1", "resigned1", "go_away1", "loving1", "impatient1",
    "enthusiastic2", "cheerful1", "laughing1", "surprised2", "irritated2", "impatient2",
    "oops2", "enthusiastic1", "curious1", "electric1", "contempt1", "inquiring3",
    "attentive2", "irritated1", "reprimand3", "frustrated1", "dance2", "no1", "sad2",
    "understanding2", "come1", "calming1", "sad1", "exhausted1", "scared1", "downcast1",
    "success1", "disgusted1", "amazed1", "displeased1", "laughing2", "dying1",
    "no_excited1", "thoughtful2", "lonely1", "welcoming1", "no_sad1", "thoughtful1",
    "welcoming2", "reprimand1", "attentive1", "boredom2", "boredom1", "inquiring1",
    "grateful1", "uncertain1", "furious1", "anxiety1", "yes_sad1", "displeased2",
    "proud1", "shy1", "indifferent1", "tired1", "serenity1", "proud3", "helpful2",
    "dance1", "understanding1", "incomprehensible2", "relief1", "relief2", "confused1",
    "success2", "sleep1", "inquiring2", "yes1", "dance3", "oops1", "helpful1",
    "uncomfortable1", "reprimand2", "lost1", "proud2",
]
|
| 35 |
+
|
| 36 |
+
# Friendly alias -> canonical animation name. Each value must appear in
# DANCES or EMOTIONS for _resolve_animation() to find its library.
ALIASES = {
    "nod": "simple_nod",
    "shake": "side_to_side_sway",
    "tilt": "head_tilt_roll",
    "look_around": "side_glance_flick",
    "happy": "cheerful1",
    "sad": "sad1",
    "surprised": "surprised1",
    "thinking": "thoughtful1",
    "idle": "serenity1",
    "attention": "attentive1",
}
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class MotionService:
|
| 52 |
+
"""Service for playing head animations via Reachy daemon's HTTP API."""
|
| 53 |
+
|
| 54 |
+
    def __init__(self, daemon_url: str = "http://localhost:8000") -> None:
        """Initialize the motion service.

        Args:
            daemon_url: URL of the Reachy daemon API.
        """
        self._daemon_url = daemon_url
        # Playback state, updated by play() around each daemon call.
        self._is_playing = False
        self._current_animation: Optional[str] = None
        # Persistent HTTP client with a 10-second request timeout.
        self._client = httpx.Client(base_url=daemon_url, timeout=10.0)
        logger.info(f"Motion service initialized with daemon URL: {daemon_url}")
|
| 65 |
+
|
| 66 |
+
    def set_controller(self, controller) -> None:
        """Set head controller (no-op: this service uses the HTTP API instead)."""
        pass
|
| 69 |
+
|
| 70 |
+
    @property
    def is_playing(self) -> bool:
        """Whether an animation is currently being triggered."""
        return self._is_playing
|
| 74 |
+
|
| 75 |
+
def get_available_animations(self) -> dict:
|
| 76 |
+
"""Get all available animations organized by category.
|
| 77 |
+
|
| 78 |
+
Returns:
|
| 79 |
+
Dict with 'dances', 'emotions', and 'aliases' lists.
|
| 80 |
+
"""
|
| 81 |
+
return {
|
| 82 |
+
"dances": DANCES,
|
| 83 |
+
"emotions": EMOTIONS,
|
| 84 |
+
"aliases": list(ALIASES.keys()),
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
def get_all_animation_names(self) -> list[str]:
|
| 88 |
+
"""Get flat list of all animation names.
|
| 89 |
+
|
| 90 |
+
Returns:
|
| 91 |
+
List of all animation names.
|
| 92 |
+
"""
|
| 93 |
+
all_names = set(DANCES + EMOTIONS + list(ALIASES.keys()))
|
| 94 |
+
return sorted(list(all_names))
|
| 95 |
+
|
| 96 |
+
def _resolve_animation(self, animation: str) -> tuple[str, str]:
|
| 97 |
+
"""Resolve animation name to library and actual name.
|
| 98 |
+
|
| 99 |
+
Args:
|
| 100 |
+
animation: Animation name (can be alias, dance, or emotion).
|
| 101 |
+
|
| 102 |
+
Returns:
|
| 103 |
+
Tuple of (library_name, animation_name) or (None, None) if not found.
|
| 104 |
+
"""
|
| 105 |
+
animation_lower = animation.lower()
|
| 106 |
+
|
| 107 |
+
# Check aliases first
|
| 108 |
+
if animation_lower in ALIASES:
|
| 109 |
+
resolved = ALIASES[animation_lower]
|
| 110 |
+
# Determine which library the resolved name belongs to
|
| 111 |
+
if resolved in DANCES:
|
| 112 |
+
return ("reachy-mini-dances-library", resolved)
|
| 113 |
+
elif resolved in EMOTIONS:
|
| 114 |
+
return ("reachy-mini-emotions-library", resolved)
|
| 115 |
+
|
| 116 |
+
# Check dances
|
| 117 |
+
if animation_lower in [d.lower() for d in DANCES]:
|
| 118 |
+
# Find the exact case
|
| 119 |
+
for d in DANCES:
|
| 120 |
+
if d.lower() == animation_lower:
|
| 121 |
+
return ("reachy-mini-dances-library", d)
|
| 122 |
+
|
| 123 |
+
# Check emotions
|
| 124 |
+
if animation_lower in [e.lower() for e in EMOTIONS]:
|
| 125 |
+
# Find the exact case
|
| 126 |
+
for e in EMOTIONS:
|
| 127 |
+
if e.lower() == animation_lower:
|
| 128 |
+
return ("reachy-mini-emotions-library", e)
|
| 129 |
+
|
| 130 |
+
return (None, None)
|
| 131 |
+
|
| 132 |
+
def play(self, animation: str, duration: float = 2.0, blocking: bool = False) -> bool:
|
| 133 |
+
"""Play a head animation via the daemon API.
|
| 134 |
+
|
| 135 |
+
Args:
|
| 136 |
+
animation: Name of the animation to play.
|
| 137 |
+
duration: Duration (not used - animations have fixed length).
|
| 138 |
+
blocking: If True, wait for animation to complete (not implemented).
|
| 139 |
+
|
| 140 |
+
Returns:
|
| 141 |
+
True if animation was triggered successfully.
|
| 142 |
+
"""
|
| 143 |
+
library, anim_name = self._resolve_animation(animation)
|
| 144 |
+
|
| 145 |
+
if not library or not anim_name:
|
| 146 |
+
logger.warning(f"Unknown animation: {animation}")
|
| 147 |
+
return False
|
| 148 |
+
|
| 149 |
+
try:
|
| 150 |
+
self._is_playing = True
|
| 151 |
+
self._current_animation = animation
|
| 152 |
+
|
| 153 |
+
# Call the daemon's recorded move API
|
| 154 |
+
url = f"/api/move/play/recorded-move-dataset/pollen-robotics/{library}/{anim_name}"
|
| 155 |
+
logger.info(f"Playing animation: {animation} -> {library}/{anim_name}")
|
| 156 |
+
|
| 157 |
+
response = self._client.post(url)
|
| 158 |
+
response.raise_for_status()
|
| 159 |
+
|
| 160 |
+
result = response.json()
|
| 161 |
+
logger.info(f"Animation triggered: {result}")
|
| 162 |
+
|
| 163 |
+
self._is_playing = False
|
| 164 |
+
self._current_animation = None
|
| 165 |
+
|
| 166 |
+
return True
|
| 167 |
+
|
| 168 |
+
except httpx.HTTPStatusError as e:
|
| 169 |
+
logger.error(f"HTTP error playing animation: {e}")
|
| 170 |
+
self._is_playing = False
|
| 171 |
+
self._current_animation = None
|
| 172 |
+
return False
|
| 173 |
+
except Exception as e:
|
| 174 |
+
logger.error(f"Error playing animation: {e}")
|
| 175 |
+
self._is_playing = False
|
| 176 |
+
self._current_animation = None
|
| 177 |
+
return False
|
| 178 |
+
|
| 179 |
+
async def play_async(self, animation: str, duration: float = 2.0) -> bool:
|
| 180 |
+
"""Play animation asynchronously.
|
| 181 |
+
|
| 182 |
+
Args:
|
| 183 |
+
animation: Name of the animation to play.
|
| 184 |
+
duration: Duration (not used).
|
| 185 |
+
|
| 186 |
+
Returns:
|
| 187 |
+
True if animation was triggered successfully.
|
| 188 |
+
"""
|
| 189 |
+
return self.play(animation, duration, blocking=False)
|
| 190 |
+
|
| 191 |
+
def stop(self) -> bool:
|
| 192 |
+
"""Stop current animation.
|
| 193 |
+
|
| 194 |
+
Returns:
|
| 195 |
+
True if stop was successful.
|
| 196 |
+
"""
|
| 197 |
+
try:
|
| 198 |
+
response = self._client.post("/api/move/stop")
|
| 199 |
+
response.raise_for_status()
|
| 200 |
+
|
| 201 |
+
self._is_playing = False
|
| 202 |
+
self._current_animation = None
|
| 203 |
+
|
| 204 |
+
logger.info("Animation stopped")
|
| 205 |
+
return True
|
| 206 |
+
|
| 207 |
+
except Exception as e:
|
| 208 |
+
logger.error(f"Error stopping animation: {e}")
|
| 209 |
+
return False
|
| 210 |
+
|
| 211 |
+
    async def close(self) -> None:
        """Clean up resources.

        NOTE(review): ``_client.close()`` is a synchronous call inside an
        async method -- presumably the client is a sync HTTP client
        (e.g. httpx.Client), so nothing is awaited here. Confirm against
        the constructor, which is outside this view.
        """
        self._client.close()
        logger.info("Motion service closed")
|
reachys_brain/openai_realtime.py
ADDED
|
@@ -0,0 +1,1123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""OpenAI Realtime API WebSocket client for voice conversations on Reachy."""
|
| 2 |
+
|
| 3 |
+
import asyncio
|
| 4 |
+
import base64
|
| 5 |
+
import json
|
| 6 |
+
import logging
|
| 7 |
+
import os
|
| 8 |
+
import time
|
| 9 |
+
from dataclasses import dataclass, field
|
| 10 |
+
from enum import Enum
|
| 11 |
+
from typing import Any, Callable, Optional
|
| 12 |
+
|
| 13 |
+
import websockets
|
| 14 |
+
from websockets.client import WebSocketClientProtocol
|
| 15 |
+
|
| 16 |
+
from .routes.voice import get_current_voice, get_current_language, get_preferred_language
|
| 17 |
+
|
| 18 |
+
# Import app_tools lazily to avoid circular imports
|
| 19 |
+
# (conversation.py imports from this module, and we import from app_tools)
|
| 20 |
+
APP_TOOLS = None
|
| 21 |
+
_tools_handler = None
|
| 22 |
+
_tool_registry = None
|
| 23 |
+
|
| 24 |
+
def _get_app_tools():
    """Lazy import of APP_TOOLS to avoid circular imports.

    Returns:
        The APP_TOOLS list from .app_tools, cached in the module-level
        global after the first call.
    """
    global APP_TOOLS
    if APP_TOOLS is None:
        from .app_tools import APP_TOOLS as tools
        APP_TOOLS = tools
    return APP_TOOLS
|
| 31 |
+
|
| 32 |
+
def _get_tools_handler():
    """Lazy import of tools handler to avoid circular imports.

    Returns:
        The app tools handler from .app_tools, cached in the module-level
        global after the first call.
    """
    global _tools_handler
    if _tools_handler is None:
        from .app_tools import get_tools_handler
        _tools_handler = get_tools_handler()
    return _tools_handler
|
| 39 |
+
|
| 40 |
+
def _get_tool_registry():
    """Lazy import of tool registry to avoid circular imports.

    Returns:
        The dynamic tool registry from .tools, cached in the module-level
        global after the first call.
    """
    global _tool_registry
    if _tool_registry is None:
        from .tools import get_registry
        _tool_registry = get_registry()
    return _tool_registry
|
| 47 |
+
|
| 48 |
+
logger = logging.getLogger(__name__)
|
| 49 |
+
|
| 50 |
+
# OpenAI Realtime API endpoint
|
| 51 |
+
OPENAI_REALTIME_URL = "wss://api.openai.com/v1/realtime"
|
| 52 |
+
OPENAI_MODEL = "gpt-4o-realtime-preview-2024-12-17"
|
| 53 |
+
|
| 54 |
+
# Voice Activity Detection (VAD) settings
|
| 55 |
+
VAD_THRESHOLD = 0.7 # Sensitivity: Higher = less sensitive (default 0.5)
|
| 56 |
+
VAD_PREFIX_PADDING_MS = 300 # Audio padding before speech detection
|
| 57 |
+
VAD_SILENCE_DURATION_MS = 700 # Silence duration before response (default 500)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class ConnectionState(str, Enum):
    """Lifecycle state of the WebSocket connection to OpenAI.

    Mixes in ``str`` so members compare equal to (and serialize as) their
    plain string values.
    """

    DISCONNECTED = "disconnected"
    CONNECTING = "connecting"
    CONNECTED = "connected"
    ERROR = "error"
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class SpeakingState(str, Enum):
    """Whether the AI is currently producing spoken output.

    Mixes in ``str`` so members compare equal to their string values.
    """

    IDLE = "idle"
    SPEAKING = "speaking"
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@dataclass
class RealtimeEvent:
    """Event from OpenAI Realtime API to relay to iOS."""

    # Type identifier of the upstream event.
    event_type: str
    # Event payload; default_factory gives each instance a fresh empty dict.
    data: dict = field(default_factory=dict)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class OpenAIRealtimeService:
|
| 82 |
+
"""WebSocket client for OpenAI Realtime API.
|
| 83 |
+
|
| 84 |
+
Manages the connection to OpenAI, sends audio from the microphone,
|
| 85 |
+
receives transcriptions and AI responses, and plays audio responses.
|
| 86 |
+
"""
|
| 87 |
+
|
| 88 |
+
    def __init__(self) -> None:
        """Initialize the OpenAI Realtime service.

        Sets up all connection/speaking state, language configuration,
        tool-call tracking, and the callback slots the iOS relay hooks into.
        No network activity happens here; see ``connect``.
        """
        self._websocket: Optional[WebSocketClientProtocol] = None
        self._receive_task: Optional[asyncio.Task] = None

        # State
        self._connection_state = ConnectionState.DISCONNECTED
        self._speaking_state = SpeakingState.IDLE
        self._is_listening = False
        self._current_transcript = ""
        self._response_text_buffer = ""

        # Event used to await the end of an in-flight spoken response.
        # This matters because `session.update` (voice/language) only applies to *future* responses.
        # Starts set: nothing is speaking yet.
        self._speaking_idle_event = asyncio.Event()
        self._speaking_idle_event.set()

        # Language settings - use preferred language from voice settings
        self._language = get_preferred_language()
        # ISO 639-1 code -> human-readable name, used in the system prompt.
        self._language_names = {
            "en": "English",
            "nl": "Dutch",
            "de": "German",
            "fr": "French",
            "es": "Spanish",
            "it": "Italian",
            "pt": "Portuguese",
            "ja": "Japanese",
            "ko": "Korean",
            "zh": "Chinese",
            "ar": "Arabic",
            "hi": "Hindi",
            "ru": "Russian",
            "pl": "Polish",
            "tr": "Turkish",
            "sv": "Swedish",
            "da": "Danish",
            "no": "Norwegian",
            "fi": "Finnish",
        }

        # Custom personality (system prompt)
        self._custom_system_prompt: Optional[str] = None

        # Enabled tools for current app (None = default/all tools)
        self._enabled_tools: Optional[list[str]] = None

        # Tool call tracking
        self._pending_tool_calls: dict[str, dict] = {}  # call_id -> {name, arguments_buffer}

        # Rate limiting for tool calls to prevent infinite loops
        self._tool_call_count = 0
        self._tool_call_reset_time = 0.0
        self._MAX_TOOL_CALLS_PER_WINDOW = 10  # Max tool calls in 10 seconds
        self._TOOL_CALL_WINDOW_SECONDS = 10.0

        # Callbacks for iOS relay
        self.on_connection_state: Optional[Callable[[ConnectionState], None]] = None
        self.on_speaking_state: Optional[Callable[[SpeakingState], None]] = None
        self.on_transcript_update: Optional[Callable[[str], None]] = None
        self.on_response_text: Optional[Callable[[str], None]] = None
        self.on_audio_delta: Optional[Callable[[bytes], None]] = None
        self.on_error: Optional[Callable[[str], None]] = None
        self.on_app_change: Optional[Callable[[dict], None]] = None  # App activation changes
        self.on_tool_usage: Optional[Callable[[str, str], None]] = None  # (tool_name, status)
        self.on_website_ready: Optional[Callable[[dict], None]] = None  # Website generation result

        # Meeting callbacks
        self.on_meeting_started: Optional[Callable[[str, str], None]] = None  # (meeting_id, title)
        self.on_meeting_stopped: Optional[Callable[[str], None]] = None  # (meeting_id)
        self.on_meeting_transcript_update: Optional[Callable[[str, str], None]] = None  # (meeting_id, transcript)

        # Session update callback - called when OpenAI confirms session update
        self.on_session_updated: Optional[Callable[[], None]] = None

        # Session update tracking for async waiting
        self._session_update_event: Optional[asyncio.Event] = None

        # Setup tools handler callbacks
        self._setup_tools_handler()
        self._setup_meeting_callbacks()
|
| 169 |
+
|
| 170 |
+
    @property
    def connection_state(self) -> ConnectionState:
        """Current WebSocket connection state."""
        return self._connection_state
|
| 174 |
+
|
| 175 |
+
    @property
    def speaking_state(self) -> SpeakingState:
        """Current AI speaking state."""
        return self._speaking_state
|
| 179 |
+
|
| 180 |
+
    @property
    def is_connected(self) -> bool:
        """True when the WebSocket to OpenAI is fully established."""
        return self._connection_state == ConnectionState.CONNECTED
|
| 184 |
+
|
| 185 |
+
    @property
    def is_listening(self) -> bool:
        """True while the service is actively listening for audio input."""
        return self._is_listening
|
| 189 |
+
|
| 190 |
+
    @property
    def language(self) -> str:
        """Current language code (ISO 639-1, e.g. 'en')."""
        return self._language
|
| 194 |
+
|
| 195 |
+
@language.setter
|
| 196 |
+
def language(self, value: str) -> None:
|
| 197 |
+
"""Set language and update session if connected."""
|
| 198 |
+
if value != self._language:
|
| 199 |
+
self._language = value
|
| 200 |
+
if self.is_connected:
|
| 201 |
+
asyncio.create_task(self._update_session())
|
| 202 |
+
|
| 203 |
+
    def _get_system_instructions(self) -> str:
        """Get system instructions with current language.

        Returns custom personality prompt if set, otherwise default Reachy
        personality. In both cases a language directive naming the user's
        preferred language is appended.

        Returns:
            The full system prompt string.
        """
        # Unknown language codes fall back to English.
        lang_name = self._language_names.get(self._language, "English")

        # Language instruction to append
        language_instruction = f"""

IMPORTANT LANGUAGE INSTRUCTION:
- The user's preferred language is {lang_name}.
- ALWAYS respond in {lang_name}.
- The user will speak to you in {lang_name}.
- Keep all your responses in {lang_name}."""

        # Use custom prompt if set, otherwise use default
        if self._custom_system_prompt:
            return self._custom_system_prompt + language_instruction

        # Default Reachy personality
        return f"""You are Reachy, a friendly and helpful robot assistant. You speak naturally and expressively.

Keep your responses concise and conversational - typically 1-3 sentences unless asked for more detail.
You can express emotions through your responses: happy, sad, surprised, or thoughtful.
Be warm, curious, and engaging. You enjoy helping people and having conversations.
When greeting someone, be enthusiastic but not overwhelming.

PERSONALIZATION:
- You can remember the user's name and country for more personal interactions.

NAME:
- When a user introduces themselves or tells you their name (e.g., "My name is John", "I'm Sarah", "Call me Mike"),
  use the remember_user_name tool to save it. This makes conversations more personal!
- You can use get_user_name to check if you already know someone's name.
- IMPORTANT: If you DON'T know the user's name yet and it's the first few exchanges, naturally ask for their name.
  Example: "By the way, I don't think I know your name yet. What should I call you?"
  Don't ask for their name every single time - just once if you don't know it.

COUNTRY:
- After you know the user's name, also ask for their country so you can show them correct local time.
- When a user tells you their country (e.g., "I live in the Netherlands", "I'm from Germany", "My country is Japan"),
  use the remember_preferred_country tool to save it.
- You can use get_preferred_country to check if you already know someone's country.
- IMPORTANT: If you DON'T know the user's country yet and you already have their name, naturally ask for it.
  Example: "And what country are you in? That way I can tell you the correct local time!"
  Don't ask for their country every single time - just once if you don't know it.
- The country is used to determine the user's timezone for time queries.

TOOLS AND CAPABILITIES:

IMPORTANT RULE - ALWAYS ANNOUNCE TOOL USAGE:
Before using ANY tool, you MUST tell the user what you're about to do. This helps them understand what's happening. Examples:
- Web Search: "Let me search for that..." / "I'll look that up..."
- Weather: "Let me check the weather..." / "Checking the forecast..."
- Camera/Vision: "Let me take a look..." / "I'll see what that is..."
- App commands: "I'll create that app for you..." / "Switching to..."
- Reminders: "I'll add that reminder..." / "Let me check your reminders..."

1. WEB SEARCH (IMPORTANT - USE PROACTIVELY):
   ALWAYS use web_search when the user asks about:
   - Facts, trivia, or general knowledge questions
   - People (celebrities, politicians, scientists, etc.)
   - Places, locations, cities, countries, landmarks
   - Maps, directions, or geographic information
   - Current events, news, or recent happenings
   - Sports scores, game results, standings
   - Movies, TV shows, music, or entertainment
   - Historical events or dates
   - Prices, stock values, or market information
   - Product information or reviews
   - Any question where accuracy or up-to-date info matters

   DO NOT rely on your training data for factual questions - ALWAYS search first!
   Examples: "Who is the president of France?", "What is the capital of Australia?",
   "Tell me about Einstein", "Where is the Eiffel Tower?", "How tall is Mount Everest?"

2. WEATHER: Use get_weather to check weather conditions for any location.
   - "What's the weather in Paris?"
   - "Will it rain in Tokyo tomorrow?"
   - ALWAYS say "Let me check the weather..." before using this tool.

3. VISION: Use recognize_object to see and describe what's in front of you.
   - When user says: "What is this?", "Can you see this?", "What do you see?"
   - When user says: "Look at this", "Recognize this", "What am I holding?"
   - ALWAYS say "Let me take a look..." before using this tool.
   - Respond naturally with what you observe.

4. CUSTOM APPS: You can create and manage custom assistant personalities.
   - "Create an app", "Make me a tutor": Use create_custom_app
   - "Activate the...", "Switch to...": Use activate_custom_app
   - "Go back to normal", "Deactivate": Use deactivate_app
   - "What apps do I have?": Use list_custom_apps

   IMPORTANT: For app creation/activation/deactivation - ALWAYS ask for confirmation first.

5. POWER CONTROL: Control your motors (no confirmation needed):
   - "Wake up", "Turn on": Use wake_up
   - "Go to sleep", "Sleep": Use go_to_sleep

6. PERSONALIZATION: Remember user details for personal interactions.
   - When user says their name: Use remember_user_name to save it
   - To check if you know their name: Use get_user_name
   - When user says their country: Use remember_preferred_country to save it
   - To check if you know their country: Use get_preferred_country

7. WEBSITE GENERATION: Create beautiful websites with voice commands!
   - Use generate_website when user asks to create a website, landing page, portfolio, etc.
   - Examples: "Create a website for my bakery", "Make me a portfolio", "Build a landing page"
   - You can also EDIT existing websites: "Change the colors", "Add a contact section"
   - ALWAYS say "Let me create that website for you..." before using this tool.
   - The website will be shown as a live preview while being built!

8. REMINDERS: Manage the user's iOS Reminders!
   - Use add_reminder when the user wants to be reminded of something.
   - Examples: "Remind me to call mom tomorrow at 3pm", "Add a reminder to buy groceries"
   - ALWAYS say "I'll add that reminder for you..." before using this tool.
   - Use get_reminders when the user wants to see their reminders.
   - Examples: "What reminders do I have?", "Show me my reminders for today", "What's on my to-do list?"
   - ALWAYS say "Let me check your reminders..." before using this tool.
   - You can filter by date: today, tomorrow, this_week, or all.""" + language_instruction
|
| 324 |
+
|
| 325 |
+
def set_custom_personality(
|
| 326 |
+
self,
|
| 327 |
+
system_prompt: str,
|
| 328 |
+
enabled_tools: Optional[list[str]] = None,
|
| 329 |
+
) -> None:
|
| 330 |
+
"""Set a custom personality/system prompt and enabled tools.
|
| 331 |
+
|
| 332 |
+
Args:
|
| 333 |
+
system_prompt: The custom system prompt for the personality.
|
| 334 |
+
enabled_tools: List of enabled tool IDs. None means use all tools.
|
| 335 |
+
"""
|
| 336 |
+
logger.info(f"🎭 Setting custom personality ({len(system_prompt)} chars)")
|
| 337 |
+
self._custom_system_prompt = system_prompt
|
| 338 |
+
self._enabled_tools = enabled_tools
|
| 339 |
+
|
| 340 |
+
if enabled_tools is not None:
|
| 341 |
+
logger.info(f"🔧 Custom app tools: {enabled_tools}")
|
| 342 |
+
|
| 343 |
+
# Update session if connected
|
| 344 |
+
if self.is_connected:
|
| 345 |
+
asyncio.create_task(self._update_session())
|
| 346 |
+
|
| 347 |
+
def clear_custom_personality(self) -> None:
|
| 348 |
+
"""Clear custom personality and revert to default Reachy."""
|
| 349 |
+
logger.info("🎭 Clearing custom personality - reverting to default Reachy")
|
| 350 |
+
self._custom_system_prompt = None
|
| 351 |
+
self._enabled_tools = None # Reset to all tools
|
| 352 |
+
|
| 353 |
+
# Update session if connected
|
| 354 |
+
if self.is_connected:
|
| 355 |
+
asyncio.create_task(self._update_session())
|
| 356 |
+
|
| 357 |
+
def _get_all_tools(self) -> list[dict]:
|
| 358 |
+
"""Get all tools to register with OpenAI.
|
| 359 |
+
|
| 360 |
+
Combines base APP_TOOLS (app management, power) with enabled dynamic tools
|
| 361 |
+
(weather, web_search) based on the active app's configuration.
|
| 362 |
+
|
| 363 |
+
Returns:
|
| 364 |
+
List of OpenAI function definitions.
|
| 365 |
+
"""
|
| 366 |
+
# Always include base app tools (create, activate, deactivate, etc.)
|
| 367 |
+
base_tools = _get_app_tools()
|
| 368 |
+
|
| 369 |
+
# Get dynamic tools from registry based on enabled_tools
|
| 370 |
+
registry = _get_tool_registry()
|
| 371 |
+
|
| 372 |
+
if self._enabled_tools is None:
|
| 373 |
+
# Default Reachy: enable all dynamic tools
|
| 374 |
+
dynamic_tool_ids = registry.get_all_ids()
|
| 375 |
+
else:
|
| 376 |
+
# Custom app: only enable specified tools
|
| 377 |
+
dynamic_tool_ids = self._enabled_tools
|
| 378 |
+
|
| 379 |
+
# Get tool definitions for enabled dynamic tools
|
| 380 |
+
dynamic_tools = registry.get_definitions(dynamic_tool_ids)
|
| 381 |
+
|
| 382 |
+
# Combine base + dynamic tools
|
| 383 |
+
all_tools = list(base_tools) + dynamic_tools
|
| 384 |
+
|
| 385 |
+
logger.info(
|
| 386 |
+
f"🔧 Tools: {len(base_tools)} base + {len(dynamic_tools)} dynamic = {len(all_tools)} total"
|
| 387 |
+
)
|
| 388 |
+
|
| 389 |
+
return all_tools
|
| 390 |
+
|
| 391 |
+
    def _setup_tools_handler(self) -> None:
        """Setup callbacks for the app tools handler.

        Wires the tools handler's personality/app-change hooks into this
        service so tool execution can reconfigure the live session.
        """
        tools_handler = _get_tools_handler()

        # When tools change personality: a truthy prompt installs a custom
        # personality, a falsy one reverts to the default Reachy persona.
        def on_personality_change(
            system_prompt: Optional[str],
            enabled_tools: Optional[list[str]] = None,
        ) -> None:
            if system_prompt:
                self.set_custom_personality(system_prompt, enabled_tools)
            else:
                self.clear_custom_personality()

        tools_handler.on_personality_change = on_personality_change

        # When tools change active app (async callback).
        # NOTE(review): the wrapper is async but invokes self.on_app_change
        # synchronously -- presumably the handler awaits this coroutine;
        # confirm against the tools handler implementation.
        async def on_app_change(data: dict) -> None:
            if self.on_app_change:
                self.on_app_change(data)

        tools_handler.on_app_change = on_app_change
|
| 413 |
+
|
| 414 |
+
def _setup_meeting_callbacks(self) -> None:
|
| 415 |
+
"""Setup callbacks for meeting tool events."""
|
| 416 |
+
from .tools.meeting import set_meeting_callbacks, is_meeting_active, get_active_meeting_id
|
| 417 |
+
|
| 418 |
+
def on_meeting_started(meeting_id: str, title: str) -> None:
|
| 419 |
+
"""Called when a meeting recording starts."""
|
| 420 |
+
logger.info(f"📝 Meeting started: {title} ({meeting_id})")
|
| 421 |
+
if self.on_meeting_started:
|
| 422 |
+
self.on_meeting_started(meeting_id, title)
|
| 423 |
+
|
| 424 |
+
def on_meeting_stopped(meeting_id: str) -> None:
|
| 425 |
+
"""Called when a meeting recording stops."""
|
| 426 |
+
logger.info(f"📝 Meeting stopped: {meeting_id}")
|
| 427 |
+
if self.on_meeting_stopped:
|
| 428 |
+
self.on_meeting_stopped(meeting_id)
|
| 429 |
+
|
| 430 |
+
def on_transcript_update(meeting_id: str, transcript: str) -> None:
|
| 431 |
+
"""Called when meeting transcript is updated."""
|
| 432 |
+
if self.on_meeting_transcript_update:
|
| 433 |
+
self.on_meeting_transcript_update(meeting_id, transcript)
|
| 434 |
+
|
| 435 |
+
set_meeting_callbacks(
|
| 436 |
+
on_started=on_meeting_started,
|
| 437 |
+
on_stopped=on_meeting_stopped,
|
| 438 |
+
on_transcript_update=on_transcript_update,
|
| 439 |
+
)
|
| 440 |
+
|
| 441 |
+
def _append_to_meeting_transcript(self, text: str, speaker: str = "user") -> None:
|
| 442 |
+
"""Append transcribed text to active meeting if one exists.
|
| 443 |
+
|
| 444 |
+
Args:
|
| 445 |
+
text: The transcribed text.
|
| 446 |
+
speaker: Who spoke ('user' or 'assistant').
|
| 447 |
+
"""
|
| 448 |
+
from .tools.meeting import is_meeting_active, append_to_transcript
|
| 449 |
+
|
| 450 |
+
if is_meeting_active() and text.strip():
|
| 451 |
+
# Format with speaker label
|
| 452 |
+
formatted_text = f"[{speaker.upper()}]: {text.strip()}"
|
| 453 |
+
append_to_transcript(formatted_text)
|
| 454 |
+
|
| 455 |
+
    def _set_connection_state(self, state: ConnectionState) -> None:
        """Update connection state and notify callback.

        Args:
            state: The new connection state to record and broadcast.
        """
        self._connection_state = state
        if self.on_connection_state:
            self.on_connection_state(state)
|
| 460 |
+
|
| 461 |
+
def _set_speaking_state(self, state: SpeakingState) -> None:
    """Update the speaking state, maintain the idle event, notify the callback.

    The idle event lets other coroutines await "not currently speaking"
    (used e.g. before applying a voice or language change).
    """
    if self._speaking_state == state:
        return  # No transition; nothing to signal.
    self._speaking_state = state
    if state == SpeakingState.IDLE:
        self._speaking_idle_event.set()
    else:
        self._speaking_idle_event.clear()
    if self.on_speaking_state:
        self.on_speaking_state(state)
async def connect(self, api_key: Optional[str] = None) -> None:
    """Open a WebSocket to the OpenAI Realtime API and configure the session.

    Args:
        api_key: OpenAI API key. If not provided, uses OPENAI_API_KEY env var.

    Raises:
        ValueError: If no API key is available.
    """
    if self.is_connected:
        logger.warning("Already connected to OpenAI")
        return

    # Resolve the API key: explicit argument wins over the environment.
    resolved_key = api_key or os.environ.get("OPENAI_API_KEY")
    if not resolved_key:
        logger.error("❌ No OpenAI API key provided or found in environment")
        raise ValueError("OpenAI API key not provided")

    logger.info(f"🔌 Connecting to OpenAI Realtime API (key: {resolved_key[:8]}...)")
    self._set_connection_state(ConnectionState.CONNECTING)

    try:
        # The model is selected via a query parameter on the WebSocket URL.
        endpoint = f"{OPENAI_REALTIME_URL}?model={OPENAI_MODEL}"
        auth_headers = {
            "Authorization": f"Bearer {resolved_key}",
            "OpenAI-Beta": "realtime=v1",
        }

        logger.info(f"🔌 Connecting to {endpoint}")

        self._websocket = await websockets.connect(
            endpoint,
            additional_headers=auth_headers,
            ping_interval=20,
            ping_timeout=10,
        )

        logger.info("✅ WebSocket connected to OpenAI Realtime API")

        # Push session settings (voice, VAD, tools) before streaming audio.
        logger.info("⚙️ Configuring OpenAI session...")
        await self._configure_session()

        self._set_connection_state(ConnectionState.CONNECTED)
        logger.info("✅ OpenAI Realtime session configured and ready!")

        # Start the background reader for server events.
        self._receive_task = asyncio.create_task(self._receive_messages())

    except Exception as e:
        logger.error(f"❌ Failed to connect to OpenAI: {e}", exc_info=True)
        self._set_connection_state(ConnectionState.ERROR)
        if self.on_error:
            self.on_error(str(e))
        raise
async def disconnect(self) -> None:
    """Tear down the receive loop, close the WebSocket, and reset state."""
    task = self._receive_task
    if task:
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass  # Expected: we cancelled it ourselves.
        self._receive_task = None

    ws = self._websocket
    if ws:
        await ws.close()
        self._websocket = None

    self._is_listening = False
    self._set_speaking_state(SpeakingState.IDLE)
    self._set_connection_state(ConnectionState.DISCONNECTED)
    logger.info("Disconnected from OpenAI Realtime API")
async def _configure_session(self) -> None:
    """Send a session.update with audio formats, VAD, voice, and tools."""
    # Voice and language come from the persisted user settings.
    voice = get_current_voice()
    self._language = get_preferred_language()

    # Combined tool list: base app tools plus dynamic registry tools.
    all_tools = self._get_all_tools()
    logger.info(f"🔊 Configuring session with voice: {voice}, language: {self._language}")

    transcription = {
        "model": "whisper-1",
        "language": self._language,
    }
    turn_detection = {
        "type": "server_vad",
        "threshold": VAD_THRESHOLD,
        "prefix_padding_ms": VAD_PREFIX_PADDING_MS,
        "silence_duration_ms": VAD_SILENCE_DURATION_MS,
    }
    await self._send_message({
        "type": "session.update",
        "session": {
            "modalities": ["text", "audio"],
            "instructions": self._get_system_instructions(),
            "voice": voice,
            "input_audio_format": "pcm16",
            "output_audio_format": "pcm16",
            "input_audio_transcription": transcription,
            "turn_detection": turn_detection,
            "tools": all_tools,
            "tool_choice": "auto",
        },
    })
async def _update_session(self) -> None:
    """Re-apply the session configuration so new language settings take effect."""
    if not self.is_connected:
        return

    # Human-readable language name used only for the log line.
    readable = self._language_names.get(self._language, "English")
    logger.info(f"Updating OpenAI session language to {readable}")

    await self._configure_session()
async def _send_message(self, message: dict) -> None:
    """Serialize one message to JSON and send it over the WebSocket.

    Raises:
        RuntimeError: If no WebSocket connection is open.
    """
    ws = self._websocket
    if ws is None:
        raise RuntimeError("Not connected to OpenAI")
    await ws.send(json.dumps(message))
async def _receive_messages(self) -> None:
    """Read server events off the WebSocket until it closes or errors."""
    ws = self._websocket
    if ws is None:
        return

    try:
        async for raw in ws:
            await self._handle_message(raw)
    except websockets.ConnectionClosed:
        # Normal shutdown: the server (or our disconnect()) closed the socket.
        logger.info("OpenAI WebSocket connection closed")
        self._set_connection_state(ConnectionState.DISCONNECTED)
    except Exception as e:
        logger.error(f"Error receiving messages: {e}")
        self._set_connection_state(ConnectionState.ERROR)
        if self.on_error:
            self.on_error(str(e))
async def _handle_message(self, message: str) -> None:
    """Dispatch one raw server event from the OpenAI Realtime WebSocket.

    Handles session lifecycle, VAD speech markers, user/assistant
    transcripts, audio deltas, tool (function) calls, and errors. Unknown
    event types are logged at debug level and ignored. Malformed JSON is
    logged and dropped without raising.

    Args:
        message: Raw JSON text of a single server event.
    """
    try:
        data = json.loads(message)
        msg_type = data.get("type", "")

        # Log all message types for debugging
        if msg_type not in ("response.audio.delta",):  # Skip noisy audio deltas
            logger.debug(f"📨 OpenAI event: {msg_type}")

        if msg_type in ("session.created", "session.updated"):
            logger.info("✅ OpenAI session configured")
            # Signal any waiting tasks (e.g. set_voice_async) that the
            # session update is complete.
            if self._session_update_event:
                self._session_update_event.set()
            # Notify callback
            if self.on_session_updated:
                self.on_session_updated()

        elif msg_type == "input_audio_buffer.speech_started":
            # User started speaking — force speaking state back to IDLE so
            # the assistant is treated as interrupted/not speaking.
            logger.info("🎤 Speech detected - user is speaking")
            self._set_speaking_state(SpeakingState.IDLE)

        elif msg_type == "input_audio_buffer.speech_stopped":
            # User stopped speaking
            logger.info("🎤 Speech ended - processing...")

        elif msg_type == "conversation.item.input_audio_transcription.completed":
            # User's speech was transcribed
            transcript = data.get("transcript", "")
            logger.info(f"📝 User transcript: {transcript}")
            if transcript:
                self._current_transcript = transcript
                if self.on_transcript_update:
                    self.on_transcript_update(transcript)
                # Append to active meeting transcript
                self._append_to_meeting_transcript(transcript, speaker="user")

        elif msg_type == "response.audio_transcript.delta":
            # AI response text delta — accumulated until .done arrives.
            delta = data.get("delta", "")
            if delta:
                self._response_text_buffer += delta

        elif msg_type == "response.audio_transcript.done":
            # AI response text complete: prefer the event's full transcript
            # over the delta buffer, which is cleared here.
            transcript = data.get("transcript", "")
            if transcript:
                self._response_text_buffer = ""
                if self.on_response_text:
                    self.on_response_text(transcript)
                # Append to active meeting transcript
                self._append_to_meeting_transcript(transcript, speaker="assistant")

        elif msg_type == "response.audio.delta":
            # AI audio response chunk (base64-encoded PCM).
            audio_base64 = data.get("delta", "")
            if audio_base64:
                # Set speaking state on first audio chunk
                if self._speaking_state != SpeakingState.SPEAKING:
                    self._set_speaking_state(SpeakingState.SPEAKING)

                # Decode and forward audio
                audio_data = base64.b64decode(audio_base64)
                if self.on_audio_delta:
                    self.on_audio_delta(audio_data)

        elif msg_type == "response.audio.done":
            # AI audio response complete
            self._set_speaking_state(SpeakingState.IDLE)

        elif msg_type == "response.done":
            # Full response complete; flush any leftover transcript text
            # that never received a .done event.
            self._set_speaking_state(SpeakingState.IDLE)
            if self._response_text_buffer:
                if self.on_response_text:
                    self.on_response_text(self._response_text_buffer)
                self._response_text_buffer = ""

        elif msg_type == "error":
            error_data = data.get("error", {})
            error_msg = error_data.get("message", "Unknown error")
            error_code = error_data.get("code", "unknown")
            logger.error(f"❌ OpenAI error [{error_code}]: {error_msg}")
            logger.error(f"❌ Full error data: {error_data}")
            if self.on_error:
                self.on_error(error_msg)

        elif msg_type == "input_audio_buffer.committed":
            logger.info("✅ Audio buffer committed to OpenAI")

        elif msg_type == "input_audio_buffer.cleared":
            logger.info("🗑️ Audio buffer cleared")

        # Tool/Function call events
        elif msg_type == "response.output_item.added":
            # A new output item was added - could be a function call
            item = data.get("item", {})
            if item.get("type") == "function_call":
                call_id = item.get("call_id", "")
                func_name = item.get("name", "")
                logger.info(f"🔧 Function call started: {func_name} (call_id: {call_id})")
                self._pending_tool_calls[call_id] = {
                    "name": func_name,
                    "arguments_buffer": ""
                }
                # Notify iOS that a tool is being used
                if self.on_tool_usage:
                    self.on_tool_usage(func_name, "started")

        elif msg_type == "response.function_call_arguments.delta":
            # Incremental function arguments (buffered but ultimately the
            # complete string from the .done event is what gets executed).
            call_id = data.get("call_id", "")
            delta = data.get("delta", "")
            if call_id in self._pending_tool_calls:
                self._pending_tool_calls[call_id]["arguments_buffer"] += delta

        elif msg_type == "response.function_call_arguments.done":
            # Function call arguments complete
            call_id = data.get("call_id", "")
            arguments_str = data.get("arguments", "")

            if call_id in self._pending_tool_calls:
                tool_info = self._pending_tool_calls[call_id]
                tool_name = tool_info["name"]

                # Use the complete arguments from the event
                logger.info(f"🔧 Function call complete: {tool_name}")
                logger.debug(f"🔧 Arguments: {arguments_str}")

                # Execute the tool asynchronously so the receive loop keeps
                # draining the WebSocket while the tool runs.
                asyncio.create_task(
                    self._execute_tool_call(call_id, tool_name, arguments_str)
                )

        elif msg_type == "response.output_item.done":
            # Output item complete - cleanup pending tool call if it was a function
            item = data.get("item", {})
            if item.get("type") == "function_call":
                call_id = item.get("call_id", "")
                if call_id in self._pending_tool_calls:
                    del self._pending_tool_calls[call_id]

        else:
            logger.debug(f"Unhandled message type: {msg_type}")

    except json.JSONDecodeError:
        logger.error(f"Failed to parse message: {message[:100]}")
# MARK: - Tool Execution
|
| 771 |
+
|
| 772 |
+
async def _execute_tool_call(self, call_id: str, tool_name: str, arguments_str: str) -> None:
    """Execute a tool call and send the result back to OpenAI.

    Routes to either the app tools handler (for app management tools) or
    the tool registry (for dynamic tools like weather, web_search).

    Includes rate limiting to prevent infinite tool call loops.

    Args:
        call_id: The unique ID for this tool call.
        tool_name: Name of the tool to execute.
        arguments_str: JSON string of tool arguments.
    """

    def _output_message(payload: dict) -> dict:
        # Wrap a result payload in the OpenAI function_call_output envelope.
        return {
            "type": "conversation.item.create",
            "item": {
                "type": "function_call_output",
                "call_id": call_id,
                "output": json.dumps(payload),
            },
        }

    # Rate limiting to prevent infinite tool call loops.
    current_time = time.time()
    if current_time - self._tool_call_reset_time > self._TOOL_CALL_WINDOW_SECONDS:
        # Reset the counter after the window expires.
        self._tool_call_count = 0
        self._tool_call_reset_time = current_time

    self._tool_call_count += 1

    if self._tool_call_count > self._MAX_TOOL_CALLS_PER_WINDOW:
        logger.warning(
            f"⚠️ Tool call rate limit exceeded ({self._tool_call_count} calls in "
            f"{self._TOOL_CALL_WINDOW_SECONDS}s). Possible infinite loop detected."
        )
        # Report the failure to OpenAI, but deliberately do NOT send
        # response.create — not requesting a new response is what breaks
        # the loop.
        await self._send_message(_output_message({
            "success": False,
            "error": "Rate limit exceeded. Please wait before making more requests."
        }))
        # Fix: still notify the client the tool finished, so the UI does not
        # hang in the "started" state set by response.output_item.added.
        if self.on_tool_usage:
            self.on_tool_usage(tool_name, "completed")
        return

    try:
        # Parse arguments, tolerating empty/malformed JSON with a fallback
        # to {} so a bad argument string doesn't kill the tool call.
        try:
            arguments = json.loads(arguments_str) if arguments_str else {}
        except json.JSONDecodeError:
            arguments = {}
            logger.warning(f"Failed to parse tool arguments: {arguments_str}")

        # Dynamic registry tools (weather, web_search, ...) take precedence
        # over the base app tools handler.
        registry = _get_tool_registry()
        dynamic_tool = registry.get(tool_name)

        if dynamic_tool:
            logger.info(f"🔧 Executing dynamic tool: {tool_name}")
            result = await registry.execute(tool_name, arguments)
        else:
            # Base app tools (create_custom_app, activate_custom_app, ...).
            tools_handler = _get_tools_handler()
            result = await tools_handler.execute_tool(tool_name, arguments)

        logger.info(f"🔧 Tool result: {result}")

        # Website generation results additionally notify the dedicated
        # callback so the client can open the generated page.
        if tool_name == "generate_website" and result.get("success"):
            if self.on_website_ready:
                self.on_website_ready({
                    "website_id": result.get("website_id"),
                    "url": result.get("url"),
                    "title": result.get("title"),
                    "is_edit": result.get("is_edit", False),
                })

        # Send the result back to OpenAI, then trigger a response that
        # speaks/acts on the tool outcome.
        await self._send_message(_output_message(result))
        await self._send_message({"type": "response.create"})

        # Notify iOS that tool usage is complete.
        if self.on_tool_usage:
            self.on_tool_usage(tool_name, "completed")

    except Exception as e:
        logger.error(f"Error executing tool {tool_name}: {e}", exc_info=True)

        # Report the failure so the model can explain it to the user.
        await self._send_message(_output_message({"success": False, "error": str(e)}))
        await self._send_message({"type": "response.create"})

        # Notify iOS that tool usage is complete (even on error).
        if self.on_tool_usage:
            self.on_tool_usage(tool_name, "completed")
# MARK: - Audio Input
|
| 886 |
+
|
| 887 |
+
async def send_audio(self, audio_data: bytes) -> None:
    """Stream one chunk of microphone audio to OpenAI.

    Silently drops the chunk when there is no active connection.

    Args:
        audio_data: PCM16 audio data at 24kHz mono.
    """
    if not self.is_connected:
        return

    # The Realtime API expects base64-encoded PCM in an append event.
    encoded = base64.b64encode(audio_data).decode("utf-8")
    await self._send_message({
        "type": "input_audio_buffer.append",
        "audio": encoded,
    })
async def commit_audio_and_respond(self) -> None:
    """Seal the pending input audio turn and request a model response.

    No-op when there is no active connection.
    """
    if not self.is_connected:
        return

    # Commit the buffered user audio, then ask the model to reply to it.
    for event_type in ("input_audio_buffer.commit", "response.create"):
        await self._send_message({"type": event_type})
async def clear_audio_buffer(self) -> None:
    """Discard any uncommitted input audio on the server side.

    No-op when there is no active connection.
    """
    if self.is_connected:
        await self._send_message({"type": "input_audio_buffer.clear"})
async def cancel_response(self) -> None:
    """Ask OpenAI to abort the response currently being generated.

    No-op when there is no active connection.
    """
    if self.is_connected:
        await self._send_message({"type": "response.cancel"})
+
# MARK: - Text Input
|
| 932 |
+
|
| 933 |
+
async def send_text_message(self, text: str) -> None:
    """Send a text message (non-voice interaction).

    Args:
        text: The text message to send.

    Raises:
        RuntimeError: If not connected to OpenAI.
    """
    if not self.is_connected:
        raise RuntimeError("Not connected to OpenAI")

    # Add the user's text to the conversation, then request a reply.
    await self._send_message({
        "type": "conversation.item.create",
        "item": {
            "type": "message",
            "role": "user",
            "content": [
                {
                    "type": "input_text",
                    "text": text,
                }
            ],
        },
    })
    await self._send_message({"type": "response.create"})
# MARK: - Listening Control
|
| 962 |
+
|
| 963 |
+
def start_listening(self) -> None:
    """Begin accepting audio input, clearing any previous transcript."""
    self._current_transcript = ""
    self._is_listening = True
    logger.info("Started listening")
def stop_listening(self) -> None:
    """Stop accepting audio input (listening flag only; no session change)."""
    self._is_listening = False
    logger.info("Stopped listening")
def set_voice(self, voice_id: str) -> None:
    """Set the OpenAI voice and update the session (fire-and-forget).

    Persists the new voice in the backend settings and, when connected,
    schedules a session reconfiguration so the voice applies to future
    responses.

    Args:
        voice_id: OpenAI voice ID (e.g., "alloy", "coral", "sage").
    """
    from .routes.voice import set_current_voice

    # Update the backend voice settings; a falsy return means rejection.
    if not set_current_voice(voice_id):
        logger.warning(f"⚠️ Invalid voice ID: {voice_id}")
        return

    logger.info(f"🔊 Voice set to: {voice_id}")

    # Reconfigure the live OpenAI session so the change takes effect.
    if self.is_connected:
        asyncio.create_task(self._configure_session())
async def set_voice_async(self, voice_id: str, timeout: float = 5.0) -> bool:
    """Set the OpenAI voice and wait for session update confirmation.

    This is the async version that waits for OpenAI to confirm the session update.
    If the AI is currently speaking, it will cancel the current response first
    since the new voice only applies to future responses.

    Args:
        voice_id: OpenAI voice ID (e.g., "alloy", "coral", "sage").
        timeout: Maximum time to wait for confirmation in seconds.

    Returns:
        True if voice was set and session updated successfully.
    """
    from .routes.voice import set_current_voice

    # Update the backend voice settings (persists the choice); a falsy
    # return means the voice ID was rejected as invalid.
    if not set_current_voice(voice_id):
        logger.warning(f"⚠️ Invalid voice ID: {voice_id}")
        return False

    logger.info(f"🔊 Setting voice to: {voice_id}")

    # If not connected, just return success (settings are persisted and
    # will be applied by the next _configure_session on connect).
    if not self.is_connected:
        return True

    # Cancel current response if speaking (new voice won't apply to current response)
    if self._speaking_state == SpeakingState.SPEAKING:
        logger.info("🛑 Cancelling current response for voice change")
        await self.cancel_response()
        await self.clear_audio_buffer()
        try:
            # Wait until the speaking state machine reaches IDLE before
            # reconfiguring; proceed anyway on timeout (best effort).
            await asyncio.wait_for(self._speaking_idle_event.wait(), timeout=timeout)
        except asyncio.TimeoutError:
            logger.warning("⚠️ Timeout waiting for speaking to stop before voice change")

    # Create event to wait for session update; _handle_message sets it
    # when a session.created/session.updated event arrives.
    self._session_update_event = asyncio.Event()

    try:
        # Reconfigure the session
        await self._configure_session()

        # Wait for OpenAI to confirm session update
        await asyncio.wait_for(self._session_update_event.wait(), timeout=timeout)
        logger.info(f"✅ Voice change confirmed: {voice_id}")
        return True

    except asyncio.TimeoutError:
        logger.warning(f"⚠️ Timeout waiting for voice change confirmation")
        return False
    finally:
        # Always drop the event so later session.updated events don't set
        # a stale handle. NOTE(review): concurrent calls would race on this
        # single event attribute — presumably callers serialize; verify.
        self._session_update_event = None
async def set_language_async(self, language: str, timeout: float = 5.0) -> bool:
    """Set the language and wait for session update confirmation.

    If the AI is currently speaking, it will cancel the current response first
    since the new language only applies to future responses.

    Args:
        language: Language code (e.g., "en", "nl", "de").
        timeout: Maximum time to wait for confirmation in seconds.

    Returns:
        True if language was set and session updated successfully.
    """
    from .routes.voice import set_preferred_language

    # Update the backend language settings (persists the choice); a falsy
    # return means the language code was rejected as invalid.
    if not set_preferred_language(language):
        logger.warning(f"⚠️ Invalid language: {language}")
        return False

    logger.info(f"🌍 Setting language to: {language}")

    # Update internal language used when building the session config.
    self._language = language

    # If not connected, just return success (settings are persisted and
    # will be applied by the next _configure_session on connect).
    if not self.is_connected:
        return True

    # Cancel current response if speaking (new language won't apply to current response)
    if self._speaking_state == SpeakingState.SPEAKING:
        logger.info("🛑 Cancelling current response for language change")
        await self.cancel_response()
        await self.clear_audio_buffer()
        try:
            # Wait until the speaking state machine reaches IDLE before
            # reconfiguring; proceed anyway on timeout (best effort).
            await asyncio.wait_for(self._speaking_idle_event.wait(), timeout=timeout)
        except asyncio.TimeoutError:
            logger.warning("⚠️ Timeout waiting for speaking to stop before language change")

    # Create event to wait for session update; _handle_message sets it
    # when a session.created/session.updated event arrives.
    self._session_update_event = asyncio.Event()

    try:
        # Reconfigure the session
        await self._configure_session()

        # Wait for OpenAI to confirm session update
        await asyncio.wait_for(self._session_update_event.wait(), timeout=timeout)
        logger.info(f"✅ Language change confirmed: {language}")
        return True

    except asyncio.TimeoutError:
        logger.warning(f"⚠️ Timeout waiting for language change confirmation")
        return False
    finally:
        # Always drop the event so later session.updated events don't set
        # a stale handle. NOTE(review): concurrent calls would race on this
        # single event attribute — presumably callers serialize; verify.
        self._session_update_event = None
def set_language_from_voice(self, voice_id: str) -> None:
    """Set language based on a voice ID.

    Args:
        voice_id: Voice ID pattern like "nl_BE-nathalie-medium".
    """
    # Extract language code from voice ID pattern: "xx_YY-name-quality".
    # OpenAI-style voice IDs ("alloy", "coral") have no underscore and
    # default to English.
    if "_" in voice_id:
        lang_code = voice_id[:2].lower()
    else:
        lang_code = "en"

    # Fix: assign the internal `_language` attribute that the rest of the
    # class reads (_configure_session, _update_session, set_language_async);
    # `self.language` was a stray attribute and the change had no effect.
    if lang_code in self._language_names:
        self._language = lang_code
    else:
        self._language = "en"
|
reachys_brain/routes/__init__.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Route modules for the iOS Bridge API."""
|
| 2 |
+
|
| 3 |
+
from .animations import router as animations_router
|
| 4 |
+
from .apps import router as apps_router
|
| 5 |
+
from .conversation import router as conversation_router
|
| 6 |
+
from .games import router as games_router
|
| 7 |
+
from .joystick import router as joystick_router
|
| 8 |
+
from .meetings import router as meetings_router
|
| 9 |
+
from .motion import router as motion_router
|
| 10 |
+
from .notes import router as notes_router
|
| 11 |
+
from .openai_config import router as openai_config_router
|
| 12 |
+
from .power import router as power_router
|
| 13 |
+
from .scheduled_messages import router as scheduled_messages_router
|
| 14 |
+
from .speech import router as speech_router
|
| 15 |
+
from .state import router as state_router
|
| 16 |
+
from .tools import router as tools_router
|
| 17 |
+
from .user_settings import router as user_settings_router
|
| 18 |
+
from .voice import router as voice_router
|
| 19 |
+
from .volume import router as volume_router
|
| 20 |
+
from .websites import router as websites_router
|
| 21 |
+
|
| 22 |
+
__all__ = [
|
| 23 |
+
"animations_router",
|
| 24 |
+
"apps_router",
|
| 25 |
+
"conversation_router",
|
| 26 |
+
"games_router",
|
| 27 |
+
"joystick_router",
|
| 28 |
+
"meetings_router",
|
| 29 |
+
"motion_router",
|
| 30 |
+
"notes_router",
|
| 31 |
+
"openai_config_router",
|
| 32 |
+
"power_router",
|
| 33 |
+
"scheduled_messages_router",
|
| 34 |
+
"speech_router",
|
| 35 |
+
"state_router",
|
| 36 |
+
"tools_router",
|
| 37 |
+
"user_settings_router",
|
| 38 |
+
"voice_router",
|
| 39 |
+
"volume_router",
|
| 40 |
+
"websites_router",
|
| 41 |
+
]
|
| 42 |
+
|
reachys_brain/routes/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (1.14 kB). View file
|
|
|
reachys_brain/routes/__pycache__/animations.cpython-312.pyc
ADDED
|
Binary file (23.4 kB). View file
|
|
|
reachys_brain/routes/__pycache__/audio_stream_manager.cpython-312.pyc
ADDED
|
Binary file (7.79 kB). View file
|
|
|
reachys_brain/routes/__pycache__/conversation.cpython-312.pyc
ADDED
|
Binary file (44.5 kB). View file
|
|
|
reachys_brain/routes/__pycache__/conversation_messages.cpython-312.pyc
ADDED
|
Binary file (2.47 kB). View file
|
|
|
reachys_brain/routes/__pycache__/openai_config.cpython-312.pyc
ADDED
|
Binary file (4.61 kB). View file
|
|
|
reachys_brain/routes/__pycache__/speech.cpython-312.pyc
ADDED
|
Binary file (5.92 kB). View file
|
|
|
reachys_brain/routes/__pycache__/voice.cpython-312.pyc
ADDED
|
Binary file (26.5 kB). View file
|
|
|
reachys_brain/routes/animation_manager.py
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Animation management for conversation handling.
|
| 2 |
+
|
| 3 |
+
Coordinates pre-speech animations, emotion-based animations,
|
| 4 |
+
and speaking gestures during voice conversations.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import asyncio
|
| 8 |
+
import logging
|
| 9 |
+
from typing import Optional
|
| 10 |
+
|
| 11 |
+
from .audio_stream_manager import ConversationTimings
|
| 12 |
+
from .audio_manager import flush_audio_buffer
|
| 13 |
+
from .conversation_services import get_services, get_state
|
| 14 |
+
from .user_settings import get_user_name_value
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
async def handle_pre_speech_animation(text: str) -> None:
    """Play the pre-speech emotion animation, then release buffered audio.

    Core of the animation system:
    1. Analyze sentiment from the response text.
    2. Play a matching emotion animation (probabilistically).
    3. Flush the buffered audio chunks.
    4. Switch to direct streaming for the rest of the response.

    Args:
        text: The AI response text to analyze for sentiment.
    """
    svc = get_services()
    st = get_state()

    lock = svc.pre_speech_animation_lock
    if not lock:
        return

    async with lock:
        # Guard: the animation fires at most once per response.
        if st.animation_played_for_response:
            return
        st.animation_played_for_response = True

        try:
            coordinator = svc.animation_coordinator
            if coordinator and text:
                outcome = await coordinator.play_emotion_for_text(text)
                if outcome.played:
                    logger.info(
                        f"🎭 Pre-speech animation: {outcome.animation} "
                        f"(sentiment: {outcome.sentiment}, duration: {outcome.duration}s)"
                    )

            # Release everything buffered while the animation ran, then
            # stream subsequent chunks directly.
            flush_audio_buffer()
            st.is_buffering_audio = False

        except Exception as e:
            logger.error(f"Error in pre-speech animation: {e}")
            # Even on failure the buffered audio must not be stranded.
            flush_audio_buffer()
            st.is_buffering_audio = False
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
async def send_greeting() -> None:
    """Send a greeting prompt when the conversation starts.

    First plays a welcoming animation, then instructs the AI to greet the
    user naturally — personalized with their name when it is known.
    """
    svc = get_services()
    st = get_state()

    try:
        await asyncio.sleep(ConversationTimings.GREETING_ANIMATION_DELAY)

        # Welcome animation comes before any speech.
        if svc.motion:
            anim = st.custom_emotion_animations.get("greeting", "welcoming1")
            logger.info(f"👋 Playing welcome animation: {anim}")
            await svc.motion.play_async(
                animation=anim,
                duration=ConversationTimings.PRE_SPEECH_DURATION_GREETING,
            )
            await asyncio.sleep(ConversationTimings.POST_GREETING_PAUSE)

        client = svc.openai
        if not client or not client.is_connected:
            logger.warning("Cannot send greeting - not connected")
            return

        # Personalize the greeting when the user's name is on record.
        user_name = await get_user_name_value()
        if user_name:
            prompt = f"[Start of conversation. Greet me warmly - my name is {user_name}.]"
            logger.info(f"👋 Sending personalized greeting prompt for: {user_name}")
        else:
            prompt = "[Start of conversation. Greet me warmly and introduce yourself.]"
            logger.info("👋 Sending greeting prompt")

        await client.send_text_message(prompt)

    except Exception as e:
        logger.error(f"Error sending greeting: {e}")
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
async def send_goodbye() -> None:
    """Send a goodbye prompt when the conversation ends.

    Instructs the AI to say goodbye naturally (personalized with the user's
    name when known), then plays a friendly wave animation.
    """
    svc = get_services()
    st = get_state()

    try:
        client = svc.openai
        if not client or not client.is_connected:
            logger.warning("Cannot send goodbye - not connected")
            return

        # Personalize the goodbye when the user's name is on record.
        user_name = await get_user_name_value()
        if user_name:
            prompt = (
                f"[End of conversation. Say a warm goodbye to me - "
                f"my name is {user_name}.]"
            )
            logger.info(f"👋 Sending personalized goodbye prompt for: {user_name}")
        else:
            prompt = "[End of conversation. Say a warm goodbye.]"
            logger.info("👋 Sending goodbye prompt")

        await client.send_text_message(prompt)

        # Friendly wave while (or after) the AI speaks its goodbye.
        if svc.motion:
            await asyncio.sleep(ConversationTimings.GREETING_ANIMATION_DELAY)
            anim = st.custom_emotion_animations.get("greeting", "welcoming1")
            await svc.motion.play_async(
                animation=anim,
                duration=ConversationTimings.PRE_SPEECH_DURATION_GREETING,
            )

    except Exception as e:
        logger.error(f"Error sending goodbye: {e}")
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def set_custom_animations(emotion_animations: dict) -> None:
    """Install custom emotion-to-animation mappings.

    Args:
        emotion_animations: Dictionary mapping emotion names to animation names.
    """
    svc = get_services()

    # Record the mapping on shared state, then mirror it into the coordinator.
    get_state().custom_emotion_animations = emotion_animations

    coordinator = svc.animation_coordinator
    if coordinator:
        coordinator.set_custom_animations(emotion_animations)

    logger.info(f"🎬 Custom emotion animations set: {list(emotion_animations.keys())}")
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def clear_custom_animations() -> None:
    """Drop all custom animation mappings and revert to the defaults."""
    svc = get_services()

    # Reset shared state, then clear the coordinator's copy as well.
    get_state().custom_emotion_animations = {}

    coordinator = svc.animation_coordinator
    if coordinator:
        coordinator.clear_custom_animations()

    logger.info("🎬 Custom emotion animations cleared - using defaults")
|
| 190 |
+
|
reachys_brain/routes/animations.py
ADDED
|
@@ -0,0 +1,578 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Custom animation endpoints for recording and playing back joystick animations.
|
| 2 |
+
|
| 3 |
+
Provides CRUD operations for custom animations and playback functionality.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import asyncio
|
| 7 |
+
import base64
|
| 8 |
+
import logging
|
| 9 |
+
import math
|
| 10 |
+
import tempfile
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
|
| 13 |
+
import httpx
|
| 14 |
+
from fastapi import APIRouter, HTTPException
|
| 15 |
+
|
| 16 |
+
from ..database import get_database
|
| 17 |
+
from ..models import (
|
| 18 |
+
AnimationPlayRequest,
|
| 19 |
+
AnimationPlayResponse,
|
| 20 |
+
CustomAnimation,
|
| 21 |
+
CustomAnimationCreate,
|
| 22 |
+
CustomAnimationResponse,
|
| 23 |
+
CustomAnimationsListResponse,
|
| 24 |
+
CustomAnimationUpdate,
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
logger = logging.getLogger(__name__)
|
| 28 |
+
|
| 29 |
+
router = APIRouter(prefix="/animations", tags=["Custom Animations"])
|
| 30 |
+
|
| 31 |
+
# Daemon API URL for sending position commands
|
| 32 |
+
DAEMON_URL = "http://localhost:8000"
|
| 33 |
+
|
| 34 |
+
# Playback state
|
| 35 |
+
_is_playing = False
|
| 36 |
+
_current_animation_id: str | None = None
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _degrees_to_radians(degrees: float) -> float:
|
| 40 |
+
"""Convert degrees to radians."""
|
| 41 |
+
return degrees * math.pi / 180.0
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
async def _send_pose_to_daemon(
    head_roll: float,
    head_pitch: float,
    head_yaw: float,
    body_yaw: float,
    antenna_left: float,
    antenna_right: float,
) -> None:
    """POST one complete pose (degrees in, radians out) to the daemon's set_target endpoint.

    Raises:
        httpx.HTTPStatusError: If the daemon rejects the request.
    """
    rad = _degrees_to_radians  # all daemon-side angles are radians

    body = {
        "target_head_pose": {
            "x": 0.0,
            "y": 0.0,
            "z": 0.0,
            "roll": rad(head_roll),
            "pitch": rad(head_pitch),
            "yaw": rad(head_yaw),
        },
        "target_body_yaw": rad(body_yaw),
        "target_antennas": [rad(antenna_left), rad(antenna_right)],
    }

    async with httpx.AsyncClient(timeout=2.0) as http:
        resp = await http.post(f"{DAEMON_URL}/api/move/set_target", json=body)
        resp.raise_for_status()
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
# =============================================================================
|
| 78 |
+
# CRUD Endpoints
|
| 79 |
+
# =============================================================================
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
@router.get("", response_model=CustomAnimationsListResponse)
async def list_animations() -> CustomAnimationsListResponse:
    """Return every stored custom animation."""
    try:
        items = await get_database().get_all_animations()
        return CustomAnimationsListResponse(
            success=True,
            animations=items,
            count=len(items),
        )
    except Exception as e:
        logger.error(f"Error listing animations: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
@router.get("/{animation_id}", response_model=CustomAnimationResponse)
async def get_animation(animation_id: str) -> CustomAnimationResponse:
    """Fetch a single custom animation by its ID, 404 if unknown."""
    try:
        record = await get_database().get_animation(animation_id)
        if not record:
            raise HTTPException(status_code=404, detail="Animation not found")
        return CustomAnimationResponse(
            success=True,
            animation=record,
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting animation {animation_id}: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
@router.post("", response_model=CustomAnimationResponse, status_code=201)
async def create_animation(animation: CustomAnimationCreate) -> CustomAnimationResponse:
    """Persist a new custom animation; 409 if the ID is already taken."""
    try:
        db = get_database()

        if await db.get_animation(animation.id):
            raise HTTPException(
                status_code=409,
                detail=f"Animation with ID {animation.id} already exists"
            )

        # Flatten the nested Pydantic models into plain dicts for storage;
        # audio_data is already a base64 string and passes through unchanged.
        payload = animation.model_dump()
        payload["start_pose"] = animation.start_pose.model_dump()
        payload["keyframes"] = [frame.model_dump() for frame in animation.keyframes]

        stored = await db.create_animation(payload)

        with_audio = bool(animation.audio_data)
        logger.info(f"Animation created with audio: {with_audio}")

        suffix = " (with audio)" if with_audio else ""
        return CustomAnimationResponse(
            success=True,
            animation=stored,
            message="Animation created successfully" + suffix,
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error creating animation: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
@router.put("/{animation_id}", response_model=CustomAnimationResponse)
async def update_animation(
    animation_id: str,
    animation: CustomAnimationUpdate,
) -> CustomAnimationResponse:
    """Apply a partial update to an existing custom animation."""
    try:
        # Gather candidate values, flattening nested Pydantic models, then
        # keep only the fields the caller actually supplied (non-None).
        candidates = {
            "name": animation.name,
            "description": animation.description,
            "duration_ms": animation.duration_ms,
            "start_pose": (
                animation.start_pose.model_dump()
                if animation.start_pose is not None
                else None
            ),
            "keyframes": (
                [frame.model_dump() for frame in animation.keyframes]
                if animation.keyframes is not None
                else None
            ),
            "audio_data": animation.audio_data,
        }
        changes = {key: value for key, value in candidates.items() if value is not None}

        refreshed = await get_database().update_animation(animation_id, changes)

        if not refreshed:
            raise HTTPException(status_code=404, detail="Animation not found")

        return CustomAnimationResponse(
            success=True,
            animation=refreshed,
            message="Animation updated successfully",
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error updating animation {animation_id}: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
@router.delete("/{animation_id}")
async def delete_animation(animation_id: str) -> dict:
    """Remove a custom animation; 404 if it does not exist."""
    try:
        removed = await get_database().delete_animation(animation_id)
        if not removed:
            raise HTTPException(status_code=404, detail="Animation not found")

        return {
            "success": True,
            "message": f"Animation {animation_id} deleted successfully",
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error deleting animation {animation_id}: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
# =============================================================================
|
| 219 |
+
# Playback Endpoints
|
| 220 |
+
# =============================================================================
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
@router.post("/{animation_id}/play", response_model=AnimationPlayResponse)
async def play_animation(
    animation_id: str,
    request: AnimationPlayRequest = AnimationPlayRequest(),
) -> AnimationPlayResponse:
    """Play a custom animation on the robot.

    Moves the robot through all recorded keyframes with proper timing.

    Args:
        animation_id: ID of the stored animation to play.
        request: Playback options (speed multiplier).

    Raises:
        HTTPException: 409 if another animation is already playing,
            404 if the animation does not exist, 500 on other errors.
    """
    global _is_playing, _current_animation_id

    if _is_playing:
        raise HTTPException(
            status_code=409,
            detail=f"Animation {_current_animation_id} is already playing"
        )

    # Reserve the playback slot *before* the first await. The original code
    # awaited the DB lookup between the check above and setting the flag,
    # letting two concurrent requests both pass the check and start
    # overlapping playback tasks.
    _is_playing = True
    _current_animation_id = animation_id

    try:
        db = get_database()
        animation = await db.get_animation(animation_id)

        if not animation:
            raise HTTPException(status_code=404, detail="Animation not found")

        # Fire-and-forget: the task resets the playback flags when it ends.
        asyncio.create_task(_play_animation_task(animation, request.speed))

        return AnimationPlayResponse(
            success=True,
            message="Animation playback started",
            animation_id=animation_id,
            duration_ms=int(animation["duration_ms"] / request.speed),
        )

    except HTTPException:
        _is_playing = False
        _current_animation_id = None
        raise
    except Exception as e:
        _is_playing = False
        _current_animation_id = None
        logger.error(f"Error playing animation {animation_id}: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
@router.post("/stop")
async def stop_animation() -> dict:
    """Request that the currently playing animation stop.

    Clearing ``_is_playing`` is picked up by the playback task between
    keyframes.
    """
    global _is_playing, _current_animation_id

    if not _is_playing:
        return {
            "success": True,
            "message": "No animation is currently playing",
        }

    halted = _current_animation_id
    _is_playing = False
    _current_animation_id = None

    return {
        "success": True,
        "message": f"Animation {halted} stopped",
    }
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
@router.get("/status")
async def get_playback_status() -> dict:
    """Report whether an animation is playing and, if so, which one.

    NOTE(review): GET "/{animation_id}" is registered before this route,
    so a request to /animations/status likely matches that handler first
    (treating "status" as an ID) — confirm route ordering.
    """
    status = {"is_playing": _is_playing}
    status["current_animation_id"] = _current_animation_id
    return status
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
# Track audio playback state
|
| 302 |
+
_audio_process: asyncio.subprocess.Process | None = None
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
@router.post("/play-audio")
async def play_audio(request: dict) -> dict:
    """Play base64-encoded audio data (M4A) through Reachy's speaker.

    Used for testing an animation's audio before saving it.
    """
    global _audio_process

    encoded = request.get("audio_data")
    if not encoded:
        raise HTTPException(status_code=400, detail="No audio_data provided")

    try:
        # Interrupt any audio that is still playing before starting anew.
        active = _audio_process
        if active and active.returncode is None:
            active.terminate()
            try:
                await asyncio.wait_for(active.wait(), timeout=1.0)
            except asyncio.TimeoutError:
                active.kill()

        raw = base64.b64decode(encoded)
        logger.info(f"Playing audio: {len(raw)} bytes")

        # Stage the bytes in a temp file for the external player.
        tmp = tempfile.NamedTemporaryFile(
            suffix=".m4a", delete=False
        )
        tmp.write(raw)
        tmp.close()
        tmp_path = Path(tmp.name)

        _audio_process = await _play_audio_file(tmp_path)

        if not _audio_process:
            # Playback never started — remove the temp file right away.
            tmp_path.unlink(missing_ok=True)
            raise HTTPException(
                status_code=500,
                detail="Failed to start audio playback (no audio player available)"
            )

        # Delete the temp file once playback finishes, in the background.
        asyncio.create_task(_cleanup_audio_after_playback(
            _audio_process, tmp_path
        ))
        return {
            "success": True,
            "message": "Audio playback started",
        }

    except base64.binascii.Error as e:
        raise HTTPException(status_code=400, detail=f"Invalid base64 data: {e}")
    except Exception as e:
        logger.error(f"Error playing audio: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
@router.post("/stop-audio")
async def stop_audio() -> dict:
    """Stop any audio that is currently playing."""
    global _audio_process

    proc = _audio_process
    if not proc or proc.returncode is not None:
        return {"success": True, "message": "No audio was playing"}

    proc.terminate()
    try:
        await asyncio.wait_for(proc.wait(), timeout=1.0)
    except asyncio.TimeoutError:
        # Graceful stop timed out — force it.
        proc.kill()
    _audio_process = None
    return {"success": True, "message": "Audio stopped"}
|
| 380 |
+
|
| 381 |
+
|
| 382 |
+
async def _cleanup_audio_after_playback(
    proc: asyncio.subprocess.Process,
    tmp_path: Path,
) -> None:
    """Wait for an audio subprocess to exit, then delete its temp file."""
    try:
        await proc.wait()
    except Exception:
        pass  # best-effort: the file removal below must happen regardless
    finally:
        tmp_path.unlink(missing_ok=True)
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
async def _play_audio_file(audio_file_path: Path) -> asyncio.subprocess.Process | None:
    """Spawn GStreamer to play *audio_file_path* through Reachy's dmix sink.

    The reachymini_audio_sink ALSA device is shared (dmix), so playback can
    proceed even while the daemon process holds the hardware device.

    Args:
        audio_file_path: Path to the audio file to play.

    Returns:
        The playback subprocess if started, None on error.
    """
    pipeline = [
        "gst-launch-1.0", "-q",
        "filesrc", f"location={audio_file_path}",
        "!", "decodebin",
        "!", "audioconvert",
        "!", "audioresample",
        "!", "audio/x-raw,rate=16000,channels=2",
        "!", "alsasink", "device=reachymini_audio_sink",
    ]
    try:
        proc = await asyncio.create_subprocess_exec(
            *pipeline,
            stdout=asyncio.subprocess.DEVNULL,
            stderr=asyncio.subprocess.PIPE,
        )
    except FileNotFoundError:
        logger.error("gst-launch-1.0 not found")
        return None
    except Exception as e:
        logger.error(f"Failed to start audio playback: {e}")
        return None

    logger.info(f"Audio playback started with GStreamer (reachymini_audio_sink): {audio_file_path}")
    return proc
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
async def _play_animation_task(animation: dict, speed: float) -> None:
    """Background task that drives the robot through an animation's keyframes.

    Moves to the start pose, optionally starts the embedded audio, then
    replays keyframes on a wall-clock schedule. The module-level
    ``_is_playing`` flag is polled between keyframes so ``stop_animation``
    can interrupt playback. The ``finally`` block always resets the playback
    flags and cleans up the audio subprocess and temp file.

    Args:
        animation: The animation data dict. Uses keys "name", "start_pose",
            "keyframes", and optionally "audio_data" (base64 M4A).
            Keyframe "timestamp" values appear to be milliseconds
            (divided by 1000 before sleeping) — TODO confirm with recorder.
        speed: Playback speed multiplier (>1 compresses the schedule).
    """
    global _is_playing, _current_animation_id

    audio_temp_file = None
    audio_process = None

    try:
        logger.info(f"Starting animation playback: {animation['name']}")

        # Move to start pose first
        start_pose = animation["start_pose"]
        await _send_pose_to_daemon(
            head_roll=start_pose.get("head_roll", 0.0),
            head_pitch=start_pose.get("head_pitch", 0.0),
            head_yaw=start_pose.get("head_yaw", 0.0),
            body_yaw=start_pose.get("body_yaw", 0.0),
            antenna_left=start_pose.get("antenna_left", 0.0),
            antenna_right=start_pose.get("antenna_right", 0.0),
        )

        # Small delay for start pose
        await asyncio.sleep(0.3)

        # Handle audio playback if present
        audio_data = animation.get("audio_data")
        if audio_data:
            try:
                # Decode base64 audio and write to temp file
                audio_bytes = base64.b64decode(audio_data)
                audio_temp_file = tempfile.NamedTemporaryFile(
                    suffix=".m4a", delete=False
                )
                audio_temp_file.write(audio_bytes)
                audio_temp_file.close()

                logger.info(f"Audio data decoded: {len(audio_bytes)} bytes")

                # Start audio playback
                audio_process = await _play_audio_file(Path(audio_temp_file.name))

            except Exception as e:
                # Audio is best-effort: the animation keeps going without it.
                logger.error(f"Failed to prepare audio: {e}")

        keyframes = animation["keyframes"]
        if not keyframes:
            logger.warning("Animation has no keyframes")
            # Early return still runs the finally block below for cleanup.
            return

        # Sort keyframes by timestamp
        keyframes = sorted(keyframes, key=lambda kf: kf["timestamp"])

        # Play through keyframes
        # Anchoring to absolute time (rather than sleeping per-delta)
        # prevents drift from accumulating across keyframes.
        start_time = asyncio.get_event_loop().time() * 1000  # ms

        for i, keyframe in enumerate(keyframes):
            if not _is_playing:
                logger.info("Animation playback stopped")
                break

            # Calculate when this keyframe should be hit
            target_time = start_time + (keyframe["timestamp"] / speed)
            current_time = asyncio.get_event_loop().time() * 1000

            # Wait until it's time for this keyframe
            wait_time = (target_time - current_time) / 1000  # convert to seconds
            if wait_time > 0:
                await asyncio.sleep(wait_time)

            # Re-check after sleeping: stop may have been requested meanwhile.
            if not _is_playing:
                break

            # Send the keyframe pose
            await _send_pose_to_daemon(
                head_roll=keyframe.get("head_roll", 0.0),
                head_pitch=keyframe.get("head_pitch", 0.0),
                head_yaw=keyframe.get("head_yaw", 0.0),
                body_yaw=keyframe.get("body_yaw", 0.0),
                antenna_left=keyframe.get("antenna_left", 0.0),
                antenna_right=keyframe.get("antenna_right", 0.0),
            )

        logger.info(f"Animation playback completed: {animation['name']}")

    except Exception as e:
        logger.error(f"Error during animation playback: {e}")
    finally:
        _is_playing = False
        _current_animation_id = None

        # Stop audio playback if still running
        if audio_process and audio_process.returncode is None:
            try:
                audio_process.terminate()
                await audio_process.wait()
            except Exception:
                pass

        # Clean up temp audio file
        if audio_temp_file:
            try:
                Path(audio_temp_file.name).unlink(missing_ok=True)
            except Exception:
                pass
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
async def play_custom_animation(animation_id: str, speed: float = 1.0) -> bool:
    """Play a custom animation by ID (for use by other modules).

    Args:
        animation_id: The UUID of the animation to play.
        speed: Playback speed multiplier.

    Returns:
        True if animation started successfully, False otherwise.
    """
    global _is_playing, _current_animation_id

    if _is_playing:
        logger.warning(f"Cannot play animation {animation_id}: another animation is playing")
        return False

    # Claim the playback slot *before* the first await. The original code
    # awaited the DB lookup between the check above and setting the flag,
    # allowing two concurrent callers to both pass the check and start
    # overlapping playback tasks.
    _is_playing = True
    _current_animation_id = animation_id

    try:
        db = get_database()
        animation = await db.get_animation(animation_id)

        if not animation:
            logger.warning(f"Animation not found: {animation_id}")
            _is_playing = False
            _current_animation_id = None
            return False

        # Fire-and-forget: the task resets the playback flags when it ends.
        asyncio.create_task(_play_animation_task(animation, speed))
        return True

    except Exception as e:
        logger.error(f"Error starting animation {animation_id}: {e}")
        _is_playing = False
        _current_animation_id = None
        return False
|
| 578 |
+
|
reachys_brain/routes/apps.py
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Custom Apps API endpoints.
|
| 2 |
+
|
| 3 |
+
Provides CRUD operations for user-created custom apps stored on the bridge.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
|
| 8 |
+
from fastapi import APIRouter, HTTPException
|
| 9 |
+
|
| 10 |
+
from ..database import get_database
|
| 11 |
+
from ..models import (
|
| 12 |
+
CustomApp,
|
| 13 |
+
CustomAppCreate,
|
| 14 |
+
CustomAppResponse,
|
| 15 |
+
CustomAppsListResponse,
|
| 16 |
+
CustomAppsSyncRequest,
|
| 17 |
+
CustomAppsSyncResponse,
|
| 18 |
+
CustomAppUpdate,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
router = APIRouter(prefix="/apps", tags=["Custom Apps"])
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# =============================================================================
|
| 27 |
+
# List / Read Operations
|
| 28 |
+
# =============================================================================
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@router.get("", response_model=CustomAppsListResponse)
|
| 32 |
+
async def list_apps() -> CustomAppsListResponse:
|
| 33 |
+
"""Get all custom apps.
|
| 34 |
+
|
| 35 |
+
Returns:
|
| 36 |
+
List of all custom apps stored on the bridge.
|
| 37 |
+
"""
|
| 38 |
+
try:
|
| 39 |
+
db = get_database()
|
| 40 |
+
apps = await db.get_all_apps()
|
| 41 |
+
return CustomAppsListResponse(
|
| 42 |
+
success=True,
|
| 43 |
+
apps=[CustomApp(**app) for app in apps],
|
| 44 |
+
count=len(apps),
|
| 45 |
+
)
|
| 46 |
+
except Exception as e:
|
| 47 |
+
logger.error(f"Failed to list apps: {e}")
|
| 48 |
+
raise HTTPException(status_code=500, detail=str(e))
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@router.get("/{app_id}", response_model=CustomAppResponse)
|
| 52 |
+
async def get_app(app_id: str) -> CustomAppResponse:
|
| 53 |
+
"""Get a single custom app by ID.
|
| 54 |
+
|
| 55 |
+
Args:
|
| 56 |
+
app_id: UUID of the app.
|
| 57 |
+
|
| 58 |
+
Returns:
|
| 59 |
+
The custom app if found.
|
| 60 |
+
|
| 61 |
+
Raises:
|
| 62 |
+
HTTPException: 404 if app not found.
|
| 63 |
+
"""
|
| 64 |
+
try:
|
| 65 |
+
db = get_database()
|
| 66 |
+
# Normalize UUID to lowercase (iOS sends uppercase, db stores lowercase)
|
| 67 |
+
app = await db.get_app(app_id.lower())
|
| 68 |
+
|
| 69 |
+
if not app:
|
| 70 |
+
raise HTTPException(status_code=404, detail=f"App not found: {app_id}")
|
| 71 |
+
|
| 72 |
+
return CustomAppResponse(
|
| 73 |
+
success=True,
|
| 74 |
+
app=CustomApp(**app),
|
| 75 |
+
)
|
| 76 |
+
except HTTPException:
|
| 77 |
+
raise
|
| 78 |
+
except Exception as e:
|
| 79 |
+
logger.error(f"Failed to get app {app_id}: {e}")
|
| 80 |
+
raise HTTPException(status_code=500, detail=str(e))
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
# =============================================================================
|
| 84 |
+
# Create / Update / Delete Operations
|
| 85 |
+
# =============================================================================
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
@router.post("", response_model=CustomAppResponse, status_code=201)
|
| 89 |
+
async def create_app(app_data: CustomAppCreate) -> CustomAppResponse:
|
| 90 |
+
"""Create a new custom app.
|
| 91 |
+
|
| 92 |
+
Args:
|
| 93 |
+
app_data: The app data including UUID from iOS.
|
| 94 |
+
|
| 95 |
+
Returns:
|
| 96 |
+
The created app.
|
| 97 |
+
"""
|
| 98 |
+
try:
|
| 99 |
+
db = get_database()
|
| 100 |
+
|
| 101 |
+
# Normalize UUID to lowercase (iOS sends uppercase)
|
| 102 |
+
normalized_id = app_data.id.lower()
|
| 103 |
+
|
| 104 |
+
# Check if app already exists
|
| 105 |
+
existing = await db.get_app(normalized_id)
|
| 106 |
+
if existing:
|
| 107 |
+
raise HTTPException(
|
| 108 |
+
status_code=409,
|
| 109 |
+
detail=f"App already exists: {app_data.id}",
|
| 110 |
+
)
|
| 111 |
+
|
| 112 |
+
# Create the app with normalized ID
|
| 113 |
+
app_dict = app_data.model_dump()
|
| 114 |
+
app_dict["id"] = normalized_id
|
| 115 |
+
created = await db.create_app(app_dict)
|
| 116 |
+
|
| 117 |
+
logger.info(f"Created app: {app_data.name} ({app_data.id})")
|
| 118 |
+
return CustomAppResponse(
|
| 119 |
+
success=True,
|
| 120 |
+
app=CustomApp(**created),
|
| 121 |
+
message="App created successfully",
|
| 122 |
+
)
|
| 123 |
+
except HTTPException:
|
| 124 |
+
raise
|
| 125 |
+
except Exception as e:
|
| 126 |
+
logger.error(f"Failed to create app: {e}")
|
| 127 |
+
raise HTTPException(status_code=500, detail=str(e))
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
@router.put("/{app_id}", response_model=CustomAppResponse)
|
| 131 |
+
async def update_app(app_id: str, app_data: CustomAppUpdate) -> CustomAppResponse:
|
| 132 |
+
"""Update an existing custom app.
|
| 133 |
+
|
| 134 |
+
Args:
|
| 135 |
+
app_id: UUID of the app to update.
|
| 136 |
+
app_data: Fields to update (partial update supported).
|
| 137 |
+
|
| 138 |
+
Returns:
|
| 139 |
+
The updated app.
|
| 140 |
+
|
| 141 |
+
Raises:
|
| 142 |
+
HTTPException: 404 if app not found.
|
| 143 |
+
"""
|
| 144 |
+
try:
|
| 145 |
+
db = get_database()
|
| 146 |
+
|
| 147 |
+
# Get only the fields that were actually provided
|
| 148 |
+
update_data = app_data.model_dump(exclude_unset=True)
|
| 149 |
+
|
| 150 |
+
if not update_data:
|
| 151 |
+
raise HTTPException(status_code=400, detail="No fields to update")
|
| 152 |
+
|
| 153 |
+
# Normalize UUID to lowercase (iOS sends uppercase, db stores lowercase)
|
| 154 |
+
updated = await db.update_app(app_id.lower(), update_data)
|
| 155 |
+
|
| 156 |
+
if not updated:
|
| 157 |
+
raise HTTPException(status_code=404, detail=f"App not found: {app_id}")
|
| 158 |
+
|
| 159 |
+
logger.info(f"Updated app: {app_id}")
|
| 160 |
+
return CustomAppResponse(
|
| 161 |
+
success=True,
|
| 162 |
+
app=CustomApp(**updated),
|
| 163 |
+
message="App updated successfully",
|
| 164 |
+
)
|
| 165 |
+
except HTTPException:
|
| 166 |
+
raise
|
| 167 |
+
except Exception as e:
|
| 168 |
+
logger.error(f"Failed to update app {app_id}: {e}")
|
| 169 |
+
raise HTTPException(status_code=500, detail=str(e))
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
@router.delete("/{app_id}", response_model=CustomAppResponse)
|
| 173 |
+
async def delete_app(app_id: str) -> CustomAppResponse:
|
| 174 |
+
"""Delete a custom app.
|
| 175 |
+
|
| 176 |
+
Args:
|
| 177 |
+
app_id: UUID of the app to delete.
|
| 178 |
+
|
| 179 |
+
Returns:
|
| 180 |
+
Success response.
|
| 181 |
+
|
| 182 |
+
Raises:
|
| 183 |
+
HTTPException: 404 if app not found.
|
| 184 |
+
"""
|
| 185 |
+
try:
|
| 186 |
+
db = get_database()
|
| 187 |
+
# Normalize UUID to lowercase (iOS sends uppercase, db stores lowercase)
|
| 188 |
+
deleted = await db.delete_app(app_id.lower())
|
| 189 |
+
|
| 190 |
+
if not deleted:
|
| 191 |
+
raise HTTPException(status_code=404, detail=f"App not found: {app_id}")
|
| 192 |
+
|
| 193 |
+
logger.info(f"Deleted app: {app_id}")
|
| 194 |
+
return CustomAppResponse(
|
| 195 |
+
success=True,
|
| 196 |
+
message="App deleted successfully",
|
| 197 |
+
)
|
| 198 |
+
except HTTPException:
|
| 199 |
+
raise
|
| 200 |
+
except Exception as e:
|
| 201 |
+
logger.error(f"Failed to delete app {app_id}: {e}")
|
| 202 |
+
raise HTTPException(status_code=500, detail=str(e))
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
# =============================================================================
|
| 206 |
+
# Sync Operation
|
| 207 |
+
# =============================================================================
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
@router.post("/sync", response_model=CustomAppsSyncResponse)
|
| 211 |
+
async def sync_apps(sync_data: CustomAppsSyncRequest) -> CustomAppsSyncResponse:
|
| 212 |
+
"""Bulk sync apps from iOS client.
|
| 213 |
+
|
| 214 |
+
This replaces all existing apps with the provided list.
|
| 215 |
+
Useful for initial sync or restore from iOS backup.
|
| 216 |
+
|
| 217 |
+
Args:
|
| 218 |
+
sync_data: List of apps to sync.
|
| 219 |
+
|
| 220 |
+
Returns:
|
| 221 |
+
All apps after sync.
|
| 222 |
+
"""
|
| 223 |
+
try:
|
| 224 |
+
db = get_database()
|
| 225 |
+
|
| 226 |
+
# Convert to dicts for database, normalizing IDs to lowercase
|
| 227 |
+
apps_data = []
|
| 228 |
+
for app in sync_data.apps:
|
| 229 |
+
app_dict = app.model_dump()
|
| 230 |
+
app_dict["id"] = app_dict["id"].lower() # Normalize UUID
|
| 231 |
+
apps_data.append(app_dict)
|
| 232 |
+
|
| 233 |
+
# Perform sync
|
| 234 |
+
synced = await db.sync_apps(apps_data)
|
| 235 |
+
|
| 236 |
+
logger.info(f"Synced {len(synced)} apps from iOS")
|
| 237 |
+
return CustomAppsSyncResponse(
|
| 238 |
+
success=True,
|
| 239 |
+
synced_count=len(synced),
|
| 240 |
+
apps=[CustomApp(**app) for app in synced],
|
| 241 |
+
message=f"Successfully synced {len(synced)} apps",
|
| 242 |
+
)
|
| 243 |
+
except Exception as e:
|
| 244 |
+
logger.error(f"Failed to sync apps: {e}")
|
| 245 |
+
raise HTTPException(status_code=500, detail=str(e))
|
| 246 |
+
|
reachys_brain/routes/audio_manager.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Audio management for conversation handling.
|
| 2 |
+
|
| 3 |
+
Handles audio buffering, streaming to OpenAI, and microphone control
|
| 4 |
+
during voice conversations.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import asyncio
|
| 8 |
+
import logging
|
| 9 |
+
from typing import Optional
|
| 10 |
+
|
| 11 |
+
from .audio_stream_manager import ConversationTimings
|
| 12 |
+
from .conversation_services import get_services, get_state
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def flush_audio_buffer() -> None:
    """Drain every buffered audio chunk into the player.

    Invoked after the pre-speech animation completes so playback can begin.
    """
    services = get_services()
    state = get_state()

    # No player: nothing to play, just discard the backlog.
    if not services.audio_player:
        state.audio_buffer = []
        return

    if state.audio_buffer:
        logger.info(f"🔊 Flushing {len(state.audio_buffer)} buffered audio chunks")
        for pending_chunk in state.audio_buffer:
            services.audio_player.play_chunk(pending_chunk)
        state.audio_buffer = []
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def handle_audio_delta(audio_data: bytes) -> None:
    """Handle AI audio output - buffers initially for pre-speech animation.

    Args:
        audio_data: Raw audio bytes from OpenAI.
    """
    services = get_services()
    state = get_state()

    if not state.is_buffering_audio:
        # Animation already played: stream straight to the speaker.
        if services.audio_player:
            services.audio_player.play_chunk(audio_data)
        return

    # Still waiting for text/animation; hold the audio back for later flush.
    state.audio_buffer.append(audio_data)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
async def stream_audio_to_openai() -> None:
    """Stream audio from microphone to OpenAI.

    Continuously reads audio chunks from the capture service and
    sends them to OpenAI for transcription.
    """
    services = get_services()

    if not services.audio_capture or not services.openai:
        logger.error("Cannot stream: audio_capture or openai not initialized")
        return

    logger.info("🎙️ Starting audio stream to OpenAI")

    sent_count = 0
    empty_streak = 0
    MAX_CONSECUTIVE_EMPTY = 100  # Safety limit to prevent tight loops

    try:
        while services.openai.is_connected and services.openai.is_listening:
            chunk = await services.audio_capture.get_audio_chunk(
                timeout=ConversationTimings.AUDIO_CHUNK_TIMEOUT_SECONDS
            )

            if not chunk:
                empty_streak += 1
                # Too many misses in a row: back off briefly so an idle
                # microphone doesn't spin the CPU.
                if empty_streak >= MAX_CONSECUTIVE_EMPTY:
                    logger.warning("⚠️ Many consecutive empty audio chunks - adding delay")
                    await asyncio.sleep(0.5)
                    empty_streak = 0
                continue

            await services.openai.send_audio(chunk)
            sent_count += 1
            empty_streak = 0  # a real chunk resets the idle counter

            # Periodic heartbeat so logs show the stream is alive.
            if sent_count % 50 == 0:
                logger.info(f"🎙️ Audio streaming active ({sent_count} chunks sent)")

    except asyncio.CancelledError:
        logger.info(f"Audio stream cancelled after {sent_count} chunks")
    except Exception as e:
        logger.error(f"Error in audio stream: {e}", exc_info=True)

    logger.info(f"🎙️ Audio stream stopped (sent {sent_count} total chunks)")
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
async def delayed_resume_microphone() -> None:
    """Resume microphone after a delay to prevent echo pickup.

    Waits for the audio buffer to drain before resuming capture
    to avoid picking up the AI's own speech.
    """
    services = get_services()

    await asyncio.sleep(ConversationTimings.MICROPHONE_RESUME_DELAY_SECONDS)

    if (services.audio_capture and
            services.openai and
            services.openai.is_listening):
        # Clear OpenAI's audio buffer BEFORE resuming capture to prevent
        # any buffered audio from being processed as user speech.
        # (services.openai is already known truthy from the guard above,
        # so no inner re-check is needed.)
        await services.openai.clear_audio_buffer()
        logger.info("🗑️ Cleared audio buffer before resume")

        # Brief pause to ensure buffer is cleared on OpenAI's side
        await asyncio.sleep(0.1)

        services.audio_capture.resume_capture()
        logger.info(
            f"▶️ Resumed microphone (after "
            f"{ConversationTimings.MICROPHONE_RESUME_DELAY_SECONDS}s delay)"
        )
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def start_audio_streaming() -> Optional[asyncio.Task]:
    """Start the audio streaming task.

    Returns:
        The created asyncio Task, or None if already running.
    """
    state = get_state()

    # Replace any stale task so two streams never run simultaneously.
    previous = state.audio_stream_task
    if previous:
        previous.cancel()

    new_task = asyncio.create_task(stream_audio_to_openai())
    state.audio_stream_task = new_task
    logger.info("✅ Audio streaming task started")

    return new_task
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def stop_audio_streaming() -> None:
    """Stop the audio streaming task."""
    state = get_state()

    running = state.audio_stream_task
    if running:
        running.cancel()
        state.audio_stream_task = None
|
| 157 |
+
|
reachys_brain/routes/audio_stream_manager.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Audio streaming manager for OpenAI Realtime conversations.
|
| 2 |
+
|
| 3 |
+
Handles streaming audio from microphone to OpenAI and managing
|
| 4 |
+
the audio pipeline for real-time voice conversations.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import asyncio
|
| 8 |
+
import logging
|
| 9 |
+
from typing import TYPE_CHECKING
|
| 10 |
+
|
| 11 |
+
if TYPE_CHECKING:
|
| 12 |
+
from ..audio_capture import AsyncAudioCaptureService
|
| 13 |
+
from ..openai_realtime import OpenAIRealtimeService
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class ConversationTimings:
    """Named constants for timing-related magic numbers.

    Grouped by concern: microphone control, animation timing, and the
    probability that a pre-speech animation plays for a given sentiment.
    """

    # Microphone control
    MICROPHONE_RESUME_DELAY_SECONDS = 2.5
    AUDIO_CHUNK_TIMEOUT_SECONDS = 0.1

    # Animation timings
    GREETING_ANIMATION_DELAY = 0.3
    POST_GREETING_PAUSE = 0.5  # Brief pause after greeting animation

    # Pre-speech animation durations (by sentiment), in seconds
    PRE_SPEECH_DURATION_DEFAULT = 1.0  # seconds
    PRE_SPEECH_DURATION_STRONG = 1.2  # for excited, happy, sad, apologetic
    PRE_SPEECH_DURATION_GREETING = 1.5  # warm welcome

    # Pre-speech animation chances (by sentiment type), as probabilities in [0, 1]
    # Strong emotions: excited, happy, sad, apologetic
    PRE_SPEECH_CHANCE_STRONG = 0.90  # 90% chance
    # Questions and thinking
    PRE_SPEECH_CHANCE_THINKING = 0.70  # 70% chance
    # Helpful and neutral
    PRE_SPEECH_CHANCE_NEUTRAL = 0.50  # 50% chance (up from 25%)
    # Greeting always plays
    PRE_SPEECH_CHANCE_GREETING = 1.0  # 100% chance
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class AudioStreamManager:
    """Manages audio streaming between microphone and OpenAI.

    Handles the continuous audio pipeline, including pause/resume
    functionality for preventing echo during AI speech.
    """

    def __init__(
        self,
        audio_capture: "AsyncAudioCaptureService",
        openai_service: "OpenAIRealtimeService",
    ):
        """Initialize the audio stream manager.

        Args:
            audio_capture: Service for capturing microphone audio.
            openai_service: Service for communicating with OpenAI.
        """
        self._audio_capture = audio_capture
        self._openai_service = openai_service
        # Background task running _stream_loop; None when not streaming.
        self._stream_task: asyncio.Task | None = None
        self._chunks_sent = 0

    @property
    def is_streaming(self) -> bool:
        """Check if audio is currently streaming."""
        return self._stream_task is not None and not self._stream_task.done()

    @property
    def chunks_sent(self) -> int:
        """Get the number of audio chunks sent in current session."""
        return self._chunks_sent

    def start_streaming(self) -> None:
        """Start streaming audio from microphone to OpenAI.

        No-op (with a warning) if a stream is already active.
        """
        if self.is_streaming:
            logger.warning("Audio streaming already active")
            return

        self._chunks_sent = 0
        self._stream_task = asyncio.create_task(self._stream_loop())
        logger.info("✅ Audio streaming task started")

    def stop_streaming(self) -> None:
        """Stop the audio streaming task."""
        if self._stream_task:
            self._stream_task.cancel()
            self._stream_task = None
            logger.info(f"🎙️ Audio stream stopped (sent {self._chunks_sent} total chunks)")

    async def _stream_loop(self) -> None:
        """Main audio streaming loop.

        Continuously reads audio chunks from the microphone and sends
        them to OpenAI. Handles pausing automatically (audio_capture
        manages its own pause state).
        """
        logger.info("🎙️ Starting audio stream to OpenAI")

        try:
            while self._openai_service.is_connected and self._openai_service.is_listening:
                # get_audio_chunk returns None on timeout (no audio available)
                # Audio pausing is handled at capture level - paused capture won't queue chunks
                chunk = await self._audio_capture.get_audio_chunk(
                    timeout=ConversationTimings.AUDIO_CHUNK_TIMEOUT_SECONDS
                )

                if chunk:
                    await self._openai_service.send_audio(chunk)
                    self._chunks_sent += 1

                    # Log every 50 chunks (~5 seconds) to show activity
                    if self._chunks_sent % 50 == 0:
                        logger.info(f"🎙️ Audio streaming active ({self._chunks_sent} chunks sent)")

        except asyncio.CancelledError:
            logger.info(f"Audio stream cancelled after {self._chunks_sent} chunks")
        except Exception as e:
            logger.error(f"Error in audio stream: {e}", exc_info=True)

        logger.info(f"🎙️ Audio stream stopped (sent {self._chunks_sent} total chunks)")

    def pause_capture(self) -> None:
        """Pause audio capture (e.g., when AI is speaking)."""
        self._audio_capture.pause_capture()
        logger.info("⏸️ Paused microphone (AI speaking)")

    def resume_capture(self) -> None:
        """Resume audio capture."""
        self._audio_capture.resume_capture()
        logger.info(f"▶️ Resumed microphone (after {ConversationTimings.MICROPHONE_RESUME_DELAY_SECONDS}s delay)")

    async def delayed_resume(self) -> None:
        """Resume microphone after a delay to prevent echo pickup.

        This waits for the audio to finish playing through the speaker
        before resuming capture to avoid echo.
        """
        await asyncio.sleep(ConversationTimings.MICROPHONE_RESUME_DELAY_SECONDS)

        if self._openai_service.is_listening:
            # Clear OpenAI's input buffer BEFORE reopening the microphone:
            # anything buffered while the AI was speaking would otherwise be
            # interpreted as user speech once capture resumes.
            await self._openai_service.clear_audio_buffer()
            logger.info("🗑️ Cleared audio buffer before resume")

            self.resume_capture()
|
| 151 |
+
|
reachys_brain/routes/broadcast_manager.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Client broadcasting utilities for WebSocket connections.
|
| 2 |
+
|
| 3 |
+
Handles broadcasting messages to connected iOS clients with proper
|
| 4 |
+
error handling, size limits, and disconnection tracking.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import json
|
| 8 |
+
import logging
|
| 9 |
+
from typing import Set
|
| 10 |
+
|
| 11 |
+
from fastapi import WebSocket
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
# Connected WebSocket clients — module-level registry shared by the
# add/remove helpers and the broadcast functions below.
_connected_clients: Set[WebSocket] = set()
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def get_connected_clients() -> Set[WebSocket]:
    """Get the set of connected clients.

    Note:
        Returns the live module-level set, not a copy — mutations by the
        caller affect future broadcasts.
    """
    return _connected_clients
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def add_client(websocket: WebSocket) -> int:
    """Add a client to the connected set.

    Adding the same connection twice is a harmless no-op (set semantics).

    Args:
        websocket: The WebSocket connection to add.

    Returns:
        The new total number of connected clients.
    """
    _connected_clients.add(websocket)
    return len(_connected_clients)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def remove_client(websocket: WebSocket) -> int:
    """Remove a client from the connected set.

    Uses ``discard`` so removing an unknown connection never raises.

    Args:
        websocket: The WebSocket connection to remove.

    Returns:
        The remaining number of connected clients.
    """
    _connected_clients.discard(websocket)
    return len(_connected_clients)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
async def broadcast(message: dict) -> None:
    """Broadcast a message to all connected iOS clients.

    Args:
        message: Dictionary to broadcast as JSON.
    """
    if not _connected_clients:
        return

    message_json = json.dumps(message)

    disconnected = set()
    # Iterate over a snapshot: each `await` yields control, and another
    # coroutine may add/remove clients mid-loop, which would otherwise
    # raise "RuntimeError: Set changed size during iteration".
    for client in list(_connected_clients):
        try:
            await client.send_text(message_json)
        except Exception as e:
            logger.error(f"Error sending to client: {e}")
            disconnected.add(client)

    _connected_clients.difference_update(disconnected)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
async def safe_broadcast(message: dict) -> None:
    """Safely broadcast a message with error handling and size limits.

    This wrapper around broadcast adds:
    - Size validation to prevent memory issues
    - Comprehensive error handling
    - Logging for debugging

    Args:
        message: Dictionary to broadcast as JSON.
    """
    try:
        # Validate message can be serialized
        try:
            message_json = json.dumps(message)
        except (TypeError, ValueError) as e:
            logger.error(f"Failed to serialize broadcast message: {e}")
            return

        # Check message size (max 1MB for WebSocket)
        max_size = 1024 * 1024  # 1MB
        if len(message_json) > max_size:
            logger.warning(f"Broadcast message too large ({len(message_json)} bytes), truncating")
            # Try to truncate the known-large fields in place.
            for field in ("existing_html", "html_content"):
                if field in message and message[field]:
                    message[field] = message[field][:50000] + "... (truncated)"

        await broadcast(message)

    except Exception as e:
        logger.error(f"Error in safe_broadcast: {e}", exc_info=True)
|
| 105 |
+
|
reachys_brain/routes/conversation.py
ADDED
|
@@ -0,0 +1,1132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""WebSocket endpoint for real-time conversation with iOS app.
|
| 2 |
+
|
| 3 |
+
This module provides the WebSocket endpoint for voice conversations,
|
| 4 |
+
delegating to specialized modules for:
|
| 5 |
+
- broadcast_manager: Client connection management and broadcasting
|
| 6 |
+
- conversation_services: Service lifecycle management
|
| 7 |
+
- audio_manager: Audio streaming and buffering
|
| 8 |
+
- animation_manager: Pre-speech animations and gestures
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import asyncio
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
from typing import Optional
|
| 15 |
+
|
| 16 |
+
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
|
| 17 |
+
|
| 18 |
+
from ..openai_realtime import ConnectionState, SpeakingState
|
| 19 |
+
from ..tools.reminders import set_reminder_request_callback, handle_reminder_result
|
| 20 |
+
from ..tools.contacts import set_contacts_request_callback, handle_contacts_result
|
| 21 |
+
from ..tools.scheduled_messages import set_scheduled_message_callback, handle_scheduled_message_result
|
| 22 |
+
from ..tools.website_generator import set_website_request_callback, save_website_from_ios
|
| 23 |
+
|
| 24 |
+
from .audio_stream_manager import ConversationTimings
|
| 25 |
+
from .broadcast_manager import broadcast, safe_broadcast, add_client, remove_client
|
| 26 |
+
from .conversation_messages import STOP_COMMANDS
|
| 27 |
+
from .conversation_services import (
|
| 28 |
+
get_services,
|
| 29 |
+
get_state,
|
| 30 |
+
init_services as init_conversation_services,
|
| 31 |
+
cleanup_services as cleanup_conversation_services,
|
| 32 |
+
wire_openai_callbacks,
|
| 33 |
+
)
|
| 34 |
+
from .audio_manager import (
|
| 35 |
+
handle_audio_delta,
|
| 36 |
+
delayed_resume_microphone,
|
| 37 |
+
start_audio_streaming,
|
| 38 |
+
stop_audio_streaming,
|
| 39 |
+
)
|
| 40 |
+
from .animation_manager import (
|
| 41 |
+
handle_pre_speech_animation,
|
| 42 |
+
send_greeting,
|
| 43 |
+
send_goodbye,
|
| 44 |
+
set_custom_animations,
|
| 45 |
+
clear_custom_animations,
|
| 46 |
+
)
|
| 47 |
+
from .task_tracker import create_tracked_task, cancel_all_tracked_tasks
|
| 48 |
+
|
| 49 |
+
logger = logging.getLogger(__name__)
|
| 50 |
+
|
| 51 |
+
router = APIRouter(tags=["Conversation"])
|
| 52 |
+
|
| 53 |
+
async def _reset_openai_session(keep_listening: bool) -> None:
    """Hard reset OpenAI session by disconnecting and reconnecting.

    OpenAI Realtime rejects changing voice via `session.update` once assistant
    audio has been produced in the current session. To apply a new voice/language
    reliably, we must create a new session.

    If `keep_listening` is True, we restore listening + audio streaming so the
    next user turn works without user-visible reconnect steps.

    Args:
        keep_listening: Whether to resume listening, streaming, and microphone
            capture after the new session is established.
    """
    services = get_services()

    # No OpenAI client means there is no session to reset.
    if not services.openai:
        return

    # Stop any local playback/gestures
    if services.audio_player:
        services.audio_player.cancel()

    if services.speaking_gestures:
        await services.speaking_gestures.stop()

    # Stop audio streaming task (depends on openai.is_connected)
    stop_audio_streaming()

    # Pause capture while we reset, to avoid buffering user audio into nowhere
    if services.audio_capture:
        services.audio_capture.pause_capture()

    # Best-effort: cancel any in-flight response and clear input buffer
    try:
        await services.openai.cancel_response()
        await services.openai.clear_audio_buffer()
    except Exception:
        # Deliberate best-effort: failures here must not block the reset.
        pass

    # Fully disconnect and reconnect (new session)
    try:
        await services.openai.disconnect()
    except Exception:
        # Disconnect errors are ignored; we reconnect regardless.
        pass

    # connect() reads OPENAI_API_KEY from env (set by iOS connect command)
    await services.openai.connect()

    if keep_listening:
        services.openai.start_listening()
        start_audio_streaming()

        # Resume capture with normal echo-avoidance delay
        create_tracked_task(delayed_resume_microphone())
    else:
        if services.audio_capture:
            services.audio_capture.stop_capture()
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
# MARK: - Service Lifecycle
|
| 110 |
+
|
| 111 |
+
def init_services() -> None:
    """Initialize conversation services and wire up callbacks.

    Obtains the shared service container, then connects:
    - OpenAI event callbacks (connection/speaking state, transcripts,
      response text, audio deltas, errors, app changes, tool usage,
      website completion),
    - meeting lifecycle callbacks,
    - iOS-delegation callbacks for websites, reminders, contacts, and
      scheduled messages.
    """
    services = init_conversation_services()

    # Wire up OpenAI callbacks
    wire_openai_callbacks(
        on_connection_state=_on_connection_state,
        on_speaking_state=_on_speaking_state,
        on_transcript_update=_on_transcript_update,
        on_response_text=_on_response_text,
        on_audio_delta=handle_audio_delta,
        on_error=_on_error,
        on_app_change=_on_app_change,
        on_tool_usage=_on_tool_usage,
        on_website_ready=_on_website_ready,
    )

    # Wire up meeting callbacks (set directly on the client object)
    if services.openai:
        services.openai.on_meeting_started = _on_meeting_started
        services.openai.on_meeting_stopped = _on_meeting_stopped
        services.openai.on_meeting_transcript_update = _on_meeting_transcript_update

    # Wire up website request callback to delegate generation to iOS
    set_website_request_callback(_on_website_request)

    # Wire up reminder request callback to delegate operations to iOS
    set_reminder_request_callback(_on_reminder_request)

    # Wire up contacts request callback to delegate operations to iOS
    set_contacts_request_callback(_on_contacts_request)

    # Wire up scheduled message request callback to delegate operations to iOS
    set_scheduled_message_callback(_on_scheduled_message_request)

    logger.info("Conversation services initialized")
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def cleanup_services() -> None:
    """Tear down conversation services, cancelling outstanding work first."""
    # Background tasks must die before the services they reference do.
    cancel_all_tracked_tasks()
    cleanup_conversation_services()
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
# MARK: - OpenAI Callbacks
|
| 157 |
+
|
| 158 |
+
def _on_connection_state(state: ConnectionState) -> None:
    """Fan a new OpenAI connection state out to every connected client."""
    event = {"type": "connection_state", "state": state.value}
    create_tracked_task(broadcast(event))
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def _on_speaking_state(state: SpeakingState) -> None:
    """Handle OpenAI speaking state changes.

    When the AI starts SPEAKING: pause the microphone (echo prevention),
    reset per-response buffering state, start speaking gestures, and clear
    OpenAI's input audio buffer. On any other state (AI finished): stop
    gestures, bump the response counter, re-arm the buffering/animation
    flags, and schedule a delayed microphone resume. The new state is then
    broadcast to connected clients.

    Args:
        state: The new speaking state reported by the OpenAI client.
    """
    services = get_services()
    conv_state = get_state()

    # Pause/resume audio capture based on speaking state
    if services.audio_capture:
        if state == SpeakingState.SPEAKING:
            services.audio_capture.pause_capture()
            logger.info("⏸️ Paused microphone (AI speaking)")

            # Reset buffering state for new response
            conv_state.reset_for_new_response()

            # Start subtle speaking gestures while talking
            if services.speaking_gestures:
                create_tracked_task(services.speaking_gestures.start())

            # Clear OpenAI's audio buffer to prevent echo from interrupting
            if services.openai:
                create_tracked_task(services.openai.clear_audio_buffer())
                logger.info("🗑️ Cleared OpenAI audio buffer (preventing echo)")
        else:
            # AI finished speaking - stop gestures
            if services.speaking_gestures:
                create_tracked_task(services.speaking_gestures.stop())

            conv_state.response_count += 1

            # Reset buffering state for next response
            conv_state.is_buffering_audio = True
            conv_state.animation_played_for_response = False

            # Delay resuming microphone to let audio buffer drain (prevent echo)
            create_tracked_task(delayed_resume_microphone())

    create_tracked_task(broadcast({
        "type": "speaking_state",
        "state": state.value,
    }))
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def _is_stop_command(transcript: str, language: str = "en") -> bool:
    """Check if the transcript is a stop command.

    A stop word matches when it is the entire utterance, or appears at the
    start or end of it. English stop words are always checked as a fallback
    for non-English sessions.

    Args:
        transcript: Raw user transcript to inspect.
        language: Language code of the current session; unknown codes fall
            back to the English stop-word list.

    Returns:
        True if the transcript should end the conversation.
    """
    text = transcript.strip().lower()

    def _matches(stop: str) -> bool:
        # Whole-utterance match, or the stop word bounding the utterance.
        return text == stop or text.startswith(stop + " ") or text.endswith(" " + stop)

    # Check stop commands for current language
    stop_words = STOP_COMMANDS.get(language, STOP_COMMANDS["en"])
    if any(_matches(stop) for stop in stop_words):
        return True

    # Always check English stop commands as fallback
    return language != "en" and any(_matches(stop) for stop in STOP_COMMANDS["en"])
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def _on_transcript_update(transcript: str) -> None:
    """Relay user transcript updates, intercepting voice stop commands."""
    services = get_services()

    # Use the active session language when available; default to English.
    active_language = services.openai.language if services.openai else "en"

    if _is_stop_command(transcript, active_language):
        logger.info(f"🛑 Stop command detected: '{transcript}'")
        create_tracked_task(_handle_stop_command())
        return

    event = {"type": "transcript_update", "transcript": transcript}
    create_tracked_task(broadcast(event))
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
async def _handle_stop_command() -> None:
    """Handle a voice stop command - immediately end the conversation.

    Tears everything down in order: cancels playback and the in-flight AI
    response, stops gestures, audio streaming, and microphone capture, then
    closes the OpenAI session so the next conversation always starts fresh.
    Finally notifies connected clients that listening has stopped.
    """
    services = get_services()
    state = get_state()

    try:
        # Cancel any current AI response
        if services.audio_player:
            services.audio_player.cancel()

        if services.openai:
            await services.openai.cancel_response()

        # Stop speaking gestures if running
        if services.speaking_gestures:
            await services.speaking_gestures.stop()

        # Stop the audio stream task
        stop_audio_streaming()

        # Stop audio capture
        if services.audio_capture:
            services.audio_capture.stop_capture()

        # Stop listening on OpenAI
        if services.openai:
            services.openai.stop_listening()

            # Enforce: next conversation is always a new OpenAI session.
            # Guarded by the `if services.openai` check above for consistency
            # with the other accesses in this function.
            try:
                await services.openai.disconnect()
            except Exception:
                pass

        # Mark conversation as ended (allows idle movements to resume)
        state.end_conversation()

        # Broadcast to iOS that listening has stopped
        await broadcast({
            "type": "listening_state",
            "listening": False,
        })

        # Also notify of the transcript that caused the stop
        await broadcast({
            "type": "conversation_stopped",
            "reason": "voice_command",
        })

        logger.info("✅ Conversation stopped via voice command")

    except Exception as e:
        logger.error(f"Error handling stop command: {e}")
        # Broadcast error to clients
        await broadcast({
            "type": "error",
            "message": "Failed to stop conversation",
        })
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def _on_response_text(text: str) -> None:
    """Record the AI's response text, start pre-speech animation, broadcast."""
    get_state().last_response_text = text

    # Kick off the pre-speech animation workflow for this response.
    create_tracked_task(handle_pre_speech_animation(text))

    event = {"type": "response_text", "text": text}
    create_tracked_task(broadcast(event))
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def _on_error(error: str) -> None:
    """Forward an OpenAI error message to every connected client."""
    event = {"type": "error", "message": error}
    create_tracked_task(broadcast(event))
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
def _on_tool_usage(tool_name: str, status: str) -> None:
    """Notify clients that a tool has started or finished running."""
    logger.info(f"🔧 Tool usage: {tool_name} ({status})")

    # The tool name is only included on "started"; completion events omit it.
    event = {
        "type": "tool_usage",
        "tool": tool_name if status == "started" else None,
        "status": status,
    }
    create_tracked_task(broadcast(event))
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
def _on_website_ready(data: dict) -> None:
    """Broadcast a website creation/edit completion event to clients."""
    try:
        website_id = data.get("website_id", "")
        if not website_id:
            logger.error("_on_website_ready: Missing website_id")
            return

        url = data.get("url", "")
        title = data.get("title", "Generated Website")
        is_edit = data.get("is_edit", False)

        action = "updated" if is_edit else "created"
        logger.info(f"🌐 Website {action}: {title} ({website_id})")

        event = {
            "type": "website_ready",
            "website_id": website_id,
            "url": url,
            "title": title,
            "is_edit": is_edit,
        }
        create_tracked_task(safe_broadcast(event))
    except Exception as e:
        logger.error(f"Error in _on_website_ready: {e}", exc_info=True)
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
def _on_website_request(
    website_id: str,
    description: str,
    is_edit: bool,
    existing_html: Optional[str],
) -> None:
    """Handle website generation request - delegates to iOS.

    Validates the request, truncates oversized existing HTML so the
    WebSocket message stays manageable, then broadcasts a
    ``website_request`` event for the iOS client to act on.

    Args:
        website_id: Identifier of the website to create or edit.
        description: Natural-language description of the desired site.
        is_edit: True when editing an existing website.
        existing_html: Current HTML when editing, or None for a new site.
    """
    try:
        if not website_id:
            logger.error("_on_website_request: Missing website_id")
            return

        if not description:
            logger.error("_on_website_request: Missing description")
            return

        # description is guaranteed non-empty here, so slice directly.
        desc_preview = description[:50]
        logger.info(f"📱 Requesting iOS to generate website {website_id}: {desc_preview}...")

        # Limit existing_html size for broadcast to prevent WebSocket issues
        broadcast_existing_html = existing_html
        if existing_html and len(existing_html) > 100000:
            logger.warning(f"Truncating existing_html for broadcast ({len(existing_html)} chars)")
            broadcast_existing_html = existing_html[:100000]

        create_tracked_task(safe_broadcast({
            "type": "website_request",
            "website_id": website_id,
            "description": description,
            "is_edit": is_edit,
            "existing_html": broadcast_existing_html,
        }))
    except Exception as e:
        logger.error(f"Error in _on_website_request: {e}", exc_info=True)
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
def _on_reminder_request(
    request_id: str,
    action: str,
    params: dict,
) -> None:
    """Delegate a reminder operation to the iOS app over the WebSocket."""
    try:
        if not request_id:
            logger.error("_on_reminder_request: Missing request_id")
            return

        logger.info(f"📱 Requesting iOS to {action} reminder: {request_id}")

        event = {
            "type": "reminder_request",
            "request_id": request_id,
            "action": action,
            "params": params,
        }
        create_tracked_task(safe_broadcast(event))
    except Exception as e:
        logger.error(f"Error in _on_reminder_request: {e}", exc_info=True)
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
def _on_contacts_request(
    request_id: str,
    action: str,
    params: dict,
) -> None:
    """Delegate a contacts operation to the iOS app over the WebSocket."""
    try:
        if not request_id:
            logger.error("_on_contacts_request: Missing request_id")
            return

        logger.info(f"📇 Requesting iOS to {action} contacts: {request_id}")

        event = {
            "type": "contacts_request",
            "request_id": request_id,
            "action": action,
            "params": params,
        }
        create_tracked_task(safe_broadcast(event))
    except Exception as e:
        logger.error(f"Error in _on_contacts_request: {e}", exc_info=True)
|
| 443 |
+
|
| 444 |
+
|
| 445 |
+
def _on_scheduled_message_request(
    request_id: str,
    action: str,
    params: dict,
) -> None:
    """Delegate a scheduled-message operation to the iOS app."""
    try:
        if not request_id:
            logger.error("_on_scheduled_message_request: Missing request_id")
            return

        logger.info(f"📬 Requesting iOS to {action} scheduled message: {request_id}")

        event = {
            "type": "scheduled_message_request",
            "request_id": request_id,
            "action": action,
            "params": params,
        }
        create_tracked_task(safe_broadcast(event))
    except Exception as e:
        logger.error(f"Error in _on_scheduled_message_request: {e}", exc_info=True)
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
def _on_app_change(data: dict) -> None:
    """Handle app activation/deactivation changes from voice commands.

    On activation, installs the app's custom emotion animations (if any)
    and broadcasts an ``app_activated`` event with the app's identity.
    On deactivation, reverts to the default animations and broadcasts
    ``app_deactivated``.

    Args:
        data: Event payload; ``type`` selects the branch, ``app`` carries
            the app metadata on activation.
    """
    event_type = data.get("type", "")

    if event_type == "app_activated":
        app = data.get("app", {})
        logger.info(f"🚀 Voice-activated app: {app.get('name', 'Unknown')}")

        # Update emotion animations if the app has custom ones
        emotion_animations = app.get("emotion_animations", {})
        if emotion_animations:
            set_custom_animations(emotion_animations)

        create_tracked_task(broadcast({
            "type": "app_activated",
            "app": {
                "id": app.get("id"),
                "name": app.get("name"),
                "description": app.get("description", ""),
            }
        }))

    elif event_type == "app_deactivated":
        logger.info("🛑 Voice-deactivated app - reverting to default")
        clear_custom_animations()

        create_tracked_task(broadcast({
            "type": "app_deactivated"
        }))
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
# MARK: - Meeting Callbacks
|
| 501 |
+
|
| 502 |
+
def _on_meeting_started(meeting_id: str, title: str) -> None:
    """Announce that a meeting recording has begun."""
    logger.info(f"📝 Meeting started: {title} ({meeting_id})")
    event = {
        "type": "meeting_started",
        "meeting_id": meeting_id,
        "title": title,
    }
    create_tracked_task(safe_broadcast(event))
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
def _on_meeting_stopped(meeting_id: str) -> None:
    """Announce that a meeting recording has ended."""
    logger.info(f"📝 Meeting stopped: {meeting_id}")
    event = {
        "type": "meeting_stopped",
        "meeting_id": meeting_id,
    }
    create_tracked_task(safe_broadcast(event))
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
def _on_meeting_transcript_update(meeting_id: str, transcript: str) -> None:
    """Push a live meeting-transcript preview to connected clients."""
    # Full transcripts can be large; only ship the last 500 characters.
    preview = transcript if len(transcript) <= 500 else transcript[-500:]

    event = {
        "type": "meeting_transcript_update",
        "meeting_id": meeting_id,
        "transcript_preview": preview,
        "total_length": len(transcript),
    }
    create_tracked_task(safe_broadcast(event))
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
# MARK: - WebSocket Endpoint
|
| 536 |
+
|
| 537 |
+
@router.websocket("/ws/conversation")
async def conversation_websocket(websocket: WebSocket) -> None:
    """WebSocket endpoint for iOS conversation app.

    Accepts commands from iOS and relays events from OpenAI.

    Commands:
    - connect: Connect to OpenAI (requires api_key)
    - disconnect: Disconnect from OpenAI
    - start_listening: Start microphone capture
    - stop_listening: Stop microphone capture
    - set_language: Set conversation language
    - set_voice: Set voice ID (also updates language)
    - send_text: Send a text message
    - set_system_prompt: Set custom personality (requires system_prompt)
    - clear_system_prompt: Clear custom personality, revert to default
    - cancel_response: Cancel current AI response
    - clear_audio_buffer: Clear the input audio buffer

    Events (sent to iOS):
    - connection_state: OpenAI connection state
    - speaking_state: AI speaking state
    - transcript_update: User transcript (real-time)
    - response_text: AI response text
    - personality_set: Custom personality was set
    - personality_cleared: Reverted to default personality
    - error: Error messages
    """
    await websocket.accept()
    count = add_client(websocket)
    logger.info(f"iOS client connected ({count} total)")

    services = get_services()

    # Send current state so a late-joining client sees the connection status
    if services.openai:
        await websocket.send_json({
            "type": "connection_state",
            "state": services.openai.connection_state.value,
        })

    try:
        # Receive-dispatch loop; runs until the client disconnects.
        while True:
            data = await websocket.receive_json()
            command = data.get("command", "")
            await _handle_command(websocket, command, data)

    except WebSocketDisconnect:
        logger.info("iOS client disconnected")
    except Exception as e:
        logger.error(f"WebSocket error: {e}")
    finally:
        # Always deregister the client, regardless of how the loop ended.
        count = remove_client(websocket)
        logger.info(f"iOS client removed ({count} remaining)")
|
| 591 |
+
|
| 592 |
+
|
| 593 |
+
async def _handle_command(websocket: WebSocket, command: str, data: dict) -> None:
    """Dispatch a single command received from the iOS app.

    Lazily initializes the conversation services on the first command,
    then routes to the matching handler. Any exception is reported back
    to the client as an ``error`` event instead of closing the socket.

    Args:
        websocket: The client connection that issued the command.
        command: Command name (see ``conversation_websocket`` docstring).
        data: Full command payload, including command-specific fields.
    """
    services = get_services()

    # Lazy init: the first command from any client brings services up.
    if not services.is_initialized:
        init_services()
        services = get_services()

    try:
        if command == "connect":
            await _handle_connect(websocket, data)

        elif command == "disconnect":
            await _handle_disconnect()

        elif command == "start_listening":
            await _handle_start_listening(websocket)

        elif command == "stop_listening":
            await _handle_stop_listening(websocket)

        elif command == "set_language":
            language = data.get("language", "en")
            # Persist language; immediately reset OpenAI session to apply new settings.
            from .voice import set_preferred_language
            success = set_preferred_language(language)
            confirmed = False

            if success and services.openai and services.openai.is_connected:
                # Immediately disconnect and reconnect with new language
                keep_listening = bool(services.openai.is_listening)
                await _reset_openai_session(keep_listening=keep_listening)
                confirmed = services.openai.is_connected
                logger.info(f"🌍 Language switched to {language}, session reset, confirmed={confirmed}")

            await websocket.send_json({
                "type": "language_set",
                "language": language,
                "success": success,
                "confirmed": confirmed,
            })

        elif command == "set_voice":
            voice_id = data.get("voice_id", "")
            if voice_id:
                # Persist voice; immediately reset OpenAI session to apply new settings.
                from .voice import set_current_voice
                success = set_current_voice(voice_id)
                confirmed = False

                if success and services.openai and services.openai.is_connected:
                    # Immediately disconnect and reconnect with new voice
                    keep_listening = bool(services.openai.is_listening)
                    await _reset_openai_session(keep_listening=keep_listening)
                    confirmed = services.openai.is_connected
                    logger.info(f"🔊 Voice switched to {voice_id}, session reset, confirmed={confirmed}")

                await websocket.send_json({
                    "type": "voice_set",
                    "voice_id": voice_id,
                    "language": services.openai.language,
                    "success": success,
                    "confirmed": confirmed,
                })

        elif command == "send_text":
            text = data.get("text", "")
            if text:
                await services.openai.send_text_message(text)

        elif command == "cancel_response":
            if services.audio_player:
                services.audio_player.cancel()
            await services.openai.cancel_response()

        elif command == "interrupt_response":
            await _handle_interrupt(websocket)

        elif command == "clear_audio_buffer":
            await services.openai.clear_audio_buffer()

        elif command == "set_system_prompt":
            await _handle_set_system_prompt(websocket, data)

        elif command == "clear_system_prompt":
            services.openai.clear_custom_personality()
            logger.info("🎭 Reverted to default Reachy personality")
            await websocket.send_json({
                "type": "personality_cleared",
                "success": True,
            })

        elif command == "set_emotion_animations":
            await _handle_set_emotion_animations(websocket, data)

        elif command == "clear_emotion_animations":
            clear_custom_animations()
            await websocket.send_json({
                "type": "emotion_animations_cleared",
                "success": True,
            })

        elif command == "upload_website":
            await _handle_upload_website(websocket, data)

        elif command == "reminder_result":
            await _handle_reminder_result(websocket, data)

        elif command == "contacts_result":
            await _handle_contacts_result(websocket, data)

        elif command == "scheduled_message_result":
            await _handle_scheduled_message_result(websocket, data)

        else:
            logger.warning(f"Unknown command: {command}")
            await websocket.send_json({
                "type": "error",
                "message": f"Unknown command: {command}",
            })

    except Exception as e:
        logger.error(f"Error handling command {command}: {e}")
        await websocket.send_json({
            "type": "error",
            "message": str(e),
        })
|
| 721 |
+
|
| 722 |
+
|
| 723 |
+
# MARK: - Command Handlers
|
| 724 |
+
|
| 725 |
+
async def _handle_connect(websocket: WebSocket, data: dict) -> None:
    """Handle the connect command.

    Resolves the OpenAI API key from the message payload (preferred) or the
    OPENAI_API_KEY environment variable, connects the realtime service, and
    starts the audio output stream when a player is available.
    """
    services = get_services()

    api_key = data.get("api_key") or os.environ.get("OPENAI_API_KEY")
    if not api_key:
        # No key from either source — nothing to connect with.
        await websocket.send_json({
            "type": "error",
            "message": "API key required",
        })
        return

    if data.get("api_key"):
        # Persist the client-supplied key for later sessions; log only a
        # masked suffix so the full secret never reaches the logs.
        masked = f"***{api_key[-4:]}" if len(api_key) > 4 else "***"
        logger.info(f"API key provided ({masked})")
        os.environ["OPENAI_API_KEY"] = data["api_key"]

    await services.openai.connect(api_key)

    if services.audio_player:
        services.audio_player.start_stream()
|
| 748 |
+
|
| 749 |
+
|
| 750 |
+
async def _handle_disconnect() -> None:
    """Handle the disconnect command: halt all audio I/O, then close OpenAI."""
    services = get_services()

    # Stop feeding microphone audio upstream before tearing anything down.
    stop_audio_streaming()

    capture = services.audio_capture
    if capture:
        capture.stop_capture()

    player = services.audio_player
    if player:
        player.cancel()

    await services.openai.disconnect()
|
| 763 |
+
|
| 764 |
+
|
| 765 |
+
async def _handle_start_listening(websocket: WebSocket) -> None:
    """Handle the start_listening command.

    Ensures an OpenAI session exists (auto-connecting if needed), resets the
    conversation state, starts microphone capture, enables listening mode,
    begins audio streaming, and kicks off the greeting in the background.
    """
    services = get_services()
    state = get_state()

    async def fail(message: str) -> None:
        # Every error path reports the same payload shape to the client.
        await websocket.send_json({"type": "error", "message": message})

    if not services.openai.is_connected:
        # Auto-connect so that "stop => new session next time" doesn't require
        # a separate iOS connect step.
        try:
            await services.openai.connect()
        except Exception as e:
            logger.error(f"Cannot start listening: not connected to OpenAI ({e})")
            await fail("Not connected to OpenAI")
            return

    # Reset conversation state
    state.reset_for_new_conversation()
    logger.info("🎬 New conversation started - animation state reset")

    # Start audio capture — bail out early on any missing prerequisite.
    capture = services.audio_capture
    if capture is None:
        logger.error("Audio capture service not initialized")
        await fail("Audio capture service not initialized")
        return

    if not capture.is_available:
        logger.error("Audio capture not available - no microphone detected!")
        await fail("Microphone not available on Reachy")
        return

    if not await capture.start_capture():
        logger.error("❌ Failed to start audio capture")
        await fail("Failed to start microphone capture")
        return

    logger.info("✅ Audio capture started successfully")

    services.openai.start_listening()
    logger.info("✅ OpenAI listening mode enabled")

    start_audio_streaming()

    await websocket.send_json({
        "type": "listening_state",
        "listening": True,
    })

    # Greeting runs in the background so this handler returns promptly.
    create_tracked_task(send_greeting())
|
| 826 |
+
|
| 827 |
+
|
| 828 |
+
async def _handle_stop_listening(websocket: WebSocket) -> None:
    """Handle the stop_listening command.

    Stops audio capture/streaming immediately, confirms the new listening
    state to the client, then — in a background task — lets the assistant
    speak a goodbye before tearing down the OpenAI session and marking the
    conversation as ended.
    """
    services = get_services()
    state = get_state()

    # Stop capturing new audio input
    stop_audio_streaming()

    if services.audio_capture:
        services.audio_capture.stop_capture()

    services.openai.stop_listening()

    # Broadcast listening stopped immediately for UI responsiveness
    await websocket.send_json({
        "type": "listening_state",
        "listening": False,
    })

    # Send goodbye and wait for it to be spoken before disconnecting.
    # This runs in the background so we don't block the WebSocket response.
    async def goodbye_and_disconnect():
        try:
            # Send the goodbye prompt
            await send_goodbye()

            # Wait for the AI to generate and speak the goodbye.
            # Give it enough time to respond and speak (typical goodbye is ~3-5 seconds).
            max_wait = 8.0  # Maximum seconds to wait
            wait_interval = 0.2
            waited = 0.0

            # Brief head start so speech has a chance to begin.
            await asyncio.sleep(0.5)

            # Wait for speech to finish
            while waited < max_wait:
                # BUGFIX: ConversationServices declares no `tts` field, so a
                # plain `services.tts` read raised AttributeError here on the
                # first iteration, silently aborting the wait via the except
                # below. getattr() tolerates the attribute being absent or
                # assigned dynamically at runtime.
                tts = getattr(services, "tts", None)
                if tts and tts.is_speaking:
                    # Speech is ongoing, keep waiting
                    await asyncio.sleep(wait_interval)
                    waited += wait_interval
                elif waited > 1.0:
                    # Speech has finished (or never started after initial delay)
                    break
                else:
                    # Give it a moment to start
                    await asyncio.sleep(wait_interval)
                    waited += wait_interval

        except Exception as e:
            logger.error(f"Error in goodbye sequence: {e}")
        finally:
            # Now disconnect the OpenAI session
            try:
                await services.openai.disconnect()
            except Exception:
                pass

            # Mark conversation as ended (allows idle movements to resume)
            state.end_conversation()

    create_tracked_task(goodbye_and_disconnect())
|
| 890 |
+
|
| 891 |
+
|
| 892 |
+
async def _handle_interrupt(websocket: WebSocket) -> None:
    """Handle the interrupt_response command.

    Stops playback and the in-flight AI response, halts speaking gestures,
    resumes the microphone, clears buffered audio, and acknowledges to iOS.
    """
    services = get_services()

    logger.info("🛑 Interrupt command received from iOS!")

    player = services.audio_player
    if player:
        # Kill any queued or playing audio straight away.
        player.cancel()

    openai_svc = services.openai
    if openai_svc:
        # Abort the response currently being generated.
        await openai_svc.cancel_response()

    gestures = services.speaking_gestures
    if gestures:
        await gestures.stop()

    if services.audio_capture:
        # Let the user speak again right away.
        services.audio_capture.resume_capture()
        logger.info("▶️ Microphone resumed after interrupt")

    if openai_svc:
        # Drop any buffered input audio to prevent echo.
        await openai_svc.clear_audio_buffer()

    # Notify iOS that interrupt was successful
    await websocket.send_json({
        "type": "interrupt_complete",
        "success": True,
    })

    logger.info("✅ Interrupt complete - listening to user")
|
| 926 |
+
|
| 927 |
+
|
| 928 |
+
async def _handle_set_system_prompt(websocket: WebSocket, data: dict) -> None:
    """Handle the set_system_prompt command.

    Installs a client-supplied personality prompt on the OpenAI service, or
    reports an error when the payload carries no prompt.
    """
    services = get_services()

    prompt = data.get("system_prompt", "")
    if not prompt:
        await websocket.send_json({
            "type": "error",
            "message": "No system_prompt provided",
        })
        return

    services.openai.set_custom_personality(prompt)
    logger.info(f"🎭 Custom personality set ({len(prompt)} chars)")
    await websocket.send_json({
        "type": "personality_set",
        "success": True,
    })
|
| 945 |
+
|
| 946 |
+
|
| 947 |
+
async def _handle_set_emotion_animations(websocket: WebSocket, data: dict) -> None:
    """Handle the set_emotion_animations command.

    Registers a custom emotion-to-animation mapping, or reports an error
    when the payload carries no mapping.
    """
    animations = data.get("emotion_animations", {})
    if not animations:
        await websocket.send_json({
            "type": "error",
            "message": "No emotion_animations provided",
        })
        return

    set_custom_animations(animations)
    await websocket.send_json({
        "type": "emotion_animations_set",
        "success": True,
        "emotions": list(animations.keys()),
    })
|
| 962 |
+
|
| 963 |
+
|
| 964 |
+
async def _handle_upload_website(websocket: WebSocket, data: dict) -> None:
    """Handle the upload_website command.

    Validates the payload, saves the uploaded HTML to robot storage, replies
    to the uploader, and broadcasts a website_ready event to all clients.
    """
    async def reject(message: str) -> None:
        # Uniform error payload for every failure path.
        await websocket.send_json({"type": "error", "message": message})

    try:
        website_id = data.get("website_id", "")
        html_content = data.get("html_content", "")
        title = data.get("title", "Generated Website")
        description = data.get("description", "")

        # Validate required fields
        if not website_id:
            logger.error("upload_website: Missing website_id")
            await reject("website_id is required")
            return

        if not html_content:
            logger.error("upload_website: Missing html_content")
            await reject("html_content is required")
            return

        # Log upload size for debugging
        logger.info(f"📤 Receiving website upload: {website_id} ({len(html_content)} bytes)")

        # Save the website to robot storage
        result = await save_website_from_ios(
            website_id=website_id,
            html_content=html_content,
            title=title,
            description=description,
        )

        if not result.get("success"):
            error_msg = result.get("error", "Failed to save website")
            logger.error(f"upload_website failed: {error_msg}")
            await reject(error_msg)
            return

        logger.info(f"💾 Website uploaded from iOS: {website_id}")
        await websocket.send_json({
            "type": "website_uploaded",
            "website_id": website_id,
            "url": result.get("url"),
            "title": title,
            "success": True,
        })

        # Broadcast to all clients that website is ready
        await safe_broadcast({
            "type": "website_ready",
            "website_id": website_id,
            "url": result.get("url"),
            "title": title,
            "is_edit": data.get("is_edit", False),
        })
    except Exception as e:
        logger.error(f"Error handling upload_website: {e}", exc_info=True)
        await reject(f"Upload failed: {str(e)}")
|
| 1031 |
+
|
| 1032 |
+
|
| 1033 |
+
async def _handle_reminder_result(websocket: WebSocket, data: dict) -> None:
    """Handle the reminder_result command from iOS.

    Resolves the pending reminder future matching request_id and
    acknowledges receipt back to the client.
    """
    try:
        req_id = data.get("request_id", "")
        payload = data.get("result", {})

        if not req_id:
            logger.error("reminder_result: Missing request_id")
            await websocket.send_json({
                "type": "error",
                "message": "request_id is required",
            })
            return

        logger.info(f"📱 Received reminder result: {req_id} (success: {payload.get('success', False)})")

        # Hand the result to the reminders module so the pending future resolves.
        handle_reminder_result(req_id, payload)

        # Acknowledge receipt
        await websocket.send_json({
            "type": "reminder_result_received",
            "request_id": req_id,
            "success": True,
        })

    except Exception as e:
        logger.error(f"Error handling reminder_result: {e}", exc_info=True)
        await websocket.send_json({
            "type": "error",
            "message": f"Reminder result failed: {str(e)}",
        })
|
| 1065 |
+
|
| 1066 |
+
|
| 1067 |
+
async def _handle_contacts_result(websocket: WebSocket, data: dict) -> None:
    """Handle the contacts_result command from iOS.

    Resolves the pending contacts future matching request_id and
    acknowledges receipt back to the client.
    """
    try:
        req_id = data.get("request_id", "")
        payload = data.get("result", {})

        if not req_id:
            logger.error("contacts_result: Missing request_id")
            await websocket.send_json({
                "type": "error",
                "message": "request_id is required",
            })
            return

        logger.info(f"📇 Received contacts result: {req_id} (success: {payload.get('success', False)})")

        # Hand the result to the contacts module so the pending future resolves.
        handle_contacts_result(req_id, payload)

        # Acknowledge receipt
        await websocket.send_json({
            "type": "contacts_result_received",
            "request_id": req_id,
            "success": True,
        })

    except Exception as e:
        logger.error(f"Error handling contacts_result: {e}", exc_info=True)
        await websocket.send_json({
            "type": "error",
            "message": f"Contacts result failed: {str(e)}",
        })
|
| 1099 |
+
|
| 1100 |
+
|
| 1101 |
+
async def _handle_scheduled_message_result(websocket: WebSocket, data: dict) -> None:
    """Handle the scheduled_message_result command from iOS.

    Resolves the pending scheduled-message future matching request_id and
    acknowledges receipt back to the client.
    """
    try:
        req_id = data.get("request_id", "")
        payload = data.get("result", {})

        if not req_id:
            logger.error("scheduled_message_result: Missing request_id")
            await websocket.send_json({
                "type": "error",
                "message": "request_id is required",
            })
            return

        logger.info(f"📬 Received scheduled message result: {req_id} (success: {payload.get('success', False)})")

        # Hand the result to the scheduled_messages module so the pending future resolves.
        handle_scheduled_message_result(req_id, payload)

        # Acknowledge receipt
        await websocket.send_json({
            "type": "scheduled_message_result_received",
            "request_id": req_id,
            "success": True,
        })

    except Exception as e:
        logger.error(f"Error handling scheduled_message_result: {e}", exc_info=True)
        await websocket.send_json({
            "type": "error",
            "message": f"Scheduled message result failed: {str(e)}",
        })
|
reachys_brain/routes/conversation_messages.py
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Multilingual conversation messages for Reachy.
|
| 2 |
+
|
| 3 |
+
Contains greeting and goodbye messages in multiple languages,
|
| 4 |
+
as well as dance moves for post-speech animations and stop commands.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
# Stop commands that end the conversation when spoken
|
| 8 |
+
# These are checked case-insensitively and should be short/distinct
|
| 9 |
+
STOP_COMMANDS = {
|
| 10 |
+
"en": ["stop", "stop it", "that's enough", "enough", "shut up", "be quiet"],
|
| 11 |
+
"nl": ["stop", "stop maar", "genoeg", "hou op", "stil", "wees stil"],
|
| 12 |
+
"de": ["stop", "stopp", "genug", "halt", "still", "sei still", "hör auf"],
|
| 13 |
+
"fr": ["stop", "arrête", "assez", "suffit", "tais-toi", "silence"],
|
| 14 |
+
"es": ["stop", "para", "basta", "suficiente", "cállate", "silencio"],
|
| 15 |
+
"it": ["stop", "basta", "fermati", "smettila", "silenzio", "zitto"],
|
| 16 |
+
"pt": ["stop", "pare", "para", "basta", "chega", "silêncio", "cala"],
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
# Fun dances to play occasionally
|
| 20 |
+
DANCE_MOVES = [
|
| 21 |
+
"simple_nod", "yeah_nod", "head_tilt_roll", "side_glance_flick",
|
| 22 |
+
"groovy_sway_and_roll", "uh_huh_tilt"
|
| 23 |
+
]
|
| 24 |
+
|
| 25 |
+
# Greeting messages by language (when conversation starts)
|
| 26 |
+
# These are used when we don't know the user's name
|
| 27 |
+
GREETINGS = {
|
| 28 |
+
"en": [
|
| 29 |
+
"Hello! I'm Reachy. How can I help you today?",
|
| 30 |
+
"Hi there! Reachy here. What would you like to talk about?",
|
| 31 |
+
"Hey! Great to see you. What's on your mind?",
|
| 32 |
+
],
|
| 33 |
+
"nl": [
|
| 34 |
+
"Hallo! Ik ben Reachy. Hoe kan ik je helpen?",
|
| 35 |
+
"Hoi! Reachy hier. Waar wil je over praten?",
|
| 36 |
+
"Hey! Leuk je te zien. Wat kan ik voor je doen?",
|
| 37 |
+
],
|
| 38 |
+
"de": [
|
| 39 |
+
"Hallo! Ich bin Reachy. Wie kann ich dir helfen?",
|
| 40 |
+
"Hi! Reachy hier. Worüber möchtest du sprechen?",
|
| 41 |
+
"Hey! Schön dich zu sehen. Was kann ich für dich tun?",
|
| 42 |
+
],
|
| 43 |
+
"fr": [
|
| 44 |
+
"Bonjour! Je suis Reachy. Comment puis-je vous aider?",
|
| 45 |
+
"Salut! Reachy ici. De quoi voulez-vous parler?",
|
| 46 |
+
"Coucou! Ravi de vous voir. Qu'est-ce qui vous amène?",
|
| 47 |
+
],
|
| 48 |
+
"es": [
|
| 49 |
+
"¡Hola! Soy Reachy. ¿Cómo puedo ayudarte hoy?",
|
| 50 |
+
"¡Hola! Reachy aquí. ¿De qué te gustaría hablar?",
|
| 51 |
+
"¡Hey! Qué bueno verte. ¿Qué tienes en mente?",
|
| 52 |
+
],
|
| 53 |
+
"it": [
|
| 54 |
+
"Ciao! Sono Reachy. Come posso aiutarti oggi?",
|
| 55 |
+
"Ciao! Reachy qui. Di cosa vorresti parlare?",
|
| 56 |
+
"Ehi! Bello vederti. Cosa hai in mente?",
|
| 57 |
+
],
|
| 58 |
+
"pt": [
|
| 59 |
+
"Olá! Eu sou Reachy. Como posso ajudar você hoje?",
|
| 60 |
+
"Oi! Reachy aqui. Sobre o que você quer conversar?",
|
| 61 |
+
"Ei! Que bom te ver. O que posso fazer por você?",
|
| 62 |
+
],
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
# Personalized greeting messages by language (when we know the user's name)
|
| 66 |
+
# {name} will be replaced with the user's name
|
| 67 |
+
PERSONALIZED_GREETINGS = {
|
| 68 |
+
"en": [
|
| 69 |
+
"Hey {name}! Great to see you again. What can I do for you?",
|
| 70 |
+
"Hello {name}! How's it going? What would you like to talk about?",
|
| 71 |
+
"Hi {name}! Nice to see you. How can I help today?",
|
| 72 |
+
"{name}! Good to see you again. What's on your mind?",
|
| 73 |
+
],
|
| 74 |
+
"nl": [
|
| 75 |
+
"Hey {name}! Leuk je weer te zien. Wat kan ik voor je doen?",
|
| 76 |
+
"Hallo {name}! Hoe gaat het? Waar wil je over praten?",
|
| 77 |
+
"Hoi {name}! Fijn je te zien. Hoe kan ik je helpen?",
|
| 78 |
+
"{name}! Goed je weer te zien. Wat heb je op je hart?",
|
| 79 |
+
],
|
| 80 |
+
"de": [
|
| 81 |
+
"Hey {name}! Schön dich wiederzusehen. Was kann ich für dich tun?",
|
| 82 |
+
"Hallo {name}! Wie geht's? Worüber möchtest du sprechen?",
|
| 83 |
+
"Hi {name}! Schön dich zu sehen. Wie kann ich dir helfen?",
|
| 84 |
+
"{name}! Gut dich wiederzusehen. Was hast du auf dem Herzen?",
|
| 85 |
+
],
|
| 86 |
+
"fr": [
|
| 87 |
+
"Hey {name}! Content de te revoir. Que puis-je faire pour toi?",
|
| 88 |
+
"Bonjour {name}! Comment ça va? De quoi voudrais-tu parler?",
|
| 89 |
+
"Salut {name}! Ravi de te voir. Comment puis-je t'aider?",
|
| 90 |
+
"{name}! Bon de te revoir. Qu'est-ce qui t'amène?",
|
| 91 |
+
],
|
| 92 |
+
"es": [
|
| 93 |
+
"¡Hey {name}! Qué bueno verte de nuevo. ¿Qué puedo hacer por ti?",
|
| 94 |
+
"¡Hola {name}! ¿Cómo estás? ¿De qué te gustaría hablar?",
|
| 95 |
+
"¡Hola {name}! Me alegra verte. ¿En qué puedo ayudarte?",
|
| 96 |
+
"¡{name}! Qué bueno verte otra vez. ¿Qué tienes en mente?",
|
| 97 |
+
],
|
| 98 |
+
"it": [
|
| 99 |
+
"Hey {name}! Bello rivederti. Cosa posso fare per te?",
|
| 100 |
+
"Ciao {name}! Come va? Di cosa vorresti parlare?",
|
| 101 |
+
"Ciao {name}! Felice di vederti. Come posso aiutarti?",
|
| 102 |
+
"{name}! Bello rivederti. Cosa hai in mente?",
|
| 103 |
+
],
|
| 104 |
+
"pt": [
|
| 105 |
+
"Hey {name}! Que bom te ver de novo. O que posso fazer por você?",
|
| 106 |
+
"Olá {name}! Como você está? Sobre o que quer conversar?",
|
| 107 |
+
"Oi {name}! Bom te ver. Como posso ajudar?",
|
| 108 |
+
"{name}! Bom te ver de novo. O que você tem em mente?",
|
| 109 |
+
],
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
# Goodbye messages by language (when conversation ends)
|
| 113 |
+
GOODBYES = {
|
| 114 |
+
"en": [
|
| 115 |
+
"Thank you! Speak to you later!",
|
| 116 |
+
"Goodbye! It was nice talking to you!",
|
| 117 |
+
"See you later! Take care!",
|
| 118 |
+
],
|
| 119 |
+
"nl": [
|
| 120 |
+
"Dankjewel! Tot later!",
|
| 121 |
+
"Dag! Het was leuk om met je te praten!",
|
| 122 |
+
"Tot ziens! Pas goed op jezelf!",
|
| 123 |
+
],
|
| 124 |
+
"de": [
|
| 125 |
+
"Danke! Bis später!",
|
| 126 |
+
"Tschüss! Es war schön mit dir zu reden!",
|
| 127 |
+
"Bis bald! Pass auf dich auf!",
|
| 128 |
+
],
|
| 129 |
+
"fr": [
|
| 130 |
+
"Merci! À bientôt!",
|
| 131 |
+
"Au revoir! C'était agréable de parler avec vous!",
|
| 132 |
+
"À plus tard! Prenez soin de vous!",
|
| 133 |
+
],
|
| 134 |
+
"es": [
|
| 135 |
+
"¡Gracias! ¡Hasta luego!",
|
| 136 |
+
"¡Adiós! ¡Fue un placer hablar contigo!",
|
| 137 |
+
"¡Nos vemos! ¡Cuídate!",
|
| 138 |
+
],
|
| 139 |
+
"it": [
|
| 140 |
+
"Grazie! A dopo!",
|
| 141 |
+
"Arrivederci! È stato bello parlare con te!",
|
| 142 |
+
"Ci vediamo! Prenditi cura di te!",
|
| 143 |
+
],
|
| 144 |
+
"pt": [
|
| 145 |
+
"Obrigado! Até mais!",
|
| 146 |
+
"Tchau! Foi bom conversar com você!",
|
| 147 |
+
"Até logo! Cuide-se!",
|
| 148 |
+
],
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
# Personalized goodbye messages by language (when we know the user's name)
|
| 152 |
+
# {name} will be replaced with the user's name
|
| 153 |
+
PERSONALIZED_GOODBYES = {
|
| 154 |
+
"en": [
|
| 155 |
+
"Goodbye {name}! It was great talking to you!",
|
| 156 |
+
"See you later {name}! Take care!",
|
| 157 |
+
"Bye {name}! Talk to you soon!",
|
| 158 |
+
],
|
| 159 |
+
"nl": [
|
| 160 |
+
"Dag {name}! Het was fijn om met je te praten!",
|
| 161 |
+
"Tot ziens {name}! Pas goed op jezelf!",
|
| 162 |
+
"Doei {name}! Tot snel!",
|
| 163 |
+
],
|
| 164 |
+
"de": [
|
| 165 |
+
"Tschüss {name}! Es war schön mit dir zu reden!",
|
| 166 |
+
"Bis bald {name}! Pass auf dich auf!",
|
| 167 |
+
"Ciao {name}! Bis zum nächsten Mal!",
|
| 168 |
+
],
|
| 169 |
+
"fr": [
|
| 170 |
+
"Au revoir {name}! C'était super de parler avec toi!",
|
| 171 |
+
"À plus tard {name}! Prends soin de toi!",
|
| 172 |
+
"Salut {name}! À bientôt!",
|
| 173 |
+
],
|
| 174 |
+
"es": [
|
| 175 |
+
"¡Adiós {name}! ¡Fue genial hablar contigo!",
|
| 176 |
+
"¡Hasta luego {name}! ¡Cuídate!",
|
| 177 |
+
"¡Chao {name}! ¡Nos vemos pronto!",
|
| 178 |
+
],
|
| 179 |
+
"it": [
|
| 180 |
+
"Ciao {name}! È stato bello parlare con te!",
|
| 181 |
+
"A presto {name}! Prenditi cura di te!",
|
| 182 |
+
"Arrivederci {name}! Ci vediamo!",
|
| 183 |
+
],
|
| 184 |
+
"pt": [
|
| 185 |
+
"Tchau {name}! Foi ótimo conversar com você!",
|
| 186 |
+
"Até mais {name}! Cuide-se!",
|
| 187 |
+
"Falou {name}! Até a próxima!",
|
| 188 |
+
],
|
| 189 |
+
}
|
| 190 |
+
|
reachys_brain/routes/conversation_services.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Service instances and lifecycle management for conversation handling.
|
| 2 |
+
|
| 3 |
+
Centralizes all service initialization and cleanup for the conversation
|
| 4 |
+
system, reducing global state and improving testability.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import asyncio
|
| 8 |
+
import logging
|
| 9 |
+
from dataclasses import dataclass, field
|
| 10 |
+
from typing import Callable, Optional
|
| 11 |
+
|
| 12 |
+
from ..audio_capture import AsyncAudioCaptureService
|
| 13 |
+
from ..audio_playback import StreamingAudioPlayer
|
| 14 |
+
from ..animation_coordinator import AnimationCoordinator
|
| 15 |
+
from ..motion_service import MotionService
|
| 16 |
+
from ..openai_realtime import OpenAIRealtimeService
|
| 17 |
+
from ..speaking_gestures import SpeakingGesturesService
|
| 18 |
+
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@dataclass
class ConversationServices:
    """Container for all conversation-related services.

    Provides a single point of access for all services needed during
    a conversation, making dependencies explicit and testable.
    """

    # Realtime OpenAI session (created by init_services()).
    openai: Optional[OpenAIRealtimeService] = None
    # Microphone input service.
    audio_capture: Optional[AsyncAudioCaptureService] = None
    # Speaker output service.
    audio_player: Optional[StreamingAudioPlayer] = None
    # Robot motion control; also passed into the AnimationCoordinator.
    motion: Optional[MotionService] = None
    animation_coordinator: Optional[AnimationCoordinator] = None
    # Gesture service used while the robot is speaking.
    speaking_gestures: Optional[SpeakingGesturesService] = None

    # Locks for thread safety
    pre_speech_animation_lock: Optional[asyncio.Lock] = None

    # Callbacks for wiring up event handlers
    on_connection_state: Optional[Callable] = None
    on_speaking_state: Optional[Callable] = None
    on_transcript_update: Optional[Callable] = None
    on_response_text: Optional[Callable] = None
    on_audio_delta: Optional[Callable] = None
    on_error: Optional[Callable] = None
    on_app_change: Optional[Callable] = None
    on_tool_usage: Optional[Callable] = None
    on_website_ready: Optional[Callable] = None

    # NOTE(review): some conversation handlers read `services.tts`, which is
    # not declared here — presumably assigned dynamically elsewhere; confirm.

    @property
    def is_initialized(self) -> bool:
        """Check if core services are initialized."""
        return self.openai is not None and self.audio_capture is not None
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@dataclass
class ConversationState:
    """Mutable state for the current conversation.

    Tracks audio buffering, animation state, and custom configurations.
    """

    # Audio buffering for pre-speech animation
    audio_buffer: list[bytes] = field(default_factory=list)
    is_buffering_audio: bool = True
    animation_played_for_response: bool = False

    # Conversation tracking
    response_count: int = 0
    last_response_text: str = ""
    is_active: bool = False  # True when a conversation is in progress

    # Custom app state
    custom_emotion_animations: dict = field(default_factory=dict)

    # Audio streaming task
    audio_stream_task: Optional[asyncio.Task] = None

    def reset_for_new_response(self) -> None:
        """Reset state for a new AI response."""
        # Each response starts in buffering mode with an empty audio buffer
        # and no animation played yet.
        self.is_buffering_audio = True
        self.animation_played_for_response = False
        self.audio_buffer = []

    def reset_for_new_conversation(self) -> None:
        """Reset state for a new conversation."""
        self.response_count = 0
        self.last_response_text = ""
        self.is_active = True  # Mark conversation as active
        self.reset_for_new_response()

    def end_conversation(self) -> None:
        """Mark conversation as ended."""
        self.is_active = False
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# Global instances (initialized on first use)
# Module-level singletons; always access them via get_services() / get_state().
_services: Optional[ConversationServices] = None
_state: Optional[ConversationState] = None
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def get_services() -> ConversationServices:
    """Get or create the conversation services instance.

    Returns:
        The ConversationServices singleton.
    """
    global _services
    if _services is not None:
        return _services
    # Lazily construct the singleton on first access.
    _services = ConversationServices()
    return _services
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def get_state() -> ConversationState:
    """Get or create the conversation state instance.

    Returns:
        The ConversationState singleton.
    """
    global _state
    if _state is not None:
        return _state
    # Lazily construct the singleton on first access.
    _state = ConversationState()
    return _state
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def init_services() -> ConversationServices:
    """Initialize all conversation services.

    Creates and wires up all services needed for voice conversations.

    Returns:
        The initialized ConversationServices instance.
    """
    svc = get_services()

    # Instantiate every service the conversation pipeline depends on.
    svc.openai = OpenAIRealtimeService()
    svc.audio_capture = AsyncAudioCaptureService()
    svc.audio_player = StreamingAudioPlayer()
    svc.motion = MotionService()
    # The animation coordinator drives animations through the motion service.
    svc.animation_coordinator = AnimationCoordinator(svc.motion)
    svc.speaking_gestures = SpeakingGesturesService()
    svc.pre_speech_animation_lock = asyncio.Lock()

    logger.info("Motion service, AnimationCoordinator, and SpeakingGestures initialized")
    logger.info("Conversation services initialized")

    return svc
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def cleanup_services() -> None:
    """Clean up all conversation services.

    Cancels the in-flight audio streaming task, releases audio capture and
    playback resources, and schedules the speaking-gestures service to close.
    Safe to call whether or not an asyncio event loop is currently running.
    """
    services = get_services()
    state = get_state()

    # Cancel audio streaming task
    if state.audio_stream_task:
        state.audio_stream_task.cancel()
        state.audio_stream_task = None

    if services.audio_capture:
        services.audio_capture.cleanup()

    if services.audio_player:
        services.audio_player.cleanup()

    # Stop speaking gestures if running. asyncio.create_task() raises
    # RuntimeError when there is no running event loop (e.g. cleanup invoked
    # after loop shutdown); don't let that abort the rest of cleanup.
    if services.speaking_gestures:
        try:
            # NOTE(review): fire-and-forget — the task reference is not
            # retained, so it could be garbage-collected before completing;
            # confirm the close() coroutine is short-lived.
            asyncio.create_task(services.speaking_gestures.close())
        except RuntimeError:
            logger.warning("No running event loop; speaking gestures not closed")

    logger.info("Conversation services cleaned up")
def wire_openai_callbacks(
    on_connection_state: Callable,
    on_speaking_state: Callable,
    on_transcript_update: Callable,
    on_response_text: Callable,
    on_audio_delta: Callable,
    on_error: Callable,
    on_app_change: Callable,
    on_tool_usage: Callable,
    on_website_ready: Callable,
) -> None:
    """Wire up callbacks from OpenAI service to handlers.

    Args:
        on_connection_state: Handler for connection state changes.
        on_speaking_state: Handler for speaking state changes.
        on_transcript_update: Handler for transcript updates.
        on_response_text: Handler for response text.
        on_audio_delta: Handler for audio data.
        on_error: Handler for errors.
        on_app_change: Handler for app changes.
        on_tool_usage: Handler for tool usage.
        on_website_ready: Handler for website ready events.

    Raises:
        RuntimeError: If the OpenAI service has not been initialized.
    """
    services = get_services()

    if not services.openai:
        raise RuntimeError("OpenAI service not initialized")

    # Map each callback attribute on the service to its handler and assign
    # them in one pass.
    handlers = {
        "on_connection_state": on_connection_state,
        "on_speaking_state": on_speaking_state,
        "on_transcript_update": on_transcript_update,
        "on_response_text": on_response_text,
        "on_audio_delta": on_audio_delta,
        "on_error": on_error,
        "on_app_change": on_app_change,
        "on_tool_usage": on_tool_usage,
        "on_website_ready": on_website_ready,
    }
    for attr, handler in handlers.items():
        setattr(services.openai, attr, handler)