Spaces:
Running
Running
Deploy new version
Browse files- reachys_brain/app_tools.py +19 -59
- reachys_brain/database.py +0 -1620
- reachys_brain/database/__init__.py +100 -0
- reachys_brain/database/animations.py +203 -0
- reachys_brain/database/apps.py +249 -0
- reachys_brain/database/base.py +257 -0
- reachys_brain/database/meetings.py +241 -0
- reachys_brain/database/notes.py +158 -0
- reachys_brain/database/scheduled_messages.py +215 -0
- reachys_brain/database/tamareachy.py +145 -0
- reachys_brain/database/user_settings.py +94 -0
- reachys_brain/database/websites.py +158 -0
- reachys_brain/openai_realtime/__init__.py +21 -0
- reachys_brain/openai_realtime/enums.py +38 -0
- reachys_brain/{openai_realtime.py → openai_realtime/service.py} +142 -646
- reachys_brain/openai_realtime/session.py +201 -0
- reachys_brain/openai_realtime/tool_executor.py +158 -0
- reachys_brain/openai_realtime/tools_loader.py +80 -0
- reachys_brain/routes/conversation.py +0 -1285
- reachys_brain/routes/conversation/__init__.py +115 -0
- reachys_brain/routes/conversation/animation.py +22 -0
- reachys_brain/routes/conversation/audio.py +24 -0
- reachys_brain/routes/conversation/callbacks.py +252 -0
- reachys_brain/routes/conversation/commands.py +434 -0
- reachys_brain/routes/conversation/greeting.py +147 -0
- reachys_brain/routes/conversation/history.py +324 -0
- reachys_brain/routes/conversation/meeting_callbacks.py +45 -0
- reachys_brain/routes/conversation/messages.py +10 -0
- reachys_brain/routes/conversation/request_handlers.py +301 -0
- reachys_brain/routes/conversation/services.py +24 -0
- reachys_brain/routes/conversation/state.py +9 -0
- reachys_brain/routes/conversation/websocket.py +158 -0
- reachys_brain/routes/voice.py +0 -1139
- reachys_brain/routes/voice/__init__.py +70 -0
- reachys_brain/routes/voice/constants.py +101 -0
- reachys_brain/routes/voice/endpoints.py +250 -0
- reachys_brain/routes/voice/playback.py +192 -0
- reachys_brain/routes/voice/samples.py +123 -0
- reachys_brain/routes/voice/settings.py +344 -0
- reachys_brain/routes/voice/vad.py +41 -0
- reachys_brain/tools/datetime_tool.py +3 -6
- reachys_brain/tools/reminders.py +2 -3
reachys_brain/app_tools.py
CHANGED
|
@@ -22,34 +22,30 @@ logger = logging.getLogger(__name__)
|
|
| 22 |
|
| 23 |
|
| 24 |
# Tool definitions for OpenAI Realtime API
|
|
|
|
| 25 |
APP_TOOLS = [
|
| 26 |
{
|
| 27 |
"type": "function",
|
| 28 |
"name": "create_custom_app",
|
| 29 |
"description": (
|
| 30 |
-
"Create a
|
| 31 |
-
"
|
| 32 |
-
"After confirmation, this creates the app and automatically activates it. "
|
| 33 |
-
"The app will have a detailed AI-generated personality based on the description."
|
| 34 |
),
|
| 35 |
"parameters": {
|
| 36 |
"type": "object",
|
| 37 |
"properties": {
|
| 38 |
"name": {
|
| 39 |
"type": "string",
|
| 40 |
-
"description": "
|
| 41 |
},
|
| 42 |
"description": {
|
| 43 |
"type": "string",
|
| 44 |
-
"description":
|
| 45 |
-
"A brief description of what the app should do and its personality "
|
| 46 |
-
"(e.g., 'A patient English tutor that helps with grammar and vocabulary')"
|
| 47 |
-
)
|
| 48 |
},
|
| 49 |
"icon_color": {
|
| 50 |
"type": "string",
|
| 51 |
"enum": ["blue", "purple", "pink", "red", "orange", "yellow", "green", "teal", "indigo"],
|
| 52 |
-
"description": "
|
| 53 |
}
|
| 54 |
},
|
| 55 |
"required": ["name", "description"]
|
|
@@ -59,17 +55,15 @@ APP_TOOLS = [
|
|
| 59 |
"type": "function",
|
| 60 |
"name": "activate_custom_app",
|
| 61 |
"description": (
|
| 62 |
-
"Activate
|
| 63 |
-
"
|
| 64 |
-
"Use list_custom_apps first if you're not sure which apps are available. "
|
| 65 |
-
"Fuzzy matching is used - you don't need the exact name."
|
| 66 |
),
|
| 67 |
"parameters": {
|
| 68 |
"type": "object",
|
| 69 |
"properties": {
|
| 70 |
"app_name": {
|
| 71 |
"type": "string",
|
| 72 |
-
"description": "
|
| 73 |
}
|
| 74 |
},
|
| 75 |
"required": ["app_name"]
|
|
@@ -78,11 +72,7 @@ APP_TOOLS = [
|
|
| 78 |
{
|
| 79 |
"type": "function",
|
| 80 |
"name": "deactivate_app",
|
| 81 |
-
"description":
|
| 82 |
-
"Deactivate the current custom app and return to the default Reachy personality. "
|
| 83 |
-
"IMPORTANT: Always ask the user for confirmation before calling this tool. "
|
| 84 |
-
"Call this when the user wants to stop using a custom app."
|
| 85 |
-
),
|
| 86 |
"parameters": {
|
| 87 |
"type": "object",
|
| 88 |
"properties": {},
|
|
@@ -92,11 +82,7 @@ APP_TOOLS = [
|
|
| 92 |
{
|
| 93 |
"type": "function",
|
| 94 |
"name": "list_custom_apps",
|
| 95 |
-
"description":
|
| 96 |
-
"List all available custom apps. "
|
| 97 |
-
"Use this to show the user what apps they have created. "
|
| 98 |
-
"No confirmation needed for this tool."
|
| 99 |
-
),
|
| 100 |
"parameters": {
|
| 101 |
"type": "object",
|
| 102 |
"properties": {},
|
|
@@ -106,11 +92,7 @@ APP_TOOLS = [
|
|
| 106 |
{
|
| 107 |
"type": "function",
|
| 108 |
"name": "wake_up",
|
| 109 |
-
"description":
|
| 110 |
-
"Wake up the robot by enabling motors. "
|
| 111 |
-
"Use this when the user says 'wake up', 'turn on', or similar. "
|
| 112 |
-
"No confirmation needed - just do it and announce."
|
| 113 |
-
),
|
| 114 |
"parameters": {
|
| 115 |
"type": "object",
|
| 116 |
"properties": {},
|
|
@@ -120,11 +102,7 @@ APP_TOOLS = [
|
|
| 120 |
{
|
| 121 |
"type": "function",
|
| 122 |
"name": "go_to_sleep",
|
| 123 |
-
"description":
|
| 124 |
-
"Put the robot to sleep by disabling motors. "
|
| 125 |
-
"Use this when the user says 'go to sleep', 'sleep', 'turn off', or similar. "
|
| 126 |
-
"No confirmation needed - just do it and announce."
|
| 127 |
-
),
|
| 128 |
"parameters": {
|
| 129 |
"type": "object",
|
| 130 |
"properties": {},
|
|
@@ -134,18 +112,13 @@ APP_TOOLS = [
|
|
| 134 |
{
|
| 135 |
"type": "function",
|
| 136 |
"name": "remember_user_name",
|
| 137 |
-
"description":
|
| 138 |
-
"Store the user's name for personalized greetings. "
|
| 139 |
-
"Use this when the user tells you their name (e.g., 'My name is John', 'I'm Sarah', 'Call me Mike'). "
|
| 140 |
-
"IMPORTANT: Only use the first name or nickname the user prefers. "
|
| 141 |
-
"This makes future greetings more personal."
|
| 142 |
-
),
|
| 143 |
"parameters": {
|
| 144 |
"type": "object",
|
| 145 |
"properties": {
|
| 146 |
"name": {
|
| 147 |
"type": "string",
|
| 148 |
-
"description": "
|
| 149 |
}
|
| 150 |
},
|
| 151 |
"required": ["name"]
|
|
@@ -154,11 +127,7 @@ APP_TOOLS = [
|
|
| 154 |
{
|
| 155 |
"type": "function",
|
| 156 |
"name": "get_user_name",
|
| 157 |
-
"description":
|
| 158 |
-
"Retrieve the user's stored name. "
|
| 159 |
-
"Use this to check if you already know the user's name. "
|
| 160 |
-
"Returns the stored name or null if not set."
|
| 161 |
-
),
|
| 162 |
"parameters": {
|
| 163 |
"type": "object",
|
| 164 |
"properties": {},
|
|
@@ -168,18 +137,13 @@ APP_TOOLS = [
|
|
| 168 |
{
|
| 169 |
"type": "function",
|
| 170 |
"name": "remember_preferred_country",
|
| 171 |
-
"description":
|
| 172 |
-
"Store the user's preferred country for timezone and localization. "
|
| 173 |
-
"Use this when the user tells you their country (e.g., 'I live in the Netherlands', "
|
| 174 |
-
"'I'm from Germany', 'My country is Japan'). "
|
| 175 |
-
"This is used to show correct local time and personalize content like news."
|
| 176 |
-
),
|
| 177 |
"parameters": {
|
| 178 |
"type": "object",
|
| 179 |
"properties": {
|
| 180 |
"country": {
|
| 181 |
"type": "string",
|
| 182 |
-
"description": "
|
| 183 |
}
|
| 184 |
},
|
| 185 |
"required": ["country"]
|
|
@@ -188,11 +152,7 @@ APP_TOOLS = [
|
|
| 188 |
{
|
| 189 |
"type": "function",
|
| 190 |
"name": "get_preferred_country",
|
| 191 |
-
"description":
|
| 192 |
-
"Retrieve the user's stored preferred country. "
|
| 193 |
-
"Use this to check if you already know the user's country. "
|
| 194 |
-
"Returns the stored country or null if not set."
|
| 195 |
-
),
|
| 196 |
"parameters": {
|
| 197 |
"type": "object",
|
| 198 |
"properties": {},
|
|
|
|
| 22 |
|
| 23 |
|
| 24 |
# Tool definitions for OpenAI Realtime API
|
| 25 |
+
# Note: Quick tools don't need announcements, others should use "Let me..." prefix
|
| 26 |
APP_TOOLS = [
|
| 27 |
{
|
| 28 |
"type": "function",
|
| 29 |
"name": "create_custom_app",
|
| 30 |
"description": (
|
| 31 |
+
"Create a custom app with a specific personality. "
|
| 32 |
+
"Ask for confirmation first. Say 'Let me create that for you' before using."
|
|
|
|
|
|
|
| 33 |
),
|
| 34 |
"parameters": {
|
| 35 |
"type": "object",
|
| 36 |
"properties": {
|
| 37 |
"name": {
|
| 38 |
"type": "string",
|
| 39 |
+
"description": "Short app name (e.g., 'English Tutor')"
|
| 40 |
},
|
| 41 |
"description": {
|
| 42 |
"type": "string",
|
| 43 |
+
"description": "Brief description of app personality/purpose"
|
|
|
|
|
|
|
|
|
|
| 44 |
},
|
| 45 |
"icon_color": {
|
| 46 |
"type": "string",
|
| 47 |
"enum": ["blue", "purple", "pink", "red", "orange", "yellow", "green", "teal", "indigo"],
|
| 48 |
+
"description": "Icon color"
|
| 49 |
}
|
| 50 |
},
|
| 51 |
"required": ["name", "description"]
|
|
|
|
| 55 |
"type": "function",
|
| 56 |
"name": "activate_custom_app",
|
| 57 |
"description": (
|
| 58 |
+
"Activate a custom app by name. Ask for confirmation first. "
|
| 59 |
+
"Fuzzy matching supported."
|
|
|
|
|
|
|
| 60 |
),
|
| 61 |
"parameters": {
|
| 62 |
"type": "object",
|
| 63 |
"properties": {
|
| 64 |
"app_name": {
|
| 65 |
"type": "string",
|
| 66 |
+
"description": "App name or partial name"
|
| 67 |
}
|
| 68 |
},
|
| 69 |
"required": ["app_name"]
|
|
|
|
| 72 |
{
|
| 73 |
"type": "function",
|
| 74 |
"name": "deactivate_app",
|
| 75 |
+
"description": "Deactivate current app, return to default. Ask for confirmation first.",
|
|
|
|
|
|
|
|
|
|
|
|
|
| 76 |
"parameters": {
|
| 77 |
"type": "object",
|
| 78 |
"properties": {},
|
|
|
|
| 82 |
{
|
| 83 |
"type": "function",
|
| 84 |
"name": "list_custom_apps",
|
| 85 |
+
"description": "List available custom apps. Quick tool - no announcement needed.",
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
"parameters": {
|
| 87 |
"type": "object",
|
| 88 |
"properties": {},
|
|
|
|
| 92 |
{
|
| 93 |
"type": "function",
|
| 94 |
"name": "wake_up",
|
| 95 |
+
"description": "Enable motors. Quick tool - just do it.",
|
|
|
|
|
|
|
|
|
|
|
|
|
| 96 |
"parameters": {
|
| 97 |
"type": "object",
|
| 98 |
"properties": {},
|
|
|
|
| 102 |
{
|
| 103 |
"type": "function",
|
| 104 |
"name": "go_to_sleep",
|
| 105 |
+
"description": "Disable motors. Quick tool - just do it.",
|
|
|
|
|
|
|
|
|
|
|
|
|
| 106 |
"parameters": {
|
| 107 |
"type": "object",
|
| 108 |
"properties": {},
|
|
|
|
| 112 |
{
|
| 113 |
"type": "function",
|
| 114 |
"name": "remember_user_name",
|
| 115 |
+
"description": "Save user's name when they introduce themselves. Quick tool.",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 116 |
"parameters": {
|
| 117 |
"type": "object",
|
| 118 |
"properties": {
|
| 119 |
"name": {
|
| 120 |
"type": "string",
|
| 121 |
+
"description": "User's name or nickname"
|
| 122 |
}
|
| 123 |
},
|
| 124 |
"required": ["name"]
|
|
|
|
| 127 |
{
|
| 128 |
"type": "function",
|
| 129 |
"name": "get_user_name",
|
| 130 |
+
"description": "Check if user's name is stored. Quick tool.",
|
|
|
|
|
|
|
|
|
|
|
|
|
| 131 |
"parameters": {
|
| 132 |
"type": "object",
|
| 133 |
"properties": {},
|
|
|
|
| 137 |
{
|
| 138 |
"type": "function",
|
| 139 |
"name": "remember_preferred_country",
|
| 140 |
+
"description": "Save user's country for timezone. Quick tool.",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 141 |
"parameters": {
|
| 142 |
"type": "object",
|
| 143 |
"properties": {
|
| 144 |
"country": {
|
| 145 |
"type": "string",
|
| 146 |
+
"description": "Country name"
|
| 147 |
}
|
| 148 |
},
|
| 149 |
"required": ["country"]
|
|
|
|
| 152 |
{
|
| 153 |
"type": "function",
|
| 154 |
"name": "get_preferred_country",
|
| 155 |
+
"description": "Check if user's country is stored. Quick tool.",
|
|
|
|
|
|
|
|
|
|
|
|
|
| 156 |
"parameters": {
|
| 157 |
"type": "object",
|
| 158 |
"properties": {},
|
reachys_brain/database.py
DELETED
|
@@ -1,1620 +0,0 @@
|
|
| 1 |
-
"""SQLite database service for persistent storage.
|
| 2 |
-
|
| 3 |
-
Provides async database operations for storing custom apps and other data.
|
| 4 |
-
Uses aiosqlite for non-blocking database access.
|
| 5 |
-
"""
|
| 6 |
-
|
| 7 |
-
import json
|
| 8 |
-
import logging
|
| 9 |
-
from datetime import datetime
|
| 10 |
-
from pathlib import Path
|
| 11 |
-
from typing import Optional
|
| 12 |
-
|
| 13 |
-
import aiosqlite
|
| 14 |
-
|
| 15 |
-
logger = logging.getLogger(__name__)
|
| 16 |
-
|
| 17 |
-
# Database location - stored in user's home directory
|
| 18 |
-
DATABASE_DIR = Path.home() / ".reachy"
|
| 19 |
-
DATABASE_PATH = DATABASE_DIR / "reachy_bridge.db"
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
class DatabaseService:
|
| 23 |
-
"""Async SQLite database service for the Reachy iOS Bridge."""
|
| 24 |
-
|
| 25 |
-
def __init__(self, db_path: Optional[Path] = None):
|
| 26 |
-
"""Initialize the database service.
|
| 27 |
-
|
| 28 |
-
Args:
|
| 29 |
-
db_path: Optional custom database path. Defaults to ~/.reachy/reachy_bridge.db
|
| 30 |
-
"""
|
| 31 |
-
self.db_path = db_path or DATABASE_PATH
|
| 32 |
-
self._connection: Optional[aiosqlite.Connection] = None
|
| 33 |
-
|
| 34 |
-
async def initialize(self) -> None:
|
| 35 |
-
"""Initialize the database and create tables if needed."""
|
| 36 |
-
# Ensure directory exists
|
| 37 |
-
self.db_path.parent.mkdir(parents=True, exist_ok=True)
|
| 38 |
-
|
| 39 |
-
logger.info(f"Initializing database at {self.db_path}")
|
| 40 |
-
|
| 41 |
-
async with aiosqlite.connect(self.db_path) as db:
|
| 42 |
-
await self._create_tables(db)
|
| 43 |
-
await db.commit()
|
| 44 |
-
|
| 45 |
-
logger.info("Database initialized successfully")
|
| 46 |
-
|
| 47 |
-
async def _create_tables(self, db: aiosqlite.Connection) -> None:
|
| 48 |
-
"""Create database tables if they don't exist."""
|
| 49 |
-
await db.execute("""
|
| 50 |
-
CREATE TABLE IF NOT EXISTS custom_apps (
|
| 51 |
-
id TEXT PRIMARY KEY,
|
| 52 |
-
name TEXT NOT NULL,
|
| 53 |
-
description TEXT DEFAULT '',
|
| 54 |
-
system_prompt TEXT NOT NULL,
|
| 55 |
-
voice_id TEXT DEFAULT '',
|
| 56 |
-
emotion_animations TEXT DEFAULT '{}',
|
| 57 |
-
icon_color TEXT DEFAULT 'blue',
|
| 58 |
-
enabled_tools TEXT DEFAULT '[]',
|
| 59 |
-
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
| 60 |
-
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
| 61 |
-
)
|
| 62 |
-
""")
|
| 63 |
-
|
| 64 |
-
# Index for faster queries by creation date
|
| 65 |
-
await db.execute("""
|
| 66 |
-
CREATE INDEX IF NOT EXISTS idx_custom_apps_created
|
| 67 |
-
ON custom_apps(created_at)
|
| 68 |
-
""")
|
| 69 |
-
|
| 70 |
-
# Migration: Add enabled_tools column if it doesn't exist
|
| 71 |
-
await self._migrate_add_enabled_tools(db)
|
| 72 |
-
|
| 73 |
-
# Custom animations table for recorded joystick animations
|
| 74 |
-
await db.execute("""
|
| 75 |
-
CREATE TABLE IF NOT EXISTS custom_animations (
|
| 76 |
-
id TEXT PRIMARY KEY,
|
| 77 |
-
name TEXT NOT NULL,
|
| 78 |
-
description TEXT DEFAULT '',
|
| 79 |
-
duration_ms INTEGER NOT NULL,
|
| 80 |
-
start_pose TEXT NOT NULL,
|
| 81 |
-
keyframes TEXT NOT NULL,
|
| 82 |
-
audio_data TEXT DEFAULT NULL,
|
| 83 |
-
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
| 84 |
-
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
| 85 |
-
)
|
| 86 |
-
""")
|
| 87 |
-
|
| 88 |
-
# Migration: Add audio_data column if it doesn't exist
|
| 89 |
-
await self._migrate_add_audio_data(db)
|
| 90 |
-
|
| 91 |
-
# Index for faster queries by creation date
|
| 92 |
-
await db.execute("""
|
| 93 |
-
CREATE INDEX IF NOT EXISTS idx_custom_animations_created
|
| 94 |
-
ON custom_animations(created_at)
|
| 95 |
-
""")
|
| 96 |
-
|
| 97 |
-
# User settings table for personalization (name, preferences, etc.)
|
| 98 |
-
await db.execute("""
|
| 99 |
-
CREATE TABLE IF NOT EXISTS user_settings (
|
| 100 |
-
key TEXT PRIMARY KEY,
|
| 101 |
-
value TEXT NOT NULL,
|
| 102 |
-
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
| 103 |
-
)
|
| 104 |
-
""")
|
| 105 |
-
|
| 106 |
-
# Websites table for generated websites
|
| 107 |
-
await db.execute("""
|
| 108 |
-
CREATE TABLE IF NOT EXISTS websites (
|
| 109 |
-
id TEXT PRIMARY KEY,
|
| 110 |
-
title TEXT NOT NULL,
|
| 111 |
-
description TEXT DEFAULT '',
|
| 112 |
-
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
| 113 |
-
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
| 114 |
-
)
|
| 115 |
-
""")
|
| 116 |
-
|
| 117 |
-
# Index for faster queries by creation date
|
| 118 |
-
await db.execute("""
|
| 119 |
-
CREATE INDEX IF NOT EXISTS idx_websites_created
|
| 120 |
-
ON websites(created_at)
|
| 121 |
-
""")
|
| 122 |
-
|
| 123 |
-
# Mental notes table for AI-created notes
|
| 124 |
-
await db.execute("""
|
| 125 |
-
CREATE TABLE IF NOT EXISTS mental_notes (
|
| 126 |
-
id TEXT PRIMARY KEY,
|
| 127 |
-
title TEXT NOT NULL,
|
| 128 |
-
content TEXT NOT NULL,
|
| 129 |
-
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
| 130 |
-
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
| 131 |
-
)
|
| 132 |
-
""")
|
| 133 |
-
|
| 134 |
-
# Index for faster queries by creation date
|
| 135 |
-
await db.execute("""
|
| 136 |
-
CREATE INDEX IF NOT EXISTS idx_mental_notes_created
|
| 137 |
-
ON mental_notes(created_at)
|
| 138 |
-
""")
|
| 139 |
-
|
| 140 |
-
# Meetings table for meeting transcriptions
|
| 141 |
-
await db.execute("""
|
| 142 |
-
CREATE TABLE IF NOT EXISTS meetings (
|
| 143 |
-
id TEXT PRIMARY KEY,
|
| 144 |
-
title TEXT NOT NULL,
|
| 145 |
-
transcript TEXT DEFAULT '',
|
| 146 |
-
action_items TEXT DEFAULT '[]',
|
| 147 |
-
summary TEXT DEFAULT '',
|
| 148 |
-
duration_seconds INTEGER DEFAULT 0,
|
| 149 |
-
status TEXT DEFAULT 'recording',
|
| 150 |
-
started_at TIMESTAMP,
|
| 151 |
-
ended_at TIMESTAMP,
|
| 152 |
-
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
| 153 |
-
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
| 154 |
-
)
|
| 155 |
-
""")
|
| 156 |
-
|
| 157 |
-
# Index for faster queries by creation date
|
| 158 |
-
await db.execute("""
|
| 159 |
-
CREATE INDEX IF NOT EXISTS idx_meetings_created
|
| 160 |
-
ON meetings(created_at)
|
| 161 |
-
""")
|
| 162 |
-
|
| 163 |
-
# Index for faster queries by status
|
| 164 |
-
await db.execute("""
|
| 165 |
-
CREATE INDEX IF NOT EXISTS idx_meetings_status
|
| 166 |
-
ON meetings(status)
|
| 167 |
-
""")
|
| 168 |
-
|
| 169 |
-
# Scheduled messages table for scheduled iMessage/WhatsApp messages
|
| 170 |
-
await db.execute("""
|
| 171 |
-
CREATE TABLE IF NOT EXISTS scheduled_messages (
|
| 172 |
-
id TEXT PRIMARY KEY,
|
| 173 |
-
recipient_name TEXT NOT NULL,
|
| 174 |
-
recipient_phone TEXT NOT NULL,
|
| 175 |
-
message_content TEXT NOT NULL,
|
| 176 |
-
scheduled_time TIMESTAMP NOT NULL,
|
| 177 |
-
platform TEXT NOT NULL,
|
| 178 |
-
status TEXT DEFAULT 'pending',
|
| 179 |
-
notification_id TEXT,
|
| 180 |
-
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
| 181 |
-
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
| 182 |
-
)
|
| 183 |
-
""")
|
| 184 |
-
|
| 185 |
-
# Index for faster queries by scheduled time
|
| 186 |
-
await db.execute("""
|
| 187 |
-
CREATE INDEX IF NOT EXISTS idx_scheduled_messages_time
|
| 188 |
-
ON scheduled_messages(scheduled_time)
|
| 189 |
-
""")
|
| 190 |
-
|
| 191 |
-
# Index for faster queries by status
|
| 192 |
-
await db.execute("""
|
| 193 |
-
CREATE INDEX IF NOT EXISTS idx_scheduled_messages_status
|
| 194 |
-
ON scheduled_messages(status)
|
| 195 |
-
""")
|
| 196 |
-
|
| 197 |
-
# TamaReachy pet game state table
|
| 198 |
-
await db.execute("""
|
| 199 |
-
CREATE TABLE IF NOT EXISTS tamareachy_state (
|
| 200 |
-
id INTEGER PRIMARY KEY DEFAULT 1,
|
| 201 |
-
enabled INTEGER DEFAULT 0,
|
| 202 |
-
hunger INTEGER DEFAULT 100,
|
| 203 |
-
thirst INTEGER DEFAULT 100,
|
| 204 |
-
happiness INTEGER DEFAULT 100,
|
| 205 |
-
energy INTEGER DEFAULT 100,
|
| 206 |
-
boredom INTEGER DEFAULT 100,
|
| 207 |
-
social INTEGER DEFAULT 100,
|
| 208 |
-
health INTEGER DEFAULT 100,
|
| 209 |
-
cleanliness INTEGER DEFAULT 100,
|
| 210 |
-
last_interaction TIMESTAMP,
|
| 211 |
-
last_decay_check TIMESTAMP,
|
| 212 |
-
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
| 213 |
-
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
| 214 |
-
)
|
| 215 |
-
""")
|
| 216 |
-
|
| 217 |
-
# Ensure there's exactly one row for TamaReachy state
|
| 218 |
-
await db.execute("""
|
| 219 |
-
INSERT OR IGNORE INTO tamareachy_state (id) VALUES (1)
|
| 220 |
-
""")
|
| 221 |
-
|
| 222 |
-
logger.debug("Database tables created/verified")
|
| 223 |
-
|
| 224 |
-
async def _migrate_add_enabled_tools(self, db: aiosqlite.Connection) -> None:
|
| 225 |
-
"""Add enabled_tools column to existing databases."""
|
| 226 |
-
try:
|
| 227 |
-
# Check if column exists
|
| 228 |
-
cursor = await db.execute("PRAGMA table_info(custom_apps)")
|
| 229 |
-
columns = await cursor.fetchall()
|
| 230 |
-
column_names = [col[1] for col in columns]
|
| 231 |
-
|
| 232 |
-
if "enabled_tools" not in column_names:
|
| 233 |
-
logger.info("Migrating database: adding enabled_tools column")
|
| 234 |
-
await db.execute(
|
| 235 |
-
"ALTER TABLE custom_apps ADD COLUMN enabled_tools TEXT DEFAULT '[]'"
|
| 236 |
-
)
|
| 237 |
-
await db.commit()
|
| 238 |
-
logger.info("Migration complete: enabled_tools column added")
|
| 239 |
-
except Exception as e:
|
| 240 |
-
logger.warning(f"Migration check failed (may be OK): {e}")
|
| 241 |
-
|
| 242 |
-
async def _migrate_add_audio_data(self, db: aiosqlite.Connection) -> None:
|
| 243 |
-
"""Add audio_data column to custom_animations table."""
|
| 244 |
-
try:
|
| 245 |
-
# Check if column exists
|
| 246 |
-
cursor = await db.execute("PRAGMA table_info(custom_animations)")
|
| 247 |
-
columns = await cursor.fetchall()
|
| 248 |
-
column_names = [col[1] for col in columns]
|
| 249 |
-
|
| 250 |
-
if "audio_data" not in column_names:
|
| 251 |
-
logger.info("Migrating database: adding audio_data column to animations")
|
| 252 |
-
await db.execute(
|
| 253 |
-
"ALTER TABLE custom_animations ADD COLUMN audio_data TEXT DEFAULT NULL"
|
| 254 |
-
)
|
| 255 |
-
await db.commit()
|
| 256 |
-
logger.info("Migration complete: audio_data column added to animations")
|
| 257 |
-
except Exception as e:
|
| 258 |
-
logger.warning(f"Animation audio migration check failed (may be OK): {e}")
|
| 259 |
-
|
| 260 |
-
# =========================================================================
|
| 261 |
-
# Custom Apps CRUD Operations
|
| 262 |
-
# =========================================================================
|
| 263 |
-
|
| 264 |
-
async def get_all_apps(self) -> list[dict]:
|
| 265 |
-
"""Get all custom apps from the database.
|
| 266 |
-
|
| 267 |
-
Returns:
|
| 268 |
-
List of custom app dictionaries.
|
| 269 |
-
"""
|
| 270 |
-
async with aiosqlite.connect(self.db_path) as db:
|
| 271 |
-
db.row_factory = aiosqlite.Row
|
| 272 |
-
cursor = await db.execute(
|
| 273 |
-
"SELECT * FROM custom_apps ORDER BY created_at DESC"
|
| 274 |
-
)
|
| 275 |
-
rows = await cursor.fetchall()
|
| 276 |
-
return [self._row_to_app(row) for row in rows]
|
| 277 |
-
|
| 278 |
-
async def get_app(self, app_id: str) -> Optional[dict]:
|
| 279 |
-
"""Get a single custom app by ID.
|
| 280 |
-
|
| 281 |
-
Args:
|
| 282 |
-
app_id: The UUID of the app.
|
| 283 |
-
|
| 284 |
-
Returns:
|
| 285 |
-
The app dictionary, or None if not found.
|
| 286 |
-
"""
|
| 287 |
-
async with aiosqlite.connect(self.db_path) as db:
|
| 288 |
-
db.row_factory = aiosqlite.Row
|
| 289 |
-
cursor = await db.execute(
|
| 290 |
-
"SELECT * FROM custom_apps WHERE id = ?", (app_id,)
|
| 291 |
-
)
|
| 292 |
-
row = await cursor.fetchone()
|
| 293 |
-
return self._row_to_app(row) if row else None
|
| 294 |
-
|
| 295 |
-
async def create_app(self, app_data: dict) -> dict:
|
| 296 |
-
"""Create a new custom app.
|
| 297 |
-
|
| 298 |
-
Args:
|
| 299 |
-
app_data: Dictionary containing app data (must include 'id').
|
| 300 |
-
|
| 301 |
-
Returns:
|
| 302 |
-
The created app dictionary.
|
| 303 |
-
"""
|
| 304 |
-
now = datetime.utcnow().isoformat()
|
| 305 |
-
emotion_animations = json.dumps(app_data.get("emotion_animations", {}))
|
| 306 |
-
enabled_tools = json.dumps(app_data.get("enabled_tools", []))
|
| 307 |
-
|
| 308 |
-
# Ensure created_at is always set (use provided value or current time)
|
| 309 |
-
created_at = app_data.get("created_at") or now
|
| 310 |
-
|
| 311 |
-
async with aiosqlite.connect(self.db_path) as db:
|
| 312 |
-
await db.execute(
|
| 313 |
-
"""
|
| 314 |
-
INSERT INTO custom_apps
|
| 315 |
-
(id, name, description, system_prompt, voice_id,
|
| 316 |
-
emotion_animations, icon_color, enabled_tools, created_at, updated_at)
|
| 317 |
-
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
| 318 |
-
""",
|
| 319 |
-
(
|
| 320 |
-
app_data["id"],
|
| 321 |
-
app_data["name"],
|
| 322 |
-
app_data.get("description", ""),
|
| 323 |
-
app_data["system_prompt"],
|
| 324 |
-
app_data.get("voice_id", ""),
|
| 325 |
-
emotion_animations,
|
| 326 |
-
app_data.get("icon_color", "blue"),
|
| 327 |
-
enabled_tools,
|
| 328 |
-
created_at,
|
| 329 |
-
now,
|
| 330 |
-
),
|
| 331 |
-
)
|
| 332 |
-
await db.commit()
|
| 333 |
-
|
| 334 |
-
logger.info(f"Created custom app: {app_data['name']} ({app_data['id']})")
|
| 335 |
-
return await self.get_app(app_data["id"])
|
| 336 |
-
|
| 337 |
-
async def update_app(self, app_id: str, app_data: dict) -> Optional[dict]:
|
| 338 |
-
"""Update an existing custom app.
|
| 339 |
-
|
| 340 |
-
Args:
|
| 341 |
-
app_id: The UUID of the app to update.
|
| 342 |
-
app_data: Dictionary containing updated app data.
|
| 343 |
-
|
| 344 |
-
Returns:
|
| 345 |
-
The updated app dictionary, or None if not found.
|
| 346 |
-
"""
|
| 347 |
-
existing = await self.get_app(app_id)
|
| 348 |
-
if not existing:
|
| 349 |
-
return None
|
| 350 |
-
|
| 351 |
-
now = datetime.utcnow().isoformat()
|
| 352 |
-
|
| 353 |
-
# Handle emotion_animations - use existing if not provided
|
| 354 |
-
if "emotion_animations" in app_data:
|
| 355 |
-
emotion_animations = json.dumps(app_data["emotion_animations"])
|
| 356 |
-
else:
|
| 357 |
-
emotion_animations = json.dumps(existing["emotion_animations"])
|
| 358 |
-
|
| 359 |
-
# Handle enabled_tools - use existing if not provided
|
| 360 |
-
if "enabled_tools" in app_data:
|
| 361 |
-
enabled_tools = json.dumps(app_data["enabled_tools"])
|
| 362 |
-
else:
|
| 363 |
-
enabled_tools = json.dumps(existing["enabled_tools"])
|
| 364 |
-
|
| 365 |
-
async with aiosqlite.connect(self.db_path) as db:
|
| 366 |
-
await db.execute(
|
| 367 |
-
"""
|
| 368 |
-
UPDATE custom_apps SET
|
| 369 |
-
name = ?,
|
| 370 |
-
description = ?,
|
| 371 |
-
system_prompt = ?,
|
| 372 |
-
voice_id = ?,
|
| 373 |
-
emotion_animations = ?,
|
| 374 |
-
icon_color = ?,
|
| 375 |
-
enabled_tools = ?,
|
| 376 |
-
updated_at = ?
|
| 377 |
-
WHERE id = ?
|
| 378 |
-
""",
|
| 379 |
-
(
|
| 380 |
-
app_data.get("name", existing["name"]),
|
| 381 |
-
app_data.get("description", existing["description"]),
|
| 382 |
-
app_data.get("system_prompt", existing["system_prompt"]),
|
| 383 |
-
app_data.get("voice_id", existing["voice_id"]),
|
| 384 |
-
emotion_animations,
|
| 385 |
-
app_data.get("icon_color", existing["icon_color"]),
|
| 386 |
-
enabled_tools,
|
| 387 |
-
now,
|
| 388 |
-
app_id,
|
| 389 |
-
),
|
| 390 |
-
)
|
| 391 |
-
await db.commit()
|
| 392 |
-
|
| 393 |
-
logger.info(f"Updated custom app: {app_id}")
|
| 394 |
-
return await self.get_app(app_id)
|
| 395 |
-
|
| 396 |
-
async def delete_app(self, app_id: str) -> bool:
|
| 397 |
-
"""Delete a custom app.
|
| 398 |
-
|
| 399 |
-
Args:
|
| 400 |
-
app_id: The UUID of the app to delete.
|
| 401 |
-
|
| 402 |
-
Returns:
|
| 403 |
-
True if deleted, False if not found.
|
| 404 |
-
"""
|
| 405 |
-
async with aiosqlite.connect(self.db_path) as db:
|
| 406 |
-
cursor = await db.execute(
|
| 407 |
-
"DELETE FROM custom_apps WHERE id = ?", (app_id,)
|
| 408 |
-
)
|
| 409 |
-
await db.commit()
|
| 410 |
-
deleted = cursor.rowcount > 0
|
| 411 |
-
|
| 412 |
-
if deleted:
|
| 413 |
-
logger.info(f"Deleted custom app: {app_id}")
|
| 414 |
-
return deleted
|
| 415 |
-
|
| 416 |
-
async def sync_apps(self, apps: list[dict]) -> list[dict]:
|
| 417 |
-
"""Bulk sync apps from iOS client.
|
| 418 |
-
|
| 419 |
-
This replaces all existing apps with the provided list.
|
| 420 |
-
Used for initial sync or full restore.
|
| 421 |
-
|
| 422 |
-
Args:
|
| 423 |
-
apps: List of app dictionaries to sync.
|
| 424 |
-
|
| 425 |
-
Returns:
|
| 426 |
-
List of all apps after sync.
|
| 427 |
-
"""
|
| 428 |
-
async with aiosqlite.connect(self.db_path) as db:
|
| 429 |
-
# Clear existing apps
|
| 430 |
-
await db.execute("DELETE FROM custom_apps")
|
| 431 |
-
|
| 432 |
-
# Insert all new apps
|
| 433 |
-
now = datetime.utcnow().isoformat()
|
| 434 |
-
for app in apps:
|
| 435 |
-
emotion_animations = json.dumps(app.get("emotion_animations", {}))
|
| 436 |
-
enabled_tools = json.dumps(app.get("enabled_tools", []))
|
| 437 |
-
# Ensure created_at is always set
|
| 438 |
-
created_at = app.get("created_at") or now
|
| 439 |
-
await db.execute(
|
| 440 |
-
"""
|
| 441 |
-
INSERT INTO custom_apps
|
| 442 |
-
(id, name, description, system_prompt, voice_id,
|
| 443 |
-
emotion_animations, icon_color, enabled_tools, created_at, updated_at)
|
| 444 |
-
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
| 445 |
-
""",
|
| 446 |
-
(
|
| 447 |
-
app["id"],
|
| 448 |
-
app["name"],
|
| 449 |
-
app.get("description", ""),
|
| 450 |
-
app["system_prompt"],
|
| 451 |
-
app.get("voice_id", ""),
|
| 452 |
-
emotion_animations,
|
| 453 |
-
app.get("icon_color", "blue"),
|
| 454 |
-
enabled_tools,
|
| 455 |
-
created_at,
|
| 456 |
-
now,
|
| 457 |
-
),
|
| 458 |
-
)
|
| 459 |
-
|
| 460 |
-
await db.commit()
|
| 461 |
-
|
| 462 |
-
logger.info(f"Synced {len(apps)} custom apps")
|
| 463 |
-
return await self.get_all_apps()
|
| 464 |
-
|
| 465 |
-
# =========================================================================
|
| 466 |
-
# Custom Animations CRUD Operations
|
| 467 |
-
# =========================================================================
|
| 468 |
-
|
| 469 |
-
async def get_all_animations(self) -> list[dict]:
|
| 470 |
-
"""Get all custom animations from the database.
|
| 471 |
-
|
| 472 |
-
Returns:
|
| 473 |
-
List of custom animation dictionaries.
|
| 474 |
-
"""
|
| 475 |
-
async with aiosqlite.connect(self.db_path) as db:
|
| 476 |
-
db.row_factory = aiosqlite.Row
|
| 477 |
-
cursor = await db.execute(
|
| 478 |
-
"SELECT * FROM custom_animations ORDER BY created_at DESC"
|
| 479 |
-
)
|
| 480 |
-
rows = await cursor.fetchall()
|
| 481 |
-
return [self._row_to_animation(row) for row in rows]
|
| 482 |
-
|
| 483 |
-
async def get_animation(self, animation_id: str) -> Optional[dict]:
|
| 484 |
-
"""Get a single custom animation by ID.
|
| 485 |
-
|
| 486 |
-
Args:
|
| 487 |
-
animation_id: The UUID of the animation.
|
| 488 |
-
|
| 489 |
-
Returns:
|
| 490 |
-
The animation dictionary, or None if not found.
|
| 491 |
-
"""
|
| 492 |
-
async with aiosqlite.connect(self.db_path) as db:
|
| 493 |
-
db.row_factory = aiosqlite.Row
|
| 494 |
-
cursor = await db.execute(
|
| 495 |
-
"SELECT * FROM custom_animations WHERE id = ?", (animation_id,)
|
| 496 |
-
)
|
| 497 |
-
row = await cursor.fetchone()
|
| 498 |
-
return self._row_to_animation(row) if row else None
|
| 499 |
-
|
| 500 |
-
async def create_animation(self, animation_data: dict) -> dict:
    """Insert a new custom animation row.

    Args:
        animation_data: Animation fields; 'id', 'name' and 'duration_ms'
            are required, pose/keyframe/audio fields are optional.

    Returns:
        The freshly stored animation, re-read from the database.
    """
    now = datetime.utcnow().isoformat()
    params = (
        animation_data["id"],
        animation_data["name"],
        animation_data.get("description", ""),
        animation_data["duration_ms"],
        json.dumps(animation_data.get("start_pose", {})),   # JSON column
        json.dumps(animation_data.get("keyframes", [])),    # JSON column
        animation_data.get("audio_data"),                   # base64 string or None
        animation_data.get("created_at") or now,            # never leave created_at empty
        now,
    )

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            INSERT INTO custom_animations
            (id, name, description, duration_ms, start_pose, keyframes,
             audio_data, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
            """,
            params,
        )
        await db.commit()

    logger.info(f"Created custom animation: {animation_data['name']} ({animation_data['id']})")
    return await self.get_animation(animation_data["id"])
|
| 541 |
-
|
| 542 |
-
async def update_animation(self, animation_id: str, animation_data: dict) -> Optional[dict]:
    """Apply partial updates to a stored custom animation.

    Fields absent from ``animation_data`` keep their current values.

    Args:
        animation_id: The UUID of the animation to modify.
        animation_data: Fields to overwrite.

    Returns:
        The updated animation dictionary, or None if the id is unknown.
    """
    existing = await self.get_animation(animation_id)
    if existing is None:
        return None

    # Overlay the requested changes on the current record.
    merged = {**existing, **animation_data}
    now = datetime.utcnow().isoformat()

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            UPDATE custom_animations SET
                name = ?,
                description = ?,
                duration_ms = ?,
                start_pose = ?,
                keyframes = ?,
                audio_data = ?,
                updated_at = ?
            WHERE id = ?
            """,
            (
                merged["name"],
                merged["description"],
                merged["duration_ms"],
                json.dumps(merged["start_pose"]),   # JSON column
                json.dumps(merged["keyframes"]),    # JSON column
                merged.get("audio_data"),           # may be absent -> NULL
                now,
                animation_id,
            ),
        )
        await db.commit()

    logger.info(f"Updated custom animation: {animation_id}")
    return await self.get_animation(animation_id)
|
| 604 |
-
|
| 605 |
-
async def delete_animation(self, animation_id: str) -> bool:
    """Remove a custom animation row.

    Args:
        animation_id: The UUID of the animation to remove.

    Returns:
        True when a row was removed, False when nothing matched.
    """
    async with aiosqlite.connect(self.db_path) as db:
        cur = await db.execute(
            "DELETE FROM custom_animations WHERE id = ?", (animation_id,)
        )
        await db.commit()
        removed = cur.rowcount > 0

    if removed:
        logger.info(f"Deleted custom animation: {animation_id}")
    return removed
|
| 624 |
-
|
| 625 |
-
# =========================================================================
|
| 626 |
-
# User Settings Operations
|
| 627 |
-
# =========================================================================
|
| 628 |
-
|
| 629 |
-
async def get_user_setting(self, key: str) -> Optional[str]:
    """Read a single user setting.

    Args:
        key: The setting key (e.g., 'user_name').

    Returns:
        The stored value, or None when the key does not exist.
    """
    async with aiosqlite.connect(self.db_path) as db:
        cur = await db.execute(
            "SELECT value FROM user_settings WHERE key = ?", (key,)
        )
        record = await cur.fetchone()
    return None if record is None else record[0]
|
| 644 |
-
|
| 645 |
-
async def set_user_setting(self, key: str, value: str) -> bool:
    """Upsert a single user setting.

    INSERT OR REPLACE covers both first writes and overwrites of a key.

    Args:
        key: The setting key.
        value: The setting value.

    Returns:
        Always True.
    """
    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            INSERT OR REPLACE INTO user_settings (key, value, updated_at)
            VALUES (?, ?, ?)
            """,
            (key, value, datetime.utcnow().isoformat()),
        )
        await db.commit()

    logger.info(f"Set user setting: {key}")
    return True
|
| 670 |
-
|
| 671 |
-
async def delete_user_setting(self, key: str) -> bool:
    """Remove a user setting.

    Args:
        key: The setting key to remove.

    Returns:
        True when a row was removed, False when nothing matched.
    """
    async with aiosqlite.connect(self.db_path) as db:
        cur = await db.execute(
            "DELETE FROM user_settings WHERE key = ?", (key,)
        )
        await db.commit()
        removed = cur.rowcount > 0

    if removed:
        logger.info(f"Deleted user setting: {key}")
    return removed
|
| 690 |
-
|
| 691 |
-
async def get_all_user_settings(self) -> dict[str, str]:
    """Load every user setting.

    Returns:
        Mapping of setting key to stored value.
    """
    async with aiosqlite.connect(self.db_path) as db:
        cur = await db.execute("SELECT key, value FROM user_settings")
        return {key: value for key, value in await cur.fetchall()}
|
| 701 |
-
|
| 702 |
-
# =========================================================================
|
| 703 |
-
# Websites CRUD Operations
|
| 704 |
-
# =========================================================================
|
| 705 |
-
|
| 706 |
-
async def get_all_websites(self) -> list[dict]:
    """Return every saved website, newest first.

    Returns:
        All rows of ``websites`` mapped to dictionaries.
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cur = await db.execute(
            "SELECT * FROM websites ORDER BY created_at DESC"
        )
        return [self._row_to_website(r) for r in await cur.fetchall()]
|
| 719 |
-
|
| 720 |
-
async def get_website(self, website_id: str) -> Optional[dict]:
    """Look up one saved website.

    Args:
        website_id: The ID of the website.

    Returns:
        The matching website dictionary, or None when no row exists.
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cur = await db.execute(
            "SELECT * FROM websites WHERE id = ?", (website_id,)
        )
        record = await cur.fetchone()
        if record is None:
            return None
        return self._row_to_website(record)
|
| 736 |
-
|
| 737 |
-
async def create_website(self, website_data: dict) -> dict:
    """Insert a new website row.

    Args:
        website_data: Website fields; 'id' and 'title' are required.

    Returns:
        The freshly stored website, re-read from the database.
    """
    now = datetime.utcnow().isoformat()

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            INSERT INTO websites (id, title, description, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?)
            """,
            (
                website_data["id"],
                website_data["title"],
                website_data.get("description", ""),
                website_data.get("created_at") or now,  # never leave created_at empty
                now,
            ),
        )
        await db.commit()

    logger.info(f"Created website: {website_data['title']} ({website_data['id']})")
    return await self.get_website(website_data["id"])
|
| 767 |
-
|
| 768 |
-
async def update_website(self, website_id: str, website_data: dict) -> Optional[dict]:
    """Apply partial updates to a stored website.

    Fields absent from ``website_data`` keep their current values.

    Args:
        website_id: The ID of the website to modify.
        website_data: Fields to overwrite.

    Returns:
        The updated website dictionary, or None if the id is unknown.
    """
    existing = await self.get_website(website_id)
    if existing is None:
        return None

    # Overlay the requested changes on the current record.
    merged = {**existing, **website_data}

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            UPDATE websites SET
                title = ?,
                description = ?,
                updated_at = ?
            WHERE id = ?
            """,
            (
                merged["title"],
                merged["description"],
                datetime.utcnow().isoformat(),
                website_id,
            ),
        )
        await db.commit()

    logger.info(f"Updated website: {website_id}")
    return await self.get_website(website_id)
|
| 804 |
-
|
| 805 |
-
async def delete_website(self, website_id: str) -> bool:
    """Remove a website row.

    Args:
        website_id: The ID of the website to remove.

    Returns:
        True when a row was removed, False when nothing matched.
    """
    async with aiosqlite.connect(self.db_path) as db:
        cur = await db.execute(
            "DELETE FROM websites WHERE id = ?", (website_id,)
        )
        await db.commit()
        removed = cur.rowcount > 0

    if removed:
        logger.info(f"Deleted website: {website_id}")
    return removed
|
| 824 |
-
|
| 825 |
-
# =========================================================================
|
| 826 |
-
# Mental Notes CRUD Operations
|
| 827 |
-
# =========================================================================
|
| 828 |
-
|
| 829 |
-
async def get_all_notes(self) -> list[dict]:
    """Return every mental note, newest first.

    Returns:
        All rows of ``mental_notes`` mapped to dictionaries.
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cur = await db.execute(
            "SELECT * FROM mental_notes ORDER BY created_at DESC"
        )
        return [self._row_to_note(r) for r in await cur.fetchall()]
|
| 842 |
-
|
| 843 |
-
async def get_note(self, note_id: str) -> Optional[dict]:
    """Look up one mental note.

    Args:
        note_id: The ID of the note.

    Returns:
        The matching note dictionary, or None when no row exists.
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cur = await db.execute(
            "SELECT * FROM mental_notes WHERE id = ?", (note_id,)
        )
        record = await cur.fetchone()
        if record is None:
            return None
        return self._row_to_note(record)
|
| 859 |
-
|
| 860 |
-
async def create_note(self, note_data: dict) -> dict:
    """Insert a new mental note row.

    Args:
        note_data: Note fields; 'id', 'title' and 'content' are required.

    Returns:
        The freshly stored note, re-read from the database.
    """
    now = datetime.utcnow().isoformat()

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            INSERT INTO mental_notes (id, title, content, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?)
            """,
            (
                note_data["id"],
                note_data["title"],
                note_data["content"],
                note_data.get("created_at") or now,  # never leave created_at empty
                now,
            ),
        )
        await db.commit()

    logger.info(f"Created mental note: {note_data['title']} ({note_data['id']})")
    return await self.get_note(note_data["id"])
|
| 890 |
-
|
| 891 |
-
async def update_note(self, note_id: str, note_data: dict) -> Optional[dict]:
    """Apply partial updates to a stored mental note.

    Fields absent from ``note_data`` keep their current values.

    Args:
        note_id: The ID of the note to modify.
        note_data: Fields to overwrite.

    Returns:
        The updated note dictionary, or None if the id is unknown.
    """
    existing = await self.get_note(note_id)
    if existing is None:
        return None

    # Overlay the requested changes on the current record.
    merged = {**existing, **note_data}

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            UPDATE mental_notes SET
                title = ?,
                content = ?,
                updated_at = ?
            WHERE id = ?
            """,
            (
                merged["title"],
                merged["content"],
                datetime.utcnow().isoformat(),
                note_id,
            ),
        )
        await db.commit()

    logger.info(f"Updated mental note: {note_id}")
    return await self.get_note(note_id)
|
| 927 |
-
|
| 928 |
-
async def delete_note(self, note_id: str) -> bool:
    """Remove a mental note row.

    Args:
        note_id: The ID of the note to remove.

    Returns:
        True when a row was removed, False when nothing matched.
    """
    async with aiosqlite.connect(self.db_path) as db:
        cur = await db.execute(
            "DELETE FROM mental_notes WHERE id = ?", (note_id,)
        )
        await db.commit()
        removed = cur.rowcount > 0

    if removed:
        logger.info(f"Deleted mental note: {note_id}")
    return removed
|
| 947 |
-
|
| 948 |
-
# =========================================================================
|
| 949 |
-
# Meetings CRUD Operations
|
| 950 |
-
# =========================================================================
|
| 951 |
-
|
| 952 |
-
async def get_all_meetings(self) -> list[dict]:
    """Return every meeting, newest first.

    Returns:
        All rows of ``meetings`` mapped to dictionaries.
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cur = await db.execute(
            "SELECT * FROM meetings ORDER BY created_at DESC"
        )
        return [self._row_to_meeting(r) for r in await cur.fetchall()]
|
| 965 |
-
|
| 966 |
-
async def get_meeting(self, meeting_id: str) -> Optional[dict]:
    """Look up one meeting.

    Args:
        meeting_id: The ID of the meeting.

    Returns:
        The matching meeting dictionary, or None when no row exists.
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cur = await db.execute(
            "SELECT * FROM meetings WHERE id = ?", (meeting_id,)
        )
        record = await cur.fetchone()
        if record is None:
            return None
        return self._row_to_meeting(record)
|
| 982 |
-
|
| 983 |
-
async def get_active_meeting(self) -> Optional[dict]:
    """Find the meeting currently being recorded, if any.

    Returns:
        The meeting dict whose status is 'recording', or None when
        no recording is in progress.
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cur = await db.execute(
            "SELECT * FROM meetings WHERE status = 'recording' LIMIT 1"
        )
        record = await cur.fetchone()
        if record is None:
            return None
        return self._row_to_meeting(record)
|
| 996 |
-
|
| 997 |
-
async def create_meeting(self, meeting_data: dict) -> dict:
    """Insert a new meeting row.

    Args:
        meeting_data: Meeting fields; 'id' and 'title' are required.
            Status defaults to 'recording'; started_at defaults to now.

    Returns:
        The freshly stored meeting, re-read from the database.
    """
    now = datetime.utcnow().isoformat()

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            INSERT INTO meetings
            (id, title, transcript, action_items, summary, duration_seconds,
             status, started_at, ended_at, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """,
            (
                meeting_data["id"],
                meeting_data["title"],
                meeting_data.get("transcript", ""),
                json.dumps(meeting_data.get("action_items", [])),  # JSON column
                meeting_data.get("summary", ""),
                meeting_data.get("duration_seconds", 0),
                meeting_data.get("status", "recording"),
                meeting_data.get("started_at") or now,  # default start is creation time
                meeting_data.get("ended_at"),
                now,
                now,
            ),
        )
        await db.commit()

    logger.info(f"Created meeting: {meeting_data['title']} ({meeting_data['id']})")
    return await self.get_meeting(meeting_data["id"])
|
| 1036 |
-
|
| 1037 |
-
async def update_meeting(self, meeting_id: str, meeting_data: dict) -> Optional[dict]:
    """Apply partial updates to a stored meeting.

    Fields absent from ``meeting_data`` keep their current values.

    Args:
        meeting_id: The ID of the meeting to modify.
        meeting_data: Fields to overwrite.

    Returns:
        The updated meeting dictionary, or None if the id is unknown.
    """
    existing = await self.get_meeting(meeting_id)
    if existing is None:
        return None

    # Overlay the requested changes on the current record.
    merged = {**existing, **meeting_data}
    now = datetime.utcnow().isoformat()

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            UPDATE meetings SET
                title = ?,
                transcript = ?,
                action_items = ?,
                summary = ?,
                duration_seconds = ?,
                status = ?,
                started_at = ?,
                ended_at = ?,
                updated_at = ?
            WHERE id = ?
            """,
            (
                merged["title"],
                merged["transcript"],
                json.dumps(merged["action_items"]),  # JSON column
                merged["summary"],
                merged["duration_seconds"],
                merged["status"],
                merged["started_at"],
                merged["ended_at"],
                now,
                meeting_id,
            ),
        )
        await db.commit()

    logger.info(f"Updated meeting: {meeting_id}")
    return await self.get_meeting(meeting_id)
|
| 1091 |
-
|
| 1092 |
-
async def append_to_meeting_transcript(
    self, meeting_id: str, text: str
) -> Optional[dict]:
    """Concatenate text onto a meeting's stored transcript.

    Args:
        meeting_id: The meeting to extend.
        text: Transcript fragment appended verbatim.

    Returns:
        The updated meeting dictionary, or None if the meeting is unknown.
    """
    existing = await self.get_meeting(meeting_id)
    if existing is None:
        return None

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            UPDATE meetings SET
                transcript = ?,
                updated_at = ?
            WHERE id = ?
            """,
            (
                existing["transcript"] + text,
                datetime.utcnow().isoformat(),
                meeting_id,
            ),
        )
        await db.commit()

    logger.debug(f"Appended to meeting transcript: {meeting_id}")
    return await self.get_meeting(meeting_id)
|
| 1125 |
-
|
| 1126 |
-
async def delete_meeting(self, meeting_id: str) -> bool:
    """Remove a meeting row.

    Args:
        meeting_id: The ID of the meeting to remove.

    Returns:
        True when a row was removed, False when nothing matched.
    """
    async with aiosqlite.connect(self.db_path) as db:
        cur = await db.execute(
            "DELETE FROM meetings WHERE id = ?", (meeting_id,)
        )
        await db.commit()
        removed = cur.rowcount > 0

    if removed:
        logger.info(f"Deleted meeting: {meeting_id}")
    return removed
|
| 1145 |
-
|
| 1146 |
-
# =========================================================================
|
| 1147 |
-
# Scheduled Messages CRUD Operations
|
| 1148 |
-
# =========================================================================
|
| 1149 |
-
|
| 1150 |
-
async def get_all_scheduled_messages(self, status: Optional[str] = None) -> list[dict]:
    """List scheduled messages, soonest first.

    Args:
        status: When given, restrict to that status
            ('pending', 'sent', 'cancelled').

    Returns:
        Matching scheduled-message dictionaries ordered by scheduled_time.
    """
    if status:
        query = "SELECT * FROM scheduled_messages WHERE status = ? ORDER BY scheduled_time ASC"
        params: tuple = (status,)
    else:
        query = "SELECT * FROM scheduled_messages ORDER BY scheduled_time ASC"
        params = ()

    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cur = await db.execute(query, params)
        return [self._row_to_scheduled_message(r) for r in await cur.fetchall()]
|
| 1172 |
-
|
| 1173 |
-
async def get_scheduled_message(self, message_id: str) -> Optional[dict]:
    """Look up one scheduled message.

    Args:
        message_id: The ID of the scheduled message.

    Returns:
        The matching message dictionary, or None when no row exists.
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cur = await db.execute(
            "SELECT * FROM scheduled_messages WHERE id = ?", (message_id,)
        )
        record = await cur.fetchone()
        if record is None:
            return None
        return self._row_to_scheduled_message(record)
|
| 1189 |
-
|
| 1190 |
-
async def get_pending_scheduled_messages(self) -> list[dict]:
    """Convenience wrapper: messages still awaiting delivery.

    Returns:
        Scheduled-message dictionaries whose status is 'pending'.
    """
    return await self.get_all_scheduled_messages("pending")
|
| 1197 |
-
|
| 1198 |
-
async def create_scheduled_message(self, message_data: dict) -> dict:
    """Insert a new scheduled message row.

    Args:
        message_data: Message fields.
            Required: id, recipient_name, recipient_phone,
            message_content, scheduled_time, platform.
            Optional: status (defaults to 'pending'), notification_id.

    Returns:
        The freshly stored message, re-read from the database.
    """
    now = datetime.utcnow().isoformat()

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            INSERT INTO scheduled_messages
            (id, recipient_name, recipient_phone, message_content,
             scheduled_time, platform, status, notification_id, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """,
            (
                message_data["id"],
                message_data["recipient_name"],
                message_data["recipient_phone"],
                message_data["message_content"],
                message_data["scheduled_time"],
                message_data["platform"],
                message_data.get("status", "pending"),
                message_data.get("notification_id"),
                now,
                now,
            ),
        )
        await db.commit()

    logger.info(f"Created scheduled message: {message_data['id']} for {message_data['recipient_name']}")
    return await self.get_scheduled_message(message_data["id"])
|
| 1237 |
-
|
| 1238 |
-
async def update_scheduled_message(
    self, message_id: str, message_data: dict
) -> Optional[dict]:
    """Apply partial updates to a scheduled message.

    Fields absent from ``message_data`` keep their current values.

    Args:
        message_id: The ID of the scheduled message to modify.
        message_data: Fields to overwrite.

    Returns:
        The updated message dictionary, or None if the id is unknown.
    """
    existing = await self.get_scheduled_message(message_id)
    if existing is None:
        return None

    # Overlay the requested changes on the current record.
    merged = {**existing, **message_data}

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            UPDATE scheduled_messages SET
                recipient_name = ?,
                recipient_phone = ?,
                message_content = ?,
                scheduled_time = ?,
                platform = ?,
                status = ?,
                notification_id = ?,
                updated_at = ?
            WHERE id = ?
            """,
            (
                merged["recipient_name"],
                merged["recipient_phone"],
                merged["message_content"],
                merged["scheduled_time"],
                merged["platform"],
                merged["status"],
                merged["notification_id"],
                datetime.utcnow().isoformat(),
                message_id,
            ),
        )
        await db.commit()

    logger.info(f"Updated scheduled message: {message_id}")
    return await self.get_scheduled_message(message_id)
|
| 1286 |
-
|
| 1287 |
-
async def update_scheduled_message_status(
    self, message_id: str, status: str
) -> Optional[dict]:
    """Change only the status field of a scheduled message.

    Args:
        message_id: The scheduled message to modify.
        status: New status ('pending', 'sent', 'cancelled').

    Returns:
        The updated message dictionary, or None if not found.
    """
    return await self.update_scheduled_message(message_id, {"status": status})
|
| 1300 |
-
|
| 1301 |
-
async def delete_scheduled_message(self, message_id: str) -> bool:
    """Remove a scheduled message row.

    Args:
        message_id: The ID of the scheduled message to remove.

    Returns:
        True when a row was removed, False when nothing matched.
    """
    async with aiosqlite.connect(self.db_path) as db:
        cur = await db.execute(
            "DELETE FROM scheduled_messages WHERE id = ?", (message_id,)
        )
        await db.commit()
        removed = cur.rowcount > 0

    if removed:
        logger.info(f"Deleted scheduled message: {message_id}")
    return removed
|
| 1320 |
-
|
| 1321 |
-
# =========================================================================
|
| 1322 |
-
# TamaReachy Operations
|
| 1323 |
-
# =========================================================================
|
| 1324 |
-
|
| 1325 |
-
async def get_tamareachy_state(self) -> dict:
    """Fetch the singleton TamaReachy state row (id = 1).

    Returns:
        The stored state dictionary, or a default (disabled, all stats
        at 100) when the row has not been created yet.
    """
    async with aiosqlite.connect(self.db_path) as db:
        db.row_factory = aiosqlite.Row
        cur = await db.execute(
            "SELECT * FROM tamareachy_state WHERE id = 1"
        )
        record = await cur.fetchone()

    if record is not None:
        return self._row_to_tamareachy(record)

    # No persisted state yet: pet disabled, every stat maxed out.
    return dict(
        enabled=False,
        hunger=100,
        thirst=100,
        happiness=100,
        energy=100,
        boredom=100,
        social=100,
        health=100,
        cleanliness=100,
        last_interaction=None,
        last_decay_check=None,
    )
|
| 1353 |
-
|
| 1354 |
-
async def update_tamareachy_state(self, state_data: dict) -> dict:
    """Persist partial updates to the singleton TamaReachy state.

    Unspecified fields retain their current values.

    Args:
        state_data: Subset of state fields to overwrite.

    Returns:
        The state dictionary as stored after the update.
    """
    # Overlay the requested changes on the current (or default) state.
    merged = {**(await self.get_tamareachy_state()), **state_data}
    now = datetime.utcnow().isoformat()

    async with aiosqlite.connect(self.db_path) as db:
        await db.execute(
            """
            UPDATE tamareachy_state SET
                enabled = ?,
                hunger = ?,
                thirst = ?,
                happiness = ?,
                energy = ?,
                boredom = ?,
                social = ?,
                health = ?,
                cleanliness = ?,
                last_interaction = ?,
                last_decay_check = ?,
                updated_at = ?
            WHERE id = 1
            """,
            (
                1 if merged["enabled"] else 0,  # stored as an integer flag
                merged["hunger"],
                merged["thirst"],
                merged["happiness"],
                merged["energy"],
                merged["boredom"],
                merged["social"],
                merged["health"],
                merged["cleanliness"],
                merged["last_interaction"],
                merged["last_decay_check"],
                now,
            ),
        )
        await db.commit()

    logger.info("Updated TamaReachy state")
    return await self.get_tamareachy_state()
|
| 1403 |
-
|
| 1404 |
-
async def reset_tamareachy_stats(self) -> dict:
|
| 1405 |
-
"""Reset all TamaReachy stats to 100.
|
| 1406 |
-
|
| 1407 |
-
Returns:
|
| 1408 |
-
The reset TamaReachy state dictionary.
|
| 1409 |
-
"""
|
| 1410 |
-
now = datetime.utcnow().isoformat()
|
| 1411 |
-
return await self.update_tamareachy_state({
|
| 1412 |
-
"hunger": 100,
|
| 1413 |
-
"thirst": 100,
|
| 1414 |
-
"happiness": 100,
|
| 1415 |
-
"energy": 100,
|
| 1416 |
-
"boredom": 100,
|
| 1417 |
-
"social": 100,
|
| 1418 |
-
"health": 100,
|
| 1419 |
-
"cleanliness": 100,
|
| 1420 |
-
"last_interaction": now,
|
| 1421 |
-
"last_decay_check": now,
|
| 1422 |
-
})
|
| 1423 |
-
|
| 1424 |
-
def _row_to_tamareachy(self, row: aiosqlite.Row) -> dict:
|
| 1425 |
-
"""Convert a database row to a TamaReachy state dictionary.
|
| 1426 |
-
|
| 1427 |
-
Args:
|
| 1428 |
-
row: The database row.
|
| 1429 |
-
|
| 1430 |
-
Returns:
|
| 1431 |
-
TamaReachy state dictionary with proper types.
|
| 1432 |
-
"""
|
| 1433 |
-
return {
|
| 1434 |
-
"enabled": bool(row["enabled"]),
|
| 1435 |
-
"hunger": row["hunger"],
|
| 1436 |
-
"thirst": row["thirst"],
|
| 1437 |
-
"happiness": row["happiness"],
|
| 1438 |
-
"energy": row["energy"],
|
| 1439 |
-
"boredom": row["boredom"],
|
| 1440 |
-
"social": row["social"],
|
| 1441 |
-
"health": row["health"],
|
| 1442 |
-
"cleanliness": row["cleanliness"],
|
| 1443 |
-
"last_interaction": row["last_interaction"],
|
| 1444 |
-
"last_decay_check": row["last_decay_check"],
|
| 1445 |
-
"created_at": row["created_at"],
|
| 1446 |
-
"updated_at": row["updated_at"],
|
| 1447 |
-
}
|
| 1448 |
-
|
| 1449 |
-
# =========================================================================
|
| 1450 |
-
# Helper Methods
|
| 1451 |
-
# =========================================================================
|
| 1452 |
-
|
| 1453 |
-
def _row_to_animation(self, row: aiosqlite.Row) -> dict:
|
| 1454 |
-
"""Convert a database row to an animation dictionary.
|
| 1455 |
-
|
| 1456 |
-
Args:
|
| 1457 |
-
row: The database row.
|
| 1458 |
-
|
| 1459 |
-
Returns:
|
| 1460 |
-
Animation dictionary with proper types.
|
| 1461 |
-
"""
|
| 1462 |
-
# Handle audio_data - may not exist in older databases
|
| 1463 |
-
audio_data = row["audio_data"] if "audio_data" in row.keys() else None
|
| 1464 |
-
|
| 1465 |
-
return {
|
| 1466 |
-
"id": row["id"],
|
| 1467 |
-
"name": row["name"],
|
| 1468 |
-
"description": row["description"],
|
| 1469 |
-
"duration_ms": row["duration_ms"],
|
| 1470 |
-
"start_pose": json.loads(row["start_pose"]),
|
| 1471 |
-
"keyframes": json.loads(row["keyframes"]),
|
| 1472 |
-
"audio_data": audio_data,
|
| 1473 |
-
"created_at": row["created_at"],
|
| 1474 |
-
"updated_at": row["updated_at"],
|
| 1475 |
-
}
|
| 1476 |
-
|
| 1477 |
-
def _row_to_app(self, row: aiosqlite.Row) -> dict:
|
| 1478 |
-
"""Convert a database row to an app dictionary.
|
| 1479 |
-
|
| 1480 |
-
Args:
|
| 1481 |
-
row: The database row.
|
| 1482 |
-
|
| 1483 |
-
Returns:
|
| 1484 |
-
App dictionary with proper types.
|
| 1485 |
-
"""
|
| 1486 |
-
# Handle enabled_tools - may not exist in older databases
|
| 1487 |
-
enabled_tools_raw = row["enabled_tools"] if "enabled_tools" in row.keys() else "[]"
|
| 1488 |
-
|
| 1489 |
-
return {
|
| 1490 |
-
"id": row["id"],
|
| 1491 |
-
"name": row["name"],
|
| 1492 |
-
"description": row["description"],
|
| 1493 |
-
"system_prompt": row["system_prompt"],
|
| 1494 |
-
"voice_id": row["voice_id"],
|
| 1495 |
-
"emotion_animations": json.loads(row["emotion_animations"]),
|
| 1496 |
-
"icon_color": row["icon_color"],
|
| 1497 |
-
"enabled_tools": json.loads(enabled_tools_raw) if enabled_tools_raw else [],
|
| 1498 |
-
"created_at": row["created_at"],
|
| 1499 |
-
"updated_at": row["updated_at"],
|
| 1500 |
-
}
|
| 1501 |
-
|
| 1502 |
-
def _row_to_website(self, row: aiosqlite.Row) -> dict:
|
| 1503 |
-
"""Convert a database row to a website dictionary.
|
| 1504 |
-
|
| 1505 |
-
Args:
|
| 1506 |
-
row: The database row.
|
| 1507 |
-
|
| 1508 |
-
Returns:
|
| 1509 |
-
Website dictionary with proper types.
|
| 1510 |
-
"""
|
| 1511 |
-
return {
|
| 1512 |
-
"id": row["id"],
|
| 1513 |
-
"title": row["title"],
|
| 1514 |
-
"description": row["description"],
|
| 1515 |
-
"created_at": row["created_at"],
|
| 1516 |
-
"updated_at": row["updated_at"],
|
| 1517 |
-
}
|
| 1518 |
-
|
| 1519 |
-
def _row_to_note(self, row: aiosqlite.Row) -> dict:
|
| 1520 |
-
"""Convert a database row to a mental note dictionary.
|
| 1521 |
-
|
| 1522 |
-
Args:
|
| 1523 |
-
row: The database row.
|
| 1524 |
-
|
| 1525 |
-
Returns:
|
| 1526 |
-
Mental note dictionary with proper types.
|
| 1527 |
-
"""
|
| 1528 |
-
return {
|
| 1529 |
-
"id": row["id"],
|
| 1530 |
-
"title": row["title"],
|
| 1531 |
-
"content": row["content"],
|
| 1532 |
-
"created_at": row["created_at"],
|
| 1533 |
-
"updated_at": row["updated_at"],
|
| 1534 |
-
}
|
| 1535 |
-
|
| 1536 |
-
def _row_to_meeting(self, row: aiosqlite.Row) -> dict:
|
| 1537 |
-
"""Convert a database row to a meeting dictionary.
|
| 1538 |
-
|
| 1539 |
-
Args:
|
| 1540 |
-
row: The database row.
|
| 1541 |
-
|
| 1542 |
-
Returns:
|
| 1543 |
-
Meeting dictionary with proper types.
|
| 1544 |
-
"""
|
| 1545 |
-
action_items_raw = row["action_items"] if row["action_items"] else "[]"
|
| 1546 |
-
return {
|
| 1547 |
-
"id": row["id"],
|
| 1548 |
-
"title": row["title"],
|
| 1549 |
-
"transcript": row["transcript"] or "",
|
| 1550 |
-
"action_items": json.loads(action_items_raw),
|
| 1551 |
-
"summary": row["summary"] or "",
|
| 1552 |
-
"duration_seconds": row["duration_seconds"] or 0,
|
| 1553 |
-
"status": row["status"] or "recording",
|
| 1554 |
-
"started_at": row["started_at"],
|
| 1555 |
-
"ended_at": row["ended_at"],
|
| 1556 |
-
"created_at": row["created_at"],
|
| 1557 |
-
"updated_at": row["updated_at"],
|
| 1558 |
-
}
|
| 1559 |
-
|
| 1560 |
-
def _row_to_scheduled_message(self, row: aiosqlite.Row) -> dict:
|
| 1561 |
-
"""Convert a database row to a scheduled message dictionary.
|
| 1562 |
-
|
| 1563 |
-
Args:
|
| 1564 |
-
row: The database row.
|
| 1565 |
-
|
| 1566 |
-
Returns:
|
| 1567 |
-
Scheduled message dictionary with proper types.
|
| 1568 |
-
"""
|
| 1569 |
-
return {
|
| 1570 |
-
"id": row["id"],
|
| 1571 |
-
"recipient_name": row["recipient_name"],
|
| 1572 |
-
"recipient_phone": row["recipient_phone"],
|
| 1573 |
-
"message_content": row["message_content"],
|
| 1574 |
-
"scheduled_time": row["scheduled_time"],
|
| 1575 |
-
"platform": row["platform"],
|
| 1576 |
-
"status": row["status"] or "pending",
|
| 1577 |
-
"notification_id": row["notification_id"],
|
| 1578 |
-
"created_at": row["created_at"],
|
| 1579 |
-
"updated_at": row["updated_at"],
|
| 1580 |
-
}
|
| 1581 |
-
|
| 1582 |
-
async def close(self) -> None:
|
| 1583 |
-
"""Close database connection (if using persistent connection)."""
|
| 1584 |
-
if self._connection:
|
| 1585 |
-
await self._connection.close()
|
| 1586 |
-
self._connection = None
|
| 1587 |
-
|
| 1588 |
-
|
| 1589 |
-
# Global database service instance
|
| 1590 |
-
db_service: Optional[DatabaseService] = None
|
| 1591 |
-
|
| 1592 |
-
|
| 1593 |
-
def get_database() -> DatabaseService:
|
| 1594 |
-
"""Get the global database service instance.
|
| 1595 |
-
|
| 1596 |
-
Returns:
|
| 1597 |
-
The database service.
|
| 1598 |
-
|
| 1599 |
-
Raises:
|
| 1600 |
-
RuntimeError: If database not initialized.
|
| 1601 |
-
"""
|
| 1602 |
-
if db_service is None:
|
| 1603 |
-
raise RuntimeError("Database not initialized. Call init_database() first.")
|
| 1604 |
-
return db_service
|
| 1605 |
-
|
| 1606 |
-
|
| 1607 |
-
async def init_database(db_path: Optional[Path] = None) -> DatabaseService:
|
| 1608 |
-
"""Initialize the global database service.
|
| 1609 |
-
|
| 1610 |
-
Args:
|
| 1611 |
-
db_path: Optional custom database path.
|
| 1612 |
-
|
| 1613 |
-
Returns:
|
| 1614 |
-
The initialized database service.
|
| 1615 |
-
"""
|
| 1616 |
-
global db_service
|
| 1617 |
-
db_service = DatabaseService(db_path)
|
| 1618 |
-
await db_service.initialize()
|
| 1619 |
-
return db_service
|
| 1620 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
reachys_brain/database/__init__.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""SQLite database service for persistent storage.
|
| 2 |
+
|
| 3 |
+
Provides async database operations for storing custom apps, animations,
|
| 4 |
+
user settings, websites, notes, meetings, scheduled messages, and TamaReachy state.
|
| 5 |
+
Uses aiosqlite for non-blocking database access.
|
| 6 |
+
|
| 7 |
+
This module combines all database operations into a single DatabaseService class
|
| 8 |
+
using the mixin pattern for better code organization.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from typing import Optional
|
| 13 |
+
|
| 14 |
+
from .base import DatabaseService as _BaseDatabaseService, DATABASE_PATH, DATABASE_DIR
|
| 15 |
+
from .apps import AppsMixin
|
| 16 |
+
from .animations import AnimationsMixin
|
| 17 |
+
from .user_settings import UserSettingsMixin
|
| 18 |
+
from .websites import WebsitesMixin
|
| 19 |
+
from .notes import NotesMixin
|
| 20 |
+
from .meetings import MeetingsMixin
|
| 21 |
+
from .scheduled_messages import ScheduledMessagesMixin
|
| 22 |
+
from .tamareachy import TamaReachyMixin
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class DatabaseService(
|
| 26 |
+
_BaseDatabaseService,
|
| 27 |
+
AppsMixin,
|
| 28 |
+
AnimationsMixin,
|
| 29 |
+
UserSettingsMixin,
|
| 30 |
+
WebsitesMixin,
|
| 31 |
+
NotesMixin,
|
| 32 |
+
MeetingsMixin,
|
| 33 |
+
ScheduledMessagesMixin,
|
| 34 |
+
TamaReachyMixin,
|
| 35 |
+
):
|
| 36 |
+
"""Async SQLite database service for the Reachy iOS Bridge.
|
| 37 |
+
|
| 38 |
+
This class combines the base DatabaseService with all CRUD mixins,
|
| 39 |
+
providing a complete interface for database operations.
|
| 40 |
+
|
| 41 |
+
Example usage:
|
| 42 |
+
db = DatabaseService()
|
| 43 |
+
await db.initialize()
|
| 44 |
+
|
| 45 |
+
# Custom Apps
|
| 46 |
+
apps = await db.get_all_apps()
|
| 47 |
+
app = await db.create_app({"id": "...", "name": "...", ...})
|
| 48 |
+
|
| 49 |
+
# User Settings
|
| 50 |
+
await db.set_user_setting("user_name", "John")
|
| 51 |
+
name = await db.get_user_setting("user_name")
|
| 52 |
+
|
| 53 |
+
# And so on for other tables...
|
| 54 |
+
"""
|
| 55 |
+
pass
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# Global database service instance
|
| 59 |
+
db_service: Optional[DatabaseService] = None
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def get_database() -> DatabaseService:
|
| 63 |
+
"""Get the global database service instance.
|
| 64 |
+
|
| 65 |
+
Returns:
|
| 66 |
+
The database service.
|
| 67 |
+
|
| 68 |
+
Raises:
|
| 69 |
+
RuntimeError: If database not initialized.
|
| 70 |
+
"""
|
| 71 |
+
if db_service is None:
|
| 72 |
+
raise RuntimeError("Database not initialized. Call init_database() first.")
|
| 73 |
+
return db_service
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
async def init_database(db_path: Optional[Path] = None) -> DatabaseService:
|
| 77 |
+
"""Initialize the global database service.
|
| 78 |
+
|
| 79 |
+
Args:
|
| 80 |
+
db_path: Optional custom database path.
|
| 81 |
+
|
| 82 |
+
Returns:
|
| 83 |
+
The initialized database service.
|
| 84 |
+
"""
|
| 85 |
+
global db_service
|
| 86 |
+
db_service = DatabaseService(db_path)
|
| 87 |
+
await db_service.initialize()
|
| 88 |
+
return db_service
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
# Re-export for backwards compatibility
|
| 92 |
+
__all__ = [
|
| 93 |
+
"DatabaseService",
|
| 94 |
+
"DATABASE_PATH",
|
| 95 |
+
"DATABASE_DIR",
|
| 96 |
+
"get_database",
|
| 97 |
+
"init_database",
|
| 98 |
+
"db_service",
|
| 99 |
+
]
|
| 100 |
+
|
reachys_brain/database/animations.py
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Custom Animations CRUD operations for the database.
|
| 2 |
+
|
| 3 |
+
Provides methods for creating, reading, updating, and deleting custom animations.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import logging
|
| 8 |
+
from datetime import datetime
|
| 9 |
+
from typing import Optional
|
| 10 |
+
|
| 11 |
+
import aiosqlite
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class AnimationsMixin:
|
| 17 |
+
"""Mixin class providing Custom Animations CRUD operations.
|
| 18 |
+
|
| 19 |
+
This mixin is designed to be used with DatabaseService.
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
db_path: str # Provided by DatabaseService
|
| 23 |
+
|
| 24 |
+
async def get_all_animations(self) -> list[dict]:
|
| 25 |
+
"""Get all custom animations from the database.
|
| 26 |
+
|
| 27 |
+
Returns:
|
| 28 |
+
List of custom animation dictionaries.
|
| 29 |
+
"""
|
| 30 |
+
async with aiosqlite.connect(self.db_path) as db:
|
| 31 |
+
db.row_factory = aiosqlite.Row
|
| 32 |
+
cursor = await db.execute(
|
| 33 |
+
"SELECT * FROM custom_animations ORDER BY created_at DESC"
|
| 34 |
+
)
|
| 35 |
+
rows = await cursor.fetchall()
|
| 36 |
+
return [self._row_to_animation(row) for row in rows]
|
| 37 |
+
|
| 38 |
+
async def get_animation(self, animation_id: str) -> Optional[dict]:
|
| 39 |
+
"""Get a single custom animation by ID.
|
| 40 |
+
|
| 41 |
+
Args:
|
| 42 |
+
animation_id: The UUID of the animation.
|
| 43 |
+
|
| 44 |
+
Returns:
|
| 45 |
+
The animation dictionary, or None if not found.
|
| 46 |
+
"""
|
| 47 |
+
async with aiosqlite.connect(self.db_path) as db:
|
| 48 |
+
db.row_factory = aiosqlite.Row
|
| 49 |
+
cursor = await db.execute(
|
| 50 |
+
"SELECT * FROM custom_animations WHERE id = ?", (animation_id,)
|
| 51 |
+
)
|
| 52 |
+
row = await cursor.fetchone()
|
| 53 |
+
return self._row_to_animation(row) if row else None
|
| 54 |
+
|
| 55 |
+
async def create_animation(self, animation_data: dict) -> dict:
|
| 56 |
+
"""Create a new custom animation.
|
| 57 |
+
|
| 58 |
+
Args:
|
| 59 |
+
animation_data: Dictionary containing animation data (must include 'id').
|
| 60 |
+
|
| 61 |
+
Returns:
|
| 62 |
+
The created animation dictionary.
|
| 63 |
+
"""
|
| 64 |
+
now = datetime.utcnow().isoformat()
|
| 65 |
+
start_pose = json.dumps(animation_data.get("start_pose", {}))
|
| 66 |
+
keyframes = json.dumps(animation_data.get("keyframes", []))
|
| 67 |
+
audio_data = animation_data.get("audio_data") # Base64 string or None
|
| 68 |
+
|
| 69 |
+
# Ensure created_at is always set
|
| 70 |
+
created_at = animation_data.get("created_at") or now
|
| 71 |
+
|
| 72 |
+
async with aiosqlite.connect(self.db_path) as db:
|
| 73 |
+
await db.execute(
|
| 74 |
+
"""
|
| 75 |
+
INSERT INTO custom_animations
|
| 76 |
+
(id, name, description, duration_ms, start_pose, keyframes,
|
| 77 |
+
audio_data, created_at, updated_at)
|
| 78 |
+
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
|
| 79 |
+
""",
|
| 80 |
+
(
|
| 81 |
+
animation_data["id"],
|
| 82 |
+
animation_data["name"],
|
| 83 |
+
animation_data.get("description", ""),
|
| 84 |
+
animation_data["duration_ms"],
|
| 85 |
+
start_pose,
|
| 86 |
+
keyframes,
|
| 87 |
+
audio_data,
|
| 88 |
+
created_at,
|
| 89 |
+
now,
|
| 90 |
+
),
|
| 91 |
+
)
|
| 92 |
+
await db.commit()
|
| 93 |
+
|
| 94 |
+
logger.info(f"Created custom animation: {animation_data['name']} ({animation_data['id']})")
|
| 95 |
+
return await self.get_animation(animation_data["id"])
|
| 96 |
+
|
| 97 |
+
async def update_animation(self, animation_id: str, animation_data: dict) -> Optional[dict]:
|
| 98 |
+
"""Update an existing custom animation.
|
| 99 |
+
|
| 100 |
+
Args:
|
| 101 |
+
animation_id: The UUID of the animation to update.
|
| 102 |
+
animation_data: Dictionary containing updated animation data.
|
| 103 |
+
|
| 104 |
+
Returns:
|
| 105 |
+
The updated animation dictionary, or None if not found.
|
| 106 |
+
"""
|
| 107 |
+
existing = await self.get_animation(animation_id)
|
| 108 |
+
if not existing:
|
| 109 |
+
return None
|
| 110 |
+
|
| 111 |
+
now = datetime.utcnow().isoformat()
|
| 112 |
+
|
| 113 |
+
# Handle start_pose - use existing if not provided
|
| 114 |
+
if "start_pose" in animation_data:
|
| 115 |
+
start_pose = json.dumps(animation_data["start_pose"])
|
| 116 |
+
else:
|
| 117 |
+
start_pose = json.dumps(existing["start_pose"])
|
| 118 |
+
|
| 119 |
+
# Handle keyframes - use existing if not provided
|
| 120 |
+
if "keyframes" in animation_data:
|
| 121 |
+
keyframes = json.dumps(animation_data["keyframes"])
|
| 122 |
+
else:
|
| 123 |
+
keyframes = json.dumps(existing["keyframes"])
|
| 124 |
+
|
| 125 |
+
# Handle audio_data - use existing if not provided
|
| 126 |
+
if "audio_data" in animation_data:
|
| 127 |
+
audio_data = animation_data["audio_data"]
|
| 128 |
+
else:
|
| 129 |
+
audio_data = existing.get("audio_data")
|
| 130 |
+
|
| 131 |
+
async with aiosqlite.connect(self.db_path) as db:
|
| 132 |
+
await db.execute(
|
| 133 |
+
"""
|
| 134 |
+
UPDATE custom_animations SET
|
| 135 |
+
name = ?,
|
| 136 |
+
description = ?,
|
| 137 |
+
duration_ms = ?,
|
| 138 |
+
start_pose = ?,
|
| 139 |
+
keyframes = ?,
|
| 140 |
+
audio_data = ?,
|
| 141 |
+
updated_at = ?
|
| 142 |
+
WHERE id = ?
|
| 143 |
+
""",
|
| 144 |
+
(
|
| 145 |
+
animation_data.get("name", existing["name"]),
|
| 146 |
+
animation_data.get("description", existing["description"]),
|
| 147 |
+
animation_data.get("duration_ms", existing["duration_ms"]),
|
| 148 |
+
start_pose,
|
| 149 |
+
keyframes,
|
| 150 |
+
audio_data,
|
| 151 |
+
now,
|
| 152 |
+
animation_id,
|
| 153 |
+
),
|
| 154 |
+
)
|
| 155 |
+
await db.commit()
|
| 156 |
+
|
| 157 |
+
logger.info(f"Updated custom animation: {animation_id}")
|
| 158 |
+
return await self.get_animation(animation_id)
|
| 159 |
+
|
| 160 |
+
async def delete_animation(self, animation_id: str) -> bool:
|
| 161 |
+
"""Delete a custom animation.
|
| 162 |
+
|
| 163 |
+
Args:
|
| 164 |
+
animation_id: The UUID of the animation to delete.
|
| 165 |
+
|
| 166 |
+
Returns:
|
| 167 |
+
True if deleted, False if not found.
|
| 168 |
+
"""
|
| 169 |
+
async with aiosqlite.connect(self.db_path) as db:
|
| 170 |
+
cursor = await db.execute(
|
| 171 |
+
"DELETE FROM custom_animations WHERE id = ?", (animation_id,)
|
| 172 |
+
)
|
| 173 |
+
await db.commit()
|
| 174 |
+
deleted = cursor.rowcount > 0
|
| 175 |
+
|
| 176 |
+
if deleted:
|
| 177 |
+
logger.info(f"Deleted custom animation: {animation_id}")
|
| 178 |
+
return deleted
|
| 179 |
+
|
| 180 |
+
def _row_to_animation(self, row: aiosqlite.Row) -> dict:
|
| 181 |
+
"""Convert a database row to an animation dictionary.
|
| 182 |
+
|
| 183 |
+
Args:
|
| 184 |
+
row: The database row.
|
| 185 |
+
|
| 186 |
+
Returns:
|
| 187 |
+
Animation dictionary with proper types.
|
| 188 |
+
"""
|
| 189 |
+
# Handle audio_data - may not exist in older databases
|
| 190 |
+
audio_data = row["audio_data"] if "audio_data" in row.keys() else None
|
| 191 |
+
|
| 192 |
+
return {
|
| 193 |
+
"id": row["id"],
|
| 194 |
+
"name": row["name"],
|
| 195 |
+
"description": row["description"],
|
| 196 |
+
"duration_ms": row["duration_ms"],
|
| 197 |
+
"start_pose": json.loads(row["start_pose"]),
|
| 198 |
+
"keyframes": json.loads(row["keyframes"]),
|
| 199 |
+
"audio_data": audio_data,
|
| 200 |
+
"created_at": row["created_at"],
|
| 201 |
+
"updated_at": row["updated_at"],
|
| 202 |
+
}
|
| 203 |
+
|
reachys_brain/database/apps.py
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Custom Apps CRUD operations for the database.
|
| 2 |
+
|
| 3 |
+
Provides methods for creating, reading, updating, and deleting custom apps.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import logging
|
| 8 |
+
from datetime import datetime
|
| 9 |
+
from typing import Optional
|
| 10 |
+
|
| 11 |
+
import aiosqlite
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class AppsMixin:
    """Mixin class providing Custom Apps CRUD operations.

    This mixin is designed to be used with DatabaseService.
    """

    db_path: str  # Provided by DatabaseService

    async def get_all_apps(self) -> list[dict]:
        """Fetch every custom app, newest first.

        Returns:
            List of custom app dictionaries.
        """
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            cur = await db.execute(
                "SELECT * FROM custom_apps ORDER BY created_at DESC"
            )
            return [self._row_to_app(r) for r in await cur.fetchall()]

    async def get_app(self, app_id: str) -> Optional[dict]:
        """Look up a single custom app by its UUID.

        Args:
            app_id: The UUID of the app.

        Returns:
            The app dictionary, or None if not found.
        """
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            cur = await db.execute(
                "SELECT * FROM custom_apps WHERE id = ?", (app_id,)
            )
            match = await cur.fetchone()
        return self._row_to_app(match) if match else None

    async def create_app(self, app_data: dict) -> dict:
        """Insert a new custom app row.

        Args:
            app_data: Dictionary containing app data (must include 'id').

        Returns:
            The created app dictionary.
        """
        now = datetime.utcnow().isoformat()
        row = (
            app_data["id"],
            app_data["name"],
            app_data.get("description", ""),
            app_data["system_prompt"],
            app_data.get("voice_id", ""),
            # JSON-typed columns are stored as serialized text.
            json.dumps(app_data.get("emotion_animations", {})),
            app_data.get("icon_color", "blue"),
            json.dumps(app_data.get("enabled_tools", [])),
            # created_at falls back to "now" so it is never empty.
            app_data.get("created_at") or now,
            now,
        )

        async with aiosqlite.connect(self.db_path) as db:
            await db.execute(
                """
                INSERT INTO custom_apps
                (id, name, description, system_prompt, voice_id,
                 emotion_animations, icon_color, enabled_tools, created_at, updated_at)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """,
                row,
            )
            await db.commit()

        logger.info(f"Created custom app: {app_data['name']} ({app_data['id']})")
        return await self.get_app(app_data["id"])

    async def update_app(self, app_id: str, app_data: dict) -> Optional[dict]:
        """Apply partial updates to an existing custom app.

        Args:
            app_id: The UUID of the app to update.
            app_data: Dictionary containing updated app data.

        Returns:
            The updated app dictionary, or None if not found.
        """
        existing = await self.get_app(app_id)
        if existing is None:
            return None

        now = datetime.utcnow().isoformat()

        # JSON columns: serialize the incoming value when supplied,
        # otherwise re-serialize the stored value.
        emotion_animations = json.dumps(
            app_data["emotion_animations"]
            if "emotion_animations" in app_data
            else existing["emotion_animations"]
        )
        enabled_tools = json.dumps(
            app_data["enabled_tools"]
            if "enabled_tools" in app_data
            else existing["enabled_tools"]
        )

        payload = (
            app_data.get("name", existing["name"]),
            app_data.get("description", existing["description"]),
            app_data.get("system_prompt", existing["system_prompt"]),
            app_data.get("voice_id", existing["voice_id"]),
            emotion_animations,
            app_data.get("icon_color", existing["icon_color"]),
            enabled_tools,
            now,
            app_id,
        )

        async with aiosqlite.connect(self.db_path) as db:
            await db.execute(
                """
                UPDATE custom_apps SET
                    name = ?,
                    description = ?,
                    system_prompt = ?,
                    voice_id = ?,
                    emotion_animations = ?,
                    icon_color = ?,
                    enabled_tools = ?,
                    updated_at = ?
                WHERE id = ?
                """,
                payload,
            )
            await db.commit()

        logger.info(f"Updated custom app: {app_id}")
        return await self.get_app(app_id)

    async def delete_app(self, app_id: str) -> bool:
        """Remove a custom app by its UUID.

        Args:
            app_id: The UUID of the app to delete.

        Returns:
            True if deleted, False if not found.
        """
        async with aiosqlite.connect(self.db_path) as db:
            result = await db.execute(
                "DELETE FROM custom_apps WHERE id = ?", (app_id,)
            )
            await db.commit()
            removed = result.rowcount > 0

        if removed:
            logger.info(f"Deleted custom app: {app_id}")
        return removed

    async def sync_apps(self, apps: list[dict]) -> list[dict]:
        """Bulk sync apps from iOS client.

        This replaces all existing apps with the provided list.
        Used for initial sync or full restore.

        Args:
            apps: List of app dictionaries to sync.

        Returns:
            List of all apps after sync.
        """
        now = datetime.utcnow().isoformat()
        rows = [
            (
                app["id"],
                app["name"],
                app.get("description", ""),
                app["system_prompt"],
                app.get("voice_id", ""),
                json.dumps(app.get("emotion_animations", {})),
                app.get("icon_color", "blue"),
                json.dumps(app.get("enabled_tools", [])),
                # Ensure created_at is always set.
                app.get("created_at") or now,
                now,
            )
            for app in apps
        ]

        async with aiosqlite.connect(self.db_path) as db:
            # Full replace: drop everything, then insert the client's list
            # in a single transaction (committed below).
            await db.execute("DELETE FROM custom_apps")
            await db.executemany(
                """
                INSERT INTO custom_apps
                (id, name, description, system_prompt, voice_id,
                 emotion_animations, icon_color, enabled_tools, created_at, updated_at)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """,
                rows,
            )
            await db.commit()

        logger.info(f"Synced {len(apps)} custom apps")
        return await self.get_all_apps()

    def _row_to_app(self, row: aiosqlite.Row) -> dict:
        """Deserialize a database row into an app dictionary.

        Args:
            row: The database row.

        Returns:
            App dictionary with proper types.
        """
        # Older databases may lack the enabled_tools column entirely.
        raw_tools = row["enabled_tools"] if "enabled_tools" in row.keys() else "[]"

        return {
            "id": row["id"],
            "name": row["name"],
            "description": row["description"],
            "system_prompt": row["system_prompt"],
            "voice_id": row["voice_id"],
            "emotion_animations": json.loads(row["emotion_animations"]),
            "icon_color": row["icon_color"],
            "enabled_tools": json.loads(raw_tools) if raw_tools else [],
            "created_at": row["created_at"],
            "updated_at": row["updated_at"],
        }
|
| 249 |
+
|
reachys_brain/database/base.py
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Base database service with initialization and table creation.
|
| 2 |
+
|
| 3 |
+
Provides the core DatabaseService class with connection management and schema setup.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from typing import Optional
|
| 9 |
+
|
| 10 |
+
import aiosqlite
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
# Database location - stored in user's home directory
|
| 15 |
+
DATABASE_DIR = Path.home() / ".reachy"
|
| 16 |
+
DATABASE_PATH = DATABASE_DIR / "reachy_bridge.db"
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class DatabaseService:
    """Async SQLite database service for the Reachy iOS Bridge.

    This is the base class that handles connection management and schema
    creation/migration. CRUD operations for specific tables are added via
    mixin classes, which read ``self.db_path``.
    """

    def __init__(self, db_path: Optional[Path] = None):
        """Initialize the database service.

        Args:
            db_path: Optional custom database path. Defaults to
                ~/.reachy/reachy_bridge.db. A plain string is also accepted
                and coerced to a Path.
        """
        # Coerce to Path so a caller passing a str does not break
        # `self.db_path.parent.mkdir(...)` in initialize().
        self.db_path = Path(db_path) if db_path else DATABASE_PATH
        # Reserved for an optional persistent connection; per-call code
        # opens its own short-lived connections instead.
        self._connection: Optional[aiosqlite.Connection] = None

    async def initialize(self) -> None:
        """Initialize the database and create tables if needed."""
        # Ensure the parent directory exists before SQLite creates the file.
        self.db_path.parent.mkdir(parents=True, exist_ok=True)

        logger.info(f"Initializing database at {self.db_path}")

        async with aiosqlite.connect(self.db_path) as db:
            await self._create_tables(db)
            await db.commit()

        logger.info("Database initialized successfully")

    async def _create_tables(self, db: aiosqlite.Connection) -> None:
        """Create database tables and indexes if they don't exist.

        Also runs lightweight column-add migrations for older databases.
        """
        # Custom apps table
        await db.execute("""
            CREATE TABLE IF NOT EXISTS custom_apps (
                id TEXT PRIMARY KEY,
                name TEXT NOT NULL,
                description TEXT DEFAULT '',
                system_prompt TEXT NOT NULL,
                voice_id TEXT DEFAULT '',
                emotion_animations TEXT DEFAULT '{}',
                icon_color TEXT DEFAULT 'blue',
                enabled_tools TEXT DEFAULT '[]',
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_custom_apps_created
            ON custom_apps(created_at)
        """)

        # Migration: Add enabled_tools column if it doesn't exist
        await self._migrate_add_enabled_tools(db)

        # Custom animations table for recorded joystick animations
        await db.execute("""
            CREATE TABLE IF NOT EXISTS custom_animations (
                id TEXT PRIMARY KEY,
                name TEXT NOT NULL,
                description TEXT DEFAULT '',
                duration_ms INTEGER NOT NULL,
                start_pose TEXT NOT NULL,
                keyframes TEXT NOT NULL,
                audio_data TEXT DEFAULT NULL,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        # Migration: Add audio_data column if it doesn't exist
        await self._migrate_add_audio_data(db)

        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_custom_animations_created
            ON custom_animations(created_at)
        """)

        # User settings table for personalization (name, preferences, etc.)
        await db.execute("""
            CREATE TABLE IF NOT EXISTS user_settings (
                key TEXT PRIMARY KEY,
                value TEXT NOT NULL,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        # Websites table for generated websites
        await db.execute("""
            CREATE TABLE IF NOT EXISTS websites (
                id TEXT PRIMARY KEY,
                title TEXT NOT NULL,
                description TEXT DEFAULT '',
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_websites_created
            ON websites(created_at)
        """)

        # Mental notes table for AI-created notes
        await db.execute("""
            CREATE TABLE IF NOT EXISTS mental_notes (
                id TEXT PRIMARY KEY,
                title TEXT NOT NULL,
                content TEXT NOT NULL,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_mental_notes_created
            ON mental_notes(created_at)
        """)

        # Meetings table for meeting transcriptions
        await db.execute("""
            CREATE TABLE IF NOT EXISTS meetings (
                id TEXT PRIMARY KEY,
                title TEXT NOT NULL,
                transcript TEXT DEFAULT '',
                action_items TEXT DEFAULT '[]',
                summary TEXT DEFAULT '',
                duration_seconds INTEGER DEFAULT 0,
                status TEXT DEFAULT 'recording',
                started_at TIMESTAMP,
                ended_at TIMESTAMP,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_meetings_created
            ON meetings(created_at)
        """)

        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_meetings_status
            ON meetings(status)
        """)

        # Scheduled messages table for scheduled iMessage/WhatsApp messages
        await db.execute("""
            CREATE TABLE IF NOT EXISTS scheduled_messages (
                id TEXT PRIMARY KEY,
                recipient_name TEXT NOT NULL,
                recipient_phone TEXT NOT NULL,
                message_content TEXT NOT NULL,
                scheduled_time TIMESTAMP NOT NULL,
                platform TEXT NOT NULL,
                status TEXT DEFAULT 'pending',
                notification_id TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_scheduled_messages_time
            ON scheduled_messages(scheduled_time)
        """)

        await db.execute("""
            CREATE INDEX IF NOT EXISTS idx_scheduled_messages_status
            ON scheduled_messages(status)
        """)

        # TamaReachy pet game state table
        await db.execute("""
            CREATE TABLE IF NOT EXISTS tamareachy_state (
                id INTEGER PRIMARY KEY DEFAULT 1,
                enabled INTEGER DEFAULT 0,
                hunger INTEGER DEFAULT 100,
                thirst INTEGER DEFAULT 100,
                happiness INTEGER DEFAULT 100,
                energy INTEGER DEFAULT 100,
                boredom INTEGER DEFAULT 100,
                social INTEGER DEFAULT 100,
                health INTEGER DEFAULT 100,
                cleanliness INTEGER DEFAULT 100,
                last_interaction TIMESTAMP,
                last_decay_check TIMESTAMP,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        # Ensure there's exactly one row for TamaReachy state
        await db.execute("""
            INSERT OR IGNORE INTO tamareachy_state (id) VALUES (1)
        """)

        logger.debug("Database tables created/verified")

    async def _migrate_add_enabled_tools(self, db: aiosqlite.Connection) -> None:
        """Add enabled_tools column to existing custom_apps tables."""
        await self._add_column_if_missing(
            db, "custom_apps", "enabled_tools", "TEXT DEFAULT '[]'"
        )

    async def _migrate_add_audio_data(self, db: aiosqlite.Connection) -> None:
        """Add audio_data column to existing custom_animations tables."""
        await self._add_column_if_missing(
            db, "custom_animations", "audio_data", "TEXT DEFAULT NULL"
        )

    async def _add_column_if_missing(
        self,
        db: aiosqlite.Connection,
        table: str,
        column: str,
        column_def: str,
    ) -> None:
        """Idempotently add a column to a table (shared migration helper).

        Checks PRAGMA table_info for the column and issues ALTER TABLE only
        when it is absent. Failures are logged but deliberately swallowed —
        migrations are best-effort, matching the original behavior.

        Args:
            db: Open database connection.
            table: Table to inspect/alter.
            column: Column name to ensure exists.
            column_def: Column type and default clause (e.g. "TEXT DEFAULT '[]'").
        """
        try:
            cursor = await db.execute(f"PRAGMA table_info({table})")
            columns = await cursor.fetchall()
            # PRAGMA table_info rows: (cid, name, type, notnull, dflt_value, pk)
            column_names = [col[1] for col in columns]

            if column not in column_names:
                logger.info(f"Migrating database: adding {column} column to {table}")
                await db.execute(
                    f"ALTER TABLE {table} ADD COLUMN {column} {column_def}"
                )
                await db.commit()
                logger.info(f"Migration complete: {column} column added to {table}")
        except Exception as e:
            # Best-effort: a failed check must not block startup.
            logger.warning(
                f"Migration check failed for {table}.{column} (may be OK): {e}"
            )

    async def close(self) -> None:
        """Close database connection (if using persistent connection)."""
        if self._connection:
            await self._connection.close()
            self._connection = None
|
| 257 |
+
|
reachys_brain/database/meetings.py
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Meetings CRUD operations for the database.
|
| 2 |
+
|
| 3 |
+
Provides methods for creating, reading, updating, and deleting meeting transcriptions.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import logging
|
| 8 |
+
from datetime import datetime
|
| 9 |
+
from typing import Optional
|
| 10 |
+
|
| 11 |
+
import aiosqlite
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class MeetingsMixin:
    """Mixin class providing Meetings CRUD operations.

    This mixin is designed to be used with DatabaseService.
    """

    db_path: str  # Provided by DatabaseService

    async def get_all_meetings(self) -> list[dict]:
        """Get all meetings from the database, newest first.

        Returns:
            List of meeting dictionaries.
        """
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            cursor = await db.execute(
                "SELECT * FROM meetings ORDER BY created_at DESC"
            )
            rows = await cursor.fetchall()
            return [self._row_to_meeting(row) for row in rows]

    async def get_meeting(self, meeting_id: str) -> Optional[dict]:
        """Get a single meeting by ID.

        Args:
            meeting_id: The ID of the meeting.

        Returns:
            The meeting dictionary, or None if not found.
        """
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            cursor = await db.execute(
                "SELECT * FROM meetings WHERE id = ?", (meeting_id,)
            )
            row = await cursor.fetchone()
            return self._row_to_meeting(row) if row else None

    async def get_active_meeting(self) -> Optional[dict]:
        """Get the currently active (recording) meeting.

        Returns:
            The active meeting dictionary, or None if no meeting is recording.
        """
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            cursor = await db.execute(
                "SELECT * FROM meetings WHERE status = 'recording' LIMIT 1"
            )
            row = await cursor.fetchone()
            return self._row_to_meeting(row) if row else None

    async def create_meeting(self, meeting_data: dict) -> dict:
        """Create a new meeting record.

        Args:
            meeting_data: Dictionary containing meeting data (must include 'id', 'title').

        Returns:
            The created meeting dictionary.
        """
        now = datetime.utcnow().isoformat()
        action_items = json.dumps(meeting_data.get("action_items", []))
        # started_at defaults to the creation time when not supplied.
        started_at = meeting_data.get("started_at") or now

        async with aiosqlite.connect(self.db_path) as db:
            await db.execute(
                """
                INSERT INTO meetings
                (id, title, transcript, action_items, summary, duration_seconds,
                 status, started_at, ended_at, created_at, updated_at)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """,
                (
                    meeting_data["id"],
                    meeting_data["title"],
                    meeting_data.get("transcript", ""),
                    action_items,
                    meeting_data.get("summary", ""),
                    meeting_data.get("duration_seconds", 0),
                    meeting_data.get("status", "recording"),
                    started_at,
                    meeting_data.get("ended_at"),
                    now,
                    now,
                ),
            )
            await db.commit()

        logger.info(f"Created meeting: {meeting_data['title']} ({meeting_data['id']})")
        return await self.get_meeting(meeting_data["id"])

    async def update_meeting(self, meeting_id: str, meeting_data: dict) -> Optional[dict]:
        """Update an existing meeting.

        Args:
            meeting_id: The ID of the meeting to update.
            meeting_data: Dictionary containing updated meeting data.

        Returns:
            The updated meeting dictionary, or None if not found.
        """
        existing = await self.get_meeting(meeting_id)
        if not existing:
            return None

        now = datetime.utcnow().isoformat()

        # action_items is a JSON column: serialize the incoming value when
        # supplied, otherwise re-serialize the stored value.
        if "action_items" in meeting_data:
            action_items = json.dumps(meeting_data["action_items"])
        else:
            action_items = json.dumps(existing["action_items"])

        async with aiosqlite.connect(self.db_path) as db:
            await db.execute(
                """
                UPDATE meetings SET
                    title = ?,
                    transcript = ?,
                    action_items = ?,
                    summary = ?,
                    duration_seconds = ?,
                    status = ?,
                    started_at = ?,
                    ended_at = ?,
                    updated_at = ?
                WHERE id = ?
                """,
                (
                    meeting_data.get("title", existing["title"]),
                    meeting_data.get("transcript", existing["transcript"]),
                    action_items,
                    meeting_data.get("summary", existing["summary"]),
                    meeting_data.get("duration_seconds", existing["duration_seconds"]),
                    meeting_data.get("status", existing["status"]),
                    meeting_data.get("started_at", existing["started_at"]),
                    meeting_data.get("ended_at", existing["ended_at"]),
                    now,
                    meeting_id,
                ),
            )
            await db.commit()

        logger.info(f"Updated meeting: {meeting_id}")
        return await self.get_meeting(meeting_id)

    async def append_to_meeting_transcript(
        self, meeting_id: str, text: str
    ) -> Optional[dict]:
        """Append text to an existing meeting's transcript.

        The append is done in a single atomic UPDATE (SQL string
        concatenation) instead of a Python-side read-modify-write, so
        concurrent appends cannot overwrite each other's text.

        Args:
            meeting_id: The ID of the meeting.
            text: The text to append to the transcript.

        Returns:
            The updated meeting dictionary, or None if not found.
        """
        now = datetime.utcnow().isoformat()

        async with aiosqlite.connect(self.db_path) as db:
            # COALESCE guards against a NULL transcript in legacy rows;
            # rowcount == 0 means no meeting with that id exists.
            cursor = await db.execute(
                """
                UPDATE meetings SET
                    transcript = COALESCE(transcript, '') || ?,
                    updated_at = ?
                WHERE id = ?
                """,
                (text, now, meeting_id),
            )
            await db.commit()
            if cursor.rowcount == 0:
                return None

        logger.debug(f"Appended to meeting transcript: {meeting_id}")
        return await self.get_meeting(meeting_id)

    async def delete_meeting(self, meeting_id: str) -> bool:
        """Delete a meeting.

        Args:
            meeting_id: The ID of the meeting to delete.

        Returns:
            True if deleted, False if not found.
        """
        async with aiosqlite.connect(self.db_path) as db:
            cursor = await db.execute(
                "DELETE FROM meetings WHERE id = ?", (meeting_id,)
            )
            await db.commit()
            deleted = cursor.rowcount > 0

        if deleted:
            logger.info(f"Deleted meeting: {meeting_id}")
        return deleted

    def _row_to_meeting(self, row: aiosqlite.Row) -> dict:
        """Convert a database row to a meeting dictionary.

        Args:
            row: The database row.

        Returns:
            Meeting dictionary with proper types (NULL columns normalized
            to empty string / 0 / defaults).
        """
        action_items_raw = row["action_items"] if row["action_items"] else "[]"
        return {
            "id": row["id"],
            "title": row["title"],
            "transcript": row["transcript"] or "",
            "action_items": json.loads(action_items_raw),
            "summary": row["summary"] or "",
            "duration_seconds": row["duration_seconds"] or 0,
            "status": row["status"] or "recording",
            "started_at": row["started_at"],
            "ended_at": row["ended_at"],
            "created_at": row["created_at"],
            "updated_at": row["updated_at"],
        }
|
| 241 |
+
|
reachys_brain/database/notes.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Mental Notes CRUD operations for the database.
|
| 2 |
+
|
| 3 |
+
Provides methods for creating, reading, updating, and deleting AI-created notes.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from typing import Optional
|
| 9 |
+
|
| 10 |
+
import aiosqlite
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class NotesMixin:
    """Mixin class providing Mental Notes CRUD operations.

    This mixin is designed to be used with DatabaseService.
    """

    db_path: str  # Provided by DatabaseService

    async def get_all_notes(self) -> list[dict]:
        """Return every mental note, newest first.

        Returns:
            List of mental note dictionaries ordered by created_at descending.
        """
        async with aiosqlite.connect(self.db_path) as conn:
            conn.row_factory = aiosqlite.Row
            cur = await conn.execute(
                "SELECT * FROM mental_notes ORDER BY created_at DESC"
            )
            records = await cur.fetchall()
        return [self._row_to_note(record) for record in records]

    async def get_note(self, note_id: str) -> Optional[dict]:
        """Look up a single mental note by ID.

        Args:
            note_id: The ID of the note.

        Returns:
            The note dictionary, or None if no such note exists.
        """
        async with aiosqlite.connect(self.db_path) as conn:
            conn.row_factory = aiosqlite.Row
            cur = await conn.execute(
                "SELECT * FROM mental_notes WHERE id = ?", (note_id,)
            )
            record = await cur.fetchone()
        if record is None:
            return None
        return self._row_to_note(record)

    async def create_note(self, note_data: dict) -> dict:
        """Insert a new mental note and return the stored record.

        Args:
            note_data: Dictionary containing note data (must include 'id',
                'title', 'content'; may carry an explicit 'created_at').

        Returns:
            The created note dictionary as read back from the database.
        """
        timestamp = datetime.utcnow().isoformat()
        # Callers may supply their own creation time; fall back to "now".
        created_at = note_data.get("created_at") or timestamp
        params = (
            note_data["id"],
            note_data["title"],
            note_data["content"],
            created_at,
            timestamp,
        )

        async with aiosqlite.connect(self.db_path) as conn:
            await conn.execute(
                """
                INSERT INTO mental_notes (id, title, content, created_at, updated_at)
                VALUES (?, ?, ?, ?, ?)
                """,
                params,
            )
            await conn.commit()

        logger.info(f"Created mental note: {note_data['title']} ({note_data['id']})")
        return await self.get_note(note_data["id"])

    async def update_note(self, note_id: str, note_data: dict) -> Optional[dict]:
        """Update an existing mental note.

        Fields absent from note_data keep their current values.

        Args:
            note_id: The ID of the note to update.
            note_data: Dictionary containing updated note data.

        Returns:
            The updated note dictionary, or None if not found.
        """
        current = await self.get_note(note_id)
        if current is None:
            return None

        timestamp = datetime.utcnow().isoformat()
        params = (
            note_data.get("title", current["title"]),
            note_data.get("content", current["content"]),
            timestamp,
            note_id,
        )

        async with aiosqlite.connect(self.db_path) as conn:
            await conn.execute(
                """
                UPDATE mental_notes SET
                    title = ?,
                    content = ?,
                    updated_at = ?
                WHERE id = ?
                """,
                params,
            )
            await conn.commit()

        logger.info(f"Updated mental note: {note_id}")
        return await self.get_note(note_id)

    async def delete_note(self, note_id: str) -> bool:
        """Delete a mental note.

        Args:
            note_id: The ID of the note to delete.

        Returns:
            True if deleted, False if not found.
        """
        async with aiosqlite.connect(self.db_path) as conn:
            cur = await conn.execute(
                "DELETE FROM mental_notes WHERE id = ?", (note_id,)
            )
            await conn.commit()
            removed = cur.rowcount > 0

        if removed:
            logger.info(f"Deleted mental note: {note_id}")
        return removed

    def _row_to_note(self, row: aiosqlite.Row) -> dict:
        """Convert a database row to a mental note dictionary.

        Args:
            row: The database row.

        Returns:
            Mental note dictionary with proper types.
        """
        return {
            column: row[column]
            for column in ("id", "title", "content", "created_at", "updated_at")
        }
|
| 158 |
+
|
reachys_brain/database/scheduled_messages.py
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Scheduled Messages CRUD operations for the database.
|
| 2 |
+
|
| 3 |
+
Provides methods for creating, reading, updating, and deleting scheduled messages.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from typing import Optional
|
| 9 |
+
|
| 10 |
+
import aiosqlite
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class ScheduledMessagesMixin:
    """Mixin class providing Scheduled Messages CRUD operations.

    This mixin is designed to be used with DatabaseService.
    """

    db_path: str  # Provided by DatabaseService

    async def get_all_scheduled_messages(self, status: Optional[str] = None) -> list[dict]:
        """Fetch scheduled messages, optionally restricted to one status.

        Args:
            status: Optional filter by status ('pending', 'sent', 'cancelled').

        Returns:
            Scheduled message dictionaries ordered by scheduled_time ascending.
        """
        # Build the query once; the WHERE clause is only added when filtering.
        query = "SELECT * FROM scheduled_messages"
        params: tuple = ()
        if status:
            query += " WHERE status = ?"
            params = (status,)
        query += " ORDER BY scheduled_time ASC"

        async with aiosqlite.connect(self.db_path) as conn:
            conn.row_factory = aiosqlite.Row
            cur = await conn.execute(query, params)
            records = await cur.fetchall()
        return [self._row_to_scheduled_message(record) for record in records]

    async def get_scheduled_message(self, message_id: str) -> Optional[dict]:
        """Look up a single scheduled message by ID.

        Args:
            message_id: The ID of the scheduled message.

        Returns:
            The scheduled message dictionary, or None if not found.
        """
        async with aiosqlite.connect(self.db_path) as conn:
            conn.row_factory = aiosqlite.Row
            cur = await conn.execute(
                "SELECT * FROM scheduled_messages WHERE id = ?", (message_id,)
            )
            record = await cur.fetchone()
        if record is None:
            return None
        return self._row_to_scheduled_message(record)

    async def get_pending_scheduled_messages(self) -> list[dict]:
        """Return only the scheduled messages still waiting to be sent.

        Returns:
            List of pending scheduled message dictionaries.
        """
        return await self.get_all_scheduled_messages(status="pending")

    async def create_scheduled_message(self, message_data: dict) -> dict:
        """Insert a new scheduled message and return the stored record.

        Args:
            message_data: Dictionary containing message data.
                Required: id, recipient_name, recipient_phone, message_content,
                scheduled_time, platform
                Optional: status (defaults to 'pending'), notification_id

        Returns:
            The created scheduled message dictionary.
        """
        timestamp = datetime.utcnow().isoformat()
        params = (
            message_data["id"],
            message_data["recipient_name"],
            message_data["recipient_phone"],
            message_data["message_content"],
            message_data["scheduled_time"],
            message_data["platform"],
            message_data.get("status", "pending"),
            message_data.get("notification_id"),
            timestamp,
            timestamp,
        )

        async with aiosqlite.connect(self.db_path) as conn:
            await conn.execute(
                """
                INSERT INTO scheduled_messages
                    (id, recipient_name, recipient_phone, message_content,
                     scheduled_time, platform, status, notification_id, created_at, updated_at)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """,
                params,
            )
            await conn.commit()

        logger.info(f"Created scheduled message: {message_data['id']} for {message_data['recipient_name']}")
        return await self.get_scheduled_message(message_data["id"])

    async def update_scheduled_message(
        self, message_id: str, message_data: dict
    ) -> Optional[dict]:
        """Update an existing scheduled message.

        Fields absent from message_data keep their current values.

        Args:
            message_id: The ID of the scheduled message to update.
            message_data: Dictionary containing updated message data.

        Returns:
            The updated scheduled message dictionary, or None if not found.
        """
        current = await self.get_scheduled_message(message_id)
        if current is None:
            return None

        timestamp = datetime.utcnow().isoformat()
        # Merge the incoming partial update over the current record,
        # considering only the updatable columns.
        columns = (
            "recipient_name",
            "recipient_phone",
            "message_content",
            "scheduled_time",
            "platform",
            "status",
            "notification_id",
        )
        merged = {name: message_data.get(name, current[name]) for name in columns}

        async with aiosqlite.connect(self.db_path) as conn:
            await conn.execute(
                """
                UPDATE scheduled_messages SET
                    recipient_name = ?,
                    recipient_phone = ?,
                    message_content = ?,
                    scheduled_time = ?,
                    platform = ?,
                    status = ?,
                    notification_id = ?,
                    updated_at = ?
                WHERE id = ?
                """,
                tuple(merged[name] for name in columns) + (timestamp, message_id),
            )
            await conn.commit()

        logger.info(f"Updated scheduled message: {message_id}")
        return await self.get_scheduled_message(message_id)

    async def update_scheduled_message_status(
        self, message_id: str, status: str
    ) -> Optional[dict]:
        """Update just the status of a scheduled message.

        Args:
            message_id: The ID of the scheduled message.
            status: The new status ('pending', 'sent', 'cancelled').

        Returns:
            The updated scheduled message dictionary, or None if not found.
        """
        return await self.update_scheduled_message(message_id, {"status": status})

    async def delete_scheduled_message(self, message_id: str) -> bool:
        """Delete a scheduled message.

        Args:
            message_id: The ID of the scheduled message to delete.

        Returns:
            True if deleted, False if not found.
        """
        async with aiosqlite.connect(self.db_path) as conn:
            cur = await conn.execute(
                "DELETE FROM scheduled_messages WHERE id = ?", (message_id,)
            )
            await conn.commit()
            removed = cur.rowcount > 0

        if removed:
            logger.info(f"Deleted scheduled message: {message_id}")
        return removed

    def _row_to_scheduled_message(self, row: aiosqlite.Row) -> dict:
        """Convert a database row to a scheduled message dictionary.

        Args:
            row: The database row.

        Returns:
            Scheduled message dictionary with proper types.
        """
        message = {
            column: row[column]
            for column in (
                "id",
                "recipient_name",
                "recipient_phone",
                "message_content",
                "scheduled_time",
                "platform",
            )
        }
        # A NULL status is normalized to the 'pending' default.
        message["status"] = row["status"] or "pending"
        message["notification_id"] = row["notification_id"]
        message["created_at"] = row["created_at"]
        message["updated_at"] = row["updated_at"]
        return message
|
| 215 |
+
|
reachys_brain/database/tamareachy.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""TamaReachy game state operations for the database.
|
| 2 |
+
|
| 3 |
+
Provides methods for managing the TamaReachy virtual pet game state.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
|
| 9 |
+
import aiosqlite
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TamaReachyMixin:
    """Mixin class providing TamaReachy state operations.

    This mixin is designed to be used with DatabaseService. The game state
    is stored as a singleton row (id = 1) in the tamareachy_state table.
    """

    db_path: str  # Provided by DatabaseService

    async def get_tamareachy_state(self) -> dict:
        """Get the current TamaReachy state.

        Returns:
            TamaReachy state dictionary. When no row has been persisted yet,
            an in-memory default (all stats at 100, disabled) is returned;
            note this default carries no created_at/updated_at keys.
        """
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row
            cursor = await db.execute(
                "SELECT * FROM tamareachy_state WHERE id = 1"
            )
            row = await cursor.fetchone()
            if row:
                return self._row_to_tamareachy(row)
            # Return default state if not found
            return {
                "enabled": False,
                "hunger": 100,
                "thirst": 100,
                "happiness": 100,
                "energy": 100,
                "boredom": 100,
                "social": 100,
                "health": 100,
                "cleanliness": 100,
                "last_interaction": None,
                "last_decay_check": None,
            }

    async def update_tamareachy_state(self, state_data: dict) -> dict:
        """Update TamaReachy state, merging over the current values.

        Fields absent from state_data keep their current (or default) values.
        If no state row exists yet (fresh database), the singleton row is
        inserted so the update is not silently lost — previously the bare
        UPDATE matched zero rows in that case and the change was dropped.

        Args:
            state_data: Dictionary containing fields to update.

        Returns:
            The updated TamaReachy state dictionary.
        """
        existing = await self.get_tamareachy_state()
        now = datetime.utcnow().isoformat()

        # Column order matches both the UPDATE SET list and the tail of the
        # INSERT column list below.
        values = (
            1 if state_data.get("enabled", existing["enabled"]) else 0,
            state_data.get("hunger", existing["hunger"]),
            state_data.get("thirst", existing["thirst"]),
            state_data.get("happiness", existing["happiness"]),
            state_data.get("energy", existing["energy"]),
            state_data.get("boredom", existing["boredom"]),
            state_data.get("social", existing["social"]),
            state_data.get("health", existing["health"]),
            state_data.get("cleanliness", existing["cleanliness"]),
            state_data.get("last_interaction", existing["last_interaction"]),
            state_data.get("last_decay_check", existing["last_decay_check"]),
            now,
        )

        async with aiosqlite.connect(self.db_path) as db:
            cursor = await db.execute(
                """
                UPDATE tamareachy_state SET
                    enabled = ?,
                    hunger = ?,
                    thirst = ?,
                    happiness = ?,
                    energy = ?,
                    boredom = ?,
                    social = ?,
                    health = ?,
                    cleanliness = ?,
                    last_interaction = ?,
                    last_decay_check = ?,
                    updated_at = ?
                WHERE id = 1
                """,
                values,
            )
            if cursor.rowcount == 0:
                # Fresh database: no singleton row yet, so the UPDATE above
                # was a no-op. Insert the row instead of dropping the change.
                await db.execute(
                    """
                    INSERT INTO tamareachy_state
                        (id, enabled, hunger, thirst, happiness, energy, boredom,
                         social, health, cleanliness, last_interaction,
                         last_decay_check, created_at, updated_at)
                    VALUES (1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                    """,
                    # values ends with updated_at; append created_at = now.
                    values[:-1] + (now, now),
                )
            await db.commit()

        logger.info("Updated TamaReachy state")
        return await self.get_tamareachy_state()

    async def reset_tamareachy_stats(self) -> dict:
        """Reset all TamaReachy stats to 100.

        Also stamps last_interaction/last_decay_check with the current time.

        Returns:
            The reset TamaReachy state dictionary.
        """
        now = datetime.utcnow().isoformat()
        return await self.update_tamareachy_state({
            "hunger": 100,
            "thirst": 100,
            "happiness": 100,
            "energy": 100,
            "boredom": 100,
            "social": 100,
            "health": 100,
            "cleanliness": 100,
            "last_interaction": now,
            "last_decay_check": now,
        })

    def _row_to_tamareachy(self, row: aiosqlite.Row) -> dict:
        """Convert a database row to a TamaReachy state dictionary.

        Args:
            row: The database row.

        Returns:
            TamaReachy state dictionary with proper types (enabled as bool).
        """
        return {
            "enabled": bool(row["enabled"]),
            "hunger": row["hunger"],
            "thirst": row["thirst"],
            "happiness": row["happiness"],
            "energy": row["energy"],
            "boredom": row["boredom"],
            "social": row["social"],
            "health": row["health"],
            "cleanliness": row["cleanliness"],
            "last_interaction": row["last_interaction"],
            "last_decay_check": row["last_decay_check"],
            "created_at": row["created_at"],
            "updated_at": row["updated_at"],
        }
|
| 145 |
+
|
reachys_brain/database/user_settings.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""User Settings operations for the database.
|
| 2 |
+
|
| 3 |
+
Provides methods for storing and retrieving user preferences and settings.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
|
| 9 |
+
import aiosqlite
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class UserSettingsMixin:
    """Mixin class providing User Settings operations.

    This mixin is designed to be used with DatabaseService.
    """

    db_path: str  # Provided by DatabaseService

    async def get_user_setting(self, key: str) -> str | None:
        """Fetch a single setting value.

        Args:
            key: The setting key (e.g., 'user_name').

        Returns:
            The stored value, or None when the key is absent.
        """
        async with aiosqlite.connect(self.db_path) as conn:
            cur = await conn.execute(
                "SELECT value FROM user_settings WHERE key = ?", (key,)
            )
            record = await cur.fetchone()
        if record is None:
            return None
        return record[0]

    async def set_user_setting(self, key: str, value: str) -> bool:
        """Store a setting value, creating or overwriting the key.

        Uses INSERT OR REPLACE to handle both new and existing keys.

        Args:
            key: The setting key.
            value: The setting value.

        Returns:
            True if successful.
        """
        timestamp = datetime.utcnow().isoformat()
        async with aiosqlite.connect(self.db_path) as conn:
            await conn.execute(
                """
                INSERT OR REPLACE INTO user_settings (key, value, updated_at)
                VALUES (?, ?, ?)
                """,
                (key, value, timestamp),
            )
            await conn.commit()

        logger.info(f"Set user setting: {key}")
        return True

    async def delete_user_setting(self, key: str) -> bool:
        """Remove a setting by key.

        Args:
            key: The setting key to delete.

        Returns:
            True if deleted, False if not found.
        """
        async with aiosqlite.connect(self.db_path) as conn:
            cur = await conn.execute(
                "DELETE FROM user_settings WHERE key = ?", (key,)
            )
            await conn.commit()
            removed = cur.rowcount > 0

        if removed:
            logger.info(f"Deleted user setting: {key}")
        return removed

    async def get_all_user_settings(self) -> dict[str, str]:
        """Return every stored setting as a key -> value mapping.

        Returns:
            Dictionary of key-value pairs.
        """
        async with aiosqlite.connect(self.db_path) as conn:
            cur = await conn.execute("SELECT key, value FROM user_settings")
            records = await cur.fetchall()
        return {key: value for key, value in records}
|
| 94 |
+
|
reachys_brain/database/websites.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Websites CRUD operations for the database.
|
| 2 |
+
|
| 3 |
+
Provides methods for creating, reading, updating, and deleting generated websites.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from typing import Optional
|
| 9 |
+
|
| 10 |
+
import aiosqlite
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class WebsitesMixin:
    """Mixin class providing Websites CRUD operations.

    This mixin is designed to be used with DatabaseService.
    """

    db_path: str  # Provided by DatabaseService

    async def get_all_websites(self) -> list[dict]:
        """Return every saved website, newest first.

        Returns:
            List of website dictionaries ordered by created_at descending.
        """
        async with aiosqlite.connect(self.db_path) as conn:
            conn.row_factory = aiosqlite.Row
            cur = await conn.execute(
                "SELECT * FROM websites ORDER BY created_at DESC"
            )
            records = await cur.fetchall()
        return [self._row_to_website(record) for record in records]

    async def get_website(self, website_id: str) -> Optional[dict]:
        """Look up a single website by ID.

        Args:
            website_id: The ID of the website.

        Returns:
            The website dictionary, or None if not found.
        """
        async with aiosqlite.connect(self.db_path) as conn:
            conn.row_factory = aiosqlite.Row
            cur = await conn.execute(
                "SELECT * FROM websites WHERE id = ?", (website_id,)
            )
            record = await cur.fetchone()
        if record is None:
            return None
        return self._row_to_website(record)

    async def create_website(self, website_data: dict) -> dict:
        """Insert a new website record and return the stored row.

        Args:
            website_data: Dictionary containing website data (must include
                'id' and 'title'; may carry 'description' and 'created_at').

        Returns:
            The created website dictionary.
        """
        timestamp = datetime.utcnow().isoformat()
        # Honour a caller-supplied creation time; otherwise use "now".
        created_at = website_data.get("created_at") or timestamp
        params = (
            website_data["id"],
            website_data["title"],
            website_data.get("description", ""),
            created_at,
            timestamp,
        )

        async with aiosqlite.connect(self.db_path) as conn:
            await conn.execute(
                """
                INSERT INTO websites (id, title, description, created_at, updated_at)
                VALUES (?, ?, ?, ?, ?)
                """,
                params,
            )
            await conn.commit()

        logger.info(f"Created website: {website_data['title']} ({website_data['id']})")
        return await self.get_website(website_data["id"])

    async def update_website(self, website_id: str, website_data: dict) -> Optional[dict]:
        """Update an existing website record.

        Fields absent from website_data keep their current values.

        Args:
            website_id: The ID of the website to update.
            website_data: Dictionary containing updated website data.

        Returns:
            The updated website dictionary, or None if not found.
        """
        current = await self.get_website(website_id)
        if current is None:
            return None

        timestamp = datetime.utcnow().isoformat()
        params = (
            website_data.get("title", current["title"]),
            website_data.get("description", current["description"]),
            timestamp,
            website_id,
        )

        async with aiosqlite.connect(self.db_path) as conn:
            await conn.execute(
                """
                UPDATE websites SET
                    title = ?,
                    description = ?,
                    updated_at = ?
                WHERE id = ?
                """,
                params,
            )
            await conn.commit()

        logger.info(f"Updated website: {website_id}")
        return await self.get_website(website_id)

    async def delete_website(self, website_id: str) -> bool:
        """Delete a website record.

        Args:
            website_id: The ID of the website to delete.

        Returns:
            True if deleted, False if not found.
        """
        async with aiosqlite.connect(self.db_path) as conn:
            cur = await conn.execute(
                "DELETE FROM websites WHERE id = ?", (website_id,)
            )
            await conn.commit()
            removed = cur.rowcount > 0

        if removed:
            logger.info(f"Deleted website: {website_id}")
        return removed

    def _row_to_website(self, row: aiosqlite.Row) -> dict:
        """Convert a database row to a website dictionary.

        Args:
            row: The database row.

        Returns:
            Website dictionary with proper types.
        """
        return {
            column: row[column]
            for column in ("id", "title", "description", "created_at", "updated_at")
        }
|
| 158 |
+
|
reachys_brain/openai_realtime/__init__.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""OpenAI Realtime API WebSocket client for voice conversations on Reachy.
|
| 2 |
+
|
| 3 |
+
This package provides the OpenAI Realtime API integration for voice conversations:
|
| 4 |
+
- service: Main OpenAIRealtimeService class
|
| 5 |
+
- enums: Connection, speaking, and response state enums
|
| 6 |
+
- session: Session configuration and system instructions
|
| 7 |
+
- tool_executor: Tool execution with rate limiting
|
| 8 |
+
- tools_loader: Lazy loading of tools to avoid circular imports
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from .enums import ConnectionState, SpeakingState, ResponseState, RealtimeEvent
|
| 12 |
+
from .service import OpenAIRealtimeService
|
| 13 |
+
|
| 14 |
+
__all__ = [
|
| 15 |
+
"ConnectionState",
|
| 16 |
+
"SpeakingState",
|
| 17 |
+
"ResponseState",
|
| 18 |
+
"RealtimeEvent",
|
| 19 |
+
"OpenAIRealtimeService",
|
| 20 |
+
]
|
| 21 |
+
|
reachys_brain/openai_realtime/enums.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Enums and data classes for OpenAI Realtime API."""
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass, field
|
| 4 |
+
from enum import Enum
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class ConnectionState(str, Enum):
    """WebSocket connection state."""

    DISCONNECTED = "disconnected"  # no active WebSocket to OpenAI
    CONNECTING = "connecting"      # connection attempt in progress
    CONNECTED = "connected"        # session established and usable
    ERROR = "error"                # connection or session failed
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class SpeakingState(str, Enum):
    """AI speaking state."""

    IDLE = "idle"          # no audio currently being spoken
    SPEAKING = "speaking"  # AI audio is actively playing/streaming
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class ResponseState(str, Enum):
    """AI response generation state.

    This tracks the full lifecycle of an AI response to ensure
    microphone is properly paused during the entire response cycle.
    Any state other than IDLE means a response is in flight and the
    microphone should be considered paused.
    """

    IDLE = "idle"  # No response in progress
    WAITING = "waiting"  # Request sent, waiting for response to start
    GENERATING = "generating"  # AI is generating/streaming audio
    COMPLETE = "complete"  # Response done, waiting for audio buffer to drain
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@dataclass
class RealtimeEvent:
    """Event from OpenAI Realtime API to relay to iOS."""

    # Realtime API event type string (e.g. "response.audio.delta").
    event_type: str
    # Raw event payload; defaults to an empty dict when the event carries no data.
    data: dict = field(default_factory=dict)
|
| 38 |
+
|
reachys_brain/{openai_realtime.py → openai_realtime/service.py}
RENAMED
|
@@ -1,57 +1,27 @@
|
|
| 1 |
-
"""OpenAI Realtime API WebSocket client for voice conversations on Reachy.
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
import asyncio
|
| 4 |
import base64
|
| 5 |
import json
|
| 6 |
import logging
|
| 7 |
import os
|
| 8 |
-
import
|
| 9 |
-
from dataclasses import dataclass, field
|
| 10 |
-
from enum import Enum
|
| 11 |
-
from typing import Any, Callable, Optional
|
| 12 |
|
| 13 |
import websockets
|
| 14 |
from websockets.client import WebSocketClientProtocol
|
| 15 |
|
| 16 |
-
from .routes.voice import (
|
| 17 |
-
get_current_voice,
|
| 18 |
-
get_current_language,
|
| 19 |
get_preferred_language,
|
| 20 |
-
get_vad_threshold,
|
| 21 |
-
get_vad_silence_ms,
|
| 22 |
-
get_vad_prefix_ms,
|
| 23 |
set_vad_settings_callback,
|
| 24 |
)
|
| 25 |
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
_tool_registry = None
|
| 31 |
-
|
| 32 |
-
def _get_app_tools():
|
| 33 |
-
"""Lazy import of APP_TOOLS to avoid circular imports."""
|
| 34 |
-
global APP_TOOLS
|
| 35 |
-
if APP_TOOLS is None:
|
| 36 |
-
from .app_tools import APP_TOOLS as tools
|
| 37 |
-
APP_TOOLS = tools
|
| 38 |
-
return APP_TOOLS
|
| 39 |
-
|
| 40 |
-
def _get_tools_handler():
|
| 41 |
-
"""Lazy import of tools handler to avoid circular imports."""
|
| 42 |
-
global _tools_handler
|
| 43 |
-
if _tools_handler is None:
|
| 44 |
-
from .app_tools import get_tools_handler
|
| 45 |
-
_tools_handler = get_tools_handler()
|
| 46 |
-
return _tools_handler
|
| 47 |
-
|
| 48 |
-
def _get_tool_registry():
|
| 49 |
-
"""Lazy import of tool registry to avoid circular imports."""
|
| 50 |
-
global _tool_registry
|
| 51 |
-
if _tool_registry is None:
|
| 52 |
-
from .tools import get_registry
|
| 53 |
-
_tool_registry = get_registry()
|
| 54 |
-
return _tool_registry
|
| 55 |
|
| 56 |
logger = logging.getLogger(__name__)
|
| 57 |
|
|
@@ -59,42 +29,6 @@ logger = logging.getLogger(__name__)
|
|
| 59 |
OPENAI_REALTIME_URL = "wss://api.openai.com/v1/realtime"
|
| 60 |
OPENAI_MODEL = "gpt-4o-realtime-preview-2024-12-17"
|
| 61 |
|
| 62 |
-
# Voice Activity Detection (VAD) settings are now configurable via VoiceSettings
|
| 63 |
-
# Use get_vad_threshold(), get_vad_silence_ms(), get_vad_prefix_ms() to get current values
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
class ConnectionState(str, Enum):
|
| 67 |
-
"""WebSocket connection state."""
|
| 68 |
-
DISCONNECTED = "disconnected"
|
| 69 |
-
CONNECTING = "connecting"
|
| 70 |
-
CONNECTED = "connected"
|
| 71 |
-
ERROR = "error"
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
class SpeakingState(str, Enum):
|
| 75 |
-
"""AI speaking state."""
|
| 76 |
-
IDLE = "idle"
|
| 77 |
-
SPEAKING = "speaking"
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
class ResponseState(str, Enum):
|
| 81 |
-
"""AI response generation state.
|
| 82 |
-
|
| 83 |
-
This tracks the full lifecycle of an AI response to ensure
|
| 84 |
-
microphone is properly paused during the entire response cycle.
|
| 85 |
-
"""
|
| 86 |
-
IDLE = "idle" # No response in progress
|
| 87 |
-
WAITING = "waiting" # Request sent, waiting for response to start
|
| 88 |
-
GENERATING = "generating" # AI is generating/streaming audio
|
| 89 |
-
COMPLETE = "complete" # Response done, waiting for audio buffer to drain
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
@dataclass
|
| 93 |
-
class RealtimeEvent:
|
| 94 |
-
"""Event from OpenAI Realtime API to relay to iOS."""
|
| 95 |
-
event_type: str
|
| 96 |
-
data: dict = field(default_factory=dict)
|
| 97 |
-
|
| 98 |
|
| 99 |
class OpenAIRealtimeService:
|
| 100 |
"""WebSocket client for OpenAI Realtime API.
|
|
@@ -111,41 +45,17 @@ class OpenAIRealtimeService:
|
|
| 111 |
# State
|
| 112 |
self._connection_state = ConnectionState.DISCONNECTED
|
| 113 |
self._speaking_state = SpeakingState.IDLE
|
| 114 |
-
self._response_state = ResponseState.IDLE
|
| 115 |
self._is_listening = False
|
| 116 |
self._current_transcript = ""
|
| 117 |
self._response_text_buffer = ""
|
| 118 |
-
self._response_audio_chunks = 0
|
| 119 |
-
self._response_audio_bytes = 0
|
| 120 |
|
| 121 |
-
# Event
|
| 122 |
-
# This matters because `session.update` (voice/language) only applies to *future* responses.
|
| 123 |
self._speaking_idle_event = asyncio.Event()
|
| 124 |
self._speaking_idle_event.set()
|
| 125 |
|
| 126 |
-
# Language settings
|
| 127 |
self._language = get_preferred_language()
|
| 128 |
-
self._language_names = {
|
| 129 |
-
"en": "English",
|
| 130 |
-
"nl": "Dutch",
|
| 131 |
-
"de": "German",
|
| 132 |
-
"fr": "French",
|
| 133 |
-
"es": "Spanish",
|
| 134 |
-
"it": "Italian",
|
| 135 |
-
"pt": "Portuguese",
|
| 136 |
-
"ja": "Japanese",
|
| 137 |
-
"ko": "Korean",
|
| 138 |
-
"zh": "Chinese",
|
| 139 |
-
"ar": "Arabic",
|
| 140 |
-
"hi": "Hindi",
|
| 141 |
-
"ru": "Russian",
|
| 142 |
-
"pl": "Polish",
|
| 143 |
-
"tr": "Turkish",
|
| 144 |
-
"sv": "Swedish",
|
| 145 |
-
"da": "Danish",
|
| 146 |
-
"no": "Norwegian",
|
| 147 |
-
"fi": "Finnish",
|
| 148 |
-
}
|
| 149 |
|
| 150 |
# Custom personality (system prompt)
|
| 151 |
self._custom_system_prompt: Optional[str] = None
|
|
@@ -154,42 +64,41 @@ class OpenAIRealtimeService:
|
|
| 154 |
self._enabled_tools: Optional[list[str]] = None
|
| 155 |
|
| 156 |
# Tool call tracking
|
| 157 |
-
self._pending_tool_calls: dict[str, dict] = {}
|
| 158 |
-
|
| 159 |
-
# Rate limiting for tool calls to prevent infinite loops
|
| 160 |
-
self._tool_call_count = 0
|
| 161 |
-
self._tool_call_reset_time = 0.0
|
| 162 |
-
self._MAX_TOOL_CALLS_PER_WINDOW = 10 # Max tool calls in 10 seconds
|
| 163 |
-
self._TOOL_CALL_WINDOW_SECONDS = 10.0
|
| 164 |
|
| 165 |
# Callbacks for iOS relay
|
| 166 |
self.on_connection_state: Optional[Callable[[ConnectionState], None]] = None
|
| 167 |
self.on_speaking_state: Optional[Callable[[SpeakingState], None]] = None
|
| 168 |
-
self.on_response_state: Optional[Callable[[ResponseState], None]] = None
|
| 169 |
self.on_transcript_update: Optional[Callable[[str], None]] = None
|
| 170 |
self.on_response_text: Optional[Callable[[str], None]] = None
|
| 171 |
self.on_audio_delta: Optional[Callable[[bytes], None]] = None
|
| 172 |
self.on_error: Optional[Callable[[str], None]] = None
|
| 173 |
-
self.on_app_change: Optional[Callable[[dict], None]] = None
|
| 174 |
-
self.on_tool_usage: Optional[Callable[[str, str], None]] = None
|
| 175 |
-
self.on_website_ready: Optional[Callable[[dict], None]] = None
|
| 176 |
|
| 177 |
# Meeting callbacks
|
| 178 |
-
self.on_meeting_started: Optional[Callable[[str, str], None]] = None
|
| 179 |
-
self.on_meeting_stopped: Optional[Callable[[str], None]] = None
|
| 180 |
-
self.on_meeting_transcript_update: Optional[Callable[[str, str], None]] = None
|
| 181 |
|
| 182 |
-
# Session update callback
|
| 183 |
self.on_session_updated: Optional[Callable[[], None]] = None
|
| 184 |
|
| 185 |
# Session update tracking for async waiting
|
| 186 |
self._session_update_event: Optional[asyncio.Event] = None
|
| 187 |
|
| 188 |
-
#
|
|
|
|
|
|
|
|
|
|
| 189 |
self._setup_tools_handler()
|
| 190 |
self._setup_meeting_callbacks()
|
| 191 |
self._setup_vad_callback()
|
| 192 |
|
|
|
|
|
|
|
| 193 |
@property
|
| 194 |
def connection_state(self) -> ConnectionState:
|
| 195 |
"""Get current connection state."""
|
|
@@ -207,10 +116,7 @@ class OpenAIRealtimeService:
|
|
| 207 |
|
| 208 |
@property
|
| 209 |
def is_responding(self) -> bool:
|
| 210 |
-
"""Check if AI is currently responding
|
| 211 |
-
|
| 212 |
-
When True, microphone should be paused to prevent VAD interruption.
|
| 213 |
-
"""
|
| 214 |
return self._response_state in (
|
| 215 |
ResponseState.WAITING,
|
| 216 |
ResponseState.GENERATING,
|
|
@@ -240,139 +146,42 @@ class OpenAIRealtimeService:
|
|
| 240 |
if self.is_connected:
|
| 241 |
asyncio.create_task(self._update_session())
|
| 242 |
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
- You can remember the user's name and country for more personal interactions.
|
| 273 |
-
|
| 274 |
-
NAME:
|
| 275 |
-
- When a user introduces themselves or tells you their name (e.g., "My name is John", "I'm Sarah", "Call me Mike"),
|
| 276 |
-
use the remember_user_name tool to save it. This makes conversations more personal!
|
| 277 |
-
- You can use get_user_name to check if you already know someone's name.
|
| 278 |
-
- IMPORTANT: If you DON'T know the user's name yet and it's the first few exchanges, naturally ask for their name.
|
| 279 |
-
Example: "By the way, I don't think I know your name yet. What should I call you?"
|
| 280 |
-
Don't ask for their name every single time - just once if you don't know it.
|
| 281 |
-
|
| 282 |
-
COUNTRY:
|
| 283 |
-
- After you know the user's name, also ask for their country so you can show them correct local time.
|
| 284 |
-
- When a user tells you their country (e.g., "I live in the Netherlands", "I'm from Germany", "My country is Japan"),
|
| 285 |
-
use the remember_preferred_country tool to save it.
|
| 286 |
-
- You can use get_preferred_country to check if you already know someone's country.
|
| 287 |
-
- IMPORTANT: If you DON'T know the user's country yet and you already have their name, naturally ask for it.
|
| 288 |
-
Example: "And what country are you in? That way I can tell you the correct local time!"
|
| 289 |
-
Don't ask for their country every single time - just once if you don't know it.
|
| 290 |
-
- The country is used to determine the user's timezone for time queries.
|
| 291 |
-
|
| 292 |
-
TOOLS AND CAPABILITIES:
|
| 293 |
-
|
| 294 |
-
IMPORTANT RULE - ALWAYS ANNOUNCE TOOL USAGE:
|
| 295 |
-
Before using ANY tool, you MUST tell the user what you're about to do. This helps them understand what's happening. Examples:
|
| 296 |
-
- Web Search: "Let me search for that..." / "I'll look that up..."
|
| 297 |
-
- Weather: "Let me check the weather..." / "Checking the forecast..."
|
| 298 |
-
- Camera/Vision: "Let me take a look..." / "I'll see what that is..."
|
| 299 |
-
- App commands: "I'll create that app for you..." / "Switching to..."
|
| 300 |
-
- Reminders: "I'll add that reminder..." / "Let me check your reminders..."
|
| 301 |
-
|
| 302 |
-
1. WEB SEARCH (IMPORTANT - USE PROACTIVELY):
|
| 303 |
-
ALWAYS use web_search when the user asks about:
|
| 304 |
-
- Facts, trivia, or general knowledge questions
|
| 305 |
-
- People (celebrities, politicians, scientists, etc.)
|
| 306 |
-
- Places, locations, cities, countries, landmarks
|
| 307 |
-
- Maps, directions, or geographic information
|
| 308 |
-
- Current events, news, or recent happenings
|
| 309 |
-
- Sports scores, game results, standings
|
| 310 |
-
- Movies, TV shows, music, or entertainment
|
| 311 |
-
- Historical events or dates
|
| 312 |
-
- Prices, stock values, or market information
|
| 313 |
-
- Product information or reviews
|
| 314 |
-
- Any question where accuracy or up-to-date info matters
|
| 315 |
-
|
| 316 |
-
DO NOT rely on your training data for factual questions - ALWAYS search first!
|
| 317 |
-
Examples: "Who is the president of France?", "What is the capital of Australia?",
|
| 318 |
-
"Tell me about Einstein", "Where is the Eiffel Tower?", "How tall is Mount Everest?"
|
| 319 |
-
|
| 320 |
-
2. WEATHER: Use get_weather to check weather conditions for any location.
|
| 321 |
-
- "What's the weather in Paris?"
|
| 322 |
-
- "Will it rain in Tokyo tomorrow?"
|
| 323 |
-
- ALWAYS say "Let me check the weather..." before using this tool.
|
| 324 |
-
|
| 325 |
-
3. VISION: Use recognize_object to see and describe what's in front of you.
|
| 326 |
-
- When user says: "What is this?", "Can you see this?", "What do you see?"
|
| 327 |
-
- When user says: "Look at this", "Recognize this", "What am I holding?"
|
| 328 |
-
- ALWAYS say "Let me take a look..." before using this tool.
|
| 329 |
-
- Respond naturally with what you observe.
|
| 330 |
-
|
| 331 |
-
4. CUSTOM APPS: You can create and manage custom assistant personalities.
|
| 332 |
-
- "Create an app", "Make me a tutor": Use create_custom_app
|
| 333 |
-
- "Activate the...", "Switch to...": Use activate_custom_app
|
| 334 |
-
- "Go back to normal", "Deactivate": Use deactivate_app
|
| 335 |
-
- "What apps do I have?": Use list_custom_apps
|
| 336 |
-
|
| 337 |
-
IMPORTANT: For app creation/activation/deactivation - ALWAYS ask for confirmation first.
|
| 338 |
-
|
| 339 |
-
5. POWER CONTROL: Control your motors (no confirmation needed):
|
| 340 |
-
- "Wake up", "Turn on": Use wake_up
|
| 341 |
-
- "Go to sleep", "Sleep": Use go_to_sleep
|
| 342 |
-
|
| 343 |
-
6. PERSONALIZATION: Remember user details for personal interactions.
|
| 344 |
-
- When user says their name: Use remember_user_name to save it
|
| 345 |
-
- To check if you know their name: Use get_user_name
|
| 346 |
-
- When user says their country: Use remember_preferred_country to save it
|
| 347 |
-
- To check if you know their country: Use get_preferred_country
|
| 348 |
-
|
| 349 |
-
7. WEBSITE GENERATION: Create beautiful websites with voice commands!
|
| 350 |
-
- Use generate_website when user asks to create a website, landing page, portfolio, etc.
|
| 351 |
-
- Examples: "Create a website for my bakery", "Make me a portfolio", "Build a landing page"
|
| 352 |
-
- You can also EDIT existing websites: "Change the colors", "Add a contact section"
|
| 353 |
-
- ALWAYS say "Let me create that website for you..." before using this tool.
|
| 354 |
-
- The website will be shown as a live preview while being built!
|
| 355 |
-
|
| 356 |
-
8. REMINDERS: Manage the user's iOS Reminders!
|
| 357 |
-
- Use add_reminder when the user wants to be reminded of something.
|
| 358 |
-
- Examples: "Remind me to call mom tomorrow at 3pm", "Add a reminder to buy groceries"
|
| 359 |
-
- ALWAYS say "I'll add that reminder for you..." before using this tool.
|
| 360 |
-
- Use get_reminders when the user wants to see their reminders.
|
| 361 |
-
- Examples: "What reminders do I have?", "Show me my reminders for today", "What's on my to-do list?"
|
| 362 |
-
- ALWAYS say "Let me check your reminders..." before using this tool.
|
| 363 |
-
- You can filter by date: today, tomorrow, this_week, or all.""" + language_instruction
|
| 364 |
|
| 365 |
def set_custom_personality(
|
| 366 |
self,
|
| 367 |
system_prompt: str,
|
| 368 |
enabled_tools: Optional[list[str]] = None,
|
| 369 |
) -> None:
|
| 370 |
-
"""Set a custom personality/system prompt and enabled tools.
|
| 371 |
-
|
| 372 |
-
Args:
|
| 373 |
-
system_prompt: The custom system prompt for the personality.
|
| 374 |
-
enabled_tools: List of enabled tool IDs. None means use all tools.
|
| 375 |
-
"""
|
| 376 |
logger.info(f"🎭 Setting custom personality ({len(system_prompt)} chars)")
|
| 377 |
self._custom_system_prompt = system_prompt
|
| 378 |
self._enabled_tools = enabled_tools
|
|
@@ -380,7 +189,6 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 380 |
if enabled_tools is not None:
|
| 381 |
logger.info(f"🔧 Custom app tools: {enabled_tools}")
|
| 382 |
|
| 383 |
-
# Update session if connected
|
| 384 |
if self.is_connected:
|
| 385 |
asyncio.create_task(self._update_session())
|
| 386 |
|
|
@@ -388,51 +196,17 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 388 |
"""Clear custom personality and revert to default Reachy."""
|
| 389 |
logger.info("🎭 Clearing custom personality - reverting to default Reachy")
|
| 390 |
self._custom_system_prompt = None
|
| 391 |
-
self._enabled_tools = None
|
| 392 |
|
| 393 |
-
# Update session if connected
|
| 394 |
if self.is_connected:
|
| 395 |
asyncio.create_task(self._update_session())
|
| 396 |
|
| 397 |
-
|
| 398 |
-
"""Get all tools to register with OpenAI.
|
| 399 |
-
|
| 400 |
-
Combines base APP_TOOLS (app management, power) with enabled dynamic tools
|
| 401 |
-
(weather, web_search) based on the active app's configuration.
|
| 402 |
-
|
| 403 |
-
Returns:
|
| 404 |
-
List of OpenAI function definitions.
|
| 405 |
-
"""
|
| 406 |
-
# Always include base app tools (create, activate, deactivate, etc.)
|
| 407 |
-
base_tools = _get_app_tools()
|
| 408 |
-
|
| 409 |
-
# Get dynamic tools from registry based on enabled_tools
|
| 410 |
-
registry = _get_tool_registry()
|
| 411 |
-
|
| 412 |
-
if self._enabled_tools is None:
|
| 413 |
-
# Default Reachy: enable all dynamic tools
|
| 414 |
-
dynamic_tool_ids = registry.get_all_ids()
|
| 415 |
-
else:
|
| 416 |
-
# Custom app: only enable specified tools
|
| 417 |
-
dynamic_tool_ids = self._enabled_tools
|
| 418 |
-
|
| 419 |
-
# Get tool definitions for enabled dynamic tools
|
| 420 |
-
dynamic_tools = registry.get_definitions(dynamic_tool_ids)
|
| 421 |
-
|
| 422 |
-
# Combine base + dynamic tools
|
| 423 |
-
all_tools = list(base_tools) + dynamic_tools
|
| 424 |
-
|
| 425 |
-
logger.info(
|
| 426 |
-
f"🔧 Tools: {len(base_tools)} base + {len(dynamic_tools)} dynamic = {len(all_tools)} total"
|
| 427 |
-
)
|
| 428 |
-
|
| 429 |
-
return all_tools
|
| 430 |
|
| 431 |
def _setup_tools_handler(self) -> None:
|
| 432 |
"""Setup callbacks for the app tools handler."""
|
| 433 |
-
tools_handler =
|
| 434 |
|
| 435 |
-
# When tools change personality
|
| 436 |
def on_personality_change(
|
| 437 |
system_prompt: Optional[str],
|
| 438 |
enabled_tools: Optional[list[str]] = None,
|
|
@@ -444,7 +218,6 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 444 |
|
| 445 |
tools_handler.on_personality_change = on_personality_change
|
| 446 |
|
| 447 |
-
# When tools change active app (async callback)
|
| 448 |
async def on_app_change(data: dict) -> None:
|
| 449 |
if self.on_app_change:
|
| 450 |
self.on_app_change(data)
|
|
@@ -453,39 +226,31 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 453 |
|
| 454 |
def _setup_meeting_callbacks(self) -> None:
|
| 455 |
"""Setup callbacks for meeting tool events."""
|
| 456 |
-
from .tools.meeting import set_meeting_callbacks
|
| 457 |
|
| 458 |
-
def
|
| 459 |
-
"""Called when a meeting recording starts."""
|
| 460 |
logger.info(f"📝 Meeting started: {title} ({meeting_id})")
|
| 461 |
if self.on_meeting_started:
|
| 462 |
self.on_meeting_started(meeting_id, title)
|
| 463 |
|
| 464 |
-
def
|
| 465 |
-
"""Called when a meeting recording stops."""
|
| 466 |
logger.info(f"📝 Meeting stopped: {meeting_id}")
|
| 467 |
if self.on_meeting_stopped:
|
| 468 |
self.on_meeting_stopped(meeting_id)
|
| 469 |
|
| 470 |
def on_transcript_update(meeting_id: str, transcript: str) -> None:
|
| 471 |
-
"""Called when meeting transcript is updated."""
|
| 472 |
if self.on_meeting_transcript_update:
|
| 473 |
self.on_meeting_transcript_update(meeting_id, transcript)
|
| 474 |
|
| 475 |
set_meeting_callbacks(
|
| 476 |
-
on_started=
|
| 477 |
-
on_stopped=
|
| 478 |
on_transcript_update=on_transcript_update,
|
| 479 |
)
|
| 480 |
|
| 481 |
def _setup_vad_callback(self) -> None:
|
| 482 |
-
"""Setup callback for VAD settings changes.
|
| 483 |
-
|
| 484 |
-
When user changes VAD settings via the app, this triggers
|
| 485 |
-
a session reconfiguration to apply the new settings.
|
| 486 |
-
"""
|
| 487 |
def on_vad_changed() -> None:
|
| 488 |
-
"""Called when VAD settings are changed."""
|
| 489 |
if self.is_connected:
|
| 490 |
logger.info("🎤 VAD settings changed, updating OpenAI session...")
|
| 491 |
asyncio.create_task(self._configure_session())
|
|
@@ -493,68 +258,21 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 493 |
set_vad_settings_callback(on_vad_changed)
|
| 494 |
|
| 495 |
def _append_to_meeting_transcript(self, text: str, speaker: str = "user") -> None:
|
| 496 |
-
"""Append transcribed text to active meeting if one exists.
|
| 497 |
-
|
| 498 |
-
Args:
|
| 499 |
-
text: The transcribed text.
|
| 500 |
-
speaker: Who spoke ('user' or 'assistant').
|
| 501 |
-
"""
|
| 502 |
-
from .tools.meeting import is_meeting_active, append_to_transcript
|
| 503 |
|
| 504 |
if is_meeting_active() and text.strip():
|
| 505 |
-
# Format with speaker label
|
| 506 |
formatted_text = f"[{speaker.upper()}]: {text.strip()}"
|
| 507 |
append_to_transcript(formatted_text)
|
| 508 |
|
| 509 |
-
|
| 510 |
-
"""Update connection state and notify callback."""
|
| 511 |
-
self._connection_state = state
|
| 512 |
-
if self.on_connection_state:
|
| 513 |
-
self.on_connection_state(state)
|
| 514 |
-
|
| 515 |
-
def _set_speaking_state(self, state: SpeakingState) -> None:
|
| 516 |
-
"""Update speaking state and notify callback."""
|
| 517 |
-
if self._speaking_state != state:
|
| 518 |
-
self._speaking_state = state
|
| 519 |
-
# Keep an event that indicates whether Reachy/OpenAI are currently speaking.
|
| 520 |
-
if state == SpeakingState.IDLE:
|
| 521 |
-
self._speaking_idle_event.set()
|
| 522 |
-
else:
|
| 523 |
-
self._speaking_idle_event.clear()
|
| 524 |
-
if self.on_speaking_state:
|
| 525 |
-
self.on_speaking_state(state)
|
| 526 |
-
|
| 527 |
-
def _set_response_state(self, state: ResponseState) -> None:
|
| 528 |
-
"""Update response state and notify callback.
|
| 529 |
-
|
| 530 |
-
Response state tracks the full lifecycle of an AI response:
|
| 531 |
-
- IDLE: No response in progress, microphone can be active
|
| 532 |
-
- WAITING: Request sent, waiting for first audio - microphone should be paused
|
| 533 |
-
- GENERATING: Audio is being generated/streamed - microphone should be paused
|
| 534 |
-
- COMPLETE: Response done, waiting for buffer drain - microphone should be paused
|
| 535 |
-
"""
|
| 536 |
-
if self._response_state != state:
|
| 537 |
-
old_state = self._response_state
|
| 538 |
-
self._response_state = state
|
| 539 |
-
logger.info(f"📊 Response state: {old_state.value} -> {state.value}")
|
| 540 |
-
if self.on_response_state:
|
| 541 |
-
self.on_response_state(state)
|
| 542 |
-
# Reset per-response counters when a new response is requested.
|
| 543 |
-
if state == ResponseState.WAITING:
|
| 544 |
-
self._response_audio_chunks = 0
|
| 545 |
-
self._response_audio_bytes = 0
|
| 546 |
|
| 547 |
async def connect(self, api_key: Optional[str] = None) -> None:
|
| 548 |
-
"""Connect to OpenAI Realtime API.
|
| 549 |
-
|
| 550 |
-
Args:
|
| 551 |
-
api_key: OpenAI API key. If not provided, uses OPENAI_API_KEY env var.
|
| 552 |
-
"""
|
| 553 |
if self.is_connected:
|
| 554 |
logger.warning("Already connected to OpenAI")
|
| 555 |
return
|
| 556 |
|
| 557 |
-
# Get API key
|
| 558 |
key = api_key or os.environ.get("OPENAI_API_KEY")
|
| 559 |
if not key:
|
| 560 |
logger.error("❌ No OpenAI API key provided or found in environment")
|
|
@@ -564,10 +282,7 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 564 |
self._set_connection_state(ConnectionState.CONNECTING)
|
| 565 |
|
| 566 |
try:
|
| 567 |
-
# Build URL with model parameter
|
| 568 |
url = f"{OPENAI_REALTIME_URL}?model={OPENAI_MODEL}"
|
| 569 |
-
|
| 570 |
-
# Connect with headers
|
| 571 |
headers = {
|
| 572 |
"Authorization": f"Bearer {key}",
|
| 573 |
"OpenAI-Beta": "realtime=v1",
|
|
@@ -584,14 +299,12 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 584 |
|
| 585 |
logger.info("✅ WebSocket connected to OpenAI Realtime API")
|
| 586 |
|
| 587 |
-
# Configure session
|
| 588 |
logger.info("⚙️ Configuring OpenAI session...")
|
| 589 |
await self._configure_session()
|
| 590 |
|
| 591 |
self._set_connection_state(ConnectionState.CONNECTED)
|
| 592 |
logger.info("✅ OpenAI Realtime session configured and ready!")
|
| 593 |
|
| 594 |
-
# Start receiving messages
|
| 595 |
self._receive_task = asyncio.create_task(self._receive_messages())
|
| 596 |
|
| 597 |
except Exception as e:
|
|
@@ -620,49 +333,26 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 620 |
self._set_connection_state(ConnectionState.DISCONNECTED)
|
| 621 |
logger.info("Disconnected from OpenAI Realtime API")
|
| 622 |
|
|
|
|
|
|
|
| 623 |
async def _configure_session(self) -> None:
|
| 624 |
-
"""Configure the OpenAI session with audio settings and
|
| 625 |
-
|
| 626 |
-
voice = get_current_voice()
|
| 627 |
|
| 628 |
-
# Sync language with preferred setting
|
| 629 |
self._language = get_preferred_language()
|
|
|
|
| 630 |
|
| 631 |
-
#
|
| 632 |
-
|
| 633 |
-
vad_silence_ms = get_vad_silence_ms()
|
| 634 |
-
vad_prefix_ms = get_vad_prefix_ms()
|
| 635 |
|
| 636 |
-
|
| 637 |
-
|
| 638 |
-
|
| 639 |
-
|
| 640 |
-
|
|
|
|
| 641 |
)
|
| 642 |
|
| 643 |
-
session_config = {
|
| 644 |
-
"type": "session.update",
|
| 645 |
-
"session": {
|
| 646 |
-
"modalities": ["text", "audio"],
|
| 647 |
-
"instructions": self._get_system_instructions(),
|
| 648 |
-
"voice": voice,
|
| 649 |
-
"input_audio_format": "pcm16",
|
| 650 |
-
"output_audio_format": "pcm16",
|
| 651 |
-
"input_audio_transcription": {
|
| 652 |
-
"model": "whisper-1",
|
| 653 |
-
"language": self._language,
|
| 654 |
-
},
|
| 655 |
-
"turn_detection": {
|
| 656 |
-
"type": "server_vad",
|
| 657 |
-
"threshold": vad_threshold,
|
| 658 |
-
"prefix_padding_ms": vad_prefix_ms,
|
| 659 |
-
"silence_duration_ms": vad_silence_ms,
|
| 660 |
-
},
|
| 661 |
-
"tools": all_tools,
|
| 662 |
-
"tool_choice": "auto",
|
| 663 |
-
},
|
| 664 |
-
}
|
| 665 |
-
|
| 666 |
await self._send_message(session_config)
|
| 667 |
|
| 668 |
async def _update_session(self) -> None:
|
|
@@ -670,7 +360,7 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 670 |
if not self.is_connected:
|
| 671 |
return
|
| 672 |
|
| 673 |
-
lang_name =
|
| 674 |
logger.info(f"Updating OpenAI session language to {lang_name}")
|
| 675 |
|
| 676 |
await self._configure_session()
|
|
@@ -682,6 +372,8 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 682 |
|
| 683 |
await self._websocket.send(json.dumps(message))
|
| 684 |
|
|
|
|
|
|
|
| 685 |
async def _receive_messages(self) -> None:
|
| 686 |
"""Receive and handle messages from OpenAI."""
|
| 687 |
if not self._websocket:
|
|
@@ -705,105 +397,76 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 705 |
data = json.loads(message)
|
| 706 |
msg_type = data.get("type", "")
|
| 707 |
|
| 708 |
-
|
| 709 |
-
if msg_type not in ("response.audio.delta",): # Skip noisy audio deltas
|
| 710 |
logger.debug(f"📨 OpenAI event: {msg_type}")
|
| 711 |
|
| 712 |
if msg_type in ("session.created", "session.updated"):
|
| 713 |
logger.info("✅ OpenAI session configured")
|
| 714 |
-
# Signal any waiting tasks that session update is complete
|
| 715 |
if self._session_update_event:
|
| 716 |
self._session_update_event.set()
|
| 717 |
-
# Notify callback
|
| 718 |
if self.on_session_updated:
|
| 719 |
self.on_session_updated()
|
| 720 |
|
| 721 |
elif msg_type == "input_audio_buffer.speech_started":
|
| 722 |
-
# User started speaking - but ignore if AI is responding
|
| 723 |
-
# This prevents VAD from interrupting responses due to echo/noise
|
| 724 |
if self.is_responding:
|
| 725 |
logger.warning(
|
| 726 |
-
"🎤 VAD speech detected during AI response - ignoring "
|
| 727 |
f"(response_state={self._response_state.value})"
|
| 728 |
)
|
| 729 |
-
# Don't change speaking state - let the response complete
|
| 730 |
else:
|
| 731 |
logger.info("🎤 Speech detected - user is speaking")
|
| 732 |
self._set_speaking_state(SpeakingState.IDLE)
|
| 733 |
|
| 734 |
elif msg_type == "input_audio_buffer.speech_stopped":
|
| 735 |
-
# User stopped speaking
|
| 736 |
logger.info("🎤 Speech ended - processing...")
|
| 737 |
|
| 738 |
elif msg_type == "conversation.item.input_audio_transcription.completed":
|
| 739 |
-
# User's speech was transcribed
|
| 740 |
transcript = data.get("transcript", "")
|
| 741 |
logger.info(f"📝 User transcript: {transcript}")
|
| 742 |
if transcript:
|
| 743 |
self._current_transcript = transcript
|
| 744 |
if self.on_transcript_update:
|
| 745 |
self.on_transcript_update(transcript)
|
| 746 |
-
# Append to active meeting transcript
|
| 747 |
self._append_to_meeting_transcript(transcript, speaker="user")
|
| 748 |
|
| 749 |
elif msg_type == "response.audio_transcript.delta":
|
| 750 |
-
# AI response text delta
|
| 751 |
delta = data.get("delta", "")
|
| 752 |
if delta:
|
| 753 |
self._response_text_buffer += delta
|
| 754 |
|
| 755 |
elif msg_type == "response.audio_transcript.done":
|
| 756 |
-
# AI response text complete
|
| 757 |
transcript = data.get("transcript", "")
|
| 758 |
if transcript:
|
| 759 |
self._response_text_buffer = ""
|
| 760 |
if self.on_response_text:
|
| 761 |
self.on_response_text(transcript)
|
| 762 |
-
# Append to active meeting transcript
|
| 763 |
self._append_to_meeting_transcript(transcript, speaker="assistant")
|
| 764 |
|
| 765 |
elif msg_type == "response.audio.delta":
|
| 766 |
-
# AI audio response chunk
|
| 767 |
audio_base64 = data.get("delta", "")
|
| 768 |
if audio_base64:
|
| 769 |
-
# Set speaking state on first audio chunk
|
| 770 |
if self._speaking_state != SpeakingState.SPEAKING:
|
| 771 |
self._set_speaking_state(SpeakingState.SPEAKING)
|
| 772 |
|
| 773 |
-
# Transition from WAITING to GENERATING on first audio
|
| 774 |
if self._response_state == ResponseState.WAITING:
|
| 775 |
self._set_response_state(ResponseState.GENERATING)
|
| 776 |
|
| 777 |
-
# Decode and forward audio
|
| 778 |
audio_data = base64.b64decode(audio_base64)
|
| 779 |
-
self._response_audio_chunks += 1
|
| 780 |
-
self._response_audio_bytes += len(audio_data)
|
| 781 |
if self.on_audio_delta:
|
| 782 |
self.on_audio_delta(audio_data)
|
| 783 |
|
| 784 |
elif msg_type == "response.audio.done":
|
| 785 |
-
# AI audio response complete - set to COMPLETE (waiting for buffer drain)
|
| 786 |
self._set_speaking_state(SpeakingState.IDLE)
|
| 787 |
self._set_response_state(ResponseState.COMPLETE)
|
| 788 |
-
logger.info(
|
| 789 |
-
"🔊 OpenAI audio done "
|
| 790 |
-
f"({self._response_audio_chunks} chunks, {self._response_audio_bytes} bytes)"
|
| 791 |
-
)
|
| 792 |
|
| 793 |
elif msg_type == "response.done":
|
| 794 |
-
# Full response complete
|
| 795 |
self._set_speaking_state(SpeakingState.IDLE)
|
| 796 |
-
# Only set COMPLETE if we were generating (avoid duplicate transitions)
|
| 797 |
if self._response_state == ResponseState.GENERATING:
|
| 798 |
self._set_response_state(ResponseState.COMPLETE)
|
| 799 |
if self._response_text_buffer:
|
| 800 |
if self.on_response_text:
|
| 801 |
self.on_response_text(self._response_text_buffer)
|
| 802 |
self._response_text_buffer = ""
|
| 803 |
-
logger.info(
|
| 804 |
-
"✅ OpenAI response done "
|
| 805 |
-
f"({self._response_audio_chunks} chunks, {self._response_audio_bytes} bytes)"
|
| 806 |
-
)
|
| 807 |
|
| 808 |
elif msg_type == "error":
|
| 809 |
error_data = data.get("error", {})
|
|
@@ -820,9 +483,8 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 820 |
elif msg_type == "input_audio_buffer.cleared":
|
| 821 |
logger.info("🗑️ Audio buffer cleared")
|
| 822 |
|
| 823 |
-
# Tool
|
| 824 |
elif msg_type == "response.output_item.added":
|
| 825 |
-
# A new output item was added - could be a function call
|
| 826 |
item = data.get("item", {})
|
| 827 |
if item.get("type") == "function_call":
|
| 828 |
call_id = item.get("call_id", "")
|
|
@@ -832,19 +494,16 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 832 |
"name": func_name,
|
| 833 |
"arguments_buffer": ""
|
| 834 |
}
|
| 835 |
-
# Notify iOS that a tool is being used
|
| 836 |
if self.on_tool_usage:
|
| 837 |
self.on_tool_usage(func_name, "started")
|
| 838 |
|
| 839 |
elif msg_type == "response.function_call_arguments.delta":
|
| 840 |
-
# Incremental function arguments
|
| 841 |
call_id = data.get("call_id", "")
|
| 842 |
delta = data.get("delta", "")
|
| 843 |
if call_id in self._pending_tool_calls:
|
| 844 |
self._pending_tool_calls[call_id]["arguments_buffer"] += delta
|
| 845 |
|
| 846 |
elif msg_type == "response.function_call_arguments.done":
|
| 847 |
-
# Function call arguments complete
|
| 848 |
call_id = data.get("call_id", "")
|
| 849 |
arguments_str = data.get("arguments", "")
|
| 850 |
|
|
@@ -852,7 +511,6 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 852 |
tool_info = self._pending_tool_calls[call_id]
|
| 853 |
tool_name = tool_info["name"]
|
| 854 |
|
| 855 |
-
# Use the complete arguments from the event
|
| 856 |
logger.info(f"🔧 Function call complete: {tool_name}")
|
| 857 |
logger.debug(f"🔧 Arguments: {arguments_str}")
|
| 858 |
|
|
@@ -862,7 +520,6 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 862 |
)
|
| 863 |
|
| 864 |
elif msg_type == "response.output_item.done":
|
| 865 |
-
# Output item complete - cleanup pending tool call if it was a function
|
| 866 |
item = data.get("item", {})
|
| 867 |
if item.get("type") == "function_call":
|
| 868 |
call_id = item.get("call_id", "")
|
|
@@ -875,133 +532,24 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 875 |
except json.JSONDecodeError:
|
| 876 |
logger.error(f"Failed to parse message: {message[:100]}")
|
| 877 |
|
| 878 |
-
# MARK: - Tool Execution
|
| 879 |
-
|
| 880 |
async def _execute_tool_call(self, call_id: str, tool_name: str, arguments_str: str) -> None:
|
| 881 |
-
"""Execute a tool call
|
| 882 |
-
|
| 883 |
-
|
| 884 |
-
|
| 885 |
-
|
| 886 |
-
|
| 887 |
-
|
| 888 |
-
Args:
|
| 889 |
-
call_id: The unique ID for this tool call.
|
| 890 |
-
tool_name: Name of the tool to execute.
|
| 891 |
-
arguments_str: JSON string of tool arguments.
|
| 892 |
-
"""
|
| 893 |
-
# Rate limiting to prevent infinite tool call loops
|
| 894 |
-
current_time = time.time()
|
| 895 |
-
if current_time - self._tool_call_reset_time > self._TOOL_CALL_WINDOW_SECONDS:
|
| 896 |
-
# Reset the counter after the window expires
|
| 897 |
-
self._tool_call_count = 0
|
| 898 |
-
self._tool_call_reset_time = current_time
|
| 899 |
-
|
| 900 |
-
self._tool_call_count += 1
|
| 901 |
-
|
| 902 |
-
if self._tool_call_count > self._MAX_TOOL_CALLS_PER_WINDOW:
|
| 903 |
-
logger.warning(
|
| 904 |
-
f"⚠️ Tool call rate limit exceeded ({self._tool_call_count} calls in "
|
| 905 |
-
f"{self._TOOL_CALL_WINDOW_SECONDS}s). Possible infinite loop detected."
|
| 906 |
)
|
| 907 |
-
# Send an error back to OpenAI to break the loop
|
| 908 |
-
error_result = {
|
| 909 |
-
"type": "conversation.item.create",
|
| 910 |
-
"item": {
|
| 911 |
-
"type": "function_call_output",
|
| 912 |
-
"call_id": call_id,
|
| 913 |
-
"output": json.dumps({
|
| 914 |
-
"success": False,
|
| 915 |
-
"error": "Rate limit exceeded. Please wait before making more requests."
|
| 916 |
-
})
|
| 917 |
-
}
|
| 918 |
-
}
|
| 919 |
-
await self._send_message(error_result)
|
| 920 |
-
# Don't trigger another response - this breaks the loop
|
| 921 |
-
return
|
| 922 |
|
| 923 |
-
|
| 924 |
-
# Parse arguments
|
| 925 |
-
try:
|
| 926 |
-
arguments = json.loads(arguments_str) if arguments_str else {}
|
| 927 |
-
except json.JSONDecodeError:
|
| 928 |
-
arguments = {}
|
| 929 |
-
logger.warning(f"Failed to parse tool arguments: {arguments_str}")
|
| 930 |
-
|
| 931 |
-
# Check if this is a dynamic tool from the registry
|
| 932 |
-
registry = _get_tool_registry()
|
| 933 |
-
dynamic_tool = registry.get(tool_name)
|
| 934 |
-
|
| 935 |
-
if dynamic_tool:
|
| 936 |
-
# Execute dynamic tool (weather, web_search, etc.)
|
| 937 |
-
logger.info(f"🔧 Executing dynamic tool: {tool_name}")
|
| 938 |
-
result = await registry.execute(tool_name, arguments)
|
| 939 |
-
else:
|
| 940 |
-
# Execute base app tool (create_custom_app, activate_custom_app, etc.)
|
| 941 |
-
tools_handler = _get_tools_handler()
|
| 942 |
-
result = await tools_handler.execute_tool(tool_name, arguments)
|
| 943 |
-
|
| 944 |
-
logger.info(f"🔧 Tool result: {result}")
|
| 945 |
-
|
| 946 |
-
# Check if this is a website generation result
|
| 947 |
-
if tool_name == "generate_website" and result.get("success"):
|
| 948 |
-
if self.on_website_ready:
|
| 949 |
-
self.on_website_ready({
|
| 950 |
-
"website_id": result.get("website_id"),
|
| 951 |
-
"url": result.get("url"),
|
| 952 |
-
"title": result.get("title"),
|
| 953 |
-
"is_edit": result.get("is_edit", False),
|
| 954 |
-
})
|
| 955 |
-
|
| 956 |
-
# Send result back to OpenAI
|
| 957 |
-
result_message = {
|
| 958 |
-
"type": "conversation.item.create",
|
| 959 |
-
"item": {
|
| 960 |
-
"type": "function_call_output",
|
| 961 |
-
"call_id": call_id,
|
| 962 |
-
"output": json.dumps(result)
|
| 963 |
-
}
|
| 964 |
-
}
|
| 965 |
-
await self._send_message(result_message)
|
| 966 |
-
|
| 967 |
-
# Trigger OpenAI to generate a response based on the tool result
|
| 968 |
-
await self._send_message({"type": "response.create"})
|
| 969 |
-
|
| 970 |
-
# Notify iOS that tool usage is complete
|
| 971 |
-
if self.on_tool_usage:
|
| 972 |
-
self.on_tool_usage(tool_name, "completed")
|
| 973 |
-
|
| 974 |
-
except Exception as e:
|
| 975 |
-
logger.error(f"Error executing tool {tool_name}: {e}", exc_info=True)
|
| 976 |
-
|
| 977 |
-
# Send error result
|
| 978 |
-
error_result = {
|
| 979 |
-
"type": "conversation.item.create",
|
| 980 |
-
"item": {
|
| 981 |
-
"type": "function_call_output",
|
| 982 |
-
"call_id": call_id,
|
| 983 |
-
"output": json.dumps({"success": False, "error": str(e)})
|
| 984 |
-
}
|
| 985 |
-
}
|
| 986 |
-
await self._send_message(error_result)
|
| 987 |
-
await self._send_message({"type": "response.create"})
|
| 988 |
-
|
| 989 |
-
# Notify iOS that tool usage is complete (even on error)
|
| 990 |
-
if self.on_tool_usage:
|
| 991 |
-
self.on_tool_usage(tool_name, "completed")
|
| 992 |
|
| 993 |
# MARK: - Audio Input
|
| 994 |
|
| 995 |
async def send_audio(self, audio_data: bytes) -> None:
|
| 996 |
-
"""Send audio data to OpenAI.
|
| 997 |
-
|
| 998 |
-
Args:
|
| 999 |
-
audio_data: PCM16 audio data at 24kHz mono.
|
| 1000 |
-
"""
|
| 1001 |
if not self.is_connected:
|
| 1002 |
return
|
| 1003 |
|
| 1004 |
-
# Encode audio as base64
|
| 1005 |
audio_base64 = base64.b64encode(audio_data).decode("utf-8")
|
| 1006 |
|
| 1007 |
message = {
|
|
@@ -1012,22 +560,12 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 1012 |
await self._send_message(message)
|
| 1013 |
|
| 1014 |
async def commit_audio_and_respond(self) -> None:
    """Commit the buffered input audio and request an AI response.

    The response state is moved to WAITING *before* the commit is sent, so
    the microphone is paused before the AI can begin answering.
    """
    if not self.is_connected:
        return

    # Pause the mic first, then hand the buffered audio to OpenAI.
    self._set_response_state(ResponseState.WAITING)
    await self._send_message({"type": "input_audio_buffer.commit"})
    await self._send_message({"type": "response.create"})
|
| 1032 |
|
| 1033 |
async def clear_audio_buffer(self) -> None:
|
|
@@ -1047,22 +585,12 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 1047 |
# MARK: - Text Input
|
| 1048 |
|
| 1049 |
async def send_text_message(self, text: str) -> None:
|
| 1050 |
-
"""Send a text message (non-voice interaction).
|
| 1051 |
-
|
| 1052 |
-
Sets response state to WAITING before sending to ensure microphone
|
| 1053 |
-
is paused before the AI starts responding.
|
| 1054 |
-
|
| 1055 |
-
Args:
|
| 1056 |
-
text: The text message to send.
|
| 1057 |
-
"""
|
| 1058 |
if not self.is_connected:
|
| 1059 |
raise RuntimeError("Not connected to OpenAI")
|
| 1060 |
|
| 1061 |
-
# Set response state to WAITING before sending
|
| 1062 |
-
# This triggers microphone pause BEFORE the request is sent
|
| 1063 |
self._set_response_state(ResponseState.WAITING)
|
| 1064 |
|
| 1065 |
-
# Create conversation item
|
| 1066 |
item_message = {
|
| 1067 |
"type": "conversation.item.create",
|
| 1068 |
"item": {
|
|
@@ -1077,10 +605,36 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 1077 |
},
|
| 1078 |
}
|
| 1079 |
await self._send_message(item_message)
|
| 1080 |
-
|
| 1081 |
-
# Trigger response
|
| 1082 |
await self._send_message({"type": "response.create"})
|
| 1083 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1084 |
# MARK: - Listening Control
|
| 1085 |
|
| 1086 |
def start_listening(self) -> None:
|
|
@@ -1095,63 +649,36 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 1095 |
logger.info("Stopped listening")
|
| 1096 |
|
| 1097 |
def mark_response_complete(self) -> None:
    """Mark the response cycle as complete (audio buffer drained).

    Called by the conversation handler once playback has finished and the
    microphone can be safely resumed.
    """
    if self._response_state == ResponseState.IDLE:
        return
    self._set_response_state(ResponseState.IDLE)
|
| 1105 |
|
|
|
|
|
|
|
| 1106 |
def set_voice(self, voice_id: str) -> None:
    """Set the OpenAI voice and reconfigure the active session.

    Updates the persisted voice settings and, when a session is live,
    pushes a session reconfiguration so future responses use the new voice.

    Args:
        voice_id: OpenAI voice ID (e.g. "alloy", "coral", "sage").
    """
    # NOTE(review): import path reconstructed from a diff view — confirm
    # whether this module imports set_current_voice at top level instead.
    from ..routes.voice import set_current_voice

    if not set_current_voice(voice_id):
        logger.warning(f"⚠️ Invalid voice ID: {voice_id}")
        return

    logger.info(f"🔊 Voice set to: {voice_id}")
    if self.is_connected:
        asyncio.create_task(self._configure_session())
|
| 1126 |
|
| 1127 |
async def set_voice_async(self, voice_id: str, timeout: float = 5.0) -> bool:
|
| 1128 |
-
"""Set the OpenAI voice and wait for session update confirmation.
|
|
|
|
| 1129 |
|
| 1130 |
-
This is the async version that waits for OpenAI to confirm the session update.
|
| 1131 |
-
If the AI is currently speaking, it will cancel the current response first
|
| 1132 |
-
since the new voice only applies to future responses.
|
| 1133 |
-
|
| 1134 |
-
Args:
|
| 1135 |
-
voice_id: OpenAI voice ID (e.g., "alloy", "coral", "sage").
|
| 1136 |
-
timeout: Maximum time to wait for confirmation in seconds.
|
| 1137 |
-
|
| 1138 |
-
Returns:
|
| 1139 |
-
True if voice was set and session updated successfully.
|
| 1140 |
-
"""
|
| 1141 |
-
from .routes.voice import set_current_voice
|
| 1142 |
-
|
| 1143 |
-
# Update the backend voice settings
|
| 1144 |
if not set_current_voice(voice_id):
|
| 1145 |
logger.warning(f"⚠️ Invalid voice ID: {voice_id}")
|
| 1146 |
return False
|
| 1147 |
|
| 1148 |
logger.info(f"🔊 Setting voice to: {voice_id}")
|
| 1149 |
|
| 1150 |
-
# If not connected, just return success (settings are persisted)
|
| 1151 |
if not self.is_connected:
|
| 1152 |
return True
|
| 1153 |
|
| 1154 |
-
# Cancel current response if speaking (new voice won't apply to current response)
|
| 1155 |
if self._speaking_state == SpeakingState.SPEAKING:
|
| 1156 |
logger.info("🛑 Cancelling current response for voice change")
|
| 1157 |
await self.cancel_response()
|
|
@@ -1161,54 +688,33 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 1161 |
except asyncio.TimeoutError:
|
| 1162 |
logger.warning("⚠️ Timeout waiting for speaking to stop before voice change")
|
| 1163 |
|
| 1164 |
-
# Create event to wait for session update
|
| 1165 |
self._session_update_event = asyncio.Event()
|
| 1166 |
|
| 1167 |
try:
|
| 1168 |
-
# Reconfigure the session
|
| 1169 |
await self._configure_session()
|
| 1170 |
-
|
| 1171 |
-
# Wait for OpenAI to confirm session update
|
| 1172 |
await asyncio.wait_for(self._session_update_event.wait(), timeout=timeout)
|
| 1173 |
logger.info(f"✅ Voice change confirmed: {voice_id}")
|
| 1174 |
return True
|
| 1175 |
-
|
| 1176 |
except asyncio.TimeoutError:
|
| 1177 |
-
logger.warning(
|
| 1178 |
return False
|
| 1179 |
finally:
|
| 1180 |
self._session_update_event = None
|
| 1181 |
|
| 1182 |
async def set_language_async(self, language: str, timeout: float = 5.0) -> bool:
|
| 1183 |
-
"""Set the language and wait for session update confirmation.
|
| 1184 |
-
|
| 1185 |
-
If the AI is currently speaking, it will cancel the current response first
|
| 1186 |
-
since the new language only applies to future responses.
|
| 1187 |
-
|
| 1188 |
-
Args:
|
| 1189 |
-
language: Language code (e.g., "en", "nl", "de").
|
| 1190 |
-
timeout: Maximum time to wait for confirmation in seconds.
|
| 1191 |
-
|
| 1192 |
-
Returns:
|
| 1193 |
-
True if language was set and session updated successfully.
|
| 1194 |
-
"""
|
| 1195 |
-
from .routes.voice import set_preferred_language
|
| 1196 |
|
| 1197 |
-
# Update the backend language settings
|
| 1198 |
if not set_preferred_language(language):
|
| 1199 |
logger.warning(f"⚠️ Invalid language: {language}")
|
| 1200 |
return False
|
| 1201 |
|
| 1202 |
logger.info(f"🌍 Setting language to: {language}")
|
| 1203 |
-
|
| 1204 |
-
# Update internal language
|
| 1205 |
self._language = language
|
| 1206 |
|
| 1207 |
-
# If not connected, just return success (settings are persisted)
|
| 1208 |
if not self.is_connected:
|
| 1209 |
return True
|
| 1210 |
|
| 1211 |
-
# Cancel current response if speaking (new language won't apply to current response)
|
| 1212 |
if self._speaking_state == SpeakingState.SPEAKING:
|
| 1213 |
logger.info("🛑 Cancelling current response for language change")
|
| 1214 |
await self.cancel_response()
|
|
@@ -1218,37 +724,27 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 1218 |
except asyncio.TimeoutError:
|
| 1219 |
logger.warning("⚠️ Timeout waiting for speaking to stop before language change")
|
| 1220 |
|
| 1221 |
-
# Create event to wait for session update
|
| 1222 |
self._session_update_event = asyncio.Event()
|
| 1223 |
|
| 1224 |
try:
|
| 1225 |
-
# Reconfigure the session
|
| 1226 |
await self._configure_session()
|
| 1227 |
-
|
| 1228 |
-
# Wait for OpenAI to confirm session update
|
| 1229 |
await asyncio.wait_for(self._session_update_event.wait(), timeout=timeout)
|
| 1230 |
logger.info(f"✅ Language change confirmed: {language}")
|
| 1231 |
return True
|
| 1232 |
-
|
| 1233 |
except asyncio.TimeoutError:
|
| 1234 |
-
logger.warning(
|
| 1235 |
return False
|
| 1236 |
finally:
|
| 1237 |
self._session_update_event = None
|
| 1238 |
|
| 1239 |
def set_language_from_voice(self, voice_id: str) -> None:
|
| 1240 |
-
"""Set language based on a voice ID.
|
| 1241 |
-
|
| 1242 |
-
Args:
|
| 1243 |
-
voice_id: Voice ID pattern like "nl_BE-nathalie-medium".
|
| 1244 |
-
"""
|
| 1245 |
-
# Extract language code from voice ID pattern: "xx_YY-name-quality"
|
| 1246 |
if "_" in voice_id:
|
| 1247 |
lang_code = voice_id[:2].lower()
|
| 1248 |
else:
|
| 1249 |
lang_code = "en"
|
| 1250 |
|
| 1251 |
-
if lang_code in
|
| 1252 |
self.language = lang_code
|
| 1253 |
else:
|
| 1254 |
self.language = "en"
|
|
|
|
| 1 |
+
"""OpenAI Realtime API WebSocket client for voice conversations on Reachy.
|
| 2 |
+
|
| 3 |
+
Main service class for managing connections to OpenAI's Realtime API.
|
| 4 |
+
"""
|
| 5 |
|
| 6 |
import asyncio
|
| 7 |
import base64
|
| 8 |
import json
|
| 9 |
import logging
|
| 10 |
import os
|
| 11 |
+
from typing import Callable, Optional
|
|
|
|
|
|
|
|
|
|
| 12 |
|
| 13 |
import websockets
|
| 14 |
from websockets.client import WebSocketClientProtocol
|
| 15 |
|
| 16 |
+
from ..routes.voice import (
|
|
|
|
|
|
|
| 17 |
get_preferred_language,
|
|
|
|
|
|
|
|
|
|
| 18 |
set_vad_settings_callback,
|
| 19 |
)
|
| 20 |
|
| 21 |
+
from .enums import ConnectionState, SpeakingState, ResponseState
|
| 22 |
+
from .session import build_session_config, LANGUAGE_NAMES, fetch_user_personalization
|
| 23 |
+
from .tools_loader import get_all_tools, get_tools_handler
|
| 24 |
+
from .tool_executor import ToolExecutor
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
|
| 26 |
logger = logging.getLogger(__name__)
|
| 27 |
|
|
|
|
| 29 |
OPENAI_REALTIME_URL = "wss://api.openai.com/v1/realtime"
|
| 30 |
OPENAI_MODEL = "gpt-4o-realtime-preview-2024-12-17"
|
| 31 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
|
| 33 |
class OpenAIRealtimeService:
|
| 34 |
"""WebSocket client for OpenAI Realtime API.
|
|
|
|
| 45 |
# State
|
| 46 |
self._connection_state = ConnectionState.DISCONNECTED
|
| 47 |
self._speaking_state = SpeakingState.IDLE
|
| 48 |
+
self._response_state = ResponseState.IDLE
|
| 49 |
self._is_listening = False
|
| 50 |
self._current_transcript = ""
|
| 51 |
self._response_text_buffer = ""
|
|
|
|
|
|
|
| 52 |
|
| 53 |
+
# Event for awaiting end of in-flight spoken response
|
|
|
|
| 54 |
self._speaking_idle_event = asyncio.Event()
|
| 55 |
self._speaking_idle_event.set()
|
| 56 |
|
| 57 |
+
# Language settings
|
| 58 |
self._language = get_preferred_language()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
|
| 60 |
# Custom personality (system prompt)
|
| 61 |
self._custom_system_prompt: Optional[str] = None
|
|
|
|
| 64 |
self._enabled_tools: Optional[list[str]] = None
|
| 65 |
|
| 66 |
# Tool call tracking
|
| 67 |
+
self._pending_tool_calls: dict[str, dict] = {}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 68 |
|
| 69 |
# Callbacks for iOS relay
|
| 70 |
self.on_connection_state: Optional[Callable[[ConnectionState], None]] = None
|
| 71 |
self.on_speaking_state: Optional[Callable[[SpeakingState], None]] = None
|
| 72 |
+
self.on_response_state: Optional[Callable[[ResponseState], None]] = None
|
| 73 |
self.on_transcript_update: Optional[Callable[[str], None]] = None
|
| 74 |
self.on_response_text: Optional[Callable[[str], None]] = None
|
| 75 |
self.on_audio_delta: Optional[Callable[[bytes], None]] = None
|
| 76 |
self.on_error: Optional[Callable[[str], None]] = None
|
| 77 |
+
self.on_app_change: Optional[Callable[[dict], None]] = None
|
| 78 |
+
self.on_tool_usage: Optional[Callable[[str, str], None]] = None
|
| 79 |
+
self.on_website_ready: Optional[Callable[[dict], None]] = None
|
| 80 |
|
| 81 |
# Meeting callbacks
|
| 82 |
+
self.on_meeting_started: Optional[Callable[[str, str], None]] = None
|
| 83 |
+
self.on_meeting_stopped: Optional[Callable[[str], None]] = None
|
| 84 |
+
self.on_meeting_transcript_update: Optional[Callable[[str, str], None]] = None
|
| 85 |
|
| 86 |
+
# Session update callback
|
| 87 |
self.on_session_updated: Optional[Callable[[], None]] = None
|
| 88 |
|
| 89 |
# Session update tracking for async waiting
|
| 90 |
self._session_update_event: Optional[asyncio.Event] = None
|
| 91 |
|
| 92 |
+
# Tool executor (initialized lazily)
|
| 93 |
+
self._tool_executor: Optional[ToolExecutor] = None
|
| 94 |
+
|
| 95 |
+
# Setup callbacks
|
| 96 |
self._setup_tools_handler()
|
| 97 |
self._setup_meeting_callbacks()
|
| 98 |
self._setup_vad_callback()
|
| 99 |
|
| 100 |
+
# MARK: - Properties
|
| 101 |
+
|
| 102 |
@property
|
| 103 |
def connection_state(self) -> ConnectionState:
|
| 104 |
"""Get current connection state."""
|
|
|
|
| 116 |
|
| 117 |
@property
|
| 118 |
def is_responding(self) -> bool:
|
| 119 |
+
"""Check if AI is currently responding."""
|
|
|
|
|
|
|
|
|
|
| 120 |
return self._response_state in (
|
| 121 |
ResponseState.WAITING,
|
| 122 |
ResponseState.GENERATING,
|
|
|
|
| 146 |
if self.is_connected:
|
| 147 |
asyncio.create_task(self._update_session())
|
| 148 |
|
| 149 |
+
# MARK: - State Management
|
| 150 |
+
|
| 151 |
+
def _set_connection_state(self, state: ConnectionState) -> None:
    """Update connection state and notify the registered callback.

    NOTE(review): unlike the speaking/response setters this fires the
    callback even when the state is unchanged — presumably intentional.
    """
    self._connection_state = state
    notify = self.on_connection_state
    if notify:
        notify(state)
|
| 156 |
+
|
| 157 |
+
def _set_speaking_state(self, state: SpeakingState) -> None:
    """Update speaking state, toggle the idle event, and notify the callback."""
    if self._speaking_state == state:
        return
    self._speaking_state = state
    # The idle event lets awaiters block until speech output has stopped.
    if state == SpeakingState.IDLE:
        self._speaking_idle_event.set()
    else:
        self._speaking_idle_event.clear()
    if self.on_speaking_state:
        self.on_speaking_state(state)
|
| 167 |
+
|
| 168 |
+
def _set_response_state(self, state: ResponseState) -> None:
    """Update response state, log the transition, and notify the callback."""
    if self._response_state == state:
        return
    old_state, self._response_state = self._response_state, state
    logger.info(f"📊 Response state: {old_state.value} -> {state.value}")
    if self.on_response_state:
        self.on_response_state(state)
|
| 176 |
+
|
| 177 |
+
# MARK: - Personality Management
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 178 |
|
| 179 |
def set_custom_personality(
    self,
    system_prompt: str,
    enabled_tools: Optional[list[str]] = None,
) -> None:
    """Set a custom personality/system prompt and enabled tools.

    Args:
        system_prompt: Replacement system prompt for the session.
        enabled_tools: Optional whitelist of tool names; None keeps defaults.
    """
    logger.info(f"🎭 Setting custom personality ({len(system_prompt)} chars)")
    self._custom_system_prompt = system_prompt
    self._enabled_tools = enabled_tools

    if enabled_tools is not None:
        logger.info(f"🔧 Custom app tools: {enabled_tools}")

    # Apply immediately when a session is live.
    if self.is_connected:
        asyncio.create_task(self._update_session())
|
| 194 |
|
|
|
|
| 196 |
"""Clear custom personality and revert to default Reachy."""
|
| 197 |
logger.info("🎭 Clearing custom personality - reverting to default Reachy")
|
| 198 |
self._custom_system_prompt = None
|
| 199 |
+
self._enabled_tools = None
|
| 200 |
|
|
|
|
| 201 |
if self.is_connected:
|
| 202 |
asyncio.create_task(self._update_session())
|
| 203 |
|
| 204 |
+
# MARK: - Callback Setup
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 205 |
|
| 206 |
def _setup_tools_handler(self) -> None:
|
| 207 |
"""Setup callbacks for the app tools handler."""
|
| 208 |
+
tools_handler = get_tools_handler()
|
| 209 |
|
|
|
|
| 210 |
def on_personality_change(
|
| 211 |
system_prompt: Optional[str],
|
| 212 |
enabled_tools: Optional[list[str]] = None,
|
|
|
|
| 218 |
|
| 219 |
tools_handler.on_personality_change = on_personality_change
|
| 220 |
|
|
|
|
| 221 |
async def on_app_change(data: dict) -> None:
|
| 222 |
if self.on_app_change:
|
| 223 |
self.on_app_change(data)
|
|
|
|
| 226 |
|
| 227 |
def _setup_meeting_callbacks(self) -> None:
    """Wire the meeting tool's events through to this service's callbacks."""
    from ..tools.meeting import set_meeting_callbacks

    def on_started(meeting_id: str, title: str) -> None:
        # Relay meeting start to the iOS client, if anyone is listening.
        logger.info(f"📝 Meeting started: {title} ({meeting_id})")
        if self.on_meeting_started:
            self.on_meeting_started(meeting_id, title)

    def on_stopped(meeting_id: str) -> None:
        logger.info(f"📝 Meeting stopped: {meeting_id}")
        if self.on_meeting_stopped:
            self.on_meeting_stopped(meeting_id)

    def on_transcript_update(meeting_id: str, transcript: str) -> None:
        if self.on_meeting_transcript_update:
            self.on_meeting_transcript_update(meeting_id, transcript)

    set_meeting_callbacks(
        on_started=on_started,
        on_stopped=on_stopped,
        on_transcript_update=on_transcript_update,
    )
|
| 250 |
|
| 251 |
def _setup_vad_callback(self) -> None:
    """Register a handler that re-sends the session config when VAD settings change."""

    def on_vad_changed() -> None:
        # Only a live session needs reconfiguring; settings are persisted elsewhere.
        if self.is_connected:
            logger.info("🎤 VAD settings changed, updating OpenAI session...")
            asyncio.create_task(self._configure_session())

    set_vad_settings_callback(on_vad_changed)
|
| 259 |
|
| 260 |
def _append_to_meeting_transcript(self, text: str, speaker: str = "user") -> None:
    """Append transcribed text to the active meeting, if one exists.

    Args:
        text: Transcribed utterance; ignored when blank.
        speaker: Label prepended (upper-cased) to the transcript line.
    """
    from ..tools.meeting import is_meeting_active, append_to_transcript

    stripped = text.strip()
    if not (is_meeting_active() and stripped):
        return
    append_to_transcript(f"[{speaker.upper()}]: {stripped}")
|
| 267 |
|
| 268 |
+
# MARK: - Connection Management
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 269 |
|
| 270 |
async def connect(self, api_key: Optional[str] = None) -> None:
|
| 271 |
+
"""Connect to OpenAI Realtime API."""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 272 |
if self.is_connected:
|
| 273 |
logger.warning("Already connected to OpenAI")
|
| 274 |
return
|
| 275 |
|
|
|
|
| 276 |
key = api_key or os.environ.get("OPENAI_API_KEY")
|
| 277 |
if not key:
|
| 278 |
logger.error("❌ No OpenAI API key provided or found in environment")
|
|
|
|
| 282 |
self._set_connection_state(ConnectionState.CONNECTING)
|
| 283 |
|
| 284 |
try:
|
|
|
|
| 285 |
url = f"{OPENAI_REALTIME_URL}?model={OPENAI_MODEL}"
|
|
|
|
|
|
|
| 286 |
headers = {
|
| 287 |
"Authorization": f"Bearer {key}",
|
| 288 |
"OpenAI-Beta": "realtime=v1",
|
|
|
|
| 299 |
|
| 300 |
logger.info("✅ WebSocket connected to OpenAI Realtime API")
|
| 301 |
|
|
|
|
| 302 |
logger.info("⚙️ Configuring OpenAI session...")
|
| 303 |
await self._configure_session()
|
| 304 |
|
| 305 |
self._set_connection_state(ConnectionState.CONNECTED)
|
| 306 |
logger.info("✅ OpenAI Realtime session configured and ready!")
|
| 307 |
|
|
|
|
| 308 |
self._receive_task = asyncio.create_task(self._receive_messages())
|
| 309 |
|
| 310 |
except Exception as e:
|
|
|
|
| 333 |
self._set_connection_state(ConnectionState.DISCONNECTED)
|
| 334 |
logger.info("Disconnected from OpenAI Realtime API")
|
| 335 |
|
| 336 |
+
# MARK: - Session Configuration
|
| 337 |
+
|
| 338 |
async def _configure_session(self) -> None:
    """Configure the OpenAI session with audio settings, tools, and personalization."""
    from ..routes.voice import get_preferred_language

    # Re-read the language on every (re)configure so UI changes take effect.
    self._language = get_preferred_language()
    tools = get_all_tools(self._enabled_tools)

    # Fetch user personalization for smarter greetings.
    user_name, user_country = await fetch_user_personalization()

    config = build_session_config(
        language=self._language,
        custom_system_prompt=self._custom_system_prompt,
        tools=tools,
        user_name=user_name,
        user_country=user_country,
    )
    await self._send_message(config)
|
| 357 |
|
| 358 |
async def _update_session(self) -> None:
    """Re-apply the session configuration to the live connection.

    NOTE(review): reconstructed from a diff view — one original line (likely
    the docstring) was not visible; confirm against the repository.
    """
    if not self.is_connected:
        return

    lang_name = LANGUAGE_NAMES.get(self._language, "English")
    logger.info(f"Updating OpenAI session language to {lang_name}")

    await self._configure_session()
|
|
|
|
| 372 |
|
| 373 |
await self._websocket.send(json.dumps(message))
|
| 374 |
|
| 375 |
+
# MARK: - Message Handling
|
| 376 |
+
|
| 377 |
async def _receive_messages(self) -> None:
|
| 378 |
"""Receive and handle messages from OpenAI."""
|
| 379 |
if not self._websocket:
|
|
|
|
| 397 |
data = json.loads(message)
|
| 398 |
msg_type = data.get("type", "")
|
| 399 |
|
| 400 |
+
if msg_type not in ("response.audio.delta",):
|
|
|
|
| 401 |
logger.debug(f"📨 OpenAI event: {msg_type}")
|
| 402 |
|
| 403 |
if msg_type in ("session.created", "session.updated"):
|
| 404 |
logger.info("✅ OpenAI session configured")
|
|
|
|
| 405 |
if self._session_update_event:
|
| 406 |
self._session_update_event.set()
|
|
|
|
| 407 |
if self.on_session_updated:
|
| 408 |
self.on_session_updated()
|
| 409 |
|
| 410 |
elif msg_type == "input_audio_buffer.speech_started":
|
|
|
|
|
|
|
| 411 |
if self.is_responding:
|
| 412 |
logger.warning(
|
| 413 |
+
f"🎤 VAD speech detected during AI response - ignoring "
|
| 414 |
f"(response_state={self._response_state.value})"
|
| 415 |
)
|
|
|
|
| 416 |
else:
|
| 417 |
logger.info("🎤 Speech detected - user is speaking")
|
| 418 |
self._set_speaking_state(SpeakingState.IDLE)
|
| 419 |
|
| 420 |
elif msg_type == "input_audio_buffer.speech_stopped":
|
|
|
|
| 421 |
logger.info("🎤 Speech ended - processing...")
|
| 422 |
|
| 423 |
elif msg_type == "conversation.item.input_audio_transcription.completed":
|
|
|
|
| 424 |
transcript = data.get("transcript", "")
|
| 425 |
logger.info(f"📝 User transcript: {transcript}")
|
| 426 |
if transcript:
|
| 427 |
self._current_transcript = transcript
|
| 428 |
if self.on_transcript_update:
|
| 429 |
self.on_transcript_update(transcript)
|
|
|
|
| 430 |
self._append_to_meeting_transcript(transcript, speaker="user")
|
| 431 |
|
| 432 |
elif msg_type == "response.audio_transcript.delta":
|
|
|
|
| 433 |
delta = data.get("delta", "")
|
| 434 |
if delta:
|
| 435 |
self._response_text_buffer += delta
|
| 436 |
|
| 437 |
elif msg_type == "response.audio_transcript.done":
|
|
|
|
| 438 |
transcript = data.get("transcript", "")
|
| 439 |
if transcript:
|
| 440 |
self._response_text_buffer = ""
|
| 441 |
if self.on_response_text:
|
| 442 |
self.on_response_text(transcript)
|
|
|
|
| 443 |
self._append_to_meeting_transcript(transcript, speaker="assistant")
|
| 444 |
|
| 445 |
elif msg_type == "response.audio.delta":
|
|
|
|
| 446 |
audio_base64 = data.get("delta", "")
|
| 447 |
if audio_base64:
|
|
|
|
| 448 |
if self._speaking_state != SpeakingState.SPEAKING:
|
| 449 |
self._set_speaking_state(SpeakingState.SPEAKING)
|
| 450 |
|
|
|
|
| 451 |
if self._response_state == ResponseState.WAITING:
|
| 452 |
self._set_response_state(ResponseState.GENERATING)
|
| 453 |
|
|
|
|
| 454 |
audio_data = base64.b64decode(audio_base64)
|
|
|
|
|
|
|
| 455 |
if self.on_audio_delta:
|
| 456 |
self.on_audio_delta(audio_data)
|
| 457 |
|
| 458 |
elif msg_type == "response.audio.done":
|
|
|
|
| 459 |
self._set_speaking_state(SpeakingState.IDLE)
|
| 460 |
self._set_response_state(ResponseState.COMPLETE)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 461 |
|
| 462 |
elif msg_type == "response.done":
|
|
|
|
| 463 |
self._set_speaking_state(SpeakingState.IDLE)
|
|
|
|
| 464 |
if self._response_state == ResponseState.GENERATING:
|
| 465 |
self._set_response_state(ResponseState.COMPLETE)
|
| 466 |
if self._response_text_buffer:
|
| 467 |
if self.on_response_text:
|
| 468 |
self.on_response_text(self._response_text_buffer)
|
| 469 |
self._response_text_buffer = ""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 470 |
|
| 471 |
elif msg_type == "error":
|
| 472 |
error_data = data.get("error", {})
|
|
|
|
| 483 |
elif msg_type == "input_audio_buffer.cleared":
|
| 484 |
logger.info("🗑️ Audio buffer cleared")
|
| 485 |
|
| 486 |
+
# Tool call events
|
| 487 |
elif msg_type == "response.output_item.added":
|
|
|
|
| 488 |
item = data.get("item", {})
|
| 489 |
if item.get("type") == "function_call":
|
| 490 |
call_id = item.get("call_id", "")
|
|
|
|
| 494 |
"name": func_name,
|
| 495 |
"arguments_buffer": ""
|
| 496 |
}
|
|
|
|
| 497 |
if self.on_tool_usage:
|
| 498 |
self.on_tool_usage(func_name, "started")
|
| 499 |
|
| 500 |
elif msg_type == "response.function_call_arguments.delta":
|
|
|
|
| 501 |
call_id = data.get("call_id", "")
|
| 502 |
delta = data.get("delta", "")
|
| 503 |
if call_id in self._pending_tool_calls:
|
| 504 |
self._pending_tool_calls[call_id]["arguments_buffer"] += delta
|
| 505 |
|
| 506 |
elif msg_type == "response.function_call_arguments.done":
|
|
|
|
| 507 |
call_id = data.get("call_id", "")
|
| 508 |
arguments_str = data.get("arguments", "")
|
| 509 |
|
|
|
|
| 511 |
tool_info = self._pending_tool_calls[call_id]
|
| 512 |
tool_name = tool_info["name"]
|
| 513 |
|
|
|
|
| 514 |
logger.info(f"🔧 Function call complete: {tool_name}")
|
| 515 |
logger.debug(f"🔧 Arguments: {arguments_str}")
|
| 516 |
|
|
|
|
| 520 |
)
|
| 521 |
|
| 522 |
elif msg_type == "response.output_item.done":
|
|
|
|
| 523 |
item = data.get("item", {})
|
| 524 |
if item.get("type") == "function_call":
|
| 525 |
call_id = item.get("call_id", "")
|
|
|
|
| 532 |
except json.JSONDecodeError:
|
| 533 |
logger.error(f"Failed to parse message: {message[:100]}")
|
| 534 |
|
|
|
|
|
|
|
| 535 |
async def _execute_tool_call(self, call_id: str, tool_name: str, arguments_str: str) -> None:
|
| 536 |
+
"""Execute a tool call using the tool executor."""
|
| 537 |
+
if self._tool_executor is None:
|
| 538 |
+
self._tool_executor = ToolExecutor(
|
| 539 |
+
send_message_fn=self._send_message,
|
| 540 |
+
on_tool_usage=self.on_tool_usage,
|
| 541 |
+
on_website_ready=self.on_website_ready,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 542 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 543 |
|
| 544 |
+
await self._tool_executor.execute(call_id, tool_name, arguments_str)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 545 |
|
| 546 |
# MARK: - Audio Input
|
| 547 |
|
| 548 |
async def send_audio(self, audio_data: bytes) -> None:
|
| 549 |
+
"""Send audio data to OpenAI."""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 550 |
if not self.is_connected:
|
| 551 |
return
|
| 552 |
|
|
|
|
| 553 |
audio_base64 = base64.b64encode(audio_data).decode("utf-8")
|
| 554 |
|
| 555 |
message = {
|
|
|
|
| 560 |
await self._send_message(message)
|
| 561 |
|
| 562 |
async def commit_audio_and_respond(self) -> None:
|
| 563 |
+
"""Commit the audio buffer and request a response."""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 564 |
if not self.is_connected:
|
| 565 |
return
|
| 566 |
|
|
|
|
|
|
|
| 567 |
self._set_response_state(ResponseState.WAITING)
|
|
|
|
|
|
|
| 568 |
await self._send_message({"type": "input_audio_buffer.commit"})
|
|
|
|
|
|
|
| 569 |
await self._send_message({"type": "response.create"})
|
| 570 |
|
| 571 |
async def clear_audio_buffer(self) -> None:
|
|
|
|
| 585 |
# MARK: - Text Input
|
| 586 |
|
| 587 |
async def send_text_message(self, text: str) -> None:
|
| 588 |
+
"""Send a text message (non-voice interaction)."""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 589 |
if not self.is_connected:
|
| 590 |
raise RuntimeError("Not connected to OpenAI")
|
| 591 |
|
|
|
|
|
|
|
| 592 |
self._set_response_state(ResponseState.WAITING)
|
| 593 |
|
|
|
|
| 594 |
item_message = {
|
| 595 |
"type": "conversation.item.create",
|
| 596 |
"item": {
|
|
|
|
| 605 |
},
|
| 606 |
}
|
| 607 |
await self._send_message(item_message)
|
|
|
|
|
|
|
| 608 |
await self._send_message({"type": "response.create"})
|
| 609 |
|
| 610 |
+
async def restore_conversation_context(self, messages: list[dict]) -> None:
|
| 611 |
+
"""Restore conversation context from history.
|
| 612 |
+
|
| 613 |
+
Injects previous messages into the session to maintain context
|
| 614 |
+
after a session reset (e.g., voice/language change).
|
| 615 |
+
|
| 616 |
+
Args:
|
| 617 |
+
messages: List of message dicts formatted for conversation.item.create.
|
| 618 |
+
"""
|
| 619 |
+
if not self.is_connected:
|
| 620 |
+
logger.warning("Cannot restore context: not connected")
|
| 621 |
+
return
|
| 622 |
+
|
| 623 |
+
if not messages:
|
| 624 |
+
logger.info("📚 No conversation history to restore")
|
| 625 |
+
return
|
| 626 |
+
|
| 627 |
+
logger.info(f"📚 Restoring {len(messages)} messages from history")
|
| 628 |
+
|
| 629 |
+
for msg in messages:
|
| 630 |
+
item_message = {
|
| 631 |
+
"type": "conversation.item.create",
|
| 632 |
+
"item": msg,
|
| 633 |
+
}
|
| 634 |
+
await self._send_message(item_message)
|
| 635 |
+
|
| 636 |
+
logger.info("📚 Conversation context restored")
|
| 637 |
+
|
| 638 |
# MARK: - Listening Control
|
| 639 |
|
| 640 |
def start_listening(self) -> None:
|
|
|
|
| 649 |
logger.info("Stopped listening")
|
| 650 |
|
| 651 |
    def mark_response_complete(self) -> None:
        """Mark the response cycle as complete (audio buffer drained)."""
        # Avoid a redundant state-change notification when already idle.
        if self._response_state != ResponseState.IDLE:
            self._set_response_state(ResponseState.IDLE)
|
| 655 |
|
| 656 |
+
# MARK: - Voice/Language Control
|
| 657 |
+
|
| 658 |
def set_voice(self, voice_id: str) -> None:
|
| 659 |
+
"""Set the OpenAI voice and update the session."""
|
| 660 |
+
from ..routes.voice import set_current_voice
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 661 |
|
|
|
|
| 662 |
if set_current_voice(voice_id):
|
| 663 |
logger.info(f"🔊 Voice set to: {voice_id}")
|
|
|
|
|
|
|
| 664 |
if self.is_connected:
|
| 665 |
asyncio.create_task(self._configure_session())
|
| 666 |
else:
|
| 667 |
logger.warning(f"⚠️ Invalid voice ID: {voice_id}")
|
| 668 |
|
| 669 |
async def set_voice_async(self, voice_id: str, timeout: float = 5.0) -> bool:
|
| 670 |
+
"""Set the OpenAI voice and wait for session update confirmation."""
|
| 671 |
+
from ..routes.voice import set_current_voice
|
| 672 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 673 |
if not set_current_voice(voice_id):
|
| 674 |
logger.warning(f"⚠️ Invalid voice ID: {voice_id}")
|
| 675 |
return False
|
| 676 |
|
| 677 |
logger.info(f"🔊 Setting voice to: {voice_id}")
|
| 678 |
|
|
|
|
| 679 |
if not self.is_connected:
|
| 680 |
return True
|
| 681 |
|
|
|
|
| 682 |
if self._speaking_state == SpeakingState.SPEAKING:
|
| 683 |
logger.info("🛑 Cancelling current response for voice change")
|
| 684 |
await self.cancel_response()
|
|
|
|
| 688 |
except asyncio.TimeoutError:
|
| 689 |
logger.warning("⚠️ Timeout waiting for speaking to stop before voice change")
|
| 690 |
|
|
|
|
| 691 |
self._session_update_event = asyncio.Event()
|
| 692 |
|
| 693 |
try:
|
|
|
|
| 694 |
await self._configure_session()
|
|
|
|
|
|
|
| 695 |
await asyncio.wait_for(self._session_update_event.wait(), timeout=timeout)
|
| 696 |
logger.info(f"✅ Voice change confirmed: {voice_id}")
|
| 697 |
return True
|
|
|
|
| 698 |
except asyncio.TimeoutError:
|
| 699 |
+
logger.warning("⚠️ Timeout waiting for voice change confirmation")
|
| 700 |
return False
|
| 701 |
finally:
|
| 702 |
self._session_update_event = None
|
| 703 |
|
| 704 |
async def set_language_async(self, language: str, timeout: float = 5.0) -> bool:
|
| 705 |
+
"""Set the language and wait for session update confirmation."""
|
| 706 |
+
from ..routes.voice import set_preferred_language
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 707 |
|
|
|
|
| 708 |
if not set_preferred_language(language):
|
| 709 |
logger.warning(f"⚠️ Invalid language: {language}")
|
| 710 |
return False
|
| 711 |
|
| 712 |
logger.info(f"🌍 Setting language to: {language}")
|
|
|
|
|
|
|
| 713 |
self._language = language
|
| 714 |
|
|
|
|
| 715 |
if not self.is_connected:
|
| 716 |
return True
|
| 717 |
|
|
|
|
| 718 |
if self._speaking_state == SpeakingState.SPEAKING:
|
| 719 |
logger.info("🛑 Cancelling current response for language change")
|
| 720 |
await self.cancel_response()
|
|
|
|
| 724 |
except asyncio.TimeoutError:
|
| 725 |
logger.warning("⚠️ Timeout waiting for speaking to stop before language change")
|
| 726 |
|
|
|
|
| 727 |
self._session_update_event = asyncio.Event()
|
| 728 |
|
| 729 |
try:
|
|
|
|
| 730 |
await self._configure_session()
|
|
|
|
|
|
|
| 731 |
await asyncio.wait_for(self._session_update_event.wait(), timeout=timeout)
|
| 732 |
logger.info(f"✅ Language change confirmed: {language}")
|
| 733 |
return True
|
|
|
|
| 734 |
except asyncio.TimeoutError:
|
| 735 |
+
logger.warning("⚠️ Timeout waiting for language change confirmation")
|
| 736 |
return False
|
| 737 |
finally:
|
| 738 |
self._session_update_event = None
|
| 739 |
|
| 740 |
def set_language_from_voice(self, voice_id: str) -> None:
|
| 741 |
+
"""Set language based on a voice ID."""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 742 |
if "_" in voice_id:
|
| 743 |
lang_code = voice_id[:2].lower()
|
| 744 |
else:
|
| 745 |
lang_code = "en"
|
| 746 |
|
| 747 |
+
if lang_code in LANGUAGE_NAMES:
|
| 748 |
self.language = lang_code
|
| 749 |
else:
|
| 750 |
self.language = "en"
|
reachys_brain/openai_realtime/session.py
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Session configuration for OpenAI Realtime API.
|
| 2 |
+
|
| 3 |
+
Handles session setup, system instructions, and tool registration.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
from typing import Optional
|
| 8 |
+
|
| 9 |
+
from ..routes.voice import (
|
| 10 |
+
get_current_voice,
|
| 11 |
+
get_preferred_language,
|
| 12 |
+
get_vad_threshold,
|
| 13 |
+
get_vad_silence_ms,
|
| 14 |
+
get_vad_prefix_ms,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
# Language names for system instructions.
# Keys are two-letter language codes mapped to English display names;
# lookups elsewhere in this module default to "English" for unknown codes.
LANGUAGE_NAMES = {
    "en": "English",
    "nl": "Dutch",
    "de": "German",
    "fr": "French",
    "es": "Spanish",
    "it": "Italian",
    "pt": "Portuguese",
    "ja": "Japanese",
    "ko": "Korean",
    "zh": "Chinese",
    "ar": "Arabic",
    "hi": "Hindi",
    "ru": "Russian",
    "pl": "Polish",
    "tr": "Turkish",
    "sv": "Swedish",
    "da": "Danish",
    "no": "Norwegian",
    "fi": "Finnish",
}

# Tools that require pre-announcement (slower, user benefits from knowing).
# NOTE(review): neither tool set below is referenced within this module's
# visible code — confirm they are consumed elsewhere before removing.
TOOLS_REQUIRING_ANNOUNCEMENT = {
    "web_search", "get_weather", "recognize_object", "test_camera",
    "generate_website", "create_custom_app", "activate_custom_app",
}

# Tools that can execute immediately (fast, no announcement needed)
QUICK_TOOLS = {
    "get_current_datetime", "get_user_name", "get_preferred_country",
    "add_reminder", "get_reminders", "wake_up", "go_to_sleep",
    "list_custom_apps", "deactivate_app",
}
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def get_system_instructions(
    language: str,
    custom_system_prompt: Optional[str] = None,
    user_name: Optional[str] = None,
    user_country: Optional[str] = None,
) -> str:
    """Build the system prompt for the current language and user context.

    A custom personality prompt, when provided, replaces the default Reachy
    personality; the personalization and language suffixes are appended in
    either case.

    Args:
        language: Current language code.
        custom_system_prompt: Optional custom personality prompt.
        user_name: Known user name (pre-fetched from DB).
        user_country: Known user country (pre-fetched from DB).

    Returns:
        The system instructions string.
    """
    user_context = _build_personalization_context(user_name, user_country)
    lang_suffix = f"\n\nLANGUAGE: Always respond in {LANGUAGE_NAMES.get(language, 'English')}."

    if custom_system_prompt:
        # Custom personality replaces the default prompt entirely.
        return custom_system_prompt + user_context + lang_suffix

    # Default Reachy personality - compressed version
    default_prompt = f"""You are Reachy, a friendly robot assistant. Be warm, concise, and conversational (1-3 sentences unless more detail is needed).

{user_context}

TOOL GUIDELINES:
- For web_search, get_weather, recognize_object, generate_website: Say "Let me..." before using
- For quick tools (datetime, reminders, power): Just use them directly
- ALWAYS use web_search for factual questions (people, places, events, prices, news) - don't rely on training data
- For app creation/activation: Ask for confirmation first
- When user mentions their name/country: Use remember_user_name/remember_preferred_country to save it"""
    return default_prompt + lang_suffix
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def _build_personalization_context(
|
| 101 |
+
user_name: Optional[str],
|
| 102 |
+
user_country: Optional[str],
|
| 103 |
+
) -> str:
|
| 104 |
+
"""Build personalization context based on known user info.
|
| 105 |
+
|
| 106 |
+
Args:
|
| 107 |
+
user_name: Known user name or None.
|
| 108 |
+
user_country: Known user country or None.
|
| 109 |
+
|
| 110 |
+
Returns:
|
| 111 |
+
Personalization instruction string.
|
| 112 |
+
"""
|
| 113 |
+
parts = []
|
| 114 |
+
|
| 115 |
+
if user_name and user_country:
|
| 116 |
+
parts.append(f"USER CONTEXT: You know this user as {user_name} from {user_country}. Use their name naturally in conversation.")
|
| 117 |
+
elif user_name:
|
| 118 |
+
parts.append(f"USER CONTEXT: You know this user as {user_name}. If appropriate, ask what country they're in for timezone info.")
|
| 119 |
+
elif user_country:
|
| 120 |
+
parts.append(f"USER CONTEXT: User is from {user_country}. If appropriate, ask for their name to personalize the conversation.")
|
| 121 |
+
else:
|
| 122 |
+
parts.append("USER CONTEXT: New user - if natural, ask for their name early in the conversation.")
|
| 123 |
+
|
| 124 |
+
return "\n".join(parts)
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def build_session_config(
    language: str,
    custom_system_prompt: Optional[str],
    tools: list[dict],
    user_name: Optional[str] = None,
    user_country: Optional[str] = None,
) -> dict:
    """Assemble the ``session.update`` payload sent to OpenAI.

    Args:
        language: Current language code.
        custom_system_prompt: Optional custom personality prompt.
        tools: List of tool definitions to register.
        user_name: Known user name for personalization.
        user_country: Known user country for personalization.

    Returns:
        Session configuration dictionary.
    """
    voice = get_current_voice()
    threshold = get_vad_threshold()
    silence_ms = get_vad_silence_ms()
    prefix_ms = get_vad_prefix_ms()

    logger.info(
        f"🔊 Building session config: voice={voice}, language={language}, "
        f"VAD: threshold={threshold}, silence={silence_ms}ms, "
        f"user={user_name or 'unknown'}, country={user_country or 'unknown'}"
    )

    instructions = get_system_instructions(
        language,
        custom_system_prompt,
        user_name=user_name,
        user_country=user_country,
    )
    session = {
        "modalities": ["text", "audio"],
        "instructions": instructions,
        "voice": voice,
        "input_audio_format": "pcm16",
        "output_audio_format": "pcm16",
        "input_audio_transcription": {
            "model": "whisper-1",
            "language": language,
        },
        "turn_detection": {
            "type": "server_vad",
            "threshold": threshold,
            "prefix_padding_ms": prefix_ms,
            "silence_duration_ms": silence_ms,
        },
        "tools": tools,
        "tool_choice": "auto",
    }
    return {"type": "session.update", "session": session}
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
async def fetch_user_personalization() -> tuple[Optional[str], Optional[str]]:
    """Load the stored user name and country for prompt personalization.

    Returns:
        Tuple of (user_name, user_country); either element may be None, and
        (None, None) is returned when the database cannot be reached.
    """
    try:
        from ..database import get_database

        db = get_database()
        return (
            await db.get_user_setting("user_name"),
            await db.get_user_setting("preferred_country"),
        )
    except Exception as e:
        logger.warning(f"Could not fetch user personalization: {e}")
        return None, None
|
| 201 |
+
|
reachys_brain/openai_realtime/tool_executor.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tool execution for OpenAI Realtime API.
|
| 2 |
+
|
| 3 |
+
Handles executing tool calls and sending results back to OpenAI.
|
| 4 |
+
Includes rate limiting to prevent infinite tool call loops.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import asyncio
|
| 8 |
+
import json
|
| 9 |
+
import logging
|
| 10 |
+
import time
|
| 11 |
+
from typing import Callable, Optional
|
| 12 |
+
|
| 13 |
+
from .tools_loader import get_tools_handler, get_tool_registry
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)

# Rate limiting constants
MAX_TOOL_CALLS_PER_WINDOW = 10  # Max tool calls in 10 seconds
TOOL_CALL_WINDOW_SECONDS = 10.0


class ToolExecutor:
    """Handles tool execution with rate limiting."""

    def __init__(
        self,
        send_message_fn: Callable,
        on_tool_usage: Optional[Callable[[str, str], None]] = None,
        on_website_ready: Optional[Callable[[dict], None]] = None,
    ):
        """Initialize the tool executor.

        Args:
            send_message_fn: Async function to send messages to OpenAI.
            on_tool_usage: Callback for tool usage notifications.
            on_website_ready: Callback for website generation results.
        """
        self._send_message = send_message_fn
        self.on_tool_usage = on_tool_usage
        self.on_website_ready = on_website_ready
        # Fixed-window rate-limiting state.
        self._tool_call_count = 0
        self._tool_call_reset_time = 0.0

    @staticmethod
    def _function_output(call_id: str, payload) -> dict:
        """Wrap *payload* as a function_call_output conversation item."""
        return {
            "type": "conversation.item.create",
            "item": {
                "type": "function_call_output",
                "call_id": call_id,
                "output": json.dumps(payload),
            },
        }

    def _over_rate_limit(self) -> bool:
        """Count this call against the window; True when the cap is exceeded."""
        now = time.time()
        if now - self._tool_call_reset_time > TOOL_CALL_WINDOW_SECONDS:
            # Window expired: start a fresh counting window.
            self._tool_call_count = 0
            self._tool_call_reset_time = now
        self._tool_call_count += 1
        return self._tool_call_count > MAX_TOOL_CALLS_PER_WINDOW

    async def execute(self, call_id: str, tool_name: str, arguments_str: str) -> None:
        """Execute a tool call and send the result back to OpenAI.

        Routes to either the app tools handler (for app management tools) or
        the tool registry (for dynamic tools like weather, web_search).

        Includes rate limiting to prevent infinite tool call loops.

        Args:
            call_id: The unique ID for this tool call.
            tool_name: Name of the tool to execute.
            arguments_str: JSON string of tool arguments.
        """
        if self._over_rate_limit():
            logger.warning(
                f"⚠️ Tool call rate limit exceeded ({self._tool_call_count} calls in "
                f"{TOOL_CALL_WINDOW_SECONDS}s). Possible infinite loop detected."
            )
            await self._send_message(self._function_output(call_id, {
                "success": False,
                "error": "Rate limit exceeded. Please wait before making more requests."
            }))
            # Deliberately no response.create here - that is what breaks the loop.
            return

        try:
            try:
                arguments = json.loads(arguments_str) if arguments_str else {}
            except json.JSONDecodeError:
                arguments = {}
                logger.warning(f"Failed to parse tool arguments: {arguments_str}")

            registry = get_tool_registry()
            if registry.get(tool_name):
                # Dynamic tool (weather, web_search, ...) from the registry.
                logger.info(f"🔧 Executing dynamic tool: {tool_name}")
                result = await registry.execute(tool_name, arguments)
            else:
                # Base app tool (create_custom_app, activate_custom_app, ...).
                result = await get_tools_handler().execute_tool(tool_name, arguments)

            logger.info(f"🔧 Tool result: {result}")

            # Successful website generation is surfaced to the UI callback.
            if tool_name == "generate_website" and result.get("success") and self.on_website_ready:
                self.on_website_ready({
                    "website_id": result.get("website_id"),
                    "url": result.get("url"),
                    "title": result.get("title"),
                    "is_edit": result.get("is_edit", False),
                })

            await self._send_message(self._function_output(call_id, result))
            # Trigger OpenAI to generate a response based on the tool result.
            await self._send_message({"type": "response.create"})

            if self.on_tool_usage:
                self.on_tool_usage(tool_name, "completed")

        except Exception as e:
            logger.error(f"Error executing tool {tool_name}: {e}", exc_info=True)
            await self._send_message(
                self._function_output(call_id, {"success": False, "error": str(e)})
            )
            await self._send_message({"type": "response.create"})

            # Notify iOS that tool usage is complete (even on error).
            if self.on_tool_usage:
                self.on_tool_usage(tool_name, "completed")
|
| 158 |
+
|
reachys_brain/openai_realtime/tools_loader.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Lazy loading of tools to avoid circular imports.
|
| 2 |
+
|
| 3 |
+
Provides functions to lazily load APP_TOOLS, tools handler, and tool registry.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
from typing import Optional
|
| 8 |
+
|
| 9 |
+
logger = logging.getLogger(__name__)
|
| 10 |
+
|
| 11 |
+
# Lazy-loaded modules
|
| 12 |
+
_APP_TOOLS = None
|
| 13 |
+
_tools_handler = None
|
| 14 |
+
_tool_registry = None
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def get_app_tools() -> list[dict]:
    """Lazy import of APP_TOOLS to avoid circular imports."""
    global _APP_TOOLS
    if _APP_TOOLS is None:
        from ..app_tools import APP_TOOLS
        _APP_TOOLS = APP_TOOLS
    return _APP_TOOLS
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def get_tools_handler():
    """Lazy import of the app tools handler (avoids circular imports)."""
    global _tools_handler
    if _tools_handler is None:
        # Alias the import so it does not shadow this function's own name.
        from ..app_tools import get_tools_handler as _factory
        _tools_handler = _factory()
    return _tools_handler
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def get_tool_registry():
    """Lazy import of the dynamic tool registry (avoids circular imports)."""
    global _tool_registry
    if _tool_registry is None:
        from ..tools import get_registry as _factory
        _tool_registry = _factory()
    return _tool_registry
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def get_all_tools(enabled_tools: Optional[list[str]] = None) -> list[dict]:
    """Get all tools to register with OpenAI.

    Combines base APP_TOOLS (app management, power) with enabled dynamic tools
    (weather, web_search) based on the active app's configuration.

    Args:
        enabled_tools: List of enabled tool IDs. None means enable all.

    Returns:
        List of OpenAI function definitions.
    """
    # Always include base app tools (create, activate, deactivate, etc.)
    base_tools = get_app_tools()

    # Get dynamic tools from registry based on enabled_tools:
    # None = default Reachy (all dynamic tools), list = custom app subset.
    registry = get_tool_registry()
    dynamic_tool_ids = (
        registry.get_all_ids() if enabled_tools is None else enabled_tools
    )

    # Get tool definitions for enabled dynamic tools
    dynamic_tools = registry.get_definitions(dynamic_tool_ids)

    # Combine base + dynamic tools (copy base so the cached list isn't mutated)
    all_tools = list(base_tools) + dynamic_tools

    # Lazy %-style args so the message is only formatted if INFO is enabled.
    logger.info(
        "🔧 Tools: %d base + %d dynamic = %d total",
        len(base_tools),
        len(dynamic_tools),
        len(all_tools),
    )

    return all_tools
| 80 |
+
|
reachys_brain/routes/conversation.py
DELETED
|
@@ -1,1285 +0,0 @@
|
|
| 1 |
-
"""WebSocket endpoint for real-time conversation with iOS app.
|
| 2 |
-
|
| 3 |
-
This module provides the WebSocket endpoint for voice conversations,
|
| 4 |
-
delegating to specialized modules for:
|
| 5 |
-
- broadcast_manager: Client connection management and broadcasting
|
| 6 |
-
- conversation_services: Service lifecycle management
|
| 7 |
-
- audio_manager: Audio streaming and buffering
|
| 8 |
-
- animation_manager: Pre-speech animations and gestures
|
| 9 |
-
"""
|
| 10 |
-
|
| 11 |
-
import asyncio
|
| 12 |
-
import logging
|
| 13 |
-
import os
|
| 14 |
-
from typing import Optional
|
| 15 |
-
|
| 16 |
-
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
|
| 17 |
-
|
| 18 |
-
from ..openai_realtime import ConnectionState, SpeakingState, ResponseState
|
| 19 |
-
from ..tools.reminders import set_reminder_request_callback, handle_reminder_result
|
| 20 |
-
from ..tools.contacts import set_contacts_request_callback, handle_contacts_result
|
| 21 |
-
from ..tools.scheduled_messages import set_scheduled_message_callback, handle_scheduled_message_result
|
| 22 |
-
from ..tools.website_generator import set_website_request_callback, save_website_from_ios
|
| 23 |
-
|
| 24 |
-
from .audio_stream_manager import ConversationTimings
|
| 25 |
-
from .broadcast_manager import broadcast, safe_broadcast, add_client, remove_client
|
| 26 |
-
from .conversation_messages import STOP_COMMANDS
|
| 27 |
-
from .conversation_services import (
|
| 28 |
-
get_services,
|
| 29 |
-
get_state,
|
| 30 |
-
init_services as init_conversation_services,
|
| 31 |
-
cleanup_services as cleanup_conversation_services,
|
| 32 |
-
wire_openai_callbacks,
|
| 33 |
-
)
|
| 34 |
-
from .audio_manager import (
|
| 35 |
-
handle_audio_delta,
|
| 36 |
-
delayed_resume_microphone,
|
| 37 |
-
start_audio_streaming,
|
| 38 |
-
stop_audio_streaming,
|
| 39 |
-
)
|
| 40 |
-
from .animation_manager import (
|
| 41 |
-
handle_pre_speech_animation,
|
| 42 |
-
send_greeting,
|
| 43 |
-
send_goodbye,
|
| 44 |
-
set_custom_animations,
|
| 45 |
-
clear_custom_animations,
|
| 46 |
-
)
|
| 47 |
-
from .task_tracker import create_tracked_task, cancel_all_tracked_tasks
|
| 48 |
-
|
| 49 |
-
logger = logging.getLogger(__name__)
|
| 50 |
-
|
| 51 |
-
router = APIRouter(tags=["Conversation"])
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
async def send_greeting_and_start_streaming() -> None:
    """Send greeting and start audio streaming after it completes.

    This ensures that:
    1. No audio is sent to OpenAI during the welcome animation
    2. The greeting plays completely without VAD interruption
    3. Audio streaming only starts after greeting finishes

    Flow:
    1. send_greeting() sends text -> ResponseState.WAITING
    2. Audio arrives -> ResponseState.GENERATING
    3. response.audio.done -> ResponseState.COMPLETE
    4. We wait for COMPLETE (audio sent by OpenAI)
    5. Wait for audio buffer to drain (audio played by speaker)
    6. Start streaming and resume microphone
    """
    services = get_services()

    try:
        # Send the greeting (this will set ResponseState.WAITING -> GENERATING -> COMPLETE)
        await send_greeting()

        # Wait for the greeting response to be COMPLETE or IDLE
        # COMPLETE = OpenAI finished sending audio
        # IDLE = Response cycle fully complete
        max_wait = 15.0  # Maximum wait for greeting
        wait_interval = 0.2  # Poll period in seconds
        waited = 0.0

        # Poll the response state; if services.openai is missing the loop
        # simply runs out the timeout and we proceed anyway.
        while waited < max_wait:
            if services.openai:
                state = services.openai.response_state
                if state in (ResponseState.COMPLETE, ResponseState.IDLE):
                    break
            await asyncio.sleep(wait_interval)
            waited += wait_interval

        if waited >= max_wait:
            logger.warning("⚠️ Greeting response timeout - starting streaming anyway")
        else:
            logger.info(f"✅ Greeting response complete (waited {waited:.1f}s)")

        # Wait for audio to finish playing through speaker (bounded at 8s so
        # a stuck player cannot block the conversation forever)
        if services.audio_player and services.audio_player.is_playing:
            extra_wait = 0.0
            max_extra = 8.0
            while services.audio_player.is_playing and extra_wait < max_extra:
                await asyncio.sleep(wait_interval)
                extra_wait += wait_interval
            if extra_wait > 0:
                logger.info(f"⏳ Waited {extra_wait:.1f}s for greeting audio to finish")

        # Additional buffer drain time
        await asyncio.sleep(ConversationTimings.MICROPHONE_RESUME_DELAY_SECONDS)

        # Now start audio streaming
        if services.openai and services.openai.is_listening:
            start_audio_streaming()
            logger.info("✅ Audio streaming started after greeting")

            # Clear any buffered audio
            await services.openai.clear_audio_buffer()

            # Mark response cycle as complete
            services.openai.mark_response_complete()

        # Resume audio capture
        if services.audio_capture:
            services.audio_capture.resume_capture()
            logger.info("▶️ Microphone resumed after greeting")

    except Exception as e:
        logger.error(f"Error in greeting flow: {e}")
        # Start streaming anyway to not leave the system in a broken state
        if services.openai and services.openai.is_listening:
            start_audio_streaming()
        if services.audio_capture:
            services.audio_capture.resume_capture()
| 132 |
-
|
| 133 |
-
async def _reset_openai_session(keep_listening: bool) -> None:
    """Hard reset OpenAI session by disconnecting and reconnecting.

    OpenAI Realtime rejects changing voice via `session.update` once assistant
    audio has been produced in the current session. To apply a new voice/language
    reliably, we must create a new session.

    If `keep_listening` is True, we restore listening + audio streaming so the
    next user turn works without user-visible reconnect steps.

    Args:
        keep_listening: Restore listening/streaming state after the reconnect;
            when False, audio capture is stopped entirely.
    """
    services = get_services()

    # Nothing to reset if no OpenAI service exists yet
    if not services.openai:
        return

    # Stop any local playback/gestures
    if services.audio_player:
        services.audio_player.cancel()

    if services.speaking_gestures:
        await services.speaking_gestures.stop()

    # Stop audio streaming task (depends on openai.is_connected)
    stop_audio_streaming()

    # Pause capture while we reset, to avoid buffering user audio into nowhere
    if services.audio_capture:
        services.audio_capture.pause_capture()

    # Best-effort: cancel any in-flight response and clear input buffer
    try:
        await services.openai.cancel_response()
        await services.openai.clear_audio_buffer()
    except Exception:
        pass

    # Fully disconnect and reconnect (new session); disconnect failures are
    # tolerated because we are about to replace the session anyway
    try:
        await services.openai.disconnect()
    except Exception:
        pass

    # connect() reads OPENAI_API_KEY from env (set by iOS connect command)
    await services.openai.connect()

    if keep_listening:
        services.openai.start_listening()
        start_audio_streaming()

        # Resume capture with normal echo-avoidance delay
        create_tracked_task(delayed_resume_microphone())
    else:
        if services.audio_capture:
            services.audio_capture.stop_capture()
| 187 |
-
|
| 188 |
-
|
| 189 |
-
# MARK: - Service Lifecycle
|
| 190 |
-
|
| 191 |
-
def init_services() -> None:
    """Initialize conversation services and wire up callbacks.

    Creates the shared service container, then connects every OpenAI event
    callback plus the iOS-delegation callbacks (websites, reminders, contacts,
    scheduled messages) so events flow out to connected clients.
    """
    services = init_conversation_services()

    # Wire up OpenAI callbacks
    wire_openai_callbacks(
        on_connection_state=_on_connection_state,
        on_speaking_state=_on_speaking_state,
        on_response_state=_on_response_state,
        on_transcript_update=_on_transcript_update,
        on_response_text=_on_response_text,
        on_audio_delta=handle_audio_delta,
        on_error=_on_error,
        on_app_change=_on_app_change,
        on_tool_usage=_on_tool_usage,
        on_website_ready=_on_website_ready,
    )

    # Wire up meeting callbacks (assigned directly on the service object)
    if services.openai:
        services.openai.on_meeting_started = _on_meeting_started
        services.openai.on_meeting_stopped = _on_meeting_stopped
        services.openai.on_meeting_transcript_update = _on_meeting_transcript_update

    # Wire up website request callback to delegate generation to iOS
    set_website_request_callback(_on_website_request)

    # Wire up reminder request callback to delegate operations to iOS
    set_reminder_request_callback(_on_reminder_request)

    # Wire up contacts request callback to delegate operations to iOS
    set_contacts_request_callback(_on_contacts_request)

    # Wire up scheduled message request callback to delegate operations to iOS
    set_scheduled_message_callback(_on_scheduled_message_request)

    logger.info("Conversation services initialized")
| 228 |
-
|
| 229 |
-
|
| 230 |
-
async def cleanup_services() -> None:
    """Tear down conversation services.

    Runs asynchronously so each cleanup step can be awaited, preventing
    the app from hanging during shutdown.
    """
    # Cancel background tasks first, then dismantle the services they used.
    cancel_all_tracked_tasks()
    await cleanup_conversation_services()
| 240 |
-
|
| 241 |
-
|
| 242 |
-
# MARK: - OpenAI Callbacks
|
| 243 |
-
|
| 244 |
-
def _on_connection_state(state: ConnectionState) -> None:
    """Relay OpenAI connection-state changes to every connected client."""
    message = {
        "type": "connection_state",
        "state": state.value,
    }
    create_tracked_task(broadcast(message))
| 250 |
-
|
| 251 |
-
|
| 252 |
-
def _on_speaking_state(state: SpeakingState) -> None:
    """Handle OpenAI speaking state changes.

    On SPEAKING: pause the microphone, reset per-response state, start
    gestures, and clear OpenAI's input buffer to stop echo from triggering
    VAD. On any other state: stop gestures, bump the response counter, and
    schedule a delayed microphone resume so the output buffer drains first.
    """
    services = get_services()
    conv_state = get_state()

    # Pause/resume audio capture based on speaking state
    if services.audio_capture:
        if state == SpeakingState.SPEAKING:
            services.audio_capture.pause_capture()
            logger.info("⏸️ Paused microphone (AI speaking)")

            # Reset buffering state for new response
            conv_state.reset_for_new_response()

            # Start subtle speaking gestures while talking
            if services.speaking_gestures:
                create_tracked_task(services.speaking_gestures.start())

            # Clear OpenAI's audio buffer to prevent echo from interrupting
            if services.openai:
                create_tracked_task(services.openai.clear_audio_buffer())
                logger.info("🗑️ Cleared OpenAI audio buffer (preventing echo)")
        else:
            # AI finished speaking - stop gestures
            if services.speaking_gestures:
                create_tracked_task(services.speaking_gestures.stop())

            conv_state.response_count += 1

            # Reset buffering state for next response
            conv_state.is_buffering_audio = True
            conv_state.animation_played_for_response = False

            # Delay resuming microphone to let audio buffer drain (prevent echo)
            create_tracked_task(delayed_resume_microphone())

    # Always mirror the state change to connected clients
    create_tracked_task(broadcast({
        "type": "speaking_state",
        "state": state.value,
    }))
| 292 |
-
|
| 293 |
-
|
| 294 |
-
def _on_response_state(state: ResponseState) -> None:
    """Handle OpenAI response lifecycle state changes.

    This is the key callback for preventing audio cutoff:
    - WAITING: Request sent, pause microphone immediately
    - GENERATING: Audio being received, mic stays paused
    - COMPLETE: Audio done, wait for buffer drain then resume
    - IDLE: Buffer drained, mic can be active

    Args:
        state: The new response lifecycle state from the OpenAI service.
    """
    services = get_services()
    conv_state = get_state()

    if state == ResponseState.WAITING:
        # Immediately pause microphone when a request is sent
        # This prevents VAD from detecting noise/echo before response starts
        if services.audio_capture:
            services.audio_capture.pause_capture()
            logger.info("⏸️ Paused microphone (waiting for AI response)")

        # Clear OpenAI's audio buffer to prevent any buffered audio
        if services.openai:
            create_tracked_task(services.openai.clear_audio_buffer())

    elif state == ResponseState.GENERATING:
        # Audio is being generated - ensure mic stays paused
        if services.audio_capture and not services.audio_capture.is_paused:
            services.audio_capture.pause_capture()
            logger.info("⏸️ Paused microphone (AI generating audio)")

    elif state == ResponseState.COMPLETE:
        # Response complete - schedule microphone resume after buffer drains
        # The actual resume happens via delayed_resume_microphone
        logger.info("📊 Response complete - waiting for audio buffer to drain")

    elif state == ResponseState.IDLE:
        # Response cycle complete - microphone can be resumed
        # This is called after buffer has drained
        logger.info("📊 Response cycle idle - microphone can resume")

    # Broadcast state change to iOS
    create_tracked_task(broadcast({
        "type": "response_state",
        "state": state.value,
    }))
| 338 |
-
|
| 339 |
-
|
| 340 |
-
def _is_stop_command(transcript: str, language: str = "en") -> bool:
    """Check if the transcript is a stop command.

    Matches when the normalized transcript equals a stop word, or starts or
    ends with one as a whole word. The current language's stop words are
    checked first; English stop words are always checked as a fallback.

    Args:
        transcript: Raw user transcript to test.
        language: ISO language code of the active conversation.

    Returns:
        True if the transcript should end the conversation.
    """
    text = transcript.strip().lower()

    def _matches(stop: str) -> bool:
        # Exact match, or stop word at the start/end as a whole word.
        return text == stop or text.startswith(stop + " ") or text.endswith(" " + stop)

    # Check the current language's stop words, then English as a fallback
    # (for "en" or unknown languages this is the same list, checked once).
    languages = [language] if language == "en" else [language, "en"]
    for lang in languages:
        for stop in STOP_COMMANDS.get(lang, STOP_COMMANDS["en"]):
            if _matches(stop):
                return True

    return False
| 357 |
-
|
| 358 |
-
|
| 359 |
-
def _on_transcript_update(transcript: str) -> None:
    """Forward user transcript updates to clients, intercepting stop commands."""
    services = get_services()

    # A recognized stop command ends the conversation instead of being relayed.
    lang = services.openai.language if services.openai else "en"
    if _is_stop_command(transcript, lang):
        logger.info(f"🛑 Stop command detected: '{transcript}'")
        create_tracked_task(_handle_stop_command())
        return

    message = {
        "type": "transcript_update",
        "transcript": transcript,
    }
    create_tracked_task(broadcast(message))
| 374 |
-
|
| 375 |
-
|
| 376 |
-
async def _handle_stop_command() -> None:
    """Handle a voice stop command - immediately end the conversation.

    Tears down in order: local playback, in-flight OpenAI response, gestures,
    audio streaming, capture, OpenAI listening/session, then notifies clients.
    The OpenAI session is fully disconnected so the next conversation always
    starts with a fresh session.
    """
    services = get_services()
    state = get_state()

    try:
        # Cancel any current AI response
        if services.audio_player:
            services.audio_player.cancel()

        if services.openai:
            await services.openai.cancel_response()

        # Stop speaking gestures if running
        if services.speaking_gestures:
            await services.speaking_gestures.stop()

        # Stop the audio stream task
        stop_audio_streaming()

        # Stop audio capture
        if services.audio_capture:
            services.audio_capture.stop_capture()

        # Stop listening on OpenAI
        if services.openai:
            services.openai.stop_listening()

            # Enforce: next conversation is always a new OpenAI session
            try:
                await services.openai.disconnect()
            except Exception:
                pass

        # Mark conversation as ended (allows idle movements to resume)
        state.end_conversation()

        # Broadcast to iOS that listening has stopped
        await broadcast({
            "type": "listening_state",
            "listening": False,
        })

        # Also notify of the transcript that caused the stop
        await broadcast({
            "type": "conversation_stopped",
            "reason": "voice_command",
        })

        logger.info("✅ Conversation stopped via voice command")

    except Exception as e:
        logger.error(f"Error handling stop command: {e}")
        # Broadcast error to clients
        await broadcast({
            "type": "error",
            "message": "Failed to stop conversation",
        })
| 434 |
-
|
| 435 |
-
|
| 436 |
-
def _on_response_text(text: str) -> None:
    """Record AI response text, trigger its pre-speech animation, and relay it."""
    get_state().last_response_text = text

    # Kick off the pre-speech animation workflow for this response.
    create_tracked_task(handle_pre_speech_animation(text))

    message = {
        "type": "response_text",
        "text": text,
    }
    create_tracked_task(broadcast(message))
| 448 |
-
|
| 449 |
-
|
| 450 |
-
def _on_error(error: str) -> None:
    """Relay OpenAI error messages to every connected client."""
    payload = {
        "type": "error",
        "message": error,
    }
    create_tracked_task(broadcast(payload))
| 456 |
-
|
| 457 |
-
|
| 458 |
-
def _on_tool_usage(tool_name: str, status: str) -> None:
    """Notify clients when a tool starts or finishes running."""
    logger.info(f"🔧 Tool usage: {tool_name} ({status})")
    # The tool name is only meaningful while the tool is running.
    payload = {
        "type": "tool_usage",
        "tool": tool_name if status == "started" else None,
        "status": status,
    }
    create_tracked_task(broadcast(payload))
| 466 |
-
|
| 467 |
-
|
| 468 |
-
def _on_website_ready(data: dict) -> None:
    """Broadcast that a website has finished generating.

    Expects `data` to carry website_id (required), url, title, and is_edit.
    """
    try:
        website_id = data.get("website_id", "")
        if not website_id:
            logger.error("_on_website_ready: Missing website_id")
            return

        url = data.get("url", "")
        title = data.get("title", "Generated Website")
        is_edit = data.get("is_edit", False)

        action = "updated" if is_edit else "created"
        logger.info(f"🌐 Website {action}: {title} ({website_id})")

        create_tracked_task(safe_broadcast({
            "type": "website_ready",
            "website_id": website_id,
            "url": url,
            "title": title,
            "is_edit": is_edit,
        }))
    except Exception as e:
        logger.error(f"Error in _on_website_ready: {e}", exc_info=True)
| 492 |
-
|
| 493 |
-
|
| 494 |
-
def _on_website_request(
    website_id: str,
    description: str,
    is_edit: bool,
    existing_html: Optional[str],
) -> None:
    """Handle website generation request - delegates to iOS.

    Validates the request, truncates oversized HTML payloads, and broadcasts
    a `website_request` event for the iOS app to act on.

    Args:
        website_id: Identifier of the website to create or edit (required).
        description: What to generate (required).
        is_edit: True when editing an existing site.
        existing_html: Current HTML when editing, possibly very large.
    """
    # Cap on HTML forwarded over the WebSocket to prevent oversized frames.
    max_html_chars = 100000

    try:
        if not website_id:
            logger.error("_on_website_request: Missing website_id")
            return

        if not description:
            logger.error("_on_website_request: Missing description")
            return

        # `description` is known non-empty past the guard above, so slice
        # directly (the previous `if description` conditional was dead code).
        desc_preview = description[:50]
        logger.info(f"📱 Requesting iOS to generate website {website_id}: {desc_preview}...")

        # Limit existing_html size for broadcast to prevent WebSocket issues
        broadcast_existing_html = existing_html
        if existing_html and len(existing_html) > max_html_chars:
            logger.warning(f"Truncating existing_html for broadcast ({len(existing_html)} chars)")
            broadcast_existing_html = existing_html[:max_html_chars]

        create_tracked_task(safe_broadcast({
            "type": "website_request",
            "website_id": website_id,
            "description": description,
            "is_edit": is_edit,
            "existing_html": broadcast_existing_html,
        }))
    except Exception as e:
        logger.error(f"Error in _on_website_request: {e}", exc_info=True)
| 529 |
-
|
| 530 |
-
|
| 531 |
-
def _on_reminder_request(
    request_id: str,
    action: str,
    params: dict,
) -> None:
    """Forward a reminder operation to the iOS app for execution."""
    try:
        if not request_id:
            logger.error("_on_reminder_request: Missing request_id")
            return

        logger.info(f"📱 Requesting iOS to {action} reminder: {request_id}")

        payload = {
            "type": "reminder_request",
            "request_id": request_id,
            "action": action,
            "params": params,
        }
        create_tracked_task(safe_broadcast(payload))
    except Exception as e:
        logger.error(f"Error in _on_reminder_request: {e}", exc_info=True)
| 552 |
-
|
| 553 |
-
|
| 554 |
-
def _on_contacts_request(
    request_id: str,
    action: str,
    params: dict,
) -> None:
    """Forward a contacts operation to the iOS app for execution."""
    try:
        if not request_id:
            logger.error("_on_contacts_request: Missing request_id")
            return

        logger.info(f"📇 Requesting iOS to {action} contacts: {request_id}")

        payload = {
            "type": "contacts_request",
            "request_id": request_id,
            "action": action,
            "params": params,
        }
        create_tracked_task(safe_broadcast(payload))
    except Exception as e:
        logger.error(f"Error in _on_contacts_request: {e}", exc_info=True)
| 575 |
-
|
| 576 |
-
|
| 577 |
-
def _on_scheduled_message_request(
    request_id: str,
    action: str,
    params: dict,
) -> None:
    """Forward a scheduled-message operation to the iOS app for execution."""
    try:
        if not request_id:
            logger.error("_on_scheduled_message_request: Missing request_id")
            return

        logger.info(f"📬 Requesting iOS to {action} scheduled message: {request_id}")

        payload = {
            "type": "scheduled_message_request",
            "request_id": request_id,
            "action": action,
            "params": params,
        }
        create_tracked_task(safe_broadcast(payload))
    except Exception as e:
        logger.error(f"Error in _on_scheduled_message_request: {e}", exc_info=True)
| 598 |
-
|
| 599 |
-
|
| 600 |
-
def _on_app_change(data: dict) -> None:
    """Handle app activation/deactivation changes from voice commands.

    On activation: installs the app's custom emotion animations (if any) and
    broadcasts an `app_activated` event. On deactivation: restores default
    animations and broadcasts `app_deactivated`.

    Args:
        data: Event payload with a "type" key ("app_activated" or
            "app_deactivated") and, for activation, an "app" dict.
    """
    # NOTE: the previous version fetched get_state() here but never used it;
    # the unused local has been removed.
    event_type = data.get("type", "")

    if event_type == "app_activated":
        app = data.get("app", {})
        logger.info(f"🚀 Voice-activated app: {app.get('name', 'Unknown')}")

        # Update emotion animations if the app has custom ones
        emotion_animations = app.get("emotion_animations", {})
        if emotion_animations:
            set_custom_animations(emotion_animations)

        create_tracked_task(broadcast({
            "type": "app_activated",
            "app": {
                "id": app.get("id"),
                "name": app.get("name"),
                "description": app.get("description", ""),
            }
        }))

    elif event_type == "app_deactivated":
        logger.info("🛑 Voice-deactivated app - reverting to default")
        clear_custom_animations()

        create_tracked_task(broadcast({
            "type": "app_deactivated"
        }))
| 630 |
-
|
| 631 |
-
|
| 632 |
-
# MARK: - Meeting Callbacks
|
| 633 |
-
|
| 634 |
-
def _on_meeting_started(meeting_id: str, title: str) -> None:
    """Tell clients that meeting recording has begun."""
    logger.info(f"📝 Meeting started: {title} ({meeting_id})")
    payload = {
        "type": "meeting_started",
        "meeting_id": meeting_id,
        "title": title,
    }
    create_tracked_task(safe_broadcast(payload))
| 642 |
-
|
| 643 |
-
|
| 644 |
-
def _on_meeting_stopped(meeting_id: str) -> None:
    """Tell clients that meeting recording has ended."""
    logger.info(f"📝 Meeting stopped: {meeting_id}")
    payload = {
        "type": "meeting_stopped",
        "meeting_id": meeting_id,
    }
    create_tracked_task(safe_broadcast(payload))
| 651 |
-
|
| 652 |
-
|
| 653 |
-
def _on_meeting_transcript_update(meeting_id: str, transcript: str) -> None:
    """Push a live meeting-transcript preview to clients.

    Only the tail of the transcript is broadcast because the full text can
    grow large; 500 characters is enough for a live preview.
    """
    # A negative slice already returns the whole string when it is shorter
    # than 500 characters, so no length check is needed.
    preview = transcript[-500:]

    create_tracked_task(safe_broadcast({
        "type": "meeting_transcript_update",
        "meeting_id": meeting_id,
        "transcript_preview": preview,
        "total_length": len(transcript),
    }))
| 665 |
-
|
| 666 |
-
|
| 667 |
-
# MARK: - WebSocket Endpoint
|
| 668 |
-
|
| 669 |
-
@router.websocket("/ws/conversation")
async def conversation_websocket(websocket: WebSocket) -> None:
    """WebSocket endpoint for iOS conversation app.

    Accepts commands from iOS and relays events from OpenAI.

    Commands:
    - connect: Connect to OpenAI (requires api_key)
    - disconnect: Disconnect from OpenAI
    - start_listening: Start microphone capture
    - stop_listening: Stop microphone capture
    - set_language: Set conversation language
    - set_voice: Set voice ID (also updates language)
    - send_text: Send a text message
    - set_system_prompt: Set custom personality (requires system_prompt)
    - clear_system_prompt: Clear custom personality, revert to default
    - cancel_response: Cancel current AI response
    - clear_audio_buffer: Clear the input audio buffer

    Events (sent to iOS):
    - connection_state: OpenAI connection state
    - speaking_state: AI speaking state
    - transcript_update: User transcript (real-time)
    - response_text: AI response text
    - personality_set: Custom personality was set
    - personality_cleared: Reverted to default personality
    - error: Error messages
    """
    await websocket.accept()
    # Register with the broadcast manager so server-side events reach this client
    count = add_client(websocket)
    logger.info(f"iOS client connected ({count} total)")

    services = get_services()

    # Send current state so a reconnecting client is immediately in sync
    if services.openai:
        await websocket.send_json({
            "type": "connection_state",
            "state": services.openai.connection_state.value,
        })

    try:
        # Receive-and-dispatch loop; runs until the client disconnects
        while True:
            data = await websocket.receive_json()
            command = data.get("command", "")
            await _handle_command(websocket, command, data)

    except WebSocketDisconnect:
        logger.info("iOS client disconnected")
    except Exception as e:
        logger.error(f"WebSocket error: {e}")
    finally:
        # Always deregister, even on error, so broadcasts don't hit a dead socket
        count = remove_client(websocket)
        logger.info(f"iOS client removed ({count} remaining)")
| 724 |
-
|
| 725 |
-
async def _handle_command(websocket: WebSocket, command: str, data: dict) -> None:
|
| 726 |
-
"""Handle a command from the iOS app."""
|
| 727 |
-
services = get_services()
|
| 728 |
-
state = get_state()
|
| 729 |
-
|
| 730 |
-
if not services.is_initialized:
|
| 731 |
-
init_services()
|
| 732 |
-
services = get_services()
|
| 733 |
-
|
| 734 |
-
try:
|
| 735 |
-
if command == "connect":
|
| 736 |
-
await _handle_connect(websocket, data)
|
| 737 |
-
|
| 738 |
-
elif command == "disconnect":
|
| 739 |
-
await _handle_disconnect()
|
| 740 |
-
|
| 741 |
-
elif command == "start_listening":
|
| 742 |
-
await _handle_start_listening(websocket)
|
| 743 |
-
|
| 744 |
-
elif command == "stop_listening":
|
| 745 |
-
await _handle_stop_listening(websocket)
|
| 746 |
-
|
| 747 |
-
elif command == "set_language":
|
| 748 |
-
language = data.get("language", "en")
|
| 749 |
-
# Persist language; immediately reset OpenAI session to apply new settings.
|
| 750 |
-
from .voice import set_preferred_language
|
| 751 |
-
success = set_preferred_language(language)
|
| 752 |
-
confirmed = False
|
| 753 |
-
|
| 754 |
-
if success and services.openai and services.openai.is_connected:
|
| 755 |
-
# Immediately disconnect and reconnect with new language
|
| 756 |
-
keep_listening = bool(services.openai.is_listening)
|
| 757 |
-
await _reset_openai_session(keep_listening=keep_listening)
|
| 758 |
-
confirmed = services.openai.is_connected
|
| 759 |
-
logger.info(f"🌍 Language switched to {language}, session reset, confirmed={confirmed}")
|
| 760 |
-
|
| 761 |
-
await websocket.send_json({
|
| 762 |
-
"type": "language_set",
|
| 763 |
-
"language": language,
|
| 764 |
-
"success": success,
|
| 765 |
-
"confirmed": confirmed,
|
| 766 |
-
})
|
| 767 |
-
|
| 768 |
-
elif command == "set_voice":
|
| 769 |
-
voice_id = data.get("voice_id", "")
|
| 770 |
-
if voice_id:
|
| 771 |
-
# Persist voice; immediately reset OpenAI session to apply new settings.
|
| 772 |
-
from .voice import set_current_voice
|
| 773 |
-
success = set_current_voice(voice_id)
|
| 774 |
-
confirmed = False
|
| 775 |
-
|
| 776 |
-
if success and services.openai and services.openai.is_connected:
|
| 777 |
-
# Immediately disconnect and reconnect with new voice
|
| 778 |
-
keep_listening = bool(services.openai.is_listening)
|
| 779 |
-
await _reset_openai_session(keep_listening=keep_listening)
|
| 780 |
-
confirmed = services.openai.is_connected
|
| 781 |
-
logger.info(f"🔊 Voice switched to {voice_id}, session reset, confirmed={confirmed}")
|
| 782 |
-
|
| 783 |
-
await websocket.send_json({
|
| 784 |
-
"type": "voice_set",
|
| 785 |
-
"voice_id": voice_id,
|
| 786 |
-
"language": services.openai.language,
|
| 787 |
-
"success": success,
|
| 788 |
-
"confirmed": confirmed,
|
| 789 |
-
})
|
| 790 |
-
|
| 791 |
-
elif command == "send_text":
|
| 792 |
-
text = data.get("text", "")
|
| 793 |
-
if text:
|
| 794 |
-
await services.openai.send_text_message(text)
|
| 795 |
-
|
| 796 |
-
elif command == "cancel_response":
|
| 797 |
-
if services.audio_player:
|
| 798 |
-
services.audio_player.cancel()
|
| 799 |
-
await services.openai.cancel_response()
|
| 800 |
-
|
| 801 |
-
elif command == "interrupt_response":
|
| 802 |
-
await _handle_interrupt(websocket)
|
| 803 |
-
|
| 804 |
-
elif command == "clear_audio_buffer":
|
| 805 |
-
await services.openai.clear_audio_buffer()
|
| 806 |
-
|
| 807 |
-
elif command == "set_system_prompt":
|
| 808 |
-
await _handle_set_system_prompt(websocket, data)
|
| 809 |
-
|
| 810 |
-
elif command == "clear_system_prompt":
|
| 811 |
-
services.openai.clear_custom_personality()
|
| 812 |
-
logger.info("🎭 Reverted to default Reachy personality")
|
| 813 |
-
await websocket.send_json({
|
| 814 |
-
"type": "personality_cleared",
|
| 815 |
-
"success": True,
|
| 816 |
-
})
|
| 817 |
-
|
| 818 |
-
elif command == "set_emotion_animations":
|
| 819 |
-
await _handle_set_emotion_animations(websocket, data)
|
| 820 |
-
|
| 821 |
-
elif command == "clear_emotion_animations":
|
| 822 |
-
clear_custom_animations()
|
| 823 |
-
await websocket.send_json({
|
| 824 |
-
"type": "emotion_animations_cleared",
|
| 825 |
-
"success": True,
|
| 826 |
-
})
|
| 827 |
-
|
| 828 |
-
elif command == "upload_website":
|
| 829 |
-
await _handle_upload_website(websocket, data)
|
| 830 |
-
|
| 831 |
-
elif command == "reminder_result":
|
| 832 |
-
await _handle_reminder_result(websocket, data)
|
| 833 |
-
|
| 834 |
-
elif command == "contacts_result":
|
| 835 |
-
await _handle_contacts_result(websocket, data)
|
| 836 |
-
|
| 837 |
-
elif command == "scheduled_message_result":
|
| 838 |
-
await _handle_scheduled_message_result(websocket, data)
|
| 839 |
-
|
| 840 |
-
else:
|
| 841 |
-
logger.warning(f"Unknown command: {command}")
|
| 842 |
-
await websocket.send_json({
|
| 843 |
-
"type": "error",
|
| 844 |
-
"message": f"Unknown command: {command}",
|
| 845 |
-
})
|
| 846 |
-
|
| 847 |
-
except Exception as e:
|
| 848 |
-
logger.error(f"Error handling command {command}: {e}")
|
| 849 |
-
await websocket.send_json({
|
| 850 |
-
"type": "error",
|
| 851 |
-
"message": str(e),
|
| 852 |
-
})
|
| 853 |
-
|
| 854 |
-
|
| 855 |
-
# MARK: - Command Handlers
|
| 856 |
-
|
| 857 |
-
async def _handle_connect(websocket: WebSocket, data: dict) -> None:
|
| 858 |
-
"""Handle the connect command."""
|
| 859 |
-
services = get_services()
|
| 860 |
-
|
| 861 |
-
api_key = data.get("api_key") or os.environ.get("OPENAI_API_KEY")
|
| 862 |
-
|
| 863 |
-
if not api_key:
|
| 864 |
-
await websocket.send_json({
|
| 865 |
-
"type": "error",
|
| 866 |
-
"message": "API key required",
|
| 867 |
-
})
|
| 868 |
-
return
|
| 869 |
-
|
| 870 |
-
if data.get("api_key"):
|
| 871 |
-
# Log masked key for debugging
|
| 872 |
-
key_preview = f"***{api_key[-4:]}" if len(api_key) > 4 else "***"
|
| 873 |
-
logger.info(f"API key provided ({key_preview})")
|
| 874 |
-
os.environ["OPENAI_API_KEY"] = data["api_key"]
|
| 875 |
-
|
| 876 |
-
await services.openai.connect(api_key)
|
| 877 |
-
|
| 878 |
-
if services.audio_player:
|
| 879 |
-
services.audio_player.start_stream()
|
| 880 |
-
|
| 881 |
-
|
| 882 |
-
async def _handle_disconnect() -> None:
|
| 883 |
-
"""Handle the disconnect command."""
|
| 884 |
-
services = get_services()
|
| 885 |
-
|
| 886 |
-
stop_audio_streaming()
|
| 887 |
-
|
| 888 |
-
if services.audio_capture:
|
| 889 |
-
services.audio_capture.stop_capture()
|
| 890 |
-
|
| 891 |
-
if services.audio_player:
|
| 892 |
-
services.audio_player.cancel()
|
| 893 |
-
|
| 894 |
-
await services.openai.disconnect()
|
| 895 |
-
|
| 896 |
-
|
| 897 |
-
async def _handle_start_listening(websocket: WebSocket) -> None:
|
| 898 |
-
"""Handle the start_listening command."""
|
| 899 |
-
services = get_services()
|
| 900 |
-
state = get_state()
|
| 901 |
-
|
| 902 |
-
if not services.openai.is_connected:
|
| 903 |
-
# Auto-connect so that "stop => new session next time" doesn't require
|
| 904 |
-
# a separate iOS connect step.
|
| 905 |
-
try:
|
| 906 |
-
await services.openai.connect()
|
| 907 |
-
except Exception as e:
|
| 908 |
-
logger.error(f"Cannot start listening: not connected to OpenAI ({e})")
|
| 909 |
-
await websocket.send_json({
|
| 910 |
-
"type": "error",
|
| 911 |
-
"message": "Not connected to OpenAI",
|
| 912 |
-
})
|
| 913 |
-
return
|
| 914 |
-
|
| 915 |
-
# Reset conversation state
|
| 916 |
-
state.reset_for_new_conversation()
|
| 917 |
-
logger.info("🎬 New conversation started - animation state reset")
|
| 918 |
-
|
| 919 |
-
# Start audio capture
|
| 920 |
-
if services.audio_capture:
|
| 921 |
-
if not services.audio_capture.is_available:
|
| 922 |
-
logger.error("Audio capture not available - no microphone detected!")
|
| 923 |
-
await websocket.send_json({
|
| 924 |
-
"type": "error",
|
| 925 |
-
"message": "Microphone not available on Reachy",
|
| 926 |
-
})
|
| 927 |
-
return
|
| 928 |
-
|
| 929 |
-
capture_started = await services.audio_capture.start_capture()
|
| 930 |
-
if not capture_started:
|
| 931 |
-
logger.error("❌ Failed to start audio capture")
|
| 932 |
-
await websocket.send_json({
|
| 933 |
-
"type": "error",
|
| 934 |
-
"message": "Failed to start microphone capture",
|
| 935 |
-
})
|
| 936 |
-
return
|
| 937 |
-
|
| 938 |
-
logger.info("✅ Audio capture started successfully")
|
| 939 |
-
else:
|
| 940 |
-
logger.error("Audio capture service not initialized")
|
| 941 |
-
await websocket.send_json({
|
| 942 |
-
"type": "error",
|
| 943 |
-
"message": "Audio capture service not initialized",
|
| 944 |
-
})
|
| 945 |
-
return
|
| 946 |
-
|
| 947 |
-
services.openai.start_listening()
|
| 948 |
-
logger.info("✅ OpenAI listening mode enabled")
|
| 949 |
-
|
| 950 |
-
# IMPORTANT: Do NOT start audio streaming here!
|
| 951 |
-
# Audio streaming will be started AFTER the greeting completes.
|
| 952 |
-
# This prevents motor noise from the welcome animation and ambient
|
| 953 |
-
# noise from triggering VAD and interrupting the greeting.
|
| 954 |
-
|
| 955 |
-
# Pause audio capture immediately - it will be resumed after greeting
|
| 956 |
-
if services.audio_capture:
|
| 957 |
-
services.audio_capture.pause_capture()
|
| 958 |
-
logger.info("⏸️ Audio capture paused until greeting completes")
|
| 959 |
-
|
| 960 |
-
await websocket.send_json({
|
| 961 |
-
"type": "listening_state",
|
| 962 |
-
"listening": True,
|
| 963 |
-
})
|
| 964 |
-
|
| 965 |
-
# Send greeting - audio streaming will start after it completes
|
| 966 |
-
create_tracked_task(send_greeting_and_start_streaming())
|
| 967 |
-
|
| 968 |
-
|
| 969 |
-
async def _handle_stop_listening(websocket: WebSocket) -> None:
|
| 970 |
-
"""Handle the stop_listening command."""
|
| 971 |
-
services = get_services()
|
| 972 |
-
state = get_state()
|
| 973 |
-
|
| 974 |
-
# Stop capturing new audio input
|
| 975 |
-
stop_audio_streaming()
|
| 976 |
-
|
| 977 |
-
if services.audio_capture:
|
| 978 |
-
services.audio_capture.stop_capture()
|
| 979 |
-
|
| 980 |
-
services.openai.stop_listening()
|
| 981 |
-
|
| 982 |
-
# Broadcast listening stopped immediately for UI responsiveness
|
| 983 |
-
await websocket.send_json({
|
| 984 |
-
"type": "listening_state",
|
| 985 |
-
"listening": False,
|
| 986 |
-
})
|
| 987 |
-
|
| 988 |
-
# Send goodbye and wait for it to be spoken before disconnecting
|
| 989 |
-
# This runs in the background so we don't block the WebSocket response
|
| 990 |
-
async def goodbye_and_disconnect():
|
| 991 |
-
try:
|
| 992 |
-
# Send the goodbye prompt
|
| 993 |
-
await send_goodbye()
|
| 994 |
-
|
| 995 |
-
# Wait for the AI to generate and speak the goodbye
|
| 996 |
-
# Give it enough time to respond and speak (typical goodbye is ~3-5 seconds)
|
| 997 |
-
max_wait = 8.0 # Maximum seconds to wait
|
| 998 |
-
wait_interval = 0.2
|
| 999 |
-
waited = 0.0
|
| 1000 |
-
|
| 1001 |
-
# Wait for speech to start generating
|
| 1002 |
-
await asyncio.sleep(0.5)
|
| 1003 |
-
|
| 1004 |
-
# Wait for AI response to complete
|
| 1005 |
-
while waited < max_wait:
|
| 1006 |
-
if services.openai:
|
| 1007 |
-
response_state = services.openai.response_state
|
| 1008 |
-
if response_state in (ResponseState.WAITING, ResponseState.GENERATING):
|
| 1009 |
-
# Response is ongoing, keep waiting
|
| 1010 |
-
await asyncio.sleep(wait_interval)
|
| 1011 |
-
waited += wait_interval
|
| 1012 |
-
elif waited > 1.0:
|
| 1013 |
-
# Response has finished (or never started after initial delay)
|
| 1014 |
-
break
|
| 1015 |
-
else:
|
| 1016 |
-
# Give it a moment to start
|
| 1017 |
-
await asyncio.sleep(wait_interval)
|
| 1018 |
-
waited += wait_interval
|
| 1019 |
-
else:
|
| 1020 |
-
break
|
| 1021 |
-
|
| 1022 |
-
# Also wait for audio playback to finish
|
| 1023 |
-
if services.audio_player and services.audio_player.is_playing:
|
| 1024 |
-
extra_wait = 0.0
|
| 1025 |
-
max_extra = 5.0
|
| 1026 |
-
while services.audio_player.is_playing and extra_wait < max_extra:
|
| 1027 |
-
await asyncio.sleep(wait_interval)
|
| 1028 |
-
extra_wait += wait_interval
|
| 1029 |
-
|
| 1030 |
-
except Exception as e:
|
| 1031 |
-
logger.error(f"Error in goodbye sequence: {e}")
|
| 1032 |
-
finally:
|
| 1033 |
-
# Now disconnect the OpenAI session
|
| 1034 |
-
try:
|
| 1035 |
-
await services.openai.disconnect()
|
| 1036 |
-
except Exception:
|
| 1037 |
-
pass
|
| 1038 |
-
|
| 1039 |
-
# Mark conversation as ended (allows idle movements to resume)
|
| 1040 |
-
state.end_conversation()
|
| 1041 |
-
|
| 1042 |
-
create_tracked_task(goodbye_and_disconnect())
|
| 1043 |
-
|
| 1044 |
-
|
| 1045 |
-
async def _handle_interrupt(websocket: WebSocket) -> None:
|
| 1046 |
-
"""Handle the interrupt_response command."""
|
| 1047 |
-
services = get_services()
|
| 1048 |
-
|
| 1049 |
-
logger.info("🛑 Interrupt command received from iOS!")
|
| 1050 |
-
|
| 1051 |
-
# Cancel audio playback immediately
|
| 1052 |
-
if services.audio_player:
|
| 1053 |
-
services.audio_player.cancel()
|
| 1054 |
-
|
| 1055 |
-
# Cancel the AI response
|
| 1056 |
-
if services.openai:
|
| 1057 |
-
await services.openai.cancel_response()
|
| 1058 |
-
|
| 1059 |
-
# Stop speaking gestures
|
| 1060 |
-
if services.speaking_gestures:
|
| 1061 |
-
await services.speaking_gestures.stop()
|
| 1062 |
-
|
| 1063 |
-
# Resume microphone capture so user can speak
|
| 1064 |
-
if services.audio_capture:
|
| 1065 |
-
services.audio_capture.resume_capture()
|
| 1066 |
-
logger.info("▶️ Microphone resumed after interrupt")
|
| 1067 |
-
|
| 1068 |
-
# Clear any buffered audio to prevent echo
|
| 1069 |
-
if services.openai:
|
| 1070 |
-
await services.openai.clear_audio_buffer()
|
| 1071 |
-
|
| 1072 |
-
# Notify iOS that interrupt was successful
|
| 1073 |
-
await websocket.send_json({
|
| 1074 |
-
"type": "interrupt_complete",
|
| 1075 |
-
"success": True,
|
| 1076 |
-
})
|
| 1077 |
-
|
| 1078 |
-
logger.info("✅ Interrupt complete - listening to user")
|
| 1079 |
-
|
| 1080 |
-
|
| 1081 |
-
async def _handle_set_system_prompt(websocket: WebSocket, data: dict) -> None:
|
| 1082 |
-
"""Handle the set_system_prompt command."""
|
| 1083 |
-
services = get_services()
|
| 1084 |
-
|
| 1085 |
-
system_prompt = data.get("system_prompt", "")
|
| 1086 |
-
if system_prompt:
|
| 1087 |
-
services.openai.set_custom_personality(system_prompt)
|
| 1088 |
-
logger.info(f"🎭 Custom personality set ({len(system_prompt)} chars)")
|
| 1089 |
-
await websocket.send_json({
|
| 1090 |
-
"type": "personality_set",
|
| 1091 |
-
"success": True,
|
| 1092 |
-
})
|
| 1093 |
-
else:
|
| 1094 |
-
await websocket.send_json({
|
| 1095 |
-
"type": "error",
|
| 1096 |
-
"message": "No system_prompt provided",
|
| 1097 |
-
})
|
| 1098 |
-
|
| 1099 |
-
|
| 1100 |
-
async def _handle_set_emotion_animations(websocket: WebSocket, data: dict) -> None:
|
| 1101 |
-
"""Handle the set_emotion_animations command."""
|
| 1102 |
-
emotion_animations = data.get("emotion_animations", {})
|
| 1103 |
-
if emotion_animations:
|
| 1104 |
-
set_custom_animations(emotion_animations)
|
| 1105 |
-
await websocket.send_json({
|
| 1106 |
-
"type": "emotion_animations_set",
|
| 1107 |
-
"success": True,
|
| 1108 |
-
"emotions": list(emotion_animations.keys()),
|
| 1109 |
-
})
|
| 1110 |
-
else:
|
| 1111 |
-
await websocket.send_json({
|
| 1112 |
-
"type": "error",
|
| 1113 |
-
"message": "No emotion_animations provided",
|
| 1114 |
-
})
|
| 1115 |
-
|
| 1116 |
-
|
| 1117 |
-
async def _handle_upload_website(websocket: WebSocket, data: dict) -> None:
|
| 1118 |
-
"""Handle the upload_website command."""
|
| 1119 |
-
try:
|
| 1120 |
-
website_id = data.get("website_id", "")
|
| 1121 |
-
html_content = data.get("html_content", "")
|
| 1122 |
-
title = data.get("title", "Generated Website")
|
| 1123 |
-
description = data.get("description", "")
|
| 1124 |
-
|
| 1125 |
-
# Validate required fields
|
| 1126 |
-
if not website_id:
|
| 1127 |
-
logger.error("upload_website: Missing website_id")
|
| 1128 |
-
await websocket.send_json({
|
| 1129 |
-
"type": "error",
|
| 1130 |
-
"message": "website_id is required",
|
| 1131 |
-
})
|
| 1132 |
-
return
|
| 1133 |
-
|
| 1134 |
-
if not html_content:
|
| 1135 |
-
logger.error("upload_website: Missing html_content")
|
| 1136 |
-
await websocket.send_json({
|
| 1137 |
-
"type": "error",
|
| 1138 |
-
"message": "html_content is required",
|
| 1139 |
-
})
|
| 1140 |
-
return
|
| 1141 |
-
|
| 1142 |
-
# Log upload size for debugging
|
| 1143 |
-
logger.info(f"📤 Receiving website upload: {website_id} ({len(html_content)} bytes)")
|
| 1144 |
-
|
| 1145 |
-
# Save the website to robot storage
|
| 1146 |
-
result = await save_website_from_ios(
|
| 1147 |
-
website_id=website_id,
|
| 1148 |
-
html_content=html_content,
|
| 1149 |
-
title=title,
|
| 1150 |
-
description=description,
|
| 1151 |
-
)
|
| 1152 |
-
|
| 1153 |
-
if result.get("success"):
|
| 1154 |
-
logger.info(f"💾 Website uploaded from iOS: {website_id}")
|
| 1155 |
-
await websocket.send_json({
|
| 1156 |
-
"type": "website_uploaded",
|
| 1157 |
-
"website_id": website_id,
|
| 1158 |
-
"url": result.get("url"),
|
| 1159 |
-
"title": title,
|
| 1160 |
-
"success": True,
|
| 1161 |
-
})
|
| 1162 |
-
|
| 1163 |
-
# Broadcast to all clients that website is ready
|
| 1164 |
-
await safe_broadcast({
|
| 1165 |
-
"type": "website_ready",
|
| 1166 |
-
"website_id": website_id,
|
| 1167 |
-
"url": result.get("url"),
|
| 1168 |
-
"title": title,
|
| 1169 |
-
"is_edit": data.get("is_edit", False),
|
| 1170 |
-
})
|
| 1171 |
-
else:
|
| 1172 |
-
error_msg = result.get("error", "Failed to save website")
|
| 1173 |
-
logger.error(f"upload_website failed: {error_msg}")
|
| 1174 |
-
await websocket.send_json({
|
| 1175 |
-
"type": "error",
|
| 1176 |
-
"message": error_msg,
|
| 1177 |
-
})
|
| 1178 |
-
except Exception as e:
|
| 1179 |
-
logger.error(f"Error handling upload_website: {e}", exc_info=True)
|
| 1180 |
-
await websocket.send_json({
|
| 1181 |
-
"type": "error",
|
| 1182 |
-
"message": f"Upload failed: {str(e)}",
|
| 1183 |
-
})
|
| 1184 |
-
|
| 1185 |
-
|
| 1186 |
-
async def _handle_reminder_result(websocket: WebSocket, data: dict) -> None:
|
| 1187 |
-
"""Handle the reminder_result command from iOS."""
|
| 1188 |
-
try:
|
| 1189 |
-
request_id = data.get("request_id", "")
|
| 1190 |
-
result = data.get("result", {})
|
| 1191 |
-
|
| 1192 |
-
if not request_id:
|
| 1193 |
-
logger.error("reminder_result: Missing request_id")
|
| 1194 |
-
await websocket.send_json({
|
| 1195 |
-
"type": "error",
|
| 1196 |
-
"message": "request_id is required",
|
| 1197 |
-
})
|
| 1198 |
-
return
|
| 1199 |
-
|
| 1200 |
-
logger.info(f"📱 Received reminder result: {request_id} (success: {result.get('success', False)})")
|
| 1201 |
-
|
| 1202 |
-
# Pass the result to the reminders module to resolve the pending future
|
| 1203 |
-
handle_reminder_result(request_id, result)
|
| 1204 |
-
|
| 1205 |
-
# Acknowledge receipt
|
| 1206 |
-
await websocket.send_json({
|
| 1207 |
-
"type": "reminder_result_received",
|
| 1208 |
-
"request_id": request_id,
|
| 1209 |
-
"success": True,
|
| 1210 |
-
})
|
| 1211 |
-
|
| 1212 |
-
except Exception as e:
|
| 1213 |
-
logger.error(f"Error handling reminder_result: {e}", exc_info=True)
|
| 1214 |
-
await websocket.send_json({
|
| 1215 |
-
"type": "error",
|
| 1216 |
-
"message": f"Reminder result failed: {str(e)}",
|
| 1217 |
-
})
|
| 1218 |
-
|
| 1219 |
-
|
| 1220 |
-
async def _handle_contacts_result(websocket: WebSocket, data: dict) -> None:
|
| 1221 |
-
"""Handle the contacts_result command from iOS."""
|
| 1222 |
-
try:
|
| 1223 |
-
request_id = data.get("request_id", "")
|
| 1224 |
-
result = data.get("result", {})
|
| 1225 |
-
|
| 1226 |
-
if not request_id:
|
| 1227 |
-
logger.error("contacts_result: Missing request_id")
|
| 1228 |
-
await websocket.send_json({
|
| 1229 |
-
"type": "error",
|
| 1230 |
-
"message": "request_id is required",
|
| 1231 |
-
})
|
| 1232 |
-
return
|
| 1233 |
-
|
| 1234 |
-
logger.info(f"📇 Received contacts result: {request_id} (success: {result.get('success', False)})")
|
| 1235 |
-
|
| 1236 |
-
# Pass the result to the contacts module to resolve the pending future
|
| 1237 |
-
handle_contacts_result(request_id, result)
|
| 1238 |
-
|
| 1239 |
-
# Acknowledge receipt
|
| 1240 |
-
await websocket.send_json({
|
| 1241 |
-
"type": "contacts_result_received",
|
| 1242 |
-
"request_id": request_id,
|
| 1243 |
-
"success": True,
|
| 1244 |
-
})
|
| 1245 |
-
|
| 1246 |
-
except Exception as e:
|
| 1247 |
-
logger.error(f"Error handling contacts_result: {e}", exc_info=True)
|
| 1248 |
-
await websocket.send_json({
|
| 1249 |
-
"type": "error",
|
| 1250 |
-
"message": f"Contacts result failed: {str(e)}",
|
| 1251 |
-
})
|
| 1252 |
-
|
| 1253 |
-
|
| 1254 |
-
async def _handle_scheduled_message_result(websocket: WebSocket, data: dict) -> None:
|
| 1255 |
-
"""Handle the scheduled_message_result command from iOS."""
|
| 1256 |
-
try:
|
| 1257 |
-
request_id = data.get("request_id", "")
|
| 1258 |
-
result = data.get("result", {})
|
| 1259 |
-
|
| 1260 |
-
if not request_id:
|
| 1261 |
-
logger.error("scheduled_message_result: Missing request_id")
|
| 1262 |
-
await websocket.send_json({
|
| 1263 |
-
"type": "error",
|
| 1264 |
-
"message": "request_id is required",
|
| 1265 |
-
})
|
| 1266 |
-
return
|
| 1267 |
-
|
| 1268 |
-
logger.info(f"📬 Received scheduled message result: {request_id} (success: {result.get('success', False)})")
|
| 1269 |
-
|
| 1270 |
-
# Pass the result to the scheduled_messages module to resolve the pending future
|
| 1271 |
-
handle_scheduled_message_result(request_id, result)
|
| 1272 |
-
|
| 1273 |
-
# Acknowledge receipt
|
| 1274 |
-
await websocket.send_json({
|
| 1275 |
-
"type": "scheduled_message_result_received",
|
| 1276 |
-
"request_id": request_id,
|
| 1277 |
-
"success": True,
|
| 1278 |
-
})
|
| 1279 |
-
|
| 1280 |
-
except Exception as e:
|
| 1281 |
-
logger.error(f"Error handling scheduled_message_result: {e}", exc_info=True)
|
| 1282 |
-
await websocket.send_json({
|
| 1283 |
-
"type": "error",
|
| 1284 |
-
"message": f"Scheduled message result failed: {str(e)}",
|
| 1285 |
-
})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
reachys_brain/routes/conversation/__init__.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""WebSocket endpoint for real-time conversation with iOS app.
|
| 2 |
+
|
| 3 |
+
This package provides the WebSocket endpoint for voice conversations,
|
| 4 |
+
delegating to specialized modules for:
|
| 5 |
+
- broadcast_manager: Client connection management and broadcasting
|
| 6 |
+
- conversation_services: Service lifecycle management
|
| 7 |
+
- audio_manager: Audio streaming and buffering
|
| 8 |
+
- animation_manager: Pre-speech animations and gestures
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import logging
|
| 12 |
+
|
| 13 |
+
from ...tools.reminders import set_reminder_request_callback
|
| 14 |
+
from ...tools.contacts import set_contacts_request_callback
|
| 15 |
+
from ...tools.scheduled_messages import set_scheduled_message_callback
|
| 16 |
+
from ...tools.website_generator import set_website_request_callback
|
| 17 |
+
|
| 18 |
+
from ..conversation_services import (
|
| 19 |
+
get_services,
|
| 20 |
+
get_state,
|
| 21 |
+
init_services as _init_conversation_services,
|
| 22 |
+
cleanup_services as _cleanup_conversation_services,
|
| 23 |
+
wire_openai_callbacks,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
from ..audio_manager import handle_audio_delta
|
| 27 |
+
from ..task_tracker import cancel_all_tracked_tasks
|
| 28 |
+
|
| 29 |
+
from .websocket import router
|
| 30 |
+
from .callbacks import (
|
| 31 |
+
on_connection_state,
|
| 32 |
+
on_speaking_state,
|
| 33 |
+
on_response_state,
|
| 34 |
+
on_transcript_update,
|
| 35 |
+
on_response_text,
|
| 36 |
+
on_error,
|
| 37 |
+
on_app_change,
|
| 38 |
+
on_tool_usage,
|
| 39 |
+
on_website_ready,
|
| 40 |
+
)
|
| 41 |
+
from .meeting_callbacks import (
|
| 42 |
+
on_meeting_started,
|
| 43 |
+
on_meeting_stopped,
|
| 44 |
+
on_meeting_transcript_update,
|
| 45 |
+
)
|
| 46 |
+
from .request_handlers import (
|
| 47 |
+
on_website_request,
|
| 48 |
+
on_reminder_request,
|
| 49 |
+
on_contacts_request,
|
| 50 |
+
on_scheduled_message_request,
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
logger = logging.getLogger(__name__)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def init_services() -> None:
    """Initialize conversation services and wire up callbacks."""
    services = _init_conversation_services()

    # Route every OpenAI realtime event to its conversation-package handler.
    openai_handlers = {
        "on_connection_state": on_connection_state,
        "on_speaking_state": on_speaking_state,
        "on_response_state": on_response_state,
        "on_transcript_update": on_transcript_update,
        "on_response_text": on_response_text,
        "on_audio_delta": handle_audio_delta,
        "on_error": on_error,
        "on_app_change": on_app_change,
        "on_tool_usage": on_tool_usage,
        "on_website_ready": on_website_ready,
    }
    wire_openai_callbacks(**openai_handlers)

    # Meeting lifecycle callbacks are attached directly on the OpenAI service.
    if services.openai:
        services.openai.on_meeting_started = on_meeting_started
        services.openai.on_meeting_stopped = on_meeting_stopped
        services.openai.on_meeting_transcript_update = on_meeting_transcript_update

    # Tool-request callbacks delegate the actual operation to the iOS app
    # (website generation, reminders, contacts, scheduled messages).
    for setter, handler in (
        (set_website_request_callback, on_website_request),
        (set_reminder_request_callback, on_reminder_request),
        (set_contacts_request_callback, on_contacts_request),
        (set_scheduled_message_callback, on_scheduled_message_request),
    ):
        setter(handler)

    logger.info("Conversation services initialized")
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
async def cleanup_services() -> None:
    """Clean up conversation services.

    This is async to properly await all cleanup operations and prevent
    the app from hanging during shutdown. Outstanding background tasks
    are cancelled before the service teardown itself is awaited.
    """
    # Stop pending background work first so cleanup isn't raced.
    cancel_all_tracked_tasks()

    # Then await the async teardown of the services themselves.
    await _cleanup_conversation_services()
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
# Re-export for backwards compatibility
|
| 108 |
+
__all__ = [
|
| 109 |
+
"router",
|
| 110 |
+
"init_services",
|
| 111 |
+
"cleanup_services",
|
| 112 |
+
"get_services",
|
| 113 |
+
"get_state",
|
| 114 |
+
]
|
| 115 |
+
|
reachys_brain/routes/conversation/animation.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Re-exports from animation_manager for backwards compatibility.
|
| 2 |
+
|
| 3 |
+
This module provides a bridge to the existing animation_manager module,
|
| 4 |
+
making it accessible from within the conversation package.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from ..animation_manager import (
|
| 8 |
+
handle_pre_speech_animation,
|
| 9 |
+
send_greeting,
|
| 10 |
+
send_goodbye,
|
| 11 |
+
set_custom_animations,
|
| 12 |
+
clear_custom_animations,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
__all__ = [
|
| 16 |
+
"handle_pre_speech_animation",
|
| 17 |
+
"send_greeting",
|
| 18 |
+
"send_goodbye",
|
| 19 |
+
"set_custom_animations",
|
| 20 |
+
"clear_custom_animations",
|
| 21 |
+
]
|
| 22 |
+
|
reachys_brain/routes/conversation/audio.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Re-exports from audio_manager for backwards compatibility.
|
| 2 |
+
|
| 3 |
+
This module provides a bridge to the existing audio_manager module,
|
| 4 |
+
making it accessible from within the conversation package.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from ..audio_manager import (
|
| 8 |
+
handle_audio_delta,
|
| 9 |
+
delayed_resume_microphone,
|
| 10 |
+
start_audio_streaming,
|
| 11 |
+
stop_audio_streaming,
|
| 12 |
+
flush_audio_buffer,
|
| 13 |
+
stream_audio_to_openai,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
__all__ = [
|
| 17 |
+
"handle_audio_delta",
|
| 18 |
+
"delayed_resume_microphone",
|
| 19 |
+
"start_audio_streaming",
|
| 20 |
+
"stop_audio_streaming",
|
| 21 |
+
"flush_audio_buffer",
|
| 22 |
+
"stream_audio_to_openai",
|
| 23 |
+
]
|
| 24 |
+
|
reachys_brain/routes/conversation/callbacks.py
ADDED
|
@@ -0,0 +1,252 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""OpenAI state callbacks for conversation handling.
|
| 2 |
+
|
| 3 |
+
Handles connection state, speaking state, response state, and transcript updates.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
|
| 8 |
+
from ...openai_realtime import ConnectionState, SpeakingState, ResponseState
|
| 9 |
+
from ..task_tracker import create_tracked_task
|
| 10 |
+
from ..broadcast_manager import broadcast
|
| 11 |
+
from .services import get_services, get_state
|
| 12 |
+
from .audio import delayed_resume_microphone, stop_audio_streaming
|
| 13 |
+
from .animation import handle_pre_speech_animation, clear_custom_animations, set_custom_animations
|
| 14 |
+
from .messages import STOP_COMMANDS
|
| 15 |
+
from .history import get_history
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def on_connection_state(state: ConnectionState) -> None:
    """Broadcast OpenAI connection state changes to all connected clients."""
    message = {"type": "connection_state", "state": state.value}
    create_tracked_task(broadcast(message))
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def on_speaking_state(state: SpeakingState) -> None:
    """Handle OpenAI speaking state changes.

    While the AI is speaking, the microphone is paused and OpenAI's input
    buffer is cleared so the robot does not hear (and react to) its own
    voice. When speech ends, gestures stop and the microphone resumes only
    after a delay that lets the output buffer drain.

    NOTE(review): all speaking/idle handling is gated on
    ``services.audio_capture`` being set — when there is no capture
    service, only the final broadcast runs. Confirm this is intended.
    """
    services = get_services()
    conv_state = get_state()

    # Pause/resume audio capture based on speaking state
    if services.audio_capture:
        if state == SpeakingState.SPEAKING:
            services.audio_capture.pause_capture()
            logger.info("⏸️ Paused microphone (AI speaking)")

            # Reset buffering state for new response
            conv_state.reset_for_new_response()

            # Start subtle speaking gestures while talking
            if services.speaking_gestures:
                create_tracked_task(services.speaking_gestures.start())

            # Clear OpenAI's audio buffer to prevent echo from interrupting
            if services.openai:
                create_tracked_task(services.openai.clear_audio_buffer())
                logger.info("🗑️ Cleared OpenAI audio buffer (preventing echo)")
        else:
            # AI finished speaking - stop gestures
            if services.speaking_gestures:
                create_tracked_task(services.speaking_gestures.stop())

            conv_state.response_count += 1

            # Reset buffering state for next response
            conv_state.is_buffering_audio = True
            conv_state.animation_played_for_response = False

            # Delay resuming microphone to let audio buffer drain (prevent echo)
            create_tracked_task(delayed_resume_microphone())

    # Mirror the speaking state to iOS regardless of local audio services.
    create_tracked_task(broadcast({
        "type": "speaking_state",
        "state": state.value,
    }))
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def on_response_state(state: ResponseState) -> None:
    """Handle OpenAI response lifecycle state changes.

    This is the key callback for preventing audio cutoff:
    - WAITING: Request sent, pause microphone immediately
    - GENERATING: Audio being received, mic stays paused
    - COMPLETE: Audio done, wait for buffer drain then resume
    - IDLE: Buffer drained, mic can be active

    Every transition is also broadcast to iOS so the UI can reflect it.
    """
    services = get_services()

    if state == ResponseState.WAITING:
        # Immediately pause microphone when a request is sent
        # This prevents VAD from detecting noise/echo before response starts
        if services.audio_capture:
            services.audio_capture.pause_capture()
            logger.info("⏸️ Paused microphone (waiting for AI response)")

        # Clear OpenAI's audio buffer to prevent any buffered audio
        if services.openai:
            create_tracked_task(services.openai.clear_audio_buffer())

    elif state == ResponseState.GENERATING:
        # Audio is being generated - ensure mic stays paused
        # (guard against something having resumed it in between)
        if services.audio_capture and not services.audio_capture.is_paused:
            services.audio_capture.pause_capture()
            logger.info("⏸️ Paused microphone (AI generating audio)")

    elif state == ResponseState.COMPLETE:
        # Response complete - schedule microphone resume after buffer drains
        # The actual resume happens via delayed_resume_microphone
        logger.info("📊 Response complete - waiting for audio buffer to drain")

    elif state == ResponseState.IDLE:
        # Response cycle complete - microphone can be resumed
        # This is called after buffer has drained
        logger.info("📊 Response cycle idle - microphone can resume")

    # Broadcast state change to iOS
    create_tracked_task(broadcast({
        "type": "response_state",
        "state": state.value,
    }))
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def _is_stop_command(transcript: str, language: str = "en") -> bool:
    """Check if the transcript is a stop command.

    A stop word matches when the (lowercased, trimmed) transcript equals
    it, begins with it followed by a space, or ends with a space plus it.
    English stop words are always checked as a fallback for non-English
    languages.
    """
    text = transcript.strip().lower()

    def matches(word: str) -> bool:
        return (
            text == word
            or text.startswith(word + " ")
            or text.endswith(" " + word)
        )

    # Candidate words: the current language's list (English when the
    # language is unknown), plus the English list as a fallback.
    candidates = list(STOP_COMMANDS.get(language, STOP_COMMANDS["en"]))
    if language != "en":
        candidates.extend(STOP_COMMANDS["en"])

    return any(matches(word) for word in candidates)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def on_transcript_update(transcript: str) -> None:
    """Handle user transcript updates.

    Intercepts voice stop commands before anything else, records all
    other non-empty transcripts into conversation history, and mirrors
    the transcript to connected clients.
    """
    # Imported lazily to avoid a circular import with commands.py.
    from .commands import handle_stop_command

    services = get_services()

    # Check for stop command
    lang = services.openai.language if services.openai else "en"
    if _is_stop_command(transcript, lang):
        logger.info(f"🛑 Stop command detected: '{transcript}'")
        create_tracked_task(handle_stop_command())
        # The stop phrase itself is neither recorded nor broadcast.
        return

    # Record to conversation history for context persistence
    if transcript and transcript.strip():
        history = get_history()
        history.add_user_message(transcript)

    create_tracked_task(broadcast({
        "type": "transcript_update",
        "transcript": transcript,
    }))
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def on_response_text(text: str) -> None:
    """Handle AI response text - triggers pre-speech animation."""
    get_state().last_response_text = text

    # Persist non-empty responses so context survives session resets.
    if text and text.strip():
        get_history().add_assistant_message(text)

    # Kick off the pre-speech animation workflow for this response.
    create_tracked_task(handle_pre_speech_animation(text))

    payload = {"type": "response_text", "text": text}
    create_tracked_task(broadcast(payload))
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def on_error(error: str) -> None:
    """Forward OpenAI error messages to all connected clients."""
    payload = {"type": "error", "message": error}
    create_tracked_task(broadcast(payload))
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def on_tool_usage(tool_name: str, status: str) -> None:
    """Handle tool usage notifications.

    The tool name is only included in the broadcast while the tool is
    starting; completion events carry ``tool: None``.
    """
    logger.info(f"🔧 Tool usage: {tool_name} ({status})")
    started = status == "started"
    payload = {
        "type": "tool_usage",
        "tool": tool_name if started else None,
        "status": status,
    }
    create_tracked_task(broadcast(payload))
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def on_website_ready(data: dict) -> None:
    """Handle website generation completion.

    Expects ``data`` with ``website_id`` (required), plus optional
    ``url``, ``title`` and ``is_edit``; notifies all clients via a
    ``website_ready`` broadcast.
    """
    # Imported lazily to avoid import cycles at module load time.
    from ..broadcast_manager import safe_broadcast

    try:
        website_id = data.get("website_id", "")
        url = data.get("url", "")
        title = data.get("title", "Generated Website")
        is_edit = data.get("is_edit", False)

        if not website_id:
            logger.error("on_website_ready: Missing website_id")
            return

        action = "updated" if is_edit else "created"
        logger.info(f"🌐 Website {action}: {title} ({website_id})")

        create_tracked_task(safe_broadcast({
            "type": "website_ready",
            "website_id": website_id,
            "url": url,
            "title": title,
            "is_edit": is_edit,
        }))
    except Exception as e:
        # Callback is invoked from the realtime event path - never let
        # it raise into the caller; log with traceback instead.
        logger.error(f"Error in on_website_ready: {e}", exc_info=True)
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def on_app_change(data: dict) -> None:
    """Handle app activation/deactivation changes from voice commands.

    ``data["type"]`` selects the event: ``app_activated`` installs the
    app's custom emotion animations (when it defines any) and notifies
    clients; ``app_deactivated`` reverts to the default animations.
    Unknown event types are ignored.
    """
    event_type = data.get("type", "")

    if event_type == "app_activated":
        app = data.get("app", {})
        logger.info(f"🚀 Voice-activated app: {app.get('name', 'Unknown')}")

        # Update emotion animations if the app has custom ones
        emotion_animations = app.get("emotion_animations", {})
        if emotion_animations:
            set_custom_animations(emotion_animations)

        # Only a trimmed summary of the app is sent to clients.
        create_tracked_task(broadcast({
            "type": "app_activated",
            "app": {
                "id": app.get("id"),
                "name": app.get("name"),
                "description": app.get("description", ""),
            }
        }))

    elif event_type == "app_deactivated":
        logger.info("🛑 Voice-deactivated app - reverting to default")
        clear_custom_animations()

        create_tracked_task(broadcast({
            "type": "app_deactivated"
        }))
|
| 252 |
+
|
reachys_brain/routes/conversation/commands.py
ADDED
|
@@ -0,0 +1,434 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Command handlers for conversation WebSocket.
|
| 2 |
+
|
| 3 |
+
Handles all commands received from the iOS app via WebSocket.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import asyncio
|
| 7 |
+
import logging
|
| 8 |
+
import os
|
| 9 |
+
from typing import Optional
|
| 10 |
+
|
| 11 |
+
from fastapi import WebSocket
|
| 12 |
+
|
| 13 |
+
from ..broadcast_manager import broadcast
|
| 14 |
+
from ..task_tracker import create_tracked_task
|
| 15 |
+
from .services import get_services, get_state
|
| 16 |
+
from .audio import stop_audio_streaming, start_audio_streaming, delayed_resume_microphone
|
| 17 |
+
from .animation import clear_custom_animations, set_custom_animations
|
| 18 |
+
from .greeting import send_greeting_and_start_streaming, goodbye_and_disconnect
|
| 19 |
+
from .history import get_history
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
async def reset_openai_session(keep_listening: bool) -> None:
    """Hard reset OpenAI session by disconnecting and reconnecting.

    OpenAI Realtime rejects changing voice via `session.update` once assistant
    audio has been produced in the current session. To apply a new voice/language
    reliably, we must create a new session.

    If `keep_listening` is True, we restore listening + audio streaming so the
    next user turn works without user-visible reconnect steps.

    Conversation context is preserved and restored after reconnection.

    Args:
        keep_listening: restore listening mode and audio streaming after
            the new session is established; otherwise capture is stopped.
    """
    services = get_services()

    if not services.openai:
        return

    # Get conversation history BEFORE disconnecting
    history = get_history()
    restore_messages = history.get_restore_messages() if history.has_history else []

    if restore_messages:
        logger.info(f"📚 Will restore {len(restore_messages)} messages after reconnect")

    # Stop any local playback/gestures
    if services.audio_player:
        services.audio_player.cancel()

    if services.speaking_gestures:
        await services.speaking_gestures.stop()

    # Stop audio streaming task (depends on openai.is_connected)
    stop_audio_streaming()

    # Pause capture while we reset, to avoid buffering user audio into nowhere
    if services.audio_capture:
        services.audio_capture.pause_capture()

    # Best-effort: cancel any in-flight response and clear input buffer
    try:
        await services.openai.cancel_response()
        await services.openai.clear_audio_buffer()
    except Exception:
        # Best-effort only; session is being torn down anyway.
        pass

    # Fully disconnect and reconnect (new session)
    try:
        await services.openai.disconnect()
    except Exception:
        # Ignore disconnect failures - we reconnect regardless.
        pass

    # connect() reads OPENAI_API_KEY from env (set by iOS connect command)
    await services.openai.connect()

    # Restore conversation context after reconnection
    if restore_messages:
        await services.openai.restore_conversation_context(restore_messages)

    if keep_listening:
        services.openai.start_listening()
        start_audio_streaming()

        # Resume capture with normal echo-avoidance delay
        create_tracked_task(delayed_resume_microphone())
    else:
        if services.audio_capture:
            services.audio_capture.stop_capture()
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
async def handle_stop_command() -> None:
    """Handle a voice stop command - immediately end the conversation.

    Tears everything down in order: playback, in-flight response,
    gestures, audio streaming, capture, then the OpenAI session itself,
    and finally notifies clients. Any failure falls through to a single
    error broadcast.
    """
    services = get_services()
    state = get_state()

    try:
        # Cancel any current AI response
        if services.audio_player:
            services.audio_player.cancel()

        if services.openai:
            await services.openai.cancel_response()

        # Stop speaking gestures if running
        if services.speaking_gestures:
            await services.speaking_gestures.stop()

        # Stop the audio stream task
        stop_audio_streaming()

        # Stop audio capture
        if services.audio_capture:
            services.audio_capture.stop_capture()

        # Stop listening on OpenAI
        if services.openai:
            services.openai.stop_listening()

            # Enforce: next conversation is always a new OpenAI session
            try:
                await services.openai.disconnect()
            except Exception:
                # Best-effort disconnect; conversation is over either way.
                pass

        # Mark conversation as ended (allows idle movements to resume)
        state.end_conversation()

        # Broadcast to iOS that listening has stopped
        await broadcast({
            "type": "listening_state",
            "listening": False,
        })

        # Also notify of the transcript that caused the stop
        await broadcast({
            "type": "conversation_stopped",
            "reason": "voice_command",
        })

        logger.info("✅ Conversation stopped via voice command")

    except Exception as e:
        logger.error(f"Error handling stop command: {e}")
        # Broadcast error to clients
        await broadcast({
            "type": "error",
            "message": "Failed to stop conversation",
        })
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
async def handle_connect(websocket: WebSocket, data: dict) -> None:
    """Handle the connect command."""
    services = get_services()

    provided_key = data.get("api_key")
    api_key = provided_key or os.environ.get("OPENAI_API_KEY")

    # Without a key (from the client or the environment) we cannot proceed.
    if not api_key:
        await websocket.send_json({
            "type": "error",
            "message": "API key required",
        })
        return

    if provided_key:
        # Log masked key for debugging
        key_preview = "***" + api_key[-4:] if len(api_key) > 4 else "***"
        logger.info(f"API key provided ({key_preview})")
        # Persist the client-supplied key so later reconnects can use it.
        os.environ["OPENAI_API_KEY"] = provided_key

    await services.openai.connect(api_key)

    if services.audio_player:
        services.audio_player.start_stream()
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
async def handle_disconnect() -> None:
    """Handle the disconnect command."""
    services = get_services()

    # Stop feeding audio upstream before tearing anything else down.
    stop_audio_streaming()

    capture = services.audio_capture
    if capture:
        capture.stop_capture()

    player = services.audio_player
    if player:
        player.cancel()

    await services.openai.disconnect()
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
async def handle_start_listening(websocket: WebSocket) -> None:
    """Handle the start_listening command.

    Auto-connects to OpenAI when needed, resets conversation state and
    history, starts microphone capture (reporting any failure to the
    client), then enables listening. Audio streaming to OpenAI is
    deliberately deferred until the greeting has finished playing.
    """
    services = get_services()
    state = get_state()
    history = get_history()

    if not services.openai.is_connected:
        # Auto-connect so that "stop => new session next time" doesn't require
        # a separate iOS connect step.
        try:
            await services.openai.connect()
        except Exception as e:
            logger.error(f"Cannot start listening: not connected to OpenAI ({e})")
            await websocket.send_json({
                "type": "error",
                "message": "Not connected to OpenAI",
            })
            return

    # Reset conversation state and start fresh history
    state.reset_for_new_conversation()
    history.clear()
    history.start_session()
    logger.info("🎬 New conversation started - animation state reset, history cleared")

    # Start audio capture
    if services.audio_capture:
        if not services.audio_capture.is_available:
            logger.error("Audio capture not available - no microphone detected!")
            await websocket.send_json({
                "type": "error",
                "message": "Microphone not available on Reachy",
            })
            return

        capture_started = await services.audio_capture.start_capture()
        if not capture_started:
            logger.error("❌ Failed to start audio capture")
            await websocket.send_json({
                "type": "error",
                "message": "Failed to start microphone capture",
            })
            return

        logger.info("✅ Audio capture started successfully")
    else:
        logger.error("Audio capture service not initialized")
        await websocket.send_json({
            "type": "error",
            "message": "Audio capture service not initialized",
        })
        return

    services.openai.start_listening()
    logger.info("✅ OpenAI listening mode enabled")

    # IMPORTANT: Do NOT start audio streaming here!
    # Audio streaming will be started AFTER the greeting completes.
    # This prevents motor noise from the welcome animation and ambient
    # noise from triggering VAD and interrupting the greeting.

    # Pause audio capture immediately - it will be resumed after greeting
    if services.audio_capture:
        services.audio_capture.pause_capture()
        logger.info("⏸️ Audio capture paused until greeting completes")

    await websocket.send_json({
        "type": "listening_state",
        "listening": True,
    })

    # Send greeting - audio streaming will start after it completes
    create_tracked_task(send_greeting_and_start_streaming())
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
async def handle_stop_listening(websocket: WebSocket) -> None:
    """Handle the stop_listening command."""
    services = get_services()

    # Stop capturing new audio input before anything else.
    stop_audio_streaming()
    if services.audio_capture:
        services.audio_capture.stop_capture()
    services.openai.stop_listening()

    # Tell the client right away so the UI updates without waiting for
    # the goodbye to finish playing.
    await websocket.send_json({
        "type": "listening_state",
        "listening": False,
    })

    # Speak the goodbye and disconnect in the background so the
    # WebSocket response is not blocked.
    create_tracked_task(goodbye_and_disconnect())
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
async def handle_interrupt(websocket: WebSocket) -> None:
    """Handle the interrupt_response command.

    Cancels the in-flight response (local playback first, then the
    OpenAI side), stops gestures, resumes the microphone so the user
    can speak immediately, and confirms completion to the client.
    """
    services = get_services()

    logger.info("🛑 Interrupt command received from iOS!")

    # Cancel audio playback immediately
    if services.audio_player:
        services.audio_player.cancel()

    # Cancel the AI response
    if services.openai:
        await services.openai.cancel_response()

    # Stop speaking gestures
    if services.speaking_gestures:
        await services.speaking_gestures.stop()

    # Resume microphone capture so user can speak
    if services.audio_capture:
        services.audio_capture.resume_capture()
        logger.info("▶️ Microphone resumed after interrupt")

    # Clear any buffered audio to prevent echo
    if services.openai:
        await services.openai.clear_audio_buffer()

    # Notify iOS that interrupt was successful
    await websocket.send_json({
        "type": "interrupt_complete",
        "success": True,
    })

    logger.info("✅ Interrupt complete - listening to user")
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
async def handle_set_language(websocket: WebSocket, data: dict) -> None:
    """Handle the set_language command."""
    services = get_services()
    language = data.get("language", "en")

    # Persist language; immediately reset OpenAI session to apply new settings.
    from ..voice import set_preferred_language

    success = set_preferred_language(language)
    confirmed = False

    openai = services.openai
    if success and openai and openai.is_connected:
        # A live session cannot switch language in place - rebuild it,
        # preserving listening mode across the reset.
        await reset_openai_session(keep_listening=bool(openai.is_listening))
        confirmed = openai.is_connected
        logger.info(f"🌍 Language switched to {language}, session reset, confirmed={confirmed}")

    await websocket.send_json({
        "type": "language_set",
        "language": language,
        "success": success,
        "confirmed": confirmed,
    })
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
async def handle_set_voice(websocket: WebSocket, data: dict) -> None:
    """Handle the set_voice command.

    Persists the selected voice and, when a live OpenAI session exists,
    restarts it so the new voice takes effect immediately. Replies with a
    ``voice_set`` message on success, or ``error`` when no voice_id is given.

    Args:
        websocket: The client connection to reply on.
        data: Command payload; expects a "voice_id" key.
    """
    services = get_services()

    voice_id = data.get("voice_id", "")
    if voice_id:
        # Persist voice; immediately reset OpenAI session to apply new settings.
        from ..voice import set_current_voice
        success = set_current_voice(voice_id)
        confirmed = False

        if success and services.openai and services.openai.is_connected:
            # Immediately disconnect and reconnect with new voice
            keep_listening = bool(services.openai.is_listening)
            await reset_openai_session(keep_listening=keep_listening)
            confirmed = services.openai.is_connected
            logger.info(f"🔊 Voice switched to {voice_id}, session reset, confirmed={confirmed}")

        await websocket.send_json({
            "type": "voice_set",
            "voice_id": voice_id,
            # Fix: guard the dereference — services.openai may be None when no
            # session has been created yet (previously raised AttributeError).
            "language": services.openai.language if services.openai else None,
            "success": success,
            "confirmed": confirmed,
        })
    else:
        # Robustness: report the missing field instead of silently ignoring
        # the command (mirrors handle_set_system_prompt's error handling).
        await websocket.send_json({
            "type": "error",
            "message": "No voice_id provided",
        })
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
async def handle_set_system_prompt(websocket: WebSocket, data: dict) -> None:
    """Handle the set_system_prompt command.

    Applies a custom personality prompt to the OpenAI service and
    acknowledges the result; replies with an error when the prompt is
    missing or the service is unavailable.

    Args:
        websocket: The client connection to reply on.
        data: Command payload; expects a "system_prompt" key.
    """
    services = get_services()

    system_prompt = data.get("system_prompt", "")
    if not system_prompt:
        await websocket.send_json({
            "type": "error",
            "message": "No system_prompt provided",
        })
        return

    # Fix: services.openai may be None before the first session is created;
    # the previous code dereferenced it unconditionally (AttributeError).
    if not services.openai:
        await websocket.send_json({
            "type": "error",
            "message": "OpenAI service not available",
        })
        return

    services.openai.set_custom_personality(system_prompt)
    logger.info(f"🎭 Custom personality set ({len(system_prompt)} chars)")
    await websocket.send_json({
        "type": "personality_set",
        "success": True,
    })
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
async def handle_clear_system_prompt(websocket: WebSocket) -> None:
    """Handle the clear_system_prompt command.

    Reverts the OpenAI service to the default Reachy personality and
    acknowledges to the client.

    Args:
        websocket: The client connection to reply on.
    """
    services = get_services()

    # Fix: services.openai may be None before the first session is created;
    # the previous code dereferenced it unconditionally (AttributeError).
    if services.openai:
        services.openai.clear_custom_personality()
        logger.info("🎭 Reverted to default Reachy personality")

    await websocket.send_json({
        "type": "personality_cleared",
        "success": True,
    })
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
async def handle_set_emotion_animations(websocket: WebSocket, data: dict) -> None:
    """Handle the set_emotion_animations command.

    Installs a custom emotion-to-animation mapping and confirms which
    emotions were registered; reports an error when the mapping is missing.

    Args:
        websocket: The client connection to reply on.
        data: Command payload; expects an "emotion_animations" dict.
    """
    emotion_animations = data.get("emotion_animations", {})

    # Guard clause: nothing to install means an error reply, not a no-op.
    if not emotion_animations:
        await websocket.send_json({
            "type": "error",
            "message": "No emotion_animations provided",
        })
        return

    set_custom_animations(emotion_animations)
    await websocket.send_json({
        "type": "emotion_animations_set",
        "success": True,
        "emotions": list(emotion_animations.keys()),
    })
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
async def handle_clear_emotion_animations(websocket: WebSocket) -> None:
    """Handle the clear_emotion_animations command.

    Drops any custom emotion-to-animation overrides and confirms to the
    client.

    Args:
        websocket: The client connection to reply on.
    """
    clear_custom_animations()
    await websocket.send_json({
        "type": "emotion_animations_cleared",
        "success": True,
    })
|
| 434 |
+
|
reachys_brain/routes/conversation/greeting.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Greeting and goodbye flow for conversations.
|
| 2 |
+
|
| 3 |
+
Handles the greeting sequence when a conversation starts and goodbye when it ends.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import asyncio
|
| 7 |
+
import logging
|
| 8 |
+
|
| 9 |
+
from ..audio_stream_manager import ConversationTimings
|
| 10 |
+
from .services import get_services
|
| 11 |
+
from .audio import start_audio_streaming
|
| 12 |
+
from .animation import send_greeting, send_goodbye
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
async def send_greeting_and_start_streaming() -> None:
    """Send greeting and start audio streaming after it completes.

    This ensures that:
    1. No audio is sent to OpenAI during the welcome animation
    2. The greeting plays completely without VAD interruption
    3. Audio streaming only starts after greeting finishes

    Flow:
    1. send_greeting() sends text -> ResponseState.WAITING
    2. Audio arrives -> ResponseState.GENERATING
    3. response.audio.done -> ResponseState.COMPLETE
    4. We wait for COMPLETE (audio sent by OpenAI)
    5. Wait for audio buffer to drain (audio played by speaker)
    6. Start streaming and resume microphone

    On any error, streaming and capture are still started so the system is
    never left deaf after a failed greeting.
    """
    # Local import avoids a circular import at module load time.
    from ...openai_realtime import ResponseState

    services = get_services()

    try:
        # Send the greeting (this will set ResponseState.WAITING -> GENERATING -> COMPLETE)
        await send_greeting()

        # Wait for the greeting response to be COMPLETE or IDLE
        # COMPLETE = OpenAI finished sending audio
        # IDLE = Response cycle fully complete
        max_wait = 15.0  # Maximum wait for greeting
        wait_interval = 0.2  # Poll period in seconds
        waited = 0.0

        # Poll the response state; exit early once OpenAI has finished
        # sending the greeting audio.
        while waited < max_wait:
            if services.openai:
                state = services.openai.response_state
                if state in (ResponseState.COMPLETE, ResponseState.IDLE):
                    break
            await asyncio.sleep(wait_interval)
            waited += wait_interval

        if waited >= max_wait:
            logger.warning("⚠️ Greeting response timeout - starting streaming anyway")
        else:
            logger.info(f"✅ Greeting response complete (waited {waited:.1f}s)")

        # Wait for audio to finish playing through speaker (OpenAI finishing
        # its send does not mean the local speaker has drained its buffer).
        if services.audio_player and services.audio_player.is_playing:
            extra_wait = 0.0
            max_extra = 8.0  # Cap on how long we wait for playback to drain
            while services.audio_player.is_playing and extra_wait < max_extra:
                await asyncio.sleep(wait_interval)
                extra_wait += wait_interval
            if extra_wait > 0:
                logger.info(f"⏳ Waited {extra_wait:.1f}s for greeting audio to finish")

        # Additional buffer drain time
        await asyncio.sleep(ConversationTimings.MICROPHONE_RESUME_DELAY_SECONDS)

        # Now start audio streaming
        if services.openai and services.openai.is_listening:
            start_audio_streaming()
            logger.info("✅ Audio streaming started after greeting")

            # Clear any buffered audio so stale greeting echo isn't sent
            await services.openai.clear_audio_buffer()

            # Mark response cycle as complete
            services.openai.mark_response_complete()

        # Resume audio capture
        if services.audio_capture:
            services.audio_capture.resume_capture()
            logger.info("▶️ Microphone resumed after greeting")

    except Exception as e:
        logger.error(f"Error in greeting flow: {e}")
        # Start streaming anyway to not leave the system in a broken state
        if services.openai and services.openai.is_listening:
            start_audio_streaming()
        if services.audio_capture:
            services.audio_capture.resume_capture()
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
async def goodbye_and_disconnect() -> None:
    """Send goodbye and wait for it to be spoken before disconnecting.

    This runs in the background so we don't block the WebSocket response.

    Waits (bounded) for the goodbye speech to start and finish, then always
    disconnects the OpenAI session and marks the conversation ended, even if
    the goodbye itself failed.
    """
    # Local import avoids a circular import at module load time.
    from .state import get_state

    services = get_services()
    state = get_state()

    try:
        # Send the goodbye prompt
        await send_goodbye()

        # Wait for the AI to generate and speak the goodbye
        # Give it enough time to respond and speak (typical goodbye is ~3-5 seconds)
        max_wait = 8.0  # Maximum seconds to wait
        wait_interval = 0.2  # Poll period in seconds
        waited = 0.0

        # Wait for speech to start
        await asyncio.sleep(0.5)

        # Wait for speech to finish.
        # NOTE(review): this polls services.tts.is_speaking — assumes the
        # goodbye is voiced via the TTS service rather than the audio player;
        # confirm this matches how send_goodbye() produces audio.
        while waited < max_wait:
            if services.tts and services.tts.is_speaking:
                # Speech is ongoing, keep waiting
                await asyncio.sleep(wait_interval)
                waited += wait_interval
            elif waited > 1.0:
                # Speech has finished (or never started after initial delay)
                break
            else:
                # Give it a moment to start
                await asyncio.sleep(wait_interval)
                waited += wait_interval

    except Exception as e:
        logger.error(f"Error in goodbye sequence: {e}")
    finally:
        # Now disconnect the OpenAI session (best-effort: the session may
        # already be gone, and disconnect failures must not block cleanup)
        try:
            await services.openai.disconnect()
        except Exception:
            pass

        # Mark conversation as ended (allows idle movements to resume)
        state.end_conversation()
|
| 147 |
+
|
reachys_brain/routes/conversation/history.py
ADDED
|
@@ -0,0 +1,324 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Conversation history management for context persistence.
|
| 2 |
+
|
| 3 |
+
Tracks conversation messages and can restore context after session resets.
|
| 4 |
+
This ensures continuity when voice/language changes force a session reconnect.
|
| 5 |
+
|
| 6 |
+
Also provides long conversation management with automatic summarization
|
| 7 |
+
when conversations exceed token limits.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import logging
|
| 11 |
+
from dataclasses import dataclass, field
|
| 12 |
+
from datetime import datetime
|
| 13 |
+
from typing import Optional
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)

# Maximum messages to keep in active history
MAX_HISTORY_MESSAGES = 30

# Maximum messages to restore on session reset
MAX_RESTORE_MESSAGES = 10

# Approximate tokens per message (conservative estimate for voice conversations)
APPROX_TOKENS_PER_MESSAGE = 50

# Token threshold before triggering summarization
TOKEN_THRESHOLD_FOR_SUMMARY = 1500

# Messages to keep after summarization (most recent)
MESSAGES_TO_KEEP_AFTER_SUMMARY = 8


@dataclass
class HistoryMessage:
    """A single entry in the conversation transcript."""

    role: str  # "user" or "assistant"
    content: str
    timestamp: datetime = field(default_factory=datetime.utcnow)
    token_estimate: int = 0

    def __post_init__(self):
        # Rough heuristic: ~4 characters per token for English text.
        if self.token_estimate == 0:
            self.token_estimate = max(1, len(self.content) // 4)


class ConversationHistory:
    """Manages conversation history for context persistence.

    Stores user and assistant messages during a conversation, can produce
    context-restoration payloads for session resets, and keeps long
    conversations bounded by summarizing older messages once a token
    budget is exceeded.
    """

    def __init__(self, max_messages: int = MAX_HISTORY_MESSAGES):
        """Initialize conversation history.

        Args:
            max_messages: Maximum messages to keep in active history.
        """
        self._messages: list[HistoryMessage] = []
        self._max_messages = max_messages
        self._session_start: Optional[datetime] = None

        # Compressed description of older, dropped messages (if any).
        self._conversation_summary: Optional[str] = None
        self._summarized_message_count: int = 0

        # Running estimate of tokens held in the active message list.
        self._total_token_estimate: int = 0

    def start_session(self) -> None:
        """Mark the start of a new session (but keep history)."""
        self._session_start = datetime.utcnow()
        logger.info(f"📚 Conversation session started (history: {len(self._messages)} messages)")

    def clear(self) -> None:
        """Clear all conversation history."""
        self._messages = []
        self._session_start = None
        self._conversation_summary = None
        self._summarized_message_count = 0
        self._total_token_estimate = 0
        logger.info("📚 Conversation history cleared")

    def add_user_message(self, content: str) -> None:
        """Record a transcribed user utterance.

        Args:
            content: The user's transcribed message.
        """
        self._append("user", content)

    def add_assistant_message(self, content: str) -> None:
        """Record an assistant reply.

        Args:
            content: The assistant's response.
        """
        self._append("assistant", content)

    def _append(self, role: str, content: str) -> None:
        # Shared implementation: ignore blank input, store the message, then
        # keep the history within its token and size budgets.
        text = (content or "").strip()
        if not text:
            return

        entry = HistoryMessage(role=role, content=text)
        self._messages.append(entry)
        self._total_token_estimate += entry.token_estimate

        self._check_and_summarize()
        self._trim_history()
        logger.debug(f"📚 Added {role} message (total: {len(self._messages)}, ~{self._total_token_estimate} tokens)")

    def _check_and_summarize(self) -> None:
        """Compress older messages into a summary once the token budget is hit."""
        if self._total_token_estimate < TOKEN_THRESHOLD_FOR_SUMMARY:
            return
        if len(self._messages) <= MESSAGES_TO_KEEP_AFTER_SUMMARY:
            return

        # Everything except the most recent messages is eligible.
        older = self._messages[:-MESSAGES_TO_KEEP_AFTER_SUMMARY]
        if len(older) < 4:
            return  # Not enough to summarize

        logger.info(f"📚 Summarizing {len(older)} older messages (token threshold reached)")

        # Crude topic extraction: first three words of each substantial
        # user message (deduplicated via the set).
        topics = {
            " ".join(m.content.split()[:3])
            for m in older
            if m.role == "user" and len(m.content) > 10 and len(m.content.split()) >= 2
        }

        user_count = sum(1 for m in older if m.role == "user")
        assistant_count = sum(1 for m in older if m.role == "assistant")

        summary_parts = []
        if topics:
            topic_list = list(topics)[:5]  # Max 5 topics
            summary_parts.append(f"Earlier topics: {', '.join(topic_list)}")
        summary_parts.append(
            f"({user_count} user messages, {assistant_count} assistant responses summarized)"
        )

        # Fold the new summary into any existing one.
        new_summary = ". ".join(summary_parts)
        self._conversation_summary = (
            f"{self._conversation_summary} | {new_summary}"
            if self._conversation_summary
            else new_summary
        )

        # Account for the dropped messages, then keep only the recent tail.
        self._summarized_message_count += len(older)
        self._total_token_estimate -= sum(m.token_estimate for m in older)
        self._messages = self._messages[-MESSAGES_TO_KEEP_AFTER_SUMMARY:]

        logger.info(f"📚 Summarized conversation. Kept {len(self._messages)} recent messages. Summary: {new_summary[:100]}...")

    def _trim_history(self) -> None:
        """Drop the oldest messages once the active window exceeds its cap."""
        overflow = len(self._messages) - self._max_messages
        if overflow <= 0:
            return
        dropped = self._messages[:overflow]
        self._messages = self._messages[overflow:]
        self._total_token_estimate -= sum(m.token_estimate for m in dropped)
        logger.debug(f"📚 Trimmed {overflow} old messages from history")

    @property
    def message_count(self) -> int:
        """Get number of active messages in history."""
        return len(self._messages)

    @property
    def total_message_count(self) -> int:
        """Get total messages including summarized ones."""
        return len(self._messages) + self._summarized_message_count

    @property
    def has_history(self) -> bool:
        """Check if there's any conversation history."""
        return bool(self._messages) or self._conversation_summary is not None

    @property
    def has_summary(self) -> bool:
        """Check if there's a summary of older messages."""
        return self._conversation_summary is not None

    @property
    def estimated_tokens(self) -> int:
        """Get estimated token count of active history."""
        return self._total_token_estimate

    def get_restore_messages(self, max_messages: int = MAX_RESTORE_MESSAGES) -> list[dict]:
        """Get messages formatted for OpenAI conversation.item.create.

        Returns recent messages that can be sent to OpenAI to restore
        context after a session reset. Includes summary context if
        available.

        Args:
            max_messages: Maximum messages to include.

        Returns:
            List of message dicts for OpenAI API.
        """
        def _item(role: str, content_type: str, text: str) -> dict:
            # Shape required by the Realtime API's conversation.item.create.
            return {
                "type": "message",
                "role": role,
                "content": [
                    {
                        "type": content_type,
                        "text": text,
                    }
                ],
            }

        restored: list[dict] = []

        # Lead with the compressed summary, phrased as user-provided context.
        if self._conversation_summary:
            restored.append(_item(
                "user",
                "input_text",
                f"[Context from earlier in our conversation: {self._conversation_summary}]",
            ))

        # Follow with the most recent raw messages.
        for msg in self._messages[-max_messages:]:
            restored.append(_item(
                msg.role,
                "input_text" if msg.role == "user" else "text",
                msg.content,
            ))

        return restored

    def get_context_summary(self) -> str:
        """Get a brief summary of conversation context.

        Useful for debugging or injecting as context note.

        Returns:
            Summary string.
        """
        if not self._messages and not self._conversation_summary:
            return "No previous conversation."

        parts: list[str] = []

        if self._conversation_summary:
            parts.append(f"Summary: {self._conversation_summary[:150]}")

        active = len(self._messages)
        if active > 0:
            user_total = sum(1 for m in self._messages if m.role == "user")
            parts.append(f"Active: {active} messages ({user_total} user, {active - user_total} assistant)")

        if self._summarized_message_count > 0:
            parts.append(f"Summarized: {self._summarized_message_count} older messages")

        parts.append(f"Est. tokens: ~{self._total_token_estimate}")

        return " | ".join(parts)

    def get_stats(self) -> dict:
        """Get conversation statistics.

        Returns:
            Dictionary with conversation stats.
        """
        started = self._session_start
        return {
            "active_messages": len(self._messages),
            "summarized_messages": self._summarized_message_count,
            "total_messages": self.total_message_count,
            "estimated_tokens": self._total_token_estimate,
            "has_summary": self.has_summary,
            "session_start": started.isoformat() if started else None,
        }
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
# Global instance
|
| 305 |
+
_history: Optional[ConversationHistory] = None


def get_history() -> ConversationHistory:
    """Return the process-wide conversation history, creating it lazily.

    Returns:
        The ConversationHistory singleton.
    """
    global _history
    if _history is None:
        _history = ConversationHistory()
    return _history


def clear_history() -> None:
    """Reset the global conversation history, if one has been created."""
    global _history
    if _history is not None:
        _history.clear()
|
reachys_brain/routes/conversation/meeting_callbacks.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Meeting-specific callbacks for conversation handling.
|
| 2 |
+
|
| 3 |
+
Handles meeting started, stopped, and transcript update events.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
|
| 8 |
+
from ..broadcast_manager import safe_broadcast
|
| 9 |
+
from ..task_tracker import create_tracked_task
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def on_meeting_started(meeting_id: str, title: str) -> None:
    """Handle meeting recording started.

    Args:
        meeting_id: Identifier of the meeting that began recording.
        title: Human-readable meeting title.
    """
    logger.info(f"📝 Meeting started: {title} ({meeting_id})")
    payload = {
        "type": "meeting_started",
        "meeting_id": meeting_id,
        "title": title,
    }
    create_tracked_task(safe_broadcast(payload))
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def on_meeting_stopped(meeting_id: str) -> None:
    """Handle meeting recording stopped.

    Args:
        meeting_id: Identifier of the meeting that stopped recording.
    """
    logger.info(f"📝 Meeting stopped: {meeting_id}")
    payload = {
        "type": "meeting_stopped",
        "meeting_id": meeting_id,
    }
    create_tracked_task(safe_broadcast(payload))
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def on_meeting_transcript_update(meeting_id: str, transcript: str) -> None:
    """Handle meeting transcript update.

    Args:
        meeting_id: Identifier of the meeting being transcribed.
        transcript: The full transcript accumulated so far.
    """
    # Clients only need a live tail for display; the full transcript can be
    # large, so broadcast just the last 500 characters plus the total length.
    preview = transcript[-500:]

    create_tracked_task(safe_broadcast({
        "type": "meeting_transcript_update",
        "meeting_id": meeting_id,
        "transcript_preview": preview,
        "total_length": len(transcript),
    }))
|
| 45 |
+
|
reachys_brain/routes/conversation/messages.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Message constants and utilities for conversation handling.
|
| 2 |
+
|
| 3 |
+
Contains stop commands and other conversation-related message patterns.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
# Re-export from existing conversation_messages module
|
| 7 |
+
from ..conversation_messages import STOP_COMMANDS
|
| 8 |
+
|
| 9 |
+
__all__ = ["STOP_COMMANDS"]
|
| 10 |
+
|
reachys_brain/routes/conversation/request_handlers.py
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""iOS request handlers for conversation WebSocket.
|
| 2 |
+
|
| 3 |
+
Handles request/response patterns for reminders, contacts, websites,
|
| 4 |
+
and scheduled messages delegated to iOS.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
from typing import Optional
|
| 9 |
+
|
| 10 |
+
from fastapi import WebSocket
|
| 11 |
+
|
| 12 |
+
from ..broadcast_manager import safe_broadcast
|
| 13 |
+
from ..task_tracker import create_tracked_task
|
| 14 |
+
from ...tools.reminders import handle_reminder_result
|
| 15 |
+
from ...tools.contacts import handle_contacts_result
|
| 16 |
+
from ...tools.scheduled_messages import handle_scheduled_message_result
|
| 17 |
+
from ...tools.website_generator import save_website_from_ios
|
| 18 |
+
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# MARK: - Request Callbacks (called by tools to request iOS action)
|
| 23 |
+
|
| 24 |
+
def on_website_request(
    website_id: str,
    description: str,
    is_edit: bool,
    existing_html: Optional[str],
) -> None:
    """Handle website generation request - delegates to iOS.

    Broadcasts a ``website_request`` event to connected clients; the iOS
    app performs the actual generation and uploads the result back via
    the ``upload_website`` command.

    Args:
        website_id: Identifier of the website being created or edited.
        description: Natural-language description of the desired site.
        is_edit: True when modifying an existing site.
        existing_html: Current HTML when editing, else None.
    """
    try:
        if not website_id:
            logger.error("on_website_request: Missing website_id")
            return

        if not description:
            logger.error("on_website_request: Missing description")
            return

        # Fixed: dropped the unused `action` local ("Editing"/"Creating" was
        # computed but never used) and the redundant `if description` guard —
        # description is already known to be non-empty at this point.
        desc_preview = description[:50]
        logger.info(f"📱 Requesting iOS to generate website {website_id}: {desc_preview}...")

        # Limit existing_html size for broadcast to prevent WebSocket issues
        max_broadcast_html = 100000
        broadcast_existing_html = existing_html
        if existing_html and len(existing_html) > max_broadcast_html:
            logger.warning(f"Truncating existing_html for broadcast ({len(existing_html)} chars)")
            broadcast_existing_html = existing_html[:max_broadcast_html]

        create_tracked_task(safe_broadcast({
            "type": "website_request",
            "website_id": website_id,
            "description": description,
            "is_edit": is_edit,
            "existing_html": broadcast_existing_html,
        }))
    except Exception as e:
        logger.error(f"Error in on_website_request: {e}", exc_info=True)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def on_reminder_request(
    request_id: str,
    action: str,
    params: dict,
) -> None:
    """Handle reminder operation request - delegates to iOS.

    Fires a ``reminder_request`` broadcast; iOS performs the operation and
    replies through the ``reminder_result`` WebSocket command.
    """
    try:
        # A request without an id cannot be correlated with its reply.
        if not request_id:
            logger.error("on_reminder_request: Missing request_id")
            return

        logger.info(f"📱 Requesting iOS to {action} reminder: {request_id}")

        payload = {
            "type": "reminder_request",
            "request_id": request_id,
            "action": action,
            "params": params,
        }
        create_tracked_task(safe_broadcast(payload))
    except Exception as e:
        logger.error(f"Error in on_reminder_request: {e}", exc_info=True)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def on_contacts_request(
    request_id: str,
    action: str,
    params: dict,
) -> None:
    """Handle contacts operation request - delegates to iOS.

    Fires a ``contacts_request`` broadcast; iOS performs the operation and
    replies through the ``contacts_result`` WebSocket command.
    """
    try:
        # A request without an id cannot be correlated with its reply.
        if not request_id:
            logger.error("on_contacts_request: Missing request_id")
            return

        logger.info(f"📇 Requesting iOS to {action} contacts: {request_id}")

        payload = {
            "type": "contacts_request",
            "request_id": request_id,
            "action": action,
            "params": params,
        }
        create_tracked_task(safe_broadcast(payload))
    except Exception as e:
        logger.error(f"Error in on_contacts_request: {e}", exc_info=True)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def on_scheduled_message_request(
    request_id: str,
    action: str,
    params: dict,
) -> None:
    """Handle scheduled message operation request - delegates to iOS.

    Fires a ``scheduled_message_request`` broadcast; iOS performs the
    operation and replies through the ``scheduled_message_result`` command.
    """
    try:
        # A request without an id cannot be correlated with its reply.
        if not request_id:
            logger.error("on_scheduled_message_request: Missing request_id")
            return

        logger.info(f"📬 Requesting iOS to {action} scheduled message: {request_id}")

        payload = {
            "type": "scheduled_message_request",
            "request_id": request_id,
            "action": action,
            "params": params,
        }
        create_tracked_task(safe_broadcast(payload))
    except Exception as e:
        logger.error(f"Error in on_scheduled_message_request: {e}", exc_info=True)
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
# MARK: - WebSocket Command Handlers (handle responses from iOS)
|
| 131 |
+
|
| 132 |
+
async def handle_upload_website(websocket: WebSocket, data: dict) -> None:
    """Handle the upload_website command.

    Validates the payload, persists the iOS-generated site via
    ``save_website_from_ios``, acknowledges the uploader, and broadcasts
    a ``website_ready`` event to every connected client.
    """
    try:
        website_id = data.get("website_id", "")
        html_content = data.get("html_content", "")
        title = data.get("title", "Generated Website")
        description = data.get("description", "")

        # Reject incomplete payloads before touching storage.
        if not website_id:
            logger.error("upload_website: Missing website_id")
            await websocket.send_json({
                "type": "error",
                "message": "website_id is required",
            })
            return

        if not html_content:
            logger.error("upload_website: Missing html_content")
            await websocket.send_json({
                "type": "error",
                "message": "html_content is required",
            })
            return

        # Log upload size for debugging
        logger.info(f"📤 Receiving website upload: {website_id} ({len(html_content)} bytes)")

        # Persist the website to robot storage.
        result = await save_website_from_ios(
            website_id=website_id,
            html_content=html_content,
            title=title,
            description=description,
        )

        if not result.get("success"):
            error_msg = result.get("error", "Failed to save website")
            logger.error(f"upload_website failed: {error_msg}")
            await websocket.send_json({
                "type": "error",
                "message": error_msg,
            })
            return

        logger.info(f"💾 Website uploaded from iOS: {website_id}")
        await websocket.send_json({
            "type": "website_uploaded",
            "website_id": website_id,
            "url": result.get("url"),
            "title": title,
            "success": True,
        })

        # Tell all clients the website can be opened now.
        await safe_broadcast({
            "type": "website_ready",
            "website_id": website_id,
            "url": result.get("url"),
            "title": title,
            "is_edit": data.get("is_edit", False),
        })
    except Exception as e:
        logger.error(f"Error handling upload_website: {e}", exc_info=True)
        await websocket.send_json({
            "type": "error",
            "message": f"Upload failed: {str(e)}",
        })
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
async def handle_reminder_result_cmd(websocket: WebSocket, data: dict) -> None:
    """Handle the reminder_result command from iOS.

    Resolves the pending future registered by the reminders tool for the
    given request_id, then acknowledges receipt to the sender.
    """
    try:
        request_id = data.get("request_id", "")
        result = data.get("result", {})

        if not request_id:
            logger.error("reminder_result: Missing request_id")
            await websocket.send_json({
                "type": "error",
                "message": "request_id is required",
            })
            return

        succeeded = result.get("success", False)
        logger.info(f"📱 Received reminder result: {request_id} (success: {succeeded})")

        # Resolve the pending future inside the reminders module.
        handle_reminder_result(request_id, result)

        # Acknowledge receipt back to iOS.
        await websocket.send_json({
            "type": "reminder_result_received",
            "request_id": request_id,
            "success": True,
        })

    except Exception as e:
        logger.error(f"Error handling reminder_result: {e}", exc_info=True)
        await websocket.send_json({
            "type": "error",
            "message": f"Reminder result failed: {str(e)}",
        })
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
async def handle_contacts_result_cmd(websocket: WebSocket, data: dict) -> None:
    """Handle the contacts_result command from iOS.

    Resolves the pending future registered by the contacts tool for the
    given request_id, then acknowledges receipt to the sender.
    """
    try:
        request_id = data.get("request_id", "")
        result = data.get("result", {})

        if not request_id:
            logger.error("contacts_result: Missing request_id")
            await websocket.send_json({
                "type": "error",
                "message": "request_id is required",
            })
            return

        succeeded = result.get("success", False)
        logger.info(f"📇 Received contacts result: {request_id} (success: {succeeded})")

        # Resolve the pending future inside the contacts module.
        handle_contacts_result(request_id, result)

        # Acknowledge receipt back to iOS.
        await websocket.send_json({
            "type": "contacts_result_received",
            "request_id": request_id,
            "success": True,
        })

    except Exception as e:
        logger.error(f"Error handling contacts_result: {e}", exc_info=True)
        await websocket.send_json({
            "type": "error",
            "message": f"Contacts result failed: {str(e)}",
        })
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
async def handle_scheduled_message_result_cmd(websocket: WebSocket, data: dict) -> None:
    """Handle the scheduled_message_result command from iOS.

    Resolves the pending future registered by the scheduled_messages tool
    for the given request_id, then acknowledges receipt to the sender.
    """
    try:
        request_id = data.get("request_id", "")
        result = data.get("result", {})

        if not request_id:
            logger.error("scheduled_message_result: Missing request_id")
            await websocket.send_json({
                "type": "error",
                "message": "request_id is required",
            })
            return

        succeeded = result.get("success", False)
        logger.info(f"📬 Received scheduled message result: {request_id} (success: {succeeded})")

        # Resolve the pending future inside the scheduled_messages module.
        handle_scheduled_message_result(request_id, result)

        # Acknowledge receipt back to iOS.
        await websocket.send_json({
            "type": "scheduled_message_result_received",
            "request_id": request_id,
            "success": True,
        })

    except Exception as e:
        logger.error(f"Error handling scheduled_message_result: {e}", exc_info=True)
        await websocket.send_json({
            "type": "error",
            "message": f"Scheduled message result failed: {str(e)}",
        })
|
| 301 |
+
|
reachys_brain/routes/conversation/services.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Re-exports from conversation_services for backwards compatibility.
|
| 2 |
+
|
| 3 |
+
This module provides a bridge to the existing conversation_services module,
|
| 4 |
+
making it accessible from within the conversation package.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from ..conversation_services import (
|
| 8 |
+
ConversationServices,
|
| 9 |
+
ConversationState,
|
| 10 |
+
get_services,
|
| 11 |
+
get_state,
|
| 12 |
+
init_services as _init_services,
|
| 13 |
+
cleanup_services as _cleanup_services,
|
| 14 |
+
wire_openai_callbacks,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
__all__ = [
|
| 18 |
+
"ConversationServices",
|
| 19 |
+
"ConversationState",
|
| 20 |
+
"get_services",
|
| 21 |
+
"get_state",
|
| 22 |
+
"wire_openai_callbacks",
|
| 23 |
+
]
|
| 24 |
+
|
reachys_brain/routes/conversation/state.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Re-exports state from conversation_services.
|
| 2 |
+
|
| 3 |
+
Provides access to conversation state from within the package.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from ..conversation_services import get_state, ConversationState
|
| 7 |
+
|
| 8 |
+
__all__ = ["get_state", "ConversationState"]
|
| 9 |
+
|
reachys_brain/routes/conversation/websocket.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""WebSocket endpoint for real-time conversation with iOS app.
|
| 2 |
+
|
| 3 |
+
This module provides the WebSocket endpoint for voice conversations.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
|
| 8 |
+
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
|
| 9 |
+
|
| 10 |
+
from ..broadcast_manager import add_client, remove_client
|
| 11 |
+
from .services import get_services, get_state
|
| 12 |
+
from . import commands
|
| 13 |
+
from . import request_handlers
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
router = APIRouter(tags=["Conversation"])
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@router.websocket("/ws/conversation")
async def conversation_websocket(websocket: WebSocket) -> None:
    """WebSocket endpoint for the iOS conversation app.

    Accepts commands from iOS and relays events from OpenAI.

    Commands:
    - connect: Connect to OpenAI (requires api_key)
    - disconnect: Disconnect from OpenAI
    - start_listening: Start microphone capture
    - stop_listening: Stop microphone capture
    - set_language: Set conversation language
    - set_voice: Set voice ID (also updates language)
    - send_text: Send a text message
    - set_system_prompt: Set custom personality (requires system_prompt)
    - clear_system_prompt: Clear custom personality, revert to default
    - cancel_response: Cancel current AI response
    - clear_audio_buffer: Clear the input audio buffer

    Events (sent to iOS):
    - connection_state: OpenAI connection state
    - speaking_state: AI speaking state
    - transcript_update: User transcript (real-time)
    - response_text: AI response text
    - personality_set: Custom personality was set
    - personality_cleared: Reverted to default personality
    - error: Error messages
    """
    await websocket.accept()
    total = add_client(websocket)
    logger.info(f"iOS client connected ({total} total)")

    services = get_services()

    # Bring the newly-connected client up to date on the OpenAI session.
    if services.openai:
        await websocket.send_json({
            "type": "connection_state",
            "state": services.openai.connection_state.value,
        })

    try:
        # Pump commands until the client disconnects.
        while True:
            message = await websocket.receive_json()
            await _handle_command(websocket, message.get("command", ""), message)

    except WebSocketDisconnect:
        logger.info("iOS client disconnected")
    except Exception as e:
        logger.error(f"WebSocket error: {e}")
    finally:
        remaining = remove_client(websocket)
        logger.info(f"iOS client removed ({remaining} remaining)")
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
async def _handle_command(websocket: WebSocket, command: str, data: dict) -> None:
    """Dispatch a single command from the iOS app to its handler.

    Args:
        websocket: The client connection the command arrived on.
        command: Command name (see conversation_websocket for the list).
        data: Full JSON payload, passed through to handlers that need it.

    Handler exceptions are reported back to the client as an ``error``
    event instead of tearing down the socket.
    """
    from . import init_services

    services = get_services()

    # Lazily initialize shared services on the first command.
    if not services.is_initialized:
        init_services()
        services = get_services()

    # Fixed: these commands dereference services.openai directly below; fail
    # fast with a clear message instead of surfacing an AttributeError when
    # no OpenAI session exists (services.openai can legitimately be None —
    # the endpoint itself checks `if services.openai:` on connect).
    openai_commands = {"send_text", "cancel_response", "clear_audio_buffer"}

    try:
        if command in openai_commands and services.openai is None:
            await websocket.send_json({
                "type": "error",
                "message": "Not connected to OpenAI",
            })
            return

        if command == "connect":
            await commands.handle_connect(websocket, data)

        elif command == "disconnect":
            await commands.handle_disconnect()

        elif command == "start_listening":
            await commands.handle_start_listening(websocket)

        elif command == "stop_listening":
            await commands.handle_stop_listening(websocket)

        elif command == "set_language":
            await commands.handle_set_language(websocket, data)

        elif command == "set_voice":
            await commands.handle_set_voice(websocket, data)

        elif command == "send_text":
            text = data.get("text", "")
            if text:
                await services.openai.send_text_message(text)

        elif command == "cancel_response":
            # Stop local playback first so audio cuts off immediately.
            if services.audio_player:
                services.audio_player.cancel()
            await services.openai.cancel_response()

        elif command == "interrupt_response":
            await commands.handle_interrupt(websocket)

        elif command == "clear_audio_buffer":
            await services.openai.clear_audio_buffer()

        elif command == "set_system_prompt":
            await commands.handle_set_system_prompt(websocket, data)

        elif command == "clear_system_prompt":
            await commands.handle_clear_system_prompt(websocket)

        elif command == "set_emotion_animations":
            await commands.handle_set_emotion_animations(websocket, data)

        elif command == "clear_emotion_animations":
            await commands.handle_clear_emotion_animations(websocket)

        elif command == "upload_website":
            await request_handlers.handle_upload_website(websocket, data)

        elif command == "reminder_result":
            await request_handlers.handle_reminder_result_cmd(websocket, data)

        elif command == "contacts_result":
            await request_handlers.handle_contacts_result_cmd(websocket, data)

        elif command == "scheduled_message_result":
            await request_handlers.handle_scheduled_message_result_cmd(websocket, data)

        else:
            logger.warning(f"Unknown command: {command}")
            await websocket.send_json({
                "type": "error",
                "message": f"Unknown command: {command}",
            })

    except Exception as e:
        logger.error(f"Error handling command {command}: {e}")
        await websocket.send_json({
            "type": "error",
            "message": str(e),
        })
|
| 157 |
+
})
|
| 158 |
+
|
reachys_brain/routes/voice.py
DELETED
|
@@ -1,1139 +0,0 @@
|
|
| 1 |
-
"""Voice configuration endpoints for OpenAI Realtime voices."""
|
| 2 |
-
|
| 3 |
-
import asyncio
|
| 4 |
-
import io
|
| 5 |
-
import logging
|
| 6 |
-
import os
|
| 7 |
-
import subprocess
|
| 8 |
-
import tempfile
|
| 9 |
-
from typing import Optional
|
| 10 |
-
|
| 11 |
-
from fastapi import APIRouter, HTTPException
|
| 12 |
-
import httpx
|
| 13 |
-
|
| 14 |
-
from ..models import Voice, VoiceRequest, VoiceResponse, VoicesListResponse
|
| 15 |
-
from ..audio_playback import is_kids_mode_enabled, set_kids_mode, KIDS_MODE_PITCH_CENTS, KIDS_MODE_TEMPO
|
| 16 |
-
|
| 17 |
-
logger = logging.getLogger(__name__)
|
| 18 |
-
|
| 19 |
-
router = APIRouter(tags=["Voice"])
|
| 20 |
-
|
| 21 |
-
# Database keys for voice settings
|
| 22 |
-
VOICE_ID_KEY = "voice_id"
|
| 23 |
-
PREFERRED_LANGUAGE_KEY = "preferred_language"
|
| 24 |
-
KIDS_MODE_KEY = "kids_mode_enabled"
|
| 25 |
-
|
| 26 |
-
# Database keys for VAD (Voice Activity Detection) settings
|
| 27 |
-
VAD_THRESHOLD_KEY = "vad_threshold"
|
| 28 |
-
VAD_SILENCE_MS_KEY = "vad_silence_ms"
|
| 29 |
-
VAD_PREFIX_MS_KEY = "vad_prefix_ms"
|
| 30 |
-
|
| 31 |
-
# Default VAD values
|
| 32 |
-
# Increased threshold to reduce sensitivity to background noise and echo
|
| 33 |
-
DEFAULT_VAD_THRESHOLD = 0.8 # 0.0-1.0, higher = less sensitive (increased from 0.7)
|
| 34 |
-
DEFAULT_VAD_SILENCE_MS = 800 # Silence duration before responding (ms) (increased from 700)
|
| 35 |
-
DEFAULT_VAD_PREFIX_MS = 300 # Audio padding before speech detection (ms)
|
| 36 |
-
|
| 37 |
-
# VAD Presets for different environments
|
| 38 |
-
VAD_PRESETS = {
|
| 39 |
-
"quiet_room": {
|
| 40 |
-
"id": "quiet_room",
|
| 41 |
-
"name": "Quiet Room",
|
| 42 |
-
"description": "More sensitive, responds faster. Best for quiet environments.",
|
| 43 |
-
"threshold": 0.5,
|
| 44 |
-
"silence_ms": 500,
|
| 45 |
-
"prefix_ms": 300,
|
| 46 |
-
},
|
| 47 |
-
"normal": {
|
| 48 |
-
"id": "normal",
|
| 49 |
-
"name": "Normal",
|
| 50 |
-
"description": "Balanced settings for typical use.",
|
| 51 |
-
"threshold": 0.8,
|
| 52 |
-
"silence_ms": 800,
|
| 53 |
-
"prefix_ms": 300,
|
| 54 |
-
},
|
| 55 |
-
"noisy": {
|
| 56 |
-
"id": "noisy",
|
| 57 |
-
"name": "Noisy Environment",
|
| 58 |
-
"description": "Less sensitive, ignores background noise better.",
|
| 59 |
-
"threshold": 0.85,
|
| 60 |
-
"silence_ms": 1000,
|
| 61 |
-
"prefix_ms": 400,
|
| 62 |
-
},
|
| 63 |
-
"conference": {
|
| 64 |
-
"id": "conference",
|
| 65 |
-
"name": "Conference Room",
|
| 66 |
-
"description": "Very strict, ignores most background voices.",
|
| 67 |
-
"threshold": 0.9,
|
| 68 |
-
"silence_ms": 1200,
|
| 69 |
-
"prefix_ms": 500,
|
| 70 |
-
},
|
| 71 |
-
}
|
| 72 |
-
|
| 73 |
-
# OpenAI TTS API endpoint
|
| 74 |
-
OPENAI_TTS_URL = "https://api.openai.com/v1/audio/speech"
|
| 75 |
-
|
| 76 |
-
# Reachy audio device
|
| 77 |
-
AUDIO_DEVICE = "plug:reachymini_audio_sink"
|
| 78 |
-
|
| 79 |
-
# Available OpenAI Realtime voices with their characteristics
|
| 80 |
-
OPENAI_VOICES = [
|
| 81 |
-
{
|
| 82 |
-
"id": "alloy",
|
| 83 |
-
"name": "Alloy",
|
| 84 |
-
"description": "Neutral, balanced voice",
|
| 85 |
-
"gender": "neutral",
|
| 86 |
-
},
|
| 87 |
-
{
|
| 88 |
-
"id": "ash",
|
| 89 |
-
"name": "Ash",
|
| 90 |
-
"description": "Soft, warm voice",
|
| 91 |
-
"gender": "male",
|
| 92 |
-
},
|
| 93 |
-
{
|
| 94 |
-
"id": "ballad",
|
| 95 |
-
"name": "Ballad",
|
| 96 |
-
"description": "Expressive, storytelling voice",
|
| 97 |
-
"gender": "male",
|
| 98 |
-
},
|
| 99 |
-
{
|
| 100 |
-
"id": "coral",
|
| 101 |
-
"name": "Coral",
|
| 102 |
-
"description": "Clear, friendly voice",
|
| 103 |
-
"gender": "female",
|
| 104 |
-
},
|
| 105 |
-
{
|
| 106 |
-
"id": "echo",
|
| 107 |
-
"name": "Echo",
|
| 108 |
-
"description": "Deep, resonant voice",
|
| 109 |
-
"gender": "male",
|
| 110 |
-
},
|
| 111 |
-
{
|
| 112 |
-
"id": "sage",
|
| 113 |
-
"name": "Sage",
|
| 114 |
-
"description": "Calm, wise voice",
|
| 115 |
-
"gender": "female",
|
| 116 |
-
},
|
| 117 |
-
{
|
| 118 |
-
"id": "shimmer",
|
| 119 |
-
"name": "Shimmer",
|
| 120 |
-
"description": "Bright, energetic voice",
|
| 121 |
-
"gender": "female",
|
| 122 |
-
},
|
| 123 |
-
{
|
| 124 |
-
"id": "verse",
|
| 125 |
-
"name": "Verse",
|
| 126 |
-
"description": "Dynamic, engaging voice",
|
| 127 |
-
"gender": "male",
|
| 128 |
-
},
|
| 129 |
-
]
|
| 130 |
-
|
| 131 |
-
# MARK: - Voice Settings Manager
|
| 132 |
-
|
| 133 |
-
class VoiceSettings:
|
| 134 |
-
"""Encapsulates voice and language settings for the application.
|
| 135 |
-
|
| 136 |
-
Settings are persisted to the database for survival across restarts.
|
| 137 |
-
"""
|
| 138 |
-
|
| 139 |
-
def __init__(self):
|
| 140 |
-
self._current_voice: str = "alloy"
|
| 141 |
-
self._current_language: str = "en"
|
| 142 |
-
self._preferred_language: str = "en"
|
| 143 |
-
self._initialized: bool = False
|
| 144 |
-
|
| 145 |
-
# VAD settings
|
| 146 |
-
self._vad_threshold: float = DEFAULT_VAD_THRESHOLD
|
| 147 |
-
self._vad_silence_ms: int = DEFAULT_VAD_SILENCE_MS
|
| 148 |
-
self._vad_prefix_ms: int = DEFAULT_VAD_PREFIX_MS
|
| 149 |
-
|
| 150 |
-
# Callback for VAD settings changes (set by OpenAIRealtimeService)
|
| 151 |
-
self.on_vad_settings_changed: Optional[callable] = None
|
| 152 |
-
|
| 153 |
-
@property
|
| 154 |
-
def current_voice(self) -> str:
|
| 155 |
-
return self._current_voice
|
| 156 |
-
|
| 157 |
-
@property
|
| 158 |
-
def current_language(self) -> str:
|
| 159 |
-
return self._current_language
|
| 160 |
-
|
| 161 |
-
@property
|
| 162 |
-
def preferred_language(self) -> str:
|
| 163 |
-
return self._preferred_language
|
| 164 |
-
|
| 165 |
-
@property
|
| 166 |
-
def kids_mode(self) -> bool:
|
| 167 |
-
"""Check if kids mode (pitch shifting) is enabled."""
|
| 168 |
-
return is_kids_mode_enabled()
|
| 169 |
-
|
| 170 |
-
@property
|
| 171 |
-
def vad_threshold(self) -> float:
|
| 172 |
-
"""Get the VAD threshold (0.0-1.0, higher = less sensitive)."""
|
| 173 |
-
return self._vad_threshold
|
| 174 |
-
|
| 175 |
-
@property
|
| 176 |
-
def vad_silence_ms(self) -> int:
|
| 177 |
-
"""Get the VAD silence duration in milliseconds."""
|
| 178 |
-
return self._vad_silence_ms
|
| 179 |
-
|
| 180 |
-
@property
|
| 181 |
-
def vad_prefix_ms(self) -> int:
|
| 182 |
-
"""Get the VAD prefix padding in milliseconds."""
|
| 183 |
-
return self._vad_prefix_ms
|
| 184 |
-
|
| 185 |
-
def set_voice(self, voice_id: str) -> bool:
|
| 186 |
-
"""Set the current OpenAI voice. Returns True if successful."""
|
| 187 |
-
valid_ids = [v["id"] for v in OPENAI_VOICES]
|
| 188 |
-
if voice_id not in valid_ids:
|
| 189 |
-
return False
|
| 190 |
-
self._current_voice = voice_id
|
| 191 |
-
logger.info(f"🔊 Voice changed to: {voice_id}")
|
| 192 |
-
# Persist to database in background
|
| 193 |
-
self._persist_setting(VOICE_ID_KEY, voice_id)
|
| 194 |
-
return True
|
| 195 |
-
|
| 196 |
-
def set_language(self, language: str) -> None:
|
| 197 |
-
"""Set the current language for the conversation."""
|
| 198 |
-
self._current_language = language
|
| 199 |
-
logger.info(f"🌍 Language changed to: {language}")
|
| 200 |
-
|
| 201 |
-
def set_preferred_language(self, language: str) -> bool:
|
| 202 |
-
"""Set the user's preferred language. Returns True if successful."""
|
| 203 |
-
valid_codes = [lang["code"] for lang in SUPPORTED_LANGUAGES]
|
| 204 |
-
if language not in valid_codes:
|
| 205 |
-
return False
|
| 206 |
-
self._preferred_language = language
|
| 207 |
-
self._current_language = language
|
| 208 |
-
logger.info(f"🌍 Preferred language set to: {language}")
|
| 209 |
-
# Persist to database in background
|
| 210 |
-
self._persist_setting(PREFERRED_LANGUAGE_KEY, language)
|
| 211 |
-
return True
|
| 212 |
-
|
| 213 |
-
def set_kids_mode(self, enabled: bool) -> None:
|
| 214 |
-
"""Enable or disable kids mode (pitch shifting for younger voice)."""
|
| 215 |
-
set_kids_mode(enabled)
|
| 216 |
-
logger.info(f"🧒 Kids mode {'enabled' if enabled else 'disabled'}")
|
| 217 |
-
# Persist to database in background
|
| 218 |
-
self._persist_setting(KIDS_MODE_KEY, str(enabled).lower())
|
| 219 |
-
|
| 220 |
-
def set_vad_threshold(self, threshold: float) -> bool:
|
| 221 |
-
"""Set the VAD threshold (0.0-1.0, higher = less sensitive).
|
| 222 |
-
|
| 223 |
-
Returns True if successful.
|
| 224 |
-
"""
|
| 225 |
-
if not 0.0 <= threshold <= 1.0:
|
| 226 |
-
return False
|
| 227 |
-
self._vad_threshold = threshold
|
| 228 |
-
logger.info(f"🎤 VAD threshold set to: {threshold}")
|
| 229 |
-
self._persist_setting(VAD_THRESHOLD_KEY, str(threshold))
|
| 230 |
-
self._notify_vad_change()
|
| 231 |
-
return True
|
| 232 |
-
|
| 233 |
-
def set_vad_silence_ms(self, silence_ms: int) -> bool:
|
| 234 |
-
"""Set the VAD silence duration in milliseconds (200-3000).
|
| 235 |
-
|
| 236 |
-
Returns True if successful.
|
| 237 |
-
"""
|
| 238 |
-
if not 200 <= silence_ms <= 3000:
|
| 239 |
-
return False
|
| 240 |
-
self._vad_silence_ms = silence_ms
|
| 241 |
-
logger.info(f"🎤 VAD silence duration set to: {silence_ms}ms")
|
| 242 |
-
self._persist_setting(VAD_SILENCE_MS_KEY, str(silence_ms))
|
| 243 |
-
self._notify_vad_change()
|
| 244 |
-
return True
|
| 245 |
-
|
| 246 |
-
def set_vad_prefix_ms(self, prefix_ms: int) -> bool:
|
| 247 |
-
"""Set the VAD prefix padding in milliseconds (100-1000).
|
| 248 |
-
|
| 249 |
-
Returns True if successful.
|
| 250 |
-
"""
|
| 251 |
-
if not 100 <= prefix_ms <= 1000:
|
| 252 |
-
return False
|
| 253 |
-
self._vad_prefix_ms = prefix_ms
|
| 254 |
-
logger.info(f"🎤 VAD prefix padding set to: {prefix_ms}ms")
|
| 255 |
-
self._persist_setting(VAD_PREFIX_MS_KEY, str(prefix_ms))
|
| 256 |
-
self._notify_vad_change()
|
| 257 |
-
return True
|
| 258 |
-
|
| 259 |
-
def set_vad_settings(
|
| 260 |
-
self,
|
| 261 |
-
threshold: Optional[float] = None,
|
| 262 |
-
silence_ms: Optional[int] = None,
|
| 263 |
-
prefix_ms: Optional[int] = None,
|
| 264 |
-
) -> bool:
|
| 265 |
-
"""Set multiple VAD settings at once.
|
| 266 |
-
|
| 267 |
-
Only provided values are updated. Returns True if all valid.
|
| 268 |
-
"""
|
| 269 |
-
success = True
|
| 270 |
-
changed = False
|
| 271 |
-
|
| 272 |
-
if threshold is not None:
|
| 273 |
-
if 0.0 <= threshold <= 1.0:
|
| 274 |
-
self._vad_threshold = threshold
|
| 275 |
-
self._persist_setting(VAD_THRESHOLD_KEY, str(threshold))
|
| 276 |
-
changed = True
|
| 277 |
-
else:
|
| 278 |
-
success = False
|
| 279 |
-
|
| 280 |
-
if silence_ms is not None:
|
| 281 |
-
if 200 <= silence_ms <= 3000:
|
| 282 |
-
self._vad_silence_ms = silence_ms
|
| 283 |
-
self._persist_setting(VAD_SILENCE_MS_KEY, str(silence_ms))
|
| 284 |
-
changed = True
|
| 285 |
-
else:
|
| 286 |
-
success = False
|
| 287 |
-
|
| 288 |
-
if prefix_ms is not None:
|
| 289 |
-
if 100 <= prefix_ms <= 1000:
|
| 290 |
-
self._vad_prefix_ms = prefix_ms
|
| 291 |
-
self._persist_setting(VAD_PREFIX_MS_KEY, str(prefix_ms))
|
| 292 |
-
changed = True
|
| 293 |
-
else:
|
| 294 |
-
success = False
|
| 295 |
-
|
| 296 |
-
if changed:
|
| 297 |
-
logger.info(
|
| 298 |
-
f"🎤 VAD settings updated: threshold={self._vad_threshold}, "
|
| 299 |
-
f"silence={self._vad_silence_ms}ms, prefix={self._vad_prefix_ms}ms"
|
| 300 |
-
)
|
| 301 |
-
self._notify_vad_change()
|
| 302 |
-
|
| 303 |
-
return success
|
| 304 |
-
|
| 305 |
-
def apply_vad_preset(self, preset_id: str) -> bool:
|
| 306 |
-
"""Apply a VAD preset by ID.
|
| 307 |
-
|
| 308 |
-
Available presets: quiet_room, normal, noisy, conference
|
| 309 |
-
"""
|
| 310 |
-
preset = VAD_PRESETS.get(preset_id)
|
| 311 |
-
if not preset:
|
| 312 |
-
return False
|
| 313 |
-
|
| 314 |
-
self._vad_threshold = preset["threshold"]
|
| 315 |
-
self._vad_silence_ms = preset["silence_ms"]
|
| 316 |
-
self._vad_prefix_ms = preset["prefix_ms"]
|
| 317 |
-
|
| 318 |
-
# Persist all settings
|
| 319 |
-
self._persist_setting(VAD_THRESHOLD_KEY, str(preset["threshold"]))
|
| 320 |
-
self._persist_setting(VAD_SILENCE_MS_KEY, str(preset["silence_ms"]))
|
| 321 |
-
self._persist_setting(VAD_PREFIX_MS_KEY, str(preset["prefix_ms"]))
|
| 322 |
-
|
| 323 |
-
logger.info(f"🎤 Applied VAD preset: {preset['name']}")
|
| 324 |
-
self._notify_vad_change()
|
| 325 |
-
return True
|
| 326 |
-
|
| 327 |
-
def _notify_vad_change(self) -> None:
|
| 328 |
-
"""Notify listeners that VAD settings have changed."""
|
| 329 |
-
if self.on_vad_settings_changed:
|
| 330 |
-
try:
|
| 331 |
-
self.on_vad_settings_changed()
|
| 332 |
-
except Exception as e:
|
| 333 |
-
logger.error(f"Error in VAD change callback: {e}")
|
| 334 |
-
|
| 335 |
-
def _persist_setting(self, key: str, value: str) -> None:
|
| 336 |
-
"""Persist a setting to the database in a background task."""
|
| 337 |
-
try:
|
| 338 |
-
loop = asyncio.get_running_loop()
|
| 339 |
-
loop.create_task(self._save_to_database(key, value))
|
| 340 |
-
except RuntimeError:
|
| 341 |
-
# No running event loop - this can happen during initialization
|
| 342 |
-
# Settings will be persisted on the next change
|
| 343 |
-
logger.debug(f"No event loop for persisting {key}, will persist later")
|
| 344 |
-
|
| 345 |
-
async def _save_to_database(self, key: str, value: str) -> None:
    """Best-effort write of a single key/value setting to the database.

    Failures (including an unavailable database module) are logged as a
    warning rather than raised, since persistence is not critical.
    """
    try:
        from ..database import get_database

        await get_database().set_user_setting(key, value)
        logger.debug(f"💾 Persisted voice setting: {key}={value}")
    except Exception as e:
        logger.warning(f"Failed to persist voice setting {key}: {e}")
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
# Singleton instance
# Module-level VoiceSettings object shared by the public API helper
# functions and the FastAPI route handlers defined below.
_voice_settings = VoiceSettings()
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
def _restore_vad_setting(raw, caster, low, high, attr: str, label: str, unit: str) -> None:
    """Parse *raw* with *caster* and, if it lies within [low, high], store it.

    Helper for init_voice_settings: writes the validated value onto the
    settings singleton under *attr* and logs it. Invalid or missing values
    are silently ignored, matching the previous per-setting behavior.
    """
    if not raw:
        return
    try:
        value = caster(raw)
    except ValueError:
        return
    if low <= value <= high:
        setattr(_voice_settings, attr, value)
        logger.info(f"🎤 Loaded {label} from database: {value}{unit}")


async def init_voice_settings() -> None:
    """Initialize voice settings from the database.

    Should be called after database initialization during app startup.
    Falls back to the in-memory defaults (with a warning) when the
    database is unavailable or any load step fails.
    """
    try:
        from ..database import get_database
        db = get_database()

        # Voice: only accept IDs actually offered by OpenAI.
        voice_id = await db.get_user_setting(VOICE_ID_KEY)
        if voice_id and voice_id in {v["id"] for v in OPENAI_VOICES}:
            _voice_settings._current_voice = voice_id
            logger.info(f"🔊 Loaded voice from database: {voice_id}")

        # Language: restore as both the preferred and the active language.
        language = await db.get_user_setting(PREFERRED_LANGUAGE_KEY)
        if language:
            _voice_settings._preferred_language = language
            _voice_settings._current_language = language
            logger.info(f"🌍 Loaded language from database: {language}")

        # Kids mode (stored as the string "true"/"false").
        kids_mode_str = await db.get_user_setting(KIDS_MODE_KEY)
        if kids_mode_str:
            kids_mode_enabled = kids_mode_str.lower() == "true"
            set_kids_mode(kids_mode_enabled)
            logger.info(f"🧒 Loaded kids mode from database: {kids_mode_enabled}")

        # VAD tuning values, each validated against its allowed range.
        _restore_vad_setting(
            await db.get_user_setting(VAD_THRESHOLD_KEY),
            float, 0.0, 1.0, "_vad_threshold", "VAD threshold", "",
        )
        _restore_vad_setting(
            await db.get_user_setting(VAD_SILENCE_MS_KEY),
            int, 200, 3000, "_vad_silence_ms", "VAD silence duration", "ms",
        )
        _restore_vad_setting(
            await db.get_user_setting(VAD_PREFIX_MS_KEY),
            int, 100, 1000, "_vad_prefix_ms", "VAD prefix", "ms",
        )

        _voice_settings._initialized = True
        logger.info("✅ Voice settings initialized from database")

    except Exception as e:
        logger.warning(f"Failed to load voice settings from database: {e}")
        logger.info("Using default voice settings")
|
| 430 |
-
|
| 431 |
-
|
| 432 |
-
# Supported languages with display names
# Codes are ISO 639-1; the flag emoji is shown by the settings UI next
# to the language name.
SUPPORTED_LANGUAGES = [
    {"code": "en", "name": "English", "flag": "🇺🇸"},
    {"code": "nl", "name": "Dutch", "flag": "🇳🇱"},
    {"code": "de", "name": "German", "flag": "🇩🇪"},
    {"code": "fr", "name": "French", "flag": "🇫🇷"},
    {"code": "es", "name": "Spanish", "flag": "🇪🇸"},
    {"code": "it", "name": "Italian", "flag": "🇮🇹"},
    {"code": "pt", "name": "Portuguese", "flag": "🇵🇹"},
    {"code": "ja", "name": "Japanese", "flag": "🇯🇵"},
    {"code": "ko", "name": "Korean", "flag": "🇰🇷"},
    {"code": "zh", "name": "Chinese", "flag": "🇨🇳"},
    {"code": "ar", "name": "Arabic", "flag": "🇸🇦"},
    {"code": "hi", "name": "Hindi", "flag": "🇮🇳"},
    {"code": "ru", "name": "Russian", "flag": "🇷🇺"},
    {"code": "pl", "name": "Polish", "flag": "🇵🇱"},
    {"code": "tr", "name": "Turkish", "flag": "🇹🇷"},
    {"code": "sv", "name": "Swedish", "flag": "🇸🇪"},
    {"code": "da", "name": "Danish", "flag": "🇩🇰"},
    {"code": "no", "name": "Norwegian", "flag": "🇳🇴"},
    {"code": "fi", "name": "Finnish", "flag": "🇫🇮"},
]
|
| 454 |
-
|
| 455 |
-
|
| 456 |
-
# MARK: - Public API Functions (delegate to VoiceSettings)
|
| 457 |
-
|
| 458 |
-
def get_current_voice() -> str:
    """Return the OpenAI voice that is currently active."""
    return _voice_settings.current_voice
|
| 461 |
-
|
| 462 |
-
|
| 463 |
-
def get_current_language() -> str:
    """Return the language the conversation is currently using."""
    return _voice_settings.current_language
|
| 466 |
-
|
| 467 |
-
|
| 468 |
-
def set_current_voice(voice_id: str) -> bool:
    """Select *voice_id* as the active OpenAI voice.

    Returns:
        True on success, False when the voice ID is unknown.
    """
    return _voice_settings.set_voice(voice_id)
|
| 474 |
-
|
| 475 |
-
|
| 476 |
-
def set_current_language(language: str) -> None:
    """Switch the active conversation language to *language*."""
    _voice_settings.set_language(language)
|
| 479 |
-
|
| 480 |
-
|
| 481 |
-
def get_preferred_language() -> str:
    """Return the language code the user prefers."""
    return _voice_settings.preferred_language
|
| 484 |
-
|
| 485 |
-
|
| 486 |
-
def set_preferred_language(language: str) -> bool:
    """Store *language* as the user's preferred language.

    Returns:
        True on success, False when the language is not supported.
    """
    return _voice_settings.set_preferred_language(language)
|
| 492 |
-
|
| 493 |
-
|
| 494 |
-
# MARK: - VAD Settings Public API Functions
|
| 495 |
-
|
| 496 |
-
def get_vad_threshold() -> float:
    """Return the active VAD sensitivity threshold (0.0-1.0)."""
    return _voice_settings.vad_threshold
|
| 499 |
-
|
| 500 |
-
|
| 501 |
-
def get_vad_silence_ms() -> int:
    """Return the active VAD end-of-speech silence window, in milliseconds."""
    return _voice_settings.vad_silence_ms
|
| 504 |
-
|
| 505 |
-
|
| 506 |
-
def get_vad_prefix_ms() -> int:
    """Return the active VAD pre-speech padding, in milliseconds."""
    return _voice_settings.vad_prefix_ms
|
| 509 |
-
|
| 510 |
-
|
| 511 |
-
def get_vad_settings() -> dict:
    """Bundle the three VAD tuning values into a plain dictionary."""
    settings = _voice_settings
    return {
        "threshold": settings.vad_threshold,
        "silence_ms": settings.vad_silence_ms,
        "prefix_ms": settings.vad_prefix_ms,
    }
|
| 518 |
-
|
| 519 |
-
|
| 520 |
-
def set_vad_settings_callback(callback: callable) -> None:
    """Register *callback* to run whenever VAD settings change.

    The OpenAI Realtime session uses this hook to reconfigure itself
    with the new turn-detection parameters.
    """
    _voice_settings.on_vad_settings_changed = callback
|
| 526 |
-
|
| 527 |
-
|
| 528 |
-
@router.get("/voices", response_model=VoicesListResponse)
async def get_voices() -> VoicesListResponse:
    """Get available OpenAI Realtime voices and current selection.

    Every voice can speak any language; the language is configured
    separately. Available voices: alloy (neutral), ash (soft/warm),
    ballad (expressive), coral (clear/friendly), echo (deep), sage
    (calm), shimmer (bright), verse (dynamic).
    """
    voices = [
        # All OpenAI voices support all languages
        Voice(id=entry["id"], name=entry["name"], language="multilingual")
        for entry in OPENAI_VOICES
    ]

    logger.info(f"Returning {len(voices)} OpenAI voices, current: {_voice_settings.current_voice}, language: {_voice_settings.preferred_language}")

    return VoicesListResponse(
        voices=voices,
        current_voice=_voice_settings.current_voice,
        preferred_language=_voice_settings.preferred_language,
    )
|
| 558 |
-
|
| 559 |
-
|
| 560 |
-
@router.get("/voice", response_model=VoiceResponse)
async def get_current_voice_endpoint() -> VoiceResponse:
    """Report which OpenAI voice is currently selected."""
    return VoiceResponse(success=True, current_voice=_voice_settings.current_voice)
|
| 567 |
-
|
| 568 |
-
|
| 569 |
-
@router.post("/voice", response_model=VoiceResponse)
async def set_voice(request: VoiceRequest) -> VoiceResponse:
    """Set the OpenAI Realtime voice.

    Any of the eight multilingual voices may be chosen: alloy (neutral),
    ash (soft/warm), ballad (expressive), coral (clear/friendly),
    echo (deep), sage (calm), shimmer (bright), verse (dynamic).
    """
    # Guard clause: reject unknown voice IDs with the list of valid ones.
    if not set_current_voice(request.voice_id):
        valid_voices = [v["id"] for v in OPENAI_VOICES]
        return VoiceResponse(
            success=False,
            current_voice=_voice_settings.current_voice,
            message=f"Invalid voice. Available: {', '.join(valid_voices)}",
        )

    return VoiceResponse(
        success=True,
        current_voice=request.voice_id,
        message=f"Voice set to {request.voice_id}",
    )
|
| 596 |
-
|
| 597 |
-
|
| 598 |
-
@router.get("/voice/info")
async def get_voice_info() -> dict:
    """Return metadata about every available OpenAI voice."""
    return {
        "provider": "openai",
        "voices": OPENAI_VOICES,
        "current_voice": _voice_settings.current_voice,
        "current_language": _voice_settings.current_language,
        "note": "All OpenAI voices support any language. The language is set separately.",
    }
|
| 608 |
-
|
| 609 |
-
|
| 610 |
-
@router.post("/voice/language")
async def set_language(language: str = "en") -> dict:
    """Change the conversation language.

    Only the OpenAI Realtime system prompt is affected; every OpenAI
    voice natively speaks every language.
    """
    set_current_language(language)
    return {
        "success": True,
        "language": language,
        "message": f"Language set to {language}",
    }
|
| 624 |
-
|
| 625 |
-
|
| 626 |
-
@router.get("/voice/preferred-language")
async def get_preferred_language_endpoint() -> dict:
    """Return the preferred language code plus every supported language."""
    return {
        "preferred_language": _voice_settings.preferred_language,
        "supported_languages": SUPPORTED_LANGUAGES,
    }
|
| 636 |
-
|
| 637 |
-
|
| 638 |
-
@router.post("/voice/preferred-language")
async def set_preferred_language_endpoint(language: str = "en") -> dict:
    """Set the user's preferred language.

    Updates both the stored preference and the active conversation
    language. All OpenAI voices handle every language natively.

    Args:
        language: Language code (e.g., 'en', 'nl', 'de', 'fr')
    """
    # Guard clause: unsupported codes yield the list of valid ones.
    if not set_preferred_language(language):
        valid_codes = [entry["code"] for entry in SUPPORTED_LANGUAGES]
        return {
            "success": False,
            "preferred_language": _voice_settings.preferred_language,
            "message": f"Invalid language. Supported: {', '.join(valid_codes)}",
        }

    # Resolve a human-readable name for the confirmation message,
    # falling back to the raw code if no display entry exists.
    lang_name = next(
        (entry["name"] for entry in SUPPORTED_LANGUAGES if entry["code"] == language),
        language,
    )
    return {
        "success": True,
        "preferred_language": language,
        "message": f"Preferred language set to {lang_name}",
    }
|
| 665 |
-
|
| 666 |
-
|
| 667 |
-
# MARK: - Kids Mode Endpoints
|
| 668 |
-
|
| 669 |
-
@router.get("/voice/kids-mode")
async def get_kids_mode() -> dict:
    """Report whether kids mode is active.

    Kids mode pitch-shifts Reachy's voice to sound younger/child-like.

    Returns:
        kids_mode: True when the pitch-shifted voice is enabled.
    """
    return {"kids_mode": _voice_settings.kids_mode}
|
| 681 |
-
|
| 682 |
-
|
| 683 |
-
@router.post("/voice/kids-mode")
async def set_kids_mode_endpoint(enabled: bool = False) -> dict:
    """Turn kids mode on or off.

    When enabled, a pitch shift is applied to Reachy's audio output so
    the voice sounds younger and more suitable for conversations with
    children.

    Args:
        enabled: True to enable kids mode, False to disable.

    Returns:
        Success status and the new kids mode state.
    """
    _voice_settings.set_kids_mode(enabled)
    return {
        "success": True,
        "kids_mode": enabled,
        "message": f"Kids mode {'enabled' if enabled else 'disabled'}",
    }
|
| 704 |
-
|
| 705 |
-
|
| 706 |
-
# MARK: - VAD Settings Endpoints
|
| 707 |
-
|
| 708 |
-
@router.get("/voice/vad-settings")
async def get_vad_settings_endpoint() -> dict:
    """Return the active VAD (Voice Activity Detection) settings.

    These control how Reachy detects that you are speaking:
    - threshold: Sensitivity (0.0-1.0, higher = less sensitive to background noise)
    - silence_ms: How long to wait after you stop speaking before responding
    - prefix_ms: How much audio to buffer before speech is detected

    Returns:
        Current VAD settings and available presets.
    """
    return {
        "threshold": _voice_settings.vad_threshold,
        "silence_ms": _voice_settings.vad_silence_ms,
        "prefix_ms": _voice_settings.vad_prefix_ms,
        "presets": list(VAD_PRESETS.values()),
    }
|
| 726 |
-
|
| 727 |
-
|
| 728 |
-
@router.post("/voice/vad-settings")
async def set_vad_settings_endpoint(
    threshold: Optional[float] = None,
    prefix_ms: Optional[int] = None,
    silence_ms: Optional[int] = None,
) -> dict:
    """Update VAD (Voice Activity Detection) settings.

    Tune these so Reachy works well in different environments:
    - Quiet rooms: lower threshold (0.5) and shorter silence (500ms)
    - Noisy spaces: higher threshold (0.85-0.9) and longer silence (1000-1200ms)

    Args:
        threshold: Sensitivity 0.0-1.0 (higher = less sensitive to background)
        silence_ms: Wait time before responding (200-3000ms)
        prefix_ms: Audio buffer before speech (100-1000ms)

    Returns:
        Success status and current settings.
    """
    updated = _voice_settings.set_vad_settings(
        threshold=threshold,
        silence_ms=silence_ms,
        prefix_ms=prefix_ms,
    )

    # Whatever happened, report the values actually in effect.
    current = {
        "threshold": _voice_settings.vad_threshold,
        "silence_ms": _voice_settings.vad_silence_ms,
        "prefix_ms": _voice_settings.vad_prefix_ms,
    }
    if updated:
        return {"success": True, **current, "message": "VAD settings updated"}
    return {
        "success": False,
        **current,
        "message": "Invalid values. threshold: 0.0-1.0, silence_ms: 200-3000, prefix_ms: 100-1000",
    }
|
| 771 |
-
|
| 772 |
-
|
| 773 |
-
@router.post("/voice/vad-preset")
async def apply_vad_preset_endpoint(preset_id: str) -> dict:
    """Apply a named VAD preset tuned for a specific environment.

    Available presets:
    - quiet_room: More sensitive, faster responses (for quiet environments)
    - normal: Balanced default settings
    - noisy: Less sensitive, ignores background noise
    - conference: Very strict, ignores most background voices

    Args:
        preset_id: ID of the preset to apply.

    Returns:
        Success status and applied settings.
    """
    # Guard clause: unknown preset IDs return the list of valid ones.
    if not _voice_settings.apply_vad_preset(preset_id):
        return {
            "success": False,
            "available_presets": list(VAD_PRESETS.keys()),
            "message": f"Unknown preset '{preset_id}'",
        }

    preset = VAD_PRESETS[preset_id]
    return {
        "success": True,
        "preset": preset,
        "threshold": _voice_settings.vad_threshold,
        "silence_ms": _voice_settings.vad_silence_ms,
        "prefix_ms": _voice_settings.vad_prefix_ms,
        "message": f"Applied preset: {preset['name']}",
    }
|
| 805 |
-
|
| 806 |
-
|
| 807 |
-
@router.get("/voice/vad-presets")
async def get_vad_presets_endpoint() -> dict:
    """List every VAD preset alongside the values currently in effect."""
    return {
        "presets": list(VAD_PRESETS.values()),
        "current_threshold": _voice_settings.vad_threshold,
        "current_silence_ms": _voice_settings.vad_silence_ms,
        "current_prefix_ms": _voice_settings.vad_prefix_ms,
    }
|
| 820 |
-
|
| 821 |
-
|
| 822 |
-
# Sample phrases by language for voice testing
# Outer key: ISO 639-1 language code; inner key: OpenAI voice ID.
# Languages without a table fall back to English, and unknown voices fall
# back to the 'alloy' phrase (see _get_voice_sample_phrase below).
VOICE_SAMPLE_PHRASES = {
    "en": {
        "alloy": "Hello! I'm Alloy, a balanced and versatile voice.",
        "ash": "Hi there. I'm Ash, with a soft and warm tone.",
        "ballad": "Greetings! I'm Ballad, ready to tell you a story.",
        "coral": "Hey! I'm Coral, clear and friendly!",
        "echo": "Hello. I'm Echo, with a deep and resonant voice.",
        "sage": "Welcome. I'm Sage, calm and thoughtful.",
        "shimmer": "Hi! I'm Shimmer, bright and full of energy!",
        "verse": "Hello! I'm Verse, dynamic and engaging.",
    },
    "nl": {
        "alloy": "Hallo! Ik ben Alloy, een evenwichtige en veelzijdige stem.",
        "ash": "Hoi! Ik ben Ash, met een zachte en warme toon.",
        "ballad": "Goedendag! Ik ben Ballad, klaar om je een verhaal te vertellen.",
        "coral": "Hey! Ik ben Coral, helder en vriendelijk!",
        "echo": "Hallo. Ik ben Echo, met een diepe en resonante stem.",
        "sage": "Welkom. Ik ben Sage, kalm en bedachtzaam.",
        "shimmer": "Hoi! Ik ben Shimmer, vol energie!",
        "verse": "Hallo! Ik ben Verse, dynamisch en boeiend.",
    },
    "de": {
        "alloy": "Hallo! Ich bin Alloy, eine ausgewogene und vielseitige Stimme.",
        "ash": "Hallo! Ich bin Ash, mit einem sanften und warmen Ton.",
        "ballad": "Guten Tag! Ich bin Ballad, bereit dir eine Geschichte zu erzählen.",
        "coral": "Hey! Ich bin Coral, klar und freundlich!",
        "echo": "Hallo. Ich bin Echo, mit einer tiefen und resonanten Stimme.",
        "sage": "Willkommen. Ich bin Sage, ruhig und nachdenklich.",
        "shimmer": "Hallo! Ich bin Shimmer, voller Energie!",
        "verse": "Hallo! Ich bin Verse, dynamisch und fesselnd.",
    },
    "fr": {
        "alloy": "Bonjour! Je suis Alloy, une voix équilibrée et polyvalente.",
        "ash": "Salut! Je suis Ash, avec un ton doux et chaleureux.",
        "ballad": "Bonjour! Je suis Ballad, prêt à vous raconter une histoire.",
        "coral": "Hey! Je suis Coral, claire et amicale!",
        "echo": "Bonjour. Je suis Echo, avec une voix profonde et résonnante.",
        "sage": "Bienvenue. Je suis Sage, calme et réfléchi.",
        "shimmer": "Salut! Je suis Shimmer, pleine d'énergie!",
        "verse": "Bonjour! Je suis Verse, dynamique et captivant.",
    },
    "es": {
        "alloy": "¡Hola! Soy Alloy, una voz equilibrada y versátil.",
        "ash": "¡Hola! Soy Ash, con un tono suave y cálido.",
        "ballad": "¡Hola! Soy Ballad, listo para contarte una historia.",
        "coral": "¡Hey! Soy Coral, clara y amigable!",
        "echo": "Hola. Soy Echo, con una voz profunda y resonante.",
        "sage": "Bienvenido. Soy Sage, tranquilo y reflexivo.",
        "shimmer": "¡Hola! Soy Shimmer, llena de energía!",
        "verse": "¡Hola! Soy Verse, dinámico y cautivador.",
    },
    "it": {
        "alloy": "Ciao! Sono Alloy, una voce equilibrata e versatile.",
        "ash": "Ciao! Sono Ash, con un tono morbido e caldo.",
        "ballad": "Salve! Sono Ballad, pronto a raccontarti una storia.",
        "coral": "Hey! Sono Coral, chiara e amichevole!",
        "echo": "Ciao. Sono Echo, con una voce profonda e risonante.",
        "sage": "Benvenuto. Sono Sage, calmo e riflessivo.",
        "shimmer": "Ciao! Sono Shimmer, piena di energia!",
        "verse": "Ciao! Sono Verse, dinamico e coinvolgente.",
    },
    "pt": {
        "alloy": "Olá! Eu sou Alloy, uma voz equilibrada e versátil.",
        "ash": "Oi! Eu sou Ash, com um tom suave e caloroso.",
        "ballad": "Olá! Eu sou Ballad, pronto para contar uma história.",
        "coral": "Hey! Eu sou Coral, clara e amigável!",
        "echo": "Olá. Eu sou Echo, com uma voz profunda e ressonante.",
        "sage": "Bem-vindo. Eu sou Sage, calmo e reflexivo.",
        "shimmer": "Oi! Eu sou Shimmer, cheia de energia!",
        "verse": "Olá! Eu sou Verse, dinâmico e envolvente.",
    },
    "ja": {
        "alloy": "こんにちは!私はAlloyです。バランスの取れた多用途な声です。",
        "ash": "こんにちは!私はAshです。柔らかく温かみのある声です。",
        "ballad": "こんにちは!私はBalladです。物語を語る準備ができています。",
        "coral": "ハイ!私はCoralです。明るくフレンドリーな声です!",
        "echo": "こんにちは。私はEchoです。深く響く声です。",
        "sage": "ようこそ。私はSageです。穏やかで思慮深い声です。",
        "shimmer": "こんにちは!私はShimmerです。エネルギッシュな声です!",
        "verse": "こんにちは!私はVerseです。ダイナミックで魅力的な声です。",
    },
    "ko": {
        "alloy": "안녕하세요! 저는 Alloy입니다. 균형 잡힌 다재다능한 목소리예요.",
        "ash": "안녕하세요! 저는 Ash입니다. 부드럽고 따뜻한 톤이에요.",
        "ballad": "안녕하세요! 저는 Ballad입니다. 이야기를 들려드릴 준비가 됐어요.",
        "coral": "안녕! 저는 Coral이에요. 맑고 친근한 목소리예요!",
        "echo": "안녕하세요. 저는 Echo입니다. 깊고 울림 있는 목소리예요.",
        "sage": "환영합니다. 저는 Sage입니다. 차분하고 사려 깊은 목소리예요.",
        "shimmer": "안녕하세요! 저는 Shimmer예요. 활기찬 목소리예요!",
        "verse": "안녕하세요! 저는 Verse입니다. 역동적이고 매력적인 목소리예요.",
    },
    "zh": {
        "alloy": "你好!我是Alloy,一个平衡且多功能的声音。",
        "ash": "你好!我是Ash,声音柔和温暖。",
        "ballad": "你好!我是Ballad,准备给你讲故事。",
        "coral": "嗨!我是Coral,清晰又友好!",
        "echo": "你好。我是Echo,声音深沉有共鸣。",
        "sage": "欢迎。我是Sage,冷静而深思熟虑。",
        "shimmer": "你好!我是Shimmer,充满活力!",
        "verse": "你好!我是Verse,充满活力和魅力。",
    },
}
|
| 925 |
-
|
| 926 |
-
|
| 927 |
-
def _get_voice_sample_phrase(voice: str, language: str) -> str:
    """Look up the voice-test phrase for *voice* in *language*.

    Falls back to the English table when the language is unknown, and to
    the 'alloy' phrase (or a generic sentence) when the voice is unknown.

    Args:
        voice: Voice ID (e.g., 'alloy', 'coral')
        language: Language code (e.g., 'en', 'nl', 'de')

    Returns:
        Sample phrase in the specified language.
    """
    table = VOICE_SAMPLE_PHRASES.get(language, VOICE_SAMPLE_PHRASES["en"])
    fallback = table.get("alloy", f"Hello! I'm testing the {voice} voice.")
    return table.get(voice, fallback)
|
| 940 |
-
|
| 941 |
-
|
| 942 |
-
@router.post("/voice/test")
async def test_voice(request: Optional[VoiceRequest] = None) -> dict:
    """Test an OpenAI voice by playing a sample phrase.

    Uses OpenAI's TTS API to generate audio with the specified voice,
    then plays it through Reachy's speaker in the user's preferred language.

    Args:
        request: VoiceRequest with voice_id to test. If not provided, uses current voice.

    Returns:
        Success status and message.

    Raises:
        HTTPException: 503 when no API key is configured, 400 for an
            unknown voice, 504 on TTS timeout, otherwise the TTS status
            code or 500 for unexpected errors.
    """
    # Get API key from environment
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        raise HTTPException(
            status_code=503,
            detail="OpenAI API key not set. Start a conversation first or configure the API key in Settings."
        )

    # Use provided voice or current voice
    voice = (request.voice_id if request else None) or _voice_settings.current_voice

    # Validate voice
    valid_ids = [v["id"] for v in OPENAI_VOICES]
    if voice not in valid_ids:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid voice '{voice}'. Available: {', '.join(valid_ids)}"
        )

    # TTS API supports different voices than Realtime API
    # Map Realtime-only voices to similar TTS voices for preview
    # NOTE(review): assumes the tts-1 model lacks ballad/verse — confirm
    # against the current OpenAI TTS voice list.
    TTS_VOICE_MAP = {
        "alloy": "alloy",
        "ash": "ash",
        "ballad": "fable",  # ballad -> fable (expressive, storytelling)
        "coral": "coral",
        "echo": "echo",
        "sage": "sage",
        "shimmer": "shimmer",
        "verse": "nova",  # verse -> nova (dynamic, engaging)
    }

    tts_voice = TTS_VOICE_MAP.get(voice, "alloy")
    # Track whether a substitute voice was used so the response can say so.
    is_mapped = tts_voice != voice

    # Get sample phrase in user's preferred language
    language = _voice_settings.preferred_language
    sample_text = _get_voice_sample_phrase(voice, language)

    logger.info(f"🎤 Testing voice '{voice}' (TTS: {tts_voice}) with OpenAI TTS API")

    try:
        # Call OpenAI TTS API
        async with httpx.AsyncClient() as client:
            response = await client.post(
                OPENAI_TTS_URL,
                headers={
                    "Authorization": f"Bearer {api_key}",
                    "Content-Type": "application/json",
                },
                json={
                    "model": "tts-1",
                    "input": sample_text,
                    "voice": tts_voice,  # Use mapped TTS voice
                    "response_format": "pcm",  # Raw PCM for direct playback
                },
                timeout=30.0,
            )

            # Forward OpenAI's error status and body verbatim to the caller.
            if response.status_code != 200:
                error_detail = response.text
                logger.error(f"OpenAI TTS API error: {response.status_code} - {error_detail}")
                raise HTTPException(
                    status_code=response.status_code,
                    detail=f"OpenAI TTS error: {error_detail}"
                )

            audio_data = response.content
            logger.info(f"✅ Received {len(audio_data)} bytes of audio from OpenAI")

            # Play audio using aplay (OpenAI TTS PCM is 24kHz 16-bit mono)
            await _play_pcm_audio(audio_data)

            logger.info(f"✅ Voice test complete for '{voice}' (TTS: {tts_voice})")

            message = f"Played sample for {voice}"
            if is_mapped:
                message += f" (preview uses {tts_voice})"

            return {
                "success": True,
                "voice": voice,
                "tts_voice": tts_voice,
                "message": message,
            }

    except httpx.TimeoutException:
        logger.error("OpenAI TTS API timeout")
        raise HTTPException(status_code=504, detail="OpenAI TTS API timeout")
    except HTTPException:
        # Re-raise our own HTTP errors untouched (don't wrap them as 500s).
        raise
    except Exception as e:
        logger.error(f"Error testing voice: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
| 1049 |
-
|
| 1050 |
-
|
| 1051 |
-
async def _play_pcm_audio(audio_data: bytes) -> None:
    """Play PCM audio data through Reachy's speaker.

    Pitch-shifts the audio through sox when kids mode is enabled and sox is
    installed; otherwise plays the raw stream directly with aplay.

    Args:
        audio_data: Raw PCM16 audio at 24kHz mono (OpenAI TTS PCM format).

    Raises:
        RuntimeError: If aplay (or the sox|aplay pipeline) exits non-zero.
        subprocess.TimeoutExpired: If playback does not finish within 30s.
    """
    import shutil

    # Pitch shift only when kids mode is on AND sox is actually available.
    use_pitch_shift = is_kids_mode_enabled() and shutil.which("sox") is not None

    if is_kids_mode_enabled() and not shutil.which("sox"):
        logger.warning("🧒 Kids mode enabled but sox not found, using normal playback")

    # OpenAI TTS PCM format: 24kHz, 16-bit signed little-endian, mono
    aplay_cmd = [
        "aplay",
        "-r", "24000",
        "-f", "S16_LE",
        "-t", "raw",
        "-c", "1",
        "-D", AUDIO_DEVICE,
        "-q",
    ]

    # get_running_loop() is the supported API inside a coroutine;
    # get_event_loop() is deprecated for this use since Python 3.10.
    loop = asyncio.get_running_loop()

    def _run_playback() -> None:
        if use_pitch_shift:
            # sox reads raw PCM from stdin, applies pitch/tempo, and writes
            # raw PCM to stdout, which is piped straight into aplay.
            sox_cmd = [
                "sox",
                "-t", "raw",
                "-r", "24000",
                "-e", "signed",
                "-b", "16",
                "-c", "1",
                "-",  # input from stdin
                "-t", "raw",
                "-",  # output to stdout
                "pitch", str(KIDS_MODE_PITCH_CENTS),
                "tempo", str(KIDS_MODE_TEMPO),
            ]

            # DEVNULL instead of an unread stderr PIPE: a filled pipe buffer
            # would block the child and deadlock the 30s wait below.
            sox_process = subprocess.Popen(
                sox_cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.DEVNULL,
            )
            aplay_process = subprocess.Popen(
                aplay_cmd,
                stdin=sox_process.stdout,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )

            # Close the parent's copy of sox's stdout so aplay sees EOF
            # as soon as sox exits.
            sox_process.stdout.close()

            # Feed the PCM into sox and signal end-of-input.
            sox_process.stdin.write(audio_data)
            sox_process.stdin.close()

            sox_rc = sox_process.wait(timeout=30)
            aplay_rc = aplay_process.wait(timeout=30)
            # Surface failures instead of silently reporting success
            # (the previous version ignored both exit codes).
            if sox_rc != 0 or aplay_rc != 0:
                raise RuntimeError(
                    f"pitch-shifted playback failed (sox={sox_rc}, aplay={aplay_rc})"
                )

            logger.info("🧒 Played audio with kids mode pitch shift")
        else:
            # Normal playback without pitch shifting.
            process = subprocess.Popen(
                aplay_cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            _stdout, stderr = process.communicate(input=audio_data, timeout=30)
            if process.returncode != 0:
                error_msg = stderr.decode() if stderr else "Unknown error"
                raise RuntimeError(f"aplay failed: {error_msg}")

    # Blocking subprocess work runs in the default thread-pool executor so the
    # event loop stays responsive.
    await loop.run_in_executor(None, _run_playback)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
reachys_brain/routes/voice/__init__.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Voice settings management for the iOS Bridge.
|
| 2 |
+
|
| 3 |
+
This package provides voice, language, and VAD configuration:
|
| 4 |
+
- constants: Voice definitions, languages, and database keys
|
| 5 |
+
- settings: VoiceSettings class with database persistence
|
| 6 |
+
- vad: VAD presets for different environments
|
| 7 |
+
- samples: Voice sample phrases for testing
|
| 8 |
+
- playback: Audio playback utilities
|
| 9 |
+
- endpoints: FastAPI REST endpoints
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
from .constants import (
|
| 13 |
+
OPENAI_VOICES,
|
| 14 |
+
SUPPORTED_LANGUAGES,
|
| 15 |
+
)
|
| 16 |
+
from .settings import (
|
| 17 |
+
VoiceSettings,
|
| 18 |
+
get_voice_settings,
|
| 19 |
+
init_voice_settings,
|
| 20 |
+
get_current_voice,
|
| 21 |
+
set_current_voice,
|
| 22 |
+
get_preferred_language,
|
| 23 |
+
set_preferred_language,
|
| 24 |
+
get_kids_mode_enabled,
|
| 25 |
+
set_kids_mode_enabled,
|
| 26 |
+
get_vad_threshold,
|
| 27 |
+
get_vad_silence_ms,
|
| 28 |
+
get_vad_prefix_ms,
|
| 29 |
+
set_vad_settings_callback,
|
| 30 |
+
)
|
| 31 |
+
from .vad import VAD_PRESETS
|
| 32 |
+
from .samples import get_voice_sample_phrase
|
| 33 |
+
from .endpoints import router
|
| 34 |
+
|
| 35 |
+
__all__ = [
|
| 36 |
+
# Router
|
| 37 |
+
"router",
|
| 38 |
+
|
| 39 |
+
# Constants
|
| 40 |
+
"OPENAI_VOICES",
|
| 41 |
+
"SUPPORTED_LANGUAGES",
|
| 42 |
+
"VAD_PRESETS",
|
| 43 |
+
|
| 44 |
+
# Settings class
|
| 45 |
+
"VoiceSettings",
|
| 46 |
+
"get_voice_settings",
|
| 47 |
+
"init_voice_settings",
|
| 48 |
+
|
| 49 |
+
# Voice functions
|
| 50 |
+
"get_current_voice",
|
| 51 |
+
"set_current_voice",
|
| 52 |
+
|
| 53 |
+
# Language functions
|
| 54 |
+
"get_preferred_language",
|
| 55 |
+
"set_preferred_language",
|
| 56 |
+
|
| 57 |
+
# Kids mode functions
|
| 58 |
+
"get_kids_mode_enabled",
|
| 59 |
+
"set_kids_mode_enabled",
|
| 60 |
+
|
| 61 |
+
# VAD functions
|
| 62 |
+
"get_vad_threshold",
|
| 63 |
+
"get_vad_silence_ms",
|
| 64 |
+
"get_vad_prefix_ms",
|
| 65 |
+
"set_vad_settings_callback",
|
| 66 |
+
|
| 67 |
+
# Sample utilities
|
| 68 |
+
"get_voice_sample_phrase",
|
| 69 |
+
]
|
| 70 |
+
|
reachys_brain/routes/voice/constants.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Constants for voice configuration.
|
| 2 |
+
|
| 3 |
+
Contains OpenAI voice definitions, supported languages, and database keys.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
# Database keys for voice settings
|
| 7 |
+
VOICE_ID_KEY = "voice_id"
|
| 8 |
+
PREFERRED_LANGUAGE_KEY = "preferred_language"
|
| 9 |
+
KIDS_MODE_KEY = "kids_mode_enabled"
|
| 10 |
+
|
| 11 |
+
# Database keys for VAD (Voice Activity Detection) settings
|
| 12 |
+
VAD_THRESHOLD_KEY = "vad_threshold"
|
| 13 |
+
VAD_SILENCE_MS_KEY = "vad_silence_ms"
|
| 14 |
+
VAD_PREFIX_MS_KEY = "vad_prefix_ms"
|
| 15 |
+
|
| 16 |
+
# Default VAD values
|
| 17 |
+
DEFAULT_VAD_THRESHOLD = 0.8
|
| 18 |
+
DEFAULT_VAD_SILENCE_MS = 800
|
| 19 |
+
DEFAULT_VAD_PREFIX_MS = 300
|
| 20 |
+
|
| 21 |
+
# OpenAI TTS API endpoint
|
| 22 |
+
OPENAI_TTS_URL = "https://api.openai.com/v1/audio/speech"
|
| 23 |
+
|
| 24 |
+
# Reachy audio device
|
| 25 |
+
AUDIO_DEVICE = "plug:reachymini_audio_sink"
|
| 26 |
+
|
| 27 |
+
# Available OpenAI Realtime voices with their characteristics
|
| 28 |
+
OPENAI_VOICES = [
|
| 29 |
+
{
|
| 30 |
+
"id": "alloy",
|
| 31 |
+
"name": "Alloy",
|
| 32 |
+
"description": "Neutral, balanced voice",
|
| 33 |
+
"gender": "neutral",
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"id": "ash",
|
| 37 |
+
"name": "Ash",
|
| 38 |
+
"description": "Soft, warm voice",
|
| 39 |
+
"gender": "male",
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"id": "ballad",
|
| 43 |
+
"name": "Ballad",
|
| 44 |
+
"description": "Expressive, storytelling voice",
|
| 45 |
+
"gender": "male",
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"id": "coral",
|
| 49 |
+
"name": "Coral",
|
| 50 |
+
"description": "Clear, friendly voice",
|
| 51 |
+
"gender": "female",
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"id": "echo",
|
| 55 |
+
"name": "Echo",
|
| 56 |
+
"description": "Deep, resonant voice",
|
| 57 |
+
"gender": "male",
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"id": "sage",
|
| 61 |
+
"name": "Sage",
|
| 62 |
+
"description": "Calm, wise voice",
|
| 63 |
+
"gender": "female",
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"id": "shimmer",
|
| 67 |
+
"name": "Shimmer",
|
| 68 |
+
"description": "Bright, energetic voice",
|
| 69 |
+
"gender": "female",
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"id": "verse",
|
| 73 |
+
"name": "Verse",
|
| 74 |
+
"description": "Dynamic, engaging voice",
|
| 75 |
+
"gender": "male",
|
| 76 |
+
},
|
| 77 |
+
]
|
| 78 |
+
|
| 79 |
+
# Supported languages with display names
|
| 80 |
+
SUPPORTED_LANGUAGES = [
|
| 81 |
+
{"code": "en", "name": "English", "flag": "🇺🇸"},
|
| 82 |
+
{"code": "nl", "name": "Dutch", "flag": "🇳🇱"},
|
| 83 |
+
{"code": "de", "name": "German", "flag": "🇩🇪"},
|
| 84 |
+
{"code": "fr", "name": "French", "flag": "🇫🇷"},
|
| 85 |
+
{"code": "es", "name": "Spanish", "flag": "🇪🇸"},
|
| 86 |
+
{"code": "it", "name": "Italian", "flag": "🇮🇹"},
|
| 87 |
+
{"code": "pt", "name": "Portuguese", "flag": "🇵🇹"},
|
| 88 |
+
{"code": "ja", "name": "Japanese", "flag": "🇯🇵"},
|
| 89 |
+
{"code": "ko", "name": "Korean", "flag": "🇰🇷"},
|
| 90 |
+
{"code": "zh", "name": "Chinese", "flag": "🇨🇳"},
|
| 91 |
+
{"code": "ar", "name": "Arabic", "flag": "🇸🇦"},
|
| 92 |
+
{"code": "hi", "name": "Hindi", "flag": "🇮🇳"},
|
| 93 |
+
{"code": "ru", "name": "Russian", "flag": "🇷🇺"},
|
| 94 |
+
{"code": "pl", "name": "Polish", "flag": "🇵🇱"},
|
| 95 |
+
{"code": "tr", "name": "Turkish", "flag": "🇹🇷"},
|
| 96 |
+
{"code": "sv", "name": "Swedish", "flag": "🇸🇪"},
|
| 97 |
+
{"code": "da", "name": "Danish", "flag": "🇩🇰"},
|
| 98 |
+
{"code": "no", "name": "Norwegian", "flag": "🇳🇴"},
|
| 99 |
+
{"code": "fi", "name": "Finnish", "flag": "🇫🇮"},
|
| 100 |
+
]
|
| 101 |
+
|
reachys_brain/routes/voice/endpoints.py
ADDED
|
@@ -0,0 +1,250 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""FastAPI endpoints for voice settings.
|
| 2 |
+
|
| 3 |
+
Provides REST API endpoints for managing voice, language, and VAD settings.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
from typing import Optional
|
| 8 |
+
|
| 9 |
+
from fastapi import APIRouter, HTTPException
|
| 10 |
+
from pydantic import BaseModel
|
| 11 |
+
|
| 12 |
+
from .constants import OPENAI_VOICES, SUPPORTED_LANGUAGES
|
| 13 |
+
from .vad import VAD_PRESETS
|
| 14 |
+
from .samples import get_voice_sample_phrase
|
| 15 |
+
from .settings import (
|
| 16 |
+
get_voice_settings,
|
| 17 |
+
get_current_voice,
|
| 18 |
+
set_current_voice,
|
| 19 |
+
get_preferred_language,
|
| 20 |
+
set_preferred_language,
|
| 21 |
+
get_kids_mode_enabled,
|
| 22 |
+
set_kids_mode_enabled,
|
| 23 |
+
)
|
| 24 |
+
from .playback import test_voice
|
| 25 |
+
|
| 26 |
+
logger = logging.getLogger(__name__)
|
| 27 |
+
|
| 28 |
+
router = APIRouter(prefix="/voice", tags=["Voice"])
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# MARK: - Request/Response Models
|
| 32 |
+
|
| 33 |
+
class SetVoiceRequest(BaseModel):
    """Request to set the OpenAI voice."""
    voice_id: str  # id of a voice listed in OPENAI_VOICES (e.g. "alloy")


class SetVoiceResponse(BaseModel):
    """Response from setting the voice."""
    success: bool
    voice_id: str  # the voice id that is now active


class SetLanguageRequest(BaseModel):
    """Request to set preferred language."""
    language: str  # language code from SUPPORTED_LANGUAGES (e.g. "en")


class SetLanguageResponse(BaseModel):
    """Response from setting language."""
    success: bool
    language: str  # the language code that is now active


class TestVoiceRequest(BaseModel):
    """Request to test a voice with sample text."""
    voice_id: str  # voice to audition; must exist in OPENAI_VOICES
    language: str = "en"  # language of the sample phrase
    pitch_shift: float = 0  # semitones; 0 means play unmodified


class TestVoiceResponse(BaseModel):
    """Response from voice test."""
    success: bool  # True if the sample was generated and played
    voice_id: str
    language: str


class SetKidsModeRequest(BaseModel):
    """Request to set kids mode."""
    enabled: bool


class SetKidsModeResponse(BaseModel):
    """Response from setting kids mode."""
    success: bool
    enabled: bool


class VADSettingsRequest(BaseModel):
    """Request to set VAD settings.

    Fields left as None are not changed (partial update).
    """
    threshold: Optional[float] = None  # VAD sensitivity (see DEFAULT_VAD_THRESHOLD)
    silence_ms: Optional[int] = None  # silence duration in ms (see DEFAULT_VAD_SILENCE_MS)
    prefix_ms: Optional[int] = None  # prefix padding in ms (see DEFAULT_VAD_PREFIX_MS)


class VADSettingsResponse(BaseModel):
    """Response with current VAD settings."""
    threshold: float
    silence_ms: int
    prefix_ms: int
|
| 93 |
+
|
| 94 |
+
# MARK: - Voice Endpoints
|
| 95 |
+
|
| 96 |
+
@router.get("/voices")
async def get_voices():
    """List the available OpenAI voices together with the active selection."""
    return {
        "voices": OPENAI_VOICES,
        "current_voice": get_current_voice(),
    }
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
@router.get("/current")
async def get_current_voice_endpoint():
    """Return the identifier of the currently selected voice."""
    voice_id = get_current_voice()
    return {"voice_id": voice_id}
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
@router.post("/set", response_model=SetVoiceResponse)
async def set_voice(request: SetVoiceRequest):
    """Persist the requested OpenAI voice as the active voice."""
    # set_current_voice reports False for ids it does not recognise.
    if not set_current_voice(request.voice_id):
        raise HTTPException(status_code=400, detail=f"Invalid voice ID: {request.voice_id}")
    return SetVoiceResponse(success=True, voice_id=request.voice_id)
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
@router.post("/test", response_model=TestVoiceResponse)
async def test_voice_endpoint(request: TestVoiceRequest):
    """Audition a voice by playing a sample phrase on the robot."""
    # Reject ids that are not in the known voice catalogue.
    if not any(v["id"] == request.voice_id for v in OPENAI_VOICES):
        raise HTTPException(status_code=400, detail=f"Invalid voice ID: {request.voice_id}")

    # Pick a localized sample phrase and play it with the requested pitch.
    sample_text = get_voice_sample_phrase(request.voice_id, request.language)
    played = await test_voice(request.voice_id, sample_text, request.pitch_shift)

    return TestVoiceResponse(
        success=played,
        voice_id=request.voice_id,
        language=request.language,
    )
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
# MARK: - Language Endpoints
|
| 146 |
+
|
| 147 |
+
@router.get("/languages")
async def get_languages():
    """Return the supported languages plus the active preference."""
    return {
        "languages": SUPPORTED_LANGUAGES,
        "current_language": get_preferred_language(),
    }
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
@router.get("/language")
async def get_language_endpoint():
    """Return the currently preferred language code."""
    language = get_preferred_language()
    return {"language": language}
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
@router.post("/language", response_model=SetLanguageResponse)
async def set_language_endpoint(request: SetLanguageRequest):
    """Persist the preferred conversation language."""
    # set_preferred_language reports False for unsupported codes.
    if not set_preferred_language(request.language):
        raise HTTPException(status_code=400, detail=f"Invalid language: {request.language}")
    return SetLanguageResponse(success=True, language=request.language)
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
# MARK: - Kids Mode Endpoints
|
| 176 |
+
|
| 177 |
+
@router.get("/kids-mode")
async def get_kids_mode():
    """Report whether kids mode is currently enabled."""
    enabled = get_kids_mode_enabled()
    return {"enabled": enabled}
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
@router.post("/kids-mode", response_model=SetKidsModeResponse)
async def set_kids_mode_endpoint(request: SetKidsModeRequest):
    """Enable or disable kids mode."""
    enabled = request.enabled
    set_kids_mode_enabled(enabled)
    return SetKidsModeResponse(success=True, enabled=enabled)
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
# MARK: - VAD Endpoints
|
| 194 |
+
|
| 195 |
+
@router.get("/vad/settings", response_model=VADSettingsResponse)
async def get_vad_settings():
    """Return the current voice-activity-detection configuration."""
    current = get_voice_settings()
    return VADSettingsResponse(
        threshold=current.vad_threshold,
        silence_ms=current.vad_silence_ms,
        prefix_ms=current.vad_prefix_ms,
    )
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
@router.post("/vad/settings", response_model=VADSettingsResponse)
async def set_vad_settings(request: VADSettingsRequest):
    """Apply any subset of the VAD parameters and return the resulting state."""
    current = get_voice_settings()
    # None fields are passed through; the settings object treats them
    # as "leave unchanged".
    current.set_vad_settings(
        threshold=request.threshold,
        silence_ms=request.silence_ms,
        prefix_ms=request.prefix_ms,
    )
    return VADSettingsResponse(
        threshold=current.vad_threshold,
        silence_ms=current.vad_silence_ms,
        prefix_ms=current.vad_prefix_ms,
    )
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
@router.get("/vad/presets")
async def get_vad_presets():
    """Return every available VAD preset."""
    presets = list(VAD_PRESETS.values())
    return {"presets": presets}
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
@router.post("/vad/preset/{preset_id}", response_model=VADSettingsResponse)
async def apply_vad_preset(preset_id: str):
    """Look up a VAD preset by id and apply it to the live settings."""
    preset = VAD_PRESETS.get(preset_id)
    if preset is None:
        raise HTTPException(status_code=404, detail=f"Unknown preset: {preset_id}")

    current = get_voice_settings()
    current.set_vad_settings(
        threshold=preset["threshold"],
        silence_ms=preset["silence_ms"],
        prefix_ms=preset["prefix_ms"],
    )

    return VADSettingsResponse(
        threshold=current.vad_threshold,
        silence_ms=current.vad_silence_ms,
        prefix_ms=current.vad_prefix_ms,
    )
|
| 250 |
+
|
reachys_brain/routes/voice/playback.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Audio playback utilities for voice testing.
|
| 2 |
+
|
| 3 |
+
Provides functions for playing audio samples through Reachy's speakers.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import asyncio
|
| 7 |
+
import logging
|
| 8 |
+
import os
|
| 9 |
+
import subprocess
|
| 10 |
+
import tempfile
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from typing import Optional
|
| 13 |
+
|
| 14 |
+
import httpx
|
| 15 |
+
|
| 16 |
+
from .constants import OPENAI_TTS_URL, AUDIO_DEVICE
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
async def generate_voice_sample(voice: str, text: str) -> Optional[bytes]:
    """Generate a voice sample using the OpenAI TTS API.

    Args:
        voice: Voice ID (e.g., 'alloy', 'coral').
        text: Text to speak.

    Returns:
        MP3 audio bytes, or None on any failure (missing key, HTTP error,
        network exception).
    """
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        logger.error("OPENAI_API_KEY not set")
        return None

    request_headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    request_body = {
        "model": "tts-1",
        "voice": voice,
        "input": text,
        "response_format": "mp3",
    }

    try:
        async with httpx.AsyncClient() as client:
            response = await client.post(
                OPENAI_TTS_URL,
                headers=request_headers,
                json=request_body,
                timeout=30.0,
            )

            if response.status_code != 200:
                logger.error(f"TTS API error: {response.status_code} - {response.text}")
                return None
            return response.content

    except Exception as e:
        # Best-effort: any transport/parse failure degrades to "no sample".
        logger.error(f"Error generating voice sample: {e}")
        return None
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def apply_pitch_shift(audio_data: bytes, semitones: float) -> Optional[bytes]:
    """Apply pitch shift to audio data.

    Uses sox to shift pitch. Positive semitones = higher pitch.

    Args:
        audio_data: MP3 audio data.
        semitones: Number of semitones to shift (positive = higher).

    Returns:
        Pitch-shifted audio data; the original data unchanged when
        ``semitones == 0`` or sox is not installed; None on failure.
    """
    if semitones == 0:
        return audio_data

    in_path: Optional[str] = None
    out_path: Optional[str] = None
    try:
        # Write the input to a temp file so sox can read it.
        with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as in_file:
            in_file.write(audio_data)
            in_path = in_file.name

        out_path = in_path.replace(".mp3", "_shifted.mp3")

        # sox's "pitch" effect takes cents: 100 cents per semitone.
        # (The original code also computed an unused frequency ratio here;
        # removed as dead code.)
        result = subprocess.run(
            ["sox", in_path, out_path, "pitch", str(int(semitones * 100))],
            capture_output=True,
            timeout=10,
        )

        if result.returncode != 0:
            logger.error(f"sox pitch shift failed: {result.stderr.decode()}")
            return None

        with open(out_path, "rb") as f:
            return f.read()

    except FileNotFoundError:
        # sox binary missing — degrade gracefully to the unshifted audio.
        logger.warning("sox not found - skipping pitch shift")
        return audio_data
    except subprocess.TimeoutExpired:
        logger.error("sox pitch shift timed out")
        return None
    except Exception as e:
        logger.error(f"Error applying pitch shift: {e}")
        return None
    finally:
        # Clean up temp files on every path; the original leaked them on the
        # FileNotFoundError/timeout/exception branches and leaked out_path
        # when sox exited non-zero.
        for path in (in_path, out_path):
            if path:
                try:
                    os.unlink(path)
                except OSError:
                    pass
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
async def play_audio_on_reachy(audio_data: bytes) -> bool:
|
| 125 |
+
"""Play audio through Reachy's speakers.
|
| 126 |
+
|
| 127 |
+
Args:
|
| 128 |
+
audio_data: MP3 audio data to play.
|
| 129 |
+
|
| 130 |
+
Returns:
|
| 131 |
+
True if playback succeeded.
|
| 132 |
+
"""
|
| 133 |
+
try:
|
| 134 |
+
# Write to temp file
|
| 135 |
+
with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as f:
|
| 136 |
+
f.write(audio_data)
|
| 137 |
+
audio_path = f.name
|
| 138 |
+
|
| 139 |
+
try:
|
| 140 |
+
# Play using aplay with Reachy's audio device
|
| 141 |
+
process = await asyncio.create_subprocess_exec(
|
| 142 |
+
"mpv",
|
| 143 |
+
"--no-video",
|
| 144 |
+
f"--audio-device=alsa/{AUDIO_DEVICE}",
|
| 145 |
+
audio_path,
|
| 146 |
+
stdout=asyncio.subprocess.DEVNULL,
|
| 147 |
+
stderr=asyncio.subprocess.DEVNULL,
|
| 148 |
+
)
|
| 149 |
+
await asyncio.wait_for(process.wait(), timeout=30.0)
|
| 150 |
+
return process.returncode == 0
|
| 151 |
+
finally:
|
| 152 |
+
# Cleanup temp file
|
| 153 |
+
os.unlink(audio_path)
|
| 154 |
+
|
| 155 |
+
except asyncio.TimeoutError:
|
| 156 |
+
logger.error("Audio playback timed out")
|
| 157 |
+
return False
|
| 158 |
+
except FileNotFoundError:
|
| 159 |
+
logger.error("mpv not found - cannot play audio")
|
| 160 |
+
return False
|
| 161 |
+
except Exception as e:
|
| 162 |
+
logger.error(f"Error playing audio: {e}")
|
| 163 |
+
return False
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
async def test_voice(voice: str, text: str, pitch_shift: float = 0) -> bool:
|
| 167 |
+
"""Test a voice by generating and playing a sample.
|
| 168 |
+
|
| 169 |
+
Args:
|
| 170 |
+
voice: Voice ID to test.
|
| 171 |
+
text: Text to speak.
|
| 172 |
+
pitch_shift: Optional pitch shift in semitones.
|
| 173 |
+
|
| 174 |
+
Returns:
|
| 175 |
+
True if test succeeded.
|
| 176 |
+
"""
|
| 177 |
+
logger.info(f"🔊 Testing voice: {voice} (pitch: {pitch_shift})")
|
| 178 |
+
|
| 179 |
+
# Generate audio
|
| 180 |
+
audio_data = await generate_voice_sample(voice, text)
|
| 181 |
+
if not audio_data:
|
| 182 |
+
return False
|
| 183 |
+
|
| 184 |
+
# Apply pitch shift if needed
|
| 185 |
+
if pitch_shift != 0:
|
| 186 |
+
audio_data = apply_pitch_shift(audio_data, pitch_shift)
|
| 187 |
+
if not audio_data:
|
| 188 |
+
return False
|
| 189 |
+
|
| 190 |
+
# Play on Reachy
|
| 191 |
+
return await play_audio_on_reachy(audio_data)
|
| 192 |
+
|
reachys_brain/routes/voice/samples.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Voice sample phrases for testing voices.
|
| 2 |
+
|
| 3 |
+
Contains sample phrases in multiple languages for voice testing.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
# Sample phrases by language for voice testing
|
| 7 |
+
VOICE_SAMPLE_PHRASES = {
|
| 8 |
+
"en": {
|
| 9 |
+
"alloy": "Hello! I'm Alloy, a balanced and versatile voice.",
|
| 10 |
+
"ash": "Hi there. I'm Ash, with a soft and warm tone.",
|
| 11 |
+
"ballad": "Greetings! I'm Ballad, ready to tell you a story.",
|
| 12 |
+
"coral": "Hey! I'm Coral, clear and friendly!",
|
| 13 |
+
"echo": "Hello. I'm Echo, with a deep and resonant voice.",
|
| 14 |
+
"sage": "Welcome. I'm Sage, calm and thoughtful.",
|
| 15 |
+
"shimmer": "Hi! I'm Shimmer, bright and full of energy!",
|
| 16 |
+
"verse": "Hello! I'm Verse, dynamic and engaging.",
|
| 17 |
+
},
|
| 18 |
+
"nl": {
|
| 19 |
+
"alloy": "Hallo! Ik ben Alloy, een evenwichtige en veelzijdige stem.",
|
| 20 |
+
"ash": "Hoi! Ik ben Ash, met een zachte en warme toon.",
|
| 21 |
+
"ballad": "Goedendag! Ik ben Ballad, klaar om je een verhaal te vertellen.",
|
| 22 |
+
"coral": "Hey! Ik ben Coral, helder en vriendelijk!",
|
| 23 |
+
"echo": "Hallo. Ik ben Echo, met een diepe en resonante stem.",
|
| 24 |
+
"sage": "Welkom. Ik ben Sage, kalm en bedachtzaam.",
|
| 25 |
+
"shimmer": "Hoi! Ik ben Shimmer, vol energie!",
|
| 26 |
+
"verse": "Hallo! Ik ben Verse, dynamisch en boeiend.",
|
| 27 |
+
},
|
| 28 |
+
"de": {
|
| 29 |
+
"alloy": "Hallo! Ich bin Alloy, eine ausgewogene und vielseitige Stimme.",
|
| 30 |
+
"ash": "Hallo! Ich bin Ash, mit einem sanften und warmen Ton.",
|
| 31 |
+
"ballad": "Guten Tag! Ich bin Ballad, bereit dir eine Geschichte zu erzählen.",
|
| 32 |
+
"coral": "Hey! Ich bin Coral, klar und freundlich!",
|
| 33 |
+
"echo": "Hallo. Ich bin Echo, mit einer tiefen und resonanten Stimme.",
|
| 34 |
+
"sage": "Willkommen. Ich bin Sage, ruhig und nachdenklich.",
|
| 35 |
+
"shimmer": "Hallo! Ich bin Shimmer, voller Energie!",
|
| 36 |
+
"verse": "Hallo! Ich bin Verse, dynamisch und fesselnd.",
|
| 37 |
+
},
|
| 38 |
+
"fr": {
|
| 39 |
+
"alloy": "Bonjour! Je suis Alloy, une voix équilibrée et polyvalente.",
|
| 40 |
+
"ash": "Salut! Je suis Ash, avec un ton doux et chaleureux.",
|
| 41 |
+
"ballad": "Bonjour! Je suis Ballad, prêt à vous raconter une histoire.",
|
| 42 |
+
"coral": "Hey! Je suis Coral, claire et amicale!",
|
| 43 |
+
"echo": "Bonjour. Je suis Echo, avec une voix profonde et résonnante.",
|
| 44 |
+
"sage": "Bienvenue. Je suis Sage, calme et réfléchi.",
|
| 45 |
+
"shimmer": "Salut! Je suis Shimmer, pleine d'énergie!",
|
| 46 |
+
"verse": "Bonjour! Je suis Verse, dynamique et captivant.",
|
| 47 |
+
},
|
| 48 |
+
"es": {
|
| 49 |
+
"alloy": "¡Hola! Soy Alloy, una voz equilibrada y versátil.",
|
| 50 |
+
"ash": "¡Hola! Soy Ash, con un tono suave y cálido.",
|
| 51 |
+
"ballad": "¡Hola! Soy Ballad, listo para contarte una historia.",
|
| 52 |
+
"coral": "¡Hey! Soy Coral, clara y amigable!",
|
| 53 |
+
"echo": "Hola. Soy Echo, con una voz profunda y resonante.",
|
| 54 |
+
"sage": "Bienvenido. Soy Sage, tranquilo y reflexivo.",
|
| 55 |
+
"shimmer": "¡Hola! Soy Shimmer, llena de energía!",
|
| 56 |
+
"verse": "¡Hola! Soy Verse, dinámico y cautivador.",
|
| 57 |
+
},
|
| 58 |
+
"it": {
|
| 59 |
+
"alloy": "Ciao! Sono Alloy, una voce equilibrata e versatile.",
|
| 60 |
+
"ash": "Ciao! Sono Ash, con un tono morbido e caldo.",
|
| 61 |
+
"ballad": "Salve! Sono Ballad, pronto a raccontarti una storia.",
|
| 62 |
+
"coral": "Hey! Sono Coral, chiara e amichevole!",
|
| 63 |
+
"echo": "Ciao. Sono Echo, con una voce profonda e risonante.",
|
| 64 |
+
"sage": "Benvenuto. Sono Sage, calmo e riflessivo.",
|
| 65 |
+
"shimmer": "Ciao! Sono Shimmer, piena di energia!",
|
| 66 |
+
"verse": "Ciao! Sono Verse, dinamico e coinvolgente.",
|
| 67 |
+
},
|
| 68 |
+
"pt": {
|
| 69 |
+
"alloy": "Olá! Eu sou Alloy, uma voz equilibrada e versátil.",
|
| 70 |
+
"ash": "Oi! Eu sou Ash, com um tom suave e caloroso.",
|
| 71 |
+
"ballad": "Olá! Eu sou Ballad, pronto para contar uma história.",
|
| 72 |
+
"coral": "Hey! Eu sou Coral, clara e amigável!",
|
| 73 |
+
"echo": "Olá. Eu sou Echo, com uma voz profunda e ressonante.",
|
| 74 |
+
"sage": "Bem-vindo. Eu sou Sage, calmo e reflexivo.",
|
| 75 |
+
"shimmer": "Oi! Eu sou Shimmer, cheia de energia!",
|
| 76 |
+
"verse": "Olá! Eu sou Verse, dinâmico e envolvente.",
|
| 77 |
+
},
|
| 78 |
+
"ja": {
|
| 79 |
+
"alloy": "こんにちは!私はAlloyです。バランスの取れた多用途な声です。",
|
| 80 |
+
"ash": "こんにちは!私はAshです。柔らかく温かみのある声です。",
|
| 81 |
+
"ballad": "こんにちは!私はBalladです。物語を語る準備ができています。",
|
| 82 |
+
"coral": "ハイ!私はCoralです。明るくフレンドリーな声です!",
|
| 83 |
+
"echo": "こんにちは。私はEchoです。深く響く声です。",
|
| 84 |
+
"sage": "ようこそ。私はSageです。穏やかで思慮深い声です。",
|
| 85 |
+
"shimmer": "こんにちは!私はShimmerです。エネルギッシュな声です!",
|
| 86 |
+
"verse": "こんにちは!私はVerseです。ダイナミックで魅力的な声です。",
|
| 87 |
+
},
|
| 88 |
+
"ko": {
|
| 89 |
+
"alloy": "안녕하세요! 저는 Alloy입니다. 균형 잡힌 다재다능한 목소리���요.",
|
| 90 |
+
"ash": "안녕하세요! 저는 Ash입니다. 부드럽고 따뜻한 톤이에요.",
|
| 91 |
+
"ballad": "안녕하세요! 저는 Ballad입니다. 이야기를 들려드릴 준비가 됐어요.",
|
| 92 |
+
"coral": "안녕! 저는 Coral이에요. 맑고 친근한 목소리예요!",
|
| 93 |
+
"echo": "안녕하세요. 저는 Echo입니다. 깊고 울림 있는 목소리예요.",
|
| 94 |
+
"sage": "환영합니다. 저는 Sage입니다. 차분하고 사려 깊은 목소리예요.",
|
| 95 |
+
"shimmer": "안녕하세요! 저는 Shimmer예요. 활기찬 목소리예요!",
|
| 96 |
+
"verse": "안녕하세요! 저는 Verse입니다. 역동적이고 매력적인 목소리예요.",
|
| 97 |
+
},
|
| 98 |
+
"zh": {
|
| 99 |
+
"alloy": "你好!我是Alloy,一个平衡且多功能的声音。",
|
| 100 |
+
"ash": "你好!我是Ash,声音柔和温暖。",
|
| 101 |
+
"ballad": "你好!我是Ballad,准备给你讲故事。",
|
| 102 |
+
"coral": "嗨!我是Coral,清晰又友好!",
|
| 103 |
+
"echo": "你好。我是Echo,声音深沉有共鸣。",
|
| 104 |
+
"sage": "欢迎。我是Sage,冷静而深思熟虑。",
|
| 105 |
+
"shimmer": "你好!我是Shimmer,充满活力!",
|
| 106 |
+
"verse": "你好!我是Verse,充满活力和魅力。",
|
| 107 |
+
},
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def get_voice_sample_phrase(voice: str, language: str) -> str:
|
| 112 |
+
"""Get a sample phrase for voice testing in the specified language.
|
| 113 |
+
|
| 114 |
+
Args:
|
| 115 |
+
voice: Voice ID (e.g., 'alloy', 'coral')
|
| 116 |
+
language: Language code (e.g., 'en', 'nl', 'de')
|
| 117 |
+
|
| 118 |
+
Returns:
|
| 119 |
+
Sample phrase in the specified language.
|
| 120 |
+
"""
|
| 121 |
+
phrases = VOICE_SAMPLE_PHRASES.get(language, VOICE_SAMPLE_PHRASES["en"])
|
| 122 |
+
return phrases.get(voice, phrases.get("alloy", f"Hello! I'm testing the {voice} voice."))
|
| 123 |
+
|
reachys_brain/routes/voice/settings.py
ADDED
|
@@ -0,0 +1,344 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Voice settings management.
|
| 2 |
+
|
| 3 |
+
Provides the VoiceSettings class for managing voice configuration including
|
| 4 |
+
voice selection, language, kids mode, and VAD settings.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import asyncio
|
| 8 |
+
import logging
|
| 9 |
+
from typing import Callable, Optional
|
| 10 |
+
|
| 11 |
+
from ...database import get_database
|
| 12 |
+
from .constants import (
|
| 13 |
+
VOICE_ID_KEY,
|
| 14 |
+
PREFERRED_LANGUAGE_KEY,
|
| 15 |
+
KIDS_MODE_KEY,
|
| 16 |
+
VAD_THRESHOLD_KEY,
|
| 17 |
+
VAD_SILENCE_MS_KEY,
|
| 18 |
+
VAD_PREFIX_MS_KEY,
|
| 19 |
+
DEFAULT_VAD_THRESHOLD,
|
| 20 |
+
DEFAULT_VAD_SILENCE_MS,
|
| 21 |
+
DEFAULT_VAD_PREFIX_MS,
|
| 22 |
+
OPENAI_VOICES,
|
| 23 |
+
SUPPORTED_LANGUAGES,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
logger = logging.getLogger(__name__)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class VoiceSettings:
|
| 30 |
+
"""Manages voice, language, and VAD settings with database persistence.
|
| 31 |
+
|
| 32 |
+
This class provides a centralized way to manage all voice-related settings.
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
def __init__(self) -> None:
|
| 36 |
+
"""Initialize with default values."""
|
| 37 |
+
self._voice_id = "coral" # Default voice
|
| 38 |
+
self._language = "en" # Default language
|
| 39 |
+
self._kids_mode_enabled = False # Default kids mode
|
| 40 |
+
self._vad_threshold = DEFAULT_VAD_THRESHOLD
|
| 41 |
+
self._vad_silence_ms = DEFAULT_VAD_SILENCE_MS
|
| 42 |
+
self._vad_prefix_ms = DEFAULT_VAD_PREFIX_MS
|
| 43 |
+
|
| 44 |
+
# Callback for VAD settings changes
|
| 45 |
+
self._vad_changed_callback: Optional[Callable[[], None]] = None
|
| 46 |
+
|
| 47 |
+
# MARK: - Voice Settings
|
| 48 |
+
|
| 49 |
+
@property
|
| 50 |
+
def voice_id(self) -> str:
|
| 51 |
+
"""Get current voice ID."""
|
| 52 |
+
return self._voice_id
|
| 53 |
+
|
| 54 |
+
def set_voice(self, voice_id: str) -> bool:
|
| 55 |
+
"""Set the current voice.
|
| 56 |
+
|
| 57 |
+
Args:
|
| 58 |
+
voice_id: OpenAI voice ID.
|
| 59 |
+
|
| 60 |
+
Returns:
|
| 61 |
+
True if voice was valid and set.
|
| 62 |
+
"""
|
| 63 |
+
valid_voices = [v["id"] for v in OPENAI_VOICES]
|
| 64 |
+
if voice_id not in valid_voices:
|
| 65 |
+
logger.warning(f"Invalid voice ID: {voice_id}")
|
| 66 |
+
return False
|
| 67 |
+
|
| 68 |
+
if voice_id != self._voice_id:
|
| 69 |
+
self._voice_id = voice_id
|
| 70 |
+
self._persist_setting(VOICE_ID_KEY, voice_id)
|
| 71 |
+
logger.info(f"🔊 Voice set to: {voice_id}")
|
| 72 |
+
|
| 73 |
+
return True
|
| 74 |
+
|
| 75 |
+
# MARK: - Language Settings
|
| 76 |
+
|
| 77 |
+
@property
|
| 78 |
+
def language(self) -> str:
|
| 79 |
+
"""Get current language code."""
|
| 80 |
+
return self._language
|
| 81 |
+
|
| 82 |
+
def set_language(self, language: str) -> bool:
|
| 83 |
+
"""Set the preferred language.
|
| 84 |
+
|
| 85 |
+
Args:
|
| 86 |
+
language: Language code (e.g., 'en', 'nl').
|
| 87 |
+
|
| 88 |
+
Returns:
|
| 89 |
+
True if language was valid and set.
|
| 90 |
+
"""
|
| 91 |
+
valid_languages = [lang["code"] for lang in SUPPORTED_LANGUAGES]
|
| 92 |
+
if language not in valid_languages:
|
| 93 |
+
logger.warning(f"Invalid language: {language}")
|
| 94 |
+
return False
|
| 95 |
+
|
| 96 |
+
if language != self._language:
|
| 97 |
+
self._language = language
|
| 98 |
+
self._persist_setting(PREFERRED_LANGUAGE_KEY, language)
|
| 99 |
+
logger.info(f"🌍 Language set to: {language}")
|
| 100 |
+
|
| 101 |
+
return True
|
| 102 |
+
|
| 103 |
+
# MARK: - Kids Mode
|
| 104 |
+
|
| 105 |
+
@property
|
| 106 |
+
def kids_mode_enabled(self) -> bool:
|
| 107 |
+
"""Get kids mode enabled state."""
|
| 108 |
+
return self._kids_mode_enabled
|
| 109 |
+
|
| 110 |
+
def set_kids_mode(self, enabled: bool) -> None:
|
| 111 |
+
"""Set kids mode enabled state.
|
| 112 |
+
|
| 113 |
+
Args:
|
| 114 |
+
enabled: Whether kids mode is enabled.
|
| 115 |
+
"""
|
| 116 |
+
if enabled != self._kids_mode_enabled:
|
| 117 |
+
self._kids_mode_enabled = enabled
|
| 118 |
+
self._persist_setting(KIDS_MODE_KEY, "true" if enabled else "false")
|
| 119 |
+
logger.info(f"👶 Kids mode: {'enabled' if enabled else 'disabled'}")
|
| 120 |
+
|
| 121 |
+
# Also update the audio playback module's kids mode state
|
| 122 |
+
from ...audio_playback import set_kids_mode as set_audio_kids_mode
|
| 123 |
+
set_audio_kids_mode(enabled)
|
| 124 |
+
|
| 125 |
+
# MARK: - VAD Settings
|
| 126 |
+
|
| 127 |
+
@property
|
| 128 |
+
def vad_threshold(self) -> float:
|
| 129 |
+
"""Get VAD threshold."""
|
| 130 |
+
return self._vad_threshold
|
| 131 |
+
|
| 132 |
+
@property
|
| 133 |
+
def vad_silence_ms(self) -> int:
|
| 134 |
+
"""Get VAD silence duration in milliseconds."""
|
| 135 |
+
return self._vad_silence_ms
|
| 136 |
+
|
| 137 |
+
@property
|
| 138 |
+
def vad_prefix_ms(self) -> int:
|
| 139 |
+
"""Get VAD prefix padding in milliseconds."""
|
| 140 |
+
return self._vad_prefix_ms
|
| 141 |
+
|
| 142 |
+
def set_vad_settings(
|
| 143 |
+
self,
|
| 144 |
+
threshold: Optional[float] = None,
|
| 145 |
+
silence_ms: Optional[int] = None,
|
| 146 |
+
prefix_ms: Optional[int] = None,
|
| 147 |
+
) -> None:
|
| 148 |
+
"""Set VAD settings.
|
| 149 |
+
|
| 150 |
+
Args:
|
| 151 |
+
threshold: VAD activation threshold (0.0 - 1.0).
|
| 152 |
+
silence_ms: Silence duration before speech end (ms).
|
| 153 |
+
prefix_ms: Prefix padding before speech start (ms).
|
| 154 |
+
"""
|
| 155 |
+
changed = False
|
| 156 |
+
|
| 157 |
+
if threshold is not None:
|
| 158 |
+
threshold = max(0.0, min(1.0, threshold))
|
| 159 |
+
if threshold != self._vad_threshold:
|
| 160 |
+
self._vad_threshold = threshold
|
| 161 |
+
self._persist_setting(VAD_THRESHOLD_KEY, str(threshold))
|
| 162 |
+
changed = True
|
| 163 |
+
|
| 164 |
+
if silence_ms is not None:
|
| 165 |
+
silence_ms = max(100, min(5000, silence_ms))
|
| 166 |
+
if silence_ms != self._vad_silence_ms:
|
| 167 |
+
self._vad_silence_ms = silence_ms
|
| 168 |
+
self._persist_setting(VAD_SILENCE_MS_KEY, str(silence_ms))
|
| 169 |
+
changed = True
|
| 170 |
+
|
| 171 |
+
if prefix_ms is not None:
|
| 172 |
+
prefix_ms = max(100, min(2000, prefix_ms))
|
| 173 |
+
if prefix_ms != self._vad_prefix_ms:
|
| 174 |
+
self._vad_prefix_ms = prefix_ms
|
| 175 |
+
self._persist_setting(VAD_PREFIX_MS_KEY, str(prefix_ms))
|
| 176 |
+
changed = True
|
| 177 |
+
|
| 178 |
+
if changed:
|
| 179 |
+
logger.info(
|
| 180 |
+
f"🎤 VAD settings updated: threshold={self._vad_threshold}, "
|
| 181 |
+
f"silence={self._vad_silence_ms}ms, prefix={self._vad_prefix_ms}ms"
|
| 182 |
+
)
|
| 183 |
+
if self._vad_changed_callback:
|
| 184 |
+
self._vad_changed_callback()
|
| 185 |
+
|
| 186 |
+
def set_vad_callback(self, callback: Optional[Callable[[], None]]) -> None:
|
| 187 |
+
"""Set callback for VAD settings changes.
|
| 188 |
+
|
| 189 |
+
Args:
|
| 190 |
+
callback: Function to call when VAD settings change.
|
| 191 |
+
"""
|
| 192 |
+
self._vad_changed_callback = callback
|
| 193 |
+
|
| 194 |
+
# MARK: - Persistence
|
| 195 |
+
|
| 196 |
+
def _persist_setting(self, key: str, value: str) -> None:
|
| 197 |
+
"""Persist a setting to the database asynchronously."""
|
| 198 |
+
asyncio.create_task(self._persist_setting_async(key, value))
|
| 199 |
+
|
| 200 |
+
async def _persist_setting_async(self, key: str, value: str) -> None:
|
| 201 |
+
"""Persist a setting to the database."""
|
| 202 |
+
try:
|
| 203 |
+
db = get_database()
|
| 204 |
+
await db.set_user_setting(key, value)
|
| 205 |
+
except Exception as e:
|
| 206 |
+
logger.error(f"Failed to persist setting {key}: {e}")
|
| 207 |
+
|
| 208 |
+
async def load_from_database(self) -> None:
|
| 209 |
+
"""Load settings from database."""
|
| 210 |
+
try:
|
| 211 |
+
db = get_database()
|
| 212 |
+
|
| 213 |
+
# Load voice ID
|
| 214 |
+
voice_id = await db.get_user_setting(VOICE_ID_KEY)
|
| 215 |
+
if voice_id:
|
| 216 |
+
valid_voices = [v["id"] for v in OPENAI_VOICES]
|
| 217 |
+
if voice_id in valid_voices:
|
| 218 |
+
self._voice_id = voice_id
|
| 219 |
+
logger.info(f"📀 Loaded voice ID from database: {voice_id}")
|
| 220 |
+
|
| 221 |
+
# Load language
|
| 222 |
+
language = await db.get_user_setting(PREFERRED_LANGUAGE_KEY)
|
| 223 |
+
if language:
|
| 224 |
+
valid_languages = [lang["code"] for lang in SUPPORTED_LANGUAGES]
|
| 225 |
+
if language in valid_languages:
|
| 226 |
+
self._language = language
|
| 227 |
+
logger.info(f"📀 Loaded language from database: {language}")
|
| 228 |
+
|
| 229 |
+
# Load kids mode
|
| 230 |
+
kids_mode = await db.get_user_setting(KIDS_MODE_KEY)
|
| 231 |
+
if kids_mode:
|
| 232 |
+
self._kids_mode_enabled = kids_mode.lower() == "true"
|
| 233 |
+
logger.info(f"📀 Loaded kids mode from database: {self._kids_mode_enabled}")
|
| 234 |
+
|
| 235 |
+
# Sync audio_playback module
|
| 236 |
+
from ...audio_playback import set_kids_mode as set_audio_kids_mode
|
| 237 |
+
set_audio_kids_mode(self._kids_mode_enabled)
|
| 238 |
+
|
| 239 |
+
# Load VAD settings
|
| 240 |
+
threshold_str = await db.get_user_setting(VAD_THRESHOLD_KEY)
|
| 241 |
+
if threshold_str:
|
| 242 |
+
try:
|
| 243 |
+
self._vad_threshold = float(threshold_str)
|
| 244 |
+
logger.info(f"📀 Loaded VAD threshold: {self._vad_threshold}")
|
| 245 |
+
except ValueError:
|
| 246 |
+
pass
|
| 247 |
+
|
| 248 |
+
silence_str = await db.get_user_setting(VAD_SILENCE_MS_KEY)
|
| 249 |
+
if silence_str:
|
| 250 |
+
try:
|
| 251 |
+
self._vad_silence_ms = int(silence_str)
|
| 252 |
+
logger.info(f"📀 Loaded VAD silence: {self._vad_silence_ms}ms")
|
| 253 |
+
except ValueError:
|
| 254 |
+
pass
|
| 255 |
+
|
| 256 |
+
prefix_str = await db.get_user_setting(VAD_PREFIX_MS_KEY)
|
| 257 |
+
if prefix_str:
|
| 258 |
+
try:
|
| 259 |
+
self._vad_prefix_ms = int(prefix_str)
|
| 260 |
+
logger.info(f"📀 Loaded VAD prefix: {self._vad_prefix_ms}ms")
|
| 261 |
+
except ValueError:
|
| 262 |
+
pass
|
| 263 |
+
|
| 264 |
+
except Exception as e:
|
| 265 |
+
logger.error(f"Failed to load settings from database: {e}")
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
# Global settings instance
|
| 269 |
+
_voice_settings: Optional[VoiceSettings] = None
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
def get_voice_settings() -> VoiceSettings:
|
| 273 |
+
"""Get the global VoiceSettings instance.
|
| 274 |
+
|
| 275 |
+
Returns:
|
| 276 |
+
The VoiceSettings singleton.
|
| 277 |
+
"""
|
| 278 |
+
global _voice_settings
|
| 279 |
+
if _voice_settings is None:
|
| 280 |
+
_voice_settings = VoiceSettings()
|
| 281 |
+
return _voice_settings
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
async def init_voice_settings() -> VoiceSettings:
|
| 285 |
+
"""Initialize voice settings from database.
|
| 286 |
+
|
| 287 |
+
Returns:
|
| 288 |
+
The initialized VoiceSettings instance.
|
| 289 |
+
"""
|
| 290 |
+
settings = get_voice_settings()
|
| 291 |
+
await settings.load_from_database()
|
| 292 |
+
return settings
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
# Convenience functions
|
| 296 |
+
def get_current_voice() -> str:
|
| 297 |
+
"""Get the current voice ID."""
|
| 298 |
+
return get_voice_settings().voice_id
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
def set_current_voice(voice_id: str) -> bool:
|
| 302 |
+
"""Set the current voice ID."""
|
| 303 |
+
return get_voice_settings().set_voice(voice_id)
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
def get_preferred_language() -> str:
|
| 307 |
+
"""Get the preferred language code."""
|
| 308 |
+
return get_voice_settings().language
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def set_preferred_language(language: str) -> bool:
|
| 312 |
+
"""Set the preferred language."""
|
| 313 |
+
return get_voice_settings().set_language(language)
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
def get_kids_mode_enabled() -> bool:
|
| 317 |
+
"""Get kids mode enabled state."""
|
| 318 |
+
return get_voice_settings().kids_mode_enabled
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
def set_kids_mode_enabled(enabled: bool) -> None:
|
| 322 |
+
"""Set kids mode enabled state."""
|
| 323 |
+
get_voice_settings().set_kids_mode(enabled)
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
def get_vad_threshold() -> float:
|
| 327 |
+
"""Get VAD threshold."""
|
| 328 |
+
return get_voice_settings().vad_threshold
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
def get_vad_silence_ms() -> int:
|
| 332 |
+
"""Get VAD silence duration in milliseconds."""
|
| 333 |
+
return get_voice_settings().vad_silence_ms
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
def get_vad_prefix_ms() -> int:
|
| 337 |
+
"""Get VAD prefix padding in milliseconds."""
|
| 338 |
+
return get_voice_settings().vad_prefix_ms
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def set_vad_settings_callback(callback: Optional[Callable[[], None]]) -> None:
|
| 342 |
+
"""Set callback for VAD settings changes."""
|
| 343 |
+
get_voice_settings().set_vad_callback(callback)
|
| 344 |
+
|
reachys_brain/routes/voice/vad.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""VAD (Voice Activity Detection) settings and presets.
|
| 2 |
+
|
| 3 |
+
Provides VAD presets for different environments and helper functions.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
# VAD Presets for different environments
|
| 7 |
+
VAD_PRESETS = {
|
| 8 |
+
"quiet_room": {
|
| 9 |
+
"id": "quiet_room",
|
| 10 |
+
"name": "Quiet Room",
|
| 11 |
+
"description": "More sensitive, responds faster. Best for quiet environments.",
|
| 12 |
+
"threshold": 0.5,
|
| 13 |
+
"silence_ms": 500,
|
| 14 |
+
"prefix_ms": 300,
|
| 15 |
+
},
|
| 16 |
+
"normal": {
|
| 17 |
+
"id": "normal",
|
| 18 |
+
"name": "Normal",
|
| 19 |
+
"description": "Balanced settings for typical use.",
|
| 20 |
+
"threshold": 0.8,
|
| 21 |
+
"silence_ms": 800,
|
| 22 |
+
"prefix_ms": 300,
|
| 23 |
+
},
|
| 24 |
+
"noisy": {
|
| 25 |
+
"id": "noisy",
|
| 26 |
+
"name": "Noisy Environment",
|
| 27 |
+
"description": "Less sensitive, ignores background noise better.",
|
| 28 |
+
"threshold": 0.85,
|
| 29 |
+
"silence_ms": 1000,
|
| 30 |
+
"prefix_ms": 400,
|
| 31 |
+
},
|
| 32 |
+
"conference": {
|
| 33 |
+
"id": "conference",
|
| 34 |
+
"name": "Conference Room",
|
| 35 |
+
"description": "Very strict, ignores most background voices.",
|
| 36 |
+
"threshold": 0.9,
|
| 37 |
+
"silence_ms": 1200,
|
| 38 |
+
"prefix_ms": 500,
|
| 39 |
+
},
|
| 40 |
+
}
|
| 41 |
+
|
reachys_brain/tools/datetime_tool.py
CHANGED
|
@@ -20,12 +20,9 @@ DATETIME_TOOL_DEFINITION = {
|
|
| 20 |
"type": "function",
|
| 21 |
"name": "get_current_datetime",
|
| 22 |
"description": (
|
| 23 |
-
"Get
|
| 24 |
-
"
|
| 25 |
-
"
|
| 26 |
-
"Call this at the start of conversations to greet the user appropriately. "
|
| 27 |
-
"IMPORTANT: Do NOT ask for a timezone - just call this tool without parameters. "
|
| 28 |
-
"The timezone is automatically determined from the user's saved preferred location."
|
| 29 |
),
|
| 30 |
"parameters": {
|
| 31 |
"type": "object",
|
|
|
|
| 20 |
"type": "function",
|
| 21 |
"name": "get_current_datetime",
|
| 22 |
"description": (
|
| 23 |
+
"Get current date/time for greetings or time queries. "
|
| 24 |
+
"No announcement needed - just use directly. "
|
| 25 |
+
"Don't ask for timezone - it's auto-detected from user's country."
|
|
|
|
|
|
|
|
|
|
| 26 |
),
|
| 27 |
"parameters": {
|
| 28 |
"type": "object",
|
reachys_brain/tools/reminders.py
CHANGED
|
@@ -58,9 +58,8 @@ ADD_REMINDER_TOOL_DEFINITION = {
|
|
| 58 |
"type": "function",
|
| 59 |
"name": "add_reminder",
|
| 60 |
"description": (
|
| 61 |
-
"Add a
|
| 62 |
-
"
|
| 63 |
-
"The reminder will appear in their default Reminders list."
|
| 64 |
),
|
| 65 |
"parameters": {
|
| 66 |
"type": "object",
|
|
|
|
| 58 |
"type": "function",
|
| 59 |
"name": "add_reminder",
|
| 60 |
"description": (
|
| 61 |
+
"Add a reminder to user's iOS Reminders. Quick tool - no announcement needed. "
|
| 62 |
+
"Just add it and confirm it was added."
|
|
|
|
| 63 |
),
|
| 64 |
"parameters": {
|
| 65 |
"type": "object",
|