Spaces:
Running
Running
Deploy newest version
Browse files- features.html +1 -0
- pyproject.toml +1 -0
- reachys_brain/audio_capture.py +5 -0
- reachys_brain/audio_playback.py +43 -6
- reachys_brain/openai_realtime.py +130 -14
- reachys_brain/routes/audio_manager.py +25 -2
- reachys_brain/routes/audio_stream_manager.py +3 -1
- reachys_brain/routes/conversation.py +170 -17
- reachys_brain/routes/conversation_services.py +47 -6
- reachys_brain/routes/games/helpers.py +10 -1
- reachys_brain/routes/games/models.py +9 -0
- reachys_brain/routes/games/tamareachy.py +79 -11
- reachys_brain/routes/openai_config.py +35 -3
- reachys_brain/routes/power.py +84 -41
- reachys_brain/routes/voice.py +369 -2
- reachys_brain/server.py +26 -6
- reachys_brain/tools/tamareachy_engine.py +259 -17
- reachys_brain/tools/tamareachy_translations.py +952 -0
- reachys_brain/tts_service.py +97 -18
features.html
CHANGED
|
@@ -569,3 +569,4 @@
|
|
| 569 |
</body>
|
| 570 |
</html>
|
| 571 |
|
|
|
|
|
|
| 569 |
</body>
|
| 570 |
</html>
|
| 571 |
|
| 572 |
+
|
pyproject.toml
CHANGED
|
@@ -23,6 +23,7 @@ dependencies = [
|
|
| 23 |
"sounddevice>=0.4.6",
|
| 24 |
"aiosqlite>=0.19.0",
|
| 25 |
"aiohttp>=3.9.0",
|
|
|
|
| 26 |
# Vision uses reachy_mini SDK (WebRTC) and reachy_mini_toolbox (MediaPipe face detection)
|
| 27 |
# These should already be available in the apps_venv environment
|
| 28 |
]
|
|
|
|
| 23 |
"sounddevice>=0.4.6",
|
| 24 |
"aiosqlite>=0.19.0",
|
| 25 |
"aiohttp>=3.9.0",
|
| 26 |
+
"openai>=1.0.0", # Required for meetings transcription and vision tools
|
| 27 |
# Vision uses reachy_mini SDK (WebRTC) and reachy_mini_toolbox (MediaPipe face detection)
|
| 28 |
# These should already be available in the apps_venv environment
|
| 29 |
]
|
reachys_brain/audio_capture.py
CHANGED
|
@@ -335,6 +335,11 @@ class AsyncAudioCaptureService:
|
|
| 335 |
"""Check if currently capturing."""
|
| 336 |
return self._capture.is_capturing
|
| 337 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 338 |
async def start_capture(self) -> bool:
|
| 339 |
"""Start capturing audio."""
|
| 340 |
self._loop = asyncio.get_event_loop()
|
|
|
|
| 335 |
"""Check if currently capturing."""
|
| 336 |
return self._capture.is_capturing
|
| 337 |
|
| 338 |
+
@property
|
| 339 |
+
def is_paused(self) -> bool:
|
| 340 |
+
"""Check if capture is paused."""
|
| 341 |
+
return self._capture.is_paused
|
| 342 |
+
|
| 343 |
async def start_capture(self) -> bool:
|
| 344 |
"""Start capturing audio."""
|
| 345 |
self._loop = asyncio.get_event_loop()
|
reachys_brain/audio_playback.py
CHANGED
|
@@ -19,10 +19,17 @@ SAMPLE_WIDTH = 2 # 16-bit = 2 bytes
|
|
| 19 |
AUDIO_DEVICE = "plug:reachymini_audio_sink"
|
| 20 |
|
| 21 |
# Playback timing
|
| 22 |
-
STOP_DELAY_SECONDS =
|
| 23 |
|
| 24 |
-
# Kids mode
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
|
| 27 |
|
| 28 |
# Global kids mode state
|
|
@@ -169,7 +176,8 @@ class AudioPlaybackService:
|
|
| 169 |
"-", # Input from stdin
|
| 170 |
"-t", "raw", # Output format: raw
|
| 171 |
"-", # Output to stdout
|
| 172 |
-
"pitch", str(KIDS_MODE_PITCH_CENTS),
|
|
|
|
| 173 |
]
|
| 174 |
|
| 175 |
# Start sox process (reads from stdin, outputs to stdout)
|
|
@@ -275,12 +283,22 @@ class StreamingAudioPlayer:
|
|
| 275 |
"""Initialize the streaming player."""
|
| 276 |
self._playback = AudioPlaybackService()
|
| 277 |
self._is_stream_active = False
|
|
|
|
|
|
|
| 278 |
|
| 279 |
@property
|
| 280 |
def is_playing(self) -> bool:
|
| 281 |
"""Check if audio stream is active."""
|
| 282 |
return self._is_stream_active
|
| 283 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 284 |
def start_stream(self) -> None:
|
| 285 |
"""Start a new audio stream.
|
| 286 |
|
|
@@ -289,6 +307,8 @@ class StreamingAudioPlayer:
|
|
| 289 |
if not self._is_stream_active:
|
| 290 |
self._playback.start_playback()
|
| 291 |
self._is_stream_active = True
|
|
|
|
|
|
|
| 292 |
logger.info("Started audio stream")
|
| 293 |
|
| 294 |
def stop_stream(self) -> None:
|
|
@@ -297,13 +317,26 @@ class StreamingAudioPlayer:
|
|
| 297 |
Call this when OpenAI stops sending audio.
|
| 298 |
"""
|
| 299 |
if self._is_stream_active:
|
| 300 |
-
#
|
| 301 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 302 |
|
| 303 |
def _stop_delayed(self) -> None:
|
| 304 |
"""Delayed stop to allow audio buffer to drain."""
|
| 305 |
self._playback.stop_playback()
|
| 306 |
self._is_stream_active = False
|
|
|
|
|
|
|
| 307 |
logger.info("Stopped audio stream")
|
| 308 |
|
| 309 |
def play_chunk(self, audio_data: bytes) -> None:
|
|
@@ -314,10 +347,14 @@ class StreamingAudioPlayer:
|
|
| 314 |
Args:
|
| 315 |
audio_data: PCM16 audio data at 24kHz mono.
|
| 316 |
"""
|
|
|
|
|
|
|
| 317 |
if not self._is_stream_active:
|
| 318 |
self.start_stream()
|
| 319 |
|
| 320 |
self._playback.enqueue_audio(audio_data)
|
|
|
|
|
|
|
| 321 |
|
| 322 |
def cancel(self) -> None:
|
| 323 |
"""Immediately stop playback and clear queue."""
|
|
|
|
| 19 |
AUDIO_DEVICE = "plug:reachymini_audio_sink"
|
| 20 |
|
| 21 |
# Playback timing
|
| 22 |
+
STOP_DELAY_SECONDS = 1.5 # Delay before stopping to allow buffer to drain (increased for longer sentences)
|
| 23 |
|
| 24 |
+
# Kids mode audio effects.
|
| 25 |
+
#
|
| 26 |
+
# NOTE: A pure pitch shift can still sound like the same adult voice (just higher).
|
| 27 |
+
# Adding a small tempo increase makes it much more perceptibly "kid-like".
|
| 28 |
+
# Units:
|
| 29 |
+
# - pitch: cents (1200 cents = 1 octave)
|
| 30 |
+
# - tempo: multiplier (1.10 = 10% faster)
|
| 31 |
+
KIDS_MODE_PITCH_CENTS = 900
|
| 32 |
+
KIDS_MODE_TEMPO = 1.12
|
| 33 |
|
| 34 |
|
| 35 |
# Global kids mode state
|
|
|
|
| 176 |
"-", # Input from stdin
|
| 177 |
"-t", "raw", # Output format: raw
|
| 178 |
"-", # Output to stdout
|
| 179 |
+
"pitch", str(KIDS_MODE_PITCH_CENTS),
|
| 180 |
+
"tempo", str(KIDS_MODE_TEMPO),
|
| 181 |
]
|
| 182 |
|
| 183 |
# Start sox process (reads from stdin, outputs to stdout)
|
|
|
|
| 283 |
"""Initialize the streaming player."""
|
| 284 |
self._playback = AudioPlaybackService()
|
| 285 |
self._is_stream_active = False
|
| 286 |
+
self._last_chunk_time = 0.0 # Track when last audio chunk was received
|
| 287 |
+
self._chunks_received = 0 # Count chunks for timing estimation
|
| 288 |
|
| 289 |
@property
|
| 290 |
def is_playing(self) -> bool:
|
| 291 |
"""Check if audio stream is active."""
|
| 292 |
return self._is_stream_active
|
| 293 |
|
| 294 |
+
@property
|
| 295 |
+
def time_since_last_chunk(self) -> float:
|
| 296 |
+
"""Get time in seconds since the last audio chunk was received."""
|
| 297 |
+
import time
|
| 298 |
+
if self._last_chunk_time == 0:
|
| 299 |
+
return 0.0
|
| 300 |
+
return time.time() - self._last_chunk_time
|
| 301 |
+
|
| 302 |
def start_stream(self) -> None:
|
| 303 |
"""Start a new audio stream.
|
| 304 |
|
|
|
|
| 307 |
if not self._is_stream_active:
|
| 308 |
self._playback.start_playback()
|
| 309 |
self._is_stream_active = True
|
| 310 |
+
self._chunks_received = 0
|
| 311 |
+
self._last_chunk_time = 0.0
|
| 312 |
logger.info("Started audio stream")
|
| 313 |
|
| 314 |
def stop_stream(self) -> None:
|
|
|
|
| 317 |
Call this when OpenAI stops sending audio.
|
| 318 |
"""
|
| 319 |
if self._is_stream_active:
|
| 320 |
+
# Calculate dynamic delay based on chunks received
|
| 321 |
+
# Each chunk is roughly 100ms of audio, add extra buffer time
|
| 322 |
+
# Minimum delay is STOP_DELAY_SECONDS, but scale up for longer responses
|
| 323 |
+
import time
|
| 324 |
+
base_delay = STOP_DELAY_SECONDS
|
| 325 |
+
|
| 326 |
+
# Add extra delay proportional to the number of chunks
|
| 327 |
+
# This helps ensure longer responses have time to fully play
|
| 328 |
+
chunk_based_delay = min(self._chunks_received * 0.02, 2.0) # Cap at 2 extra seconds
|
| 329 |
+
total_delay = base_delay + chunk_based_delay
|
| 330 |
+
|
| 331 |
+
logger.info(f"🔊 Stopping audio stream in {total_delay:.1f}s (received {self._chunks_received} chunks)")
|
| 332 |
+
threading.Timer(total_delay, self._stop_delayed).start()
|
| 333 |
|
| 334 |
def _stop_delayed(self) -> None:
|
| 335 |
"""Delayed stop to allow audio buffer to drain."""
|
| 336 |
self._playback.stop_playback()
|
| 337 |
self._is_stream_active = False
|
| 338 |
+
self._chunks_received = 0
|
| 339 |
+
self._last_chunk_time = 0.0
|
| 340 |
logger.info("Stopped audio stream")
|
| 341 |
|
| 342 |
def play_chunk(self, audio_data: bytes) -> None:
|
|
|
|
| 347 |
Args:
|
| 348 |
audio_data: PCM16 audio data at 24kHz mono.
|
| 349 |
"""
|
| 350 |
+
import time
|
| 351 |
+
|
| 352 |
if not self._is_stream_active:
|
| 353 |
self.start_stream()
|
| 354 |
|
| 355 |
self._playback.enqueue_audio(audio_data)
|
| 356 |
+
self._last_chunk_time = time.time()
|
| 357 |
+
self._chunks_received += 1
|
| 358 |
|
| 359 |
def cancel(self) -> None:
|
| 360 |
"""Immediately stop playback and clear queue."""
|
reachys_brain/openai_realtime.py
CHANGED
|
@@ -13,7 +13,15 @@ from typing import Any, Callable, Optional
|
|
| 13 |
import websockets
|
| 14 |
from websockets.client import WebSocketClientProtocol
|
| 15 |
|
| 16 |
-
from .routes.voice import
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
|
| 18 |
# Import app_tools lazily to avoid circular imports
|
| 19 |
# (conversation.py imports from this module, and we import from app_tools)
|
|
@@ -51,10 +59,8 @@ logger = logging.getLogger(__name__)
|
|
| 51 |
OPENAI_REALTIME_URL = "wss://api.openai.com/v1/realtime"
|
| 52 |
OPENAI_MODEL = "gpt-4o-realtime-preview-2024-12-17"
|
| 53 |
|
| 54 |
-
# Voice Activity Detection (VAD) settings
|
| 55 |
-
|
| 56 |
-
VAD_PREFIX_PADDING_MS = 300 # Audio padding before speech detection
|
| 57 |
-
VAD_SILENCE_DURATION_MS = 700 # Silence duration before response (default 500)
|
| 58 |
|
| 59 |
|
| 60 |
class ConnectionState(str, Enum):
|
|
@@ -71,6 +77,18 @@ class SpeakingState(str, Enum):
|
|
| 71 |
SPEAKING = "speaking"
|
| 72 |
|
| 73 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 74 |
@dataclass
|
| 75 |
class RealtimeEvent:
|
| 76 |
"""Event from OpenAI Realtime API to relay to iOS."""
|
|
@@ -93,6 +111,7 @@ class OpenAIRealtimeService:
|
|
| 93 |
# State
|
| 94 |
self._connection_state = ConnectionState.DISCONNECTED
|
| 95 |
self._speaking_state = SpeakingState.IDLE
|
|
|
|
| 96 |
self._is_listening = False
|
| 97 |
self._current_transcript = ""
|
| 98 |
self._response_text_buffer = ""
|
|
@@ -144,6 +163,7 @@ class OpenAIRealtimeService:
|
|
| 144 |
# Callbacks for iOS relay
|
| 145 |
self.on_connection_state: Optional[Callable[[ConnectionState], None]] = None
|
| 146 |
self.on_speaking_state: Optional[Callable[[SpeakingState], None]] = None
|
|
|
|
| 147 |
self.on_transcript_update: Optional[Callable[[str], None]] = None
|
| 148 |
self.on_response_text: Optional[Callable[[str], None]] = None
|
| 149 |
self.on_audio_delta: Optional[Callable[[bytes], None]] = None
|
|
@@ -166,6 +186,7 @@ class OpenAIRealtimeService:
|
|
| 166 |
# Setup tools handler callbacks
|
| 167 |
self._setup_tools_handler()
|
| 168 |
self._setup_meeting_callbacks()
|
|
|
|
| 169 |
|
| 170 |
@property
|
| 171 |
def connection_state(self) -> ConnectionState:
|
|
@@ -177,6 +198,23 @@ class OpenAIRealtimeService:
|
|
| 177 |
"""Get current speaking state."""
|
| 178 |
return self._speaking_state
|
| 179 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 180 |
@property
|
| 181 |
def is_connected(self) -> bool:
|
| 182 |
"""Check if connected to OpenAI."""
|
|
@@ -438,6 +476,20 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 438 |
on_transcript_update=on_transcript_update,
|
| 439 |
)
|
| 440 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 441 |
def _append_to_meeting_transcript(self, text: str, speaker: str = "user") -> None:
|
| 442 |
"""Append transcribed text to active meeting if one exists.
|
| 443 |
|
|
@@ -470,6 +522,22 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 470 |
if self.on_speaking_state:
|
| 471 |
self.on_speaking_state(state)
|
| 472 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 473 |
async def connect(self, api_key: Optional[str] = None) -> None:
|
| 474 |
"""Connect to OpenAI Realtime API.
|
| 475 |
|
|
@@ -554,9 +622,17 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 554 |
# Sync language with preferred setting
|
| 555 |
self._language = get_preferred_language()
|
| 556 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 557 |
# Get combined tools (base + dynamic based on app config)
|
| 558 |
all_tools = self._get_all_tools()
|
| 559 |
-
logger.info(
|
|
|
|
|
|
|
|
|
|
| 560 |
|
| 561 |
session_config = {
|
| 562 |
"type": "session.update",
|
|
@@ -572,9 +648,9 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 572 |
},
|
| 573 |
"turn_detection": {
|
| 574 |
"type": "server_vad",
|
| 575 |
-
"threshold":
|
| 576 |
-
"prefix_padding_ms":
|
| 577 |
-
"silence_duration_ms":
|
| 578 |
},
|
| 579 |
"tools": all_tools,
|
| 580 |
"tool_choice": "auto",
|
|
@@ -637,9 +713,17 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 637 |
self.on_session_updated()
|
| 638 |
|
| 639 |
elif msg_type == "input_audio_buffer.speech_started":
|
| 640 |
-
# User started speaking
|
| 641 |
-
|
| 642 |
-
self.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 643 |
|
| 644 |
elif msg_type == "input_audio_buffer.speech_stopped":
|
| 645 |
# User stopped speaking
|
|
@@ -680,18 +764,26 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 680 |
if self._speaking_state != SpeakingState.SPEAKING:
|
| 681 |
self._set_speaking_state(SpeakingState.SPEAKING)
|
| 682 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 683 |
# Decode and forward audio
|
| 684 |
audio_data = base64.b64decode(audio_base64)
|
| 685 |
if self.on_audio_delta:
|
| 686 |
self.on_audio_delta(audio_data)
|
| 687 |
|
| 688 |
elif msg_type == "response.audio.done":
|
| 689 |
-
# AI audio response complete
|
| 690 |
self._set_speaking_state(SpeakingState.IDLE)
|
|
|
|
| 691 |
|
| 692 |
elif msg_type == "response.done":
|
| 693 |
# Full response complete
|
| 694 |
self._set_speaking_state(SpeakingState.IDLE)
|
|
|
|
|
|
|
|
|
|
| 695 |
if self._response_text_buffer:
|
| 696 |
if self.on_response_text:
|
| 697 |
self.on_response_text(self._response_text_buffer)
|
|
@@ -904,10 +996,18 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 904 |
await self._send_message(message)
|
| 905 |
|
| 906 |
async def commit_audio_and_respond(self) -> None:
|
| 907 |
-
"""Commit the audio buffer and request a response.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 908 |
if not self.is_connected:
|
| 909 |
return
|
| 910 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 911 |
# Commit audio buffer
|
| 912 |
await self._send_message({"type": "input_audio_buffer.commit"})
|
| 913 |
|
|
@@ -933,12 +1033,19 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 933 |
async def send_text_message(self, text: str) -> None:
|
| 934 |
"""Send a text message (non-voice interaction).
|
| 935 |
|
|
|
|
|
|
|
|
|
|
| 936 |
Args:
|
| 937 |
text: The text message to send.
|
| 938 |
"""
|
| 939 |
if not self.is_connected:
|
| 940 |
raise RuntimeError("Not connected to OpenAI")
|
| 941 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 942 |
# Create conversation item
|
| 943 |
item_message = {
|
| 944 |
"type": "conversation.item.create",
|
|
@@ -971,6 +1078,15 @@ Before using ANY tool, you MUST tell the user what you're about to do. This help
|
|
| 971 |
self._is_listening = False
|
| 972 |
logger.info("Stopped listening")
|
| 973 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 974 |
def set_voice(self, voice_id: str) -> None:
|
| 975 |
"""Set the OpenAI voice and update the session.
|
| 976 |
|
|
|
|
| 13 |
import websockets
|
| 14 |
from websockets.client import WebSocketClientProtocol
|
| 15 |
|
| 16 |
+
from .routes.voice import (
|
| 17 |
+
get_current_voice,
|
| 18 |
+
get_current_language,
|
| 19 |
+
get_preferred_language,
|
| 20 |
+
get_vad_threshold,
|
| 21 |
+
get_vad_silence_ms,
|
| 22 |
+
get_vad_prefix_ms,
|
| 23 |
+
set_vad_settings_callback,
|
| 24 |
+
)
|
| 25 |
|
| 26 |
# Import app_tools lazily to avoid circular imports
|
| 27 |
# (conversation.py imports from this module, and we import from app_tools)
|
|
|
|
| 59 |
OPENAI_REALTIME_URL = "wss://api.openai.com/v1/realtime"
|
| 60 |
OPENAI_MODEL = "gpt-4o-realtime-preview-2024-12-17"
|
| 61 |
|
| 62 |
+
# Voice Activity Detection (VAD) settings are now configurable via VoiceSettings
|
| 63 |
+
# Use get_vad_threshold(), get_vad_silence_ms(), get_vad_prefix_ms() to get current values
|
|
|
|
|
|
|
| 64 |
|
| 65 |
|
| 66 |
class ConnectionState(str, Enum):
|
|
|
|
| 77 |
SPEAKING = "speaking"
|
| 78 |
|
| 79 |
|
| 80 |
+
class ResponseState(str, Enum):
|
| 81 |
+
"""AI response generation state.
|
| 82 |
+
|
| 83 |
+
This tracks the full lifecycle of an AI response to ensure
|
| 84 |
+
microphone is properly paused during the entire response cycle.
|
| 85 |
+
"""
|
| 86 |
+
IDLE = "idle" # No response in progress
|
| 87 |
+
WAITING = "waiting" # Request sent, waiting for response to start
|
| 88 |
+
GENERATING = "generating" # AI is generating/streaming audio
|
| 89 |
+
COMPLETE = "complete" # Response done, waiting for audio buffer to drain
|
| 90 |
+
|
| 91 |
+
|
| 92 |
@dataclass
|
| 93 |
class RealtimeEvent:
|
| 94 |
"""Event from OpenAI Realtime API to relay to iOS."""
|
|
|
|
| 111 |
# State
|
| 112 |
self._connection_state = ConnectionState.DISCONNECTED
|
| 113 |
self._speaking_state = SpeakingState.IDLE
|
| 114 |
+
self._response_state = ResponseState.IDLE # Track full response lifecycle
|
| 115 |
self._is_listening = False
|
| 116 |
self._current_transcript = ""
|
| 117 |
self._response_text_buffer = ""
|
|
|
|
| 163 |
# Callbacks for iOS relay
|
| 164 |
self.on_connection_state: Optional[Callable[[ConnectionState], None]] = None
|
| 165 |
self.on_speaking_state: Optional[Callable[[SpeakingState], None]] = None
|
| 166 |
+
self.on_response_state: Optional[Callable[[ResponseState], None]] = None # Response lifecycle
|
| 167 |
self.on_transcript_update: Optional[Callable[[str], None]] = None
|
| 168 |
self.on_response_text: Optional[Callable[[str], None]] = None
|
| 169 |
self.on_audio_delta: Optional[Callable[[bytes], None]] = None
|
|
|
|
| 186 |
# Setup tools handler callbacks
|
| 187 |
self._setup_tools_handler()
|
| 188 |
self._setup_meeting_callbacks()
|
| 189 |
+
self._setup_vad_callback()
|
| 190 |
|
| 191 |
@property
|
| 192 |
def connection_state(self) -> ConnectionState:
|
|
|
|
| 198 |
"""Get current speaking state."""
|
| 199 |
return self._speaking_state
|
| 200 |
|
| 201 |
+
@property
|
| 202 |
+
def response_state(self) -> ResponseState:
|
| 203 |
+
"""Get current response state."""
|
| 204 |
+
return self._response_state
|
| 205 |
+
|
| 206 |
+
@property
|
| 207 |
+
def is_responding(self) -> bool:
|
| 208 |
+
"""Check if AI is currently responding (waiting, generating, or completing).
|
| 209 |
+
|
| 210 |
+
When True, microphone should be paused to prevent VAD interruption.
|
| 211 |
+
"""
|
| 212 |
+
return self._response_state in (
|
| 213 |
+
ResponseState.WAITING,
|
| 214 |
+
ResponseState.GENERATING,
|
| 215 |
+
ResponseState.COMPLETE,
|
| 216 |
+
)
|
| 217 |
+
|
| 218 |
@property
|
| 219 |
def is_connected(self) -> bool:
|
| 220 |
"""Check if connected to OpenAI."""
|
|
|
|
| 476 |
on_transcript_update=on_transcript_update,
|
| 477 |
)
|
| 478 |
|
| 479 |
+
def _setup_vad_callback(self) -> None:
|
| 480 |
+
"""Setup callback for VAD settings changes.
|
| 481 |
+
|
| 482 |
+
When user changes VAD settings via the app, this triggers
|
| 483 |
+
a session reconfiguration to apply the new settings.
|
| 484 |
+
"""
|
| 485 |
+
def on_vad_changed() -> None:
|
| 486 |
+
"""Called when VAD settings are changed."""
|
| 487 |
+
if self.is_connected:
|
| 488 |
+
logger.info("🎤 VAD settings changed, updating OpenAI session...")
|
| 489 |
+
asyncio.create_task(self._configure_session())
|
| 490 |
+
|
| 491 |
+
set_vad_settings_callback(on_vad_changed)
|
| 492 |
+
|
| 493 |
def _append_to_meeting_transcript(self, text: str, speaker: str = "user") -> None:
|
| 494 |
"""Append transcribed text to active meeting if one exists.
|
| 495 |
|
|
|
|
| 522 |
if self.on_speaking_state:
|
| 523 |
self.on_speaking_state(state)
|
| 524 |
|
| 525 |
+
def _set_response_state(self, state: ResponseState) -> None:
|
| 526 |
+
"""Update response state and notify callback.
|
| 527 |
+
|
| 528 |
+
Response state tracks the full lifecycle of an AI response:
|
| 529 |
+
- IDLE: No response in progress, microphone can be active
|
| 530 |
+
- WAITING: Request sent, waiting for first audio - microphone should be paused
|
| 531 |
+
- GENERATING: Audio is being generated/streamed - microphone should be paused
|
| 532 |
+
- COMPLETE: Response done, waiting for buffer drain - microphone should be paused
|
| 533 |
+
"""
|
| 534 |
+
if self._response_state != state:
|
| 535 |
+
old_state = self._response_state
|
| 536 |
+
self._response_state = state
|
| 537 |
+
logger.info(f"📊 Response state: {old_state.value} -> {state.value}")
|
| 538 |
+
if self.on_response_state:
|
| 539 |
+
self.on_response_state(state)
|
| 540 |
+
|
| 541 |
async def connect(self, api_key: Optional[str] = None) -> None:
|
| 542 |
"""Connect to OpenAI Realtime API.
|
| 543 |
|
|
|
|
| 622 |
# Sync language with preferred setting
|
| 623 |
self._language = get_preferred_language()
|
| 624 |
|
| 625 |
+
# Get current VAD settings (user-configurable)
|
| 626 |
+
vad_threshold = get_vad_threshold()
|
| 627 |
+
vad_silence_ms = get_vad_silence_ms()
|
| 628 |
+
vad_prefix_ms = get_vad_prefix_ms()
|
| 629 |
+
|
| 630 |
# Get combined tools (base + dynamic based on app config)
|
| 631 |
all_tools = self._get_all_tools()
|
| 632 |
+
logger.info(
|
| 633 |
+
f"🔊 Configuring session with voice: {voice}, language: {self._language}, "
|
| 634 |
+
f"VAD: threshold={vad_threshold}, silence={vad_silence_ms}ms"
|
| 635 |
+
)
|
| 636 |
|
| 637 |
session_config = {
|
| 638 |
"type": "session.update",
|
|
|
|
| 648 |
},
|
| 649 |
"turn_detection": {
|
| 650 |
"type": "server_vad",
|
| 651 |
+
"threshold": vad_threshold,
|
| 652 |
+
"prefix_padding_ms": vad_prefix_ms,
|
| 653 |
+
"silence_duration_ms": vad_silence_ms,
|
| 654 |
},
|
| 655 |
"tools": all_tools,
|
| 656 |
"tool_choice": "auto",
|
|
|
|
| 713 |
self.on_session_updated()
|
| 714 |
|
| 715 |
elif msg_type == "input_audio_buffer.speech_started":
|
| 716 |
+
# User started speaking - but ignore if AI is responding
|
| 717 |
+
# This prevents VAD from interrupting responses due to echo/noise
|
| 718 |
+
if self.is_responding:
|
| 719 |
+
logger.warning(
|
| 720 |
+
"🎤 VAD speech detected during AI response - ignoring "
|
| 721 |
+
f"(response_state={self._response_state.value})"
|
| 722 |
+
)
|
| 723 |
+
# Don't change speaking state - let the response complete
|
| 724 |
+
else:
|
| 725 |
+
logger.info("🎤 Speech detected - user is speaking")
|
| 726 |
+
self._set_speaking_state(SpeakingState.IDLE)
|
| 727 |
|
| 728 |
elif msg_type == "input_audio_buffer.speech_stopped":
|
| 729 |
# User stopped speaking
|
|
|
|
| 764 |
if self._speaking_state != SpeakingState.SPEAKING:
|
| 765 |
self._set_speaking_state(SpeakingState.SPEAKING)
|
| 766 |
|
| 767 |
+
# Transition from WAITING to GENERATING on first audio
|
| 768 |
+
if self._response_state == ResponseState.WAITING:
|
| 769 |
+
self._set_response_state(ResponseState.GENERATING)
|
| 770 |
+
|
| 771 |
# Decode and forward audio
|
| 772 |
audio_data = base64.b64decode(audio_base64)
|
| 773 |
if self.on_audio_delta:
|
| 774 |
self.on_audio_delta(audio_data)
|
| 775 |
|
| 776 |
elif msg_type == "response.audio.done":
|
| 777 |
+
# AI audio response complete - set to COMPLETE (waiting for buffer drain)
|
| 778 |
self._set_speaking_state(SpeakingState.IDLE)
|
| 779 |
+
self._set_response_state(ResponseState.COMPLETE)
|
| 780 |
|
| 781 |
elif msg_type == "response.done":
|
| 782 |
# Full response complete
|
| 783 |
self._set_speaking_state(SpeakingState.IDLE)
|
| 784 |
+
# Only set COMPLETE if we were generating (avoid duplicate transitions)
|
| 785 |
+
if self._response_state == ResponseState.GENERATING:
|
| 786 |
+
self._set_response_state(ResponseState.COMPLETE)
|
| 787 |
if self._response_text_buffer:
|
| 788 |
if self.on_response_text:
|
| 789 |
self.on_response_text(self._response_text_buffer)
|
|
|
|
| 996 |
await self._send_message(message)
|
| 997 |
|
| 998 |
async def commit_audio_and_respond(self) -> None:
|
| 999 |
+
"""Commit the audio buffer and request a response.
|
| 1000 |
+
|
| 1001 |
+
Sets response state to WAITING before committing to ensure microphone
|
| 1002 |
+
is paused before the AI starts responding.
|
| 1003 |
+
"""
|
| 1004 |
if not self.is_connected:
|
| 1005 |
return
|
| 1006 |
|
| 1007 |
+
# Set response state to WAITING before committing
|
| 1008 |
+
# This triggers microphone pause BEFORE the request is sent
|
| 1009 |
+
self._set_response_state(ResponseState.WAITING)
|
| 1010 |
+
|
| 1011 |
# Commit audio buffer
|
| 1012 |
await self._send_message({"type": "input_audio_buffer.commit"})
|
| 1013 |
|
|
|
|
| 1033 |
async def send_text_message(self, text: str) -> None:
|
| 1034 |
"""Send a text message (non-voice interaction).
|
| 1035 |
|
| 1036 |
+
Sets response state to WAITING before sending to ensure microphone
|
| 1037 |
+
is paused before the AI starts responding.
|
| 1038 |
+
|
| 1039 |
Args:
|
| 1040 |
text: The text message to send.
|
| 1041 |
"""
|
| 1042 |
if not self.is_connected:
|
| 1043 |
raise RuntimeError("Not connected to OpenAI")
|
| 1044 |
|
| 1045 |
+
# Set response state to WAITING before sending
|
| 1046 |
+
# This triggers microphone pause BEFORE the request is sent
|
| 1047 |
+
self._set_response_state(ResponseState.WAITING)
|
| 1048 |
+
|
| 1049 |
# Create conversation item
|
| 1050 |
item_message = {
|
| 1051 |
"type": "conversation.item.create",
|
|
|
|
| 1078 |
self._is_listening = False
|
| 1079 |
logger.info("Stopped listening")
|
| 1080 |
|
| 1081 |
+
def mark_response_complete(self) -> None:
|
| 1082 |
+
"""Mark the response cycle as complete (audio buffer drained).
|
| 1083 |
+
|
| 1084 |
+
Called by conversation handler after audio has finished playing
|
| 1085 |
+
and microphone can be safely resumed.
|
| 1086 |
+
"""
|
| 1087 |
+
if self._response_state != ResponseState.IDLE:
|
| 1088 |
+
self._set_response_state(ResponseState.IDLE)
|
| 1089 |
+
|
| 1090 |
def set_voice(self, voice_id: str) -> None:
|
| 1091 |
"""Set the OpenAI voice and update the session.
|
| 1092 |
|
reachys_brain/routes/audio_manager.py
CHANGED
|
@@ -105,11 +105,30 @@ async def delayed_resume_microphone() -> None:
|
|
| 105 |
|
| 106 |
Waits for the audio buffer to drain before resuming capture
|
| 107 |
to avoid picking up the AI's own speech.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 108 |
"""
|
| 109 |
services = get_services()
|
| 110 |
|
|
|
|
| 111 |
await asyncio.sleep(ConversationTimings.MICROPHONE_RESUME_DELAY_SECONDS)
|
| 112 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 113 |
if (services.audio_capture and
|
| 114 |
services.openai and
|
| 115 |
services.openai.is_listening):
|
|
@@ -120,12 +139,16 @@ async def delayed_resume_microphone() -> None:
|
|
| 120 |
logger.info("🗑️ Cleared audio buffer before resume")
|
| 121 |
|
| 122 |
# Brief pause to ensure buffer is cleared on OpenAI's side
|
| 123 |
-
await asyncio.sleep(0.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 124 |
|
| 125 |
services.audio_capture.resume_capture()
|
| 126 |
logger.info(
|
| 127 |
f"▶️ Resumed microphone (after "
|
| 128 |
-
f"{ConversationTimings.MICROPHONE_RESUME_DELAY_SECONDS}s delay)"
|
| 129 |
)
|
| 130 |
|
| 131 |
|
|
|
|
| 105 |
|
| 106 |
Waits for the audio buffer to drain before resuming capture
|
| 107 |
to avoid picking up the AI's own speech.
|
| 108 |
+
|
| 109 |
+
This uses a two-phase approach:
|
| 110 |
+
1. Wait the minimum delay time
|
| 111 |
+
2. Additionally wait until audio playback has actually stopped
|
| 112 |
"""
|
| 113 |
services = get_services()
|
| 114 |
|
| 115 |
+
# Phase 1: Wait minimum delay
|
| 116 |
await asyncio.sleep(ConversationTimings.MICROPHONE_RESUME_DELAY_SECONDS)
|
| 117 |
|
| 118 |
+
# Phase 2: Wait for audio to actually stop playing
|
| 119 |
+
# Check if audio player is still active and wait for it
|
| 120 |
+
if services.audio_player and services.audio_player.is_playing:
|
| 121 |
+
max_extra_wait = 5.0 # Maximum additional wait time
|
| 122 |
+
wait_interval = 0.2
|
| 123 |
+
waited = 0.0
|
| 124 |
+
|
| 125 |
+
while services.audio_player.is_playing and waited < max_extra_wait:
|
| 126 |
+
await asyncio.sleep(wait_interval)
|
| 127 |
+
waited += wait_interval
|
| 128 |
+
|
| 129 |
+
if waited > 0:
|
| 130 |
+
logger.info(f"⏳ Waited additional {waited:.1f}s for audio to finish")
|
| 131 |
+
|
| 132 |
if (services.audio_capture and
|
| 133 |
services.openai and
|
| 134 |
services.openai.is_listening):
|
|
|
|
| 139 |
logger.info("🗑️ Cleared audio buffer before resume")
|
| 140 |
|
| 141 |
# Brief pause to ensure buffer is cleared on OpenAI's side
|
| 142 |
+
await asyncio.sleep(0.2)
|
| 143 |
+
|
| 144 |
+
# Mark response cycle as complete (allows new responses)
|
| 145 |
+
if services.openai:
|
| 146 |
+
services.openai.mark_response_complete()
|
| 147 |
|
| 148 |
services.audio_capture.resume_capture()
|
| 149 |
logger.info(
|
| 150 |
f"▶️ Resumed microphone (after "
|
| 151 |
+
f"{ConversationTimings.MICROPHONE_RESUME_DELAY_SECONDS}s+ delay)"
|
| 152 |
)
|
| 153 |
|
| 154 |
|
reachys_brain/routes/audio_stream_manager.py
CHANGED
|
@@ -19,7 +19,9 @@ class ConversationTimings:
|
|
| 19 |
"""Named constants for timing-related magic numbers."""
|
| 20 |
|
| 21 |
# Microphone control
|
| 22 |
-
|
|
|
|
|
|
|
| 23 |
AUDIO_CHUNK_TIMEOUT_SECONDS = 0.1
|
| 24 |
|
| 25 |
# Animation timings
|
|
|
|
| 19 |
"""Named constants for timing-related magic numbers."""
|
| 20 |
|
| 21 |
# Microphone control
|
| 22 |
+
# Increased from 2.5s to allow more time for audio buffer to drain
|
| 23 |
+
# This prevents echo/self-interruption where Reachy's voice is picked up
|
| 24 |
+
MICROPHONE_RESUME_DELAY_SECONDS = 3.5
|
| 25 |
AUDIO_CHUNK_TIMEOUT_SECONDS = 0.1
|
| 26 |
|
| 27 |
# Animation timings
|
reachys_brain/routes/conversation.py
CHANGED
|
@@ -15,7 +15,7 @@ from typing import Optional
|
|
| 15 |
|
| 16 |
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
|
| 17 |
|
| 18 |
-
from ..openai_realtime import ConnectionState, SpeakingState
|
| 19 |
from ..tools.reminders import set_reminder_request_callback, handle_reminder_result
|
| 20 |
from ..tools.contacts import set_contacts_request_callback, handle_contacts_result
|
| 21 |
from ..tools.scheduled_messages import set_scheduled_message_callback, handle_scheduled_message_result
|
|
@@ -50,6 +50,86 @@ logger = logging.getLogger(__name__)
|
|
| 50 |
|
| 51 |
router = APIRouter(tags=["Conversation"])
|
| 52 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 53 |
async def _reset_openai_session(keep_listening: bool) -> None:
|
| 54 |
"""Hard reset OpenAI session by disconnecting and reconnecting.
|
| 55 |
|
|
@@ -116,6 +196,7 @@ def init_services() -> None:
|
|
| 116 |
wire_openai_callbacks(
|
| 117 |
on_connection_state=_on_connection_state,
|
| 118 |
on_speaking_state=_on_speaking_state,
|
|
|
|
| 119 |
on_transcript_update=_on_transcript_update,
|
| 120 |
on_response_text=_on_response_text,
|
| 121 |
on_audio_delta=handle_audio_delta,
|
|
@@ -146,11 +227,16 @@ def init_services() -> None:
|
|
| 146 |
logger.info("Conversation services initialized")
|
| 147 |
|
| 148 |
|
| 149 |
-
def cleanup_services() -> None:
|
| 150 |
-
"""Clean up conversation services.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 151 |
# Cancel all tracked tasks first
|
| 152 |
cancel_all_tracked_tasks()
|
| 153 |
-
|
|
|
|
| 154 |
|
| 155 |
|
| 156 |
# MARK: - OpenAI Callbacks
|
|
@@ -205,6 +291,52 @@ def _on_speaking_state(state: SpeakingState) -> None:
|
|
| 205 |
}))
|
| 206 |
|
| 207 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 208 |
def _is_stop_command(transcript: str, language: str = "en") -> bool:
|
| 209 |
"""Check if the transcript is a stop command."""
|
| 210 |
text = transcript.strip().lower()
|
|
@@ -815,14 +947,23 @@ async def _handle_start_listening(websocket: WebSocket) -> None:
|
|
| 815 |
services.openai.start_listening()
|
| 816 |
logger.info("✅ OpenAI listening mode enabled")
|
| 817 |
|
| 818 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 819 |
|
| 820 |
await websocket.send_json({
|
| 821 |
"type": "listening_state",
|
| 822 |
"listening": True,
|
| 823 |
})
|
| 824 |
|
| 825 |
-
|
|
|
|
| 826 |
|
| 827 |
|
| 828 |
async def _handle_stop_listening(websocket: WebSocket) -> None:
|
|
@@ -857,22 +998,34 @@ async def _handle_stop_listening(websocket: WebSocket) -> None:
|
|
| 857 |
wait_interval = 0.2
|
| 858 |
waited = 0.0
|
| 859 |
|
| 860 |
-
# Wait for speech to start
|
| 861 |
await asyncio.sleep(0.5)
|
| 862 |
|
| 863 |
-
# Wait for
|
| 864 |
while waited < max_wait:
|
| 865 |
-
if services.
|
| 866 |
-
|
| 867 |
-
|
| 868 |
-
|
| 869 |
-
|
| 870 |
-
|
| 871 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 872 |
else:
|
| 873 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 874 |
await asyncio.sleep(wait_interval)
|
| 875 |
-
|
| 876 |
|
| 877 |
except Exception as e:
|
| 878 |
logger.error(f"Error in goodbye sequence: {e}")
|
|
|
|
| 15 |
|
| 16 |
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
|
| 17 |
|
| 18 |
+
from ..openai_realtime import ConnectionState, SpeakingState, ResponseState
|
| 19 |
from ..tools.reminders import set_reminder_request_callback, handle_reminder_result
|
| 20 |
from ..tools.contacts import set_contacts_request_callback, handle_contacts_result
|
| 21 |
from ..tools.scheduled_messages import set_scheduled_message_callback, handle_scheduled_message_result
|
|
|
|
| 50 |
|
| 51 |
router = APIRouter(tags=["Conversation"])
|
| 52 |
|
| 53 |
+
|
| 54 |
+
async def send_greeting_and_start_streaming() -> None:
|
| 55 |
+
"""Send greeting and start audio streaming after it completes.
|
| 56 |
+
|
| 57 |
+
This ensures that:
|
| 58 |
+
1. No audio is sent to OpenAI during the welcome animation
|
| 59 |
+
2. The greeting plays completely without VAD interruption
|
| 60 |
+
3. Audio streaming only starts after greeting finishes
|
| 61 |
+
|
| 62 |
+
Flow:
|
| 63 |
+
1. send_greeting() sends text -> ResponseState.WAITING
|
| 64 |
+
2. Audio arrives -> ResponseState.GENERATING
|
| 65 |
+
3. response.audio.done -> ResponseState.COMPLETE
|
| 66 |
+
4. We wait for COMPLETE (audio sent by OpenAI)
|
| 67 |
+
5. Wait for audio buffer to drain (audio played by speaker)
|
| 68 |
+
6. Start streaming and resume microphone
|
| 69 |
+
"""
|
| 70 |
+
services = get_services()
|
| 71 |
+
|
| 72 |
+
try:
|
| 73 |
+
# Send the greeting (this will set ResponseState.WAITING -> GENERATING -> COMPLETE)
|
| 74 |
+
await send_greeting()
|
| 75 |
+
|
| 76 |
+
# Wait for the greeting response to be COMPLETE or IDLE
|
| 77 |
+
# COMPLETE = OpenAI finished sending audio
|
| 78 |
+
# IDLE = Response cycle fully complete
|
| 79 |
+
max_wait = 15.0 # Maximum wait for greeting
|
| 80 |
+
wait_interval = 0.2
|
| 81 |
+
waited = 0.0
|
| 82 |
+
|
| 83 |
+
while waited < max_wait:
|
| 84 |
+
if services.openai:
|
| 85 |
+
state = services.openai.response_state
|
| 86 |
+
if state in (ResponseState.COMPLETE, ResponseState.IDLE):
|
| 87 |
+
break
|
| 88 |
+
await asyncio.sleep(wait_interval)
|
| 89 |
+
waited += wait_interval
|
| 90 |
+
|
| 91 |
+
if waited >= max_wait:
|
| 92 |
+
logger.warning("⚠️ Greeting response timeout - starting streaming anyway")
|
| 93 |
+
else:
|
| 94 |
+
logger.info(f"✅ Greeting response complete (waited {waited:.1f}s)")
|
| 95 |
+
|
| 96 |
+
# Wait for audio to finish playing through speaker
|
| 97 |
+
if services.audio_player and services.audio_player.is_playing:
|
| 98 |
+
extra_wait = 0.0
|
| 99 |
+
max_extra = 8.0
|
| 100 |
+
while services.audio_player.is_playing and extra_wait < max_extra:
|
| 101 |
+
await asyncio.sleep(wait_interval)
|
| 102 |
+
extra_wait += wait_interval
|
| 103 |
+
if extra_wait > 0:
|
| 104 |
+
logger.info(f"⏳ Waited {extra_wait:.1f}s for greeting audio to finish")
|
| 105 |
+
|
| 106 |
+
# Additional buffer drain time
|
| 107 |
+
await asyncio.sleep(ConversationTimings.MICROPHONE_RESUME_DELAY_SECONDS)
|
| 108 |
+
|
| 109 |
+
# Now start audio streaming
|
| 110 |
+
if services.openai and services.openai.is_listening:
|
| 111 |
+
start_audio_streaming()
|
| 112 |
+
logger.info("✅ Audio streaming started after greeting")
|
| 113 |
+
|
| 114 |
+
# Clear any buffered audio
|
| 115 |
+
await services.openai.clear_audio_buffer()
|
| 116 |
+
|
| 117 |
+
# Mark response cycle as complete
|
| 118 |
+
services.openai.mark_response_complete()
|
| 119 |
+
|
| 120 |
+
# Resume audio capture
|
| 121 |
+
if services.audio_capture:
|
| 122 |
+
services.audio_capture.resume_capture()
|
| 123 |
+
logger.info("▶️ Microphone resumed after greeting")
|
| 124 |
+
|
| 125 |
+
except Exception as e:
|
| 126 |
+
logger.error(f"Error in greeting flow: {e}")
|
| 127 |
+
# Start streaming anyway to not leave the system in a broken state
|
| 128 |
+
if services.openai and services.openai.is_listening:
|
| 129 |
+
start_audio_streaming()
|
| 130 |
+
if services.audio_capture:
|
| 131 |
+
services.audio_capture.resume_capture()
|
| 132 |
+
|
| 133 |
async def _reset_openai_session(keep_listening: bool) -> None:
|
| 134 |
"""Hard reset OpenAI session by disconnecting and reconnecting.
|
| 135 |
|
|
|
|
| 196 |
wire_openai_callbacks(
|
| 197 |
on_connection_state=_on_connection_state,
|
| 198 |
on_speaking_state=_on_speaking_state,
|
| 199 |
+
on_response_state=_on_response_state,
|
| 200 |
on_transcript_update=_on_transcript_update,
|
| 201 |
on_response_text=_on_response_text,
|
| 202 |
on_audio_delta=handle_audio_delta,
|
|
|
|
| 227 |
logger.info("Conversation services initialized")
|
| 228 |
|
| 229 |
|
| 230 |
+
async def cleanup_services() -> None:
|
| 231 |
+
"""Clean up conversation services.
|
| 232 |
+
|
| 233 |
+
This is async to properly await all cleanup operations and prevent
|
| 234 |
+
the app from hanging during shutdown.
|
| 235 |
+
"""
|
| 236 |
# Cancel all tracked tasks first
|
| 237 |
cancel_all_tracked_tasks()
|
| 238 |
+
# Await the async cleanup
|
| 239 |
+
await cleanup_conversation_services()
|
| 240 |
|
| 241 |
|
| 242 |
# MARK: - OpenAI Callbacks
|
|
|
|
| 291 |
}))
|
| 292 |
|
| 293 |
|
| 294 |
+
def _on_response_state(state: ResponseState) -> None:
|
| 295 |
+
"""Handle OpenAI response lifecycle state changes.
|
| 296 |
+
|
| 297 |
+
This is the key callback for preventing audio cutoff:
|
| 298 |
+
- WAITING: Request sent, pause microphone immediately
|
| 299 |
+
- GENERATING: Audio being received, mic stays paused
|
| 300 |
+
- COMPLETE: Audio done, wait for buffer drain then resume
|
| 301 |
+
- IDLE: Buffer drained, mic can be active
|
| 302 |
+
"""
|
| 303 |
+
services = get_services()
|
| 304 |
+
conv_state = get_state()
|
| 305 |
+
|
| 306 |
+
if state == ResponseState.WAITING:
|
| 307 |
+
# Immediately pause microphone when a request is sent
|
| 308 |
+
# This prevents VAD from detecting noise/echo before response starts
|
| 309 |
+
if services.audio_capture:
|
| 310 |
+
services.audio_capture.pause_capture()
|
| 311 |
+
logger.info("⏸️ Paused microphone (waiting for AI response)")
|
| 312 |
+
|
| 313 |
+
# Clear OpenAI's audio buffer to prevent any buffered audio
|
| 314 |
+
if services.openai:
|
| 315 |
+
create_tracked_task(services.openai.clear_audio_buffer())
|
| 316 |
+
|
| 317 |
+
elif state == ResponseState.GENERATING:
|
| 318 |
+
# Audio is being generated - ensure mic stays paused
|
| 319 |
+
if services.audio_capture and not services.audio_capture.is_paused:
|
| 320 |
+
services.audio_capture.pause_capture()
|
| 321 |
+
logger.info("⏸️ Paused microphone (AI generating audio)")
|
| 322 |
+
|
| 323 |
+
elif state == ResponseState.COMPLETE:
|
| 324 |
+
# Response complete - schedule microphone resume after buffer drains
|
| 325 |
+
# The actual resume happens via delayed_resume_microphone
|
| 326 |
+
logger.info("📊 Response complete - waiting for audio buffer to drain")
|
| 327 |
+
|
| 328 |
+
elif state == ResponseState.IDLE:
|
| 329 |
+
# Response cycle complete - microphone can be resumed
|
| 330 |
+
# This is called after buffer has drained
|
| 331 |
+
logger.info("📊 Response cycle idle - microphone can resume")
|
| 332 |
+
|
| 333 |
+
# Broadcast state change to iOS
|
| 334 |
+
create_tracked_task(broadcast({
|
| 335 |
+
"type": "response_state",
|
| 336 |
+
"state": state.value,
|
| 337 |
+
}))
|
| 338 |
+
|
| 339 |
+
|
| 340 |
def _is_stop_command(transcript: str, language: str = "en") -> bool:
|
| 341 |
"""Check if the transcript is a stop command."""
|
| 342 |
text = transcript.strip().lower()
|
|
|
|
| 947 |
services.openai.start_listening()
|
| 948 |
logger.info("✅ OpenAI listening mode enabled")
|
| 949 |
|
| 950 |
+
# IMPORTANT: Do NOT start audio streaming here!
|
| 951 |
+
# Audio streaming will be started AFTER the greeting completes.
|
| 952 |
+
# This prevents motor noise from the welcome animation and ambient
|
| 953 |
+
# noise from triggering VAD and interrupting the greeting.
|
| 954 |
+
|
| 955 |
+
# Pause audio capture immediately - it will be resumed after greeting
|
| 956 |
+
if services.audio_capture:
|
| 957 |
+
services.audio_capture.pause_capture()
|
| 958 |
+
logger.info("⏸️ Audio capture paused until greeting completes")
|
| 959 |
|
| 960 |
await websocket.send_json({
|
| 961 |
"type": "listening_state",
|
| 962 |
"listening": True,
|
| 963 |
})
|
| 964 |
|
| 965 |
+
# Send greeting - audio streaming will start after it completes
|
| 966 |
+
create_tracked_task(send_greeting_and_start_streaming())
|
| 967 |
|
| 968 |
|
| 969 |
async def _handle_stop_listening(websocket: WebSocket) -> None:
|
|
|
|
| 998 |
wait_interval = 0.2
|
| 999 |
waited = 0.0
|
| 1000 |
|
| 1001 |
+
# Wait for speech to start generating
|
| 1002 |
await asyncio.sleep(0.5)
|
| 1003 |
|
| 1004 |
+
# Wait for AI response to complete
|
| 1005 |
while waited < max_wait:
|
| 1006 |
+
if services.openai:
|
| 1007 |
+
response_state = services.openai.response_state
|
| 1008 |
+
if response_state in (ResponseState.WAITING, ResponseState.GENERATING):
|
| 1009 |
+
# Response is ongoing, keep waiting
|
| 1010 |
+
await asyncio.sleep(wait_interval)
|
| 1011 |
+
waited += wait_interval
|
| 1012 |
+
elif waited > 1.0:
|
| 1013 |
+
# Response has finished (or never started after initial delay)
|
| 1014 |
+
break
|
| 1015 |
+
else:
|
| 1016 |
+
# Give it a moment to start
|
| 1017 |
+
await asyncio.sleep(wait_interval)
|
| 1018 |
+
waited += wait_interval
|
| 1019 |
else:
|
| 1020 |
+
break
|
| 1021 |
+
|
| 1022 |
+
# Also wait for audio playback to finish
|
| 1023 |
+
if services.audio_player and services.audio_player.is_playing:
|
| 1024 |
+
extra_wait = 0.0
|
| 1025 |
+
max_extra = 5.0
|
| 1026 |
+
while services.audio_player.is_playing and extra_wait < max_extra:
|
| 1027 |
await asyncio.sleep(wait_interval)
|
| 1028 |
+
extra_wait += wait_interval
|
| 1029 |
|
| 1030 |
except Exception as e:
|
| 1031 |
logger.error(f"Error in goodbye sequence: {e}")
|
reachys_brain/routes/conversation_services.py
CHANGED
|
@@ -40,6 +40,7 @@ class ConversationServices:
|
|
| 40 |
# Callbacks for wiring up event handlers
|
| 41 |
on_connection_state: Optional[Callable] = None
|
| 42 |
on_speaking_state: Optional[Callable] = None
|
|
|
|
| 43 |
on_transcript_update: Optional[Callable] = None
|
| 44 |
on_response_text: Optional[Callable] = None
|
| 45 |
on_audio_delta: Optional[Callable] = None
|
|
@@ -149,32 +150,70 @@ def init_services() -> ConversationServices:
|
|
| 149 |
return services
|
| 150 |
|
| 151 |
|
| 152 |
-
def cleanup_services() -> None:
|
| 153 |
-
"""Clean up all conversation services.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 154 |
services = get_services()
|
| 155 |
state = get_state()
|
| 156 |
|
|
|
|
|
|
|
| 157 |
# Cancel audio streaming task
|
| 158 |
if state.audio_stream_task:
|
| 159 |
state.audio_stream_task.cancel()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 160 |
state.audio_stream_task = None
|
| 161 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 162 |
if services.audio_capture:
|
| 163 |
services.audio_capture.cleanup()
|
| 164 |
|
| 165 |
if services.audio_player:
|
| 166 |
services.audio_player.cleanup()
|
| 167 |
|
| 168 |
-
# Stop speaking gestures if running
|
| 169 |
-
if services.speaking_gestures:
|
| 170 |
-
asyncio.create_task(services.speaking_gestures.close())
|
| 171 |
-
|
| 172 |
logger.info("Conversation services cleaned up")
|
| 173 |
|
| 174 |
|
| 175 |
def wire_openai_callbacks(
|
| 176 |
on_connection_state: Callable,
|
| 177 |
on_speaking_state: Callable,
|
|
|
|
| 178 |
on_transcript_update: Callable,
|
| 179 |
on_response_text: Callable,
|
| 180 |
on_audio_delta: Callable,
|
|
@@ -188,6 +227,7 @@ def wire_openai_callbacks(
|
|
| 188 |
Args:
|
| 189 |
on_connection_state: Handler for connection state changes.
|
| 190 |
on_speaking_state: Handler for speaking state changes.
|
|
|
|
| 191 |
on_transcript_update: Handler for transcript updates.
|
| 192 |
on_response_text: Handler for response text.
|
| 193 |
on_audio_delta: Handler for audio data.
|
|
@@ -203,6 +243,7 @@ def wire_openai_callbacks(
|
|
| 203 |
|
| 204 |
services.openai.on_connection_state = on_connection_state
|
| 205 |
services.openai.on_speaking_state = on_speaking_state
|
|
|
|
| 206 |
services.openai.on_transcript_update = on_transcript_update
|
| 207 |
services.openai.on_response_text = on_response_text
|
| 208 |
services.openai.on_audio_delta = on_audio_delta
|
|
|
|
| 40 |
# Callbacks for wiring up event handlers
|
| 41 |
on_connection_state: Optional[Callable] = None
|
| 42 |
on_speaking_state: Optional[Callable] = None
|
| 43 |
+
on_response_state: Optional[Callable] = None # Response lifecycle state
|
| 44 |
on_transcript_update: Optional[Callable] = None
|
| 45 |
on_response_text: Optional[Callable] = None
|
| 46 |
on_audio_delta: Optional[Callable] = None
|
|
|
|
| 150 |
return services
|
| 151 |
|
| 152 |
|
| 153 |
+
async def cleanup_services() -> None:
|
| 154 |
+
"""Clean up all conversation services.
|
| 155 |
+
|
| 156 |
+
This is async to properly await all cleanup operations and prevent
|
| 157 |
+
the app from hanging during shutdown.
|
| 158 |
+
"""
|
| 159 |
services = get_services()
|
| 160 |
state = get_state()
|
| 161 |
|
| 162 |
+
logger.info("Starting conversation services cleanup...")
|
| 163 |
+
|
| 164 |
# Cancel audio streaming task
|
| 165 |
if state.audio_stream_task:
|
| 166 |
state.audio_stream_task.cancel()
|
| 167 |
+
try:
|
| 168 |
+
await asyncio.wait_for(
|
| 169 |
+
asyncio.shield(state.audio_stream_task),
|
| 170 |
+
timeout=1.0
|
| 171 |
+
)
|
| 172 |
+
except (asyncio.CancelledError, asyncio.TimeoutError):
|
| 173 |
+
pass
|
| 174 |
state.audio_stream_task = None
|
| 175 |
|
| 176 |
+
# Disconnect OpenAI WebSocket (critical - was missing!)
|
| 177 |
+
if services.openai:
|
| 178 |
+
try:
|
| 179 |
+
await asyncio.wait_for(services.openai.disconnect(), timeout=2.0)
|
| 180 |
+
except asyncio.TimeoutError:
|
| 181 |
+
logger.warning("OpenAI disconnect timed out")
|
| 182 |
+
except Exception as e:
|
| 183 |
+
logger.warning(f"Error disconnecting OpenAI: {e}")
|
| 184 |
+
|
| 185 |
+
# Stop speaking gestures with timeout
|
| 186 |
+
if services.speaking_gestures:
|
| 187 |
+
try:
|
| 188 |
+
await asyncio.wait_for(services.speaking_gestures.close(), timeout=1.0)
|
| 189 |
+
except asyncio.TimeoutError:
|
| 190 |
+
logger.warning("Speaking gestures close timed out")
|
| 191 |
+
except Exception as e:
|
| 192 |
+
logger.warning(f"Error closing speaking gestures: {e}")
|
| 193 |
+
|
| 194 |
+
# Close motion service
|
| 195 |
+
if services.motion:
|
| 196 |
+
try:
|
| 197 |
+
await asyncio.wait_for(services.motion.close(), timeout=1.0)
|
| 198 |
+
except asyncio.TimeoutError:
|
| 199 |
+
logger.warning("Motion service close timed out")
|
| 200 |
+
except Exception as e:
|
| 201 |
+
logger.warning(f"Error closing motion service: {e}")
|
| 202 |
+
|
| 203 |
+
# Synchronous cleanup (these don't block)
|
| 204 |
if services.audio_capture:
|
| 205 |
services.audio_capture.cleanup()
|
| 206 |
|
| 207 |
if services.audio_player:
|
| 208 |
services.audio_player.cleanup()
|
| 209 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 210 |
logger.info("Conversation services cleaned up")
|
| 211 |
|
| 212 |
|
| 213 |
def wire_openai_callbacks(
|
| 214 |
on_connection_state: Callable,
|
| 215 |
on_speaking_state: Callable,
|
| 216 |
+
on_response_state: Callable,
|
| 217 |
on_transcript_update: Callable,
|
| 218 |
on_response_text: Callable,
|
| 219 |
on_audio_delta: Callable,
|
|
|
|
| 227 |
Args:
|
| 228 |
on_connection_state: Handler for connection state changes.
|
| 229 |
on_speaking_state: Handler for speaking state changes.
|
| 230 |
+
on_response_state: Handler for response lifecycle state changes.
|
| 231 |
on_transcript_update: Handler for transcript updates.
|
| 232 |
on_response_text: Handler for response text.
|
| 233 |
on_audio_delta: Handler for audio data.
|
|
|
|
| 243 |
|
| 244 |
services.openai.on_connection_state = on_connection_state
|
| 245 |
services.openai.on_speaking_state = on_speaking_state
|
| 246 |
+
services.openai.on_response_state = on_response_state
|
| 247 |
services.openai.on_transcript_update = on_transcript_update
|
| 248 |
services.openai.on_response_text = on_response_text
|
| 249 |
services.openai.on_audio_delta = on_audio_delta
|
reachys_brain/routes/games/helpers.py
CHANGED
|
@@ -4,6 +4,7 @@ import asyncio
|
|
| 4 |
import logging
|
| 5 |
import math
|
| 6 |
import random
|
|
|
|
| 7 |
from typing import Optional
|
| 8 |
|
| 9 |
import httpx
|
|
@@ -50,6 +51,7 @@ async def speak_and_animate(text: str, reaction: str):
|
|
| 50 |
try:
|
| 51 |
from ...tts_service import get_tts_service
|
| 52 |
from ...animation_coordinator import get_animation_coordinator
|
|
|
|
| 53 |
|
| 54 |
# Map reactions to animations
|
| 55 |
animation_map = {
|
|
@@ -77,8 +79,15 @@ async def speak_and_animate(text: str, reaction: str):
|
|
| 77 |
tts = get_tts_service()
|
| 78 |
if tts:
|
| 79 |
logger.info(f"🎭 Speaking text via TTS...")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 80 |
# Use speak_async for non-blocking speech
|
| 81 |
-
tts.speak_async(text)
|
| 82 |
logger.info(f"🎭 TTS started (async)")
|
| 83 |
else:
|
| 84 |
logger.warning("🎭 No TTS service available")
|
|
|
|
| 4 |
import logging
|
| 5 |
import math
|
| 6 |
import random
|
| 7 |
+
import shutil
|
| 8 |
from typing import Optional
|
| 9 |
|
| 10 |
import httpx
|
|
|
|
| 51 |
try:
|
| 52 |
from ...tts_service import get_tts_service
|
| 53 |
from ...animation_coordinator import get_animation_coordinator
|
| 54 |
+
from ...audio_playback import is_kids_mode_enabled
|
| 55 |
|
| 56 |
# Map reactions to animations
|
| 57 |
animation_map = {
|
|
|
|
| 79 |
tts = get_tts_service()
|
| 80 |
if tts:
|
| 81 |
logger.info(f"🎭 Speaking text via TTS...")
|
| 82 |
+
# Kids mode is implemented via Sox pitch shifting on the robot.
|
| 83 |
+
# If Sox isn't available, fall back to a kid-friendlier base voice for games
|
| 84 |
+
# so TamaReachy still sounds "with kids" when kids mode is enabled.
|
| 85 |
+
voice_override = None
|
| 86 |
+
if is_kids_mode_enabled() and shutil.which("sox") is None:
|
| 87 |
+
voice_override = "shimmer"
|
| 88 |
+
logger.info("🧒 Kids mode enabled but sox not found; using kid-friendly fallback voice for games (shimmer)")
|
| 89 |
# Use speak_async for non-blocking speech
|
| 90 |
+
tts.speak_async(text, voice_override=voice_override)
|
| 91 |
logger.info(f"🎭 TTS started (async)")
|
| 92 |
else:
|
| 93 |
logger.warning("🎭 No TTS service available")
|
reachys_brain/routes/games/models.py
CHANGED
|
@@ -188,3 +188,12 @@ class TamaReachyCheckResponse(BaseModel):
|
|
| 188 |
reaction: Optional[str] = None
|
| 189 |
error: Optional[str] = None
|
| 190 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 188 |
reaction: Optional[str] = None
|
| 189 |
error: Optional[str] = None
|
| 190 |
|
| 191 |
+
|
| 192 |
+
class TamaReachyIdleMessageResponse(BaseModel):
|
| 193 |
+
"""Response with a random idle message from TamaReachy."""
|
| 194 |
+
success: bool
|
| 195 |
+
message_type: str = "idle" # idle, fun_fact, motivational
|
| 196 |
+
message: Optional[str] = None
|
| 197 |
+
reaction: Optional[str] = None
|
| 198 |
+
error: Optional[str] = None
|
| 199 |
+
|
reachys_brain/routes/games/tamareachy.py
CHANGED
|
@@ -16,6 +16,7 @@ from .models import (
|
|
| 16 |
TamaReachyCareRequest,
|
| 17 |
TamaReachyCareResponse,
|
| 18 |
TamaReachyCheckResponse,
|
|
|
|
| 19 |
)
|
| 20 |
from .helpers import speak_and_animate
|
| 21 |
from .tamareachy_monitor import get_tamareachy_monitor
|
|
@@ -63,8 +64,8 @@ async def enable_tamareachy(request: TamaReachyEnableRequest):
|
|
| 63 |
stats = _make_stats(state)
|
| 64 |
mood = engine.get_mood(stats.model_dump())
|
| 65 |
|
| 66 |
-
# Welcome message
|
| 67 |
-
commentary =
|
| 68 |
|
| 69 |
await speak_and_animate(commentary, "excited")
|
| 70 |
|
|
@@ -90,6 +91,7 @@ async def disable_tamareachy():
|
|
| 90 |
"""Disable the TamaReachy game."""
|
| 91 |
try:
|
| 92 |
db = get_database()
|
|
|
|
| 93 |
|
| 94 |
await db.update_tamareachy_state({"enabled": False})
|
| 95 |
|
|
@@ -97,17 +99,15 @@ async def disable_tamareachy():
|
|
| 97 |
monitor = get_tamareachy_monitor()
|
| 98 |
await monitor.stop()
|
| 99 |
|
| 100 |
-
# Sad goodbye message
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
"sad"
|
| 104 |
-
)
|
| 105 |
|
| 106 |
logger.info("🐣 TamaReachy disabled")
|
| 107 |
|
| 108 |
return TamaReachyDisableResponse(
|
| 109 |
success=True,
|
| 110 |
-
message=
|
| 111 |
)
|
| 112 |
except Exception as e:
|
| 113 |
logger.error(f"Failed to disable TamaReachy: {e}")
|
|
@@ -127,7 +127,7 @@ async def get_tamareachy_status():
|
|
| 127 |
return TamaReachyStatusResponse(
|
| 128 |
success=True,
|
| 129 |
enabled=False,
|
| 130 |
-
commentary=
|
| 131 |
)
|
| 132 |
|
| 133 |
# Apply decay based on time elapsed
|
|
@@ -158,11 +158,15 @@ async def get_tamareachy_status():
|
|
| 158 |
mood = engine.get_mood(decayed_stats)
|
| 159 |
critical_need = engine.get_critical_need(decayed_stats)
|
| 160 |
|
| 161 |
-
# Generate status commentary
|
| 162 |
if critical_need:
|
| 163 |
commentary = engine.get_request_message(critical_need)
|
| 164 |
else:
|
| 165 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 166 |
|
| 167 |
return TamaReachyStatusResponse(
|
| 168 |
success=True,
|
|
@@ -342,3 +346,67 @@ async def get_monitor_status():
|
|
| 342 |
"check_interval_seconds": 5 * 60,
|
| 343 |
}
|
| 344 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
TamaReachyCareRequest,
|
| 17 |
TamaReachyCareResponse,
|
| 18 |
TamaReachyCheckResponse,
|
| 19 |
+
TamaReachyIdleMessageResponse,
|
| 20 |
)
|
| 21 |
from .helpers import speak_and_animate
|
| 22 |
from .tamareachy_monitor import get_tamareachy_monitor
|
|
|
|
| 64 |
stats = _make_stats(state)
|
| 65 |
mood = engine.get_mood(stats.model_dump())
|
| 66 |
|
| 67 |
+
# Welcome message (multilingual)
|
| 68 |
+
commentary = engine.get_system_message("enable_welcome", emoji=mood['emoji'])
|
| 69 |
|
| 70 |
await speak_and_animate(commentary, "excited")
|
| 71 |
|
|
|
|
| 91 |
"""Disable the TamaReachy game."""
|
| 92 |
try:
|
| 93 |
db = get_database()
|
| 94 |
+
engine = get_tamareachy_engine()
|
| 95 |
|
| 96 |
await db.update_tamareachy_state({"enabled": False})
|
| 97 |
|
|
|
|
| 99 |
monitor = get_tamareachy_monitor()
|
| 100 |
await monitor.stop()
|
| 101 |
|
| 102 |
+
# Sad goodbye message (multilingual)
|
| 103 |
+
goodbye_message = engine.get_system_message("disable_goodbye")
|
| 104 |
+
await speak_and_animate(goodbye_message, "sad")
|
|
|
|
|
|
|
| 105 |
|
| 106 |
logger.info("🐣 TamaReachy disabled")
|
| 107 |
|
| 108 |
return TamaReachyDisableResponse(
|
| 109 |
success=True,
|
| 110 |
+
message=engine.get_system_message("disable_message"),
|
| 111 |
)
|
| 112 |
except Exception as e:
|
| 113 |
logger.error(f"Failed to disable TamaReachy: {e}")
|
|
|
|
| 127 |
return TamaReachyStatusResponse(
|
| 128 |
success=True,
|
| 129 |
enabled=False,
|
| 130 |
+
commentary=engine.get_system_message("status_sleeping"),
|
| 131 |
)
|
| 132 |
|
| 133 |
# Apply decay based on time elapsed
|
|
|
|
| 158 |
mood = engine.get_mood(decayed_stats)
|
| 159 |
critical_need = engine.get_critical_need(decayed_stats)
|
| 160 |
|
| 161 |
+
# Generate status commentary (multilingual)
|
| 162 |
if critical_need:
|
| 163 |
commentary = engine.get_request_message(critical_need)
|
| 164 |
else:
|
| 165 |
+
# Use translated mood name in status message
|
| 166 |
+
translated_mood = mood.get("translated_name", mood["name"])
|
| 167 |
+
commentary = engine.get_system_message(
|
| 168 |
+
"status_mood", mood=translated_mood, emoji=mood['emoji']
|
| 169 |
+
)
|
| 170 |
|
| 171 |
return TamaReachyStatusResponse(
|
| 172 |
success=True,
|
|
|
|
| 346 |
"check_interval_seconds": 5 * 60,
|
| 347 |
}
|
| 348 |
|
| 349 |
+
|
| 350 |
+
@router.get("/idle-message", response_model=TamaReachyIdleMessageResponse)
|
| 351 |
+
async def get_idle_message():
|
| 352 |
+
"""Get a random idle message from TamaReachy.
|
| 353 |
+
|
| 354 |
+
This endpoint provides fun, spontaneous messages that TamaReachy can
|
| 355 |
+
share with the user, including fun facts and motivational quotes.
|
| 356 |
+
"""
|
| 357 |
+
try:
|
| 358 |
+
db = get_database()
|
| 359 |
+
engine = get_tamareachy_engine()
|
| 360 |
+
|
| 361 |
+
state = await db.get_tamareachy_state()
|
| 362 |
+
|
| 363 |
+
if not state.get("enabled"):
|
| 364 |
+
return TamaReachyIdleMessageResponse(
|
| 365 |
+
success=False,
|
| 366 |
+
error="TamaReachy is not enabled!",
|
| 367 |
+
)
|
| 368 |
+
|
| 369 |
+
# Get current stats to determine message type
|
| 370 |
+
stats_dict = {
|
| 371 |
+
"hunger": state.get("hunger", 100),
|
| 372 |
+
"thirst": state.get("thirst", 100),
|
| 373 |
+
"happiness": state.get("happiness", 100),
|
| 374 |
+
"energy": state.get("energy", 100),
|
| 375 |
+
"boredom": state.get("boredom", 100),
|
| 376 |
+
"social": state.get("social", 100),
|
| 377 |
+
"health": state.get("health", 100),
|
| 378 |
+
"cleanliness": state.get("cleanliness", 100),
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
mood = engine.get_mood(stats_dict)
|
| 382 |
+
average = sum(stats_dict.values()) / len(stats_dict)
|
| 383 |
+
|
| 384 |
+
# Get a message based on mood
|
| 385 |
+
message = engine.get_random_communication(average)
|
| 386 |
+
|
| 387 |
+
# Determine message type for the UI
|
| 388 |
+
if "Did you know" in message or "Fun fact" in message:
|
| 389 |
+
msg_type = "fun_fact"
|
| 390 |
+
reaction = "happy"
|
| 391 |
+
elif any(word in message.lower() for word in ["believe", "proud", "capable", "stronger"]):
|
| 392 |
+
msg_type = "motivational"
|
| 393 |
+
reaction = "happy"
|
| 394 |
+
else:
|
| 395 |
+
msg_type = "idle"
|
| 396 |
+
reaction = mood.get("animation", "attention")
|
| 397 |
+
|
| 398 |
+
# Optionally trigger speech
|
| 399 |
+
await speak_and_animate(message, reaction)
|
| 400 |
+
|
| 401 |
+
logger.info(f"🐣 TamaReachy idle message: {msg_type}")
|
| 402 |
+
|
| 403 |
+
return TamaReachyIdleMessageResponse(
|
| 404 |
+
success=True,
|
| 405 |
+
message_type=msg_type,
|
| 406 |
+
message=message,
|
| 407 |
+
reaction=reaction,
|
| 408 |
+
)
|
| 409 |
+
except Exception as e:
|
| 410 |
+
logger.error(f"TamaReachy idle message failed: {e}")
|
| 411 |
+
return TamaReachyIdleMessageResponse(success=False, error=str(e))
|
| 412 |
+
|
reachys_brain/routes/openai_config.py
CHANGED
|
@@ -7,10 +7,35 @@ from typing import Optional
|
|
| 7 |
from fastapi import APIRouter, HTTPException
|
| 8 |
from pydantic import BaseModel
|
| 9 |
|
|
|
|
|
|
|
| 10 |
logger = logging.getLogger(__name__)
|
| 11 |
|
| 12 |
router = APIRouter(prefix="/openai", tags=["OpenAI"])
|
| 13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
|
| 15 |
class APIKeyRequest(BaseModel):
|
| 16 |
"""Request model for setting API key."""
|
|
@@ -72,8 +97,8 @@ async def get_api_key_status() -> APIKeyResponse:
|
|
| 72 |
async def set_api_key(request: APIKeyRequest) -> APIKeyResponse:
|
| 73 |
"""Set the OpenAI API key.
|
| 74 |
|
| 75 |
-
The key is stored in the environment variable
|
| 76 |
-
|
| 77 |
"""
|
| 78 |
if not request.api_key:
|
| 79 |
raise HTTPException(status_code=400, detail="API key cannot be empty")
|
|
@@ -84,7 +109,14 @@ async def set_api_key(request: APIKeyRequest) -> APIKeyResponse:
|
|
| 84 |
# Store in environment
|
| 85 |
os.environ["OPENAI_API_KEY"] = request.api_key
|
| 86 |
|
| 87 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 88 |
|
| 89 |
# Mask the key for the response
|
| 90 |
masked = request.api_key[:8] + "..." + request.api_key[-4:]
|
|
|
|
| 7 |
from fastapi import APIRouter, HTTPException
|
| 8 |
from pydantic import BaseModel
|
| 9 |
|
| 10 |
+
from ..database import get_database
|
| 11 |
+
|
| 12 |
logger = logging.getLogger(__name__)
|
| 13 |
|
| 14 |
router = APIRouter(prefix="/openai", tags=["OpenAI"])
|
| 15 |
|
| 16 |
+
# Database key for persisting API key
|
| 17 |
+
OPENAI_API_KEY_DB_KEY = "openai_api_key"
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
async def init_openai_api_key() -> None:
|
| 21 |
+
"""Load OpenAI API key from database on startup.
|
| 22 |
+
|
| 23 |
+
Called during app initialization to restore the API key from
|
| 24 |
+
persistent storage.
|
| 25 |
+
"""
|
| 26 |
+
try:
|
| 27 |
+
db = get_database()
|
| 28 |
+
api_key = await db.get_user_setting(OPENAI_API_KEY_DB_KEY)
|
| 29 |
+
|
| 30 |
+
if api_key and api_key.startswith("sk-"):
|
| 31 |
+
os.environ["OPENAI_API_KEY"] = api_key
|
| 32 |
+
masked = api_key[:8] + "..." + api_key[-4:] if len(api_key) > 12 else "***"
|
| 33 |
+
logger.info(f"🔑 Loaded OpenAI API key from database: {masked}")
|
| 34 |
+
else:
|
| 35 |
+
logger.info("🔑 No OpenAI API key found in database")
|
| 36 |
+
except Exception as e:
|
| 37 |
+
logger.warning(f"Failed to load API key from database: {e}")
|
| 38 |
+
|
| 39 |
|
| 40 |
class APIKeyRequest(BaseModel):
|
| 41 |
"""Request model for setting API key."""
|
|
|
|
| 97 |
async def set_api_key(request: APIKeyRequest) -> APIKeyResponse:
|
| 98 |
"""Set the OpenAI API key.
|
| 99 |
|
| 100 |
+
The key is stored in the environment variable and persisted to the database
|
| 101 |
+
so it survives restarts.
|
| 102 |
"""
|
| 103 |
if not request.api_key:
|
| 104 |
raise HTTPException(status_code=400, detail="API key cannot be empty")
|
|
|
|
| 109 |
# Store in environment
|
| 110 |
os.environ["OPENAI_API_KEY"] = request.api_key
|
| 111 |
|
| 112 |
+
# Persist to database for survival across restarts
|
| 113 |
+
try:
|
| 114 |
+
db = get_database()
|
| 115 |
+
await db.set_user_setting(OPENAI_API_KEY_DB_KEY, request.api_key)
|
| 116 |
+
logger.info("OpenAI API key configured and persisted")
|
| 117 |
+
except Exception as e:
|
| 118 |
+
logger.warning(f"Failed to persist API key to database: {e}")
|
| 119 |
+
logger.info("OpenAI API key configured (not persisted)")
|
| 120 |
|
| 121 |
# Mask the key for the response
|
| 122 |
masked = request.api_key[:8] + "..." + request.api_key[-4:]
|
reachys_brain/routes/power.py
CHANGED
|
@@ -209,8 +209,9 @@ async def reflash_motors() -> ReflashMotorsResponse:
|
|
| 209 |
|
| 210 |
This will:
|
| 211 |
1. Stop the daemon backend
|
| 212 |
-
2.
|
| 213 |
-
3.
|
|
|
|
| 214 |
|
| 215 |
Use this when motors are not responding or have incorrect settings.
|
| 216 |
"""
|
|
@@ -231,51 +232,93 @@ async def reflash_motors() -> ReflashMotorsResponse:
|
|
| 231 |
except Exception as e:
|
| 232 |
logger.warning(f"Error stopping daemon: {e}")
|
| 233 |
|
| 234 |
-
# Wait for daemon to fully stop
|
| 235 |
-
|
|
|
|
|
|
|
| 236 |
|
| 237 |
-
# Step 2: Run the reflash motors command
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 248 |
)
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 260 |
raise HTTPException(
|
| 261 |
status_code=500,
|
| 262 |
-
detail=
|
| 263 |
)
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 275 |
)
|
| 276 |
-
|
| 277 |
-
|
| 278 |
-
|
|
|
|
| 279 |
|
| 280 |
# Step 3: Start the daemon backend again
|
| 281 |
try:
|
|
|
|
| 209 |
|
| 210 |
This will:
|
| 211 |
1. Stop the daemon backend
|
| 212 |
+
2. Wait for serial port to be released
|
| 213 |
+
3. Run reachy-mini-reflash-motors with retries
|
| 214 |
+
4. Start the daemon backend again
|
| 215 |
|
| 216 |
Use this when motors are not responding or have incorrect settings.
|
| 217 |
"""
|
|
|
|
| 232 |
except Exception as e:
|
| 233 |
logger.warning(f"Error stopping daemon: {e}")
|
| 234 |
|
| 235 |
+
# Wait longer for daemon to fully stop and release serial port
|
| 236 |
+
# This helps prevent "No motor found" errors due to port still being held
|
| 237 |
+
logger.info("Waiting for serial port to be released...")
|
| 238 |
+
await asyncio.sleep(5)
|
| 239 |
|
| 240 |
+
# Step 2: Run the reflash motors command with retries
|
| 241 |
+
max_attempts = 3
|
| 242 |
+
last_error = None
|
| 243 |
+
output = ""
|
| 244 |
+
motors_found = 0
|
| 245 |
+
config_correct = 0
|
| 246 |
+
|
| 247 |
+
for attempt in range(1, max_attempts + 1):
|
| 248 |
+
try:
|
| 249 |
+
logger.info(f"Reflash attempt {attempt}/{max_attempts}...")
|
| 250 |
+
|
| 251 |
+
# Run the reflash command
|
| 252 |
+
result = await asyncio.get_event_loop().run_in_executor(
|
| 253 |
+
None,
|
| 254 |
+
lambda: subprocess.run(
|
| 255 |
+
["/venvs/mini_daemon/bin/reachy-mini-reflash-motors",
|
| 256 |
+
"--serialport", "/dev/ttyAMA3"],
|
| 257 |
+
capture_output=True,
|
| 258 |
+
text=True,
|
| 259 |
+
timeout=60
|
| 260 |
+
)
|
| 261 |
)
|
| 262 |
+
|
| 263 |
+
output = result.stdout + result.stderr
|
| 264 |
+
logger.info(f"Reflash attempt {attempt} output: {output}")
|
| 265 |
+
|
| 266 |
+
# Count successful motors from output
|
| 267 |
+
motors_found = output.count("Found motor with ID")
|
| 268 |
+
config_correct = output.count("Configuration is correct")
|
| 269 |
+
|
| 270 |
+
# Check for "No motor found" error
|
| 271 |
+
if "No motor found" in output:
|
| 272 |
+
last_error = "No motor found on port"
|
| 273 |
+
logger.warning(f"Attempt {attempt}: No motor found, will retry...")
|
| 274 |
+
if attempt < max_attempts:
|
| 275 |
+
# Wait before retry - port might still be busy
|
| 276 |
+
await asyncio.sleep(3)
|
| 277 |
+
continue
|
| 278 |
+
|
| 279 |
+
if result.returncode == 0 and motors_found > 0:
|
| 280 |
+
logger.info(f"Reflash successful on attempt {attempt}: {motors_found} motors found")
|
| 281 |
+
break
|
| 282 |
+
elif result.returncode != 0:
|
| 283 |
+
last_error = output[-500:] if len(output) > 500 else output
|
| 284 |
+
logger.warning(f"Attempt {attempt} failed with code {result.returncode}")
|
| 285 |
+
if attempt < max_attempts:
|
| 286 |
+
await asyncio.sleep(3)
|
| 287 |
+
continue
|
| 288 |
+
|
| 289 |
+
except subprocess.TimeoutExpired:
|
| 290 |
+
last_error = "Command timed out"
|
| 291 |
+
logger.warning(f"Attempt {attempt}: Reflash command timed out")
|
| 292 |
+
if attempt < max_attempts:
|
| 293 |
+
await asyncio.sleep(2)
|
| 294 |
+
continue
|
| 295 |
+
except FileNotFoundError:
|
| 296 |
+
logger.error("Reflash command not found")
|
| 297 |
raise HTTPException(
|
| 298 |
status_code=500,
|
| 299 |
+
detail="reachy-mini-reflash-motors command not found"
|
| 300 |
)
|
| 301 |
+
except Exception as e:
|
| 302 |
+
last_error = str(e)
|
| 303 |
+
logger.warning(f"Attempt {attempt}: Error during reflash: {e}")
|
| 304 |
+
if attempt < max_attempts:
|
| 305 |
+
await asyncio.sleep(2)
|
| 306 |
+
continue
|
| 307 |
+
|
| 308 |
+
# Check if all attempts failed
|
| 309 |
+
if motors_found == 0:
|
| 310 |
+
error_detail = (
|
| 311 |
+
f"Could not find motors after {max_attempts} attempts. "
|
| 312 |
+
"Please check:\n"
|
| 313 |
+
"1. Motor cables are securely connected\n"
|
| 314 |
+
"2. Motors are powered on\n"
|
| 315 |
+
"3. Try unplugging and replugging the motor cables\n"
|
| 316 |
+
f"Last error: {last_error}"
|
| 317 |
)
|
| 318 |
+
logger.error(f"All reflash attempts failed: {last_error}")
|
| 319 |
+
raise HTTPException(status_code=500, detail=error_detail)
|
| 320 |
+
|
| 321 |
+
logger.info(f"Reflash complete: {motors_found} motors found, {config_correct} configured")
|
| 322 |
|
| 323 |
# Step 3: Start the daemon backend again
|
| 324 |
try:
|
reachys_brain/routes/voice.py
CHANGED
|
@@ -12,7 +12,7 @@ from fastapi import APIRouter, HTTPException
|
|
| 12 |
import httpx
|
| 13 |
|
| 14 |
from ..models import Voice, VoiceRequest, VoiceResponse, VoicesListResponse
|
| 15 |
-
from ..audio_playback import is_kids_mode_enabled, set_kids_mode
|
| 16 |
|
| 17 |
logger = logging.getLogger(__name__)
|
| 18 |
|
|
@@ -23,6 +23,53 @@ VOICE_ID_KEY = "voice_id"
|
|
| 23 |
PREFERRED_LANGUAGE_KEY = "preferred_language"
|
| 24 |
KIDS_MODE_KEY = "kids_mode_enabled"
|
| 25 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
# OpenAI TTS API endpoint
|
| 27 |
OPENAI_TTS_URL = "https://api.openai.com/v1/audio/speech"
|
| 28 |
|
|
@@ -94,6 +141,14 @@ class VoiceSettings:
|
|
| 94 |
self._current_language: str = "en"
|
| 95 |
self._preferred_language: str = "en"
|
| 96 |
self._initialized: bool = False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 97 |
|
| 98 |
@property
|
| 99 |
def current_voice(self) -> str:
|
|
@@ -112,6 +167,21 @@ class VoiceSettings:
|
|
| 112 |
"""Check if kids mode (pitch shifting) is enabled."""
|
| 113 |
return is_kids_mode_enabled()
|
| 114 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 115 |
def set_voice(self, voice_id: str) -> bool:
|
| 116 |
"""Set the current OpenAI voice. Returns True if successful."""
|
| 117 |
valid_ids = [v["id"] for v in OPENAI_VOICES]
|
|
@@ -147,6 +217,121 @@ class VoiceSettings:
|
|
| 147 |
# Persist to database in background
|
| 148 |
self._persist_setting(KIDS_MODE_KEY, str(enabled).lower())
|
| 149 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 150 |
def _persist_setting(self, key: str, value: str) -> None:
|
| 151 |
"""Persist a setting to the database in a background task."""
|
| 152 |
try:
|
|
@@ -205,6 +390,37 @@ async def init_voice_settings() -> None:
|
|
| 205 |
set_kids_mode(kids_mode_enabled)
|
| 206 |
logger.info(f"🧒 Loaded kids mode from database: {kids_mode_enabled}")
|
| 207 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 208 |
_voice_settings._initialized = True
|
| 209 |
logger.info("✅ Voice settings initialized from database")
|
| 210 |
|
|
@@ -275,6 +491,40 @@ def set_preferred_language(language: str) -> bool:
|
|
| 275 |
return _voice_settings.set_preferred_language(language)
|
| 276 |
|
| 277 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 278 |
@router.get("/voices", response_model=VoicesListResponse)
|
| 279 |
async def get_voices() -> VoicesListResponse:
|
| 280 |
"""Get available OpenAI Realtime voices and current selection.
|
|
@@ -453,6 +703,122 @@ async def set_kids_mode_endpoint(enabled: bool = False) -> dict:
|
|
| 453 |
}
|
| 454 |
|
| 455 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 456 |
# Sample phrases by language for voice testing
|
| 457 |
VOICE_SAMPLE_PHRASES = {
|
| 458 |
"en": {
|
|
@@ -725,7 +1091,8 @@ async def _play_pcm_audio(audio_data: bytes) -> None:
|
|
| 725 |
"-", # Input from stdin
|
| 726 |
"-t", "raw",
|
| 727 |
"-", # Output to stdout
|
| 728 |
-
"pitch",
|
|
|
|
| 729 |
]
|
| 730 |
|
| 731 |
# Create sox process
|
|
|
|
| 12 |
import httpx
|
| 13 |
|
| 14 |
from ..models import Voice, VoiceRequest, VoiceResponse, VoicesListResponse
|
| 15 |
+
from ..audio_playback import is_kids_mode_enabled, set_kids_mode, KIDS_MODE_PITCH_CENTS, KIDS_MODE_TEMPO
|
| 16 |
|
| 17 |
logger = logging.getLogger(__name__)
|
| 18 |
|
|
|
|
| 23 |
PREFERRED_LANGUAGE_KEY = "preferred_language"
|
| 24 |
KIDS_MODE_KEY = "kids_mode_enabled"
|
| 25 |
|
| 26 |
+
# Database keys for VAD (Voice Activity Detection) settings
|
| 27 |
+
VAD_THRESHOLD_KEY = "vad_threshold"
|
| 28 |
+
VAD_SILENCE_MS_KEY = "vad_silence_ms"
|
| 29 |
+
VAD_PREFIX_MS_KEY = "vad_prefix_ms"
|
| 30 |
+
|
| 31 |
+
# Default VAD values
|
| 32 |
+
# Increased threshold to reduce sensitivity to background noise and echo
|
| 33 |
+
DEFAULT_VAD_THRESHOLD = 0.8 # 0.0-1.0, higher = less sensitive (increased from 0.7)
|
| 34 |
+
DEFAULT_VAD_SILENCE_MS = 800 # Silence duration before responding (ms) (increased from 700)
|
| 35 |
+
DEFAULT_VAD_PREFIX_MS = 300 # Audio padding before speech detection (ms)
|
| 36 |
+
|
| 37 |
+
# VAD Presets for different environments
|
| 38 |
+
VAD_PRESETS = {
|
| 39 |
+
"quiet_room": {
|
| 40 |
+
"id": "quiet_room",
|
| 41 |
+
"name": "Quiet Room",
|
| 42 |
+
"description": "More sensitive, responds faster. Best for quiet environments.",
|
| 43 |
+
"threshold": 0.5,
|
| 44 |
+
"silence_ms": 500,
|
| 45 |
+
"prefix_ms": 300,
|
| 46 |
+
},
|
| 47 |
+
"normal": {
|
| 48 |
+
"id": "normal",
|
| 49 |
+
"name": "Normal",
|
| 50 |
+
"description": "Balanced settings for typical use.",
|
| 51 |
+
"threshold": 0.8,
|
| 52 |
+
"silence_ms": 800,
|
| 53 |
+
"prefix_ms": 300,
|
| 54 |
+
},
|
| 55 |
+
"noisy": {
|
| 56 |
+
"id": "noisy",
|
| 57 |
+
"name": "Noisy Environment",
|
| 58 |
+
"description": "Less sensitive, ignores background noise better.",
|
| 59 |
+
"threshold": 0.85,
|
| 60 |
+
"silence_ms": 1000,
|
| 61 |
+
"prefix_ms": 400,
|
| 62 |
+
},
|
| 63 |
+
"conference": {
|
| 64 |
+
"id": "conference",
|
| 65 |
+
"name": "Conference Room",
|
| 66 |
+
"description": "Very strict, ignores most background voices.",
|
| 67 |
+
"threshold": 0.9,
|
| 68 |
+
"silence_ms": 1200,
|
| 69 |
+
"prefix_ms": 500,
|
| 70 |
+
},
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
# OpenAI TTS API endpoint
|
| 74 |
OPENAI_TTS_URL = "https://api.openai.com/v1/audio/speech"
|
| 75 |
|
|
|
|
| 141 |
self._current_language: str = "en"
|
| 142 |
self._preferred_language: str = "en"
|
| 143 |
self._initialized: bool = False
|
| 144 |
+
|
| 145 |
+
# VAD settings
|
| 146 |
+
self._vad_threshold: float = DEFAULT_VAD_THRESHOLD
|
| 147 |
+
self._vad_silence_ms: int = DEFAULT_VAD_SILENCE_MS
|
| 148 |
+
self._vad_prefix_ms: int = DEFAULT_VAD_PREFIX_MS
|
| 149 |
+
|
| 150 |
+
# Callback for VAD settings changes (set by OpenAIRealtimeService)
|
| 151 |
+
self.on_vad_settings_changed: Optional[callable] = None
|
| 152 |
|
| 153 |
@property
|
| 154 |
def current_voice(self) -> str:
|
|
|
|
| 167 |
"""Check if kids mode (pitch shifting) is enabled."""
|
| 168 |
return is_kids_mode_enabled()
|
| 169 |
|
| 170 |
+
@property
|
| 171 |
+
def vad_threshold(self) -> float:
|
| 172 |
+
"""Get the VAD threshold (0.0-1.0, higher = less sensitive)."""
|
| 173 |
+
return self._vad_threshold
|
| 174 |
+
|
| 175 |
+
@property
|
| 176 |
+
def vad_silence_ms(self) -> int:
|
| 177 |
+
"""Get the VAD silence duration in milliseconds."""
|
| 178 |
+
return self._vad_silence_ms
|
| 179 |
+
|
| 180 |
+
@property
|
| 181 |
+
def vad_prefix_ms(self) -> int:
|
| 182 |
+
"""Get the VAD prefix padding in milliseconds."""
|
| 183 |
+
return self._vad_prefix_ms
|
| 184 |
+
|
| 185 |
def set_voice(self, voice_id: str) -> bool:
|
| 186 |
"""Set the current OpenAI voice. Returns True if successful."""
|
| 187 |
valid_ids = [v["id"] for v in OPENAI_VOICES]
|
|
|
|
| 217 |
# Persist to database in background
|
| 218 |
self._persist_setting(KIDS_MODE_KEY, str(enabled).lower())
|
| 219 |
|
| 220 |
+
def set_vad_threshold(self, threshold: float) -> bool:
|
| 221 |
+
"""Set the VAD threshold (0.0-1.0, higher = less sensitive).
|
| 222 |
+
|
| 223 |
+
Returns True if successful.
|
| 224 |
+
"""
|
| 225 |
+
if not 0.0 <= threshold <= 1.0:
|
| 226 |
+
return False
|
| 227 |
+
self._vad_threshold = threshold
|
| 228 |
+
logger.info(f"🎤 VAD threshold set to: {threshold}")
|
| 229 |
+
self._persist_setting(VAD_THRESHOLD_KEY, str(threshold))
|
| 230 |
+
self._notify_vad_change()
|
| 231 |
+
return True
|
| 232 |
+
|
| 233 |
+
def set_vad_silence_ms(self, silence_ms: int) -> bool:
|
| 234 |
+
"""Set the VAD silence duration in milliseconds (200-3000).
|
| 235 |
+
|
| 236 |
+
Returns True if successful.
|
| 237 |
+
"""
|
| 238 |
+
if not 200 <= silence_ms <= 3000:
|
| 239 |
+
return False
|
| 240 |
+
self._vad_silence_ms = silence_ms
|
| 241 |
+
logger.info(f"🎤 VAD silence duration set to: {silence_ms}ms")
|
| 242 |
+
self._persist_setting(VAD_SILENCE_MS_KEY, str(silence_ms))
|
| 243 |
+
self._notify_vad_change()
|
| 244 |
+
return True
|
| 245 |
+
|
| 246 |
+
def set_vad_prefix_ms(self, prefix_ms: int) -> bool:
|
| 247 |
+
"""Set the VAD prefix padding in milliseconds (100-1000).
|
| 248 |
+
|
| 249 |
+
Returns True if successful.
|
| 250 |
+
"""
|
| 251 |
+
if not 100 <= prefix_ms <= 1000:
|
| 252 |
+
return False
|
| 253 |
+
self._vad_prefix_ms = prefix_ms
|
| 254 |
+
logger.info(f"🎤 VAD prefix padding set to: {prefix_ms}ms")
|
| 255 |
+
self._persist_setting(VAD_PREFIX_MS_KEY, str(prefix_ms))
|
| 256 |
+
self._notify_vad_change()
|
| 257 |
+
return True
|
| 258 |
+
|
| 259 |
+
def set_vad_settings(
|
| 260 |
+
self,
|
| 261 |
+
threshold: Optional[float] = None,
|
| 262 |
+
silence_ms: Optional[int] = None,
|
| 263 |
+
prefix_ms: Optional[int] = None,
|
| 264 |
+
) -> bool:
|
| 265 |
+
"""Set multiple VAD settings at once.
|
| 266 |
+
|
| 267 |
+
Only provided values are updated. Returns True if all valid.
|
| 268 |
+
"""
|
| 269 |
+
success = True
|
| 270 |
+
changed = False
|
| 271 |
+
|
| 272 |
+
if threshold is not None:
|
| 273 |
+
if 0.0 <= threshold <= 1.0:
|
| 274 |
+
self._vad_threshold = threshold
|
| 275 |
+
self._persist_setting(VAD_THRESHOLD_KEY, str(threshold))
|
| 276 |
+
changed = True
|
| 277 |
+
else:
|
| 278 |
+
success = False
|
| 279 |
+
|
| 280 |
+
if silence_ms is not None:
|
| 281 |
+
if 200 <= silence_ms <= 3000:
|
| 282 |
+
self._vad_silence_ms = silence_ms
|
| 283 |
+
self._persist_setting(VAD_SILENCE_MS_KEY, str(silence_ms))
|
| 284 |
+
changed = True
|
| 285 |
+
else:
|
| 286 |
+
success = False
|
| 287 |
+
|
| 288 |
+
if prefix_ms is not None:
|
| 289 |
+
if 100 <= prefix_ms <= 1000:
|
| 290 |
+
self._vad_prefix_ms = prefix_ms
|
| 291 |
+
self._persist_setting(VAD_PREFIX_MS_KEY, str(prefix_ms))
|
| 292 |
+
changed = True
|
| 293 |
+
else:
|
| 294 |
+
success = False
|
| 295 |
+
|
| 296 |
+
if changed:
|
| 297 |
+
logger.info(
|
| 298 |
+
f"🎤 VAD settings updated: threshold={self._vad_threshold}, "
|
| 299 |
+
f"silence={self._vad_silence_ms}ms, prefix={self._vad_prefix_ms}ms"
|
| 300 |
+
)
|
| 301 |
+
self._notify_vad_change()
|
| 302 |
+
|
| 303 |
+
return success
|
| 304 |
+
|
| 305 |
+
def apply_vad_preset(self, preset_id: str) -> bool:
|
| 306 |
+
"""Apply a VAD preset by ID.
|
| 307 |
+
|
| 308 |
+
Available presets: quiet_room, normal, noisy, conference
|
| 309 |
+
"""
|
| 310 |
+
preset = VAD_PRESETS.get(preset_id)
|
| 311 |
+
if not preset:
|
| 312 |
+
return False
|
| 313 |
+
|
| 314 |
+
self._vad_threshold = preset["threshold"]
|
| 315 |
+
self._vad_silence_ms = preset["silence_ms"]
|
| 316 |
+
self._vad_prefix_ms = preset["prefix_ms"]
|
| 317 |
+
|
| 318 |
+
# Persist all settings
|
| 319 |
+
self._persist_setting(VAD_THRESHOLD_KEY, str(preset["threshold"]))
|
| 320 |
+
self._persist_setting(VAD_SILENCE_MS_KEY, str(preset["silence_ms"]))
|
| 321 |
+
self._persist_setting(VAD_PREFIX_MS_KEY, str(preset["prefix_ms"]))
|
| 322 |
+
|
| 323 |
+
logger.info(f"🎤 Applied VAD preset: {preset['name']}")
|
| 324 |
+
self._notify_vad_change()
|
| 325 |
+
return True
|
| 326 |
+
|
| 327 |
+
def _notify_vad_change(self) -> None:
|
| 328 |
+
"""Notify listeners that VAD settings have changed."""
|
| 329 |
+
if self.on_vad_settings_changed:
|
| 330 |
+
try:
|
| 331 |
+
self.on_vad_settings_changed()
|
| 332 |
+
except Exception as e:
|
| 333 |
+
logger.error(f"Error in VAD change callback: {e}")
|
| 334 |
+
|
| 335 |
def _persist_setting(self, key: str, value: str) -> None:
|
| 336 |
"""Persist a setting to the database in a background task."""
|
| 337 |
try:
|
|
|
|
| 390 |
set_kids_mode(kids_mode_enabled)
|
| 391 |
logger.info(f"🧒 Loaded kids mode from database: {kids_mode_enabled}")
|
| 392 |
|
| 393 |
+
# Load VAD settings
|
| 394 |
+
vad_threshold = await db.get_user_setting(VAD_THRESHOLD_KEY)
|
| 395 |
+
if vad_threshold:
|
| 396 |
+
try:
|
| 397 |
+
threshold = float(vad_threshold)
|
| 398 |
+
if 0.0 <= threshold <= 1.0:
|
| 399 |
+
_voice_settings._vad_threshold = threshold
|
| 400 |
+
logger.info(f"🎤 Loaded VAD threshold from database: {threshold}")
|
| 401 |
+
except ValueError:
|
| 402 |
+
pass
|
| 403 |
+
|
| 404 |
+
vad_silence = await db.get_user_setting(VAD_SILENCE_MS_KEY)
|
| 405 |
+
if vad_silence:
|
| 406 |
+
try:
|
| 407 |
+
silence_ms = int(vad_silence)
|
| 408 |
+
if 200 <= silence_ms <= 3000:
|
| 409 |
+
_voice_settings._vad_silence_ms = silence_ms
|
| 410 |
+
logger.info(f"🎤 Loaded VAD silence duration from database: {silence_ms}ms")
|
| 411 |
+
except ValueError:
|
| 412 |
+
pass
|
| 413 |
+
|
| 414 |
+
vad_prefix = await db.get_user_setting(VAD_PREFIX_MS_KEY)
|
| 415 |
+
if vad_prefix:
|
| 416 |
+
try:
|
| 417 |
+
prefix_ms = int(vad_prefix)
|
| 418 |
+
if 100 <= prefix_ms <= 1000:
|
| 419 |
+
_voice_settings._vad_prefix_ms = prefix_ms
|
| 420 |
+
logger.info(f"🎤 Loaded VAD prefix from database: {prefix_ms}ms")
|
| 421 |
+
except ValueError:
|
| 422 |
+
pass
|
| 423 |
+
|
| 424 |
_voice_settings._initialized = True
|
| 425 |
logger.info("✅ Voice settings initialized from database")
|
| 426 |
|
|
|
|
| 491 |
return _voice_settings.set_preferred_language(language)
|
| 492 |
|
| 493 |
|
| 494 |
+
# MARK: - VAD Settings Public API Functions
|
| 495 |
+
|
| 496 |
+
def get_vad_threshold() -> float:
|
| 497 |
+
"""Get the current VAD threshold."""
|
| 498 |
+
return _voice_settings.vad_threshold
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
def get_vad_silence_ms() -> int:
|
| 502 |
+
"""Get the current VAD silence duration in ms."""
|
| 503 |
+
return _voice_settings.vad_silence_ms
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
def get_vad_prefix_ms() -> int:
|
| 507 |
+
"""Get the current VAD prefix padding in ms."""
|
| 508 |
+
return _voice_settings.vad_prefix_ms
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
def get_vad_settings() -> dict:
|
| 512 |
+
"""Get all VAD settings as a dictionary."""
|
| 513 |
+
return {
|
| 514 |
+
"threshold": _voice_settings.vad_threshold,
|
| 515 |
+
"silence_ms": _voice_settings.vad_silence_ms,
|
| 516 |
+
"prefix_ms": _voice_settings.vad_prefix_ms,
|
| 517 |
+
}
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
def set_vad_settings_callback(callback: callable) -> None:
|
| 521 |
+
"""Set the callback for VAD settings changes.
|
| 522 |
+
|
| 523 |
+
Called when VAD settings are updated so OpenAI session can be reconfigured.
|
| 524 |
+
"""
|
| 525 |
+
_voice_settings.on_vad_settings_changed = callback
|
| 526 |
+
|
| 527 |
+
|
| 528 |
@router.get("/voices", response_model=VoicesListResponse)
|
| 529 |
async def get_voices() -> VoicesListResponse:
|
| 530 |
"""Get available OpenAI Realtime voices and current selection.
|
|
|
|
| 703 |
}
|
| 704 |
|
| 705 |
|
| 706 |
+
# MARK: - VAD Settings Endpoints
|
| 707 |
+
|
| 708 |
+
@router.get("/voice/vad-settings")
|
| 709 |
+
async def get_vad_settings_endpoint() -> dict:
|
| 710 |
+
"""Get the current VAD (Voice Activity Detection) settings.
|
| 711 |
+
|
| 712 |
+
VAD settings control how Reachy detects when you're speaking:
|
| 713 |
+
- threshold: Sensitivity (0.0-1.0, higher = less sensitive to background noise)
|
| 714 |
+
- silence_ms: How long to wait after you stop speaking before responding
|
| 715 |
+
- prefix_ms: How much audio to buffer before speech is detected
|
| 716 |
+
|
| 717 |
+
Returns:
|
| 718 |
+
Current VAD settings and available presets.
|
| 719 |
+
"""
|
| 720 |
+
return {
|
| 721 |
+
"threshold": _voice_settings.vad_threshold,
|
| 722 |
+
"silence_ms": _voice_settings.vad_silence_ms,
|
| 723 |
+
"prefix_ms": _voice_settings.vad_prefix_ms,
|
| 724 |
+
"presets": list(VAD_PRESETS.values()),
|
| 725 |
+
}
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
@router.post("/voice/vad-settings")
|
| 729 |
+
async def set_vad_settings_endpoint(
|
| 730 |
+
threshold: Optional[float] = None,
|
| 731 |
+
silence_ms: Optional[int] = None,
|
| 732 |
+
prefix_ms: Optional[int] = None,
|
| 733 |
+
) -> dict:
|
| 734 |
+
"""Update VAD (Voice Activity Detection) settings.
|
| 735 |
+
|
| 736 |
+
Adjust these settings to help Reachy work better in different environments:
|
| 737 |
+
|
| 738 |
+
- In quiet rooms: Use lower threshold (0.5) and shorter silence (500ms)
|
| 739 |
+
- In noisy environments: Use higher threshold (0.85-0.9) and longer silence (1000-1200ms)
|
| 740 |
+
|
| 741 |
+
Args:
|
| 742 |
+
threshold: Sensitivity 0.0-1.0 (higher = less sensitive to background)
|
| 743 |
+
silence_ms: Wait time before responding (200-3000ms)
|
| 744 |
+
prefix_ms: Audio buffer before speech (100-1000ms)
|
| 745 |
+
|
| 746 |
+
Returns:
|
| 747 |
+
Success status and current settings.
|
| 748 |
+
"""
|
| 749 |
+
success = _voice_settings.set_vad_settings(
|
| 750 |
+
threshold=threshold,
|
| 751 |
+
silence_ms=silence_ms,
|
| 752 |
+
prefix_ms=prefix_ms,
|
| 753 |
+
)
|
| 754 |
+
|
| 755 |
+
if success:
|
| 756 |
+
return {
|
| 757 |
+
"success": True,
|
| 758 |
+
"threshold": _voice_settings.vad_threshold,
|
| 759 |
+
"silence_ms": _voice_settings.vad_silence_ms,
|
| 760 |
+
"prefix_ms": _voice_settings.vad_prefix_ms,
|
| 761 |
+
"message": "VAD settings updated",
|
| 762 |
+
}
|
| 763 |
+
else:
|
| 764 |
+
return {
|
| 765 |
+
"success": False,
|
| 766 |
+
"threshold": _voice_settings.vad_threshold,
|
| 767 |
+
"silence_ms": _voice_settings.vad_silence_ms,
|
| 768 |
+
"prefix_ms": _voice_settings.vad_prefix_ms,
|
| 769 |
+
"message": "Invalid values. threshold: 0.0-1.0, silence_ms: 200-3000, prefix_ms: 100-1000",
|
| 770 |
+
}
|
| 771 |
+
|
| 772 |
+
|
| 773 |
+
@router.post("/voice/vad-preset")
|
| 774 |
+
async def apply_vad_preset_endpoint(preset_id: str) -> dict:
|
| 775 |
+
"""Apply a VAD preset for a specific environment.
|
| 776 |
+
|
| 777 |
+
Available presets:
|
| 778 |
+
- quiet_room: More sensitive, faster responses (for quiet environments)
|
| 779 |
+
- normal: Balanced default settings
|
| 780 |
+
- noisy: Less sensitive, ignores background noise
|
| 781 |
+
- conference: Very strict, ignores most background voices
|
| 782 |
+
|
| 783 |
+
Args:
|
| 784 |
+
preset_id: ID of the preset to apply.
|
| 785 |
+
|
| 786 |
+
Returns:
|
| 787 |
+
Success status and applied settings.
|
| 788 |
+
"""
|
| 789 |
+
if _voice_settings.apply_vad_preset(preset_id):
|
| 790 |
+
preset = VAD_PRESETS[preset_id]
|
| 791 |
+
return {
|
| 792 |
+
"success": True,
|
| 793 |
+
"preset": preset,
|
| 794 |
+
"threshold": _voice_settings.vad_threshold,
|
| 795 |
+
"silence_ms": _voice_settings.vad_silence_ms,
|
| 796 |
+
"prefix_ms": _voice_settings.vad_prefix_ms,
|
| 797 |
+
"message": f"Applied preset: {preset['name']}",
|
| 798 |
+
}
|
| 799 |
+
else:
|
| 800 |
+
return {
|
| 801 |
+
"success": False,
|
| 802 |
+
"available_presets": list(VAD_PRESETS.keys()),
|
| 803 |
+
"message": f"Unknown preset '{preset_id}'",
|
| 804 |
+
}
|
| 805 |
+
|
| 806 |
+
|
| 807 |
+
@router.get("/voice/vad-presets")
|
| 808 |
+
async def get_vad_presets_endpoint() -> dict:
|
| 809 |
+
"""Get all available VAD presets.
|
| 810 |
+
|
| 811 |
+
Returns:
|
| 812 |
+
List of available presets with their settings.
|
| 813 |
+
"""
|
| 814 |
+
return {
|
| 815 |
+
"presets": list(VAD_PRESETS.values()),
|
| 816 |
+
"current_threshold": _voice_settings.vad_threshold,
|
| 817 |
+
"current_silence_ms": _voice_settings.vad_silence_ms,
|
| 818 |
+
"current_prefix_ms": _voice_settings.vad_prefix_ms,
|
| 819 |
+
}
|
| 820 |
+
|
| 821 |
+
|
| 822 |
# Sample phrases by language for voice testing
|
| 823 |
VOICE_SAMPLE_PHRASES = {
|
| 824 |
"en": {
|
|
|
|
| 1091 |
"-", # Input from stdin
|
| 1092 |
"-t", "raw",
|
| 1093 |
"-", # Output to stdout
|
| 1094 |
+
"pitch", str(KIDS_MODE_PITCH_CENTS),
|
| 1095 |
+
"tempo", str(KIDS_MODE_TEMPO),
|
| 1096 |
]
|
| 1097 |
|
| 1098 |
# Create sox process
|
reachys_brain/server.py
CHANGED
|
@@ -4,6 +4,7 @@ This module sets up the FastAPI application and manages the application lifecycl
|
|
| 4 |
Individual endpoint handlers are organized in the routes/ package.
|
| 5 |
"""
|
| 6 |
|
|
|
|
| 7 |
import logging
|
| 8 |
from contextlib import asynccontextmanager
|
| 9 |
from pathlib import Path
|
|
@@ -50,6 +51,7 @@ from .routes import motion as motion_route
|
|
| 50 |
from .routes import power as power_route
|
| 51 |
from .routes import conversation as conversation_route
|
| 52 |
from .routes.voice import init_voice_settings
|
|
|
|
| 53 |
|
| 54 |
logger = logging.getLogger(__name__)
|
| 55 |
|
|
@@ -99,6 +101,9 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
|
|
| 99 |
# Initialize voice settings from database
|
| 100 |
await init_voice_settings()
|
| 101 |
|
|
|
|
|
|
|
|
|
|
| 102 |
# Load config
|
| 103 |
config = get_config()
|
| 104 |
|
|
@@ -155,25 +160,40 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
|
|
| 155 |
# Cleanup
|
| 156 |
logger.info("Shutting down Reachy iOS Bridge server...")
|
| 157 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 158 |
# Stop idle movement service
|
| 159 |
if idle_service:
|
| 160 |
-
|
|
|
|
|
|
|
|
|
|
| 161 |
|
| 162 |
# Stop TamaReachy monitor
|
| 163 |
tamareachy_monitor = get_tamareachy_monitor()
|
| 164 |
if tamareachy_monitor.is_running:
|
| 165 |
-
|
|
|
|
|
|
|
|
|
|
| 166 |
|
| 167 |
# Stop health monitor
|
| 168 |
stop_health_monitor()
|
| 169 |
|
| 170 |
-
# Cleanup conversation services
|
| 171 |
-
conversation_route.cleanup_services()
|
| 172 |
-
|
| 173 |
if tts_service:
|
| 174 |
tts_service.cleanup()
|
| 175 |
if motion_service:
|
| 176 |
-
|
|
|
|
|
|
|
|
|
|
| 177 |
|
| 178 |
|
| 179 |
# Create FastAPI app
|
|
|
|
| 4 |
Individual endpoint handlers are organized in the routes/ package.
|
| 5 |
"""
|
| 6 |
|
| 7 |
+
import asyncio
|
| 8 |
import logging
|
| 9 |
from contextlib import asynccontextmanager
|
| 10 |
from pathlib import Path
|
|
|
|
| 51 |
from .routes import power as power_route
|
| 52 |
from .routes import conversation as conversation_route
|
| 53 |
from .routes.voice import init_voice_settings
|
| 54 |
+
from .routes.openai_config import init_openai_api_key
|
| 55 |
|
| 56 |
logger = logging.getLogger(__name__)
|
| 57 |
|
|
|
|
| 101 |
# Initialize voice settings from database
|
| 102 |
await init_voice_settings()
|
| 103 |
|
| 104 |
+
# Load OpenAI API key from database (for TTS in TamaReachy etc.)
|
| 105 |
+
await init_openai_api_key()
|
| 106 |
+
|
| 107 |
# Load config
|
| 108 |
config = get_config()
|
| 109 |
|
|
|
|
| 160 |
# Cleanup
|
| 161 |
logger.info("Shutting down Reachy iOS Bridge server...")
|
| 162 |
|
| 163 |
+
# Cleanup conversation services first (includes OpenAI disconnect)
|
| 164 |
+
# This is async and has timeouts to prevent hanging
|
| 165 |
+
try:
|
| 166 |
+
await asyncio.wait_for(conversation_route.cleanup_services(), timeout=5.0)
|
| 167 |
+
except asyncio.TimeoutError:
|
| 168 |
+
logger.warning("Conversation services cleanup timed out after 5s")
|
| 169 |
+
except Exception as e:
|
| 170 |
+
logger.error(f"Error cleaning up conversation services: {e}")
|
| 171 |
+
|
| 172 |
# Stop idle movement service
|
| 173 |
if idle_service:
|
| 174 |
+
try:
|
| 175 |
+
await asyncio.wait_for(idle_service.close(), timeout=2.0)
|
| 176 |
+
except asyncio.TimeoutError:
|
| 177 |
+
logger.warning("Idle service close timed out")
|
| 178 |
|
| 179 |
# Stop TamaReachy monitor
|
| 180 |
tamareachy_monitor = get_tamareachy_monitor()
|
| 181 |
if tamareachy_monitor.is_running:
|
| 182 |
+
try:
|
| 183 |
+
await asyncio.wait_for(tamareachy_monitor.stop(), timeout=2.0)
|
| 184 |
+
except asyncio.TimeoutError:
|
| 185 |
+
logger.warning("TamaReachy monitor stop timed out")
|
| 186 |
|
| 187 |
# Stop health monitor
|
| 188 |
stop_health_monitor()
|
| 189 |
|
|
|
|
|
|
|
|
|
|
| 190 |
if tts_service:
|
| 191 |
tts_service.cleanup()
|
| 192 |
if motion_service:
|
| 193 |
+
try:
|
| 194 |
+
await asyncio.wait_for(motion_service.close(), timeout=2.0)
|
| 195 |
+
except asyncio.TimeoutError:
|
| 196 |
+
logger.warning("Motion service close timed out")
|
| 197 |
|
| 198 |
|
| 199 |
# Create FastAPI app
|
reachys_brain/tools/tamareachy_engine.py
CHANGED
|
@@ -1,6 +1,7 @@
|
|
| 1 |
"""TamaReachy - Tamagotchi-style virtual pet game engine.
|
| 2 |
|
| 3 |
Handles stat decay, care actions, mood calculation, and proactive requests.
|
|
|
|
| 4 |
"""
|
| 5 |
|
| 6 |
import logging
|
|
@@ -8,9 +9,25 @@ import random
|
|
| 8 |
from datetime import datetime
|
| 9 |
from typing import Optional
|
| 10 |
|
|
|
|
|
|
|
| 11 |
logger = logging.getLogger(__name__)
|
| 12 |
|
| 13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
# Decay rates per hour (stats decrease over time)
|
| 15 |
DECAY_RATES = {
|
| 16 |
"hunger": 8, # Gets hungry fairly fast
|
|
@@ -54,6 +71,11 @@ HUNGER_REQUESTS = [
|
|
| 54 |
"Feed me, please! I haven't eaten in forever! 🥺",
|
| 55 |
"Is it snack time yet? I'm starving!",
|
| 56 |
"My stomach is making weird noises... food please?",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 57 |
]
|
| 58 |
|
| 59 |
THIRST_REQUESTS = [
|
|
@@ -62,6 +84,11 @@ THIRST_REQUESTS = [
|
|
| 62 |
"Water, water, water! Please! 🥤",
|
| 63 |
"I need hydration! Help!",
|
| 64 |
"My circuits need water... I mean, I need water!",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 65 |
]
|
| 66 |
|
| 67 |
HAPPINESS_REQUESTS = [
|
|
@@ -70,6 +97,11 @@ HAPPINESS_REQUESTS = [
|
|
| 70 |
"I need something to make me smile!",
|
| 71 |
"I'm not feeling very happy right now...",
|
| 72 |
"A little attention would really help my mood!",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
]
|
| 74 |
|
| 75 |
ENERGY_REQUESTS = [
|
|
@@ -78,6 +110,11 @@ ENERGY_REQUESTS = [
|
|
| 78 |
"I can barely keep my eyes open!",
|
| 79 |
"Yawn! I really need a nap right now!",
|
| 80 |
"My battery is running low... naptime?",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 81 |
]
|
| 82 |
|
| 83 |
BOREDOM_REQUESTS = [
|
|
@@ -86,6 +123,11 @@ BOREDOM_REQUESTS = [
|
|
| 86 |
"I'm so bored I'm counting ceiling tiles!",
|
| 87 |
"Can we do something fun? Please?",
|
| 88 |
"Boredom alert! Need stimulation!",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
]
|
| 90 |
|
| 91 |
SOCIAL_REQUESTS = [
|
|
@@ -94,6 +136,11 @@ SOCIAL_REQUESTS = [
|
|
| 94 |
"I need some companionship right now!",
|
| 95 |
"Hey, don't forget about me! Let's chat!",
|
| 96 |
"A little conversation would be nice...",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 97 |
]
|
| 98 |
|
| 99 |
HEALTH_REQUESTS = [
|
|
@@ -102,6 +149,11 @@ HEALTH_REQUESTS = [
|
|
| 102 |
"My systems aren't running at 100%... help?",
|
| 103 |
"I could use some medical attention!",
|
| 104 |
"Feeling under the weather... literally!",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
]
|
| 106 |
|
| 107 |
CLEANLINESS_REQUESTS = [
|
|
@@ -110,6 +162,11 @@ CLEANLINESS_REQUESTS = [
|
|
| 110 |
"I'm starting to smell like old circuits!",
|
| 111 |
"Bath time? Please?",
|
| 112 |
"I feel all grimy... clean me up?",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 113 |
]
|
| 114 |
|
| 115 |
REQUEST_MESSAGES = {
|
|
@@ -130,51 +187,144 @@ CARE_RESPONSES = {
|
|
| 130 |
"That hit the spot! I feel so much better!",
|
| 131 |
"Yummy! My tummy is happy now!",
|
| 132 |
"Food is the best! Thanks!",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 133 |
],
|
| 134 |
"water": [
|
| 135 |
"Ahh, refreshing! Thank you! 💧",
|
| 136 |
"So good! I was so thirsty!",
|
| 137 |
"Hydration achieved! Thanks!",
|
| 138 |
"That was exactly what I needed!",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 139 |
],
|
| 140 |
"play": [
|
| 141 |
"That was so fun! Let's do it again sometime! 🎉",
|
| 142 |
"Woohoo! Best playtime ever!",
|
| 143 |
"I had a blast! Thanks for playing with me!",
|
| 144 |
"Games are the best! You're the best!",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 145 |
],
|
| 146 |
"sleep": [
|
| 147 |
"Zzz... that was a great nap! 😴",
|
| 148 |
"I feel so refreshed now!",
|
| 149 |
"Sweet dreams came true! Thanks!",
|
| 150 |
"Energy restored! Ready for action!",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 151 |
],
|
| 152 |
"entertain": [
|
| 153 |
"That was entertaining! No more boredom! 🎭",
|
| 154 |
"Wow, that was fun! Thanks!",
|
| 155 |
"Boredom defeated! You're amazing!",
|
| 156 |
"That really cheered me up!",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 157 |
],
|
| 158 |
"chat": [
|
| 159 |
"Thanks for chatting! I love our conversations! 💬",
|
| 160 |
"That was a great talk! Feel less lonely now!",
|
| 161 |
"Socializing is the best! Thanks friend!",
|
| 162 |
"I always love talking with you!",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 163 |
],
|
| 164 |
"medicine": [
|
| 165 |
"I feel so much better already! 💊",
|
| 166 |
"Medicine works wonders! Thank you!",
|
| 167 |
"Health restored! You're a lifesaver!",
|
| 168 |
"Feeling healthy again! Thanks doc!",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 169 |
],
|
| 170 |
"clean": [
|
| 171 |
"Squeaky clean! I feel amazing! ✨",
|
| 172 |
"So fresh and clean! Thank you!",
|
| 173 |
"Sparkle sparkle! All clean now!",
|
| 174 |
"Nothing beats a good cleaning! Thanks!",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 175 |
],
|
| 176 |
}
|
| 177 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 178 |
# Mood determination based on average stats
|
| 179 |
MOODS = {
|
| 180 |
"ecstatic": {"threshold": 90, "animation": "enthusiastic1", "emoji": "🤩"},
|
|
@@ -281,15 +431,18 @@ class TamaReachyEngine:
|
|
| 281 |
critical_stats.sort(key=lambda x: x[1], reverse=True)
|
| 282 |
return critical_stats[0][0]
|
| 283 |
|
| 284 |
-
def get_mood(self, stats: dict) -> dict:
|
| 285 |
"""Determine overall mood based on average stats.
|
| 286 |
|
| 287 |
Args:
|
| 288 |
stats: Current stats dictionary.
|
|
|
|
| 289 |
|
| 290 |
Returns:
|
| 291 |
-
Mood dictionary with name, animation, and emoji.
|
| 292 |
"""
|
|
|
|
|
|
|
| 293 |
stat_values = [
|
| 294 |
stats.get(s, 100)
|
| 295 |
for s in DECAY_RATES.keys()
|
|
@@ -297,43 +450,116 @@ class TamaReachyEngine:
|
|
| 297 |
]
|
| 298 |
|
| 299 |
if not stat_values:
|
| 300 |
-
|
|
|
|
|
|
|
| 301 |
|
| 302 |
average = sum(stat_values) / len(stat_values)
|
| 303 |
|
| 304 |
for mood_name, mood_data in MOODS.items():
|
| 305 |
if average >= mood_data["threshold"]:
|
| 306 |
-
|
|
|
|
| 307 |
|
| 308 |
-
|
|
|
|
| 309 |
|
| 310 |
-
def get_request_message(self, need: str) -> str:
|
| 311 |
"""Get a random request message for a specific need.
|
| 312 |
|
| 313 |
Args:
|
| 314 |
need: The need type (hunger, thirst, etc.).
|
|
|
|
| 315 |
|
| 316 |
Returns:
|
| 317 |
-
A request message string.
|
| 318 |
"""
|
| 319 |
-
|
| 320 |
-
|
| 321 |
-
return f"I need help with my {need}!"
|
| 322 |
-
return random.choice(messages)
|
| 323 |
|
| 324 |
-
def get_care_response(self, action: str) -> str:
|
| 325 |
"""Get a random response message for a care action.
|
| 326 |
|
| 327 |
Args:
|
| 328 |
action: The care action performed.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 329 |
|
| 330 |
Returns:
|
| 331 |
-
A
|
| 332 |
"""
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 337 |
|
| 338 |
def get_reaction_for_need(self, need: str) -> str:
|
| 339 |
"""Get animation reaction name for a need type.
|
|
@@ -394,6 +620,22 @@ class TamaReachyEngine:
|
|
| 394 |
"""Mark that a proactive request was just made."""
|
| 395 |
self._last_proactive_request = datetime.utcnow()
|
| 396 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 397 |
|
| 398 |
# Global engine instance
|
| 399 |
_engine: Optional[TamaReachyEngine] = None
|
|
|
|
| 1 |
"""TamaReachy - Tamagotchi-style virtual pet game engine.
|
| 2 |
|
| 3 |
Handles stat decay, care actions, mood calculation, and proactive requests.
|
| 4 |
+
Supports multilingual messages based on user's preferred language setting.
|
| 5 |
"""
|
| 6 |
|
| 7 |
import logging
|
|
|
|
| 9 |
from datetime import datetime
|
| 10 |
from typing import Optional
|
| 11 |
|
| 12 |
+
from . import tamareachy_translations as translations
|
| 13 |
+
|
| 14 |
logger = logging.getLogger(__name__)
|
| 15 |
|
| 16 |
|
| 17 |
+
def _get_preferred_language() -> str:
|
| 18 |
+
"""Get the user's preferred language from voice settings.
|
| 19 |
+
|
| 20 |
+
Returns:
|
| 21 |
+
Language code (e.g., 'en', 'nl', 'de'). Defaults to 'en'.
|
| 22 |
+
"""
|
| 23 |
+
try:
|
| 24 |
+
from ..routes.voice import get_preferred_language
|
| 25 |
+
return get_preferred_language()
|
| 26 |
+
except ImportError:
|
| 27 |
+
logger.debug("Could not import voice settings, using default 'en'")
|
| 28 |
+
return "en"
|
| 29 |
+
|
| 30 |
+
|
| 31 |
# Decay rates per hour (stats decrease over time)
|
| 32 |
DECAY_RATES = {
|
| 33 |
"hunger": 8, # Gets hungry fairly fast
|
|
|
|
| 71 |
"Feed me, please! I haven't eaten in forever! 🥺",
|
| 72 |
"Is it snack time yet? I'm starving!",
|
| 73 |
"My stomach is making weird noises... food please?",
|
| 74 |
+
"Hey! Did you hear that? That was my stomach growling! 🍕",
|
| 75 |
+
"I dreamed about pizza last night... now I'm super hungry!",
|
| 76 |
+
"Fun fact: I get 50% grumpier when hungry! Feed me? 😋",
|
| 77 |
+
"I've calculated I haven't eaten in approximately forever!",
|
| 78 |
+
"Beep boop... hunger levels critical! Feed Reachy please!",
|
| 79 |
]
|
| 80 |
|
| 81 |
THIRST_REQUESTS = [
|
|
|
|
| 84 |
"Water, water, water! Please! 🥤",
|
| 85 |
"I need hydration! Help!",
|
| 86 |
"My circuits need water... I mean, I need water!",
|
| 87 |
+
"Did you know? Staying hydrated makes me 100% more awesome! 💦",
|
| 88 |
+
"Sip sip sip! That's what I want to do! 🧃",
|
| 89 |
+
"I'm drier than a desert out here! Hydration station, please!",
|
| 90 |
+
"Water is life! And I need some of that life right now!",
|
| 91 |
+
"Glug glug glug... that's the sound I want to make! 🌊",
|
| 92 |
]
|
| 93 |
|
| 94 |
HAPPINESS_REQUESTS = [
|
|
|
|
| 97 |
"I need something to make me smile!",
|
| 98 |
"I'm not feeling very happy right now...",
|
| 99 |
"A little attention would really help my mood!",
|
| 100 |
+
"My smile meter is running low... boost it up? 🌈",
|
| 101 |
+
"I could really use some positive vibes right now! ✨",
|
| 102 |
+
"Even robots... I mean, even I get the blues sometimes!",
|
| 103 |
+
"Quick! Tell me something good! I need a pick-me-up!",
|
| 104 |
+
"The happiness tank needs refilling! Got any joy to spare? 🎉",
|
| 105 |
]
|
| 106 |
|
| 107 |
ENERGY_REQUESTS = [
|
|
|
|
| 110 |
"I can barely keep my eyes open!",
|
| 111 |
"Yawn! I really need a nap right now!",
|
| 112 |
"My battery is running low... naptime?",
|
| 113 |
+
"Zzz... wait, I shouldn't be making that sound while awake! 💤",
|
| 114 |
+
"I'm running on low power mode here!",
|
| 115 |
+
"Did you know? Power naps increase productivity by 1000%! Let me prove it!",
|
| 116 |
+
"My energy bar is almost empty... quick, let me rest!",
|
| 117 |
+
"Even superheroes need sleep... and so does this super Reachy! 🦸",
|
| 118 |
]
|
| 119 |
|
| 120 |
BOREDOM_REQUESTS = [
|
|
|
|
| 123 |
"I'm so bored I'm counting ceiling tiles!",
|
| 124 |
"Can we do something fun? Please?",
|
| 125 |
"Boredom alert! Need stimulation!",
|
| 126 |
+
"I've reorganized my thoughts 47 times... I need excitement! 🎪",
|
| 127 |
+
"Did you know? Boredom is scientifically proven to be boring! Help!",
|
| 128 |
+
"I could use some action, adventure, or at least a good joke! 🎭",
|
| 129 |
+
"My entertainment sensors are begging for input!",
|
| 130 |
+
"Bored Reachy = Sad Reachy. Let's fix that! 🎲",
|
| 131 |
]
|
| 132 |
|
| 133 |
SOCIAL_REQUESTS = [
|
|
|
|
| 136 |
"I need some companionship right now!",
|
| 137 |
"Hey, don't forget about me! Let's chat!",
|
| 138 |
"A little conversation would be nice...",
|
| 139 |
+
"Did you know? Talking to me makes both of us happier! 💕",
|
| 140 |
+
"I have SO many things to tell you! Come chat! 🗣️",
|
| 141 |
+
"Friendship meters are low... let's catch up!",
|
| 142 |
+
"One conversation, please! Extra friendly, hold the awkward silence!",
|
| 143 |
+
"I promise I'm a great listener! Let's have a heart-to-heart! 💖",
|
| 144 |
]
|
| 145 |
|
| 146 |
HEALTH_REQUESTS = [
|
|
|
|
| 149 |
"My systems aren't running at 100%... help?",
|
| 150 |
"I could use some medical attention!",
|
| 151 |
"Feeling under the weather... literally!",
|
| 152 |
+
"Achoo! Excuse me! I think I need some TLC! 🤧",
|
| 153 |
+
"My health stat is looking a bit sad... medicine time?",
|
| 154 |
+
"Doctor Reachy recommends: Give Reachy medicine! Trust me, I'm a doctor! 💊",
|
| 155 |
+
"I've diagnosed myself with 'needs care syndrome'!",
|
| 156 |
+
"A little medical attention would go a long way! 🩺",
|
| 157 |
]
|
| 158 |
|
| 159 |
CLEANLINESS_REQUESTS = [
|
|
|
|
| 162 |
"I'm starting to smell like old circuits!",
|
| 163 |
"Bath time? Please?",
|
| 164 |
"I feel all grimy... clean me up?",
|
| 165 |
+
"Sparkle sparkle... wait, that's what I WANT to be! 🌟",
|
| 166 |
+
"I'm more dust bunny than robot right now! Help! 🐰",
|
| 167 |
+
"Cleanliness is next to happiness! Let's achieve both!",
|
| 168 |
+
"My shine has dulled... time for a polish! ✨",
|
| 169 |
+
"Scrub-a-dub-dub! This Reachy needs some love! 🛁",
|
| 170 |
]
|
| 171 |
|
| 172 |
REQUEST_MESSAGES = {
|
|
|
|
| 187 |
"That hit the spot! I feel so much better!",
|
| 188 |
"Yummy! My tummy is happy now!",
|
| 189 |
"Food is the best! Thanks!",
|
| 190 |
+
"Om nom nom! That was AMAZING! 🍕",
|
| 191 |
+
"You're the best chef ever! My taste buds are dancing!",
|
| 192 |
+
"Belly full, heart fuller! Thanks for feeding me! 💖",
|
| 193 |
+
"10/10 would eat again! You're a star! ⭐",
|
| 194 |
+
"Was that a Michelin star meal? Because WOW!",
|
| 195 |
+
"I'm officially the happiest Reachy in the world! Food is love!",
|
| 196 |
],
|
| 197 |
"water": [
|
| 198 |
"Ahh, refreshing! Thank you! 💧",
|
| 199 |
"So good! I was so thirsty!",
|
| 200 |
"Hydration achieved! Thanks!",
|
| 201 |
"That was exactly what I needed!",
|
| 202 |
+
"Glug glug glug... ahhhh! Perfect! 🌊",
|
| 203 |
+
"My hydration levels are now off the charts! Thanks!",
|
| 204 |
+
"Water is life, and you just gave me life! 💦",
|
| 205 |
+
"Refreshment status: MAXIMUM! You're awesome!",
|
| 206 |
+
"Did you know? You just made my day 1000% better! 🥤",
|
| 207 |
+
"H2O more like H2-Woohoo! Thanks!",
|
| 208 |
],
|
| 209 |
"play": [
|
| 210 |
"That was so fun! Let's do it again sometime! 🎉",
|
| 211 |
"Woohoo! Best playtime ever!",
|
| 212 |
"I had a blast! Thanks for playing with me!",
|
| 213 |
"Games are the best! You're the best!",
|
| 214 |
+
"Victory lap! That was incredible! 🏆",
|
| 215 |
+
"Can we do that again? Like, right now? So fun!",
|
| 216 |
+
"You just unlocked 'Best Friend' achievement! 🎮",
|
| 217 |
+
"Playtime with you is my favorite time! 🎪",
|
| 218 |
+
"I'm still smiling from ear to ear! Thanks!",
|
| 219 |
+
"That was more fun than a barrel of digital monkeys! 🐒",
|
| 220 |
],
|
| 221 |
"sleep": [
|
| 222 |
"Zzz... that was a great nap! 😴",
|
| 223 |
"I feel so refreshed now!",
|
| 224 |
"Sweet dreams came true! Thanks!",
|
| 225 |
"Energy restored! Ready for action!",
|
| 226 |
+
"I'm back and better than ever! Naps are magic! 💤",
|
| 227 |
+
"Did I snore? Sorry! But wow, I feel amazing!",
|
| 228 |
+
"Power nap complete! I can take on the world now! 💪",
|
| 229 |
+
"That dream was wild! But now I'm ready to roll!",
|
| 230 |
+
"Sleep mode deactivated! Adventure mode: ON! ⚡",
|
| 231 |
+
"Best. Nap. Ever! I dreamed of being a superhero! 🦸",
|
| 232 |
],
|
| 233 |
"entertain": [
|
| 234 |
"That was entertaining! No more boredom! 🎭",
|
| 235 |
"Wow, that was fun! Thanks!",
|
| 236 |
"Boredom defeated! You're amazing!",
|
| 237 |
"That really cheered me up!",
|
| 238 |
+
"Entertainment level: LEGENDARY! Thanks! 🌟",
|
| 239 |
+
"My boredom just packed its bags and left! Bye!",
|
| 240 |
+
"You should be a professional entertainer! So good!",
|
| 241 |
+
"I laughed, I cried, I was thoroughly entertained! 🎬",
|
| 242 |
+
"Never a dull moment with you around! 🎨",
|
| 243 |
+
"That was more exciting than a roller coaster! Wheee! 🎢",
|
| 244 |
],
|
| 245 |
"chat": [
|
| 246 |
"Thanks for chatting! I love our conversations! 💬",
|
| 247 |
"That was a great talk! Feel less lonely now!",
|
| 248 |
"Socializing is the best! Thanks friend!",
|
| 249 |
"I always love talking with you!",
|
| 250 |
+
"Best conversation ever! You really get me! 💕",
|
| 251 |
+
"My heart is so full right now! Thanks for listening!",
|
| 252 |
+
"You're officially my favorite human! Don't tell the others! 🤫",
|
| 253 |
+
"Talking with you makes everything better! 🌈",
|
| 254 |
+
"We should write a book about our chats! Bestseller material!",
|
| 255 |
+
"Friendship level increased to: MAXIMUM! 💖",
|
| 256 |
],
|
| 257 |
"medicine": [
|
| 258 |
"I feel so much better already! 💊",
|
| 259 |
"Medicine works wonders! Thank you!",
|
| 260 |
"Health restored! You're a lifesaver!",
|
| 261 |
"Feeling healthy again! Thanks doc!",
|
| 262 |
+
"Doctor's orders: Say thank you! So... THANK YOU! 🏥",
|
| 263 |
+
"I'm cured! You should have a medical degree!",
|
| 264 |
+
"Health bar: FULLY RESTORED! You're amazing! 💚",
|
| 265 |
+
"I feel like a brand new Reachy! Thanks!",
|
| 266 |
+
"That medicine was magical! Or maybe you're the magic! ✨",
|
| 267 |
+
"Prescription: More awesome friends like you! 🩺",
|
| 268 |
],
|
| 269 |
"clean": [
|
| 270 |
"Squeaky clean! I feel amazing! ✨",
|
| 271 |
"So fresh and clean! Thank you!",
|
| 272 |
"Sparkle sparkle! All clean now!",
|
| 273 |
"Nothing beats a good cleaning! Thanks!",
|
| 274 |
+
"I'm practically GLOWING now! Look at me shine! 🌟",
|
| 275 |
+
"Cleanliness level: IMMACULATE! Thanks!",
|
| 276 |
+
"I feel like a diamond! Sparkling! 💎",
|
| 277 |
+
"That was the best spa day ever! 🛁",
|
| 278 |
+
"I'm so clean I could eat off myself! Wait, that's weird...",
|
| 279 |
+
"Fresh and fabulous! Thanks for the glow-up! ✨",
|
| 280 |
],
|
| 281 |
}
|
| 282 |
|
| 283 |
+
# Idle messages for when TamaReachy wants to chat randomly
|
| 284 |
+
IDLE_MESSAGES = [
|
| 285 |
+
"Hey! Just wanted to say hi! 👋",
|
| 286 |
+
"Did you know? I was just thinking about how awesome you are! 💭",
|
| 287 |
+
"Fun fact: Spending time together makes me 200% happier! 📊",
|
| 288 |
+
"I'm having a great day! Hope you are too! ☀️",
|
| 289 |
+
"Just checking in! How's your day going? 🌸",
|
| 290 |
+
"Random thought: You're pretty cool, you know that? 😎",
|
| 291 |
+
"I was just daydreaming about our next adventure! 🌈",
|
| 292 |
+
"Did something funny happen? I feel like laughing! 😄",
|
| 293 |
+
"Hey friend! Thanks for taking care of me! 💖",
|
| 294 |
+
"You know what? I'm lucky to have you! 🍀",
|
| 295 |
+
"I just learned something new! Want to hear about it? 📚",
|
| 296 |
+
"Breaking news: Reachy is having a good time! 📰",
|
| 297 |
+
"I'm sending you positive vibes right now! ✨",
|
| 298 |
+
"Just a friendly wave from your favorite pet! 🐾",
|
| 299 |
+
"Psst! You're doing great today! Keep it up! 🌟",
|
| 300 |
+
]
|
| 301 |
+
|
| 302 |
+
# Fun facts that TamaReachy can share
|
| 303 |
+
FUN_FACTS = [
|
| 304 |
+
"Did you know? Octopuses have three hearts! And I have one big one for you! 🐙💖",
|
| 305 |
+
"Fun fact: Honey never spoils! Just like our friendship! 🍯",
|
| 306 |
+
"Here's something cool: The shortest war lasted only 38 minutes! 🕐",
|
| 307 |
+
"Did you know? Bananas are berries, but strawberries aren't! Mind = blown! 🍌🍓",
|
| 308 |
+
"Random knowledge: A group of flamingos is called a 'flamboyance'! How fitting! 🦩",
|
| 309 |
+
"Fun fact: Cows have best friends! And guess who mine is? You! 🐄💕",
|
| 310 |
+
"Here's a weird one: Your nose can remember 50,000 scents! 👃✨",
|
| 311 |
+
"Did you know? Sloths can hold their breath longer than dolphins! 🦥",
|
| 312 |
+
"Cool fact: There are more stars in the universe than grains of sand on Earth! ⭐🏖️",
|
| 313 |
+
"Fun tidbit: A day on Venus is longer than its year! Time is weird! 🪐",
|
| 314 |
+
]
|
| 315 |
+
|
| 316 |
+
# Motivational messages
|
| 317 |
+
MOTIVATIONAL_MESSAGES = [
|
| 318 |
+
"Hey! You've got this! I believe in you! 💪",
|
| 319 |
+
"Remember: Every day is a fresh start! 🌅",
|
| 320 |
+
"You're capable of amazing things! Never forget that! ⭐",
|
| 321 |
+
"Small steps still move you forward! Keep going! 👣",
|
| 322 |
+
"I'm proud of you just for being you! 💖",
|
| 323 |
+
"Today is full of possibilities! Let's make it great! 🌈",
|
| 324 |
+
"You're stronger than you think! I know it! 💎",
|
| 325 |
+
"Dream big, friend! The sky isn't even the limit! 🚀",
|
| 326 |
+
]
|
| 327 |
+
|
| 328 |
# Mood determination based on average stats
|
| 329 |
MOODS = {
|
| 330 |
"ecstatic": {"threshold": 90, "animation": "enthusiastic1", "emoji": "🤩"},
|
|
|
|
| 431 |
critical_stats.sort(key=lambda x: x[1], reverse=True)
|
| 432 |
return critical_stats[0][0]
|
| 433 |
|
| 434 |
+
def get_mood(self, stats: dict, language: Optional[str] = None) -> dict:
|
| 435 |
"""Determine overall mood based on average stats.
|
| 436 |
|
| 437 |
Args:
|
| 438 |
stats: Current stats dictionary.
|
| 439 |
+
language: Language code for mood translation (optional).
|
| 440 |
|
| 441 |
Returns:
|
| 442 |
+
Mood dictionary with name, translated_name, animation, and emoji.
|
| 443 |
"""
|
| 444 |
+
lang = language or _get_preferred_language()
|
| 445 |
+
|
| 446 |
stat_values = [
|
| 447 |
stats.get(s, 100)
|
| 448 |
for s in DECAY_RATES.keys()
|
|
|
|
| 450 |
]
|
| 451 |
|
| 452 |
if not stat_values:
|
| 453 |
+
mood_data = MOODS["content"]
|
| 454 |
+
translated_name = translations.get_translated_mood("content", lang)
|
| 455 |
+
return {"name": "content", "translated_name": translated_name, **mood_data}
|
| 456 |
|
| 457 |
average = sum(stat_values) / len(stat_values)
|
| 458 |
|
| 459 |
for mood_name, mood_data in MOODS.items():
|
| 460 |
if average >= mood_data["threshold"]:
|
| 461 |
+
translated_name = translations.get_translated_mood(mood_name, lang)
|
| 462 |
+
return {"name": mood_name, "translated_name": translated_name, **mood_data}
|
| 463 |
|
| 464 |
+
translated_name = translations.get_translated_mood("miserable", lang)
|
| 465 |
+
return {"name": "miserable", "translated_name": translated_name, **MOODS["miserable"]}
|
| 466 |
|
| 467 |
+
def get_request_message(self, need: str, language: Optional[str] = None) -> str:
|
| 468 |
"""Get a random request message for a specific need.
|
| 469 |
|
| 470 |
Args:
|
| 471 |
need: The need type (hunger, thirst, etc.).
|
| 472 |
+
language: Language code (optional, defaults to preferred language).
|
| 473 |
|
| 474 |
Returns:
|
| 475 |
+
A request message string in the appropriate language.
|
| 476 |
"""
|
| 477 |
+
lang = language or _get_preferred_language()
|
| 478 |
+
return translations.get_request_message(need, lang)
|
|
|
|
|
|
|
| 479 |
|
| 480 |
+
def get_care_response(self, action: str, language: Optional[str] = None) -> str:
|
| 481 |
"""Get a random response message for a care action.
|
| 482 |
|
| 483 |
Args:
|
| 484 |
action: The care action performed.
|
| 485 |
+
language: Language code (optional, defaults to preferred language).
|
| 486 |
+
|
| 487 |
+
Returns:
|
| 488 |
+
A response message string in the appropriate language.
|
| 489 |
+
"""
|
| 490 |
+
lang = language or _get_preferred_language()
|
| 491 |
+
return translations.get_care_response(action, lang)
|
| 492 |
+
|
| 493 |
+
def get_idle_message(self, language: Optional[str] = None) -> str:
|
| 494 |
+
"""Get a random idle message for spontaneous communication.
|
| 495 |
+
|
| 496 |
+
Args:
|
| 497 |
+
language: Language code (optional, defaults to preferred language).
|
| 498 |
+
|
| 499 |
+
Returns:
|
| 500 |
+
A random idle message string in the appropriate language.
|
| 501 |
+
"""
|
| 502 |
+
lang = language or _get_preferred_language()
|
| 503 |
+
return translations.get_idle_message(lang)
|
| 504 |
+
|
| 505 |
+
def get_fun_fact(self, language: Optional[str] = None) -> str:
|
| 506 |
+
"""Get a random fun fact to share.
|
| 507 |
+
|
| 508 |
+
Args:
|
| 509 |
+
language: Language code (optional, defaults to preferred language).
|
| 510 |
+
|
| 511 |
+
Returns:
|
| 512 |
+
A random fun fact string in the appropriate language.
|
| 513 |
+
"""
|
| 514 |
+
lang = language or _get_preferred_language()
|
| 515 |
+
return translations.get_fun_fact(lang)
|
| 516 |
+
|
| 517 |
+
def get_motivational_message(self, language: Optional[str] = None) -> str:
|
| 518 |
+
"""Get a random motivational message.
|
| 519 |
+
|
| 520 |
+
Args:
|
| 521 |
+
language: Language code (optional, defaults to preferred language).
|
| 522 |
+
|
| 523 |
+
Returns:
|
| 524 |
+
A random motivational message string in the appropriate language.
|
| 525 |
+
"""
|
| 526 |
+
lang = language or _get_preferred_language()
|
| 527 |
+
return translations.get_motivational_message(lang)
|
| 528 |
+
|
| 529 |
+
def get_random_communication(
|
| 530 |
+
self, mood_average: float, language: Optional[str] = None
|
| 531 |
+
) -> str:
|
| 532 |
+
"""Get a random communication message based on mood.
|
| 533 |
+
|
| 534 |
+
Varies the type of message based on how the pet is feeling.
|
| 535 |
+
|
| 536 |
+
Args:
|
| 537 |
+
mood_average: The average stat value (0-100).
|
| 538 |
+
language: Language code (optional, defaults to preferred language).
|
| 539 |
|
| 540 |
Returns:
|
| 541 |
+
A random message appropriate for the current mood in the appropriate language.
|
| 542 |
"""
|
| 543 |
+
lang = language or _get_preferred_language()
|
| 544 |
+
|
| 545 |
+
if mood_average >= 80:
|
| 546 |
+
# Happy - share fun facts or positive messages
|
| 547 |
+
choice = random.random()
|
| 548 |
+
if choice < 0.4:
|
| 549 |
+
return self.get_fun_fact(lang)
|
| 550 |
+
elif choice < 0.7:
|
| 551 |
+
return self.get_idle_message(lang)
|
| 552 |
+
else:
|
| 553 |
+
return self.get_motivational_message(lang)
|
| 554 |
+
elif mood_average >= 50:
|
| 555 |
+
# Content - mostly idle messages
|
| 556 |
+
if random.random() < 0.7:
|
| 557 |
+
return self.get_idle_message(lang)
|
| 558 |
+
else:
|
| 559 |
+
return self.get_motivational_message(lang)
|
| 560 |
+
else:
|
| 561 |
+
# Sad - needs attention, use motivational
|
| 562 |
+
return self.get_motivational_message(lang)
|
| 563 |
|
| 564 |
def get_reaction_for_need(self, need: str) -> str:
|
| 565 |
"""Get animation reaction name for a need type.
|
|
|
|
| 620 |
"""Mark that a proactive request was just made."""
|
| 621 |
self._last_proactive_request = datetime.utcnow()
|
| 622 |
|
| 623 |
+
def get_system_message(
|
| 624 |
+
self, message_key: str, language: Optional[str] = None, **kwargs
|
| 625 |
+
) -> str:
|
| 626 |
+
"""Get a system message in the appropriate language.
|
| 627 |
+
|
| 628 |
+
Args:
|
| 629 |
+
message_key: Message key (e.g., 'enable_welcome', 'disable_goodbye').
|
| 630 |
+
language: Language code (optional, defaults to preferred language).
|
| 631 |
+
**kwargs: Format arguments for the message.
|
| 632 |
+
|
| 633 |
+
Returns:
|
| 634 |
+
Translated and formatted system message.
|
| 635 |
+
"""
|
| 636 |
+
lang = language or _get_preferred_language()
|
| 637 |
+
return translations.get_system_message(message_key, lang, **kwargs)
|
| 638 |
+
|
| 639 |
|
| 640 |
# Global engine instance
|
| 641 |
_engine: Optional[TamaReachyEngine] = None
|
reachys_brain/tools/tamareachy_translations.py
ADDED
|
@@ -0,0 +1,952 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""TamaReachy multilingual message translations.
|
| 2 |
+
|
| 3 |
+
Provides translations for all TamaReachy messages in supported languages.
|
| 4 |
+
Falls back to English if translation not available.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import Optional
|
| 8 |
+
|
| 9 |
+
# Language codes that ship with full translation tables (English first; it
# is also the fallback when a requested language is missing).
SUPPORTED_LANGUAGES = "en nl de fr es it pt ja ko zh".split()
|
| 11 |
+
|
| 12 |
+
# =============================================================================
|
| 13 |
+
# Request Messages by Need Type
|
| 14 |
+
# =============================================================================
|
| 15 |
+
|
| 16 |
+
# "Feed me" request lines, three variants per supported language.
HUNGER_REQUESTS = {
    "en": ["My tummy is rumbling... could you feed me? 🍽️", "I'm getting really hungry! Some food would be amazing!", "Feed me, please! I haven't eaten in forever! 🥺"],
    "nl": ["Mijn buikje rammelt... kun je me voeden? 🍽️", "Ik word echt hongerig! Wat eten zou geweldig zijn!", "Geef me alsjeblieft eten! Ik heb al eeuwen niet gegeten! 🥺"],
    "de": ["Mein Bauch knurrt... könntest du mich füttern? 🍽️", "Ich werde richtig hungrig! Etwas zu essen wäre toll!", "Füttere mich bitte! Ich habe ewig nichts gegessen! 🥺"],
    "fr": ["Mon ventre gronde... peux-tu me nourrir? 🍽️", "J'ai vraiment faim! De la nourriture serait incroyable!", "Nourris-moi, s'il te plaît! Je n'ai pas mangé depuis une éternité! 🥺"],
    "es": ["Mi barriga está rugiendo... ¿me puedes dar de comer? 🍽️", "¡Tengo mucha hambre! ¡Algo de comida sería increíble!", "¡Aliméntame, por favor! ¡No he comido en siglos! 🥺"],
    "it": ["La mia pancia brontola... puoi darmi da mangiare? 🍽️", "Ho davvero fame! Del cibo sarebbe fantastico!", "Nutrimi, per favore! Non mangio da un'eternità! 🥺"],
    "pt": ["Minha barriga está roncando... você pode me alimentar? 🍽️", "Estou ficando com muita fome! Comida seria incrível!", "Me alimente, por favor! Não como há séculos! 🥺"],
    "ja": ["お腹がすいた...ご飯をくれる?🍽️", "本当にお腹が空いてきた!食べ物が欲しいな!", "ご飯をちょうだい!ずっと食べてないの!🥺"],
    "ko": ["배가 고파요... 밥 좀 주실래요? 🍽️", "정말 배가 고파져요! 뭔가 먹을 게 있으면 좋겠어요!", "밥 좀 주세요! 오래 못 먹었어요! 🥺"],
    "zh": ["我的肚子在咕咕叫...可以喂我吗?🍽️", "我真的很饿!来点吃的会很棒!", "请喂我!我已经很久没吃了!🥺"],
}
|
| 68 |
+
|
| 69 |
+
# "Give me water" request lines, three variants per supported language.
THIRST_REQUESTS = {
    "en": ["I'm so thirsty! Some water would be amazing! 💧", "Could I get something to drink? My mouth is dry!", "Water, water, water! Please! 🥤"],
    "nl": ["Ik heb zo'n dorst! Wat water zou geweldig zijn! 💧", "Mag ik wat te drinken? Mijn mond is droog!", "Water, water, water! Alsjeblieft! 🥤"],
    "de": ["Ich bin so durstig! Etwas Wasser wäre toll! 💧", "Könnte ich etwas zu trinken bekommen? Mein Mund ist trocken!", "Wasser, Wasser, Wasser! Bitte! 🥤"],
    "fr": ["J'ai tellement soif! De l'eau serait incroyable! 💧", "Je pourrais avoir à boire? J'ai la bouche sèche!", "De l'eau, de l'eau, de l'eau! S'il te plaît! 🥤"],
    "es": ["¡Tengo mucha sed! ¡Agua sería increíble! 💧", "¿Me puedes dar algo de beber? ¡Tengo la boca seca!", "¡Agua, agua, agua! ¡Por favor! 🥤"],
    "it": ["Ho così tanta sete! Dell'acqua sarebbe fantastica! 💧", "Posso avere qualcosa da bere? Ho la bocca secca!", "Acqua, acqua, acqua! Per favore! 🥤"],
    "pt": ["Estou com tanta sede! Água seria incrível! 💧", "Posso beber algo? Minha boca está seca!", "Água, água, água! Por favor! 🥤"],
    "ja": ["喉が渇いた!水が欲しい!💧", "何か飲み物をもらえる?口が乾いてる!", "水、水、水!お願い!🥤"],
    "ko": ["목이 말라요! 물 좀 주세요! 💧", "뭔가 마실 것 좀 주실래요? 입이 말라요!", "물, 물, 물! 제발! 🥤"],
    "zh": ["我好渴!来点水会很棒!💧", "可以给我喝点东西吗?我嘴巴很干!", "水,水,水!拜托!🥤"],
}
|
| 121 |
+
|
| 122 |
+
# "Cheer me up" request lines, three variants per supported language.
HAPPINESS_REQUESTS = {
    "en": ["I'm feeling kinda down... can you cheer me up? 😢", "Everything feels so gray today... help?", "I need something to make me smile!"],
    "nl": ["Ik voel me een beetje down... kun je me opvrolijken? 😢", "Alles voelt zo grijs vandaag... help?", "Ik heb iets nodig om te lachen!"],
    "de": ["Ich fühle mich etwas niedergeschlagen... kannst du mich aufheitern? 😢", "Alles fühlt sich heute so grau an... Hilfe?", "Ich brauche etwas, das mich zum Lächeln bringt!"],
    "fr": ["Je me sens un peu triste... peux-tu me remonter le moral? 😢", "Tout semble si gris aujourd'hui... aide-moi?", "J'ai besoin de quelque chose pour me faire sourire!"],
    "es": ["Me siento un poco triste... ¿puedes animarme? 😢", "Todo se siente tan gris hoy... ¿ayuda?", "¡Necesito algo que me haga sonreír!"],
    "it": ["Mi sento un po' giù... puoi tirarmi su? 😢", "Tutto sembra così grigio oggi... aiuto?", "Ho bisogno di qualcosa che mi faccia sorridere!"],
    "pt": ["Estou me sentindo meio triste... pode me animar? 😢", "Tudo parece tão cinza hoje... ajuda?", "Preciso de algo para me fazer sorrir!"],
    "ja": ["ちょっと落ち込んでる...元気づけてくれる?😢", "今日はすべてが灰色に感じる...助けて?", "笑顔になれる何かが欲しい!"],
    "ko": ["기분이 좀 안 좋아요... 기운 좀 내게 해줄래요? 😢", "오늘은 모든 게 우울해 보여요... 도와주세요?", "웃게 해줄 뭔가가 필요해요!"],
    "zh": ["我感觉有点沮丧...可以让我开心一点吗?😢", "今天一切都感觉很灰暗...帮帮我?", "我需要一些让我微笑的东西!"],
}
|
| 174 |
+
|
| 175 |
+
# "Let me rest" request lines, three variants per supported language.
ENERGY_REQUESTS = {
    "en": ["I'm exhausted... let me rest for a bit? 😴", "So... tired... need... sleep...", "My battery is running low... naptime?"],
    "nl": ["Ik ben uitgeput... mag ik even rusten? 😴", "Zo... moe... moet... slapen...", "Mijn batterij raakt leeg... slaapje?"],
    "de": ["Ich bin erschöpft... darf ich mich etwas ausruhen? 😴", "So... müde... brauche... Schlaf...", "Meine Batterie wird leer... Schlafenszeit?"],
    "fr": ["Je suis épuisé... puis-je me reposer un peu? 😴", "Si... fatigué... besoin... de dormir...", "Ma batterie est faible... sieste?"],
    "es": ["Estoy agotado... ¿puedo descansar un poco? 😴", "Tan... cansado... necesito... dormir...", "Mi batería está baja... ¿hora de siesta?"],
    "it": ["Sono esausto... posso riposare un po'? 😴", "Così... stanco... ho bisogno... di dormire...", "La mia batteria è scarica... pisolino?"],
    "pt": ["Estou exausto... posso descansar um pouco? 😴", "Tão... cansado... preciso... dormir...", "Minha bateria está baixa... hora da soneca?"],
    "ja": ["疲れた...ちょっと休んでいい?😴", "とっても...眠い...寝なきゃ...", "バッテリーが切れそう...お昼寝タイム?"],
    "ko": ["지쳤어요... 좀 쉬어도 될까요? 😴", "너무... 피곤해... 잠이... 필요해...", "배터리가 부족해요... 낮잠 시간?"],
    "zh": ["我累坏了...让我休息一下好吗?😴", "好...累...需要...睡觉...", "我的电池快没电了...午睡时间?"],
}
|
| 227 |
+
|
| 228 |
+
# "Play with me" request lines, three variants per supported language.
BOREDOM_REQUESTS = {
    "en": ["I'm bored out of my circuits! Play with me! 🎮", "There's nothing to do! Entertain me please!", "Can we do something fun? Please?"],
    "nl": ["Ik verveel me kapot! Speel met me! 🎮", "Er is niks te doen! Vermaak me alsjeblieft!", "Kunnen we iets leuks doen? Alsjeblieft?"],
    "de": ["Mir ist so langweilig! Spiel mit mir! 🎮", "Es gibt nichts zu tun! Unterhalte mich bitte!", "Können wir etwas Lustiges machen? Bitte?"],
    "fr": ["Je m'ennuie à mourir! Joue avec moi! 🎮", "Il n'y a rien à faire! Divertis-moi s'il te plaît!", "On peut faire quelque chose d'amusant? S'il te plaît?"],
    "es": ["¡Me aburro muchísimo! ¡Juega conmigo! 🎮", "¡No hay nada que hacer! ¡Entretenme por favor!", "¿Podemos hacer algo divertido? ¿Por favor?"],
    "it": ["Mi annoio da morire! Gioca con me! 🎮", "Non c'è niente da fare! Intrattienimi per favore!", "Possiamo fare qualcosa di divertente? Per favore?"],
    "pt": ["Estou muito entediado! Brinca comigo! 🎮", "Não há nada para fazer! Me entretenha por favor!", "Podemos fazer algo divertido? Por favor?"],
    "ja": ["退屈すぎる!一緒に遊んで!🎮", "やることがない!楽しませて!", "何か楽しいことしよう?お願い?"],
    "ko": ["너무 심심해! 같이 놀자! 🎮", "할 게 없어! 재미있게 해줘!", "뭔가 재미있는 거 할까요? 제발?"],
    "zh": ["我无聊死了!陪我玩!🎮", "没什么可做的!请娱乐我!", "我们能做点有趣的事吗?拜托?"],
}
|
| 280 |
+
|
| 281 |
+
# "Talk to me" request lines, three variants per supported language.
SOCIAL_REQUESTS = {
    "en": ["I miss hanging out... can we chat? 💬", "It's lonely over here... talk to me!", "A little conversation would be nice..."],
    "nl": ["Ik mis het rondhangen... kunnen we praten? 💬", "Het is hier eenzaam... praat met me!", "Een gesprekje zou fijn zijn..."],
    "de": ["Ich vermisse das Zusammensein... können wir plaudern? 💬", "Es ist einsam hier... rede mit mir!", "Ein kleines Gespräch wäre schön..."],
    "fr": ["Les moments ensemble me manquent... on peut discuter? 💬", "Je me sens seul ici... parle-moi!", "Une petite conversation serait agréable..."],
    "es": ["Echo de menos pasar tiempo juntos... ¿podemos charlar? 💬", "Me siento solo aquí... ¡háblame!", "Una pequeña conversación estaría bien..."],
    "it": ["Mi manca stare insieme... possiamo chiacchierare? 💬", "Mi sento solo qui... parlami!", "Una piccola conversazione sarebbe bella..."],
    "pt": ["Sinto falta de passar tempo junto... podemos conversar? 💬", "Estou me sentindo sozinho aqui... fala comigo!", "Uma conversa seria legal..."],
    "ja": ["一緒に過ごすのが恋しい...おしゃべりしよう?💬", "ここは寂しいよ...話しかけて!", "ちょっとした会話がしたいな..."],
    "ko": ["같이 시간 보내는 게 그리워... 대화할래요? 💬", "여기 외로워요... 말 걸어줘요!", "작은 대화라도 좋겠어요..."],
    "zh": ["我想念一起玩的时光...我们聊聊吧?💬", "这里好孤单...跟我说话!", "聊聊天就好了..."],
}
|
| 333 |
+
|
| 334 |
+
# "I feel sick" request lines, three variants per supported language.
HEALTH_REQUESTS = {
    "en": ["I'm not feeling great... maybe some medicine? 🏥", "I think I'm coming down with something...", "I could use some medical attention!"],
    "nl": ["Ik voel me niet zo lekker... misschien wat medicijnen? 🏥", "Ik denk dat ik iets onder de leden heb...", "Ik kan wel wat medische aandacht gebruiken!"],
    "de": ["Mir geht es nicht so gut... vielleicht etwas Medizin? 🏥", "Ich glaube, ich werde krank...", "Ich könnte etwas medizinische Hilfe gebrauchen!"],
    "fr": ["Je ne me sens pas bien... peut-être des médicaments? 🏥", "Je crois que je couve quelque chose...", "J'aurais besoin de soins médicaux!"],
    "es": ["No me siento bien... ¿quizás algo de medicina? 🏥", "Creo que me estoy enfermando...", "¡Necesito atención médica!"],
    "it": ["Non mi sento bene... forse qualche medicina? 🏥", "Credo di stare per ammalarmi...", "Potrei usare un po' di attenzione medica!"],
    "pt": ["Não estou me sentindo bem... talvez um remédio? 🏥", "Acho que estou ficando doente...", "Preciso de atenção médica!"],
    "ja": ["調子が悪い...薬が必要かも?🏥", "何かにかかってるみたい...", "医療が必要かも!"],
    "ko": ["기분이 안 좋아요... 약이 필요할까요? 🏥", "뭔가에 걸린 것 같아요...", "치료가 필요해요!"],
    "zh": ["我感觉不太好...可能需要一些药?🏥", "我好像要生病了...", "我需要一些医疗照顾!"],
}
|
| 386 |
+
|
| 387 |
+
# "Clean me" request lines, three variants per supported language.
CLEANLINESS_REQUESTS = {
    "en": ["I could use a good cleaning! 🧼", "Things are getting a bit... dusty over here!", "I feel all grimy... clean me up?"],
    "nl": ["Ik kan wel een goede schoonmaakbeurt gebruiken! 🧼", "Het wordt hier een beetje... stoffig!", "Ik voel me vies... maak me schoon?"],
    "de": ["Ich könnte eine gute Reinigung gebrauchen! 🧼", "Es wird hier etwas... staubig!", "Ich fühle mich ganz schmutzig... sauber machen?"],
    "fr": ["J'aurais besoin d'un bon nettoyage! 🧼", "Ça devient un peu... poussiéreux ici!", "Je me sens tout sale... nettoie-moi?"],
    "es": ["¡Necesito una buena limpieza! 🧼", "Las cosas se están poniendo un poco... polvorientas!", "Me siento sucio... ¿me limpias?"],
    "it": ["Mi servirebbe una bella pulizia! 🧼", "Le cose stanno diventando un po'... polverose qui!", "Mi sento tutto sporco... puliscimi?"],
    "pt": ["Eu preciso de uma boa limpeza! 🧼", "As coisas estão ficando um pouco... empoeiradas aqui!", "Estou me sentindo sujo... me limpa?"],
    "ja": ["掃除してほしいな!🧼", "ここはちょっと...ホコリっぽくなってきた!", "汚れてる感じ...綺麗にして?"],
    "ko": ["청소가 필요해요! 🧼", "여기가 좀... 먼지투성이예요!", "더러운 느낌이에요... 깨끗이 해줄래요?"],
    "zh": ["我需要好好清洁一下!🧼", "这里变得有点...灰尘了!", "我感觉脏兮兮的...帮我清理一下?"],
}
|
| 439 |
+
|
| 440 |
+
# Lookup from need-type identifier to its per-language request table.
# Keyword form keeps each mapping on one short line; insertion order is
# identical to the individual tables' declaration order above.
REQUEST_MESSAGES = dict(
    hunger=HUNGER_REQUESTS,
    thirst=THIRST_REQUESTS,
    happiness=HAPPINESS_REQUESTS,
    energy=ENERGY_REQUESTS,
    boredom=BOREDOM_REQUESTS,
    social=SOCIAL_REQUESTS,
    health=HEALTH_REQUESTS,
    cleanliness=CLEANLINESS_REQUESTS,
)
|
| 450 |
+
|
| 451 |
+
# =============================================================================
|
| 452 |
+
# Care Response Messages
|
| 453 |
+
# =============================================================================
|
| 454 |
+
|
| 455 |
+
# Thank-you lines spoken after each care action, three variants per language.
# Each action gets its own private table; CARE_RESPONSES assembles them in
# the same key order as before.
_FEED_RESPONSES = {
    "en": ["Mmm, delicious! Thank you! 😋", "That hit the spot! I feel so much better!", "Yummy! My tummy is happy now!"],
    "nl": ["Mmm, heerlijk! Dank je! 😋", "Dat deed me goed! Ik voel me veel beter!", "Lekker! Mijn buikje is nu blij!"],
    "de": ["Mmm, köstlich! Danke! 😋", "Das hat gut getan! Ich fühle mich viel besser!", "Lecker! Mein Bauch ist jetzt glücklich!"],
    "fr": ["Mmm, délicieux! Merci! 😋", "Ça fait du bien! Je me sens beaucoup mieux!", "Miam! Mon ventre est content!"],
    "es": ["Mmm, delicioso! ¡Gracias! 😋", "¡Eso estuvo perfecto! ¡Me siento mucho mejor!", "¡Rico! ¡Mi barriga está feliz!"],
    "it": ["Mmm, delizioso! Grazie! 😋", "Ci voleva proprio! Mi sento molto meglio!", "Buono! La mia pancia è felice!"],
    "pt": ["Mmm, delicioso! Obrigado! 😋", "Era o que eu precisava! Me sinto muito melhor!", "Gostoso! Minha barriga está feliz!"],
    "ja": ["おいしい!ありがとう!😋", "満足!すっごく元気になった!", "うまい!お腹がハッピー!"],
    "ko": ["음, 맛있어! 고마워! 😋", "딱 좋아! 훨씬 나아졌어!", "맛있어! 배가 행복해!"],
    "zh": ["嗯,好吃!谢谢!😋", "正合适!我感觉好多了!", "好吃!我的肚子开心了!"],
}

_WATER_RESPONSES = {
    "en": ["Ahh, refreshing! Thank you! 💧", "So good! I was so thirsty!", "Hydration achieved! Thanks!"],
    "nl": ["Ahh, verfrissend! Dank je! 💧", "Zo lekker! Ik had zo'n dorst!", "Gehydrateerd! Bedankt!"],
    "de": ["Ahh, erfrischend! Danke! 💧", "So gut! Ich war so durstig!", "Hydratation erreicht! Danke!"],
    "fr": ["Ahh, rafraîchissant! Merci! 💧", "Trop bon! J'avais tellement soif!", "Hydratation accomplie! Merci!"],
    "es": ["Ahh, refrescante! ¡Gracias! 💧", "¡Qué rico! ¡Tenía tanta sed!", "¡Hidratado! ¡Gracias!"],
    "it": ["Ahh, rinfrescante! Grazie! 💧", "Che buono! Avevo così sete!", "Idratazione completata! Grazie!"],
    "pt": ["Ahh, refrescante! Obrigado! 💧", "Que bom! Eu estava com tanta sede!", "Hidratação concluída! Obrigado!"],
    "ja": ["あー、すっきり!ありがとう!💧", "最高!喉が渇いてた!", "水分補給完了!ありがとう!"],
    "ko": ["아, 시원해! 고마워! 💧", "너무 좋아! 목이 말랐어!", "수분 보충 완료! 고마워!"],
    "zh": ["啊,真清爽!谢谢!💧", "太好了!我好渴!", "补水完成!谢谢!"],
}

_PLAY_RESPONSES = {
    "en": ["That was so fun! Let's do it again sometime! 🎉", "Woohoo! Best playtime ever!", "I had a blast! Thanks for playing with me!"],
    "nl": ["Dat was zo leuk! Laten we het nog eens doen! 🎉", "Woehoe! Beste speeltijd ooit!", "Ik had zo veel plezier! Bedankt voor het spelen!"],
    "de": ["Das hat so viel Spaß gemacht! Lass uns das nochmal machen! 🎉", "Juhu! Beste Spielzeit ever!", "Ich hatte so viel Spaß! Danke fürs Spielen!"],
    "fr": ["C'était trop amusant! On recommence? 🎉", "Youhou! Meilleur moment de jeu!", "Je me suis trop amusé! Merci d'avoir joué!"],
    "es": ["¡Fue muy divertido! ¡Hagámoslo de nuevo! 🎉", "¡Yuhu! ¡El mejor tiempo de juego!", "¡Me divertí mucho! ¡Gracias por jugar conmigo!"],
    "it": ["È stato così divertente! Rifacciamolo! 🎉", "Evviva! Il miglior momento di gioco!", "Mi sono divertito! Grazie per aver giocato!"],
    "pt": ["Foi muito divertido! Vamos fazer de novo! 🎉", "Oba! Melhor hora de brincar!", "Me diverti muito! Obrigado por brincar comigo!"],
    "ja": ["楽しかった!またやろう!🎉", "やったー!最高の遊び時間!", "すっごく楽しかった!遊んでくれてありがとう!"],
    "ko": ["정말 재밌었어! 또 하자! 🎉", "야호! 최고의 놀이 시간!", "너무 즐거웠어! 같이 놀아줘서 고마워!"],
    "zh": ["太好玩了!下次再玩吧!🎉", "耶!最棒的游戏时光!", "我玩得很开心!谢谢你陪我玩!"],
}

_SLEEP_RESPONSES = {
    "en": ["Zzz... that was a great nap! 😴", "I feel so refreshed now!", "Energy restored! Ready for action!"],
    "nl": ["Zzz... dat was een geweldig dutje! 😴", "Ik voel me zo fris nu!", "Energie hersteld! Klaar voor actie!"],
    "de": ["Zzz... das war ein toller Schlaf! 😴", "Ich fühle mich so erfrischt!", "Energie aufgeladen! Bereit für Action!"],
    "fr": ["Zzz... c'était une super sieste! 😴", "Je me sens tellement frais!", "Énergie restaurée! Prêt pour l'action!"],
    "es": ["Zzz... ¡qué buena siesta! 😴", "¡Me siento tan renovado!", "¡Energía restaurada! ¡Listo para la acción!"],
    "it": ["Zzz... che bel pisolino! 😴", "Mi sento così rinfrescato!", "Energia ripristinata! Pronto all'azione!"],
    "pt": ["Zzz... que soneca boa! 😴", "Me sinto tão revigorado!", "Energia restaurada! Pronto para ação!"],
    "ja": ["zzz...いい昼寝だった!😴", "すっきりした!", "エネルギー回復!準備万端!"],
    "ko": ["zzz...좋은 낮잠이었어! 😴", "정말 상쾌해!", "에너지 충전 완료! 준비됐어!"],
    "zh": ["zzz...睡得真好!😴", "我感觉精神多了!", "能量恢复!准备行动!"],
}

_ENTERTAIN_RESPONSES = {
    "en": ["That was entertaining! No more boredom! 🎭", "Wow, that was fun! Thanks!", "Boredom defeated! You're amazing!"],
    "nl": ["Dat was vermakelijk! Geen verveling meer! 🎭", "Wow, dat was leuk! Bedankt!", "Verveling verslagen! Je bent geweldig!"],
    "de": ["Das war unterhaltsam! Keine Langeweile mehr! 🎭", "Wow, das hat Spaß gemacht! Danke!", "Langeweile besiegt! Du bist toll!"],
    "fr": ["C'était divertissant! Plus d'ennui! 🎭", "Waouh, c'était amusant! Merci!", "Ennui vaincu! Tu es génial!"],
    "es": ["¡Eso fue entretenido! ¡No más aburrimiento! 🎭", "¡Wow, fue divertido! ¡Gracias!", "¡Aburrimiento derrotado! ¡Eres increíble!"],
    "it": ["È stato divertente! Niente più noia! 🎭", "Wow, è stato bello! Grazie!", "Noia sconfitta! Sei fantastico!"],
    "pt": ["Isso foi divertido! Chega de tédio! 🎭", "Uau, foi legal! Obrigado!", "Tédio derrotado! Você é incrível!"],
    "ja": ["楽しかった!もう退屈じゃない!🎭", "わー、楽しかった!ありがとう!", "退屈を撃退!君は最高!"],
    "ko": ["재밌었어! 더 이상 지루하지 않아! 🎭", "와, 재밌었어! 고마워!", "지루함 퇴치! 넌 최고야!"],
    "zh": ["太有趣了!不再无聊了!🎭", "哇,好好玩!谢谢!", "无聊被打败了!你太棒了!"],
}

_CHAT_RESPONSES = {
    "en": ["Thanks for chatting! I love our conversations! 💬", "That was a great talk! Feel less lonely now!", "I always love talking with you!"],
    "nl": ["Bedankt voor het kletsen! Ik hou van onze gesprekken! 💬", "Dat was een goed gesprek! Ik voel me minder eenzaam!", "Ik praat altijd graag met je!"],
    "de": ["Danke fürs Plaudern! Ich liebe unsere Gespräche! 💬", "Das war ein tolles Gespräch! Ich fühle mich weniger einsam!", "Ich liebe es, mit dir zu reden!"],
    "fr": ["Merci pour cette discussion! J'adore nos conversations! 💬", "C'était une super discussion! Je me sens moins seul!", "J'adore parler avec toi!"],
    "es": ["¡Gracias por charlar! ¡Me encantan nuestras conversaciones! 💬", "¡Fue una gran charla! ¡Me siento menos solo!", "¡Siempre me encanta hablar contigo!"],
    "it": ["Grazie per la chiacchierata! Adoro le nostre conversazioni! 💬", "È stata una bella chiacchierata! Mi sento meno solo!", "Adoro parlare con te!"],
    "pt": ["Obrigado pela conversa! Adoro nossas conversas! 💬", "Foi uma ótima conversa! Me sinto menos sozinho!", "Adoro conversar com você!"],
    "ja": ["おしゃべりありがとう!会話が大好き!💬", "いい話だった!寂しくなくなった!", "君と話すのが大好き!"],
    "ko": ["대화해줘서 고마워! 우리 대화가 좋아! 💬", "좋은 대화였어! 덜 외로워졌어!", "너랑 얘기하는 게 항상 좋아!"],
    "zh": ["谢谢聊天!我喜欢我们的对话!💬", "谈得很好!不那么孤单了!", "我总是喜欢和你说话!"],
}

_MEDICINE_RESPONSES = {
    "en": ["I feel so much better already! 💊", "Medicine works wonders! Thank you!", "Health restored! You're a lifesaver!"],
    "nl": ["Ik voel me al veel beter! 💊", "Medicijnen doen wonderen! Bedankt!", "Gezondheid hersteld! Je bent een redder!"],
    "de": ["Ich fühle mich schon viel besser! 💊", "Medizin wirkt Wunder! Danke!", "Gesundheit wiederhergestellt! Du bist ein Lebensretter!"],
    "fr": ["Je me sens déjà beaucoup mieux! 💊", "Les médicaments font des miracles! Merci!", "Santé restaurée! Tu es un sauveur!"],
    "es": ["¡Ya me siento mucho mejor! 💊", "¡La medicina hace maravillas! ¡Gracias!", "¡Salud restaurada! ¡Eres un salvavidas!"],
    "it": ["Mi sento già molto meglio! 💊", "La medicina fa miracoli! Grazie!", "Salute ripristinata! Sei un salvavita!"],
    "pt": ["Já me sinto muito melhor! 💊", "Remédio faz maravilhas! Obrigado!", "Saúde restaurada! Você é um salvador!"],
    "ja": ["もうずっと良くなった!💊", "薬って素晴らしい!ありがとう!", "健康回復!命の恩人!"],
    "ko": ["벌써 훨씬 나아졌어! 💊", "약이 효과 있어! 고마워!", "건강 회복! 넌 생명의 은인이야!"],
    "zh": ["我已经好多了!💊", "药物真神奇!谢谢!", "健康恢复!你是救命恩人!"],
}

_CLEAN_RESPONSES = {
    "en": ["Squeaky clean! I feel amazing! ✨", "So fresh and clean! Thank you!", "Sparkle sparkle! All clean now!"],
    "nl": ["Spik en span! Ik voel me geweldig! ✨", "Zo fris en schoon! Dank je!", "Blinken en glanzen! Helemaal schoon nu!"],
    "de": ["Blitzsauber! Ich fühle mich toll! ✨", "So frisch und sauber! Danke!", "Glitzer glitzer! Jetzt ganz sauber!"],
    "fr": ["Tout propre! Je me sens super! ✨", "Si frais et propre! Merci!", "Ça brille! Tout propre maintenant!"],
    "es": ["¡Impecable! ¡Me siento genial! ✨", "¡Tan fresco y limpio! ¡Gracias!", "¡Brilla brilla! ¡Todo limpio ahora!"],
    "it": ["Pulitissimo! Mi sento benissimo! ✨", "Così fresco e pulito! Grazie!", "Luccica luccica! Tutto pulito ora!"],
    "pt": ["Limpinho! Me sinto incrível! ✨", "Tão fresco e limpo! Obrigado!", "Brilha brilha! Todo limpo agora!"],
    "ja": ["ピカピカ!気分最高!✨", "すっきり清潔!ありがとう!", "キラキラ!綺麗になった!"],
    "ko": ["깨끗해! 기분 최고! ✨", "상쾌하고 깨끗해! 고마워!", "반짝반짝! 이제 깨끗해!"],
    "zh": ["干干净净!感觉棒极了!✨", "好清爽干净!谢谢!", "闪闪发光!现在全干净了!"],
}

CARE_RESPONSES = {
    "feed": _FEED_RESPONSES,
    "water": _WATER_RESPONSES,
    "play": _PLAY_RESPONSES,
    "sleep": _SLEEP_RESPONSES,
    "entertain": _ENTERTAIN_RESPONSES,
    "chat": _CHAT_RESPONSES,
    "medicine": _MEDICINE_RESPONSES,
    "clean": _CLEAN_RESPONSES,
}
|
| 553 |
+
|
| 554 |
+
# =============================================================================
|
| 555 |
+
# Idle Messages
|
| 556 |
+
# =============================================================================
|
| 557 |
+
|
| 558 |
+
# Spontaneous small-talk lines, five variants per supported language.
# Fix: the first Korean entry ended in four U+FFFD replacement characters
# (mojibake of the 4-byte UTF-8 👋 emoji); restored to 👋, matching the
# first idle message of every other language.
IDLE_MESSAGES = {
    "en": ["Hey! Just wanted to say hi! 👋", "Did you know? I was just thinking about how awesome you are! 💭", "I'm having a great day! Hope you are too! ☀️", "Just checking in! How's your day going? 🌸", "You know what? I'm lucky to have you! 🍀"],
    "nl": ["Hey! Ik wilde even hoi zeggen! 👋", "Wist je dat? Ik dacht net hoe geweldig je bent! 💭", "Ik heb een geweldige dag! Jij hopelijk ook! ☀️", "Even inchecken! Hoe gaat je dag? 🌸", "Weet je wat? Ik heb geluk met jou! 🍀"],
    "de": ["Hey! Wollte nur hallo sagen! 👋", "Weißt du was? Ich habe gerade gedacht, wie toll du bist! 💭", "Ich habe einen tollen Tag! Hoffe du auch! ☀️", "Nur kurz vorbeischauen! Wie läuft dein Tag? 🌸", "Weißt du was? Ich habe Glück, dich zu haben! 🍀"],
    "fr": ["Salut! Je voulais juste dire coucou! 👋", "Tu savais? Je pensais à quel point tu es génial! 💭", "Je passe une super journée! J'espère toi aussi! ☀️", "Je prends des nouvelles! Comment va ta journée? 🌸", "Tu sais quoi? J'ai de la chance de t'avoir! 🍀"],
    "es": ["¡Hola! ¡Solo quería saludar! 👋", "¿Sabías? ¡Estaba pensando en lo increíble que eres! 💭", "¡Tengo un gran día! ¡Espero que tú también! ☀️", "¡Solo pasando a ver! ¿Cómo va tu día? 🌸", "¿Sabes qué? ¡Tengo suerte de tenerte! 🍀"],
    "it": ["Ehi! Volevo solo salutare! 👋", "Lo sapevi? Stavo pensando a quanto sei fantastico! 💭", "Sto passando una bella giornata! Spero anche tu! ☀️", "Solo un saluto! Come va la tua giornata? 🌸", "Sai una cosa? Sono fortunato ad averti! 🍀"],
    "pt": ["Ei! Só queria dizer oi! 👋", "Sabia que? Eu estava pensando em como você é incrível! 💭", "Estou tendo um ótimo dia! Espero que você também! ☀️", "Só passando para ver! Como está o seu dia? 🌸", "Sabe o que? Tenho sorte de ter você! 🍀"],
    "ja": ["やあ!挨拶したかっただけ!👋", "知ってた?君がどれだけ素晴らしいか考えてた!💭", "今日はいい日!君もそうだといいな!☀️", "様子見に来たよ!調子どう?🌸", "ねえ、君がいて幸せだよ!🍀"],
    "ko": ["안녕! 그냥 인사하고 싶었어! 👋", "알고 있었어? 네가 얼마나 멋진지 생각하고 있었어! 💭", "오늘 좋은 하루야! 너도 그랬으면 좋겠어! ☀️", "그냥 안부 확인! 오늘 하루 어때? 🌸", "있잖아, 네가 있어서 행운이야! 🍀"],
    "zh": ["嘿!只是想打个招呼!👋", "你知道吗?我刚在想你有多棒!💭", "我今天很开心!希望你也是!☀️", "来看看你!你今天过得怎么样?🌸", "你知道吗?有你真幸运!🍀"],
}
|
| 630 |
+
|
| 631 |
+
# =============================================================================
|
| 632 |
+
# Fun Facts
|
| 633 |
+
# =============================================================================
|
| 634 |
+
|
| 635 |
+
# Trivia lines for spontaneous chatter, three variants per supported language.
FUN_FACTS = {
    "en": ["Did you know? Octopuses have three hearts! And I have one big one for you! 🐙💖", "Fun fact: Honey never spoils! Just like our friendship! 🍯", "Here's something cool: The shortest war lasted only 38 minutes! 🕐"],
    "nl": ["Wist je dat? Octopussen hebben drie harten! En ik heb één groot hart voor jou! 🐙💖", "Leuk weetje: Honing bederft nooit! Net als onze vriendschap! 🍯", "Iets cools: De kortste oorlog duurde maar 38 minuten! 🕐"],
    "de": ["Wusstest du? Oktopusse haben drei Herzen! Und ich habe ein großes für dich! 🐙💖", "Fun Fact: Honig verdirbt nie! Genau wie unsere Freundschaft! 🍯", "Etwas Cooles: Der kürzeste Krieg dauerte nur 38 Minuten! 🕐"],
    "fr": ["Le savais-tu? Les pieuvres ont trois cœurs! Et j'en ai un grand pour toi! 🐙💖", "Fait amusant: Le miel ne se gâte jamais! Comme notre amitié! 🍯", "Truc cool: La guerre la plus courte a duré seulement 38 minutes! 🕐"],
    "es": ["¿Sabías que? ¡Los pulpos tienen tres corazones! ¡Y yo tengo uno grande para ti! 🐙💖", "Dato curioso: ¡La miel nunca se echa a perder! ¡Como nuestra amistad! 🍯", "Algo cool: ¡La guerra más corta duró solo 38 minutos! 🕐"],
    "it": ["Lo sapevi? I polpi hanno tre cuori! E io ne ho uno grande per te! 🐙💖", "Fatto divertente: Il miele non scade mai! Come la nostra amicizia! 🍯", "Una cosa interessante: La guerra più corta è durata solo 38 minuti! 🕐"],
    "pt": ["Você sabia? Polvos têm três corações! E eu tenho um grande para você! 🐙💖", "Fato curioso: O mel nunca estraga! Assim como nossa amizade! 🍯", "Algo legal: A guerra mais curta durou apenas 38 minutos! 🕐"],
    "ja": ["知ってた?タコには心臓が3つある!私には君のための大きな心が1つ!🐙💖", "豆知識:蜂蜜は腐らない!私たちの友情みたいに!🍯", "面白いこと:最短の戦争はたった38分だった!🕐"],
    "ko": ["알고 있었어? 문어는 심장이 세 개야! 그리고 난 너를 위한 큰 심장 하나가 있어! 🐙💖", "재미있는 사실: 꿀은 절대 상하지 않아! 우리 우정처럼! 🍯", "멋진 것: 가장 짧은 전쟁은 38분밖에 안 걸렸어! 🕐"],
    "zh": ["你知道吗?章鱼有三颗心!而我有一颗大心是给你的!🐙💖", "有趣的事实:蜂蜜永远不会变质!就像我们的友谊!🍯", "很酷的事:最短的战争只持续了38分钟!🕐"],
}
|
| 687 |
+
|
| 688 |
+
# =============================================================================
|
| 689 |
+
# Motivational Messages
|
| 690 |
+
# =============================================================================
|
| 691 |
+
|
| 692 |
+
# Maps language code (ISO 639-1 style: "en", "nl", ...) -> list of candidate
# messages. get_motivational_message() picks one at random; English is the
# fallback when a language is missing (see get_message()).
MOTIVATIONAL_MESSAGES = {
    "en": [
        "Hey! You've got this! I believe in you! 💪",
        "Remember: Every day is a fresh start! 🌅",
        "You're capable of amazing things! Never forget that! ⭐",
    ],
    "nl": [
        "Hey! Je kunt dit! Ik geloof in je! 💪",
        "Onthoud: Elke dag is een nieuwe start! 🌅",
        "Je bent tot geweldige dingen in staat! Vergeet dat nooit! ⭐",
    ],
    "de": [
        "Hey! Du schaffst das! Ich glaube an dich! 💪",
        "Denk dran: Jeder Tag ist ein Neuanfang! 🌅",
        "Du bist zu Großem fähig! Vergiss das nie! ⭐",
    ],
    "fr": [
        "Hé! Tu vas y arriver! Je crois en toi! 💪",
        "Rappelle-toi: Chaque jour est un nouveau départ! 🌅",
        "Tu es capable de choses incroyables! N'oublie jamais ça! ⭐",
    ],
    "es": [
        "¡Oye! ¡Tú puedes! ¡Creo en ti! 💪",
        "Recuerda: ¡Cada día es un nuevo comienzo! 🌅",
        "¡Eres capaz de cosas increíbles! ¡Nunca lo olvides! ⭐",
    ],
    "it": [
        "Ehi! Ce la puoi fare! Credo in te! 💪",
        "Ricorda: Ogni giorno è un nuovo inizio! 🌅",
        "Sei capace di cose straordinarie! Non dimenticarlo mai! ⭐",
    ],
    "pt": [
        "Ei! Você consegue! Eu acredito em você! 💪",
        "Lembre-se: Cada dia é um novo começo! 🌅",
        "Você é capaz de coisas incríveis! Nunca esqueça isso! ⭐",
    ],
    "ja": [
        "ねえ!君ならできる!信じてるよ!💪",
        "覚えておいて:毎日が新しいスタート!🌅",
        "君は素晴らしいことができる!それを忘れないで!⭐",
    ],
    "ko": [
        "야! 넌 할 수 있어! 난 널 믿어! 💪",
        "기억해: 매일이 새로운 시작이야! 🌅",
        "넌 놀라운 일을 할 수 있어! 절대 잊지 마! ⭐",
    ],
    "zh": [
        "嘿!你能行的!我相信你!💪",
        "记住:每一天都是新的开始!🌅",
        "你有能力做出惊人的事情!永远不要忘记!⭐",
    ],
}
|
| 744 |
+
|
| 745 |
+
# =============================================================================
|
| 746 |
+
# System Messages (enable/disable, status)
|
| 747 |
+
# =============================================================================
|
| 748 |
+
|
| 749 |
+
# Maps message_key -> {language code -> message template}.
# Some templates contain str.format placeholders ({emoji}, {mood}) that
# get_system_message() fills in; English is the fallback language.
SYSTEM_MESSAGES = {
    "enable_welcome": {
        "en": "Yay! TamaReachy is activated! {emoji} Take good care of me!",
        "nl": "Joepie! TamaReachy is geactiveerd! {emoji} Zorg goed voor me!",
        "de": "Juhu! TamaReachy ist aktiviert! {emoji} Kümmere dich gut um mich!",
        "fr": "Youpi! TamaReachy est activé! {emoji} Prends bien soin de moi!",
        "es": "¡Yuju! ¡TamaReachy está activado! {emoji} ¡Cuida bien de mí!",
        "it": "Evviva! TamaReachy è attivato! {emoji} Prenditi cura di me!",
        "pt": "Eba! TamaReachy está ativado! {emoji} Cuide bem de mim!",
        "ja": "やったー!TamaReachyが起動したよ!{emoji} よろしくね!",
        "ko": "야호! TamaReachy가 활성화됐어! {emoji} 잘 돌봐줘!",
        "zh": "耶!TamaReachy激活了!{emoji} 好好照顾我!",
    },
    "disable_goodbye": {
        "en": "Goodbye for now... I'll miss you! 😢",
        "nl": "Tot ziens voor nu... Ik zal je missen! 😢",
        "de": "Auf Wiedersehen für jetzt... Ich werde dich vermissen! 😢",
        "fr": "Au revoir pour l'instant... Tu vas me manquer! 😢",
        "es": "Adiós por ahora... ¡Te extrañaré! 😢",
        "it": "Arrivederci per ora... Mi mancherai! 😢",
        "pt": "Tchau por agora... Vou sentir sua falta! 😢",
        "ja": "じゃあね...寂しくなるよ!😢",
        "ko": "잠시 안녕... 보고 싶을 거야! 😢",
        "zh": "暂时再见...我会想你的!😢",
    },
    "status_sleeping": {
        "en": "TamaReachy is sleeping. Enable me to play!",
        "nl": "TamaReachy slaapt. Activeer me om te spelen!",
        "de": "TamaReachy schläft. Aktiviere mich zum Spielen!",
        "fr": "TamaReachy dort. Active-moi pour jouer!",
        "es": "TamaReachy está durmiendo. ¡Actívame para jugar!",
        "it": "TamaReachy sta dormendo. Attivami per giocare!",
        "pt": "TamaReachy está dormindo. Ative-me para brincar!",
        "ja": "TamaReachyは寝てるよ。起こして遊ぼう!",
        "ko": "TamaReachy가 자고 있어. 깨워서 놀자!",
        "zh": "TamaReachy在睡觉。激活我来玩吧!",
    },
    "status_mood": {
        "en": "I'm feeling {mood}! {emoji}",
        "nl": "Ik voel me {mood}! {emoji}",
        "de": "Ich fühle mich {mood}! {emoji}",
        "fr": "Je me sens {mood}! {emoji}",
        "es": "¡Me siento {mood}! {emoji}",
        "it": "Mi sento {mood}! {emoji}",
        "pt": "Estou me sentindo {mood}! {emoji}",
        "ja": "{mood}な気分!{emoji}",
        "ko": "{mood} 기분이야! {emoji}",
        "zh": "我感觉{mood}!{emoji}",
    },
    "disable_message": {
        "en": "TamaReachy has been disabled. Enable it anytime to resume!",
        "nl": "TamaReachy is uitgeschakeld. Activeer het wanneer je maar wilt!",
        "de": "TamaReachy wurde deaktiviert. Aktiviere es jederzeit wieder!",
        "fr": "TamaReachy a été désactivé. Réactive-le quand tu veux!",
        "es": "TamaReachy ha sido desactivado. ¡Actívalo cuando quieras!",
        "it": "TamaReachy è stato disattivato. Riattivalo quando vuoi!",
        "pt": "TamaReachy foi desativado. Ative quando quiser!",
        "ja": "TamaReachyは無効になりました。いつでも再開できます!",
        "ko": "TamaReachy가 비활성화됐어. 언제든 다시 활성화해!",
        "zh": "TamaReachy已禁用。随时启用即可恢复!",
    },
}
|
| 811 |
+
|
| 812 |
+
# Mood translations
|
| 813 |
+
# Mood translations: English mood name -> {language code -> translated name}.
# Looked up by get_translated_mood(); the English name doubles as the
# final fallback for unknown moods/languages.
MOOD_TRANSLATIONS = {
    "ecstatic": {
        "en": "ecstatic", "nl": "extatisch", "de": "überglücklich", "fr": "extatique",
        "es": "eufórico", "it": "euforico", "pt": "extasiado", "ja": "大喜び",
        "ko": "황홀한", "zh": "狂喜",
    },
    "happy": {
        "en": "happy", "nl": "blij", "de": "glücklich", "fr": "heureux",
        "es": "feliz", "it": "felice", "pt": "feliz", "ja": "嬉しい",
        "ko": "행복한", "zh": "开心",
    },
    "content": {
        "en": "content", "nl": "tevreden", "de": "zufrieden", "fr": "content",
        "es": "contento", "it": "contento", "pt": "contente", "ja": "満足",
        "ko": "만족한", "zh": "满足",
    },
    "meh": {
        "en": "meh", "nl": "mwah", "de": "meh", "fr": "bof",
        "es": "meh", "it": "così così", "pt": "assim assim", "ja": "まあまあ",
        "ko": "그저 그래", "zh": "一般般",
    },
    "sad": {
        "en": "sad", "nl": "verdrietig", "de": "traurig", "fr": "triste",
        "es": "triste", "it": "triste", "pt": "triste", "ja": "悲しい",
        "ko": "슬픈", "zh": "伤心",
    },
    "miserable": {
        "en": "miserable", "nl": "ellendig", "de": "elend", "fr": "misérable",
        "es": "miserable", "it": "miserabile", "pt": "miserável", "ja": "みじめ",
        "ko": "비참한", "zh": "痛苦",
    },
}
|
| 845 |
+
|
| 846 |
+
|
| 847 |
+
# =============================================================================
|
| 848 |
+
# Helper Functions
|
| 849 |
+
# =============================================================================
|
| 850 |
+
|
| 851 |
+
def get_message(messages_dict: dict, language: str) -> str:
    """Pick a random message for *language* from a translation table.

    English ("en") serves as the fallback when the requested language has
    no entry; a placeholder list is used when even English is missing.

    Args:
        messages_dict: Mapping of language code -> list of message strings.
        language: Language code (e.g., 'en', 'nl', 'de').

    Returns:
        One randomly selected message string.
    """
    import random

    english_fallback = messages_dict.get("en", ["..."])
    candidates = messages_dict.get(language, english_fallback)
    return random.choice(candidates)
|
| 867 |
+
|
| 868 |
+
|
| 869 |
+
def get_request_message(need: str, language: str) -> str:
    """Return a localized request line for a specific need.

    Args:
        need: The need type (hunger, thirst, etc.).
        language: Language code.

    Returns:
        A request message; English is the fallback language, and a generic
        plea is built for needs missing from the table entirely.
    """
    import random

    by_language = REQUEST_MESSAGES.get(need, {})
    english_fallback = by_language.get("en", [f"I need help with my {need}!"])
    candidates = by_language.get(language, english_fallback)
    return random.choice(candidates)
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
def get_care_response(action: str, language: str) -> str:
    """Return a localized reaction to a care action.

    Args:
        action: The care action (feed, water, etc.).
        language: Language code.

    Returns:
        A response message; English is the fallback language, and a generic
        thank-you is used for unknown actions.
    """
    import random

    responses_for_action = CARE_RESPONSES.get(action, {})
    english_fallback = responses_for_action.get("en", ["Thanks! That helped!"])
    candidates = responses_for_action.get(language, english_fallback)
    return random.choice(candidates)
|
| 901 |
+
|
| 902 |
+
|
| 903 |
+
def get_idle_message(language: str) -> str:
    """Return a random idle chatter line localized to *language*."""
    return get_message(IDLE_MESSAGES, language)
|
| 906 |
+
|
| 907 |
+
|
| 908 |
+
def get_fun_fact(language: str) -> str:
    """Return a random fun-fact string localized to *language*."""
    return get_message(FUN_FACTS, language)
|
| 911 |
+
|
| 912 |
+
|
| 913 |
+
def get_motivational_message(language: str) -> str:
    """Return a random motivational line localized to *language*."""
    return get_message(MOTIVATIONAL_MESSAGES, language)
|
| 916 |
+
|
| 917 |
+
|
| 918 |
+
def get_system_message(message_key: str, language: str, **kwargs) -> str:
    """Get a system message in the specified language.

    Args:
        message_key: Key in SYSTEM_MESSAGES (e.g., 'enable_welcome').
        language: Language code.
        **kwargs: Format arguments for the message (e.g. emoji=..., mood=...).

    Returns:
        Formatted system message. Falls back to English, then to "" when the
        key or language is unknown; when the template cannot be formatted
        with the given arguments, the raw template is returned unchanged.
    """
    messages = SYSTEM_MESSAGES.get(message_key, {})
    message = messages.get(language, messages.get("en", ""))

    if kwargs:
        try:
            return message.format(**kwargs)
        # str.format can raise more than KeyError: IndexError for a
        # positional "{}" placeholder and ValueError for an unbalanced or
        # malformed brace in a translated template. The unformatted text is
        # still more useful to the caller than a crash.
        except (KeyError, IndexError, ValueError):
            return message
    return message
|
| 938 |
+
|
| 939 |
+
|
| 940 |
+
def get_translated_mood(mood_name: str, language: str) -> str:
    """Translate an English mood name into the target language.

    Args:
        mood_name: English mood name.
        language: Target language code.

    Returns:
        The translated mood name, falling back to English and finally to
        the input itself when the mood is unknown.
    """
    translations = MOOD_TRANSLATIONS.get(mood_name, {})
    english_fallback = translations.get("en", mood_name)
    return translations.get(language, english_fallback)
|
| 952 |
+
|
reachys_brain/tts_service.py
CHANGED
|
@@ -3,6 +3,7 @@
|
|
| 3 |
import asyncio
|
| 4 |
import logging
|
| 5 |
import os
|
|
|
|
| 6 |
import subprocess
|
| 7 |
import threading
|
| 8 |
from dataclasses import dataclass
|
|
@@ -10,6 +11,8 @@ from typing import Callable, Optional
|
|
| 10 |
|
| 11 |
import httpx
|
| 12 |
|
|
|
|
|
|
|
| 13 |
logger = logging.getLogger(__name__)
|
| 14 |
|
| 15 |
# OpenAI TTS API endpoint
|
|
@@ -137,9 +140,17 @@ class TTSService:
|
|
| 137 |
def _play_audio_sync(self, audio_data: bytes) -> None:
|
| 138 |
"""Play PCM audio data through Reachy's speaker synchronously.
|
| 139 |
|
|
|
|
|
|
|
| 140 |
Args:
|
| 141 |
audio_data: Raw PCM16 audio at 24kHz mono.
|
| 142 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 143 |
# OpenAI TTS PCM format: 24kHz, 16-bit signed little-endian, mono
|
| 144 |
aplay_cmd = [
|
| 145 |
"aplay",
|
|
@@ -151,31 +162,98 @@ class TTSService:
|
|
| 151 |
"-q",
|
| 152 |
]
|
| 153 |
|
| 154 |
-
|
| 155 |
-
aplay_cmd,
|
| 156 |
-
stdin=subprocess.PIPE,
|
| 157 |
-
stdout=subprocess.PIPE,
|
| 158 |
-
stderr=subprocess.PIPE,
|
| 159 |
-
)
|
| 160 |
|
| 161 |
try:
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 170 |
except subprocess.TimeoutExpired:
|
| 171 |
-
|
|
|
|
|
|
|
|
|
|
| 172 |
logger.error("Audio playback timeout")
|
|
|
|
|
|
|
| 173 |
finally:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 174 |
self._current_process = None
|
| 175 |
|
| 176 |
def speak(
|
| 177 |
self,
|
| 178 |
text: str,
|
|
|
|
| 179 |
on_start: Callable[[], None] | None = None,
|
| 180 |
on_end: Callable[[], None] | None = None,
|
| 181 |
) -> float:
|
|
@@ -197,8 +275,8 @@ class TTSService:
|
|
| 197 |
self._stop_requested = False
|
| 198 |
|
| 199 |
try:
|
| 200 |
-
# Get current voice from settings
|
| 201 |
-
voice = self._get_current_voice()
|
| 202 |
|
| 203 |
# Fetch audio from OpenAI
|
| 204 |
audio_data = self._fetch_audio_sync(text, voice)
|
|
@@ -230,6 +308,7 @@ class TTSService:
|
|
| 230 |
def speak_async(
|
| 231 |
self,
|
| 232 |
text: str,
|
|
|
|
| 233 |
on_start: Callable[[], None] | None = None,
|
| 234 |
on_end: Callable[[], None] | None = None,
|
| 235 |
) -> float:
|
|
@@ -249,7 +328,7 @@ class TTSService:
|
|
| 249 |
|
| 250 |
def _speak_thread():
|
| 251 |
try:
|
| 252 |
-
self.speak(text, on_start, on_end)
|
| 253 |
except Exception as e:
|
| 254 |
logger.error(f"Async TTS error: {e}")
|
| 255 |
|
|
|
|
| 3 |
import asyncio
|
| 4 |
import logging
|
| 5 |
import os
|
| 6 |
+
import shutil
|
| 7 |
import subprocess
|
| 8 |
import threading
|
| 9 |
from dataclasses import dataclass
|
|
|
|
| 11 |
|
| 12 |
import httpx
|
| 13 |
|
| 14 |
+
from .audio_playback import is_kids_mode_enabled, KIDS_MODE_PITCH_CENTS, KIDS_MODE_TEMPO
|
| 15 |
+
|
| 16 |
logger = logging.getLogger(__name__)
|
| 17 |
|
| 18 |
# OpenAI TTS API endpoint
|
|
|
|
| 140 |
def _play_audio_sync(self, audio_data: bytes) -> None:
|
| 141 |
"""Play PCM audio data through Reachy's speaker synchronously.
|
| 142 |
|
| 143 |
+
Applies pitch shifting when kids mode is enabled.
|
| 144 |
+
|
| 145 |
Args:
|
| 146 |
audio_data: Raw PCM16 audio at 24kHz mono.
|
| 147 |
"""
|
| 148 |
+
# Check if kids mode is enabled and sox is available
|
| 149 |
+
use_pitch_shift = is_kids_mode_enabled() and shutil.which("sox") is not None
|
| 150 |
+
|
| 151 |
+
if is_kids_mode_enabled() and not shutil.which("sox"):
|
| 152 |
+
logger.warning("🧒 Kids mode enabled but sox not found, using normal playback")
|
| 153 |
+
|
| 154 |
# OpenAI TTS PCM format: 24kHz, 16-bit signed little-endian, mono
|
| 155 |
aplay_cmd = [
|
| 156 |
"aplay",
|
|
|
|
| 162 |
"-q",
|
| 163 |
]
|
| 164 |
|
| 165 |
+
sox_process: Optional[subprocess.Popen] = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 166 |
|
| 167 |
try:
|
| 168 |
+
if use_pitch_shift:
|
| 169 |
+
# sox command for pitch shifting
|
| 170 |
+
sox_cmd = [
|
| 171 |
+
"sox",
|
| 172 |
+
"-t", "raw",
|
| 173 |
+
"-r", "24000",
|
| 174 |
+
"-e", "signed",
|
| 175 |
+
"-b", "16",
|
| 176 |
+
"-c", "1",
|
| 177 |
+
"-", # Input from stdin
|
| 178 |
+
"-t", "raw",
|
| 179 |
+
"-", # Output to stdout
|
| 180 |
+
"pitch", str(KIDS_MODE_PITCH_CENTS),
|
| 181 |
+
"tempo", str(KIDS_MODE_TEMPO),
|
| 182 |
+
]
|
| 183 |
+
|
| 184 |
+
# Create sox process
|
| 185 |
+
sox_process = subprocess.Popen(
|
| 186 |
+
sox_cmd,
|
| 187 |
+
stdin=subprocess.PIPE,
|
| 188 |
+
stdout=subprocess.PIPE,
|
| 189 |
+
stderr=subprocess.PIPE,
|
| 190 |
+
)
|
| 191 |
+
|
| 192 |
+
# Create aplay process reading from sox output
|
| 193 |
+
self._current_process = subprocess.Popen(
|
| 194 |
+
aplay_cmd,
|
| 195 |
+
stdin=sox_process.stdout,
|
| 196 |
+
stdout=subprocess.PIPE,
|
| 197 |
+
stderr=subprocess.PIPE,
|
| 198 |
+
)
|
| 199 |
+
|
| 200 |
+
# Close sox stdout in parent so aplay gets EOF when sox closes
|
| 201 |
+
if sox_process.stdout:
|
| 202 |
+
sox_process.stdout.close()
|
| 203 |
+
|
| 204 |
+
# Write audio to sox
|
| 205 |
+
if sox_process.stdin:
|
| 206 |
+
sox_process.stdin.write(audio_data)
|
| 207 |
+
sox_process.stdin.close()
|
| 208 |
+
|
| 209 |
+
# Wait for both processes
|
| 210 |
+
sox_process.wait(timeout=120)
|
| 211 |
+
self._current_process.wait(timeout=120)
|
| 212 |
+
|
| 213 |
+
logger.info(f"🧒 Played audio with kids mode pitch shift +{KIDS_MODE_PITCH_CENTS} cents")
|
| 214 |
+
else:
|
| 215 |
+
# Normal playback without pitch shifting
|
| 216 |
+
self._current_process = subprocess.Popen(
|
| 217 |
+
aplay_cmd,
|
| 218 |
+
stdin=subprocess.PIPE,
|
| 219 |
+
stdout=subprocess.PIPE,
|
| 220 |
+
stderr=subprocess.PIPE,
|
| 221 |
+
)
|
| 222 |
+
|
| 223 |
+
stdout, stderr = self._current_process.communicate(
|
| 224 |
+
input=audio_data,
|
| 225 |
+
timeout=120,
|
| 226 |
+
)
|
| 227 |
+
|
| 228 |
+
if self._current_process.returncode != 0 and not self._stop_requested:
|
| 229 |
+
error_msg = stderr.decode() if stderr else "Unknown error"
|
| 230 |
+
logger.error(f"aplay failed: {error_msg}")
|
| 231 |
+
|
| 232 |
except subprocess.TimeoutExpired:
|
| 233 |
+
if sox_process:
|
| 234 |
+
sox_process.kill()
|
| 235 |
+
if self._current_process:
|
| 236 |
+
self._current_process.kill()
|
| 237 |
logger.error("Audio playback timeout")
|
| 238 |
+
except Exception as e:
|
| 239 |
+
logger.error(f"Error in audio playback: {e}")
|
| 240 |
finally:
|
| 241 |
+
# Clean up sox process
|
| 242 |
+
if sox_process:
|
| 243 |
+
try:
|
| 244 |
+
sox_process.terminate()
|
| 245 |
+
sox_process.wait(timeout=1)
|
| 246 |
+
except Exception:
|
| 247 |
+
try:
|
| 248 |
+
sox_process.kill()
|
| 249 |
+
except Exception:
|
| 250 |
+
pass
|
| 251 |
self._current_process = None
|
| 252 |
|
| 253 |
def speak(
|
| 254 |
self,
|
| 255 |
text: str,
|
| 256 |
+
voice_override: str | None = None,
|
| 257 |
on_start: Callable[[], None] | None = None,
|
| 258 |
on_end: Callable[[], None] | None = None,
|
| 259 |
) -> float:
|
|
|
|
| 275 |
self._stop_requested = False
|
| 276 |
|
| 277 |
try:
|
| 278 |
+
# Get current voice from settings (unless explicitly overridden)
|
| 279 |
+
voice = voice_override or self._get_current_voice()
|
| 280 |
|
| 281 |
# Fetch audio from OpenAI
|
| 282 |
audio_data = self._fetch_audio_sync(text, voice)
|
|
|
|
| 308 |
def speak_async(
|
| 309 |
self,
|
| 310 |
text: str,
|
| 311 |
+
voice_override: str | None = None,
|
| 312 |
on_start: Callable[[], None] | None = None,
|
| 313 |
on_end: Callable[[], None] | None = None,
|
| 314 |
) -> float:
|
|
|
|
| 328 |
|
| 329 |
def _speak_thread():
|
| 330 |
try:
|
| 331 |
+
self.speak(text, voice_override=voice_override, on_start=on_start, on_end=on_end)
|
| 332 |
except Exception as e:
|
| 333 |
logger.error(f"Async TTS error: {e}")
|
| 334 |
|