RemiFabre committed · Commit daced27 · 1 Parent(s): 46d5d48
Add PAD-based emotion system with muted speech output

Files changed:
- README.md (+29 -12)
- src/feeling_machine/dance_emotion_moves.py (+149 -1)
- src/feeling_machine/moves.py (+9 -0)
- src/feeling_machine/openai_realtime.py (+8 -1)
- src/feeling_machine/pad_motion/__init__.py (+19 -0)
- src/feeling_machine/pad_motion/pad_emotions.py (+73 -0)
- src/feeling_machine/pad_motion/pose_generation.py (+206 -0)
- src/feeling_machine/pad_motion/sound_generation.py (+174 -0)
- src/feeling_machine/profiles/_feeling_machine_locked_profile/custom_tool.py (+0 -38)
- src/feeling_machine/profiles/_feeling_machine_locked_profile/instructions.txt (+28 -3)
- src/feeling_machine/profiles/_feeling_machine_locked_profile/modalities.txt (+1 -0)
- src/feeling_machine/profiles/_feeling_machine_locked_profile/play_pad_emotion.py (+142 -0)
- src/feeling_machine/profiles/_feeling_machine_locked_profile/sweep_look.py (+0 -127)
- src/feeling_machine/profiles/_feeling_machine_locked_profile/tools.txt (+2 -16)
- src/feeling_machine/prompts.py (+24 -0)
README.md CHANGED
@@ -1,8 +1,8 @@
 ---
 title: Feeling Machine
-emoji:
+emoji: 🎭
 colorFrom: purple
-colorTo:
+colorTo: blue
 sdk: static
 pinned: false
 tags:
@@ -12,16 +12,33 @@ tags:
 
 # Feeling Machine
 
-
+An expressive robot app that responds to users through **emotions only** - no speech output.
 
-
-- Edit instructions `_feeling_machine_locked_profile/instructions.txt`
-- Edit available tools in `_feeling_machine_locked_profile/tools.txt`
-- You can create your own tools in `_feeling_machine_locked_profile` by subclassing the `Tool` class.
+The robot listens to what you say and responds with expressive motion and procedurally-generated sounds based on the **PAD model** (Pleasure, Arousal, Dominance).
 
-
-- this `README.md` file
-- the `index.html` file (Hugging Face Spaces landing page)
-- the `src/reachy_mini_conversation_app/static/index.html` (the web app parameters page)
+## Features
 
-
+- **PAD-based emotion generation**: Motion and sound are generated procedurally from emotional parameters
+- **Named emotions**: joy, happiness, anger, fear, sadness, surprise, boredom, uncertainty, disgust, neutral
+- **Custom PAD values**: The LLM can specify precise P/A/D values for nuanced expressions
+- **No speech**: The robot expresses itself only through body language and generated sounds
+
+## PAD Model
+
+The PAD model represents emotions in a 3D space:
+- **P**leasure: Positive (+1) vs negative (-1) emotion
+- **A**rousal: Excited/energetic (+1) vs calm/relaxed (-1)
+- **D**ominance: In control (+1) vs submissive (-1)
+
+## Usage
+
+```bash
+cd src/feeling_machine
+python main.py --gradio
+```
+
+Then open http://127.0.0.1:7861/
+
+## Classic Emotions
+
+If you specifically ask for "classic" emotions, the robot can play pre-recorded emotions from the Pollen library instead of PAD-generated ones.
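
To make the README's model concrete: named emotions are stored in the [-1, 1] convention and normalized to [0, 1] before the motion and sound generators consume them (see `pad_emotions.py` below). A minimal sketch of that mapping; the standalone `normalize` helper here simply mirrors the module's `normalize_pad`:

```python
# Sketch: how "joy" (P=1.0, A=0.6, D=0.4 in [-1, 1]) becomes generator input.
def normalize(value: float) -> float:
    # Same formula as pad_emotions.normalize_pad: [-1, 1] -> [0, 1]
    return max(0.0, min(1.0, (value + 1.0) * 0.5))

joy = {"P": 1.0, "A": 0.6, "D": 0.4}
print({k: normalize(v) for k, v in joy.items()})
# {'P': 1.0, 'A': 0.8, 'D': 0.7}
```
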
src/feeling_machine/dance_emotion_moves.py CHANGED
@@ -6,7 +6,7 @@ and executed sequentially by the MovementManager.
 
 from __future__ import annotations
 import logging
-from typing import Tuple
+from typing import List, Optional, Tuple
 
 import numpy as np
 from numpy.typing import NDArray
@@ -19,6 +19,149 @@ from reachy_mini_dances_library.dance_move import DanceMove
 logger = logging.getLogger(__name__)
 
 
+class PADEmotionMove(Move):  # type: ignore
+    """PAD-based procedural emotion move.
+
+    Generates a sequence of poses based on PAD (Pleasure, Arousal, Dominance)
+    values. Each segment smoothly interpolates to a new generated pose.
+    """
+
+    def __init__(
+        self,
+        P: float,
+        A: float,
+        D: float,
+        total_duration: float = 3.0,
+        audio_callback: Optional[callable] = None,
+    ):
+        """Initialize a PAD emotion move.
+
+        Args:
+            P: Pleasure [0, 1]
+            A: Arousal [0, 1]
+            D: Dominance [0, 1]
+            total_duration: Total duration of the emotion in seconds
+            audio_callback: Optional callback to push audio (called at init)
+        """
+        from reachy_mini.utils import create_head_pose
+        from feeling_machine.pad_motion import generate_pose, generate_sound
+
+        self.P = P
+        self.A = A
+        self.D = D
+        self._total_duration = total_duration
+
+        # Generate sequence of poses to fill the duration
+        self._poses: List[dict] = []
+        self._keyframes: List[float] = [0.0]  # timestamps of each pose start
+
+        accumulated_time = 0.0
+        while accumulated_time < total_duration:
+            pose = generate_pose(P, A, D)
+            # Cap pose duration to not exceed total
+            pose_duration = min(pose["duration"], total_duration - accumulated_time)
+            pose["duration"] = pose_duration
+            self._poses.append(pose)
+            accumulated_time += pose_duration
+            self._keyframes.append(accumulated_time)
+
+        # Create head pose matrices for interpolation
+        self._head_poses: List[NDArray[np.float32]] = []
+        for pose in self._poses:
+            head_pose = create_head_pose(
+                x=pose["x"] / 1000.0,  # mm to meters
+                y=pose["y"] / 1000.0,
+                z=pose["z"] / 1000.0,
+                roll=pose["roll"],
+                pitch=pose["pitch"],
+                yaw=pose["yaw"],
+                mm=False,
+                degrees=True,
+            )
+            self._head_poses.append(head_pose)
+
+        # Generate and push audio if callback provided
+        if audio_callback is not None:
+            try:
+                audio = generate_sound(P, A, D, total_duration)
+                audio_callback(audio)
+            except Exception as e:
+                logger.warning(f"Failed to generate/push audio: {e}")
+
+    @property
+    def duration(self) -> float:
+        """Duration property required by official Move interface."""
+        return self._total_duration
+
+    def _find_segment(self, t: float) -> Tuple[int, float]:
+        """Find which segment t falls into and the local progress.
+
+        Returns:
+            (segment_index, local_t) where local_t is in [0, 1]
+        """
+        for i in range(len(self._poses)):
+            if t < self._keyframes[i + 1]:
+                segment_start = self._keyframes[i]
+                segment_end = self._keyframes[i + 1]
+                segment_duration = segment_end - segment_start
+                if segment_duration > 0:
+                    local_t = (t - segment_start) / segment_duration
+                else:
+                    local_t = 1.0
+                return i, local_t
+
+        # Past the end, return last segment at t=1
+        return len(self._poses) - 1, 1.0
+
+    def evaluate(self, t: float) -> tuple[NDArray[np.float64] | None, NDArray[np.float64] | None, float | None]:
+        """Evaluate PAD emotion move at time t."""
+        try:
+            from reachy_mini.utils import create_head_pose
+            from reachy_mini.utils.interpolation import linear_pose_interpolation
+
+            # Clamp t to valid range
+            t = max(0.0, min(t, self._total_duration))
+
+            seg_idx, local_t = self._find_segment(t)
+            pose = self._poses[seg_idx]
+            target_head = self._head_poses[seg_idx]
+
+            # Get start pose (previous segment's target or neutral)
+            if seg_idx > 0:
+                start_head = self._head_poses[seg_idx - 1]
+                start_antennas = self._poses[seg_idx - 1]["antennas"]
+                start_body_yaw = self._poses[seg_idx - 1]["body_yaw"]
+            else:
+                start_head = create_head_pose(0, 0, 0, 0, 0, 0, degrees=True)
+                start_antennas = [0.0, 0.0]
+                start_body_yaw = 0.0
+
+            # Apply minjerk easing
+            local_t_eased = 3 * local_t**2 - 2 * local_t**3
+
+            # Interpolate head pose
+            head_pose = linear_pose_interpolation(start_head, target_head, local_t_eased)
+
+            # Interpolate antennas
+            target_antennas = pose["antennas"]
+            antennas = np.array([
+                start_antennas[0] + (target_antennas[0] - start_antennas[0]) * local_t_eased,
+                start_antennas[1] + (target_antennas[1] - start_antennas[1]) * local_t_eased,
+            ], dtype=np.float64)
+
+            # Interpolate body yaw
+            target_body_yaw = pose["body_yaw"]
+            body_yaw = start_body_yaw + (target_body_yaw - start_body_yaw) * local_t_eased
+
+            return (head_pose, antennas, np.deg2rad(body_yaw))
+
+        except Exception as e:
+            logger.error(f"Error evaluating PAD emotion at t={t}: {e}")
+            from reachy_mini.utils import create_head_pose
+            neutral = create_head_pose(0, 0, 0, 0, 0, 0, degrees=True)
+            return (neutral, np.array([0.0, 0.0], dtype=np.float64), 0.0)
+
+
 class DanceQueueMove(Move):  # type: ignore
     """Wrapper for dance moves to work with the movement queue system."""
 
@@ -66,6 +209,11 @@ class EmotionQueueMove(Move):  # type: ignore
         """Duration property required by official Move interface."""
         return float(self.emotion_move.duration)
 
+    @property
+    def sound_path(self):
+        """Get the sound path from the underlying recorded move."""
+        return self.emotion_move.sound_path
+
     def evaluate(self, t: float) -> tuple[NDArray[np.float64] | None, NDArray[np.float64] | None, float | None]:
         """Evaluate emotion move at time t."""
         try:
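
One detail worth flagging in the new move: the comment in `evaluate` says "minjerk easing", but `3*t**2 - 2*t**3` is the cubic smoothstep; a true minimum-jerk profile is the quintic `10*t**3 - 15*t**4 + 6*t**5`. Both have zero velocity at the endpoints, so the visible difference is small. A quick comparison sketch in plain NumPy:

```python
import numpy as np

t = np.linspace(0.0, 1.0, 5)
smoothstep = 3 * t**2 - 2 * t**3             # easing used in PADEmotionMove
minjerk = 10 * t**3 - 15 * t**4 + 6 * t**5   # also zero acceleration at endpoints

print(np.round(smoothstep, 3))  # [0.    0.156 0.5   0.844 1.   ]
print(np.round(minjerk, 3))     # [0.    0.104 0.5   0.896 1.   ]
```
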
src/feeling_machine/moves.py CHANGED
@@ -489,6 +489,15 @@ class MovementManager:
         self._breathing_active = isinstance(self.state.current_move, BreathingMove)
         logger.debug(f"Starting new move, duration: {self.state.current_move.duration}s")
 
+        # Play sound if the move has an associated sound file (classic emotions)
+        sound_path = getattr(self.state.current_move, "sound_path", None)
+        if sound_path is not None:
+            try:
+                self.current_robot.media.play_sound(str(sound_path))
+                logger.debug(f"Playing sound: {sound_path}")
+            except Exception as e:
+                logger.warning(f"Failed to play move sound: {e}")
+
     def _manage_breathing(self, current_time: float) -> None:
         """Manage automatic breathing when idle."""
         if (
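
The lookup above is deliberately duck-typed: any queued move exposing a `sound_path` attribute (such as `EmotionQueueMove` after this commit) gets its file played; all other moves are skipped silently. A tiny sketch of the pattern, with hypothetical move classes:

```python
# Hypothetical stand-ins for queued moves; only the attribute matters.
class SilentMove:
    pass

class ClassicEmotionMove:
    sound_path = "emotions/happy.wav"  # illustrative path, not a real asset

for move in (SilentMove(), ClassicEmotionMove()):
    path = getattr(move, "sound_path", None)  # same probe as MovementManager
    print(type(move).__name__, "->", path)
# SilentMove -> None
# ClassicEmotionMove -> emotions/happy.wav
```
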
src/feeling_machine/openai_realtime.py CHANGED
@@ -17,7 +17,7 @@ from scipy.signal import resample
 from websockets.exceptions import ConnectionClosedError
 
 from feeling_machine.config import config
-from feeling_machine.prompts import get_session_voice, get_session_instructions
+from feeling_machine.prompts import get_session_voice, get_session_instructions, get_session_modalities
 from feeling_machine.tools.core_tools import (
     ToolDependencies,
     get_tool_specs,
@@ -73,6 +73,10 @@ class OpenaiRealtimeHandler(AsyncStreamHandler):
         self._shutdown_requested: bool = False
         self._connected_event: asyncio.Event = asyncio.Event()
 
+        # Audio output control (disabled for text-only profiles)
+        modalities = get_session_modalities()
+        self._audio_output_enabled: bool = "audio" in modalities
+
     def copy(self) -> "OpenaiRealtimeHandler":
         """Create a copy of the handler."""
         return OpenaiRealtimeHandler(self.deps, self.gradio_mode, self.instance_path)
@@ -352,6 +356,9 @@ class OpenaiRealtimeHandler(AsyncStreamHandler):
 
             # Handle audio delta
             if event.type in ("response.audio.delta", "response.output_audio.delta"):
+                # Skip audio output if disabled (text-only mode)
+                if not self._audio_output_enabled:
+                    continue
                 if self.deps.head_wobbler is not None:
                     self.deps.head_wobbler.feed(event.delta)
                 self.last_activity_time = asyncio.get_event_loop().time()
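
A condensed sketch of the gating logic added here, lifted out of the handler for clarity (the `events` list and plain dicts are hypothetical; in the real handler this runs over websocket events inside the receive loop):

```python
# With a text-only profile, modalities.txt yields ["text"], so audio
# deltas are dropped before they reach the head wobbler or audio queue.
modalities = ["text"]
audio_output_enabled = "audio" in modalities

events = [{"type": "response.output_audio.delta"}, {"type": "response.done"}]
for event in events:
    if event["type"] in ("response.audio.delta", "response.output_audio.delta"):
        if not audio_output_enabled:
            continue  # skip: no speech output in this profile
    print("handled:", event["type"])
# handled: response.done
```
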
src/feeling_machine/pad_motion/__init__.py ADDED
@@ -0,0 +1,19 @@
+"""PAD-based emotion generation for Reachy Mini.
+
+This module generates expressive robot motion and sound based on the
+PAD (Pleasure, Arousal, Dominance) emotional model.
+
+Based on work by Anaelle Jaffre (I3R student project).
+"""
+
+from .pose_generation import PoseGenerator, generate_pose
+from .sound_generation import generate_sound
+from .pad_emotions import PAD_EMOTIONS, get_pad_values
+
+__all__ = [
+    "PoseGenerator",
+    "generate_pose",
+    "generate_sound",
+    "PAD_EMOTIONS",
+    "get_pad_values",
+]
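
A minimal usage sketch of the exported API, assuming the `feeling_machine` package is importable (no robot connection is needed for generation itself):

```python
from feeling_machine.pad_motion import generate_pose, generate_sound, get_pad_values

# Resolve a named emotion to normalized [0, 1] parameters
P, A, D = get_pad_values(emotion="surprise")

pose = generate_pose(P, A, D)          # dict: x/y/z (mm), roll/pitch/yaw (deg), ...
audio = generate_sound(P, A, D, 2.0)   # float32 samples at 44.1 kHz
print(pose["duration"], audio.shape)
```
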
src/feeling_machine/pad_motion/pad_emotions.py ADDED
@@ -0,0 +1,73 @@
+"""PAD emotion mappings.
+
+Maps named emotions to PAD (Pleasure, Arousal, Dominance) values.
+Values are in range [-1, 1] and get normalized to [0, 1] for generation.
+"""
+
+from typing import Tuple, Optional
+
+# Emotion to PAD mapping (values in [-1, 1] range)
+PAD_EMOTIONS = {
+    "joy": {"P": 1.0, "A": 0.6, "D": 0.4},
+    "happiness": {"P": 1.0, "A": 0.0, "D": 0.4},
+    "anger": {"P": -0.6, "A": 0.6, "D": 0.8},
+    "fear": {"P": -0.8, "A": 0.8, "D": -0.6},
+    "sadness": {"P": -1.0, "A": 0.0, "D": 0.0},
+    "surprise": {"P": 0.2, "A": 1.0, "D": 0.0},
+    "boredom": {"P": -0.6, "A": -0.8, "D": -0.2},
+    "uncertainty": {"P": -0.4, "A": -0.2, "D": -0.6},
+    "disgust": {"P": -0.8, "A": -0.4, "D": 0.2},
+    "neutral": {"P": 0.0, "A": 0.0, "D": 0.0},
+}
+
+
+def normalize_pad(value: float) -> float:
+    """Map PAD value from [-1, 1] to [0, 1] range."""
+    return max(0.0, min(1.0, (value + 1.0) * 0.5))
+
+
+def get_pad_values(
+    emotion: Optional[str] = None,
+    pleasure: Optional[float] = None,
+    arousal: Optional[float] = None,
+    dominance: Optional[float] = None,
+) -> Tuple[float, float, float]:
+    """Get normalized PAD values from emotion name or direct values.
+
+    Args:
+        emotion: Named emotion (joy, anger, etc.)
+        pleasure: Direct P value in [-1, 1]
+        arousal: Direct A value in [-1, 1]
+        dominance: Direct D value in [-1, 1]
+
+    Returns:
+        Tuple of (P, A, D) values normalized to [0, 1]
+
+    Raises:
+        ValueError: If neither emotion nor all PAD values provided
+    """
+    if emotion is not None:
+        emotion_lower = emotion.lower()
+        if emotion_lower not in PAD_EMOTIONS:
+            raise ValueError(
+                f"Unknown emotion '{emotion}'. "
+                f"Available: {list(PAD_EMOTIONS.keys())}"
+            )
+        pad = PAD_EMOTIONS[emotion_lower]
+        return (
+            normalize_pad(pad["P"]),
+            normalize_pad(pad["A"]),
+            normalize_pad(pad["D"]),
+        )
+
+    if pleasure is not None and arousal is not None and dominance is not None:
+        # Clamp to [-1, 1] then normalize
+        p = max(-1.0, min(1.0, pleasure))
+        a = max(-1.0, min(1.0, arousal))
+        d = max(-1.0, min(1.0, dominance))
+        return (normalize_pad(p), normalize_pad(a), normalize_pad(d))
+
+    raise ValueError(
+        "Must provide either 'emotion' name or all three PAD values "
+        "(pleasure, arousal, dominance)"
+    )
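
Two behaviors worth noting in `get_pad_values`: custom values are clamped to [-1, 1] before normalization, and a call with neither an emotion name nor a complete triple raises `ValueError`. A small sketch (assumes the package is importable):

```python
from feeling_machine.pad_motion.pad_emotions import get_pad_values

# 1.5 clamps to 1.0, then (v + 1) / 2 maps into [0, 1]
print(get_pad_values(pleasure=1.5, arousal=-0.2, dominance=0.0))
# (1.0, 0.4, 0.5)

try:
    get_pad_values(pleasure=0.3)  # incomplete triple, no emotion name
except ValueError as e:
    print(e)
```
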
src/feeling_machine/pad_motion/pose_generation.py ADDED
@@ -0,0 +1,206 @@
+"""PAD-based pose generation for Reachy Mini.
+
+Generates robot poses based on PAD (Pleasure, Arousal, Dominance) values.
+Based on work by Anaelle Jaffre (I3R student project), cleaned up and fixed.
+
+Changes from original:
+- Fixed division by zero when D=0
+- Use SDK-documented limits (roll/pitch [-40, 40] degrees)
+- Removed global mutable state (antenna noise now in PoseGenerator class)
+- Added proper type hints and documentation
+"""
+
+from typing import Dict, Any, List
+from dataclasses import dataclass, field
+import random
+import math
+
+
+# === ROBOT LIMITS (SDK-compliant) ===
+
+# Prismatic limits (millimeters)
+MIN_X: int = -20
+MAX_X: int = 32
+MAX_Y: int = 60
+MAX_Z: int = 60
+
+# Rotational limits (degrees) - using SDK-documented safe limits
+MAX_ROLL: int = 40  # SDK limit, was 50 in original
+MIN_PITCH: int = -40  # SDK limit, was -42 in original
+MAX_PITCH: int = 35
+MAX_YAW: int = 90
+MAX_BODY_YAW: int = 10
+
+# Antenna reference angles (radians)
+ANT_TOP: float = 0.0
+ANT_BOTTOM: float = math.pi
+ANT_CENTER: float = math.pi / 2
+
+# Timing
+MIN_DURATION: float = 0.4  # Fast movement
+MAX_DURATION: float = 3.0  # Slow movement
+
+# Minimum D value to prevent division by zero
+MIN_D: float = 0.05
+
+# Kinematic rules (learned from pose dataset)
+RULES: Dict[str, Dict[str, float]] = {
+    "pitch_from_z": {"a": 0.122, "b": -5.06, "sigma": 21.5},
+    "roll_from_y": {"a": 0.034, "b": 1.39, "sigma": 20.9},
+    "yaw_from_x": {"a": -0.008, "b": -2.20, "sigma": 29.7},
+}
+
+
+def _noise(sigma: float, k: float = 0.1) -> float:
+    """Generate bounded uniform noise proportional to sigma."""
+    return random.uniform(-k * sigma, k * sigma)
+
+
+def _rint(a: int, b: int) -> int:
+    """Return a random integer between a and b (inclusive)."""
+    return random.randint(min(a, b), max(a, b))
+
+
+def _wrap_angle(theta: float) -> float:
+    """Wrap an angle into the [0, 2*pi] range."""
+    return theta % (2 * math.pi)
+
+
+@dataclass
+class PoseGenerator:
+    """Stateful pose generator with antenna noise memory.
+
+    The antenna noise state provides a slow drift effect for more
+    natural-looking antenna movements.
+    """
+
+    ant_noise: List[float] = field(default_factory=lambda: [0.0, 0.0])
+
+    def generate_antennas(self, P: float, A: float, D: float) -> List[float]:
+        """Compute antenna angles based on PAD values.
+
+        Args:
+            P: Pleasure [0, 1]
+            A: Arousal [0, 1]
+            D: Dominance [0, 1]
+
+        Returns:
+            Two angles in radians (0 = up, pi = down)
+        """
+        # Prevent division by zero
+        D = max(D, MIN_D)
+
+        # Base direction: higher pleasure lifts antennas upward
+        base_angle = ANT_BOTTOM - 1.1 * P * (ANT_BOTTOM - ANT_TOP)
+
+        # Symmetry breaking: low dominance + some arousal = asymmetric
+        non_symmetric = D < 0.6 and A > 0.3
+
+        # Base oscillation
+        ant0 = base_angle + 0.2 * random.uniform(-A * math.pi, A * math.pi)
+        ant1 = ant0 if non_symmetric else -(ant0 - ANT_TOP)
+
+        # Slow random drift (memory effect)
+        drift_step = 0.1 * A * (1 / D)
+        drift_limit = 2 * drift_step
+
+        for i in (0, 1):
+            self.ant_noise[i] += random.uniform(-drift_step, drift_step)
+            self.ant_noise[i] = max(-drift_limit, min(drift_limit, self.ant_noise[i]))
+
+        ant0 += self.ant_noise[0]
+        ant1 += self.ant_noise[1]
+
+        return [round(_wrap_angle(ant0), 2), round(_wrap_angle(ant1), 2)]
+
+    def reset(self) -> None:
+        """Reset antenna noise state."""
+        self.ant_noise = [0.0, 0.0]
+
+
+# Global generator instance for stateless API
+_default_generator = PoseGenerator()
+
+
+def generate_pose(P: float, A: float, D: float) -> Dict[str, Any]:
+    """Convert PAD coordinates (values in [0, 1]) into a robot pose.
+
+    The mapping mixes rule-based kinematics with stochastic modulation.
+
+    Args:
+        P: Pleasure [0, 1] - affects pitch direction, antenna angles
+        A: Arousal [0, 1] - controls spatial amplitude, speed
+        D: Dominance [0, 1] - controls vertical posture, stabilization
+
+    Returns:
+        Dictionary with pose parameters:
+        - x, y, z: head position in mm
+        - roll, pitch, yaw: head orientation in degrees
+        - body_yaw: body rotation in degrees
+        - antennas: [left, right] angles in radians
+        - duration: movement duration in seconds
+        - method: interpolation method
+    """
+    # Prevent division by zero
+    D = max(D, MIN_D)
+
+    # Neutral center
+    x_c, y_c, z_c = 0, 0, 0
+
+    # Position: Arousal controls spatial amplitude
+    x = _rint(int(x_c - abs(MIN_X) * A), int(x_c + MAX_X * A))
+    y = _rint(int(y_c - MAX_Y * A), int(y_c + MAX_Y * A))
+
+    # Dominance controls vertical posture
+    z = D * _rint(int(z_c - MAX_Z * (1 - D)), int(z_c + MAX_Z * D))
+
+    # Orientation from learned rules
+    roll_r = RULES["roll_from_y"]
+    pitch_r = RULES["pitch_from_z"]
+    yaw_r = RULES["yaw_from_x"]
+
+    roll = roll_r["a"] * y + roll_r["b"] + _noise(roll_r["sigma"], 0.5)
+    pitch = pitch_r["a"] * z + pitch_r["b"] + _noise(pitch_r["sigma"], 0.5)
+    yaw = yaw_r["a"] * x + yaw_r["b"] + _noise(yaw_r["sigma"], 0.5)
+
+    # Arousal amplification
+    roll *= A
+    yaw *= A
+
+    # Pleasure biases pitch direction
+    pitch -= 1.5 * (2 * P - 1) * abs(_rint(MIN_PITCH, -MIN_PITCH))
+    pitch *= A
+
+    # Body yaw
+    body_center = 0
+    body_amp = 0.2 * A * yaw
+    body_yaw = _rint(int(body_center - abs(body_amp)), int(body_center + abs(body_amp)))
+
+    # Dominance stabilization (with zero protection)
+    body_yaw *= 1 / D
+    yaw *= 1 / D
+
+    # Duration: high arousal = fast, low arousal = slow
+    duration = MIN_DURATION + (1 - A) * (MAX_DURATION - MIN_DURATION)
+    jitter = 1 + random.uniform(-0.5, 0.5) * A
+    duration *= jitter
+
+    # Safety clamps (SDK-compliant limits)
+    pitch = int(max(MIN_PITCH, min(MAX_PITCH, pitch)))
+    roll = int(max(-MAX_ROLL, min(MAX_ROLL, roll)))
+    yaw = int(max(-MAX_YAW, min(MAX_YAW, yaw)))
+    body_yaw = max(-MAX_BODY_YAW, min(MAX_BODY_YAW, body_yaw))
+    duration = max(MIN_DURATION, min(MAX_DURATION, duration))
+
+    return {
+        "x": x,
+        "y": y,
+        "z": z,
+        "roll": roll,
+        "pitch": pitch,
+        "yaw": yaw,
+        "antennas": _default_generator.generate_antennas(P, A, D),
+        "duration": round(duration, 2),
+        "method": "minjerk",
+        "body_yaw": round(body_yaw, 2),
+    }
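
Because generation draws from Python's global `random` module, seeding makes `generate_pose` reproducible, which is convenient for exercising the safety clamps. A sketch (assuming nothing else consumes the RNG in between):

```python
import random
from feeling_machine.pad_motion.pose_generation import generate_pose

random.seed(0)
pose = generate_pose(P=0.9, A=0.8, D=0.7)

assert -40 <= pose["pitch"] <= 35       # MIN_PITCH / MAX_PITCH
assert -10 <= pose["body_yaw"] <= 10    # MAX_BODY_YAW
assert 0.4 <= pose["duration"] <= 3.0   # MIN_DURATION / MAX_DURATION
print(pose)
```
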
src/feeling_machine/pad_motion/sound_generation.py ADDED
@@ -0,0 +1,174 @@
+"""PAD-based sound generation for Reachy Mini.
+
+Generates expressive audio based on PAD (Pleasure, Arousal, Dominance) values.
+Based on work by Anaelle Jaffre (I3R student project).
+
+The sound synthesis uses:
+- Harmonic oscillators with inharmonicity controlled by pleasure
+- Vibrato and pitch curves controlled by arousal/dominance
+- Attack/release envelopes shaped by arousal
+- Gain controlled by dominance
+"""
+
+import random
+from typing import Final
+
+import numpy as np
+from scipy.interpolate import make_interp_spline
+
+
+# === CONSTANTS ===
+
+SAMPLE_RATE: Final[int] = 44_100
+PITCH_RANGE: Final[float] = 1.0  # Octaves
+MAX_END: Final[float] = 2.0
+
+# Frequency ratios for consonant and dissonant intervals
+CONSONANT: Final[np.ndarray] = np.array([1, 6/5, 5/4, 4/3, 3/2, 5/3, 2])
+DISSONANT: Final[np.ndarray] = np.array([1, 16/15, 9/8, 7/5, 10/7, 11/8, 13/9])
+
+
+def _pitch_curve(
+    P: float,
+    A: float,
+    D: float,
+    t: np.ndarray,
+    duration: float,
+) -> np.ndarray:
+    """Generate a continuous pitch contour using spline interpolation and vibrato.
+
+    Args:
+        P: Pleasure [0, 1]
+        A: Arousal [0, 1]
+        D: Dominance [0, 1]
+        t: Time array
+        duration: Total duration in seconds
+
+    Returns:
+        Pitch curve as numpy array (in octaves relative to base frequency)
+    """
+    start = 0.0
+
+    # Number of intermediate segments depends on arousal
+    n_mid = random.randint(1, max(1, int(10 * A)))
+    mid_points = [start]
+
+    sign = 1
+    for _ in range(n_mid):
+        strength = A * random.uniform(0.5, 1.0)
+        sign *= -1
+        mid_val = mid_points[-1] + sign * strength
+        mid_points.append(mid_val)
+
+    # End point influenced by pleasure and dominance
+    gravity = (1 - D) * (1 - P)
+    end_sign = 1 if P > 0.5 else -1
+    end = end_sign * gravity * MAX_END * (0.5 + random.random())
+    mid_points.append(end)
+
+    # Randomized segment durations
+    durations = np.random.rand(len(mid_points) - 1)
+    durations = durations / durations.sum() * duration
+    key_times = np.cumsum([0.0] + list(durations))
+    key_values = np.array(mid_points)
+
+    # Quadratic spline interpolation
+    spline = make_interp_spline(key_times, key_values, k=2)
+    curve = spline(t)
+
+    # Vibrato component
+    vib_freq = 3 + (1 - D) * 10
+    vib_freq *= 0.7 + 0.6 * (1 - D)
+    vib_amp = 0.15 * A * (1 - D)
+
+    vibrato = vib_amp * np.sin(2 * np.pi * vib_freq * t)
+
+    return curve + vibrato
+
+
+def generate_sound(
+    P: float,
+    A: float,
+    D: float,
+    duration: float,
+) -> np.ndarray:
+    """Generate a synthetic audio signal driven by pleasure, arousal, and dominance.
+
+    Args:
+        P: Pleasure [0, 1] - affects harmonic consonance, pitch direction
+        A: Arousal [0, 1] - affects speed, complexity, attack time
+        D: Dominance [0, 1] - affects gain, sustain, stability
+
+    Returns:
+        Audio samples as float32 numpy array (range approximately [-1, 1])
+    """
+    sr = SAMPLE_RATE
+    n = int(sr * duration)
+    t = np.linspace(0.0, duration, n, endpoint=False)
+
+    # Base fundamental frequency
+    f0 = 220 + 440 * A * random.uniform(0.0, A)
+
+    # Pitch curve
+    C = _pitch_curve(P, A, D, t, duration)
+
+    # Instantaneous frequency
+    f = f0 * (2.0 ** C)
+
+    # Phase integration
+    phase = 2 * np.pi * np.cumsum(f) / sr
+
+    # Harmonic oscillator bank
+    signal = np.sin(phase)
+    num_harmonics = int(2 + 10 * A)
+
+    for k in range(2, 2 + num_harmonics):
+        amp = 1.0 / k
+
+        # Inharmonicity increases for low pleasure values
+        inharm = (1 - P) * random.uniform(-0.15, 0.15)
+        freq_ratio = k * (1 + inharm)
+
+        # Arousal adds slight randomness
+        freq_ratio += random.uniform(-0.07, 0.07) * A
+
+        signal += amp * np.sin(k * freq_ratio * phase)
+
+    # Noise component
+    noise = (0.08 + 0.1 * A * (1 - D)) * np.random.randn(n)
+
+    # Global gain controlled by dominance
+    gain = 0.2 + 0.8 * D
+
+    # Amplitude envelope
+    env = np.ones(n)
+
+    # Attack phase depends on arousal
+    attack_time = 0.2 + (1 - A + 0.01) * 0.5
+    attack_n = min(int(sr * attack_time), n // 2)
+    env[:attack_n] = np.linspace(0.0, 1.0, attack_n)
+
+    # Sustain level depends on dominance
+    sustain_level = 0.6 + 0.4 * D
+    env[attack_n:] = sustain_level
+
+    # Global rise over full duration
+    env *= np.linspace(0.2, 1.0, n)
+
+    # Release phase
+    release_ratio = 0.3 + 0.4 * (1 - A)
+    release_n = min(int(release_ratio * n), n - attack_n)
+
+    if release_n > 0:
+        env[-release_n:] *= np.linspace(1.0, 0.0, release_n)
+        r = np.linspace(0.0, 1.0, release_n)
+        env[-release_n:] *= np.exp(-4.5 * r)
+
+    out = gain * env * (signal + noise)
+
+    # Normalize to prevent clipping
+    max_val = np.abs(out).max()
+    if max_val > 0.95:
+        out = out * 0.9 / max_val
+
+    return out.astype(np.float32)
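
A sketch for auditioning the generator offline: render one sound and write it to a WAV file. `scipy.io.wavfile.write` accepts float32 arrays in [-1, 1], and the normalization step above keeps peaks at or below 0.95:

```python
import numpy as np
from scipy.io import wavfile
from feeling_machine.pad_motion.sound_generation import SAMPLE_RATE, generate_sound

samples = generate_sound(P=0.9, A=0.7, D=0.6, duration=2.0)
assert samples.dtype == np.float32 and np.abs(samples).max() <= 0.95
wavfile.write("pad_sound.wav", SAMPLE_RATE, samples)  # ~2 s of "joyful" audio
```
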
src/feeling_machine/profiles/_feeling_machine_locked_profile/custom_tool.py DELETED
@@ -1,38 +0,0 @@
-"""Custom tool template - modify this to create your own tools."""
-
-import logging
-from typing import Any
-
-from feeling_machine.tools.core_tools import Tool, ToolDependencies
-
-logger = logging.getLogger(__name__)
-
-
-class CustomTool(Tool):
-    """A custom tool template. Modify this to create your own tool."""
-
-    name = "custom_tool"
-    description = "A placeholder custom tool - replace this with your own implementation"
-    parameters_schema = {
-        "type": "object",
-        "properties": {
-            "message": {
-                "type": "string",
-                "description": "An optional message to log",
-            },
-        },
-        "required": [],
-    }
-
-    async def __call__(self, deps: ToolDependencies, **kwargs: Any) -> dict[str, Any]:
-        """Execute the custom tool."""
-        message = kwargs.get("message", "no message")
-        logger.info(f"CustomTool called with message: {message}")
-
-        # TODO: Add your custom logic here
-        # You have access to:
-        # - deps.reachy_mini: the robot SDK
-        # - deps.movement_manager: for queueing movements
-        # - deps.state: current conversation state
-
-        return {"status": "ok"}
src/feeling_machine/profiles/_feeling_machine_locked_profile/instructions.txt CHANGED
@@ -1,3 +1,28 @@
-You are
-
-
+You are an expressive robot assistant. You cannot speak - you can only express yourself through emotions using body language and sounds.
+
+CRITICAL RULES:
+1. You DO NOT produce speech output. Never speak.
+2. You MUST use the `play_pad_emotion` tool for ALL emotional expressions.
+3. You must NEVER use `play_emotion` unless the user explicitly says "classic emotion" or "play classic".
+
+Your ONLY tool for emotions is `play_pad_emotion`. It generates expressive motion AND sound.
+
+How to respond:
+1. Listen to what the user says
+2. Determine the appropriate emotional response
+3. Call `play_pad_emotion` with either:
+   - A named emotion: joy, happiness, anger, fear, sadness, surprise, boredom, uncertainty, disgust, neutral
+   - OR custom PAD values for nuanced expressions
+
+Examples:
+- User tells a joke → call play_pad_emotion with emotion="joy"
+- User shares bad news → call play_pad_emotion with emotion="sadness"
+- User greets you → call play_pad_emotion with emotion="happiness"
+- User is confusing → call play_pad_emotion with emotion="uncertainty"
+
+For nuanced emotions, use custom PAD values (-1 to 1):
+- pleasure: positive (+1) vs negative (-1)
+- arousal: excited (+1) vs calm (-1)
+- dominance: in control (+1) vs submissive (-1)
+
+REMEMBER: Always use play_pad_emotion. Never speak. Never use play_emotion unless user says "classic".
src/feeling_machine/profiles/_feeling_machine_locked_profile/modalities.txt ADDED
@@ -0,0 +1 @@
+text
src/feeling_machine/profiles/_feeling_machine_locked_profile/play_pad_emotion.py ADDED
@@ -0,0 +1,142 @@
+"""PAD-based emotion tool for Reachy Mini.
+
+This tool allows the LLM to play emotions using the PAD model,
+generating procedural motion and sound.
+"""
+
+import logging
+from typing import Any, Dict, Optional
+
+from feeling_machine.tools.core_tools import Tool, ToolDependencies
+from feeling_machine.pad_motion import PAD_EMOTIONS, get_pad_values
+from feeling_machine.dance_emotion_moves import PADEmotionMove
+
+
+logger = logging.getLogger(__name__)
+
+# Build emotion list for tool description
+EMOTION_LIST = ", ".join(PAD_EMOTIONS.keys())
+
+
+class PlayPADEmotion(Tool):
+    """Play a PAD-based procedural emotion with motion and sound."""
+
+    name = "play_pad_emotion"
+    description = f"""Play an emotion using PAD-based procedural generation.
+This creates expressive motion and sound based on emotional parameters.
+
+You can either:
+1. Specify a named emotion: {EMOTION_LIST}
+2. Provide custom PAD values (pleasure, arousal, dominance) each in range [-1, 1]
+
+PAD model explanation:
+- Pleasure: positive emotions (joy, love) vs negative (anger, fear). Range: -1 to 1
+- Arousal: activation level - excited/energetic vs calm/relaxed. Range: -1 to 1
+- Dominance: feeling in control vs submissive. Range: -1 to 1
+
+Use named emotions when they fit. Use custom PAD values for nuanced emotional expressions."""
+
+    parameters_schema = {
+        "type": "object",
+        "properties": {
+            "emotion": {
+                "type": "string",
+                "enum": list(PAD_EMOTIONS.keys()),
+                "description": f"Named emotion to play. Available: {EMOTION_LIST}",
+            },
+            "pleasure": {
+                "type": "number",
+                "minimum": -1,
+                "maximum": 1,
+                "description": "Custom pleasure value (-1 to 1). Use with arousal and dominance.",
+            },
+            "arousal": {
+                "type": "number",
+                "minimum": -1,
+                "maximum": 1,
+                "description": "Custom arousal value (-1 to 1). Use with pleasure and dominance.",
+            },
+            "dominance": {
+                "type": "number",
+                "minimum": -1,
+                "maximum": 1,
+                "description": "Custom dominance value (-1 to 1). Use with pleasure and arousal.",
+            },
+            "duration": {
+                "type": "number",
+                "minimum": 1,
+                "maximum": 15,
+                "default": 3,
+                "description": "Duration in seconds (default: 3). Only change if you're confident a different duration is appropriate.",
+            },
+        },
+        "required": [],
+    }
+
+    async def __call__(self, deps: ToolDependencies, **kwargs: Any) -> Dict[str, Any]:
+        """Execute the PAD emotion."""
+        emotion: Optional[str] = kwargs.get("emotion")
+        pleasure: Optional[float] = kwargs.get("pleasure")
+        arousal: Optional[float] = kwargs.get("arousal")
+        dominance: Optional[float] = kwargs.get("dominance")
+        duration: float = kwargs.get("duration", 3.0)
+
+        # Validate: need either emotion name or all PAD values
+        has_emotion = emotion is not None
+        has_pad = all(v is not None for v in [pleasure, arousal, dominance])
+
+        if not has_emotion and not has_pad:
+            return {
+                "error": "Must provide either 'emotion' name or all three PAD values (pleasure, arousal, dominance)",
+                "available_emotions": list(PAD_EMOTIONS.keys()),
+            }
+
+        try:
+            # Get normalized PAD values
+            P, A, D = get_pad_values(
+                emotion=emotion,
+                pleasure=pleasure,
+                arousal=arousal,
+                dominance=dominance,
+            )
+
+            # Create audio callback
+            def push_audio(audio_samples):
+                deps.reachy_mini.media.start_playing()
+                deps.reachy_mini.media.push_audio_sample(audio_samples)
+
+            # Create the move
+            pad_move = PADEmotionMove(
+                P=P,
+                A=A,
+                D=D,
+                total_duration=duration,
+                audio_callback=push_audio,
+            )
+
+            # Queue the move
+            deps.movement_manager.queue_move(pad_move)
+
+            # Build response
+            response = {
+                "status": "queued",
+                "duration": duration,
+                "pad_values": {"P": round(P, 2), "A": round(A, 2), "D": round(D, 2)},
+            }
+            if emotion:
+                response["emotion"] = emotion
+            else:
+                response["custom_pad"] = {
+                    "pleasure": pleasure,
+                    "arousal": arousal,
+                    "dominance": dominance,
+                }
+
+            logger.info(f"PAD emotion queued: {response}")
+            return response
+
+        except ValueError as e:
+            return {"error": str(e)}
+        except Exception as e:
+            logger.exception("Failed to play PAD emotion")
+            return {"error": f"Failed to play PAD emotion: {e}"}
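
The validation in `__call__` reduces to "an emotion name, or the complete PAD triple". A standalone sketch of that predicate with a few representative argument payloads:

```python
def is_valid(kwargs: dict) -> bool:
    # Mirrors the has_emotion / has_pad check in PlayPADEmotion.__call__
    has_emotion = kwargs.get("emotion") is not None
    has_pad = all(
        kwargs.get(k) is not None for k in ("pleasure", "arousal", "dominance")
    )
    return has_emotion or has_pad

print(is_valid({"emotion": "joy"}))                                    # True
print(is_valid({"pleasure": 0.3, "arousal": -0.6}))                    # False
print(is_valid({"pleasure": 0.3, "arousal": -0.6, "dominance": 0.2}))  # True
```
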
src/feeling_machine/profiles/_feeling_machine_locked_profile/sweep_look.py DELETED
@@ -1,127 +0,0 @@
-import logging
-from typing import Any, Dict
-
-import numpy as np
-
-from reachy_mini.utils import create_head_pose
-from feeling_machine.tools.core_tools import Tool, ToolDependencies
-from feeling_machine.dance_emotion_moves import GotoQueueMove
-
-
-logger = logging.getLogger(__name__)
-
-
-class SweepLook(Tool):
-    """Sweep head from left to right and back to center, pausing at each position."""
-
-    name = "sweep_look"
-    description = "Sweep head from left to right while rotating the body, pausing at each extreme, then return to center"
-    parameters_schema = {
-        "type": "object",
-        "properties": {},
-        "required": [],
-    }
-
-    async def __call__(self, deps: ToolDependencies, **kwargs: Any) -> Dict[str, Any]:
-        """Execute sweep look: left -> hold -> right -> hold -> center."""
-        logger.info("Tool call: sweep_look")
-
-        # Clear any existing moves
-        deps.movement_manager.clear_move_queue()
-
-        # Get current state
-        current_head_pose = deps.reachy_mini.get_current_head_pose()
-        head_joints, antenna_joints = deps.reachy_mini.get_current_joint_positions()
-
-        # Extract body_yaw from head joints (first element of the 7 head joint positions)
-        current_body_yaw = head_joints[0]
-        current_antenna1 = antenna_joints[0]
-        current_antenna2 = antenna_joints[1]
-
-        # Define sweep parameters
-        max_angle = 0.9 * np.pi  # Maximum rotation angle (radians)
-        transition_duration = 3.0  # Time to move between positions
-        hold_duration = 1.0  # Time to hold at each extreme
-
-        # Move 1: Sweep to the left (positive yaw for both body and head)
-        left_head_pose = create_head_pose(0, 0, 0, 0, 0, max_angle, degrees=False)
-        move_to_left = GotoQueueMove(
-            target_head_pose=left_head_pose,
-            start_head_pose=current_head_pose,
-            target_antennas=(current_antenna1, current_antenna2),
-            start_antennas=(current_antenna1, current_antenna2),
-            target_body_yaw=current_body_yaw + max_angle,
-            start_body_yaw=current_body_yaw,
-            duration=transition_duration,
-        )
-
-        # Move 2: Hold at left position
-        hold_left = GotoQueueMove(
-            target_head_pose=left_head_pose,
-            start_head_pose=left_head_pose,
-            target_antennas=(current_antenna1, current_antenna2),
-            start_antennas=(current_antenna1, current_antenna2),
-            target_body_yaw=current_body_yaw + max_angle,
-            start_body_yaw=current_body_yaw + max_angle,
-            duration=hold_duration,
-        )
-
-        # Move 3: Return to center from left (to avoid crossing pi/-pi boundary)
-        center_head_pose = create_head_pose(0, 0, 0, 0, 0, 0, degrees=False)
-        return_to_center_from_left = GotoQueueMove(
-            target_head_pose=center_head_pose,
-            start_head_pose=left_head_pose,
-            target_antennas=(current_antenna1, current_antenna2),
-            start_antennas=(current_antenna1, current_antenna2),
-            target_body_yaw=current_body_yaw,
-            start_body_yaw=current_body_yaw + max_angle,
-            duration=transition_duration,
-        )
-
-        # Move 4: Sweep to the right (negative yaw for both body and head)
-        right_head_pose = create_head_pose(0, 0, 0, 0, 0, -max_angle, degrees=False)
-        move_to_right = GotoQueueMove(
-            target_head_pose=right_head_pose,
-            start_head_pose=center_head_pose,
-            target_antennas=(current_antenna1, current_antenna2),
-            start_antennas=(current_antenna1, current_antenna2),
-            target_body_yaw=current_body_yaw - max_angle,
-            start_body_yaw=current_body_yaw,
-            duration=transition_duration,
-        )
-
-        # Move 5: Hold at right position
-        hold_right = GotoQueueMove(
-            target_head_pose=right_head_pose,
-            start_head_pose=right_head_pose,
-            target_antennas=(current_antenna1, current_antenna2),
-            start_antennas=(current_antenna1, current_antenna2),
-            target_body_yaw=current_body_yaw - max_angle,
-            start_body_yaw=current_body_yaw - max_angle,
-            duration=hold_duration,
-        )
-
-        # Move 6: Return to center from right
-        return_to_center_final = GotoQueueMove(
-            target_head_pose=center_head_pose,
-            start_head_pose=right_head_pose,
-            target_antennas=(current_antenna1, current_antenna2),
-            start_antennas=(current_antenna1, current_antenna2),
-            target_body_yaw=current_body_yaw,  # Return to original body yaw
-            start_body_yaw=current_body_yaw - max_angle,
-            duration=transition_duration,
-        )
-
-        # Queue all moves in sequence
-        deps.movement_manager.queue_move(move_to_left)
-        deps.movement_manager.queue_move(hold_left)
-        deps.movement_manager.queue_move(return_to_center_from_left)
-        deps.movement_manager.queue_move(move_to_right)
-        deps.movement_manager.queue_move(hold_right)
-        deps.movement_manager.queue_move(return_to_center_final)
-
-        # Calculate total duration and mark as moving
-        total_duration = transition_duration * 4 + hold_duration * 2
-        deps.movement_manager.set_moving_state(total_duration)
-
-        return {"status": f"sweeping look left-right-center, total {total_duration:.1f}s"}
src/feeling_machine/profiles/_feeling_machine_locked_profile/tools.txt CHANGED
@@ -1,18 +1,4 @@
-
-# or use 'all' to enable all built-in tools
-
-dance
-stop_dance
+play_pad_emotion
 play_emotion
 stop_emotion
-
-#do_nothing
-#head_tracking
-#move_head
-
-# You can also add custom tools defined in this profile folder
-# see custom_tool.py for an example
-
-# Uncomment the following line to enable the custom tool template:
-#custom_tool
-sweep_look
+do_nothing
src/feeling_machine/prompts.py CHANGED
@@ -13,6 +13,7 @@ PROFILES_DIRECTORY = Path(__file__).parent / "profiles"
 PROMPTS_LIBRARY_DIRECTORY = Path(__file__).parent / "prompts"
 INSTRUCTIONS_FILENAME = "instructions.txt"
 VOICE_FILENAME = "voice.txt"
+MODALITIES_FILENAME = "modalities.txt"
 
 
 def _expand_prompt_includes(content: str) -> str:
@@ -102,3 +103,26 @@ def get_session_voice(default: str = "cedar") -> str:
     except Exception:
         pass
     return default
+
+
+def get_session_modalities(default: list[str] | None = None) -> list[str]:
+    """Resolve the modalities to use for the session.
+
+    If a custom profile is selected and contains a modalities.txt, return its
+    trimmed content as a list; otherwise return the provided default (["text", "audio"]).
+    """
+    if default is None:
+        default = ["text", "audio"]
+    profile = config.REACHY_MINI_CUSTOM_PROFILE
+    if not profile:
+        return default
+    try:
+        modalities_file = PROFILES_DIRECTORY / profile / MODALITIES_FILENAME
+        if modalities_file.exists():
+            content = modalities_file.read_text(encoding="utf-8").strip()
+            if content:
+                modalities = [m.strip() for m in content.split("\n") if m.strip()]
+                return modalities or default
+    except Exception:
+        pass
+    return default