+
+
+
+
+
+
+
+
Status
+
Ollama Not Found
+
+
Offline
+
+
Cannot reach the Ollama server. Make sure Ollama is running (ollama serve) and a model is pulled (ollama pull llama3.2).
+
+
+
+
+
+
+
Profiles
+
Personality studio
+
+
Live
+
+
Create lean instruction sets, toggle tools, and apply a voice for your Reachy Mini.
+
+
+
Select & launch
+
Pick a profile and choose what should launch on startup.
+
+
+
+
+
+
+
+
+
+
+
+ Built-in default
+
+
+
+
+
+
+
Create / edit
+
Adjust instructions, tools, and voice, then save your profile.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/reachy_mini_conversation_app/static/main.js b/src/reachy_mini_conversation_app/static/main.js
new file mode 100644
index 0000000000000000000000000000000000000000..a924fa3ee4ca6d9c5c96e2bebe6b7f781a933ffd
--- /dev/null
+++ b/src/reachy_mini_conversation_app/static/main.js
@@ -0,0 +1,411 @@
/**
 * Fetch the backend /status endpoint once (2s timeout, cache-busted).
 * @returns {Promise<object>} parsed status payload, or a synthetic
 *   `{ ollama_connected: false, error: true }` object on any failure.
 */
async function fetchStatus() {
  const statusUrl = new URL("/status", window.location.origin);
  statusUrl.searchParams.set("_", String(Date.now()));
  try {
    const response = await fetchWithTimeout(statusUrl, {}, 2000);
    if (response.ok) {
      return await response.json();
    }
    throw new Error("status error");
  } catch {
    // Any network/HTTP failure is reported as "Ollama unreachable".
    return { ollama_connected: false, error: true };
  }
}
+
/** Promise-based delay helper: resolves after `ms` milliseconds. */
const sleep = (ms) =>
  new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
+
/**
 * fetch() wrapper that aborts the request after `timeoutMs` milliseconds.
 * The abort timer is always cleared, whether the request resolves or rejects.
 * @param {URL|string} url - request target.
 * @param {object} [options] - extra fetch options (signal is overridden).
 * @param {number} [timeoutMs] - abort deadline in milliseconds.
 * @returns {Promise<Response>}
 */
async function fetchWithTimeout(url, options = {}, timeoutMs = 2000) {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const withSignal = { ...options, signal: controller.signal };
    return await fetch(url, withSignal);
  } finally {
    clearTimeout(timer);
  }
}
+
/**
 * Poll /status every 500ms until it answers or `timeoutMs` elapses.
 * @returns {Promise<object|null>} parsed status, or null when the deadline passes.
 */
async function waitForStatus(timeoutMs = 15000) {
  const deadline = Date.now() + timeoutMs;
  for (;;) {
    try {
      const pollUrl = new URL("/status", window.location.origin);
      pollUrl.searchParams.set("_", String(Date.now()));
      const response = await fetchWithTimeout(pollUrl, {}, 2000);
      if (response.ok) return await response.json();
    } catch {
      // Backend not up yet — keep polling until the deadline.
    }
    if (Date.now() >= deadline) return null;
    await sleep(500);
  }
}
+
/**
 * Poll /personalities every 500ms until it answers or `timeoutMs` elapses,
 * updating the loading overlay text while waiting.
 * @returns {Promise<object|null>} parsed personality list, or null on deadline.
 */
async function waitForPersonalityData(timeoutMs = 15000) {
  const loadingText = document.querySelector("#loading p");
  const deadline = Date.now() + timeoutMs;
  let attempts = 0;
  for (;;) {
    attempts += 1;
    try {
      const pollUrl = new URL("/personalities", window.location.origin);
      pollUrl.searchParams.set("_", String(Date.now()));
      const response = await fetchWithTimeout(pollUrl, {}, 2000);
      if (response.ok) return await response.json();
    } catch {
      // Endpoint not ready yet; retry below.
    }

    if (loadingText) {
      // After ~4s of retries, hint that the backend is still booting.
      loadingText.textContent = attempts > 8 ? "Starting backend…" : "Loading…";
    }
    if (Date.now() >= deadline) return null;
    await sleep(500);
  }
}
+
+// ---------- Personalities API ----------
/**
 * GET /personalities (cache-busted).
 * @returns {Promise<object>} the profile listing.
 * @throws {Error} "list_failed" on HTTP error.
 */
async function getPersonalities() {
  const listUrl = new URL("/personalities", window.location.origin);
  listUrl.searchParams.set("_", String(Date.now()));
  const response = await fetchWithTimeout(listUrl, {}, 2000);
  if (response.ok) return await response.json();
  throw new Error("list_failed");
}
+
/**
 * GET /personalities/load for a single profile by name.
 * @param {string} name - profile identifier to load.
 * @returns {Promise<object>} profile contents (instructions, tools, voice).
 * @throws {Error} "load_failed" on HTTP error.
 */
async function loadPersonality(name) {
  const loadUrl = new URL("/personalities/load", window.location.origin);
  loadUrl.searchParams.set("name", name);
  loadUrl.searchParams.set("_", String(Date.now()));
  const response = await fetchWithTimeout(loadUrl, {}, 3000);
  if (response.ok) return await response.json();
  throw new Error("load_failed");
}
+
/**
 * Persist a personality profile, trying progressively simpler transports:
 *   1. JSON POST  /personalities/save
 *   2. form-encoded POST  /personalities/save_raw
 *   3. GET  /personalities/save_raw with query params (last resort; note
 *      that long instructions may exceed URL length limits here)
 * @param {{name?: string, instructions?: string, tools_text?: string, voice?: string}} payload
 * @returns {Promise<object>} response of the first transport that succeeds.
 * @throws {Error} server-provided error code, or "save_failed".
 */
async function savePersonality(payload) {
  // Fallback voice kept consistent with the rest of this file
  // (was "cedar", which no other code path in the UI uses).
  const DEFAULT_VOICE = "en-US-AriaNeural";

  // 1. JSON POST
  const saveUrl = new URL("/personalities/save", window.location.origin);
  saveUrl.searchParams.set("_", Date.now().toString());
  let resp = await fetchWithTimeout(saveUrl, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(payload),
  }, 5000);
  if (resp.ok) return await resp.json();

  // 2. Fallback to form-encoded POST
  try {
    const form = new URLSearchParams();
    form.set("name", payload.name || "");
    form.set("instructions", payload.instructions || "");
    form.set("tools_text", payload.tools_text || "");
    form.set("voice", payload.voice || DEFAULT_VOICE);
    const rawUrl = new URL("/personalities/save_raw", window.location.origin);
    rawUrl.searchParams.set("_", Date.now().toString());
    resp = await fetchWithTimeout(rawUrl, {
      method: "POST",
      headers: { "Content-Type": "application/x-www-form-urlencoded" },
      body: form.toString(),
    }, 5000);
    if (resp.ok) return await resp.json();
  } catch {
    // fall through to the GET transport
  }

  // 3. Fallback to GET (query params)
  try {
    const getUrl = new URL("/personalities/save_raw", window.location.origin);
    getUrl.searchParams.set("name", payload.name || "");
    getUrl.searchParams.set("instructions", payload.instructions || "");
    getUrl.searchParams.set("tools_text", payload.tools_text || "");
    getUrl.searchParams.set("voice", payload.voice || DEFAULT_VOICE);
    getUrl.searchParams.set("_", Date.now().toString());
    resp = await fetchWithTimeout(getUrl, { method: "GET" }, 5000);
    if (resp.ok) return await resp.json();
  } catch {
    // fall through to error reporting below
  }

  // All transports failed: surface the server's error code when available.
  const data = await resp.json().catch(() => ({}));
  throw new Error(data.error || "save_failed");
}
+
/**
 * POST /personalities/apply for `name`; with persist=true the backend also
 * stores it as the startup personality.
 * @param {string} name - profile to activate.
 * @param {{persist?: boolean}} [options]
 * @returns {Promise<object>} server response.
 * @throws {Error} server error code, or "apply_failed".
 */
async function applyPersonality(name, { persist = false } = {}) {
  const applyUrl = new URL("/personalities/apply", window.location.origin);
  applyUrl.searchParams.set("name", name || "");
  if (persist) applyUrl.searchParams.set("persist", "1");
  applyUrl.searchParams.set("_", String(Date.now()));
  const response = await fetchWithTimeout(applyUrl, { method: "POST" }, 5000);
  if (response.ok) return await response.json();
  const body = await response.json().catch(() => ({}));
  throw new Error(body.error || "apply_failed");
}
+
/**
 * GET /voices; falls back to a single default voice when the endpoint is
 * unreachable or errors, so the voice <select> is never empty.
 * @returns {Promise<string[]>}
 */
async function getVoices() {
  try {
    const voicesUrl = new URL("/voices", window.location.origin);
    voicesUrl.searchParams.set("_", String(Date.now()));
    const response = await fetchWithTimeout(voicesUrl, {}, 3000);
    if (!response.ok) throw new Error("voices_failed");
    return await response.json();
  } catch {
    return ["en-US-AriaNeural"];
  }
}
+
/** Show (flag=true) or hide (flag=false) `el` via the "hidden" CSS class. */
function show(el, flag) {
  const shouldHide = !flag;
  el.classList.toggle("hidden", shouldHide);
}
+
/**
 * Page bootstrap: wait for the backend to come up, verify the Ollama
 * connection, then wire up the personality (profile) editor UI.
 * Runs once on DOMContentLoaded (see registration at the bottom).
 */
async function init() {
  const loading = document.getElementById("loading");
  show(loading, true);
  const configuredPanel = document.getElementById("configured");
  const ollamaErrorPanel = document.getElementById("ollama-error");
  const personalityPanel = document.getElementById("personality-panel");
  const retryBtn = document.getElementById("retry-btn");
  const modelNameEl = document.getElementById("model-name");

  // Personality elements
  const pSelect = document.getElementById("personality-select");
  const pApply = document.getElementById("apply-personality");
  const pPersist = document.getElementById("persist-personality");
  const pNew = document.getElementById("new-personality");
  const pSave = document.getElementById("save-personality");
  const pStartupLabel = document.getElementById("startup-label");
  const pName = document.getElementById("personality-name");
  const pInstr = document.getElementById("instructions-ta");
  const pTools = document.getElementById("tools-ta");
  const pStatus = document.getElementById("personality-status");
  const pVoice = document.getElementById("voice-select");
  const pAvail = document.getElementById("tools-available");

  // Tools that must be enabled together: checking the key tool auto-enables
  // its listed companions.
  const AUTO_WITH = {
    dance: ["stop_dance"],
    play_emotion: ["stop_emotion"],
  };

  show(configuredPanel, false);
  show(ollamaErrorPanel, false);
  show(personalityPanel, false);

  // Check Ollama status
  const st = (await waitForStatus()) || { ollama_connected: false };

  if (st.ollama_connected) {
    show(configuredPanel, true);
    if (modelNameEl && st.model) modelNameEl.textContent = st.model;
  } else {
    // Offline: show the error panel and stop; the retry button reloads the page.
    show(ollamaErrorPanel, true);
    show(loading, false);

    retryBtn.addEventListener("click", () => {
      window.location.reload();
    });

    return;
  }

  // Wait until backend routes are ready before rendering personalities UI
  const list = (await waitForPersonalityData()) || { choices: [] };
  if (!list.choices.length) {
    pStatus.textContent = "Personality endpoints not ready yet. Retry shortly.";
    pStatus.className = "status warn";
    show(loading, false);
    return;
  }

  // Initialize personalities UI
  try {
    const choices = Array.isArray(list.choices) ? list.choices : [];
    const DEFAULT_OPTION = choices[0] || "(built-in default)";
    // Clamp server-reported startup/current choices to the known list.
    const startupChoice = choices.includes(list.startup) ? list.startup : DEFAULT_OPTION;
    const currentChoice = choices.includes(list.current) ? list.current : startupChoice;

    // Reflect which profile launches at startup in the label.
    function setStartupLabel(name) {
      const display = name && name !== DEFAULT_OPTION ? name : "Built-in default";
      pStartupLabel.textContent = `Launch on start: ${display}`;
    }

    // Populate select
    pSelect.innerHTML = "";
    for (const n of choices) {
      const opt = document.createElement("option");
      opt.value = n;
      opt.textContent = n;
      pSelect.appendChild(opt);
    }
    if (choices.length) {
      const preferred = choices.includes(startupChoice) ? startupChoice : currentChoice;
      pSelect.value = preferred;
    }
    const voices = await getVoices();
    pVoice.innerHTML = "";
    for (const v of voices) {
      const opt = document.createElement("option");
      opt.value = v;
      opt.textContent = v;
      pVoice.appendChild(opt);
    }
    setStartupLabel(startupChoice);

    // Render one checkbox per available tool, pre-checking the enabled ones.
    function renderToolCheckboxes(available, enabled) {
      pAvail.innerHTML = "";
      const enabledSet = new Set(enabled);
      for (const t of available) {
        const wrap = document.createElement("div");
        wrap.className = "chk";
        const id = `tool-${t}`;
        const cb = document.createElement("input");
        cb.type = "checkbox";
        cb.id = id;
        cb.value = t;
        cb.checked = enabledSet.has(t);
        const lab = document.createElement("label");
        lab.htmlFor = id;
        lab.textContent = t;
        wrap.appendChild(cb);
        wrap.appendChild(lab);
        pAvail.appendChild(wrap);
      }
    }

    // Collect checked tool names plus any AUTO_WITH companions.
    function getSelectedTools() {
      const selected = new Set();
      pAvail.querySelectorAll('input[type="checkbox"]').forEach((el) => {
        if (el.checked) selected.add(el.value);
      });
      // Auto-include dependencies
      for (const [main, deps] of Object.entries(AUTO_WITH)) {
        if (selected.has(main)) {
          for (const d of deps) selected.add(d);
        }
      }
      return Array.from(selected);
    }

    // Rewrite the tools textarea from the checkbox state, preserving leading
    // '#' comment lines the user may have written.
    function syncToolsTextarea() {
      const selected = getSelectedTools();
      const comments = pTools.value
        .split("\n")
        .filter((ln) => ln.trim().startsWith("#"));
      const body = selected.join("\n");
      pTools.value = (comments.join("\n") + (comments.length ? "\n" : "") + body).trim() + "\n";
    }

    // NOTE(review): this is called from loadSelected() on every profile
    // switch, so a new anonymous "change" listener accumulates on pAvail each
    // time. Benign today because syncToolsTextarea() is idempotent, but
    // consider attaching the handler once — confirm before changing.
    function attachToolHandlers() {
      pAvail.addEventListener("change", (ev) => {
        const target = ev.target;
        if (!(target instanceof HTMLInputElement) || target.type !== "checkbox") return;
        const name = target.value;
        // If a main tool toggled, propagate to deps
        if (AUTO_WITH[name]) {
          for (const dep of AUTO_WITH[name]) {
            const depEl = pAvail.querySelector(`input[value="${dep}"]`);
            if (depEl) depEl.checked = target.checked || depEl.checked;
          }
        }
        syncToolsTextarea();
      });
    }

    // Fetch the currently selected profile and fill every editor field.
    async function loadSelected() {
      const selected = pSelect.value;
      const data = await loadPersonality(selected);
      pInstr.value = data.instructions || "";
      pTools.value = data.tools_text || "";
      pVoice.value = data.voice || "en-US-AriaNeural";
      // Available tools as checkboxes
      renderToolCheckboxes(data.available_tools, data.enabled_tools);
      attachToolHandlers();
      // Default name field to last segment of selection
      const idx = selected.lastIndexOf("/");
      pName.value = idx >= 0 ? selected.slice(idx + 1) : "";
      pStatus.textContent = `Loaded ${selected}`;
      pStatus.className = "status";
    }

    pSelect.addEventListener("change", loadSelected);
    await loadSelected();
    show(personalityPanel, true);

    // Apply the selected profile for this session only.
    pApply.addEventListener("click", async () => {
      pStatus.textContent = "Applying...";
      pStatus.className = "status";
      try {
        const res = await applyPersonality(pSelect.value);
        if (res.startup) setStartupLabel(res.startup);
        pStatus.textContent = res.status || "Applied.";
        pStatus.className = "status ok";
      } catch (e) {
        pStatus.textContent = `Failed to apply${e.message ? ": " + e.message : ""}`;
        pStatus.className = "status error";
      }
    });

    // Apply AND persist the selected profile as the startup default.
    pPersist.addEventListener("click", async () => {
      pStatus.textContent = "Saving for startup...";
      pStatus.className = "status";
      try {
        const res = await applyPersonality(pSelect.value, { persist: true });
        if (res.startup) setStartupLabel(res.startup);
        pStatus.textContent = res.status || "Saved for startup.";
        pStatus.className = "status ok";
      } catch (e) {
        pStatus.textContent = `Failed to persist${e.message ? ": " + e.message : ""}`;
        pStatus.className = "status error";
      }
    });

    // Reset the editor to a blank template for creating a new profile.
    pNew.addEventListener("click", () => {
      pName.value = "";
      pInstr.value = "# Write your instructions here\n# e.g., Keep responses concise and friendly.";
      pTools.value = "# tools enabled for this profile\n";
      // Keep available tools list, clear selection
      pAvail.querySelectorAll('input[type="checkbox"]').forEach((el) => {
        el.checked = false;
      });
      pVoice.value = "en-US-AriaNeural";
      pStatus.textContent = "Fill fields and click Save.";
      pStatus.className = "status";
    });

    // Validate, save, refresh the profile list, then auto-apply the result.
    pSave.addEventListener("click", async () => {
      const name = (pName.value || "").trim();
      if (!name) {
        pStatus.textContent = "Enter a valid name.";
        pStatus.className = "status warn";
        return;
      }
      pStatus.textContent = "Saving...";
      pStatus.className = "status";
      try {
        // Ensure tools.txt reflects checkbox selection and auto-includes
        syncToolsTextarea();
        const res = await savePersonality({
          name,
          instructions: pInstr.value || "",
          tools_text: pTools.value || "",
          voice: pVoice.value || "en-US-AriaNeural",
        });
        // Refresh select choices
        pSelect.innerHTML = "";
        for (const n of res.choices) {
          const opt = document.createElement("option");
          opt.value = n;
          opt.textContent = n;
          if (n === res.value) opt.selected = true;
          pSelect.appendChild(opt);
        }
        pStatus.textContent = "Saved.";
        pStatus.className = "status ok";
        // Auto-apply
        try { await applyPersonality(pSelect.value); } catch { }
      } catch (e) {
        pStatus.textContent = "Failed to save.";
        pStatus.className = "status error";
      }
    });
  } catch (e) {
    pStatus.textContent = "UI failed to load. Please refresh.";
    pStatus.className = "status warn";
  } finally {
    // Hide loading when initial setup is done
    show(loading, false);
  }
}

// Start once the DOM is parsed.
window.addEventListener("DOMContentLoaded", init);
diff --git a/src/reachy_mini_conversation_app/static/style.css b/src/reachy_mini_conversation_app/static/style.css
new file mode 100644
index 0000000000000000000000000000000000000000..b55678202eb61f63bd7c89b9440ad2607593e926
--- /dev/null
+++ b/src/reachy_mini_conversation_app/static/style.css
@@ -0,0 +1,317 @@
+:root {
+ --bg: #060b1a;
+ --bg-2: #071023;
+ --panel: rgba(11, 18, 36, 0.8);
+ --border: rgba(255, 255, 255, 0.08);
+ --text: #eaf2ff;
+ --muted: #9fb6d7;
+ --ok: #4ce0b3;
+ --warn: #ffb547;
+ --error: #ff5c70;
+ --accent: #45c4ff;
+ --accent-2: #5ef0c1;
+ --shadow: 0 20px 70px rgba(0, 0, 0, 0.45);
+}
+
+* { box-sizing: border-box; }
+body {
+ margin: 0;
+ min-height: 100vh;
+ font-family: "Space Grotesk", "Inter", "Segoe UI", sans-serif;
+ background: radial-gradient(circle at 20% 20%, rgba(69, 196, 255, 0.16), transparent 35%),
+ radial-gradient(circle at 80% 0%, rgba(94, 240, 193, 0.16), transparent 32%),
+ linear-gradient(135deg, var(--bg), var(--bg-2));
+ color: var(--text);
+}
+
+.ambient {
+ position: fixed;
+ inset: 0;
+ background: radial-gradient(circle at 30% 60%, rgba(255, 255, 255, 0.05), transparent 35%),
+ radial-gradient(circle at 75% 30%, rgba(69, 196, 255, 0.08), transparent 32%);
+ filter: blur(60px);
+ z-index: 0;
+ pointer-events: none;
+}
+
+/* Loading overlay */
+.loading {
+ position: fixed;
+ inset: 0;
+ background: rgba(5, 10, 24, 0.92);
+ backdrop-filter: blur(4px);
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ z-index: 9999;
+}
+.loading .spinner {
+ width: 46px;
+ height: 46px;
+ border: 4px solid rgba(255,255,255,0.15);
+ border-top-color: var(--accent);
+ border-radius: 50%;
+ animation: spin 1s linear infinite;
+ margin-bottom: 12px;
+}
+.loading p { color: var(--muted); margin: 0; letter-spacing: 0.4px; }
+@keyframes spin { to { transform: rotate(360deg); } }
+
+.container {
+ position: relative;
+ max-width: 960px;
+ margin: 7vh auto;
+ padding: 0 24px 40px;
+ z-index: 1;
+}
+
+.hero {
+ margin-bottom: 24px;
+}
+.hero h1 {
+ margin: 6px 0 6px;
+ font-size: 32px;
+ letter-spacing: -0.4px;
+}
+.subtitle {
+ margin: 0;
+ color: var(--muted);
+ line-height: 1.5;
+}
+.pill {
+ display: inline-flex;
+ align-items: center;
+ gap: 6px;
+ padding: 6px 12px;
+ border-radius: 999px;
+ background: rgba(94, 240, 193, 0.1);
+ color: var(--accent-2);
+ font-size: 12px;
+ letter-spacing: 0.3px;
+ border: 1px solid rgba(94, 240, 193, 0.25);
+}
+
+.panel {
+ background: var(--panel);
+ border: 1px solid var(--border);
+ border-radius: 14px;
+ padding: 18px 18px 16px;
+ box-shadow: var(--shadow);
+ backdrop-filter: blur(10px);
+ margin-top: 16px;
+}
+.panel-heading {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ gap: 12px;
+ margin-bottom: 8px;
+}
+.panel-heading h2 {
+ margin: 2px 0;
+ font-size: 22px;
+}
+.eyebrow {
+ margin: 0;
+ text-transform: uppercase;
+ font-size: 11px;
+ letter-spacing: 0.5px;
+ color: var(--muted);
+}
+.muted { color: var(--muted); }
+.chip {
+ display: inline-flex;
+ align-items: center;
+ padding: 6px 10px;
+ border-radius: 999px;
+ font-size: 12px;
+ color: var(--text);
+ background: rgba(255, 255, 255, 0.08);
+ border: 1px solid var(--border);
+}
+.chip-ok {
+ background: rgba(76, 224, 179, 0.15);
+ color: var(--ok);
+ border-color: rgba(76, 224, 179, 0.4);
+}
+
+.hidden { display: none; }
+label {
+ display: block;
+ margin: 8px 0 6px;
+ font-size: 13px;
+ color: var(--muted);
+ letter-spacing: 0.2px;
+}
+input[type="password"],
+input[type="text"],
+select,
+textarea {
+ width: 100%;
+ padding: 12px 14px;
+ border: 1px solid var(--border);
+ border-radius: 10px;
+ background: rgba(255, 255, 255, 0.04);
+ color: var(--text);
+ transition: border 0.15s ease, box-shadow 0.15s ease;
+}
+input:focus,
+select:focus,
+textarea:focus {
+ border-color: rgba(94, 240, 193, 0.7);
+ outline: none;
+ box-shadow: 0 0 0 3px rgba(94, 240, 193, 0.15);
+}
+input.error {
+ border-color: var(--error);
+ box-shadow: 0 0 0 3px rgba(255, 92, 112, 0.15);
+}
+select option {
+ background: #0b152a;
+ color: var(--text);
+}
+textarea { resize: vertical; }
+
+button {
+ display: inline-flex;
+ align-items: center;
+ justify-content: center;
+ margin-top: 12px;
+ padding: 11px 16px;
+ border: none;
+ border-radius: 10px;
+ background: linear-gradient(120deg, var(--accent), var(--accent-2));
+ color: #031022;
+ cursor: pointer;
+ font-weight: 600;
+ letter-spacing: 0.2px;
+ box-shadow: 0 14px 40px rgba(69, 196, 255, 0.25);
+ transition: transform 0.12s ease, filter 0.12s ease, box-shadow 0.12s ease;
+}
+button:hover { filter: brightness(1.06); transform: translateY(-1px); }
+button:active { transform: translateY(0); }
+button.ghost {
+ background: rgba(255, 255, 255, 0.05);
+ color: var(--text);
+ box-shadow: none;
+ border: 1px solid var(--border);
+}
+button.ghost:hover { border-color: rgba(94, 240, 193, 0.4); }
+.actions {
+ display: flex;
+ align-items: center;
+ gap: 12px;
+ flex-wrap: wrap;
+}
+.status {
+ margin: 0;
+ color: var(--muted);
+ font-size: 13px;
+}
+.status.ok { color: var(--ok); }
+.status.warn { color: var(--warn); }
+.status.error { color: var(--error); }
+
+/* Personality layout */
+.row {
+ display: grid;
+ grid-template-columns: 160px 1fr;
+ gap: 12px 18px;
+ align-items: center;
+ margin-top: 12px;
+}
+.row > label { margin: 0; }
+.row > button { margin: 0; }
+
+/* First row: controls inline */
+#personality-panel .row-top {
+ grid-template-columns: 160px 1fr auto auto auto;
+}
+
+#tools-available {
+ max-height: 240px;
+ overflow: auto;
+ padding: 10px;
+ border: 1px solid var(--border);
+ border-radius: 10px;
+ background: rgba(255, 255, 255, 0.03);
+}
+
+/* Checkbox grid for tools */
+.checkbox-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fill, minmax(170px, 1fr));
+ gap: 10px 14px;
+}
+.startup-row {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+ flex-wrap: wrap;
+}
+.row-save .actions {
+ justify-content: flex-start;
+}
+.input-field {
+ width: 100%;
+ padding: 12px 14px;
+ border: 1px solid var(--border);
+ border-radius: 10px;
+ background: rgba(255, 255, 255, 0.05);
+ color: var(--text);
+ transition: border 0.15s ease, box-shadow 0.15s ease;
+}
+.input-field:focus {
+ border-color: rgba(94, 240, 193, 0.7);
+ outline: none;
+ box-shadow: 0 0 0 3px rgba(94, 240, 193, 0.15);
+}
+.section {
+ border: 1px solid var(--border);
+ border-radius: 12px;
+ padding: 12px 14px;
+ margin-top: 14px;
+ background: rgba(255, 255, 255, 0.02);
+}
+.section-heading {
+ display: flex;
+ align-items: baseline;
+ gap: 10px;
+ justify-content: space-between;
+}
+.section-heading h3 {
+ margin: 6px 0;
+ font-size: 16px;
+ letter-spacing: -0.1px;
+}
+.section-heading .small {
+ margin: 0;
+ font-size: 12px;
+}
+.checkbox-grid .chk {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ padding: 8px 10px;
+ border-radius: 10px;
+ background: rgba(255, 255, 255, 0.02);
+ border: 1px solid transparent;
+ transition: border 0.12s ease, background 0.12s ease;
+}
+.checkbox-grid .chk:hover { border-color: rgba(94, 240, 193, 0.3); background: rgba(255, 255, 255, 0.04); }
+.checkbox-grid input[type="checkbox"] {
+ width: 16px; height: 16px;
+ accent-color: var(--accent);
+}
+.checkbox-grid label {
+ margin: 0; font-size: 13px; color: var(--text);
+}
+
+@media (max-width: 760px) {
+ .hero h1 { font-size: 26px; }
+ .row { grid-template-columns: 1fr; }
+ #personality-panel .row:first-of-type { grid-template-columns: 1fr; }
+ button { width: 100%; justify-content: center; }
+ .actions { flex-direction: column; align-items: flex-start; }
+}
diff --git a/src/reachy_mini_conversation_app/tools/__init__.py b/src/reachy_mini_conversation_app/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5887927462555a6876812548ec6f920e1bf7f91d
--- /dev/null
+++ b/src/reachy_mini_conversation_app/tools/__init__.py
@@ -0,0 +1,4 @@
+"""Tools library for Reachy Mini conversation app.
+
+Tools are now loaded dynamically based on the profile's tools.txt file.
+"""
diff --git a/src/reachy_mini_conversation_app/tools/camera.py b/src/reachy_mini_conversation_app/tools/camera.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c7fbb8f0a709f3ee73370fccfa5028e78df39e7
--- /dev/null
+++ b/src/reachy_mini_conversation_app/tools/camera.py
@@ -0,0 +1,68 @@
+import base64
+import asyncio
+import logging
+from typing import Any, Dict
+
+import cv2
+
+from reachy_mini_conversation_app.tools.core_tools import Tool, ToolDependencies
+
+
+logger = logging.getLogger(__name__)
+
+
class Camera(Tool):
    """Take a picture with the camera and ask a question about it."""

    name = "camera"
    description = "Take a picture with the camera and ask a question about it."
    parameters_schema = {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": "The question to ask about the picture",
            },
        },
        "required": ["question"],
    }

    async def __call__(self, deps: ToolDependencies, **kwargs: Any) -> Dict[str, Any]:
        """Grab the latest buffered camera frame and answer a question about it.

        Returns one of:
          - ``{"image_description": str}`` when a vision manager is available,
          - ``{"b64_im": str}`` (base64 JPEG) as a fallback for the caller's model,
          - ``{"error": str}`` on any failure.
        """
        image_query = (kwargs.get("question") or "").strip()
        if not image_query:
            logger.warning("camera: empty question")
            return {"error": "question must be a non-empty string"}

        logger.info("Tool call: camera question=%s", image_query[:120])

        # Frames come from the camera worker's rolling buffer; the camera
        # device is never opened directly here.
        if deps.camera_worker is None:
            logger.error("Camera worker not available")
            return {"error": "Camera worker not available"}
        frame = deps.camera_worker.get_latest_frame()
        if frame is None:
            logger.error("No frame available from camera worker")
            return {"error": "No frame available"}

        # Preferred path: delegate to the vision manager. Runs in a thread so
        # model inference does not block the event loop.
        if deps.vision_manager is not None:
            vision_result = await asyncio.to_thread(
                deps.vision_manager.processor.process_image, frame, image_query,
            )
            if isinstance(vision_result, dict) and "error" in vision_result:
                return vision_result
            return (
                {"image_description": vision_result}
                if isinstance(vision_result, str)
                else {"error": "vision returned non-string"}
            )

        # Fallback: encode the frame to JPEG in memory (no temp file).
        success, buffer = cv2.imencode('.jpg', frame)
        if not success:
            # Consistent with every other failure path in this tool: report
            # via an error dict instead of raising (was: raise RuntimeError).
            return {"error": "Failed to encode frame as JPEG"}

        b64_encoded = base64.b64encode(buffer.tobytes()).decode("utf-8")
        return {"b64_im": b64_encoded}
diff --git a/src/reachy_mini_conversation_app/tools/core_tools.py b/src/reachy_mini_conversation_app/tools/core_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..e81bd4d9c221ba910d75b53dc6bc3f5f95186faf
--- /dev/null
+++ b/src/reachy_mini_conversation_app/tools/core_tools.py
@@ -0,0 +1,308 @@
+from __future__ import annotations
+import re
+import abc
+import sys
+import json
+import inspect
+import logging
+import importlib
+import importlib.util
+from typing import Any, Dict, List
+from pathlib import Path
+from dataclasses import dataclass
+
+from reachy_mini import ReachyMini
+from reachy_mini_conversation_app.config import DEFAULT_PROFILES_DIRECTORY as DEFAULT_PROFILES_PATH # noqa: F401
+from reachy_mini_conversation_app.config import config
+
+
+logger = logging.getLogger(__name__)
+
+
+DEFAULT_PROFILES_MODULE = "reachy_mini_conversation_app.profiles"
+
+
# Attach a default stream handler only when the host application has not
# configured one for this logger; the guard avoids duplicate handlers if the
# module is imported more than once.
if not logger.handlers:
    handler = logging.StreamHandler()
    formatter = logging.Formatter("%(asctime)s %(levelname)s %(name)s:%(lineno)d | %(message)s")
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)


# Global tool registry (name -> instance) and the derived LLM function specs.
# Populated exactly once by _initialize_tools() at import time.
ALL_TOOLS: Dict[str, "Tool"] = {}
ALL_TOOL_SPECS: List[Dict[str, Any]] = []
_TOOLS_INITIALIZED = False
+
+
+
def get_concrete_subclasses(base: type[Tool]) -> List[type[Tool]]:
    """Recursively collect all concrete (non-abstract) subclasses of ``base``.

    Abstract intermediate classes are skipped from the result but still
    recursed into, so concrete grandchildren of an abstract subclass are
    found as well.
    """
    result: List[type[Tool]] = []
    for cls in base.__subclasses__():
        if not inspect.isabstract(cls):
            result.append(cls)
        # Always recurse: an abstract subclass may have concrete descendants.
        result.extend(get_concrete_subclasses(cls))
    return result
+
+
@dataclass
class ToolDependencies:
    """External dependencies injected into tools."""

    reachy_mini: ReachyMini  # live robot handle
    movement_manager: Any  # MovementManager from moves.py
    # Optional deps
    camera_worker: Any | None = None  # CameraWorker for frame buffering
    vision_manager: Any | None = None  # image question-answering backend, if configured
    head_wobbler: Any | None = None  # HeadWobbler for audio-reactive motion
    motion_duration_s: float = 1.0  # default duration for motion tools, in seconds
+
# Tool base class
class Tool(abc.ABC):
    """Base abstraction for tools used in function-calling.

    Each tool must define:
    - name: str
    - description: str
    - parameters_schema: Dict[str, Any]  # JSON Schema
    """

    name: str  # unique identifier used for dispatch and registry keys
    description: str  # human/LLM-readable summary of what the tool does
    parameters_schema: Dict[str, Any]  # JSON Schema describing the arguments

    def spec(self) -> Dict[str, Any]:
        """Return the function spec for LLM consumption."""
        return {
            "type": "function",
            "name": self.name,
            "description": self.description,
            "parameters": self.parameters_schema,
        }

    @abc.abstractmethod
    async def __call__(self, deps: ToolDependencies, **kwargs: Any) -> Dict[str, Any]:
        """Async tool execution entrypoint."""
        raise NotImplementedError
+
+
+def _load_module_from_file(module_name: str, file_path: Path) -> None:
+ """Load a Python module from a file path."""
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if not (spec and spec.loader):
+ raise ModuleNotFoundError(f"Cannot create spec for {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module)
+
+
+def _try_load_tool(
+ tool_name: str,
+ module_path: str,
+ fallback_directory: Path | None,
+ file_subpath: str,
+) -> str:
+ """Try to load a tool: first via importlib, then from file if fallback is configured."""
+ try:
+ importlib.import_module(module_path)
+ return "module"
+ except ModuleNotFoundError:
+ if fallback_directory is None:
+ raise
+ tool_file = fallback_directory / file_subpath
+ if not tool_file.exists():
+ raise FileNotFoundError(f"tool file not found at {tool_file}")
+ _load_module_from_file(tool_name, tool_file)
+ return "file"
+
+
+def _format_error(error: Exception) -> str:
+ """Format an exception for logging."""
+ if isinstance(error, FileNotFoundError):
+ return f"Tool file not found: {error}"
+ if isinstance(error, ModuleNotFoundError):
+ return f"Missing dependency: {error}"
+ if isinstance(error, ImportError):
+ return f"Import error: {error}"
+ return f"{type(error).__name__}: {error}"
+
+
+# Registry & specs (dynamic)
def _load_profile_tools() -> None:
    """Load tools based on profile's tools.txt file.

    Resolution order for each tool name listed in tools.txt:
      1. the profile package (``reachy_mini_conversation_app.profiles.<profile>.<tool>``),
         falling back to a file under ``config.PROFILES_DIRECTORY``;
      2. the shared tools package (``reachy_mini_conversation_app.tools.<tool>``),
         falling back to a file under ``config.TOOLS_DIRECTORY``.

    Exits the process (sys.exit(1)) when no usable tools.txt can be found or read.
    """
    # Determine which profile to use
    profile = config.REACHY_MINI_CUSTOM_PROFILE or "default"
    logger.info(f"Loading tools for profile: {profile}")

    # Build path to tools.txt
    # Get the profile directory path
    profile_module_path = config.PROFILES_DIRECTORY / profile
    tools_txt_path = profile_module_path / "tools.txt"
    default_tools_txt_path = Path(__file__).parent.parent / "profiles" / "default" / "tools.txt"

    if config.PROFILES_DIRECTORY != DEFAULT_PROFILES_PATH:
        logger.info(
            "Loading external profile '%s' from %s",
            profile,
            profile_module_path,
        )

    if not tools_txt_path.exists():
        # Missing profile tools.txt: fall back to the built-in default profile
        # when possible; otherwise abort startup.
        if profile != "default" and default_tools_txt_path.exists():
            logger.warning(
                "tools.txt not found for profile '%s' at %s. Falling back to default profile tools at %s",
                profile,
                tools_txt_path,
                default_tools_txt_path,
            )
            tools_txt_path = default_tools_txt_path
        else:
            logger.error(f"✗ tools.txt not found at {tools_txt_path}")
            sys.exit(1)

    # Read and parse tools.txt
    try:
        with open(tools_txt_path, "r") as f:
            lines = f.readlines()
    except Exception as e:
        logger.error(f"✗ Failed to read tools.txt: {e}")
        sys.exit(1)

    # Parse tool names (skip comments and blank lines)
    tool_names = []
    for line in lines:
        line = line.strip()
        # Skip blank lines and comments
        if not line or line.startswith("#"):
            continue
        tool_names.append(line)

    logger.info(f"Found {len(tool_names)} tools to load: {tool_names}")

    # Optionally auto-discover *.py files dropped into the external tools
    # directory and append them after the profile-declared tools.
    if config.AUTOLOAD_EXTERNAL_TOOLS and config.TOOLS_DIRECTORY and config.TOOLS_DIRECTORY.is_dir():
        discovered_external_tools: List[str] = []
        for tool_file in sorted(config.TOOLS_DIRECTORY.glob("*.py")):
            if tool_file.name.startswith("_"):
                continue
            candidate_name = tool_file.stem
            # Module names must be valid Python identifiers to be importable.
            if not re.match(r"^[A-Za-z_][A-Za-z0-9_]*$", candidate_name):
                logger.warning("Skipping external tool with invalid name: %s", tool_file.name)
                continue
            discovered_external_tools.append(candidate_name)

        extra_tools = [name for name in discovered_external_tools if name not in tool_names]
        if extra_tools:
            tool_names.extend(extra_tools)
            logger.info(
                "AUTOLOAD_EXTERNAL_TOOLS enabled: added %d external tool(s): %s",
                len(extra_tools),
                extra_tools,
            )

    for tool_name in tool_names:
        loaded = False
        profile_error = None
        profile_import_path = f"{DEFAULT_PROFILES_MODULE}.{profile}.{tool_name}"

        # Try profile tool first
        try:
            source = _try_load_tool(
                tool_name,
                module_path=profile_import_path,
                fallback_directory=config.PROFILES_DIRECTORY,
                file_subpath=f"{profile}/{tool_name}.py",
            )
            if source == "file":
                logger.info("✓ Loaded external profile tool: %s", tool_name)
            else:
                logger.info("✓ Loaded core profile tool: %s", tool_name)
            loaded = True
        except (ModuleNotFoundError, FileNotFoundError) as e:
            # Only report when the missing module is NOT the tool itself:
            # that means a real dependency failed, not merely "tool absent".
            if tool_name not in str(e):
                profile_error = _format_error(e)
                logger.error(f"❌ Failed to load profile tool '{tool_name}': {profile_error}")
                logger.error(f"   Module path: {profile_import_path}")
        except Exception as e:
            profile_error = _format_error(e)
            logger.error(f"❌ Failed to load profile tool '{tool_name}': {profile_error}")
            logger.error(f"   Module path: {profile_import_path}")

        # Try tools directory if not found in profile
        if not loaded:
            shared_module_path = f"reachy_mini_conversation_app.tools.{tool_name}"
            try:
                source = _try_load_tool(
                    tool_name,
                    module_path=shared_module_path,
                    fallback_directory=config.TOOLS_DIRECTORY,
                    file_subpath=f"{tool_name}.py",
                )
                if source == "file":
                    logger.info("✓ Loaded external tool: %s", tool_name)
                else:
                    logger.info("✓ Loaded core tool: %s", tool_name)
            except (ModuleNotFoundError, FileNotFoundError):
                if profile_error:
                    logger.error(f"❌ Tool '{tool_name}' also not found in shared tools")
                else:
                    logger.warning(f"⚠️ Tool '{tool_name}' not found in profile or shared tools")
            except Exception as e:
                logger.error(f"❌ Failed to load shared tool '{tool_name}': {_format_error(e)}")
                logger.error(f"   Module path: {shared_module_path}")
+
+
+
def _initialize_tools() -> None:
    """Populate registry once, even if module is imported repeatedly."""
    global ALL_TOOLS, ALL_TOOL_SPECS, _TOOLS_INITIALIZED

    if _TOOLS_INITIALIZED:
        logger.debug("Tools already initialized; skipping reinitialization.")
        return

    # Import profile/shared tool modules so their Tool subclasses exist.
    _load_profile_tools()

    # Instantiate every concrete Tool subclass, keyed by its declared name.
    registry: Dict[str, Tool] = {}
    for tool_cls in get_concrete_subclasses(Tool):  # type: ignore[type-abstract]
        registry[tool_cls.name] = tool_cls()
    ALL_TOOLS = registry
    ALL_TOOL_SPECS = [tool.spec() for tool in ALL_TOOLS.values()]

    for tool_name, tool in ALL_TOOLS.items():
        logger.info(f"tool registered: {tool_name} - {tool.description}")

    _TOOLS_INITIALIZED = True


# Build the registry at import time.
_initialize_tools()
+
+
def get_tool_specs(exclusion_list: list[str] | None = None) -> list[Dict[str, Any]]:
    """Get tool specs, optionally excluding some tools.

    Args:
        exclusion_list: tool names to omit; None or empty means include all.
            (Default changed from a mutable ``[]`` to ``None`` — the shared
            mutable default was an anti-pattern; callers are unaffected.)

    Returns:
        The registered LLM function specs, minus any excluded names.
    """
    excluded = set(exclusion_list or ())  # set gives O(1) membership checks
    return [spec for spec in ALL_TOOL_SPECS if spec.get("name") not in excluded]
+
+
+# Dispatcher
+def _safe_load_obj(args_json: str) -> Dict[str, Any]:
+ try:
+ parsed_args = json.loads(args_json or "{}")
+ return parsed_args if isinstance(parsed_args, dict) else {}
+ except Exception:
+ logger.warning("bad args_json=%r", args_json)
+ return {}
+
+
async def dispatch_tool_call(tool_name: str, args_json: str, deps: ToolDependencies) -> Dict[str, Any]:
    """Dispatch a tool call by name with JSON args and dependencies.

    Unknown tools and tool-raised exceptions are reported as ``{"error": ...}``
    dicts rather than propagated, so the caller's loop never crashes.
    """
    tool = ALL_TOOLS.get(tool_name)
    if not tool:
        return {"error": f"unknown tool: {tool_name}"}

    parsed_kwargs = _safe_load_obj(args_json)
    try:
        return await tool(deps, **parsed_kwargs)
    except Exception as e:
        msg = f"{type(e).__name__}: {e}"
        logger.exception("Tool error in %s: %s", tool_name, msg)
        return {"error": msg}
diff --git a/src/reachy_mini_conversation_app/tools/dance.py b/src/reachy_mini_conversation_app/tools/dance.py
new file mode 100644
index 0000000000000000000000000000000000000000..833cd5520294704264ff90fa3d89f1db12719545
--- /dev/null
+++ b/src/reachy_mini_conversation_app/tools/dance.py
@@ -0,0 +1,86 @@
+import logging
+from typing import Any, Dict
+
+from reachy_mini_conversation_app.tools.core_tools import Tool, ToolDependencies
+
+
+logger = logging.getLogger(__name__)
+
+# Initialize dance library
+try:
+ from reachy_mini_dances_library.collection.dance import AVAILABLE_MOVES
+ from reachy_mini_conversation_app.dance_emotion_moves import DanceQueueMove
+
+ DANCE_AVAILABLE = True
+except ImportError as e:
+ logger.warning(f"Dance library not available: {e}")
+ AVAILABLE_MOVES = {}
+ DANCE_AVAILABLE = False
+
+
class Dance(Tool):
    """Play a named or random dance move once (or repeat). Non-blocking."""

    name = "dance"
    description = "Play a named or random dance move once (or repeat). Non-blocking."
    parameters_schema = {
        "type": "object",
        "properties": {
            "move": {
                "type": "string",
                "description": """Name of the move; use 'random' or omit for random.
                Here is a list of the available moves:
                simple_nod: A simple, continuous up-and-down nodding motion.
                head_tilt_roll: A continuous side-to-side head roll (ear to shoulder).
                side_to_side_sway: A smooth, side-to-side sway of the entire head.
                dizzy_spin: A circular 'dizzy' head motion combining roll and pitch.
                stumble_and_recover: A simulated stumble and recovery with multiple axis movements. Good vibes
                interwoven_spirals: A complex spiral motion using three axes at different frequencies.
                sharp_side_tilt: A sharp, quick side-to-side tilt using a triangle waveform.
                side_peekaboo: A multi-stage peekaboo performance, hiding and peeking to each side.
                yeah_nod: An emphatic two-part yeah nod using transient motions.
                uh_huh_tilt: A combined roll-and-pitch uh-huh gesture of agreement.
                neck_recoil: A quick, transient backward recoil of the neck.
                chin_lead: A forward motion led by the chin, combining translation and pitch.
                groovy_sway_and_roll: A side-to-side sway combined with a corresponding roll for a groovy effect.
                chicken_peck: A sharp, forward, chicken-like pecking motion.
                side_glance_flick: A quick glance to the side that holds, then returns.
                polyrhythm_combo: A 3-beat sway and a 2-beat nod create a polyrhythmic feel.
                grid_snap: A robotic, grid-snapping motion using square waveforms.
                pendulum_swing: A simple, smooth pendulum-like swing using a roll motion.
                jackson_square: Traces a rectangle via a 5-point path, with sharp twitches on arrival at each checkpoint.
                """,
            },
            "repeat": {
                "type": "integer",
                "description": "How many times to repeat the move (default 1).",
            },
        },
        "required": [],
    }

    async def __call__(self, deps: ToolDependencies, **kwargs: Any) -> Dict[str, Any]:
        """Queue a named or random dance move on the movement manager.

        Args:
            deps: Tool dependencies; only ``movement_manager`` is used.
            **kwargs: ``move`` (optional str) and ``repeat`` (optional int, default 1).

        Returns:
            ``{"status": "queued", ...}`` on success or ``{"error": ...}`` on failure.
        """
        if not DANCE_AVAILABLE:
            return {"error": "Dance system not available"}

        move_name = kwargs.get("move")

        # Fix: int() on model-supplied input could raise an uncaught
        # TypeError/ValueError; validate explicitly and return a clear error.
        try:
            repeat = int(kwargs.get("repeat", 1))
        except (TypeError, ValueError):
            return {"error": f"repeat must be an integer, got {kwargs.get('repeat')!r}"}
        # Fix: repeat <= 0 previously queued nothing while still reporting "queued".
        if repeat < 1:
            return {"error": f"repeat must be >= 1, got {repeat}"}

        logger.info("Tool call: dance move=%s repeat=%d", move_name, repeat)

        if not move_name or move_name == "random":
            import random

            move_name = random.choice(list(AVAILABLE_MOVES.keys()))

        if move_name not in AVAILABLE_MOVES:
            return {"error": f"Unknown dance move '{move_name}'. Available: {list(AVAILABLE_MOVES.keys())}"}

        # Add dance moves to queue; playback is asynchronous (non-blocking).
        movement_manager = deps.movement_manager
        for _ in range(repeat):
            movement_manager.queue_move(DanceQueueMove(move_name))

        return {"status": "queued", "move": move_name, "repeat": repeat}
diff --git a/src/reachy_mini_conversation_app/tools/do_nothing.py b/src/reachy_mini_conversation_app/tools/do_nothing.py
new file mode 100644
index 0000000000000000000000000000000000000000..c64588f68737e98819a4629f639c698624d221b5
--- /dev/null
+++ b/src/reachy_mini_conversation_app/tools/do_nothing.py
@@ -0,0 +1,30 @@
+import logging
+from typing import Any, Dict
+
+from reachy_mini_conversation_app.tools.core_tools import Tool, ToolDependencies
+
+
+logger = logging.getLogger(__name__)
+
+
class DoNothing(Tool):
    """Choose to do nothing - stay still and silent. Use when you want to be contemplative or just chill."""

    name = "do_nothing"
    description = "Choose to do nothing - stay still and silent. Use when you want to be contemplative or just chill."
    parameters_schema = {
        "type": "object",
        "properties": {
            "reason": {
                "type": "string",
                "description": "Optional reason for doing nothing (e.g., 'contemplating existence', 'saving energy', 'being mysterious')",
            },
        },
        "required": [],
    }

    async def __call__(self, deps: ToolDependencies, **kwargs: Any) -> Dict[str, Any]:
        """Intentionally take no action; echo back the (optional) reason."""
        why = kwargs.get("reason", "just chilling")
        logger.info("Tool call: do_nothing reason=%s", why)
        return {"status": "doing nothing", "reason": why}
diff --git a/src/reachy_mini_conversation_app/tools/head_tracking.py b/src/reachy_mini_conversation_app/tools/head_tracking.py
new file mode 100644
index 0000000000000000000000000000000000000000..57fa178d3fe568c6d31573d4fd7454a429d8b4a0
--- /dev/null
+++ b/src/reachy_mini_conversation_app/tools/head_tracking.py
@@ -0,0 +1,31 @@
+import logging
+from typing import Any, Dict
+
+from reachy_mini_conversation_app.tools.core_tools import Tool, ToolDependencies
+
+
+logger = logging.getLogger(__name__)
+
+
class HeadTracking(Tool):
    """Toggle head tracking state."""

    name = "head_tracking"
    description = "Toggle head tracking state."
    parameters_schema = {
        "type": "object",
        "properties": {"start": {"type": "boolean"}},
        "required": ["start"],
    }

    async def __call__(self, deps: ToolDependencies, **kwargs: Any) -> Dict[str, Any]:
        """Enable or disable head tracking."""
        should_track = bool(kwargs.get("start"))

        # The camera worker owns the tracking loop; when the app runs without
        # a camera there is nothing to toggle, but we still report the state.
        worker = deps.camera_worker
        if worker is not None:
            worker.set_head_tracking_enabled(should_track)

        status = "started" if should_track else "stopped"
        logger.info("Tool call: head_tracking %s", status)
        return {"status": f"head tracking {status}"}
diff --git a/src/reachy_mini_conversation_app/tools/move_head.py b/src/reachy_mini_conversation_app/tools/move_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..821c562159e0b4d315c691b9458e6b4385744e8f
--- /dev/null
+++ b/src/reachy_mini_conversation_app/tools/move_head.py
@@ -0,0 +1,79 @@
+import logging
+from typing import Any, Dict, Tuple, Literal
+
+from reachy_mini.utils import create_head_pose
+from reachy_mini_conversation_app.tools.core_tools import Tool, ToolDependencies
+from reachy_mini_conversation_app.dance_emotion_moves import GotoQueueMove
+
+
+logger = logging.getLogger(__name__)
+
+Direction = Literal["left", "right", "up", "down", "front"]
+
+
class MoveHead(Tool):
    """Move head in a given direction."""

    name = "move_head"
    description = "Move your head in a given direction: left, right, up, down or front."
    parameters_schema = {
        "type": "object",
        "properties": {
            "direction": {
                "type": "string",
                "enum": ["left", "right", "up", "down", "front"],
            },
        },
        "required": ["direction"],
    }

    # mapping: direction -> args for create_head_pose
    # (translations then rotations, in degrees; "front" is the identity pose)
    DELTAS: Dict[str, Tuple[int, int, int, int, int, int]] = {
        "left": (0, 0, 0, 0, 0, 40),
        "right": (0, 0, 0, 0, 0, -40),
        "up": (0, 0, 0, 0, -30, 0),
        "down": (0, 0, 0, 0, 30, 0),
        "front": (0, 0, 0, 0, 0, 0),
    }

    async def __call__(self, deps: ToolDependencies, **kwargs: Any) -> Dict[str, Any]:
        """Move head in a given direction.

        Args:
            deps: Tool dependencies (robot handle, movement manager, motion duration).
            **kwargs: Must contain ``direction`` (one of the schema enum values).

        Returns:
            ``{"status": "looking <direction>"}`` on success or ``{"error": ...}``.
        """
        direction_raw = kwargs.get("direction")
        if not isinstance(direction_raw, str):
            return {"error": "direction must be a string"}
        direction: Direction = direction_raw  # type: ignore[assignment]
        logger.info("Tool call: move_head direction=%s", direction)

        # Unknown directions silently fall back to "front" (identity pose).
        deltas = self.DELTAS.get(direction, self.DELTAS["front"])
        target = create_head_pose(*deltas, degrees=True)

        # Use new movement manager
        try:
            movement_manager = deps.movement_manager

            # Snapshot current state so the goto move can interpolate from it
            current_head_pose = deps.reachy_mini.get_current_head_pose()
            _, current_antennas = deps.reachy_mini.get_current_joint_positions()

            # Create goto move
            goto_move = GotoQueueMove(
                target_head_pose=target,
                start_head_pose=current_head_pose,
                target_antennas=(0, 0),  # Reset antennas to default
                start_antennas=(
                    current_antennas[0],
                    current_antennas[1],
                ),  # Skip body_yaw
                target_body_yaw=0,  # Reset body yaw
                # NOTE(review): current_antennas[0] is used both as an antenna
                # above and as body_yaw here — confirm the layout returned by
                # get_current_joint_positions() before relying on this.
                start_body_yaw=current_antennas[0],  # body_yaw is first in joint positions
                duration=deps.motion_duration_s,
            )

            movement_manager.queue_move(goto_move)
            movement_manager.set_moving_state(deps.motion_duration_s)

            return {"status": f"looking {direction}"}

        except Exception as e:
            # Fix: logger.error dropped the traceback; logger.exception keeps it.
            logger.exception("move_head failed")
            return {"error": f"move_head failed: {type(e).__name__}: {e}"}
diff --git a/src/reachy_mini_conversation_app/tools/play_emotion.py b/src/reachy_mini_conversation_app/tools/play_emotion.py
new file mode 100644
index 0000000000000000000000000000000000000000..34b2fdac147429f27d40b2a8963fc0207c215f60
--- /dev/null
+++ b/src/reachy_mini_conversation_app/tools/play_emotion.py
@@ -0,0 +1,84 @@
+import logging
+from typing import Any, Dict
+
+from reachy_mini_conversation_app.tools.core_tools import Tool, ToolDependencies
+
+
+logger = logging.getLogger(__name__)
+
# Initialize emotion library (best-effort: the app degrades gracefully without it)
try:
    from reachy_mini.motion.recorded_move import RecordedMoves
    from reachy_mini_conversation_app.dance_emotion_moves import EmotionQueueMove

    # Note: huggingface_hub automatically reads HF_TOKEN from environment variables
    RECORDED_MOVES = RecordedMoves("pollen-robotics/reachy-mini-emotions-library")
    EMOTION_AVAILABLE = True
except ImportError as e:
    logger.warning(f"Emotion library not available: {e}")
    RECORDED_MOVES = None
    EMOTION_AVAILABLE = False
except Exception as e:
    # Fix: RecordedMoves() fetches assets from the Hugging Face Hub; network or
    # auth failures are not ImportError and previously crashed module import.
    logger.warning(f"Emotion library failed to initialize: {e}")
    RECORDED_MOVES = None
    EMOTION_AVAILABLE = False
+
+
def get_available_emotions_and_descriptions() -> str:
    """Get formatted list of available emotions with descriptions."""
    if not EMOTION_AVAILABLE:
        return "Emotions not available"

    try:
        # Build one " - name: description" line per recorded move.
        parts = ["Available emotions:\n"]
        for move_name in RECORDED_MOVES.list_moves():
            move_description = RECORDED_MOVES.get(move_name).description
            parts.append(f" - {move_name}: {move_description}\n")
        return "".join(parts)
    except Exception as e:
        # Listing failures degrade to an inline error string for the schema.
        return f"Error getting emotions: {e}"
+
+
class PlayEmotion(Tool):
    """Play a pre-recorded emotion."""

    name = "play_emotion"
    description = "Play a pre-recorded emotion"
    parameters_schema = {
        "type": "object",
        "properties": {
            "emotion": {
                "type": "string",
                "description": f"""Name of the emotion to play.
                Here is a list of the available emotions:
                {get_available_emotions_and_descriptions()}
                """,
            },
        },
        "required": ["emotion"],
    }

    async def __call__(self, deps: ToolDependencies, **kwargs: Any) -> Dict[str, Any]:
        """Queue a pre-recorded emotion for non-blocking playback."""
        if not EMOTION_AVAILABLE:
            return {"error": "Emotion system not available"}

        emotion_name = kwargs.get("emotion")
        if not emotion_name:
            return {"error": "Emotion name is required"}

        logger.info("Tool call: play_emotion emotion=%s", emotion_name)

        try:
            # Validate against the recorded-moves library before queueing.
            emotion_names = RECORDED_MOVES.list_moves()
            if emotion_name not in emotion_names:
                return {"error": f"Unknown emotion '{emotion_name}'. Available: {emotion_names}"}

            deps.movement_manager.queue_move(EmotionQueueMove(emotion_name, RECORDED_MOVES))
            return {"status": "queued", "emotion": emotion_name}
        except Exception as e:
            logger.exception("Failed to play emotion")
            return {"error": f"Failed to play emotion: {e!s}"}
diff --git a/src/reachy_mini_conversation_app/tools/stop_dance.py b/src/reachy_mini_conversation_app/tools/stop_dance.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab14e84a2cac345fd1696ff21ce59d14bdec7dde
--- /dev/null
+++ b/src/reachy_mini_conversation_app/tools/stop_dance.py
@@ -0,0 +1,31 @@
+import logging
+from typing import Any, Dict
+
+from reachy_mini_conversation_app.tools.core_tools import Tool, ToolDependencies
+
+
+logger = logging.getLogger(__name__)
+
+
class StopDance(Tool):
    """Stop the current dance move."""

    name = "stop_dance"
    description = "Stop the current dance move"
    parameters_schema = {
        "type": "object",
        "properties": {
            "dummy": {
                "type": "boolean",
                "description": "dummy boolean, set it to true",
            },
        },
        "required": ["dummy"],
    }

    async def __call__(self, deps: ToolDependencies, **kwargs: Any) -> Dict[str, Any]:
        """Halt dancing by clearing every queued move."""
        logger.info("Tool call: stop_dance")
        deps.movement_manager.clear_move_queue()
        return {"status": "stopped dance and cleared queue"}
diff --git a/src/reachy_mini_conversation_app/tools/stop_emotion.py b/src/reachy_mini_conversation_app/tools/stop_emotion.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5d2323fe3d0642466c03efbac9b9f16641d05ae
--- /dev/null
+++ b/src/reachy_mini_conversation_app/tools/stop_emotion.py
@@ -0,0 +1,31 @@
+import logging
+from typing import Any, Dict
+
+from reachy_mini_conversation_app.tools.core_tools import Tool, ToolDependencies
+
+
+logger = logging.getLogger(__name__)
+
+
class StopEmotion(Tool):
    """Stop the current emotion."""

    name = "stop_emotion"
    description = "Stop the current emotion"
    parameters_schema = {
        "type": "object",
        "properties": {
            "dummy": {
                "type": "boolean",
                "description": "dummy boolean, set it to true",
            },
        },
        "required": ["dummy"],
    }

    async def __call__(self, deps: ToolDependencies, **kwargs: Any) -> Dict[str, Any]:
        """Halt the current emotion by clearing every queued move."""
        logger.info("Tool call: stop_emotion")
        deps.movement_manager.clear_move_queue()
        return {"status": "stopped emotion and cleared queue"}
diff --git a/src/reachy_mini_conversation_app/utils.py b/src/reachy_mini_conversation_app/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..36baa9522968b4e01e73bec8656e94e92b87a0b3
--- /dev/null
+++ b/src/reachy_mini_conversation_app/utils.py
@@ -0,0 +1,123 @@
+import logging
+import argparse
+import warnings
+from typing import Any, Tuple, Optional
+
+from reachy_mini import ReachyMini
+from reachy_mini_conversation_app.camera_worker import CameraWorker
+
+
def parse_args() -> Tuple[argparse.Namespace, list]:  # type: ignore
    """Parse command line arguments."""
    arg_parser = argparse.ArgumentParser("Reachy Mini Conversation App")
    arg_parser.add_argument(
        "--head-tracker",
        choices=["yolo", "mediapipe", None],
        default=None,
        help="Choose head tracker (default: None)",
    )
    # Boolean feature flags share the same shape; declare them in one pass.
    for flag, help_text in (
        ("--no-camera", "Disable camera usage"),
        ("--local-vision", "Use local vision model instead of gpt-realtime vision"),
        ("--gradio", "Open gradio interface"),
        ("--debug", "Enable debug logging"),
    ):
        arg_parser.add_argument(flag, default=False, action="store_true", help=help_text)
    arg_parser.add_argument(
        "--robot-name",
        type=str,
        default=None,
        help="[Optional] Robot name/prefix for Zenoh topics (must match daemon's --robot-name). Only needed for development with multiple robots.",
    )
    # parse_known_args keeps unrecognized tokens for downstream consumers.
    return arg_parser.parse_known_args()
+
+
def handle_vision_stuff(args: argparse.Namespace, current_robot: ReachyMini) -> Tuple[CameraWorker | None, Any, Any]:
    """Initialize camera, head tracker, camera worker, and vision manager.

    By default, vision is handled by gpt-realtime model when camera tool is used.
    If --local-vision flag is used, a local vision model will process images periodically.
    """
    # Without a camera there is nothing to set up.
    if args.no_camera:
        return None, None, None

    # Lazily import only the selected tracker backend.
    head_tracker = None
    if args.head_tracker == "yolo":
        from reachy_mini_conversation_app.vision.yolo_head_tracker import HeadTracker

        head_tracker = HeadTracker()
    elif args.head_tracker == "mediapipe":
        from reachy_mini_toolbox.vision import HeadTracker  # type: ignore[no-redef]

        head_tracker = HeadTracker()

    camera_worker = CameraWorker(current_robot, head_tracker)

    # Local vision is optional; the extra dependencies may not be installed.
    vision_manager = None
    if args.local_vision:
        try:
            from reachy_mini_conversation_app.vision.processors import initialize_vision_manager

            vision_manager = initialize_vision_manager(camera_worker)
        except ImportError as e:
            raise ImportError(
                "To use --local-vision, please install the extra dependencies: pip install '.[local_vision]'",
            ) from e
    else:
        logging.getLogger(__name__).info(
            "Using gpt-realtime for vision (default). Use --local-vision for local processing.",
        )

    return camera_worker, head_tracker, vision_manager
+
+
def setup_logger(debug: bool) -> logging.Logger:
    """Setups the logger."""
    logging.basicConfig(
        level=logging.DEBUG if debug else logging.INFO,
        format="%(asctime)s %(levelname)s %(name)s:%(lineno)d | %(message)s",
    )
    logger = logging.getLogger(__name__)

    # Suppress WebRTC warnings
    warnings.filterwarnings("ignore", message=".*AVCaptureDeviceTypeExternal.*")
    warnings.filterwarnings("ignore", category=UserWarning, module="aiortc")

    # Tame third-party noise (looser in DEBUG)
    if debug:
        noisy_levels = {
            "aiortc": logging.INFO,
            "fastrtc": logging.INFO,
            "aioice": logging.INFO,
            "httpx": logging.WARNING,
            "websockets": logging.INFO,
        }
    else:
        noisy_levels = {
            "aiortc": logging.ERROR,
            "fastrtc": logging.ERROR,
            "aioice": logging.WARNING,
        }
    for noisy_name, noisy_level in noisy_levels.items():
        logging.getLogger(noisy_name).setLevel(noisy_level)
    return logger
+
+def log_connection_troubleshooting(logger: logging.Logger, robot_name: Optional[str]) -> None:
+ """Log troubleshooting steps for connection issues."""
+ logger.error("Troubleshooting steps:")
+ logger.error(" 1. Verify reachy-mini-daemon is running")
+
+ if robot_name is not None:
+ logger.error(
+ f" 2. Daemon must be started with: --robot-name '{robot_name}'"
+ )
+ else:
+ logger.error(
+ " 2. If daemon uses --robot-name, add the same flag here: "
+ "--robot-name