ui design
Browse files- app.py +11 -492
- logic.py +419 -0
- mcp_tools.py +42 -0
- persona_engine.py +61 -8
- ui_chat.py +383 -0
- ui_creator.py +140 -0
- ui_info.py +9 -0
app.py
CHANGED
|
@@ -1,510 +1,29 @@
|
|
| 1 |
import os
|
| 2 |
-
import json
|
| 3 |
-
from pathlib import Path
|
| 4 |
-
from datetime import datetime
|
| 5 |
|
| 6 |
import gradio as gr
|
| 7 |
|
| 8 |
-
# FastAPI backend (mounted under /)
|
| 9 |
from api import fastapi_app
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
-
# Avatar logic
|
| 12 |
-
from avatar_store import avatar_generated_path
|
| 13 |
-
from generate_image_with_nano import build_prompt, run_edit
|
| 14 |
-
from mcp_tools import (
|
| 15 |
-
create_avatar,
|
| 16 |
-
delete_avatar_memory,
|
| 17 |
-
delete_avatar_portrait,
|
| 18 |
-
delete_generated_images,
|
| 19 |
-
generate_as_avatar,
|
| 20 |
-
get_avatar,
|
| 21 |
-
record_generated_image,
|
| 22 |
-
set_avatar_portrait,
|
| 23 |
-
store_avatar_memory,
|
| 24 |
-
)
|
| 25 |
-
|
| 26 |
-
DEFAULT_AVATAR_ID = "08a2fb96"
|
| 27 |
-
INITIAL_GREETING = "Hello! How are you doing?"
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
# ----------------------------------------------------
|
| 31 |
-
# Helpers
|
| 32 |
-
# ----------------------------------------------------
|
| 33 |
-
|
| 34 |
-
def _resolve_path(path_str):
|
| 35 |
-
if not path_str:
|
| 36 |
-
return None
|
| 37 |
-
path = Path(path_str)
|
| 38 |
-
if not path.is_absolute():
|
| 39 |
-
path = Path(__file__).parent / path
|
| 40 |
-
return str(path)
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
def decide_tool(message: str) -> str:
|
| 44 |
-
text = (message or "").lower()
|
| 45 |
-
if "remember" in text:
|
| 46 |
-
return "store_avatar_memory"
|
| 47 |
-
if "who are you" in text:
|
| 48 |
-
return "get_avatar"
|
| 49 |
-
return "generate_as_avatar"
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
# ----------------------------------------------------
|
| 53 |
-
# Creator UI actions
|
| 54 |
-
# ----------------------------------------------------
|
| 55 |
-
|
| 56 |
-
def ui_create_avatar(description: str) -> str:
|
| 57 |
-
try:
|
| 58 |
-
data = create_avatar({"description": description})
|
| 59 |
-
return json.dumps(data, ensure_ascii=False)
|
| 60 |
-
except ValueError as exc:
|
| 61 |
-
return str(exc)
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
def ui_load_avatar(avatar_id: str, admin_id: str):
|
| 65 |
-
try:
|
| 66 |
-
data = get_avatar({"avatar_id": avatar_id, "admin_id": admin_id})
|
| 67 |
-
data.pop("generated_images", None)
|
| 68 |
-
mem = data.get("memory", [])
|
| 69 |
-
rows = [[m.get("entry", ""), m.get("private", False)] for m in mem]
|
| 70 |
-
portrait = _resolve_path(data.get("portrait"))
|
| 71 |
-
return (
|
| 72 |
-
json.dumps(
|
| 73 |
-
{k: v for k, v in data.items() if k != "memory"},
|
| 74 |
-
ensure_ascii=False,
|
| 75 |
-
indent=2,
|
| 76 |
-
),
|
| 77 |
-
rows,
|
| 78 |
-
[],
|
| 79 |
-
portrait,
|
| 80 |
-
)
|
| 81 |
-
except ValueError as exc:
|
| 82 |
-
return str(exc), [], [], None
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
def ui_add_context_text(avatar_id: str, admin_id: str, text: str, private: bool):
|
| 86 |
-
text = (text or "").strip()
|
| 87 |
-
if not text:
|
| 88 |
-
return "Enter context text."
|
| 89 |
-
try:
|
| 90 |
-
store_avatar_memory(
|
| 91 |
-
{
|
| 92 |
-
"avatar_id": avatar_id,
|
| 93 |
-
"admin_id": admin_id,
|
| 94 |
-
"entry": text,
|
| 95 |
-
"private": private,
|
| 96 |
-
}
|
| 97 |
-
)
|
| 98 |
-
return "Context stored."
|
| 99 |
-
except ValueError as exc:
|
| 100 |
-
return str(exc)
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
def ui_add_context_file(avatar_id: str, admin_id: str, file_obj, private: bool):
|
| 104 |
-
if not file_obj:
|
| 105 |
-
return "Upload a file."
|
| 106 |
-
try:
|
| 107 |
-
with open(file_obj.name, "r", encoding="utf-8") as handle:
|
| 108 |
-
data = handle.read(4000)
|
| 109 |
-
except Exception:
|
| 110 |
-
return "Unable to read file."
|
| 111 |
-
return ui_add_context_text(avatar_id, admin_id, data, private)
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
def ui_delete_memory(avatar_id: str, admin_id: str, indices):
|
| 115 |
-
indices = indices or []
|
| 116 |
-
if isinstance(indices, (int, float)):
|
| 117 |
-
indices = [int(indices)]
|
| 118 |
-
|
| 119 |
-
if not indices:
|
| 120 |
-
return "Select entries to delete.", gr.update(), []
|
| 121 |
-
|
| 122 |
-
try:
|
| 123 |
-
for idx in sorted(set(int(i) for i in indices if int(i) >= 0), reverse=True):
|
| 124 |
-
delete_avatar_memory(
|
| 125 |
-
{"avatar_id": avatar_id, "admin_id": admin_id, "index": idx}
|
| 126 |
-
)
|
| 127 |
-
data = get_avatar({"avatar_id": avatar_id, "admin_id": admin_id})
|
| 128 |
-
mem = data.get("memory", [])
|
| 129 |
-
rows = [[m.get("entry", ""), m.get("private", False)] for m in mem]
|
| 130 |
-
return ("Entries removed.", rows, [])
|
| 131 |
-
except ValueError as exc:
|
| 132 |
-
return str(exc), gr.update(), indices
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
def ui_upload_portrait(avatar_id: str, admin_id: str, file_path):
|
| 136 |
-
portrait_path = file_path.name if hasattr(file_path, "name") else file_path
|
| 137 |
-
if not portrait_path:
|
| 138 |
-
return "Upload a portrait image.", None
|
| 139 |
-
try:
|
| 140 |
-
result = set_avatar_portrait(
|
| 141 |
-
{
|
| 142 |
-
"avatar_id": avatar_id,
|
| 143 |
-
"admin_id": admin_id,
|
| 144 |
-
"portrait_file": portrait_path,
|
| 145 |
-
}
|
| 146 |
-
)
|
| 147 |
-
return "Portrait updated.", _resolve_path(result.get("portrait"))
|
| 148 |
-
except ValueError as exc:
|
| 149 |
-
return str(exc), None
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
def ui_remove_portrait(avatar_id: str, admin_id: str):
|
| 153 |
-
try:
|
| 154 |
-
delete_avatar_portrait({"avatar_id": avatar_id, "admin_id": admin_id})
|
| 155 |
-
return "Portrait removed.", None
|
| 156 |
-
except ValueError as exc:
|
| 157 |
-
return str(exc), None
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
def ui_clear_generated(avatar_id: str, admin_id: str):
|
| 161 |
-
try:
|
| 162 |
-
result = delete_generated_images({"avatar_id": avatar_id, "admin_id": admin_id})
|
| 163 |
-
return f"Removed {result.get('removed', 0)} generated images."
|
| 164 |
-
except ValueError as exc:
|
| 165 |
-
return str(exc)
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
def ui_chat_portrait(avatar_id: str):
|
| 169 |
-
if not avatar_id:
|
| 170 |
-
return None
|
| 171 |
-
try:
|
| 172 |
-
data = get_avatar({"avatar_id": avatar_id, "admin_id": ""})
|
| 173 |
-
return _resolve_path(data.get("portrait"))
|
| 174 |
-
except ValueError:
|
| 175 |
-
return None
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
def ui_select_memory(evt: gr.SelectData, current):
|
| 179 |
-
current = list(current or [])
|
| 180 |
-
if not evt or evt.index is None:
|
| 181 |
-
return current
|
| 182 |
-
|
| 183 |
-
idx = evt.index
|
| 184 |
-
if isinstance(idx, (list, tuple)):
|
| 185 |
-
idx = idx[0]
|
| 186 |
-
if idx is None:
|
| 187 |
-
return current
|
| 188 |
-
|
| 189 |
-
row = int(idx)
|
| 190 |
-
if row in current:
|
| 191 |
-
current.remove(row)
|
| 192 |
-
else:
|
| 193 |
-
current.append(row)
|
| 194 |
-
return current
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
# ----------------------------------------------------
|
| 198 |
-
# Chat logic
|
| 199 |
-
# ----------------------------------------------------
|
| 200 |
-
|
| 201 |
-
def _format_history(history):
|
| 202 |
-
return [
|
| 203 |
-
{"role": "user" if speaker == "user" else "assistant", "content": content}
|
| 204 |
-
for speaker, content in history
|
| 205 |
-
]
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
def send_message(avatar_id, admin_id, message, history, generate_image=True):
|
| 209 |
-
history = history or []
|
| 210 |
-
message = (message or "").strip()
|
| 211 |
-
tool = ""
|
| 212 |
-
image_path = None
|
| 213 |
-
|
| 214 |
-
if not message:
|
| 215 |
-
return _format_history(history), "", history, tool, image_path, generate_image
|
| 216 |
-
|
| 217 |
-
if not avatar_id:
|
| 218 |
-
history.append(("avatar", "Set avatar id first."))
|
| 219 |
-
return _format_history(history), "", history, tool, image_path, generate_image
|
| 220 |
-
|
| 221 |
-
history.append(("user", message))
|
| 222 |
-
tool = decide_tool(message)
|
| 223 |
-
|
| 224 |
-
try:
|
| 225 |
-
if tool == "store_avatar_memory":
|
| 226 |
-
if not admin_id:
|
| 227 |
-
reply = "Admin id required to store memory."
|
| 228 |
-
else:
|
| 229 |
-
result = store_avatar_memory(
|
| 230 |
-
{
|
| 231 |
-
"avatar_id": avatar_id,
|
| 232 |
-
"admin_id": admin_id,
|
| 233 |
-
"entry": message,
|
| 234 |
-
"private": True,
|
| 235 |
-
}
|
| 236 |
-
)
|
| 237 |
-
reply = f"Memory stored ({result['memory_entries']})."
|
| 238 |
-
|
| 239 |
-
elif tool == "get_avatar":
|
| 240 |
-
data = get_avatar({"avatar_id": avatar_id, "admin_id": admin_id})
|
| 241 |
-
image_path = _resolve_path(data.get("portrait"))
|
| 242 |
-
reply = json.dumps(data, ensure_ascii=False)
|
| 243 |
-
|
| 244 |
-
else:
|
| 245 |
-
data = generate_as_avatar(
|
| 246 |
-
{"avatar_id": avatar_id, "message": message, "history": history}
|
| 247 |
-
)
|
| 248 |
-
reply = data["response"]
|
| 249 |
-
|
| 250 |
-
avatar = get_avatar({"avatar_id": avatar_id, "admin_id": admin_id})
|
| 251 |
-
image_path = _resolve_path(avatar.get("portrait"))
|
| 252 |
-
|
| 253 |
-
if not image_path:
|
| 254 |
-
generate_image = False
|
| 255 |
-
|
| 256 |
-
except ValueError as exc:
|
| 257 |
-
reply = str(exc)
|
| 258 |
-
|
| 259 |
-
history.append(("avatar", reply))
|
| 260 |
-
image_output = gr.update()
|
| 261 |
-
return (
|
| 262 |
-
_format_history(history),
|
| 263 |
-
"",
|
| 264 |
-
history,
|
| 265 |
-
f"Tool used: {tool or 'n/a'}",
|
| 266 |
-
image_output,
|
| 267 |
-
generate_image,
|
| 268 |
-
)
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
def send_message_public(avatar_id, message, history, generate_image=True):
|
| 272 |
-
return send_message(avatar_id, "", message, history, generate_image)
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
def initial_greeting(avatar_id):
|
| 276 |
-
avatar_id = avatar_id or DEFAULT_AVATAR_ID
|
| 277 |
-
return send_message_public(avatar_id, INITIAL_GREETING, [], True)
|
| 278 |
-
|
| 279 |
-
|
| 280 |
-
def maybe_generate_image(avatar_id, history, generate_image=True, current_image=None):
|
| 281 |
-
current_image = current_image or None
|
| 282 |
-
if not generate_image:
|
| 283 |
-
return current_image, generate_image
|
| 284 |
-
if not avatar_id:
|
| 285 |
-
return current_image, generate_image
|
| 286 |
-
if not history:
|
| 287 |
-
return current_image, generate_image
|
| 288 |
-
|
| 289 |
-
last_user = None
|
| 290 |
-
last_reply = None
|
| 291 |
-
for speaker, content in reversed(history):
|
| 292 |
-
if last_reply is None and speaker == "avatar":
|
| 293 |
-
last_reply = content
|
| 294 |
-
elif last_user is None and speaker == "user":
|
| 295 |
-
last_user = content
|
| 296 |
-
if last_user and last_reply:
|
| 297 |
-
break
|
| 298 |
-
|
| 299 |
-
try:
|
| 300 |
-
avatar = get_avatar({"avatar_id": avatar_id, "admin_id": ""})
|
| 301 |
-
except ValueError:
|
| 302 |
-
return current_image, False
|
| 303 |
-
|
| 304 |
-
portrait_path = _resolve_path(avatar.get("portrait"))
|
| 305 |
-
if not portrait_path or not Path(portrait_path).exists():
|
| 306 |
-
return current_image, False
|
| 307 |
-
|
| 308 |
-
try:
|
| 309 |
-
prompt = build_prompt(
|
| 310 |
-
avatar.get("persona", "Avatar"),
|
| 311 |
-
f"{last_user or ''} Reply: {last_reply or ''}",
|
| 312 |
-
)
|
| 313 |
-
timestamp = datetime.utcnow().strftime("%Y%m%d-%H%M%S")
|
| 314 |
-
out_path = avatar_generated_path(avatar_id, timestamp)
|
| 315 |
-
out_path.parent.mkdir(parents=True, exist_ok=True)
|
| 316 |
-
|
| 317 |
-
rc = run_edit(Path(portrait_path), prompt, out_path)
|
| 318 |
-
if rc == 0:
|
| 319 |
-
record_generated_image(avatar_id, out_path, prompt, timestamp)
|
| 320 |
-
return str(out_path), True
|
| 321 |
-
except BaseException:
|
| 322 |
-
return current_image, generate_image
|
| 323 |
-
|
| 324 |
-
return current_image, generate_image
|
| 325 |
-
|
| 326 |
-
|
| 327 |
-
# ----------------------------------------------------
|
| 328 |
-
# UI Assembly
|
| 329 |
-
# ----------------------------------------------------
|
| 330 |
|
| 331 |
with gr.Blocks() as ui:
|
| 332 |
-
gr.
|
|
|
|
| 333 |
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
|
|
|
| 337 |
|
| 338 |
-
avatar_id_input = gr.Textbox(
|
| 339 |
-
label="Avatar ID",
|
| 340 |
-
placeholder="e.g. 08a2fb96",
|
| 341 |
-
value=DEFAULT_AVATAR_ID,
|
| 342 |
-
)
|
| 343 |
-
image_display = gr.Image(label="Image", type="filepath")
|
| 344 |
-
generate_toggle = gr.Checkbox(label="Generate image for this reply", value=True)
|
| 345 |
-
chat = gr.Chatbot()
|
| 346 |
-
msg = gr.Textbox(label="Message")
|
| 347 |
-
state = gr.State([])
|
| 348 |
-
send_btn = gr.Button("Send")
|
| 349 |
-
clear_btn = gr.Button("Clear Chat")
|
| 350 |
-
tool_status = gr.Markdown("Tool used: n/a")
|
| 351 |
-
|
| 352 |
-
chat_event = msg.submit(
|
| 353 |
-
send_message_public,
|
| 354 |
-
inputs=[avatar_id_input, msg, state, generate_toggle],
|
| 355 |
-
outputs=[chat, msg, state, tool_status, image_display, generate_toggle],
|
| 356 |
-
)
|
| 357 |
-
chat_event.then(
|
| 358 |
-
maybe_generate_image,
|
| 359 |
-
inputs=[avatar_id_input, state, generate_toggle, image_display],
|
| 360 |
-
outputs=[image_display, generate_toggle],
|
| 361 |
-
)
|
| 362 |
-
|
| 363 |
-
send_event = send_btn.click(
|
| 364 |
-
send_message_public,
|
| 365 |
-
inputs=[avatar_id_input, msg, state, generate_toggle],
|
| 366 |
-
outputs=[chat, msg, state, tool_status, image_display, generate_toggle],
|
| 367 |
-
)
|
| 368 |
-
send_event.then(
|
| 369 |
-
maybe_generate_image,
|
| 370 |
-
inputs=[avatar_id_input, state, generate_toggle, image_display],
|
| 371 |
-
outputs=[image_display, generate_toggle],
|
| 372 |
-
)
|
| 373 |
-
|
| 374 |
-
clear_btn.click(
|
| 375 |
-
lambda: ([], "", [], "Tool used: n/a", None, True),
|
| 376 |
-
outputs=[chat, msg, state, tool_status, image_display, generate_toggle],
|
| 377 |
-
)
|
| 378 |
-
|
| 379 |
-
avatar_id_input.change(ui_chat_portrait, inputs=avatar_id_input, outputs=image_display)
|
| 380 |
-
|
| 381 |
-
initial_load = ui.load(
|
| 382 |
-
initial_greeting,
|
| 383 |
-
inputs=[avatar_id_input],
|
| 384 |
-
outputs=[chat, msg, state, tool_status, image_display, generate_toggle],
|
| 385 |
-
)
|
| 386 |
-
initial_load.then(
|
| 387 |
-
maybe_generate_image,
|
| 388 |
-
inputs=[avatar_id_input, state, generate_toggle, image_display],
|
| 389 |
-
outputs=[image_display, generate_toggle],
|
| 390 |
-
)
|
| 391 |
-
|
| 392 |
-
# Creator tab
|
| 393 |
-
with gr.Tab("Creator"):
|
| 394 |
-
gr.Markdown("Create avatars, upload portraits, and manage memory.")
|
| 395 |
-
|
| 396 |
-
with gr.Row():
|
| 397 |
-
with gr.Column():
|
| 398 |
-
description = gr.Textbox(label="Description")
|
| 399 |
-
create_btn = gr.Button("Create")
|
| 400 |
-
create_output = gr.Textbox(label="Result", lines=3)
|
| 401 |
-
create_btn.click(ui_create_avatar, inputs=description, outputs=create_output)
|
| 402 |
-
|
| 403 |
-
with gr.Column():
|
| 404 |
-
creator_avatar_id = gr.Textbox(label="Avatar ID", value="08a2fb96")
|
| 405 |
-
creator_admin_id = gr.Textbox(
|
| 406 |
-
label="Admin ID",
|
| 407 |
-
type="password",
|
| 408 |
-
value="01043e7a9c7141f892d0b08e6c325f18",
|
| 409 |
-
)
|
| 410 |
-
load_btn = gr.Button("Load")
|
| 411 |
-
avatar_data = gr.Textbox(label="Avatar Data", lines=6)
|
| 412 |
-
portrait_view = gr.Image(label="Portrait", type="filepath")
|
| 413 |
-
memory_table = gr.Dataframe(
|
| 414 |
-
headers=["memory", "private"],
|
| 415 |
-
wrap=True,
|
| 416 |
-
interactive=True,
|
| 417 |
-
show_row_numbers=True,
|
| 418 |
-
)
|
| 419 |
-
selected_index = gr.State([])
|
| 420 |
-
|
| 421 |
-
memory_table.select(ui_select_memory, inputs=[selected_index], outputs=[selected_index])
|
| 422 |
-
|
| 423 |
-
load_btn.click(
|
| 424 |
-
ui_load_avatar,
|
| 425 |
-
inputs=[creator_avatar_id, creator_admin_id],
|
| 426 |
-
outputs=[avatar_data, memory_table, selected_index, portrait_view],
|
| 427 |
-
)
|
| 428 |
-
|
| 429 |
-
delete_btn = gr.Button("Remove Selected Memories")
|
| 430 |
-
delete_status = gr.Textbox(label="Delete Status", lines=2)
|
| 431 |
-
delete_btn.click(
|
| 432 |
-
ui_delete_memory,
|
| 433 |
-
inputs=[creator_avatar_id, creator_admin_id, selected_index],
|
| 434 |
-
outputs=[delete_status, memory_table, selected_index],
|
| 435 |
-
)
|
| 436 |
-
|
| 437 |
-
portrait_upload = gr.Image(label="Upload New Portrait", type="filepath")
|
| 438 |
-
portrait_status = gr.Textbox(label="Portrait Status", lines=2)
|
| 439 |
-
upload_portrait_btn = gr.Button("Save Portrait")
|
| 440 |
-
remove_portrait_btn = gr.Button("Remove Portrait")
|
| 441 |
-
clear_generated_btn = gr.Button("Remove Generated Images")
|
| 442 |
-
clear_generated_status = gr.Textbox(label="Generated Images Status", lines=2)
|
| 443 |
-
|
| 444 |
-
upload_portrait_btn.click(
|
| 445 |
-
ui_upload_portrait,
|
| 446 |
-
inputs=[creator_avatar_id, creator_admin_id, portrait_upload],
|
| 447 |
-
outputs=[portrait_status, portrait_view],
|
| 448 |
-
)
|
| 449 |
-
remove_portrait_btn.click(
|
| 450 |
-
ui_remove_portrait,
|
| 451 |
-
inputs=[creator_avatar_id, creator_admin_id],
|
| 452 |
-
outputs=[portrait_status, portrait_view],
|
| 453 |
-
)
|
| 454 |
-
clear_generated_btn.click(
|
| 455 |
-
ui_clear_generated,
|
| 456 |
-
inputs=[creator_avatar_id, creator_admin_id],
|
| 457 |
-
outputs=[clear_generated_status],
|
| 458 |
-
)
|
| 459 |
-
|
| 460 |
-
gr.Markdown("### Add Context")
|
| 461 |
-
context_text = gr.Textbox(label="Context Text or URL", lines=4)
|
| 462 |
-
private_toggle = gr.Checkbox(label="Private?", value=False)
|
| 463 |
-
add_text_btn = gr.Button("Add Text Context")
|
| 464 |
-
text_status = gr.Textbox(label="Status", lines=2)
|
| 465 |
-
|
| 466 |
-
add_text_btn.click(
|
| 467 |
-
ui_add_context_text,
|
| 468 |
-
inputs=[creator_avatar_id, creator_admin_id, context_text, private_toggle],
|
| 469 |
-
outputs=text_status,
|
| 470 |
-
)
|
| 471 |
-
|
| 472 |
-
context_file = gr.File(label="Text File")
|
| 473 |
-
add_file_btn = gr.Button("Add File Context")
|
| 474 |
-
file_status = gr.Textbox(label="File Status", lines=2)
|
| 475 |
-
|
| 476 |
-
add_file_btn.click(
|
| 477 |
-
ui_add_context_file,
|
| 478 |
-
inputs=[creator_avatar_id, creator_admin_id, context_file, private_toggle],
|
| 479 |
-
outputs=file_status,
|
| 480 |
-
)
|
| 481 |
-
|
| 482 |
-
with gr.Tab("Info"):
|
| 483 |
-
gr.Markdown("### Avatar MCP Workflow")
|
| 484 |
-
gr.Image(
|
| 485 |
-
label="Infographic",
|
| 486 |
-
value=_resolve_path("avatar_infographic.png"),
|
| 487 |
-
type="filepath",
|
| 488 |
-
)
|
| 489 |
-
|
| 490 |
-
# ----------------------------------------------------
|
| 491 |
-
# Export ASGI app for Hugging Face Spaces
|
| 492 |
-
# ----------------------------------------------------
|
| 493 |
-
# Mount Gradio UI at "/" so FastAPI lives at /mcp/*
|
| 494 |
-
# ----------------------------------------------------
|
| 495 |
|
| 496 |
app = gr.mount_gradio_app(fastapi_app, ui, path="/")
|
| 497 |
|
| 498 |
-
# ----------------------------------------------------
|
| 499 |
-
# Local-only server runner
|
| 500 |
-
# ----------------------------------------------------
|
| 501 |
-
# HuggingFace Spaces sets SPACE_ID → DO NOT run uvicorn here.
|
| 502 |
-
# Dockerfile CMD will run uvicorn in HF.
|
| 503 |
-
# Local Python execution ("python app.py") WILL run uvicorn.
|
| 504 |
-
# ----------------------------------------------------
|
| 505 |
|
| 506 |
if __name__ == "__main__" and not os.getenv("SPACE_ID"):
|
| 507 |
import uvicorn
|
|
|
|
| 508 |
port = int(os.getenv("PORT", "7860"))
|
| 509 |
-
print(f"
|
| 510 |
uvicorn.run(app, host="0.0.0.0", port=port)
|
|
|
|
| 1 |
import os
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
import gradio as gr
|
| 4 |
|
|
|
|
| 5 |
from api import fastapi_app
|
| 6 |
+
from ui_chat import UI_STYLE, build_chat_tab
|
| 7 |
+
from ui_creator import build_creator_tab
|
| 8 |
+
from ui_info import build_info_tab
|
| 9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
with gr.Blocks() as ui:
|
| 12 |
+
gr.HTML(UI_STYLE, sanitize_html=False)
|
| 13 |
+
gr.Markdown("## Avatar MCP")
|
| 14 |
|
| 15 |
+
with gr.Tabs():
|
| 16 |
+
build_chat_tab(ui)
|
| 17 |
+
build_creator_tab()
|
| 18 |
+
build_info_tab()
|
| 19 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
|
| 21 |
app = gr.mount_gradio_app(fastapi_app, ui, path="/")
|
| 22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
|
| 24 |
if __name__ == "__main__" and not os.getenv("SPACE_ID"):
|
| 25 |
import uvicorn
|
| 26 |
+
|
| 27 |
port = int(os.getenv("PORT", "7860"))
|
| 28 |
+
print(f"Running locally at http://localhost:{port}")
|
| 29 |
uvicorn.run(app, host="0.0.0.0", port=port)
|
logic.py
ADDED
|
@@ -0,0 +1,419 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# logic.py
|
| 2 |
+
# Modernized backend logic for Avatar MCP
|
| 3 |
+
# Contains ALL non-UI logic: chat, creator tools, images, helpers.
|
| 4 |
+
# UI lives in app.py and calls these functions.
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import base64
|
| 8 |
+
import html
|
| 9 |
+
import re
|
| 10 |
+
import string
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from datetime import datetime
|
| 13 |
+
|
| 14 |
+
# FastAPI + Avatar backend imports
|
| 15 |
+
from mcp_tools import (
|
| 16 |
+
create_avatar,
|
| 17 |
+
delete_avatar_portrait,
|
| 18 |
+
delete_generated_images,
|
| 19 |
+
generate_as_avatar,
|
| 20 |
+
get_avatar,
|
| 21 |
+
record_generated_image,
|
| 22 |
+
set_avatar_portrait,
|
| 23 |
+
store_avatar_memory,
|
| 24 |
+
save_all_memories,
|
| 25 |
+
ensure_avatar_by_admin,
|
| 26 |
+
)
|
| 27 |
+
from avatar_store import avatar_generated_path
|
| 28 |
+
from generate_image_with_nano import build_prompt, run_edit
|
| 29 |
+
from persona_engine import extract_memories_from_text
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
DEFAULT_AVATAR_ID = "08a2fb96"
|
| 33 |
+
INITIAL_GREETING = "Hello! How are you doing?"
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# =====================================================
|
| 37 |
+
# Helpers
|
| 38 |
+
# =====================================================
|
| 39 |
+
|
| 40 |
+
def resolve_path(path_str):
|
| 41 |
+
if not path_str:
|
| 42 |
+
return None
|
| 43 |
+
p = Path(path_str)
|
| 44 |
+
if not p.is_absolute():
|
| 45 |
+
p = Path(__file__).parent / p
|
| 46 |
+
return str(p)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def encode_image_data(path_str):
|
| 50 |
+
"""Convert image file → base64 data URL."""
|
| 51 |
+
try:
|
| 52 |
+
data = Path(path_str).read_bytes()
|
| 53 |
+
b64 = base64.b64encode(data).decode("utf-8")
|
| 54 |
+
ext = Path(path_str).suffix.lower()
|
| 55 |
+
mime = {
|
| 56 |
+
".jpg": "image/jpeg",
|
| 57 |
+
".jpeg": "image/jpeg",
|
| 58 |
+
".png": "image/png",
|
| 59 |
+
".gif": "image/gif",
|
| 60 |
+
".webp": "image/webp",
|
| 61 |
+
}.get(ext, "image/png")
|
| 62 |
+
return f"data:{mime};base64,{b64}"
|
| 63 |
+
except Exception:
|
| 64 |
+
return ""
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def history_pairs(history):
|
| 68 |
+
"""Convert internal dict history → MCP-friendly tuples."""
|
| 69 |
+
out = []
|
| 70 |
+
for entry in history or []:
|
| 71 |
+
s = entry.get("speaker")
|
| 72 |
+
if s not in ("user", "avatar"):
|
| 73 |
+
s = "avatar"
|
| 74 |
+
out.append((s, entry.get("text", "")))
|
| 75 |
+
return out
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def decide_tool(message: str) -> str:
|
| 79 |
+
text = (message or "").lower()
|
| 80 |
+
if "remember" in text:
|
| 81 |
+
return "store_avatar_memory"
|
| 82 |
+
if "who are you" in text:
|
| 83 |
+
return "get_avatar"
|
| 84 |
+
return "generate_as_avatar"
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def _ensure_avatar_id(avatar_id: str, admin_id: str) -> str:
|
| 88 |
+
if avatar_id:
|
| 89 |
+
return avatar_id
|
| 90 |
+
if not admin_id:
|
| 91 |
+
raise ValueError("avatar_id or admin_id required")
|
| 92 |
+
avatar = ensure_avatar_by_admin(admin_id)
|
| 93 |
+
if not avatar:
|
| 94 |
+
raise ValueError("avatar not found for admin id")
|
| 95 |
+
return avatar.get("id")
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# =====================================================
|
| 99 |
+
# Chat Logic
|
| 100 |
+
# =====================================================
|
| 101 |
+
|
| 102 |
+
def send_message(avatar_id, admin_id, message, history, generate_image=True):
|
| 103 |
+
"""
|
| 104 |
+
Core chat pipeline:
|
| 105 |
+
- append user message
|
| 106 |
+
- choose tool
|
| 107 |
+
- execute tool
|
| 108 |
+
- append avatar reply
|
| 109 |
+
"""
|
| 110 |
+
history = list(history or [])
|
| 111 |
+
msg = (message or "").strip()
|
| 112 |
+
tool = ""
|
| 113 |
+
|
| 114 |
+
if not msg:
|
| 115 |
+
return history, "n/a"
|
| 116 |
+
|
| 117 |
+
# Append user message
|
| 118 |
+
history.append({"speaker": "user", "text": msg})
|
| 119 |
+
tool = decide_tool(msg)
|
| 120 |
+
|
| 121 |
+
try:
|
| 122 |
+
if tool == "store_avatar_memory":
|
| 123 |
+
if not admin_id:
|
| 124 |
+
reply = "Admin id required to store memory."
|
| 125 |
+
else:
|
| 126 |
+
result = store_avatar_memory({
|
| 127 |
+
"avatar_id": avatar_id,
|
| 128 |
+
"admin_id": admin_id,
|
| 129 |
+
"entry": msg,
|
| 130 |
+
"private": True
|
| 131 |
+
})
|
| 132 |
+
reply = f"Memory stored ({result['memory_entries']})."
|
| 133 |
+
|
| 134 |
+
elif tool == "get_avatar":
|
| 135 |
+
data = get_avatar({"avatar_id": avatar_id, "admin_id": admin_id})
|
| 136 |
+
reply = json.dumps(data, ensure_ascii=False)
|
| 137 |
+
|
| 138 |
+
else:
|
| 139 |
+
# Persona reply
|
| 140 |
+
data = generate_as_avatar({
|
| 141 |
+
"avatar_id": avatar_id,
|
| 142 |
+
"message": msg,
|
| 143 |
+
"history": history_pairs(history)
|
| 144 |
+
})
|
| 145 |
+
reply = data["response"]
|
| 146 |
+
|
| 147 |
+
# Check portrait availability before generating images
|
| 148 |
+
avatar = get_avatar({"avatar_id": avatar_id, "admin_id": admin_id})
|
| 149 |
+
portrait = resolve_path(avatar.get("portrait"))
|
| 150 |
+
if not portrait:
|
| 151 |
+
generate_image = False
|
| 152 |
+
|
| 153 |
+
except ValueError as exc:
|
| 154 |
+
reply = str(exc)
|
| 155 |
+
|
| 156 |
+
# Add avatar reply
|
| 157 |
+
history.append({"speaker": "avatar", "text": reply})
|
| 158 |
+
|
| 159 |
+
return history, tool
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def send_message_public(avatar_id, message, history, generate_image=True):
|
| 163 |
+
"""Public chat mode = no admin id."""
|
| 164 |
+
return send_message(avatar_id, "", message, history, generate_image)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def initial_greeting(avatar_id):
|
| 168 |
+
avatar_id = avatar_id or DEFAULT_AVATAR_ID
|
| 169 |
+
history, _ = send_message_public(avatar_id, INITIAL_GREETING, [], True)
|
| 170 |
+
return history
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def maybe_generate_image(avatar_id, history, generate_image=True):
    """
    After the avatar replies, optionally generate an illustrative image.

    Finds the most recent avatar reply (and the user message it answered) in
    *history*; if the avatar has a portrait on disk, runs the image-edit
    pipeline and attaches the result to that reply via the ``image_path`` and
    ``image_data`` keys.

    Image generation is strictly best-effort: any failure leaves the chat
    history unchanged.  Returns the (possibly augmented) history list.
    """
    history = list(history or [])

    if not generate_image or not avatar_id or not history:
        return history

    # Locate the most recent avatar reply and the user message before it.
    last_user = None
    last_avatar = None
    for entry in reversed(history):
        if entry.get("speaker") == "avatar" and last_avatar is None:
            last_avatar = entry
        elif entry.get("speaker") == "user" and last_user is None:
            last_user = entry.get("text", "")
        if last_avatar and last_user:
            break

    if not last_avatar:
        return history

    # A stored portrait is required as the base image for editing.
    try:
        avatar = get_avatar({"avatar_id": avatar_id, "admin_id": ""})
    except ValueError:
        return history

    portrait = resolve_path(avatar.get("portrait"))
    if not portrait or not Path(portrait).exists():
        return history

    try:
        prompt = build_prompt(
            avatar.get("persona", "Avatar"),
            f"{last_user or ''} Reply: {last_avatar.get('text', '')}"
        )
        timestamp = datetime.utcnow().strftime("%Y%m%d-%H%M%S")
        out_path = avatar_generated_path(avatar_id, timestamp)
        out_path.parent.mkdir(parents=True, exist_ok=True)

        rc = run_edit(Path(portrait), prompt, out_path)
        if rc == 0:  # run_edit signals success with a zero return code
            record_generated_image(avatar_id, out_path, prompt, timestamp)
            last_avatar["image_path"] = str(out_path)
            last_avatar["image_data"] = encode_image_data(out_path)
    except Exception:
        # Best-effort: image generation must never break the chat flow.
        # (Was `except BaseException`, which also swallowed KeyboardInterrupt
        # and SystemExit — too broad for a convenience feature.)
        pass

    return history
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def clear_chat_session():
    """Return the pristine chat UI state.

    Yields (empty history, neutral tool-status label, image generation on).
    """
    empty_history = []
    default_status = "Tool used: n/a"
    return empty_history, default_status, True
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
# =====================================================
|
| 233 |
+
# Creator Logic
|
| 234 |
+
# =====================================================
|
| 235 |
+
|
| 236 |
+
def ui_create_avatar(description: str):
    """Create a new avatar from a free-text description and load it into the UI.

    Returns (status, avatar_json, memory_rows, portrait, avatar_id, admin_id);
    on failure the error message is returned as the status with empty fields.
    """
    try:
        created = create_avatar({"description": description})
        new_id = created.get("id")
        new_admin = created.get("admin_id")
        json_blob, rows, portrait, _, _, _ = ui_load_avatar(new_id, new_admin)
        return f"Avatar created: {new_id}", json_blob, rows, portrait, new_id, new_admin
    except ValueError as exc:
        return str(exc), "", [], None, "", ""
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def ui_load_avatar(avatar_id: str, admin_id: str):
    """Fetch an avatar (by id, or by admin-id lookup), its memory, and portrait.

    Returns (json_blob, memory_rows, portrait_path, status, avatar_id, admin_id).
    """
    try:
        if avatar_id:
            record = get_avatar({"avatar_id": avatar_id, "admin_id": admin_id})
        else:
            # No id supplied: locate the avatar owned by this admin.
            record = ensure_avatar_by_admin(admin_id)
            avatar_id = record.get("id")
            admin_id = record.get("admin_id", admin_id)

        record = dict(record)  # copy before mutating
        record.pop("generated_images", None)

        memory = record.get("memory", [])
        rows = [[item.get("entry", ""), item.get("private", False)] for item in memory]
        portrait = resolve_path(record.get("portrait"))

        summary = {key: value for key, value in record.items() if key != "memory"}
        blob = json.dumps(summary, ensure_ascii=False, indent=2)
        return blob, rows, portrait, "Avatar loaded.", avatar_id, admin_id
    except ValueError as exc:
        return str(exc), [], None, str(exc), "", ""
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def _current_memory_rows(avatar_id: str, admin_id: str):
    """Return the avatar's memory as [[entry, private], ...]; [] on any lookup error."""
    try:
        avatar_id = _ensure_avatar_id(avatar_id, admin_id)
        record = get_avatar({"avatar_id": avatar_id, "admin_id": admin_id})
    except ValueError:
        return []
    return [
        [item.get("entry", ""), item.get("private", False)]
        for item in record.get("memory", [])
    ]
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def _clean_memory_entry(text: str) -> str:
|
| 287 |
+
raw = (text or "").strip()
|
| 288 |
+
raw = re.sub(r"^```[\w-]*", "", raw, flags=re.IGNORECASE).strip()
|
| 289 |
+
raw = raw.replace("```", " ")
|
| 290 |
+
raw = raw.replace('"', "")
|
| 291 |
+
raw = " ".join(raw.split())
|
| 292 |
+
allowed = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 .,!?'-")
|
| 293 |
+
clean = "".join(ch for ch in raw if ch in allowed).strip()
|
| 294 |
+
return clean
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
def _ingest_memories(avatar_id: str, admin_id: str, text: str, private: bool):
    """Split *text* into memory entries, store each one, and report the outcome.

    Returns (status_message, refreshed_memory_rows).
    Raises ValueError when the avatar id cannot be resolved.
    """
    avatar_id = _ensure_avatar_id(avatar_id, admin_id)
    # Fall back to storing the raw text when extraction yields nothing.
    candidates = extract_memories_from_text(text, max_items=200) or [text]

    stored = 0
    for candidate in candidates:
        entry = _clean_memory_entry(candidate)
        if not entry:
            continue
        store_avatar_memory({
            "avatar_id": avatar_id,
            "admin_id": admin_id,
            "entry": entry,
            "private": private,
        })
        stored += 1

    rows = _current_memory_rows(avatar_id, admin_id)
    if stored == 0:
        return "No memories extracted.", rows
    if stored == 1:
        return "Stored 1 memory.", rows
    return f"Stored {stored} memories.", rows
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
def ui_add_context_text(avatar_id: str, admin_id: str, text: str, private: bool):
    """Handler for the 'add context as text' action.

    Returns (status_message, memory_rows) in every case.
    """
    cleaned = (text or "").strip()
    if not cleaned:
        return "Enter context text.", _current_memory_rows(avatar_id, admin_id)
    try:
        return _ingest_memories(avatar_id, admin_id, cleaned, private)
    except ValueError as exc:
        return str(exc), _current_memory_rows(avatar_id, admin_id)
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def ui_add_context_file(avatar_id: str, admin_id: str, file_obj, private: bool):
    """Handler for the 'add context from file' upload.

    Reads the uploaded file as UTF-8 text and routes it through the text
    ingestion path. Returns (status_message, memory_rows).
    """
    if not file_obj:
        return "Upload a file.", _current_memory_rows(avatar_id, admin_id)
    try:
        with open(file_obj.name, "r", encoding="utf-8") as handle:
            contents = handle.read()
        return ui_add_context_text(avatar_id, admin_id, contents, private)
    except Exception as exc:
        return f"Unable to read file: {exc}", _current_memory_rows(avatar_id, admin_id)
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def ui_upload_portrait(avatar_id: str, admin_id: str, file_path):
    """Set the avatar's portrait from an uploaded file (or a raw path string).

    Returns (status_message, resolved_portrait_path_or_None).
    """
    # Gradio file components expose .name; plain path strings pass through.
    source = getattr(file_path, "name", file_path)
    if not source:
        return "Upload a portrait image.", None
    try:
        avatar_id = _ensure_avatar_id(avatar_id, admin_id)
        updated = set_avatar_portrait({
            "avatar_id": avatar_id,
            "admin_id": admin_id,
            "portrait_file": source,
        })
        return "Portrait updated.", resolve_path(updated.get("portrait"))
    except ValueError as exc:
        return str(exc), None
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
def ui_remove_portrait(avatar_id: str, admin_id: str):
    """Delete the avatar's portrait image.

    Returns (status_message, None) — the second slot clears the UI preview.
    """
    try:
        resolved = _ensure_avatar_id(avatar_id, admin_id)
        delete_avatar_portrait({"avatar_id": resolved, "admin_id": admin_id})
        return "Portrait removed.", None
    except ValueError as exc:
        return str(exc), None
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
def ui_clear_generated(avatar_id: str, admin_id: str):
    """Delete every image previously generated for this avatar.

    Returns a status string suitable for the UI.
    """
    try:
        resolved = _ensure_avatar_id(avatar_id, admin_id)
        outcome = delete_generated_images({"avatar_id": resolved, "admin_id": admin_id})
        return f"Removed {outcome.get('removed', 0)} generated images."
    except ValueError as exc:
        return str(exc)
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def ui_chat_portrait(avatar_id: str):
    """Resolve the portrait path shown in the public chat tab.

    Returns None when no avatar id is given or the lookup fails.
    """
    if not avatar_id:
        return None
    try:
        record = get_avatar({"avatar_id": avatar_id, "admin_id": ""})
        return resolve_path(record.get("portrait"))
    except ValueError:
        return None
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
def ui_select_memory(evt=None, current=None):
    """Toggle a memory-table row in the selection list.

    *evt* is a Gradio select event carrying ``.index`` (an int, or a
    (row, col) pair of which only the row is used). Returns the updated
    selection list; a missing/None index leaves the selection unchanged.
    """
    selection = list(current or [])
    index = getattr(evt, "index", None) if evt else None
    if index is None:
        return selection

    if isinstance(index, (list, tuple)):
        index = index[0]
    if index is None:
        return selection

    row = int(index)
    if row in selection:
        selection.remove(row)
    else:
        selection.append(row)
    return selection
|
| 403 |
+
|
| 404 |
+
def ui_update_memory(avatar_id, admin_id, updated_table):
    """
    Persist the edited memory table.

    *updated_table* is a list of ``[entry_text, private_flag]`` rows, or a
    pandas DataFrame from gr.Dataframe (converted to nested lists first).

    Returns a status string for the UI.
    """
    try:
        avatar_id = _ensure_avatar_id(avatar_id, admin_id)
        # gr.Dataframe may hand us a pandas DataFrame instead of nested lists.
        if hasattr(updated_table, "to_numpy"):
            updated_table = updated_table.to_numpy().tolist()
        # Return value intentionally ignored (was an unused local binding).
        save_all_memories(avatar_id, admin_id, updated_table)
        return "Memories updated successfully!"
    except ValueError as exc:
        return str(exc)
|
mcp_tools.py
CHANGED
|
@@ -4,6 +4,7 @@ from typing import Any, Dict
|
|
| 4 |
|
| 5 |
from avatar_store import (
|
| 6 |
ROOT as AVATAR_ROOT,
|
|
|
|
| 7 |
avatar_portrait_path,
|
| 8 |
avatar_generated_path,
|
| 9 |
avatar_generated_dir,
|
|
@@ -89,6 +90,33 @@ def store_avatar_memory(payload: Dict[str, Any]) -> Dict[str, Any]:
|
|
| 89 |
return {"status": "stored", "memory_entries": len(avatar["memory"])}
|
| 90 |
|
| 91 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 92 |
def get_avatar_context(payload: Dict[str, Any]) -> Dict[str, Any]:
|
| 93 |
payload = payload or {}
|
| 94 |
avatar_id = payload.get("avatar_id")
|
|
@@ -241,6 +269,20 @@ def ensure_public_avatar(avatar_id: str) -> dict:
|
|
| 241 |
return avatar
|
| 242 |
|
| 243 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 244 |
def delete_generated_images(payload: Dict[str, Any]) -> Dict[str, Any]:
|
| 245 |
payload = payload or {}
|
| 246 |
avatar_id = payload.get("avatar_id")
|
|
|
|
| 4 |
|
| 5 |
from avatar_store import (
|
| 6 |
ROOT as AVATAR_ROOT,
|
| 7 |
+
STORE,
|
| 8 |
avatar_portrait_path,
|
| 9 |
avatar_generated_path,
|
| 10 |
avatar_generated_dir,
|
|
|
|
| 90 |
return {"status": "stored", "memory_entries": len(avatar["memory"])}
|
| 91 |
|
| 92 |
|
| 93 |
+
def save_all_memories(avatar_id: str, admin_id: str, rows: list) -> dict:
    """
    Replace the avatar's memory list with rows shaped like
    ``[["text", True], ["text2", False]]`` (e.g. straight from a UI table).

    Blank rows, non-sequence rows, and rows with no text are dropped.

    Raises ValueError when the ids are missing or the admin id does not
    match the stored owner. Returns {"status": "saved", "memory_entries": n}.
    """
    if not avatar_id or not admin_id:
        raise ValueError("avatar_id and admin_id required")
    avatar = load_avatar(avatar_id)
    if admin_id != avatar.get("admin_id"):
        raise ValueError("invalid admin_id")

    sanitized: list[dict] = []
    for row in rows or []:
        if not row or not isinstance(row, (list, tuple)):
            continue
        # Coerce to str before stripping: table widgets can hand back None
        # or numeric/numpy scalars, which previously crashed on .strip().
        text = str(row[0] or "").strip()
        if not text:
            continue
        private = bool(row[1]) if len(row) > 1 else False
        sanitized.append({"entry": text, "private": private})

    avatar["memory"] = sanitized
    save_avatar(avatar)
    return {"status": "saved", "memory_entries": len(sanitized)}
|
| 118 |
+
|
| 119 |
+
|
| 120 |
def get_avatar_context(payload: Dict[str, Any]) -> Dict[str, Any]:
|
| 121 |
payload = payload or {}
|
| 122 |
avatar_id = payload.get("avatar_id")
|
|
|
|
| 269 |
return avatar
|
| 270 |
|
| 271 |
|
| 272 |
+
def ensure_avatar_by_admin(admin_id: str) -> dict:
    """Find the avatar owned by *admin_id* by scanning the store directory.

    The returned record has its ``generated_images`` list stripped.
    Raises ValueError when no admin id is given or no matching avatar exists.
    """
    if not admin_id:
        raise ValueError("admin_id required")
    for manifest in STORE.glob("*/avatar.json"):
        try:
            candidate = load_avatar(manifest.parent.name)
            if candidate.get("admin_id") == admin_id:
                candidate.pop("generated_images", None)
                return candidate
        except Exception:
            # Unreadable or corrupt store entry — keep scanning.
            continue
    raise ValueError("avatar not found for admin_id")
|
| 284 |
+
|
| 285 |
+
|
| 286 |
def delete_generated_images(payload: Dict[str, Any]) -> Dict[str, Any]:
|
| 287 |
payload = payload or {}
|
| 288 |
avatar_id = payload.get("avatar_id")
|
persona_engine.py
CHANGED
|
@@ -1,5 +1,6 @@
|
|
| 1 |
import json
|
| 2 |
import os
|
|
|
|
| 3 |
from pathlib import Path
|
| 4 |
|
| 5 |
import httpx
|
|
@@ -122,6 +123,17 @@ def _call_ollama(system: str, prompt: str) -> str | None:
|
|
| 122 |
return None
|
| 123 |
|
| 124 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 125 |
def reply_as_persona(avatar: dict, message: str, history=None) -> str:
|
| 126 |
persona = avatar.get("persona", "avatar")
|
| 127 |
description = avatar.get("description", "")
|
|
@@ -166,14 +178,55 @@ def reply_as_persona(avatar: dict, message: str, history=None) -> str:
|
|
| 166 |
]
|
| 167 |
if s
|
| 168 |
).strip()
|
| 169 |
-
|
| 170 |
-
llm_reply = None
|
| 171 |
-
if BACKEND == "openai":
|
| 172 |
-
llm_reply = _call_openai(system, message) or _call_hf(system, message)
|
| 173 |
-
elif BACKEND == "hf":
|
| 174 |
-
llm_reply = _call_hf(system, message)
|
| 175 |
-
elif BACKEND == "ollama":
|
| 176 |
-
llm_reply = _call_ollama(system, message)
|
| 177 |
if llm_reply:
|
| 178 |
return llm_reply
|
| 179 |
return f"{persona} (described as {description or 'mysterious'}) replies to '{message}'."
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import json
|
| 2 |
import os
|
| 3 |
+
import re
|
| 4 |
from pathlib import Path
|
| 5 |
|
| 6 |
import httpx
|
|
|
|
| 123 |
return None
|
| 124 |
|
| 125 |
|
| 126 |
+
def _call_backend(system: str, prompt: str) -> str | None:
    """Dispatch a chat completion to the configured LLM backend.

    The "openai" backend falls back to the HF backend when it returns
    nothing. Returns the model text, or None when no backend is configured.
    """
    _log_backend()
    dispatch = {
        "openai": lambda: _call_openai(system, prompt) or _call_hf(system, prompt),
        "hf": lambda: _call_hf(system, prompt),
        "ollama": lambda: _call_ollama(system, prompt),
    }
    handler = dispatch.get(BACKEND)
    return handler() if handler else None
|
| 135 |
+
|
| 136 |
+
|
| 137 |
def reply_as_persona(avatar: dict, message: str, history=None) -> str:
|
| 138 |
persona = avatar.get("persona", "avatar")
|
| 139 |
description = avatar.get("description", "")
|
|
|
|
| 178 |
]
|
| 179 |
if s
|
| 180 |
).strip()
|
| 181 |
+
llm_reply = _call_backend(system, message)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 182 |
if llm_reply:
|
| 183 |
return llm_reply
|
| 184 |
return f"{persona} (described as {description or 'mysterious'}) replies to '{message}'."
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def extract_memories_from_text(text: str, max_items: int = 100) -> list[str]:
    """Convert raw text into a list of short first-person memory strings.

    Best-effort strategy:
      1. Ask the configured LLM backend to return a JSON array of memories.
      2. If the reply is not valid JSON, fall back to the reply's
         non-trivial lines.
      3. If the backend produced nothing, roughly split the *input* text
         on newlines and sentence boundaries.
    Duplicates are removed (first occurrence wins); the result is capped
    at *max_items*.
    """
    text = (text or "").strip()
    if not text:
        return []

    system = (
        "You turn raw text into concise first-person memory statements. "
        "Respond with a JSON array of strings. Each string should be a standalone memory that could be added "
        "to a personal knowledge base. Keep entries short (max 200 characters)."
    )
    prompt = (
        f"Input text:\n{text}\n\n"
        f"Extract at most {max_items} distinct memories. If none are found, return an empty JSON array."
    )

    response = _call_backend(system, prompt)
    entries: list[str] = []
    if response:
        try:
            parsed = json.loads(response)
            # Some models double-encode: a JSON string containing a JSON array.
            if isinstance(parsed, str):
                parsed = json.loads(parsed)
            if isinstance(parsed, list):
                entries = [str(item).strip() for item in parsed if str(item).strip()]
        except json.JSONDecodeError:
            pass
        if not entries:
            # Fallback: treat each non-trivial response line (minus list
            # bullets) as one memory.
            candidates = [line.strip("-• \t") for line in response.splitlines()]
            entries = [c for c in candidates if len(c) > 3]

    if not entries:
        # Last resort: rough sentence/line split of the original input.
        rough = [
            seg.strip()
            for seg in re.split(r"[\n\r]+|(?<=[.!?])\s+", text)
            if seg.strip()
        ]
        entries = rough[:max_items]

    # Deduplicate while preserving order
    seen = set()
    unique: list[str] = []
    for entry in entries:
        if entry not in seen:
            seen.add(entry)
            unique.append(entry)
    return unique[:max_items]
|
ui_chat.py
ADDED
|
@@ -0,0 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import html
|
| 2 |
+
import gradio as gr
|
| 3 |
+
|
| 4 |
+
from logic import (
|
| 5 |
+
send_message_public,
|
| 6 |
+
maybe_generate_image,
|
| 7 |
+
initial_greeting,
|
| 8 |
+
DEFAULT_AVATAR_ID,
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
# -------------------------------------------------------------
|
| 12 |
+
# CSS + JS (modern UI, responsive, smooth scroll)
|
| 13 |
+
# -------------------------------------------------------------
|
| 14 |
+
UI_STYLE = """
|
| 15 |
+
<style>
|
| 16 |
+
:root {
|
| 17 |
+
--user-bg: #e8f1ff;
|
| 18 |
+
--user-border: #8cb4ff;
|
| 19 |
+
|
| 20 |
+
--avatar-bg: #dfffe0;
|
| 21 |
+
--avatar-border: #8cd190;
|
| 22 |
+
|
| 23 |
+
--panel-border: #d4d4d4;
|
| 24 |
+
--panel-radius: 16px;
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
/* Chat panel container */
|
| 28 |
+
#chat-panel {
|
| 29 |
+
width: 100%;
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
/* Scrollable chat area */
|
| 33 |
+
.chat-scroll {
|
| 34 |
+
max-height: 800px;
|
| 35 |
+
min-height: 340px;
|
| 36 |
+
overflow-y: auto;
|
| 37 |
+
padding: 20px;
|
| 38 |
+
background: white;
|
| 39 |
+
border: 3px solid var(--panel-border);
|
| 40 |
+
border-radius: var(--panel-radius);
|
| 41 |
+
display: flex;
|
| 42 |
+
flex-direction: column;
|
| 43 |
+
gap: 16px;
|
| 44 |
+
scroll-behavior: smooth;
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
.chat-empty {
|
| 48 |
+
text-align: center;
|
| 49 |
+
color: #6b7280;
|
| 50 |
+
font-style: italic;
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
/* Chat row alignment */
|
| 54 |
+
.chat-row {
|
| 55 |
+
display: flex;
|
| 56 |
+
width: 100%;
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
.message-row.user {
|
| 60 |
+
justify-content: flex-end;
|
| 61 |
+
}
|
| 62 |
+
.message-row.avatar {
|
| 63 |
+
justify-content: flex-start;
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
/* Chat bubble */
|
| 67 |
+
.chat-bubble {
|
| 68 |
+
width: 75%;
|
| 69 |
+
padding: 12px 14px;
|
| 70 |
+
border-radius: 14px;
|
| 71 |
+
border: 2px solid transparent;
|
| 72 |
+
display: flex;
|
| 73 |
+
flex-direction: column;
|
| 74 |
+
gap: 8px;
|
| 75 |
+
box-shadow: 0 2px 6px rgba(0,0,0,0.05);
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
.chat-bubble.user {
|
| 79 |
+
background: var(--user-bg);
|
| 80 |
+
border-color: var(--user-border);
|
| 81 |
+
margin-left: auto;
|
| 82 |
+
}
|
| 83 |
+
.chat-bubble.avatar {
|
| 84 |
+
background: var(--avatar-bg);
|
| 85 |
+
border-color: var(--avatar-border);
|
| 86 |
+
margin-right: auto;
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
/* Inline images */
|
| 90 |
+
.chat-bubble img {
|
| 91 |
+
width: 70%;
|
| 92 |
+
border-radius: 12px;
|
| 93 |
+
border: 1px solid #d0d7e2;
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
.chat-text {
|
| 97 |
+
font-size: 1rem;
|
| 98 |
+
font-weight: 600;
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
/* Mobile layout */
|
| 102 |
+
@media (max-width: 768px) {
|
| 103 |
+
.chat-bubble {
|
| 104 |
+
width: 100%;
|
| 105 |
+
}
|
| 106 |
+
.chat-bubble img {
|
| 107 |
+
width: 100%;
|
| 108 |
+
}
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
/* Keep chat readable while backend events run */
|
| 112 |
+
.gradio-container .block.loading,
|
| 113 |
+
.gradio-container .block.pending,
|
| 114 |
+
.gradio-container .loading,
|
| 115 |
+
.gradio-container .pending {
|
| 116 |
+
filter: none !important;
|
| 117 |
+
opacity: 1 !important;
|
| 118 |
+
}
|
| 119 |
+
.gradio-container .block.loading *,
|
| 120 |
+
.gradio-container .block.pending *,
|
| 121 |
+
.gradio-container .loading *,
|
| 122 |
+
.gradio-container .pending * {
|
| 123 |
+
filter: none !important;
|
| 124 |
+
opacity: 1 !important;
|
| 125 |
+
}
|
| 126 |
+
.gradio-container .block.loading::after,
|
| 127 |
+
.gradio-container .block.loading::before,
|
| 128 |
+
.gradio-container .block.pending::after,
|
| 129 |
+
.gradio-container .block.pending::before,
|
| 130 |
+
.gradio-container .loading::after,
|
| 131 |
+
.gradio-container .loading::before,
|
| 132 |
+
.gradio-container .pending::after,
|
| 133 |
+
.gradio-container .pending::before {
|
| 134 |
+
display: none !important;
|
| 135 |
+
}
|
| 136 |
+
</style>
|
| 137 |
+
"""
|
| 138 |
+
|
| 139 |
+
SCROLL_JS_BODY = """
|
| 140 |
+
const app = document.querySelector("gradio-app");
|
| 141 |
+
const root = app && app.shadowRoot ? app.shadowRoot : document;
|
| 142 |
+
const host = root.querySelector("#chat-panel");
|
| 143 |
+
if (!host) {
|
| 144 |
+
console.warn("[scroll-js] host not found");
|
| 145 |
+
} else {
|
| 146 |
+
const panel = host.querySelector(".chat-scroll") || host.querySelector(".svelte-1n1h5do") || host;
|
| 147 |
+
if (!panel) {
|
| 148 |
+
console.warn("[scroll-js] scrollable panel not found");
|
| 149 |
+
} else {
|
| 150 |
+
const run = () => {
|
| 151 |
+
panel.scrollTop = panel.scrollHeight;
|
| 152 |
+
};
|
| 153 |
+
run();
|
| 154 |
+
setTimeout(run, 32);
|
| 155 |
+
setTimeout(run, 160);
|
| 156 |
+
panel.querySelectorAll("img").forEach((img) => {
|
| 157 |
+
if (img.dataset.scrollWatcher === "1") return;
|
| 158 |
+
img.dataset.scrollWatcher = "1";
|
| 159 |
+
const fire = () => {
|
| 160 |
+
panel.scrollTop = panel.scrollHeight;
|
| 161 |
+
};
|
| 162 |
+
if (img.complete) {
|
| 163 |
+
setTimeout(fire, 32);
|
| 164 |
+
} else {
|
| 165 |
+
img.addEventListener("load", fire, { once: true });
|
| 166 |
+
img.addEventListener("error", fire, { once: true });
|
| 167 |
+
}
|
| 168 |
+
});
|
| 169 |
+
if (!panel.dataset.observeScroll) {
|
| 170 |
+
panel.dataset.observeScroll = "1";
|
| 171 |
+
const observer = new MutationObserver(() => {
|
| 172 |
+
panel.scrollTop = panel.scrollHeight;
|
| 173 |
+
});
|
| 174 |
+
observer.observe(panel, { childList: true, subtree: true });
|
| 175 |
+
}
|
| 176 |
+
}
|
| 177 |
+
}
|
| 178 |
+
"""
|
| 179 |
+
|
| 180 |
+
SCROLL_SNIPPET = f"<script>(function(){{{SCROLL_JS_BODY}}})()</script>"
|
| 181 |
+
SCROLL_BUTTON_JS = f"() => {{{SCROLL_JS_BODY}}}"
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def render_chat(history):
    """Render the chat history as the HTML chat panel.

    Produces a ``.chat-scroll`` container of bubbles (user right, avatar
    left), with message text HTML-escaped and optional inline images, and
    appends the auto-scroll script snippet.
    """
    parts = ["<div class='chat-scroll'>"]

    if not history:
        parts.append("<div class='chat-empty'>Start the conversation...</div>")
    else:
        for entry in history:
            speaker = entry.get("speaker", "avatar")
            raw_text = entry.get("text", "") or ""
            safe_text = html.escape(raw_text) if isinstance(raw_text, str) else str(raw_text)

            is_user = speaker == "user"
            row_class = "chat-row message-row user" if is_user else "chat-row message-row avatar"
            bubble_class = "chat-bubble user" if is_user else "chat-bubble avatar"

            bubble = [f"<div class='{row_class}'>", f"<div class='{bubble_class}'>"]

            image = entry.get("image_data")
            # Avatar bubbles show the image above the text; user bubbles below.
            if image and not is_user:
                bubble.append(f"<img src='{image}' />")
            bubble.append(f"<div class='chat-text'>{safe_text}</div>")
            if image and is_user:
                bubble.append(f"<img src='{image}' />")

            bubble.append("</div></div>")
            parts.append("".join(bubble))

    parts.append("</div>")
    parts.append(SCROLL_SNIPPET)
    return "\n".join(parts)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def build_chat_tab(blocks: gr.Blocks):
|
| 219 |
+
with gr.Tab("Chat"):
|
| 220 |
+
avatar_id_input = gr.Textbox(
|
| 221 |
+
label="Avatar ID",
|
| 222 |
+
value=DEFAULT_AVATAR_ID,
|
| 223 |
+
interactive=True,
|
| 224 |
+
scale=2
|
| 225 |
+
)
|
| 226 |
+
|
| 227 |
+
chat_display = gr.HTML(render_chat([]), elem_id="chat-panel", sanitize_html=False)
|
| 228 |
+
msg_input = gr.Textbox(label="Message", placeholder="Say something...")
|
| 229 |
+
state = gr.State([])
|
| 230 |
+
pending_message = gr.State("")
|
| 231 |
+
|
| 232 |
+
with gr.Row():
|
| 233 |
+
with gr.Column(scale=1):
|
| 234 |
+
send_btn = gr.Button("Send", variant="primary")
|
| 235 |
+
tool_status = gr.Markdown("Tool used: n/a")
|
| 236 |
+
generate_toggle = gr.Checkbox(label="Generate Images", value=True)
|
| 237 |
+
clear_btn = gr.Button("Clear")
|
| 238 |
+
|
| 239 |
+
def stash_message(msg):
|
| 240 |
+
return msg, ""
|
| 241 |
+
|
| 242 |
+
def mark_processing():
|
| 243 |
+
return gr.update(value="Generating...", interactive=False)
|
| 244 |
+
|
| 245 |
+
def mark_ready():
|
| 246 |
+
return gr.update(value="Send", interactive=True)
|
| 247 |
+
|
| 248 |
+
def on_send(aid, msg, history, gen_flag):
|
| 249 |
+
history, tool = send_message_public(aid, msg, history, gen_flag)
|
| 250 |
+
return render_chat(history), "", history, f"Tool used: {tool}", gen_flag
|
| 251 |
+
|
| 252 |
+
def on_image_gen(aid, history, gen_flag):
|
| 253 |
+
history = maybe_generate_image(aid, history, gen_flag)
|
| 254 |
+
return render_chat(history), history, gen_flag
|
| 255 |
+
|
| 256 |
+
def mark_image_processing(gen_flag):
|
| 257 |
+
if not gen_flag:
|
| 258 |
+
return gr.update()
|
| 259 |
+
return gr.update(label="Generate Images (processing...)", interactive=False)
|
| 260 |
+
|
| 261 |
+
def mark_image_ready(gen_flag):
|
| 262 |
+
if not gen_flag:
|
| 263 |
+
return gr.update()
|
| 264 |
+
return gr.update(label="Generate Images", interactive=True)
|
| 265 |
+
|
| 266 |
+
send_btn.click(
|
| 267 |
+
stash_message,
|
| 268 |
+
inputs=[msg_input],
|
| 269 |
+
outputs=[pending_message, msg_input],
|
| 270 |
+
queue=False
|
| 271 |
+
).then(
|
| 272 |
+
mark_processing,
|
| 273 |
+
outputs=[send_btn],
|
| 274 |
+
show_progress="hidden"
|
| 275 |
+
).then(
|
| 276 |
+
on_send,
|
| 277 |
+
inputs=[avatar_id_input, pending_message, state, generate_toggle],
|
| 278 |
+
outputs=[chat_display, msg_input, state, tool_status, generate_toggle],
|
| 279 |
+
show_progress="hidden"
|
| 280 |
+
).then(
|
| 281 |
+
mark_image_processing,
|
| 282 |
+
inputs=[generate_toggle],
|
| 283 |
+
outputs=[generate_toggle],
|
| 284 |
+
show_progress="hidden"
|
| 285 |
+
).then(
|
| 286 |
+
on_image_gen,
|
| 287 |
+
inputs=[avatar_id_input, state, generate_toggle],
|
| 288 |
+
outputs=[chat_display, state, generate_toggle],
|
| 289 |
+
show_progress="hidden"
|
| 290 |
+
).then(
|
| 291 |
+
mark_image_ready,
|
| 292 |
+
inputs=[generate_toggle],
|
| 293 |
+
outputs=[generate_toggle],
|
| 294 |
+
show_progress="hidden"
|
| 295 |
+
).then(
|
| 296 |
+
mark_ready,
|
| 297 |
+
outputs=[send_btn],
|
| 298 |
+
show_progress="hidden"
|
| 299 |
+
).then(
|
| 300 |
+
None,
|
| 301 |
+
js=SCROLL_BUTTON_JS,
|
| 302 |
+
show_progress="hidden"
|
| 303 |
+
)
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
msg_input.submit(
|
| 307 |
+
stash_message,
|
| 308 |
+
inputs=[msg_input],
|
| 309 |
+
outputs=[pending_message, msg_input],
|
| 310 |
+
queue=False
|
| 311 |
+
).then(
|
| 312 |
+
mark_processing,
|
| 313 |
+
outputs=[send_btn],
|
| 314 |
+
show_progress="hidden"
|
| 315 |
+
).then(
|
| 316 |
+
on_send,
|
| 317 |
+
inputs=[avatar_id_input, pending_message, state, generate_toggle],
|
| 318 |
+
outputs=[chat_display, msg_input, state, tool_status, generate_toggle],
|
| 319 |
+
show_progress="hidden"
|
| 320 |
+
).then(
|
| 321 |
+
mark_image_processing,
|
| 322 |
+
inputs=[generate_toggle],
|
| 323 |
+
outputs=[generate_toggle],
|
| 324 |
+
show_progress="hidden"
|
| 325 |
+
).then(
|
| 326 |
+
on_image_gen,
|
| 327 |
+
inputs=[avatar_id_input, state, generate_toggle],
|
| 328 |
+
outputs=[chat_display, state, generate_toggle],
|
| 329 |
+
show_progress="hidden"
|
| 330 |
+
).then(
|
| 331 |
+
mark_image_ready,
|
| 332 |
+
inputs=[generate_toggle],
|
| 333 |
+
outputs=[generate_toggle],
|
| 334 |
+
show_progress="hidden"
|
| 335 |
+
).then(
|
| 336 |
+
mark_ready,
|
| 337 |
+
outputs=[send_btn],
|
| 338 |
+
show_progress="hidden"
|
| 339 |
+
).then(
|
| 340 |
+
None,
|
| 341 |
+
js=SCROLL_BUTTON_JS,
|
| 342 |
+
show_progress="hidden"
|
| 343 |
+
)
|
| 344 |
+
|
| 345 |
+
clear_btn.click(
|
| 346 |
+
lambda: (render_chat([]), "", [], "Tool used: n/a", True),
|
| 347 |
+
outputs=[chat_display, msg_input, state, tool_status, generate_toggle],
|
| 348 |
+
show_progress="hidden"
|
| 349 |
+
).then(
|
| 350 |
+
None,
|
| 351 |
+
js=SCROLL_BUTTON_JS,
|
| 352 |
+
show_progress="hidden"
|
| 353 |
+
)
|
| 354 |
+
|
| 355 |
+
def load_greeting(aid):
    """Seed the chat tab for avatar *aid*: greeting history, cleared input, reset status."""
    history = initial_greeting(aid)
    rendered = render_chat(history)
    return rendered, "", history, "Tool used: greet", True
|
| 358 |
+
|
| 359 |
+
blocks.load(
|
| 360 |
+
load_greeting,
|
| 361 |
+
inputs=[avatar_id_input],
|
| 362 |
+
outputs=[chat_display, msg_input, state, tool_status, generate_toggle],
|
| 363 |
+
show_progress="hidden"
|
| 364 |
+
).then(
|
| 365 |
+
mark_image_processing,
|
| 366 |
+
inputs=[generate_toggle],
|
| 367 |
+
outputs=[generate_toggle],
|
| 368 |
+
show_progress="hidden"
|
| 369 |
+
).then(
|
| 370 |
+
on_image_gen,
|
| 371 |
+
inputs=[avatar_id_input, state, generate_toggle],
|
| 372 |
+
outputs=[chat_display, state, generate_toggle],
|
| 373 |
+
show_progress="hidden"
|
| 374 |
+
).then(
|
| 375 |
+
mark_image_ready,
|
| 376 |
+
inputs=[generate_toggle],
|
| 377 |
+
outputs=[generate_toggle],
|
| 378 |
+
show_progress="hidden"
|
| 379 |
+
).then(
|
| 380 |
+
None,
|
| 381 |
+
js=SCROLL_BUTTON_JS,
|
| 382 |
+
show_progress="hidden"
|
| 383 |
+
)
|
ui_creator.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
|
| 3 |
+
from logic import (
|
| 4 |
+
ui_upload_portrait,
|
| 5 |
+
ui_remove_portrait,
|
| 6 |
+
ui_add_context_text,
|
| 7 |
+
ui_add_context_file,
|
| 8 |
+
ui_clear_generated,
|
| 9 |
+
ui_load_avatar,
|
| 10 |
+
ui_create_avatar,
|
| 11 |
+
ui_update_memory,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
DEMO_ADMIN_ID = "01043e7a9c7141f892d0b08e6c325f18"
|
| 15 |
+
|
| 16 |
+
def build_creator_tab():
    """Build the "Creator" tab: load/create an avatar, manage its portrait and memories.

    All event handlers (the ``ui_*`` callables) are imported from ``logic``;
    their signatures are assumed to match the ``inputs``/``outputs`` lists
    wired below — confirm against logic.py when changing either side.
    """
    with gr.Tab("Creator"):
        # Inline CSS shared by the two "card" panels below; each card is opened
        # with a raw <div> via gr.HTML and closed with a second gr.HTML("</div>").
        card_style = (
            "border:1px solid #e0e0e0; border-radius:6px; padding:12px; background:white;"
        )
        with gr.Row():
            with gr.Column(scale=1):
                # --- Card 1: load an existing avatar by admin ID ---
                gr.HTML(
                    f"<div style='{card_style}'>"
                    "<div style=\"font-weight:600; font-size: 0.9rem; margin-bottom:6px\">Load Existing Avatar</div>"
                )
                # Hidden holder for the resolved avatar id; populated by
                # ui_load_avatar / ui_create_avatar outputs, read by every handler.
                creator_avatar_id = gr.Textbox(label="Avatar ID", value="", visible=False)
                creator_admin_id = gr.Textbox(
                    label="Admin ID", type="password", value=DEMO_ADMIN_ID
                )
                load_btn = gr.Button("Load Avatar", variant="primary", scale=1)
                load_status = gr.Textbox(
                    label="Load Status", value="", interactive=False, lines=1
                )
                gr.HTML("</div>")  # close card 1
            with gr.Column(scale=1):
                # --- Card 2: create a brand-new avatar ---
                gr.HTML(
                    f"<div style='{card_style}'>"
                    "<div style=\"font-weight:600; font-size: 0.9rem; margin-bottom:6px\">Create New Avatar</div>"
                )
                new_desc = gr.Textbox(label="Name")
                create_btn = gr.Button("Create Avatar", variant="primary", scale=1)
                create_status = gr.Textbox(
                    label="Create Status", value="", interactive=False, lines=1
                )
                gr.HTML("</div>")  # close card 2

        with gr.Row():
            with gr.Column(scale=1):
                # Left column: avatar details and portrait management.
                avatar_data = gr.Textbox(label="Avatar JSON", lines=8)
                portrait_view = gr.Image(label="Portrait", type="filepath")

                portrait_upload = gr.Image(label="Upload Portrait", type="filepath")
                portrait_status = gr.Textbox(label="Portrait Status")

                upload_portrait_btn = gr.Button("Save Portrait")
                remove_portrait_btn = gr.Button("Remove Portrait")

                upload_portrait_btn.click(
                    ui_upload_portrait,
                    inputs=[creator_avatar_id, creator_admin_id, portrait_upload],
                    outputs=[portrait_status, portrait_view],
                )

                remove_portrait_btn.click(
                    ui_remove_portrait,
                    inputs=[creator_avatar_id, creator_admin_id],
                    outputs=[portrait_status, portrait_view],
                )

            with gr.Column(scale=2):
                # Right (wider) column: memory table plus add/clear controls.
                gr.Markdown("### Memories")

                # Editable grid of existing memories; edits are persisted only
                # when the user clicks "Save Edited Memories" below.
                memory_table = gr.Dataframe(
                    headers=["memory", "private"],
                    wrap=True,
                    interactive=True,
                    show_row_numbers=True,
                )

                save_edits_btn = gr.Button("Save Edited Memories")
                save_edits_status = gr.Textbox(label="Edit Status")

                save_edits_btn.click(
                    ui_update_memory,
                    inputs=[creator_avatar_id, creator_admin_id, memory_table],
                    outputs=[save_edits_status],
                )

                gr.Markdown("### Add New Memory")

                # New memories can come from free text / a URL, or an uploaded
                # file; both paths share the same privacy toggle and refresh
                # memory_table on success.
                context_text = gr.Textbox(label="Text / URL", lines=4)
                private_toggle = gr.Checkbox(label="Private?")
                text_status = gr.Textbox(label="Text Status")

                add_text_btn = gr.Button("Add Text Memory")
                add_text_btn.click(
                    ui_add_context_text,
                    inputs=[creator_avatar_id, creator_admin_id, context_text, private_toggle],
                    outputs=[text_status, memory_table],
                )

                context_file = gr.File(label="Upload File")
                file_status = gr.Textbox(label="File Status")

                add_file_btn = gr.Button("Add File Memory")
                add_file_btn.click(
                    ui_add_context_file,
                    inputs=[creator_avatar_id, creator_admin_id, context_file, private_toggle],
                    outputs=[file_status, memory_table],
                )

                clear_gen_btn = gr.Button("Clear Generated Images")
                clear_gen_status = gr.Textbox(label="Generated Images Status")

                clear_gen_btn.click(
                    ui_clear_generated,
                    inputs=[creator_avatar_id, creator_admin_id],
                    outputs=[clear_gen_status],
                )

        # Load: fetch avatar JSON, memories and portrait for the given admin id.
        load_btn.click(
            ui_load_avatar,
            inputs=[creator_avatar_id, creator_admin_id],
            outputs=[avatar_data, memory_table, portrait_view, load_status, creator_avatar_id, creator_admin_id],
        )

        # Create: 3-step chain — (1) synchronously blank the panel and show a
        # progress message (queue=False so it renders immediately), (2) run the
        # actual creation, (3) synchronously set the final status text.
        create_btn.click(
            lambda: ("", [], None, "Creating new avatar...", "", DEMO_ADMIN_ID),
            outputs=[avatar_data, memory_table, portrait_view, load_status, creator_avatar_id, creator_admin_id],
            queue=False,
        ).then(
            ui_create_avatar,
            inputs=[new_desc],
            outputs=[create_status, avatar_data, memory_table, portrait_view, creator_avatar_id, creator_admin_id],
        ).then(
            lambda: "New avatar ready. Use the admin ID shown in the creator panel.",
            outputs=[load_status],
            queue=False,
        )
|
ui_info.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
|
| 3 |
+
from logic import resolve_path
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def build_info_tab():
    """Render the static "Info" tab containing the workflow infographic."""
    with gr.Tab("Info"):
        gr.Markdown("### Avatar MCP Workflow")
        infographic_path = resolve_path("avatar_infographic.png")
        gr.Image(label="Infographic", value=infographic_path, type="filepath")
|