# app.py — Affection 👁️ (Hugging Face Space)
import os

# Disable telemetry; set before the heavyweight imports so the flags
# are in place when those libraries initialize.
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
os.environ["SPACES_DISABLE_RELOAD"] = "1"

import gradio as gr
import matplotlib.pyplot as plt

from utils.presets import EMOTION_PRESETS
from utils.drama import apply_drama
from utils.color_model import infer_color, render_color
# ------------------------------------------------------------
# Passion (Radial Amplification)
# ------------------------------------------------------------
def apply_passion(raw: dict, passion: float) -> dict:
    """Radially amplify V/A/D away from the neutral midpoint (0.5)."""
    passion = max(0.0, min(3.5, float(passion)))
    out = {}
    for k, v in raw.items():
        v = float(v)
        if k in ("V", "A", "D"):
            # Gain grows with distance from neutral; result is clamped to [0, 1].
            delta = v - 0.5
            magnitude = abs(delta)
            gain = 1.0 + passion * magnitude
            out[k] = max(0.0, min(1.0, 0.5 + delta * gain))
        else:
            out[k] = max(0.0, min(1.0, v))
    return out
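
# Worked example (illustrative): with passion=2.0 and V=0.7, delta=0.2,
# gain = 1.0 + 2.0 * 0.2 = 1.4, so V' = 0.5 + 0.2 * 1.4 = 0.78. Values
# farther from neutral receive a proportionally larger push, which is
# what makes the amplification "radial".
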
# ------------------------------------------------------------
# Valence–Arousal Visualization (2D Projection)
# ------------------------------------------------------------
def generate_scatter(raw, amplified, cinematic, label, passion, drama):
    fig, ax = plt.subplots(figsize=(6, 6))
    base_color = "#2C3E50"  # neutral deep tone

    # Plot cinematic anchors faintly
    for preset in EMOTION_PRESETS.values():
        t = preset["target"]
        ax.scatter(t["V"], t["A"], alpha=0.1, s=90, color="#BBBBBB")
    # Natural
    ax.scatter(
        raw["V"], raw["A"],
        s=180,
        facecolor=base_color,
        alpha=0.5,
        label="Natural (Extraction)"
    )

    # After Passion
    ax.scatter(
        amplified["V"], amplified["A"],
        s=180,
        facecolors="none",
        edgecolors=base_color,
        linewidth=2,
        label="After Passion (Radial Gain)"
    )

    # After Drama
    ax.scatter(
        cinematic["V"], cinematic["A"],
        s=220,
        facecolor=base_color,
        edgecolor="black",
        linewidth=1.5,
        alpha=0.9,
        label="After Drama (Cinematic Alignment)"
    )
    # Arrow 1 — Raw → Amplified
    ax.arrow(
        raw["V"],
        raw["A"],
        amplified["V"] - raw["V"],
        amplified["A"] - raw["A"],
        head_width=0.02,
        length_includes_head=True,
        color=base_color,
        linestyle="--",
        linewidth=2,
        alpha=0.6
    )

    # Arrow 2 — Amplified → Cinematic
    ax.arrow(
        amplified["V"],
        amplified["A"],
        cinematic["V"] - amplified["V"],
        cinematic["A"] - amplified["A"],
        head_width=0.02,
        length_includes_head=True,
        color=base_color,
        linestyle="-",
        linewidth=2,
        alpha=0.9
    )
    # ----------------------------------
    # Dynamic Zoom (Centered + 20% Padding)
    # ----------------------------------
    xs = [raw["V"], amplified["V"], cinematic["V"]]
    ys = [raw["A"], amplified["A"], cinematic["A"]]
    min_x, max_x = min(xs), max(xs)
    min_y, max_y = min(ys), max(ys)
    span_x = max_x - min_x
    span_y = max_y - min_y

    # Use the larger span to keep square framing
    span = max(span_x, span_y)
    # Avoid zero-span collapse
    span = max(span, 0.05)

    padding = span * 0.20  # 20% of the span added on each side
    center_x = (min_x + max_x) / 2
    center_y = (min_y + max_y) / 2
    half_range = (span / 2) + padding
    ax.set_xlim(center_x - half_range, center_x + half_range)
    ax.set_ylim(center_y - half_range, center_y + half_range)
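    # Example (illustrative): if the three points span V in [0.45, 0.65] and
    # A in [0.50, 0.58], then span = 0.20, padding = 0.04, half_range = 0.14,
    # so both axes cover 0.28 units centered on the point cloud.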
    ax.set_aspect("equal", adjustable="box")
    ax.set_xlabel("Valence")
    ax.set_ylabel("Arousal")
    ax.set_title(f"{label}\nPassion={passion:.2f} | Drama={drama:.2f}")
    ax.legend(loc="lower right")
    ax.grid(alpha=0.15)
    plt.tight_layout()
    plt.close(fig)  # deregister from pyplot so figures don't accumulate across reruns
    return fig
# ------------------------------------------------------------
# Fast-Loop Simulation
# ------------------------------------------------------------
def run_pipeline(preset_name, passion, drama):
    preset = EMOTION_PRESETS[preset_name]
    text = preset["text"]
    natural = preset["raw"]
    target = preset["target"]

    amplified = apply_passion(natural, passion)
    cinematic = apply_drama(amplified, target, drama)

    color_params = infer_color(cinematic)
    color_block = render_color(color_params)

    fig = generate_scatter(natural, amplified, cinematic, preset_name, passion, drama)

    return (
        text,
        natural,
        amplified,
        cinematic,
        color_params,
        color_block,
        fig
    )
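
# Quick sanity check (illustrative; assumes at least one preset is defined):
#   first = list(EMOTION_PRESETS.keys())[0]
#   text, natural, amplified, cinematic, *_ = run_pipeline(first, 2.25, 0.65)
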
# ------------------------------------------------------------
# UI
# ------------------------------------------------------------
with gr.Blocks(title="Affection 👁️ — Edge Emotional Intelligence") as demo:
    gr.Markdown("# Affection 👁️")
    gr.Markdown("## Simulation Layer for an Edge AI Emotional Robotics System")

    # ---------------------------
    # Robot Speech
    # ---------------------------
    gr.Markdown("### 🗣 Robot Speech")
    preset_selector = gr.Radio(
        choices=list(EMOTION_PRESETS.keys()),
        label="Select Transcript Sample",
        value=list(EMOTION_PRESETS.keys())[0],
    )
    transcript_output = gr.Textbox(label="Input Transcript", interactive=False)

    gr.Markdown("---")
    # ---------------------------
    # Edge Affect Processing
    # ---------------------------
    gr.Markdown("### ⚡ Edge Affect Processing — NVIDIA Jetson Orin Nano")
    gr.Markdown(
        """
This section is a simplified visualization of a more complex on-device architecture.
In hardware deployment, the NVIDIA Jetson Orin Nano performs all of the following:

- Robot hardware daemon service
- Interactive conversational application
- Real-time transcript ingestion
- VAD extraction (NRC-VAD lexicon)
- Structural language metrics (Complexity + Coherence)
- Radial emotional amplification (Passion)
- Cinematic nearest-exemplar alignment (Drama)
- Dual-timescale blending (fast burst + slow baseline via Nemotron/Ollama)
- Continuous emotional state streaming for display on an expression module

This demo isolates a single loop transformation for clarity.
Our NVIDIA edge device can run this loop 200 times per second.
"""
    )
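
    # Illustrative sketch of the "dual-timescale blending" listed above (not
    # part of this demo; names are hypothetical). A fast burst tracks the
    # current utterance while a slow baseline drifts with the conversation:
    #
    #   baseline = {k: (1 - tau) * baseline[k] + tau * burst[k] for k in burst}
    #   state    = {k: (1 - w) * baseline[k] + w * burst[k] for k in burst}
    #
    # with a small tau (slow drift) and w weighting the fast burst.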
    with gr.Row():
        passion = gr.Slider(
            minimum=0.0,
            maximum=3.0,
            value=2.25,
            step=0.1,
            label="Passion (Radial Emotional Amplification)"
        )
        drama = gr.Slider(
            minimum=0.0,
            maximum=1.5,
            value=0.65,
            step=0.05,
            label="Drama (Cinematic Alignment)"
        )
    with gr.Row():
        natural_output = gr.JSON(label="Natural VAD+CC")
        amplified_output = gr.JSON(label="After Passion")
        cinematic_output = gr.JSON(label="After Drama")

    scatter_output = gr.Plot(label="Valence–Arousal Projection")
    gr.Markdown(
        """
**Note:**
This plot shows a 2D Valence–Arousal projection for visualization only; the plotted values come from the actual model.
The full transformation and color inference are more complex and operate on the complete 5D VAD+CC vector.
"""
    )

    gr.Markdown("---")
    # ---------------------------
    # Emotional Expression
    # ---------------------------
    gr.Markdown("### 💡 Emotional Expression")
    gr.Markdown(
        """
The finalized VAD+CC vector is transmitted to an expressive display module; in this demo it is converted into colors used for the robot's eyes.
The module does not compute emotion.
It receives the 5D emotional state and runs a trained neural model to convert it into expressive color.

Model used here (same as deployment):
https://huggingface.co/danielritchie/vibe-color-model

VAD+CC (Affect Engine) → Embedded Model → Color Rendering (Expression)
"""
    )
    rgb_output = gr.JSON(label="Model Output (RGB + Expressive Parameters)")
    color_display = gr.HTML(label="Rendered Expression")
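
    # In code, the flow above maps to run_pipeline(): infer_color(cinematic)
    # -> color_params, then render_color(color_params) -> the HTML swatch
    # shown in color_display. Both helpers live in utils.color_model.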
    outputs = [
        transcript_output,
        natural_output,
        amplified_output,
        cinematic_output,
        rgb_output,
        color_display,
        scatter_output
    ]

    inputs = [preset_selector, passion, drama]
    preset_selector.change(fn=run_pipeline, inputs=inputs, outputs=outputs)
    passion.change(fn=run_pipeline, inputs=inputs, outputs=outputs)
    drama.change(fn=run_pipeline, inputs=inputs, outputs=outputs)
    demo.load(fn=run_pipeline, inputs=inputs, outputs=outputs)
demo.launch(server_name="0.0.0.0", server_port=7860)