# app.py — Affection 👁️ (Hugging Face Space)
import os

# These flags must be set BEFORE the libraries that read them are imported:
# huggingface_hub (pulled in by gradio) reads HF_HUB_DISABLE_TELEMETRY at
# import time, so setting it after `import gradio` (as the original did)
# silently fails to disable telemetry.
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
os.environ["SPACES_DISABLE_RELOAD"] = "1"

import gradio as gr
import matplotlib

# Headless backend for server-side rendering. Figures are built with
# matplotlib.figure.Figure directly (not pyplot), so they never enter
# pyplot's global figure registry — this prevents the unbounded figure
# leak ("More than 20 figures have been opened") that plt.subplots()
# causes when a callback fires on every slider change.
matplotlib.use("Agg")
from matplotlib.figure import Figure

from utils.presets import EMOTION_PRESETS
from utils.drama import apply_drama
from utils.color_model import infer_color, render_color


# ------------------------------------------------------------
# Passion (Radial Amplification)
# ------------------------------------------------------------
def apply_passion(raw: dict, passion: float) -> dict:
    """Radially amplify the V/A/D components of a VAD+CC vector.

    Each of the keys "V", "A", "D" is pushed away from the neutral point
    0.5 by a gain of ``1 + passion * |v - 0.5|`` (the further a component
    already is from neutral, the harder it is pushed). All other keys are
    passed through unchanged except for clamping. Every output value is
    clamped to [0, 1].

    Args:
        raw: mapping of component name -> value, nominally in [0, 1].
        passion: amplification strength, clamped to [0.0, 3.5].
            NOTE(review): the UI slider only exposes [0, 3]; the extra
            headroom here looks deliberate — confirm before tightening.

    Returns:
        A new dict with the same keys as ``raw``, values in [0, 1].
    """
    passion = max(0.0, min(3.5, float(passion)))
    out = {}
    for key, value in raw.items():
        value = float(value)
        if key in ("V", "A", "D"):
            delta = value - 0.5                 # signed offset from neutral
            gain = 1.0 + passion * abs(delta)   # radial gain grows with offset
            out[key] = max(0.0, min(1.0, 0.5 + delta * gain))
        else:
            out[key] = max(0.0, min(1.0, value))
    return out


# ------------------------------------------------------------
# Valence–Arousal Visualization (2D Projection)
# ------------------------------------------------------------
def generate_scatter(raw, amplified, cinematic, label, passion, drama):
    """Render the 2D Valence–Arousal projection of the pipeline stages.

    Shows three points (natural -> after Passion -> after Drama) joined by
    arrows, with all preset targets plotted faintly as anchors. Only the
    "V" and "A" keys of each dict are used.

    Args:
        raw: natural (extracted) VAD+CC dict.
        amplified: the vector after apply_passion.
        cinematic: the vector after apply_drama.
        label: title line (preset name).
        passion, drama: slider values echoed in the title.

    Returns:
        A matplotlib Figure (suitable for gr.Plot).
    """
    fig = Figure(figsize=(6, 6))
    ax = fig.subplots()
    base_color = "#2C3E50"  # neutral deep tone

    # Plot cinematic anchors faintly
    for preset in EMOTION_PRESETS.values():
        t = preset["target"]
        ax.scatter(t["V"], t["A"], alpha=0.1, s=90, color="#BBBBBB")

    # Natural
    ax.scatter(
        raw["V"], raw["A"],
        s=180, facecolor=base_color, alpha=0.5,
        label="Natural (Extraction)",
    )

    # After Passion (hollow marker)
    ax.scatter(
        amplified["V"], amplified["A"],
        s=180, facecolor="none", edgecolor=base_color, linewidth=2,
        label="After Passion (Radial Gain)",
    )

    # After Drama
    ax.scatter(
        cinematic["V"], cinematic["A"],
        s=220, facecolor=base_color, edgecolor="black",
        linewidth=1.5, alpha=0.9,
        label="After Drama (Cinematic Alignment)",
    )

    # Arrow 1 — Raw → Amplified (dashed: intermediate step)
    ax.arrow(
        raw["V"], raw["A"],
        amplified["V"] - raw["V"], amplified["A"] - raw["A"],
        head_width=0.02, length_includes_head=True,
        color=base_color, linestyle="--", linewidth=2, alpha=0.6,
    )

    # Arrow 2 — Amplified → Cinematic (solid: final step)
    ax.arrow(
        amplified["V"], amplified["A"],
        cinematic["V"] - amplified["V"], cinematic["A"] - amplified["A"],
        head_width=0.02, length_includes_head=True,
        color=base_color, linestyle="-", linewidth=2, alpha=0.9,
    )

    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_xlabel("Valence")
    ax.set_ylabel("Arousal")
    ax.set_title(f"{label}\nPassion={round(passion,2)} | Drama={round(drama,2)}")
    ax.legend(loc="lower right")
    ax.grid(alpha=0.15)
    fig.tight_layout()
    return fig


# ------------------------------------------------------------
# Fast-Loop Simulation
# ------------------------------------------------------------
def run_pipeline(preset_name, passion, drama):
    """Run the fast-loop transformation for one preset.

    Pipeline: preset raw vector -> apply_passion -> apply_drama ->
    color inference/rendering, plus the V-A scatter figure.

    Args:
        preset_name: key into EMOTION_PRESETS.
        passion: radial-amplification slider value.
        drama: cinematic-alignment slider value.

    Returns:
        Tuple matching the UI `outputs` list:
        (transcript text, natural dict, amplified dict, cinematic dict,
         color params, rendered color HTML block, matplotlib figure).
    """
    preset = EMOTION_PRESETS[preset_name]
    text = preset["text"]
    natural = preset["raw"]
    target = preset["target"]

    amplified = apply_passion(natural, passion)
    cinematic = apply_drama(amplified, target, drama)

    color_params = infer_color(cinematic)
    color_block = render_color(color_params)

    fig = generate_scatter(natural, amplified, cinematic, preset_name, passion, drama)

    return (
        text,
        natural,
        amplified,
        cinematic,
        color_params,
        color_block,
        fig,
    )


# ------------------------------------------------------------
# UI
# ------------------------------------------------------------
with gr.Blocks(title="Affection 👁️ — Edge Emotional Intelligence") as demo:
    gr.Markdown("# Affection 👁️")
    gr.Markdown("## Simulation Layer for an Edge AI Emotional Robotics System")

    # ---------------------------
    # Robot Speech
    # ---------------------------
    gr.Markdown("### 🗣 Robot Speech")
    preset_selector = gr.Radio(
        choices=list(EMOTION_PRESETS.keys()),
        label="Select Transcript Sample",
        value=list(EMOTION_PRESETS.keys())[0],
    )
    transcript_output = gr.Textbox(label="Input Transcript", interactive=False)

    gr.Markdown("---")

    # ---------------------------
    # Edge Affect Processing
    # ---------------------------
    gr.Markdown("### ⚡ Edge Affect Processing — NVIDIA Jetson Orin Nano")
    gr.Markdown(
        """
This section provides a simplified visualization of a more complex
on-device architecture. In hardware deployment, the NVIDIA Jetson Orin
Nano performs:

• Real-time transcript ingestion

• VAD extraction (NRC-VAD lexicon)

• Structural language metrics (Complexity + Coherence)

• Radial emotional amplification (Passion)

• Cinematic nearest-exemplar alignment (Drama)

• Dual-timescale blending (fast burst + slow baseline via Nemotron/Ollama)

• Continuous emotional state streaming to the display module

This demo isolates the fast-loop transformation for clarity.
"""
    )

    with gr.Row():
        passion = gr.Slider(
            minimum=0.0, maximum=3.0, value=2.25, step=0.1,
            label="Passion (Radial Emotional Amplification)",
        )
        drama = gr.Slider(
            minimum=0.0, maximum=1.5, value=0.65, step=0.05,
            label="Drama (Cinematic Alignment)",
        )

    with gr.Row():
        natural_output = gr.JSON(label="Natural VAD+CC")
        amplified_output = gr.JSON(label="After Passion")
        cinematic_output = gr.JSON(label="After Drama")

    scatter_output = gr.Plot(label="Valence–Arousal Projection")
    gr.Markdown(
        """
**Note:** This plot shows a 2D Valence–Arousal projection for
visualization only. All transformations and color inference operate on
the full 5D VAD+CC vector.
"""
    )

    gr.Markdown("---")

    # ---------------------------
    # Emotional Expression
    # ---------------------------
    gr.Markdown("### 💡 Emotional Expression")
    gr.Markdown(
        """
The finalized VAD+CC vector is transmitted to an embedded display module.
The module does not compute emotion. It receives the 5D emotional state
and runs a trained neural model to convert it into expressive color.

Model used here (same as deployment):
https://huggingface.co/danielritchie/vibe-color-model

VAD+CC → Embedded Model → Color Rendering
"""
    )

    rgb_output = gr.JSON(label="Model Output (RGB + Expressive Parameters)")
    color_display = gr.HTML(label="Rendered Expression")

    # One shared output list: every trigger re-runs the full fast loop.
    outputs = [
        transcript_output,
        natural_output,
        amplified_output,
        cinematic_output,
        rgb_output,
        color_display,
        scatter_output,
    ]
    inputs = [preset_selector, passion, drama]

    preset_selector.change(fn=run_pipeline, inputs=inputs, outputs=outputs)
    passion.change(fn=run_pipeline, inputs=inputs, outputs=outputs)
    drama.change(fn=run_pipeline, inputs=inputs, outputs=outputs)
    # Populate all outputs once on page load.
    demo.load(fn=run_pipeline, inputs=inputs, outputs=outputs)

demo.launch(server_name="0.0.0.0", server_port=7860)