| |
|
|
import os

# Telemetry/analytics opt-outs MUST be set before importing gradio:
# gradio pulls in huggingface_hub, which snapshots HF_HUB_DISABLE_TELEMETRY
# (and friends) into module-level constants at import time — setting these
# after the import silently has no effect.
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
os.environ["SPACES_DISABLE_RELOAD"] = "1"

import gradio as gr
import matplotlib.pyplot as plt

from utils.presets import EMOTION_PRESETS
from utils.drama import apply_drama
from utils.color_model import infer_color, render_color
|
|
|
|
| |
| |
| |
def apply_passion(raw: dict, passion: float) -> dict:
    """Radially amplify the V/A/D components of an affect vector.

    Each of the "V", "A", "D" channels is pushed away from the neutral
    midpoint 0.5 by a gain that grows with both the passion level and the
    channel's own distance from neutral (so already-strong emotions are
    amplified more). All channels, amplified or not, are clamped to [0, 1].

    Args:
        raw: mapping of channel name -> value (e.g. V/A/D plus C/C metrics).
        passion: amplification level; clamped to [0.0, 3.5].

    Returns:
        A new dict with the same keys and amplified, clamped float values.
    """
    gain_level = min(max(float(passion), 0.0), 3.5)

    result: dict = {}
    for key, value in raw.items():
        value = float(value)
        if key not in ("V", "A", "D"):
            # Non-affect channels pass through untouched (clamped only).
            result[key] = min(max(value, 0.0), 1.0)
            continue
        offset = value - 0.5
        # Gain scales with distance from neutral: radial amplification.
        scaled = 0.5 + offset * (1.0 + gain_level * abs(offset))
        result[key] = min(max(scaled, 0.0), 1.0)
    return result
|
|
|
|
| |
| |
| |
def generate_scatter(raw, amplified, cinematic, label, passion, drama):
    """Plot the Valence-Arousal projection of the three pipeline stages.

    Args:
        raw: dict with at least "V" and "A" keys — the natural extraction.
        amplified: same shape — the state after the Passion radial gain.
        cinematic: same shape — the state after the Drama alignment.
        label: preset name, shown in the plot title.
        passion: passion slider value, echoed in the title.
        drama: drama slider value, echoed in the title.

    Returns:
        A matplotlib Figure (detached from the pyplot registry).
    """
    fig, ax = plt.subplots(figsize=(6, 6))

    base_color = "#2C3E50"

    # Faint background markers: every preset's target, for orientation.
    for preset in EMOTION_PRESETS.values():
        t = preset["target"]
        ax.scatter(t["V"], t["A"], alpha=0.1, s=90, color="#BBBBBB")

    # Stage 1: natural extraction (filled, translucent).
    ax.scatter(
        raw["V"], raw["A"],
        s=180,
        facecolor=base_color,
        alpha=0.5,
        label="Natural (Extraction)"
    )

    # Stage 2: after passion amplification (hollow ring).
    ax.scatter(
        amplified["V"], amplified["A"],
        s=180,
        facecolors="none",
        edgecolors=base_color,
        linewidth=2,
        label="After Passion (Radial Gain)"
    )

    # Stage 3: after drama alignment (filled, outlined, most prominent).
    ax.scatter(
        cinematic["V"], cinematic["A"],
        s=220,
        facecolor=base_color,
        edgecolor="black",
        linewidth=1.5,
        alpha=0.9,
        label="After Drama (Cinematic Alignment)"
    )

    # Dashed arrow: natural -> amplified (passion step).
    ax.arrow(
        raw["V"],
        raw["A"],
        amplified["V"] - raw["V"],
        amplified["A"] - raw["A"],
        head_width=0.02,
        length_includes_head=True,
        color=base_color,
        linestyle="--",
        linewidth=2,
        alpha=0.6
    )

    # Solid arrow: amplified -> cinematic (drama step).
    ax.arrow(
        amplified["V"],
        amplified["A"],
        cinematic["V"] - amplified["V"],
        cinematic["A"] - amplified["A"],
        head_width=0.02,
        length_includes_head=True,
        color=base_color,
        linestyle="-",
        linewidth=2,
        alpha=0.9
    )

    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_xlabel("Valence")
    ax.set_ylabel("Arousal")
    ax.set_title(f"{label}\nPassion={round(passion,2)} | Drama={round(drama,2)}")

    ax.legend(loc="lower right")
    ax.grid(alpha=0.15)

    fig.tight_layout()
    # Detach from the global pyplot registry so repeated Gradio callbacks
    # don't accumulate open figures (a per-interaction memory leak). The
    # Figure object itself stays alive and renderable by gr.Plot.
    plt.close(fig)
    return fig
|
|
|
|
| |
| |
| |
def run_pipeline(preset_name, passion, drama):
    """Run the full demo transform chain for one transcript preset.

    Looks up the preset, applies the Passion amplification and Drama
    alignment, infers the expressive color, and builds the scatter plot.

    Args:
        preset_name: key into EMOTION_PRESETS.
        passion: radial amplification level from the Passion slider.
        drama: cinematic alignment strength from the Drama slider.

    Returns:
        A 7-tuple matching the Gradio outputs list: (transcript text,
        natural state, amplified state, cinematic state, color params,
        rendered color HTML, matplotlib figure).
    """
    preset = EMOTION_PRESETS[preset_name]
    natural, target = preset["raw"], preset["target"]

    # Two-stage affect transform: radial gain, then exemplar alignment.
    amplified = apply_passion(natural, passion)
    cinematic = apply_drama(amplified, target, drama)

    color_params = infer_color(cinematic)

    return (
        preset["text"],
        natural,
        amplified,
        cinematic,
        color_params,
        render_color(color_params),
        generate_scatter(natural, amplified, cinematic, preset_name, passion, drama),
    )
|
|
|
|
| |
| |
| |
with gr.Blocks(title="Affection 👁️ — Edge Emotional Intelligence") as demo:

    gr.Markdown("# Affection 👁️")
    gr.Markdown("## Simulation Layer for an Edge AI Emotional Robotics System")

    # ---------------- Input: transcript selection ----------------
    gr.Markdown("### 🗣 Robot Speech")

    preset_selector = gr.Radio(
        choices=list(EMOTION_PRESETS.keys()),
        label="Select Transcript Sample",
        value=list(EMOTION_PRESETS.keys())[0],
    )

    transcript_output = gr.Textbox(label="Input Transcript", interactive=False)

    gr.Markdown("---")

    # ---------------- Processing: passion / drama controls ----------------
    gr.Markdown("### ⚡ Edge Affect Processing — NVIDIA Jetson Orin Nano")

    gr.Markdown(
        """
This section provides a simplified visualization of a more complex on-device architecture.

In hardware deployment, the NVIDIA Jetson Orin Nano performs all of the following:

• Robot hardware daemon service
• Interactive conversational application
• Real-time transcript ingestion
• VAD extraction (NRC-VAD lexicon)
• Structural language metrics (Complexity + Coherence)
• Radial emotional amplification (Passion)
• Cinematic nearest-exemplar alignment (Drama)
• Dual-timescale blending (fast burst + slow baseline via Nemotron/Ollama)
• Continuous emotional state streaming for display on an expression module

This demo isolates a single loop transformation for clarity.

Our NVIDIA edge device is capable of running this loop 200x per second.
"""
    )

    with gr.Row():
        passion = gr.Slider(
            minimum=0.0,
            maximum=3.0,
            value=2.25,
            step=0.1,
            label="Passion (Radial Emotional Amplification)"
        )

        drama = gr.Slider(
            minimum=0.0,
            maximum=1.5,
            value=0.65,
            step=0.05,
            label="Drama (Cinematic Alignment)"
        )

    with gr.Row():
        natural_output = gr.JSON(label="Natural VAD+CC")
        amplified_output = gr.JSON(label="After Passion")
        cinematic_output = gr.JSON(label="After Drama")

    scatter_output = gr.Plot(label="Valence–Arousal Projection")

    gr.Markdown(
        """
**Note:**
This plot shows a 2D Valence–Arousal projection for visualization only, but results are from the actual model.
Actual transformation and color inference are more complex and operate on the full 5D VAD+CC vector.
"""
    )

    gr.Markdown("---")

    # ---------------- Output: expressive color rendering ----------------
    gr.Markdown("### 💡 Emotional Expression")

    gr.Markdown(
        """
The finalized VAD+CC vector is transmitted to an expressive display module. In this example, we are converting to colors to be used for eyes.

The module does not compute emotion.
It receives the 5D emotional state and runs a trained neural model to convert it into expressive color.

Model used here (same as deployment):
https://huggingface.co/danielritchie/vibe-color-model

VAD+CC (Affect Engine) → Embedded Model → Color Rendering (Expression)
"""
    )

    rgb_output = gr.JSON(label="Model Output (RGB + Expressive Parameters)")
    color_display = gr.HTML(label="Rendered Expression")

    # Single source of truth for the output wiring — the order must match
    # run_pipeline's return tuple exactly.
    outputs = [
        transcript_output,
        natural_output,
        amplified_output,
        cinematic_output,
        rgb_output,
        color_display,
        scatter_output,
    ]

    # Every control re-runs the same pipeline with the same inputs/outputs,
    # so wire them in one loop instead of three copy-pasted .change() calls.
    inputs = [preset_selector, passion, drama]
    for control in inputs:
        control.change(fn=run_pipeline, inputs=inputs, outputs=outputs)

    # Populate the UI once on page load.
    demo.load(fn=run_pipeline, inputs=inputs, outputs=outputs)

demo.launch(server_name="0.0.0.0", server_port=7860)
|
|