danielritchie committed on
Commit
1e934e4
·
verified ·
1 Parent(s): 288fa8b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +123 -52
app.py CHANGED
@@ -1,87 +1,158 @@
1
  import gradio as gr
2
  import os
 
3
  os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
4
  os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
5
  os.environ["SPACES_DISABLE_RELOAD"] = "1"
6
 
7
  from utils.presets import EMOTION_PRESETS
 
8
  from utils.drama import apply_drama
9
- from utils.color_model import infer_color, apply_cinematic_blend, render_color
10
  from utils.visualization import generate_scatter
11
 
12
 
13
- def process_emotion(emotion, drama):
14
- preset = EMOTION_PRESETS[emotion]
 
 
 
15
 
16
- raw = preset["raw"]
 
 
 
17
  target = preset["target"]
18
 
19
- cinematic = apply_drama(raw, target, drama)
 
20
 
21
- # 1️⃣ Get cinematic parameters from model
22
- model_output = infer_color(cinematic)
23
 
24
- # 2️⃣ Apply drama-aware blend toward white
25
- blended = apply_cinematic_blend(model_output, drama)
26
 
27
- # 3️⃣ Render HTML
28
- color_block = render_color(blended)
 
29
 
30
- scatter_fig = generate_scatter(raw, cinematic, emotion, drama)
31
 
32
  return (
33
- preset["text"],
34
- raw,
 
35
  cinematic,
36
- blended,
37
  color_block,
38
  scatter_fig
39
  )
40
 
41
 
42
- with gr.Blocks(title="VIBE-Eyes 👁️") as demo:
 
 
 
 
43
 
44
  gr.Markdown("# VIBE-Eyes 👁️")
45
- gr.Markdown("**Emotion becomes cinema.**")
46
 
47
- with gr.Row():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
 
49
- with gr.Column(scale=1):
 
 
 
 
 
 
50
 
51
- emotion = gr.Radio(
52
- choices=list(EMOTION_PRESETS.keys()),
53
- label="Select Emotion",
54
- value="Anger (Red)"
55
- )
 
 
56
 
57
- drama = gr.Slider(
58
- minimum=0,
59
- maximum=1.5,
60
- value=0,
61
- step=0.05,
62
- label="Drama (Cinematic Amplification)"
63
- )
 
 
 
 
 
 
 
 
64
 
65
- with gr.Column(scale=2):
66
 
67
- text_output = gr.Textbox(label="Preset Text")
 
 
68
 
69
- with gr.Row():
70
- raw_output = gr.JSON(label="Raw VAD+CC")
71
- cine_output = gr.JSON(label="Cinematic VAD+CC")
72
 
73
- rgb_output = gr.JSON(label="RGB + E + I")
74
- color_display = gr.HTML(label="Rendered Color")
75
 
76
- scatter_output = gr.Plot(label="Valence–Arousal Space")
 
 
77
 
78
- emotion.change(
79
- fn=process_emotion,
80
- inputs=[emotion, drama],
81
  outputs=[
82
- text_output,
83
- raw_output,
84
- cine_output,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
  rgb_output,
86
  color_display,
87
  scatter_output
@@ -89,23 +160,23 @@ with gr.Blocks(title="VIBE-Eyes 👁️") as demo:
89
  )
90
 
91
  drama.change(
92
- fn=process_emotion,
93
- inputs=[emotion, drama],
94
  outputs=[
95
- text_output,
96
- raw_output,
97
- cine_output,
 
98
  rgb_output,
99
  color_display,
100
  scatter_output
101
  ]
102
  )
103
 
 
104
  demo.launch(
105
  server_name="0.0.0.0",
106
  server_port=7860,
107
- ssr_mode=False,
108
  show_error=True,
109
  debug=False
110
  )
111
-
 
1
  import gradio as gr
2
  import os
3
+
4
  os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
5
  os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
6
  os.environ["SPACES_DISABLE_RELOAD"] = "1"
7
 
8
  from utils.presets import EMOTION_PRESETS
9
+ from utils.passion import apply_passion
10
  from utils.drama import apply_drama
11
+ from utils.color_model import infer_color, render_color
12
  from utils.visualization import generate_scatter
13
 
14
 
15
+ # ------------------------------------------------------------
16
+ # Fast Loop Processing (Edge Simulation)
17
+ # ------------------------------------------------------------
18
+
19
+ def run_pipeline(preset_name, passion, drama):
20
 
21
+ preset = EMOTION_PRESETS[preset_name]
22
+
23
+ text = preset["text"]
24
+ natural = preset["raw"]
25
  target = preset["target"]
26
 
27
+ # Edge Phase 1 — Natural Extraction
28
+ extracted = natural
29
 
30
+ # Edge Phase 2 Passion Amplification
31
+ amplified = apply_passion(extracted, passion)
32
 
33
+ # Edge Phase 3 Cinematic Alignment
34
+ cinematic = apply_drama(amplified, target, drama)
35
 
36
+ # Embedded Phase — Color Model
37
+ color_params = infer_color(cinematic)
38
+ color_block = render_color(color_params)
39
 
40
+ scatter_fig = generate_scatter(extracted, cinematic, preset_name, drama)
41
 
42
  return (
43
+ text,
44
+ extracted,
45
+ amplified,
46
  cinematic,
47
+ color_params,
48
  color_block,
49
  scatter_fig
50
  )
51
 
52
 
53
+ # ------------------------------------------------------------
54
+ # UI
55
+ # ------------------------------------------------------------
56
+
57
+ with gr.Blocks(title="VIBE-Eyes — Edge Emotional Intelligence") as demo:
58
 
59
  gr.Markdown("# VIBE-Eyes 👁️")
60
+ gr.Markdown("## Edge Emotional Intelligence for Robotics")
61
 
62
+ # --------------------------------------------------------
63
+ # SECTION 1 — Robot Speech
64
+ # --------------------------------------------------------
65
+
66
+ gr.Markdown("### 🗣 Robot Speech")
67
+
68
+ preset_selector = gr.Radio(
69
+ choices=list(EMOTION_PRESETS.keys()),
70
+ label="Select Transcript Sample",
71
+ value=list(EMOTION_PRESETS.keys())[0]
72
+ )
73
+
74
+ transcript_output = gr.Textbox(
75
+ label="Transcript",
76
+ interactive=False
77
+ )
78
+
79
+ gr.Markdown("---")
80
+
81
+ # --------------------------------------------------------
82
+ # SECTION 2 — Edge Processing
83
+ # --------------------------------------------------------
84
+
85
+ gr.Markdown("### ⚡ Edge Processing — NVIDIA Jetson Orin Nano")
86
 
87
+ passion = gr.Slider(
88
+ minimum=0.0,
89
+ maximum=3.0,
90
+ value=2.25,
91
+ step=0.1,
92
+ label="Passion (Radial Emotional Amplification)"
93
+ )
94
 
95
+ drama = gr.Slider(
96
+ minimum=0.0,
97
+ maximum=1.5,
98
+ value=0.65,
99
+ step=0.05,
100
+ label="Drama (Cinematic Alignment)"
101
+ )
102
 
103
+ with gr.Row():
104
+ natural_output = gr.JSON(label="Natural VAD+CC (Extraction)")
105
+ amplified_output = gr.JSON(label="After Passion (Radial Gain)")
106
+ cinematic_output = gr.JSON(label="After Drama (Cinematic Alignment)")
107
+
108
+ scatter_output = gr.Plot(label="Valence–Arousal Projection")
109
+
110
+ gr.Markdown(
111
+ """
112
+ *Note:*
113
+ In the full hardware system, a slow baseline loop (Nemotron via Ollama)
114
+ continuously recalibrates the emotional baseline in parallel.
115
+ This demo focuses on the real-time fast loop for clarity.
116
+ """
117
+ )
118
 
119
+ gr.Markdown("---")
120
 
121
+ # --------------------------------------------------------
122
+ # SECTION 3 — Embedded Display Module
123
+ # --------------------------------------------------------
124
 
125
+ gr.Markdown("### 💡 Embedded Display Module")
 
 
126
 
127
+ rgb_output = gr.JSON(label="Color Model Output (RGB + Expressive Parameters)")
128
+ color_display = gr.HTML(label="Rendered Expression")
129
 
130
+ # --------------------------------------------------------
131
+ # Bind
132
+ # --------------------------------------------------------
133
 
134
+ preset_selector.change(
135
+ fn=run_pipeline,
136
+ inputs=[preset_selector, passion, drama],
137
  outputs=[
138
+ transcript_output,
139
+ natural_output,
140
+ amplified_output,
141
+ cinematic_output,
142
+ rgb_output,
143
+ color_display,
144
+ scatter_output
145
+ ]
146
+ )
147
+
148
+ passion.change(
149
+ fn=run_pipeline,
150
+ inputs=[preset_selector, passion, drama],
151
+ outputs=[
152
+ transcript_output,
153
+ natural_output,
154
+ amplified_output,
155
+ cinematic_output,
156
  rgb_output,
157
  color_display,
158
  scatter_output
 
160
  )
161
 
162
  drama.change(
163
+ fn=run_pipeline,
164
+ inputs=[preset_selector, passion, drama],
165
  outputs=[
166
+ transcript_output,
167
+ natural_output,
168
+ amplified_output,
169
+ cinematic_output,
170
  rgb_output,
171
  color_display,
172
  scatter_output
173
  ]
174
  )
175
 
176
+
177
  demo.launch(
178
  server_name="0.0.0.0",
179
  server_port=7860,
 
180
  show_error=True,
181
  debug=False
182
  )