danielritchie commited on
Commit
9191c48
·
1 Parent(s): 42b7fa0
app.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ from utils.presets import EMOTION_PRESETS
4
+ from utils.drama import apply_drama
5
+ from utils.color_model import infer_color, scale_rgb, render_color
6
+ from utils.visualization import generate_scatter
7
+
8
+
9
def process_emotion(emotion, drama):
    """Run the full pipeline for one emotion selection.

    Looks up the preset for `emotion`, blends its raw affect vector toward
    the cinematic target by factor `drama`, infers a color from the blended
    vector, and builds the scatter figure.

    Returns a 6-tuple matching the gradio outputs:
    (preset text, raw dict, cinematic dict, scaled RGB dict, HTML swatch, figure).
    """
    preset = EMOTION_PRESETS[emotion]
    raw, target = preset["raw"], preset["target"]

    # Blend raw -> target by the drama factor.
    cinematic = apply_drama(raw, target, drama)

    # Infer color from the blended vector, then scale to displayable 0-255 ints.
    scaled = scale_rgb(infer_color(cinematic))
    color_block = render_color(scaled)

    scatter_fig = generate_scatter(raw, cinematic, emotion, drama)

    return (
        preset["text"],
        raw,
        cinematic,
        scaled,
        color_block,
        scatter_fig,
    )
31
+
32
+
33
with gr.Blocks(title="VIBE-Eyes 👁️") as demo:

    gr.Markdown("# VIBE-Eyes 👁️")
    gr.Markdown("**Emotion becomes cinema.**")

    with gr.Row():

        with gr.Column(scale=1):
            # Input controls: which preset, and how hard to push it
            # toward its cinematic target.
            emotion = gr.Radio(
                choices=list(EMOTION_PRESETS.keys()),
                label="Select Emotion",
                value="Anger (Red)"
            )

            drama = gr.Slider(
                minimum=0,
                maximum=1.5,
                value=0,
                step=0.05,
                label="Drama (Cinematic Amplification)"
            )

        with gr.Column(scale=2):
            # Output widgets, in the same order process_emotion returns them.
            text_output = gr.Textbox(label="Preset Text")

            with gr.Row():
                raw_output = gr.JSON(label="Raw VAD+CC")
                cine_output = gr.JSON(label="Cinematic VAD+CC")

            rgb_output = gr.JSON(label="RGB + E + I")
            color_display = gr.HTML(label="Rendered Color")

            scatter_output = gr.Plot(label="Valence–Arousal Space")

    # Both controls trigger the same pipeline. Share one output list so the
    # two event bindings cannot drift apart (previously duplicated verbatim).
    pipeline_outputs = [
        text_output,
        raw_output,
        cine_output,
        rgb_output,
        color_display,
        scatter_output,
    ]

    for control in (emotion, drama):
        control.change(
            fn=process_emotion,
            inputs=[emotion, drama],
            outputs=pipeline_outputs,
        )

demo.launch()
96
+
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
gradio
torch
transformers
matplotlib
numpy
datasets
scikit-learn
utils/cinematic.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from datasets import load_dataset
3
+ from sklearn.metrics.pairwise import cosine_similarity
4
+
5
# Load the cinematic mood palette once at import time and precompute the
# anchor matrix (rows of V/A/D/Cx/Co) plus the parallel list of anchor names.
palette_dataset = load_dataset("danielritchie/cinematic-mood-palette")["train"]

palette_names = [row.get("name", "unknown") for row in palette_dataset]
palette_vectors = np.array(
    [[row["V"], row["A"], row["D"], row["Cx"], row["Co"]] for row in palette_dataset]
)
15
+
16
+
17
def nearest_palette_vector(raw_vad):
    """Find the palette anchor most similar to `raw_vad`.

    Similarity is cosine similarity over the (V, A, D, Cx, Co) components.
    Returns {"vector": {component: value}, "name": anchor_name}.
    """
    keys = ["V", "A", "D", "Cx", "Co"]
    query = np.array([[raw_vad[k] for k in keys]])

    scores = cosine_similarity(query, palette_vectors)[0]
    best = int(np.argmax(scores))

    return {
        "vector": dict(zip(keys, palette_vectors[best])),
        "name": palette_names[best],
    }
33
+
34
+
35
def amplify_with_palette(raw, drama):
    """Pull `raw` toward its nearest palette anchor by factor `drama`.

    Returns (amplified dict with float values, anchor name).
    """
    match = nearest_palette_vector(raw)
    anchor_vec = match["vector"]

    amplified = {}
    for key, value in raw.items():
        # Linear blend: drama=0 keeps raw, drama=1 lands on the anchor.
        amplified[key] = float(value + drama * (anchor_vec[key] - value))

    return amplified, match["name"]
utils/color_model.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from transformers import AutoModel
3
+
4
# Load the VAD+CC -> color model once at import time.
# NOTE(review): from_pretrained downloads from the Hugging Face Hub on first
# run — module import requires network access. Confirm this is acceptable.
model = AutoModel.from_pretrained("danielritchie/vibe-color-model")
# Inference mode: disables dropout / batch-norm updates.
model.eval()
6
+
7
+
8
def infer_color(vad):
    """Run the color model on a VAD+CC dict.

    Builds a 1x5 float32 batch in (V, A, D, Cx, Co) order, runs the model
    without gradients, and returns the five outputs as {"R","G","B","E","I"}.
    """
    features = [vad[k] for k in ("V", "A", "D", "Cx", "Co")]
    batch = torch.tensor([features], dtype=torch.float32)

    with torch.no_grad():
        prediction = model(batch)

    # First (only) row of the batch, unpacked into named channels.
    return dict(zip(("R", "G", "B", "E", "I"), prediction[0].tolist()))
29
+
30
+
31
def scale_rgb(rgb):
    """Convert unit-range R/G/B to clamped 0-255 ints; E and I pass through."""

    def to_byte(channel):
        # Scale to 0-255 and clamp before truncating to int.
        return int(max(0, min(255, channel * 255)))

    scaled = {name: to_byte(rgb[name]) for name in ("R", "G", "B")}
    scaled["E"] = rgb["E"]
    scaled["I"] = rgb["I"]
    return scaled
39
+
40
+
41
def render_color(rgb):
    """Return an HTML swatch <div> filled with the given R/G/B color."""
    fill = f"rgb({rgb['R']},{rgb['G']},{rgb['B']})"
    return f"""
    <div style="
        width:100%;
        height:240px;
        border-radius:18px;
        background: {fill};
        box-shadow: 0px 6px 32px rgba(0,0,0,0.25);
        transition: all 0.3s ease-in-out;
    "></div>
    """
52
+
utils/drama.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
def apply_drama(raw, target, drama):
    """Linearly blend each component of `raw` toward `target` by `drama`.

    drama=0 returns raw values unchanged; drama=1 lands on target.
    """
    blended = {}
    for key, base in raw.items():
        blended[key] = base + drama * (target[key] - base)
    return blended
6
+
utils/presets.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Emotion presets for the VIBE-Eyes demo. Each UI label maps to:
#   "text"   - sample sentence shown for the emotion
#   "raw"    - baseline 5-component affect vector
#   "target" - exaggerated "cinematic" anchor that the drama slider blends toward
# V and A are plotted as Valence and Arousal elsewhere in the app;
# D/Cx/Co presumably dominance/complexity/(Co?) per the VAD model — TODO confirm.
EMOTION_PRESETS = {

    "Anger (Red)": {
        "text": "Stop. I can't tolerate this anymore.",
        "raw": {"V":0.15,"A":0.80,"D":0.75,"Cx":0.70,"Co":0.80},
        "target": {"V":0.05,"A":0.95,"D":0.90,"Cx":0.90,"Co":0.95},
    },

    "Love (Pink)": {
        "text": "I feel close to you. There is warmth between us.",
        "raw": {"V":0.85,"A":0.65,"D":0.80,"Cx":0.60,"Co":0.75},
        "target": {"V":0.95,"A":0.75,"D":0.95,"Cx":0.70,"Co":0.70},
    },

    "Sadness (Blue)": {
        "text": "Everything feels heavy and distant.",
        "raw": {"V":0.30,"A":0.35,"D":0.15,"Cx":0.45,"Co":0.35},
        "target": {"V":0.20,"A":0.30,"D":0.00,"Cx":0.40,"Co":0.20},
    },

    "Anxiety (Off Green)": {
        "text": "Something feels wrong. I can't settle.",
        "raw": {"V":0.35,"A":0.85,"D":0.35,"Cx":0.80,"Co":0.85},
        "target": {"V":0.30,"A":1.00,"D":0.20,"Cx":1.00,"Co":1.00},
    },

    "Neutral Calm (Gray)": {
        "text": "Everything is stable. Nothing is pushing or pulling.",
        "raw": {"V":0.55,"A":0.45,"D":0.55,"Cx":0.55,"Co":0.60},
        "target": {"V":0.50,"A":0.50,"D":0.50,"Cx":0.60,"Co":0.65},
    }
}
33
+
utils/visualization.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import matplotlib.pyplot as plt
2
+ from utils.presets import EMOTION_PRESETS
3
+
4
+
5
def generate_scatter(raw, cinematic, emotion, drama):
    """Plot raw vs. cinematic points in Valence-Arousal space.

    Args:
        raw: dict with at least "V" and "A", presumably in [0, 1] (axes are
            clamped to that range) — TODO confirm.
        cinematic: drama-blended dict with the same keys.
        emotion: label used in the plot title.
        drama: blend factor, shown rounded in the title.

    Returns:
        The matplotlib Figure for the caller (gradio) to render.
    """
    fig, ax = plt.subplots(figsize=(6, 6))

    # Faint gray anchors showing every preset's cinematic target.
    for preset in EMOTION_PRESETS.values():
        t = preset["target"]
        ax.scatter(t["V"], t["A"], alpha=0.2, s=100, color="gray")

    # Raw point
    ax.scatter(raw["V"], raw["A"], s=200, color="blue", label="Raw")

    # Cinematic point
    ax.scatter(cinematic["V"], cinematic["A"], s=200, color="red", label="Cinematic")

    # Arrow from raw to cinematic; its length grows with drama.
    ax.arrow(
        raw["V"],
        raw["A"],
        cinematic["V"] - raw["V"],
        cinematic["A"] - raw["A"],
        head_width=0.02,
        length_includes_head=True,
        color="black",
    )

    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_xlabel("Valence")
    ax.set_ylabel("Arousal")
    ax.set_title(f"{emotion} | Drama: {round(drama, 2)}")
    ax.legend()

    fig.tight_layout()
    # Unregister from pyplot so repeated UI callbacks don't accumulate open
    # figures (memory leak + "More than 20 figures" warning). The Figure
    # object itself stays valid and renderable after plt.close.
    plt.close(fig)
    return fig
39
+