st192011 committed on
Commit 4f85db7 · verified · 1 Parent(s): 69457ee

Update app.py

Files changed (1)
  1. app.py +89 -112
app.py CHANGED
@@ -6,36 +6,30 @@ import random
  import os
  from transformers import AutoTokenizer, AutoModelForSequenceClassification

- # --- 1. LOAD ARTIFACTS ---
  PKG_PATH = "neuro_semantic_package.pt"

- print("🚀 System Startup: Loading Artifacts...")
  if not os.path.exists(PKG_PATH):
-     # Fallback for local testing if file isn't in root
-     POSSIBLE_PATHS = [
          "neuro_semantic_package.pt",
          "/content/drive/MyDrive/Brain2Text_Project/demo_research_v2/neuro_semantic_package.pt"
      ]
-     for p in POSSIBLE_PATHS:
          if os.path.exists(p):
              PKG_PATH = p
              break
-
-     if not os.path.exists(PKG_PATH):
-         raise FileNotFoundError(f"CRITICAL: '{PKG_PATH}' missing. Please upload the .pt file.")

- # Load the "Black Box" package
- # map_location='cpu' ensures it runs on basic HF spaces without GPU if needed
- PKG = torch.load(PKG_PATH, map_location="cpu", weights_only=False)
  DATA = PKG['data']
- MODELS = PKG['models'] # The Projectors
- MATRIX = PKG['matrix'] # Pre-calculated Accuracy Table
- MAPPING = PKG['mapping_key'] # Secret Mapping

- # Inverse mapping (Alias -> Real Sub)
- ALIAS_TO_REAL = {v: k for k, v in MAPPING.items()}
-
- # Load Decoder
  print("🤖 Loading RoBERTa-GoEmotions...")
  MODEL_NAME = "SamLowe/roberta-base-go_emotions"
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
@@ -43,110 +37,107 @@ classifier = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
  classifier.eval()
  id2label = classifier.config.id2label

- # --- 2. LOGIC FUNCTIONS ---

- def get_sentence_options(subject_name):
-     # Return available sentences for the selected subject
-     choices = DATA[subject_name]['Text']
-     # Pick a random one as default to encourage exploration
-     default = random.choice(choices)
-     return gr.Dropdown(choices=choices, value=default)

- def get_warning_status(subject, projector_alias):
-     """Checks for Data Leakage"""
-     clean_alias = projector_alias.split(" ")[1]
-     source_subject = ALIAS_TO_REAL.get(clean_alias)
-
-     if source_subject == subject:
-         return (
-             "⚠️ **WARNING: DATA LEAKAGE DETECTED**\n\n"
-             f"The selected Projector ({projector_alias}) includes data from Subject {subject} in its training set.\n"
-             "Results will be artificially high (Self-Test). For valid research verification, please select a different Projector."
-         )
-     else:
-         return "✅ **VALID ZERO-SHOT CONFIGURATION**\n\nTarget Subject was NOT seen during Projector training."
-
- def get_historical_accuracy(subject, projector_alias):
-     """Retrieves pre-calculated accuracy"""
-     try:
-         acc = MATRIX.loc[projector_alias, subject]
-         return f"**Historical Compatibility:** {acc}"
-     except:
-         return "**Historical Compatibility:** N/A"

- def decode_neuro_semantics(subject, projector_alias, text):
      # 1. Fetch Data
      try:
          idx = DATA[subject]['Text'].index(text)
          eeg_input = DATA[subject]['X'][idx].reshape(1, -1)
      except ValueError:
-         return pd.DataFrame(), "Error: Data point not found."

-     # 2. Project (EEG -> Vector)
-     proj_model = MODELS[projector_alias]
-     predicted_vector = proj_model.predict(eeg_input)
-     tensor_vec = torch.tensor(predicted_vector).float()
-
-     # 3. Decode (Vector -> Emotions)
-     with torch.no_grad():
-         # Brain Path
-         x = classifier.classifier.dense(tensor_vec.unsqueeze(1))
-         x = torch.tanh(x)
-         logits_b = classifier.classifier.out_proj(x)
-         probs_brain = torch.sigmoid(logits_b).squeeze().numpy()

-         # Text Path (Ground Truth)
          inputs = tokenizer(text, return_tensors="pt")
          logits_t = classifier(**inputs).logits
          probs_text = torch.sigmoid(logits_t).squeeze().numpy()

-     # 4. Rank & Format
-     top3_b = np.argsort(probs_brain)[::-1][:3]
      top2_t = np.argsort(probs_text)[::-1][:2]

-     # Check Match (Top-1 Brain vs Top-2 Text)
      brain_top1 = id2label[top3_b[0]]
      text_top2 = [id2label[i] for i in top2_t]

      match_icon = "✅" if brain_top1 in text_top2 else "❌"

-     # Build Result Table for ONE sentence
-     # We display the probabilities nicely
-     brain_str = ", ".join([f"{id2label[i]} ({probs_brain[i]:.2f})" for i in top3_b])
      text_str = ", ".join([f"{id2label[i]} ({probs_text[i]:.2f})" for i in top2_t])

-     df = pd.DataFrame([{
          "Sentence Stimulus": text,
          "Text Ground Truth (Top 2)": text_str,
          "Brain Decoding (Top 3)": brain_str,
          "Match": match_icon
-     }])
-
-     return df
 
- def run_batch_analysis(subject, projector_alias):
-     # Runs 5 random samples for robust demo
      subject_data = DATA[subject]
      total_indices = list(range(len(subject_data['Text'])))
-     # Sample up to 5 sentences
-     selected_indices = random.sample(total_indices, min(5, len(total_indices)))

-     results = []

      for idx in selected_indices:
          txt = subject_data['Text'][idx]
-         df = decode_neuro_semantics(subject, projector_alias, txt)
-         results.append(df)

-     final_df = pd.concat(results)

-     # Calculate Batch Accuracy
-     acc = (final_df["Match"] == "✅").mean() * 100
-     return final_df, f"**Batch Accuracy:** {acc:.1f}%"
-
- # --- 3. UI LAYOUT ---

- # Formatted Report Text
  REPORT_TEXT = """
  ### 1. Abstract
  This interface demonstrates a **Brain-Computer Interface (BCI)** capable of decoding high-level semantic information directly from non-invasive EEG signals. By aligning biological neural activity with the latent space of Large Language Models (LLMs), we show that it is possible to reconstruct the **emotional sentiment** of a sentence a user is reading, even if the model has **never seen that user's brain data before**.
@@ -163,8 +154,8 @@ Instead of training a simple classifier to predict "Positive" or "Negative" from

  ### 4. Experimental Setup: Strict Zero-Shot Evaluation
  To ensure scientific rigor, this demo adheres to a **Strict Leave-One-Group-Out** protocol.
- * **Disjoint Training:** The "Projectors" available in this demo were trained on a subset of subjects and validated on **completely different subjects**.
- * **No Calibration:** The model does not receive any calibration data from the target subject. It must rely on universal neural patterns shared across humans.

  ### 5. Interpretation of Results
  The demo compares two probability distributions for every sentence:
@@ -174,49 +165,35 @@ The demo compares two probability distributions for every sentence:
  **Accuracy Metric:** A prediction is considered correct if the **Top-1 Emotion** predicted from the Brain Signal matches either the **#1 or #2 Emotion** predicted from the Text.
  """
  with gr.Blocks(theme=gr.themes.Soft(), title="Neuro-Semantic Decoder") as demo:
      gr.Markdown("# 🧠 Neuro-Semantic Alignment: Zero-Shot Decoding")

      with gr.Tabs():
-         # --- TAB 1: INTERACTIVE DEMO ---
          with gr.TabItem("🔮 Interactive Demo"):
              with gr.Row():
                  with gr.Column(scale=1):
                      gr.Markdown("### ⚙️ Configuration")

-                     # Selectors
-                     sub_dropdown = gr.Dropdown(choices=list(DATA.keys()), value="ZKB", label="Select Target Subject (Data Source)")
-                     proj_dropdown = gr.Dropdown(choices=list(MODELS.keys()), value="Projector A", label="Select Projector (Decoding Model)")

-                     # Dynamic Info Boxes
-                     warning_box = gr.Markdown("✅ **VALID ZERO-SHOT CONFIGURATION**\n\nTarget Subject was NOT seen during Projector training.")
-                     history_box = gr.Markdown("**Historical Compatibility:** 40.0%")

-                     btn = gr.Button("🔮 Run Batch Analysis (5 Samples)", variant="primary")

                  with gr.Column(scale=2):
-                     gr.Markdown("### 📊 Decoding Results")
-
-                     # Output Table
                      result_table = gr.Dataframe(
                          headers=["Sentence Stimulus", "Text Ground Truth (Top 2)", "Brain Decoding (Top 3)", "Match"],
                          wrap=True
                      )
-                     batch_accuracy_box = gr.Markdown("**Batch Accuracy:** -")

-             # Interactivity
-             sub_dropdown.change(fn=get_warning_status, inputs=[sub_dropdown, proj_dropdown], outputs=warning_box)
-             sub_dropdown.change(fn=get_historical_accuracy, inputs=[sub_dropdown, proj_dropdown], outputs=history_box)
-
-             proj_dropdown.change(fn=get_warning_status, inputs=[sub_dropdown, proj_dropdown], outputs=warning_box)
-             proj_dropdown.change(fn=get_historical_accuracy, inputs=[sub_dropdown, proj_dropdown], outputs=history_box)
-
-             # Run
-             btn.click(
-                 fn=run_batch_analysis,
-                 inputs=[sub_dropdown, proj_dropdown],
-                 outputs=[result_table, batch_accuracy_box]
-             )

          # --- TAB 2: REPORT ---
          with gr.TabItem("📘 Project Report"):
 
  import os
  from transformers import AutoTokenizer, AutoModelForSequenceClassification

+ # --- 1. CONFIGURATION & LOAD ---
  PKG_PATH = "neuro_semantic_package.pt"

+ # Fallback for local/Colab testing
  if not os.path.exists(PKG_PATH):
+     # Try looking in common paths
+     possible_paths = [
          "neuro_semantic_package.pt",
          "/content/drive/MyDrive/Brain2Text_Project/demo_research_v2/neuro_semantic_package.pt"
      ]
+     for p in possible_paths:
          if os.path.exists(p):
              PKG_PATH = p
              break
+
+     if not os.path.exists(PKG_PATH):
+         raise FileNotFoundError("CRITICAL: 'neuro_semantic_package.pt' missing.")

+ print("🚀 Loading Neuro-Semantic Engine...")
+ PKG = torch.load(PKG_PATH, map_location="cpu", weights_only=False)
  DATA = PKG['data']
+ MODELS = PKG['models']
+ MAPPING = PKG['mapping_key']

  print("🤖 Loading RoBERTa-GoEmotions...")
  MODEL_NAME = "SamLowe/roberta-base-go_emotions"
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

  classifier.eval()
  id2label = classifier.config.id2label
 
+ # --- 2. HISTORICAL ACCURACY LOOKUP ---
+ # Based on the "Grand Benchmark" results we calculated
+ ENSEMBLE_ACCURACY = {
+     "ZAB": "49.0%", "ZDM": "45.7%", "ZDN": "48.3%",
+     "ZJS": "61.3%", "ZGW": "42.9%", "ZJN": "53.3%",
+     "ZKH": "58.3%", "ZKB": "48.9%", "ZPH": "45.2%",
+     "ZMG": "56.6%"
+ }

+ # --- 3. LOGIC FUNCTIONS ---

+ def get_status_info(subject):
+     acc = ENSEMBLE_ACCURACY.get(subject, "Unknown")
+     return f"**General Model Accuracy on this Subject:** {acc}\n(Baseline Random Chance: ~7.1%)"
 
+ def decode_neuro_semantics(subject, text):
      # 1. Fetch Data
      try:
          idx = DATA[subject]['Text'].index(text)
          eeg_input = DATA[subject]['X'][idx].reshape(1, -1)
      except ValueError:
+         return pd.DataFrame(), "Error"

+     # 2. DYNAMIC ENSEMBLE CONSTRUCTION
+     # Identify the projector belonging to this subject to EXCLUDE it
+     target_alias = MAPPING[subject]  # e.g. "A"
+     target_proj_name = f"Projector {target_alias}"
+
+     # Committee = All models EXCEPT the one trained on this subject
+     committee_names = [name for name in MODELS.keys() if name != target_proj_name]
+
+     all_probs = []
+
+     # 3. Ensemble Prediction Loop
+     for proj_name in committee_names:
+         proj_model = MODELS[proj_name]

+         # A. Project (EEG -> Vector)
+         vec_np = proj_model.predict(eeg_input)
+         tensor_vec = torch.tensor(vec_np).float()
+
+         # B. Decode (Vector -> Probs)
+         with torch.no_grad():
+             x = classifier.classifier.dense(tensor_vec.unsqueeze(1))
+             x = torch.tanh(x)
+             logits = classifier.classifier.out_proj(x)
+             probs = torch.sigmoid(logits).squeeze().numpy()
+         all_probs.append(probs)
+
+     # 4. Soft Voting (Average Probabilities)
+     avg_probs = np.mean(all_probs, axis=0)
+
+     # 5. Get Ground Truth (Text -> Probs)
+     with torch.no_grad():
          inputs = tokenizer(text, return_tensors="pt")
          logits_t = classifier(**inputs).logits
          probs_text = torch.sigmoid(logits_t).squeeze().numpy()

+     # 6. Rank & Format
+     top3_b = np.argsort(avg_probs)[::-1][:3]
      top2_t = np.argsort(probs_text)[::-1][:2]

      brain_top1 = id2label[top3_b[0]]
      text_top2 = [id2label[i] for i in top2_t]

      match_icon = "✅" if brain_top1 in text_top2 else "❌"

+     brain_str = ", ".join([f"{id2label[i]} ({avg_probs[i]:.2f})" for i in top3_b])
      text_str = ", ".join([f"{id2label[i]} ({probs_text[i]:.2f})" for i in top2_t])

+     return {
          "Sentence Stimulus": text,
          "Text Ground Truth (Top 2)": text_str,
          "Brain Decoding (Top 3)": brain_str,
          "Match": match_icon
+     }, match_icon
 
 
+ def run_analysis(subject):
+     # Runs 5 random samples
      subject_data = DATA[subject]
      total_indices = list(range(len(subject_data['Text'])))

+     # Ensure we don't crash if < 5 samples
+     count = min(5, len(total_indices))
+     selected_indices = random.sample(total_indices, count)
+
+     rows = []
+     matches = 0

      for idx in selected_indices:
          txt = subject_data['Text'][idx]
+         row_data, icon = decode_neuro_semantics(subject, txt)
+         rows.append(row_data)
+         if icon == "✅": matches += 1

+     df = pd.DataFrame(rows)
+     batch_acc = (matches / count) * 100

+     return df, f"**Current Batch Accuracy:** {batch_acc:.1f}%"
 
+ # --- 4. REPORT TEXT ---
  REPORT_TEXT = """
  ### 1. Abstract
  This interface demonstrates a **Brain-Computer Interface (BCI)** capable of decoding high-level semantic information directly from non-invasive EEG signals. By aligning biological neural activity with the latent space of Large Language Models (LLMs), we show that it is possible to reconstruct the **emotional sentiment** of a sentence a user is reading, even if the model has **never seen that user's brain data before**.

  ### 4. Experimental Setup: Strict Zero-Shot Evaluation
  To ensure scientific rigor, this demo adheres to a **Strict Leave-One-Group-Out** protocol.
+ * **Disjoint Training:** The decoding is performed by an **Ensemble** of projectors trained on *other* subjects. The target subject's data is strictly excluded from the training set of the active models.
+ * **No Calibration:** The model does not receive any calibration data from the target subject. It relies on universal neural patterns shared across humans.

  ### 5. Interpretation of Results
  The demo compares two probability distributions for every sentence:

  **Accuracy Metric:** A prediction is considered correct if the **Top-1 Emotion** predicted from the Brain Signal matches either the **#1 or #2 Emotion** predicted from the Text.
  """
+ # --- 5. UI LAYOUT ---
  with gr.Blocks(theme=gr.themes.Soft(), title="Neuro-Semantic Decoder") as demo:
      gr.Markdown("# 🧠 Neuro-Semantic Alignment: Zero-Shot Decoding")

      with gr.Tabs():
+         # --- TAB 1: DEMO ---
          with gr.TabItem("🔮 Interactive Demo"):
              with gr.Row():
                  with gr.Column(scale=1):
                      gr.Markdown("### ⚙️ Configuration")

+                     sub_dropdown = gr.Dropdown(choices=list(DATA.keys()), value="ZKB", label="Select Human Subject")

+                     # Info Box
+                     info_box = gr.Markdown(get_status_info("ZKB"))

+                     btn = gr.Button("🔮 Decode Brain Signals", variant="primary")

                  with gr.Column(scale=2):
+                     gr.Markdown("### 📊 Decoding Results (5 Random Samples)")
                      result_table = gr.Dataframe(
                          headers=["Sentence Stimulus", "Text Ground Truth (Top 2)", "Brain Decoding (Top 3)", "Match"],
                          wrap=True
                      )
+                     batch_acc_box = gr.Markdown("**Current Batch Accuracy:** -")

+             # Events
+             sub_dropdown.change(fn=get_status_info, inputs=sub_dropdown, outputs=info_box)
+             btn.click(run_analysis, inputs=sub_dropdown, outputs=[result_table, batch_acc_box])

          # --- TAB 2: REPORT ---
          with gr.TabItem("📘 Project Report"):