Naresh4243 committed on
Commit
a931383
Β·
verified Β·
1 Parent(s): 21aba67

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ models/best_eeg_model_200.keras filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.10-slim
2
+
3
+ WORKDIR /app
4
+
5
+ RUN apt-get update && apt-get install -y --no-install-recommends \
6
+ libgl1 libglib2.0-0 && rm -rf /var/lib/apt/lists/*
7
+
8
+ COPY requirements-cloud.txt requirements.txt
9
+ RUN pip install --no-cache-dir -r requirements.txt
10
+
11
+ COPY eeg_server.py .
12
+ COPY models/ models/
13
+
14
+ ENV MODEL_DIR=/app/models
15
+ ENV PORT=7860
16
+ ENV TRANSFORMERS_CACHE=/app/.cache
17
+ ENV HF_HOME=/app/.cache
18
+
19
+ EXPOSE 7860
20
+
21
+ CMD ["uvicorn", "eeg_server:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -1,10 +1,8 @@
1
- ---
2
- title: Neuroguard
3
- emoji: πŸ“ˆ
4
- colorFrom: green
5
- colorTo: red
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+ ---
2
+ title: NeuroGuard Inference Server
3
+ emoji: "\U0001F9E0"
4
+ colorFrom: blue
5
+ colorTo: purple
6
+ sdk: docker
7
+ app_port: 7860
8
+ ---
 
 
eeg_server.py ADDED
@@ -0,0 +1,697 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ NeuroGuard Inference Server β€” EEG + MRI
3
+
4
+ Loads trained models and serves predictions via HTTP.
5
+ Run with: python eeg_server.py
6
+
7
+ EEG: Keras CNN for motor imagery classification from band-power data.
8
+ MRI: ViT classifier from Hugging Face Hub + GradCAM tumor localization.
9
+ """
10
+
11
+ import base64
12
+ import io
13
+ import os
14
+ from contextlib import asynccontextmanager
15
+
16
+ import cv2
17
+ import numpy as np
18
+ from fastapi import FastAPI, File, UploadFile
19
+ from fastapi.middleware.cors import CORSMiddleware
20
+ from PIL import Image
21
+ from pydantic import BaseModel
22
+ from sklearn.preprocessing import StandardScaler
23
+
24
+ # ── Path / port configuration ────────────────────────────────────
25
+ # Environment variables let the same code run locally and in Docker.
26
+ # Locally the defaults resolve to the original paths; in the cloud
27
+ # container MODEL_DIR points to /app/models and PORT to 7860.
28
+ MODEL_DIR = os.environ.get("MODEL_DIR", "")
29
+ PORT = int(os.environ.get("PORT", "5000"))
30
+
31
+ # ── EEG config ──────────────────────────────────────────────────
32
+ EEG_MODEL_PATH = os.environ.get(
33
+ "EEG_MODEL_PATH",
34
+ os.path.join("Brain", "best_eeg_model_200.keras") if not MODEL_DIR
35
+ else os.path.join(MODEL_DIR, "best_eeg_model_200.keras"),
36
+ )
37
+ WINDOW_SIZE = 512
38
+ STRIDE = 32
39
+ NUM_FEATURES = 20
40
+ NUM_CLASSES = 3
41
+ CLASS_LABELS = {0: "Left Motor Imagery", 1: "Right Motor Imagery", 2: "Relaxed State"}
42
+ CHANNEL_NAMES = ["TP9", "AF7", "AF8", "TP10"]
43
+ BAND_NAMES = ["delta", "theta", "alpha", "beta", "gamma"]
44
+
45
+ # ── MRI config ──────────────────────────────────────────────────
46
+ MRI_HF_MODEL = os.environ.get(
47
+ "MRI_HF_MODEL", "itistamtran/vit_brain_tumor_multiclass_v2"
48
+ )
49
+ _HF_LABEL_MAP = {
50
+ "Glioma": "glioma",
51
+ "Meningioma": "meningioma",
52
+ "Pituitary Tumor": "pituitary",
53
+ "No Tumor": "noTumor",
54
+ "Unknown": "unknown",
55
+ "glioma": "glioma",
56
+ "meningioma": "meningioma",
57
+ "pituitary": "pituitary",
58
+ "no_tumor": "noTumor",
59
+ "noTumor": "noTumor",
60
+ "unknown": "unknown",
61
+ }
62
+
63
+ # ── Global model handles ───────────────────────────────────────
64
+ eeg_model = None
65
+ mri_classifier = None # dict: {"model", "processor", "cam", "labels"}
66
+
67
+
68
def load_eeg_model():
    """Load the Keras EEG classifier once and cache it in the module global.

    Subsequent calls are no-ops. TensorFlow is imported lazily so it only
    loads when the EEG path is actually exercised.
    """
    global eeg_model
    if eeg_model is None:
        import tensorflow as tf
        print(f"Loading EEG Keras model from {EEG_MODEL_PATH}...")
        eeg_model = tf.keras.models.load_model(EEG_MODEL_PATH)
        print(f"EEG model loaded. Input: {eeg_model.input_shape}, Output: {eeg_model.output_shape}")
78
def load_mri_classifier():
    """Lazy-load the HF ViT classifier + GradCAM on first MRI request.

    Deferring the PyTorch import avoids the Windows paging-file crash that
    occurs when TensorFlow and PyTorch both load at startup.
    """
    global mri_classifier
    if mri_classifier is not None:
        return

    import torch
    from transformers import AutoImageProcessor, AutoModelForImageClassification
    from pytorch_grad_cam import GradCAM

    print(f"[MRI] Downloading/loading ViT classifier from {MRI_HF_MODEL}...")
    vit = AutoModelForImageClassification.from_pretrained(MRI_HF_MODEL)
    preproc = AutoImageProcessor.from_pretrained(MRI_HF_MODEL)
    vit.eval()

    # Translate the checkpoint's label names to the canonical frontend keys.
    labels = {int(idx): _HF_LABEL_MAP.get(name, name)
              for idx, name in vit.config.id2label.items()}
    print(f"[MRI] Label mapping: {labels}")

    # GradCAM on a ViT: the token sequence must be reshaped back into a
    # 2-D feature map before CAM can treat it like a CNN activation.
    def reshape_transform(tensor, height=14, width=14):
        # Drop the CLS token, then (batch, H, W, C) -> (batch, C, H, W).
        spatial = tensor[:, 1:, :].reshape(
            tensor.size(0), height, width, tensor.size(2)
        )
        return spatial.permute(0, 3, 1, 2)

    cam = GradCAM(
        model=vit,
        target_layers=[vit.vit.encoder.layer[-1].layernorm_before],
        reshape_transform=reshape_transform,
    )

    mri_classifier = {
        "model": vit,
        "processor": preproc,
        "cam": cam,
        "labels": labels,
        "torch": torch,
    }
    print(f"[MRI] ViT classifier + GradCAM ready ({sum(p.numel() for p in vit.parameters())/1e6:.1f}M params)")
122
+
123
@asynccontextmanager
async def lifespan(app: FastAPI):
    """App lifespan: eagerly load the EEG model at startup.

    The MRI classifier lazy-loads on its first request instead, because
    importing TensorFlow and PyTorch together at startup can crash.
    """
    load_eeg_model()
    yield
129
+
130
# Application object plus permissive CORS so browser frontends on any
# origin can reach the API during development and from the hosted UI.
app = FastAPI(title="NeuroGuard Inference Server", version="2.0.0", lifespan=lifespan)

app.add_middleware(
    CORSMiddleware,
    allow_headers=["*"],
    allow_methods=["*"],
    allow_origins=["*"],
)
139
+
140
class EegRequest(BaseModel):
    """Incoming EEG payload: 20 band-power columns sampled over time."""
    channels: list[list[float]]
    sampling_rate: int = 256
    channel_names: list[str] | None = None


class PredictionItem(BaseModel):
    """Classification result for one 512-row sliding window."""
    window_index: int
    predicted_class: int
    label: str
    confidence: float


class EegResponse(BaseModel):
    """Full analysis payload returned by /api/eeg/analyze."""
    predictions: list[PredictionItem]
    band_powers: dict[str, float]
    channel_band_powers: dict[str, dict[str, float]]
    cognitive_metrics: dict[str, float]
    anomaly_score: float
    anomalies: list[dict]
    summary: str
    details: str
    is_placeholder: bool = False
    model_name: str = "NeuroGuard EEG CNN v2.0"
165
+
166
+ @app.get("/health")
167
+ def health():
168
+ return {
169
+ "status": "ok",
170
+ "model_loaded": eeg_model is not None,
171
+ "eeg_model_loaded": eeg_model is not None,
172
+ "mri_model_loaded": True, # always available (auto-downloads from HF Hub)
173
+ }
174
+
175
+
176
+ @app.post("/api/eeg/analyze", response_model=EegResponse)
177
+ def analyze_eeg(req: EegRequest):
178
+ load_eeg_model()
179
+
180
+ num_cols = len(req.channels)
181
+ num_rows = len(req.channels[0]) if num_cols > 0 else 0
182
+
183
+ print(f"[analyze] Received {num_cols} columns x {num_rows} time windows")
184
+
185
+ if num_cols != 20:
186
+ return _empty_response(f"Expected 20 band-power columns, got {num_cols}")
187
+
188
+ # Data arrives as 20 columns (band-power time series), transpose to (time, features)
189
+ data = np.array(req.channels, dtype=np.float32).T # shape: (num_rows, 20)
190
+ print(f"[analyze] Data shape after transpose: {data.shape}")
191
+ print(f"[analyze] Data stats β€” min: {data.min():.6f}, max: {data.max():.6f}, "
192
+ f"mean: {data.mean():.6f}, std: {data.std():.6f}")
193
+
194
+ # --- Step 1: Compute band powers BEFORE normalization ---
195
+ band_powers = _compute_band_powers_from_columns(data)
196
+ channel_band_powers = _compute_channel_band_powers_from_columns(data)
197
+
198
+ print(f"[analyze] Band powers: {band_powers}")
199
+
200
+ # --- Step 2: Clean NaN/Inf ---
201
+ col_means = np.nanmean(data, axis=0)
202
+ col_means = np.where(np.isfinite(col_means), col_means, 0.0)
203
+ for c in range(data.shape[1]):
204
+ mask = ~np.isfinite(data[:, c])
205
+ if mask.any():
206
+ data[mask, c] = col_means[c]
207
+
208
+ # --- Step 3: Z-score normalize (matches training pipeline) ---
209
+ scaler = StandardScaler()
210
+ data_normalized = scaler.fit_transform(data)
211
+
212
+ print(f"[analyze] Post-normalization stats β€” min: {data_normalized.min():.4f}, "
213
+ f"max: {data_normalized.max():.4f}, mean: {data_normalized.mean():.4f}")
214
+
215
+ # --- Step 3b: Repeat-pad if we have some data but less than 512 rows ---
216
+ if 64 <= data_normalized.shape[0] < WINDOW_SIZE:
217
+ original_len = data_normalized.shape[0]
218
+ reps = (WINDOW_SIZE // original_len) + 1
219
+ data_normalized = np.tile(data_normalized, (reps, 1))[:WINDOW_SIZE]
220
+ print(f"[analyze] Padded {original_len} rows to {data_normalized.shape[0]} via repeat-tiling")
221
+
222
+ # --- Step 4: Segment into windows of 512 ---
223
+ windows = []
224
+ for start in range(0, data_normalized.shape[0] - WINDOW_SIZE + 1, STRIDE):
225
+ windows.append(data_normalized[start:start + WINDOW_SIZE])
226
+
227
+ print(f"[analyze] Created {len(windows)} windows of size {WINDOW_SIZE}")
228
+
229
+ if not windows:
230
+ return _empty_response(
231
+ f"Not enough time windows ({num_rows}) for model. "
232
+ f"Need at least 64 band-power windows (~30s recording). Got {num_rows}."
233
+ )
234
+
235
+ # --- Step 5: Reshape for model: (batch, 512, 20, 1) ---
236
+ X = np.array(windows, dtype=np.float32).reshape(len(windows), WINDOW_SIZE, NUM_FEATURES, 1)
237
+ print(f"[analyze] Model input shape: {X.shape}")
238
+
239
+ # --- Step 6: Run inference ---
240
+ raw_preds = eeg_model.predict(X, verbose=0)
241
+ pred_classes = np.argmax(raw_preds, axis=1)
242
+ pred_confs = np.max(raw_preds, axis=1)
243
+
244
+ print(f"[analyze] Predictions β€” classes: {np.unique(pred_classes, return_counts=True)}")
245
+ print(f"[analyze] Mean confidence: {pred_confs.mean():.4f}")
246
+
247
+ predictions = []
248
+ for i in range(len(pred_classes)):
249
+ predictions.append(PredictionItem(
250
+ window_index=i,
251
+ predicted_class=int(pred_classes[i]),
252
+ label=CLASS_LABELS.get(int(pred_classes[i]), "Unknown"),
253
+ confidence=float(pred_confs[i]),
254
+ ))
255
+
256
+ # --- Step 7: Compute cognitive metrics from real band powers ---
257
+ cognitive = _compute_cognitive_metrics(band_powers, channel_band_powers, predictions)
258
+ anomalies = _detect_anomalies(band_powers, channel_band_powers, cognitive)
259
+ anomaly_score = _compute_anomaly_score(anomalies, cognitive)
260
+ summary = _build_summary(predictions, cognitive, anomaly_score)
261
+ details = _build_details(predictions, cognitive, band_powers)
262
+
263
+ return EegResponse(
264
+ predictions=predictions,
265
+ band_powers=band_powers,
266
+ channel_band_powers=channel_band_powers,
267
+ cognitive_metrics=cognitive,
268
+ anomaly_score=anomaly_score,
269
+ anomalies=anomalies,
270
+ summary=summary,
271
+ details=details,
272
+ )
273
+
274
+
275
def _empty_response(msg: str) -> EegResponse:
    """Build a neutral EegResponse whose summary/details carry *msg*."""
    neutral_metrics = {
        "relaxation_index": 50, "cognitive_engagement": 50,
        "hemispheric_balance": 0, "signal_stability": 100, "focus_level": 50,
    }
    return EegResponse(
        predictions=[],
        band_powers={"delta": 0, "theta": 0, "alpha": 0, "beta": 0, "gamma": 0},
        channel_band_powers={},
        cognitive_metrics=neutral_metrics,
        anomaly_score=0,
        anomalies=[],
        summary=msg,
        details=msg,
    )
290
+
291
def _compute_band_powers_from_columns(data):
    """Average band power across all channels from the 20-column layout.

    Columns: [Delta_TP9..TP10, Theta_TP9..TP10, Alpha_TP9..TP10,
    Beta_TP9..TP10, Gamma_TP9..TP10] — i.e. band-major, 4 channels each.
    """
    return {
        band: float(np.mean(data[:, idx * 4: idx * 4 + 4]))
        for idx, band in enumerate(BAND_NAMES)
    }
302
+
303
def _compute_channel_band_powers_from_columns(data):
    """Per-channel band powers from the band-major 20-column layout."""
    out = {}
    for ch_idx, ch_name in enumerate(CHANNEL_NAMES):
        out[ch_name] = {
            band: float(np.mean(data[:, band_idx * 4 + ch_idx]))
            for band_idx, band in enumerate(BAND_NAMES)
        }
    return out
316
+
317
+ def _compute_cognitive_metrics(bands, ch_bands, predictions):
318
+ delta = bands.get("delta", 0)
319
+ theta = bands.get("theta", 0)
320
+ alpha = bands.get("alpha", 0)
321
+ beta = bands.get("beta", 0)
322
+ gamma = bands.get("gamma", 0)
323
+ total = delta + theta + alpha + beta + gamma
324
+
325
+ # Relaxation: alpha dominance relative to total
326
+ relaxation = (alpha / total * 100) if total > 0 else 50
327
+
328
+ # Cognitive engagement: beta / (alpha + theta) ratio
329
+ eng_den = alpha + theta
330
+ engagement = min(100, (beta / eng_den * 100)) if eng_den > 0 else 50
331
+
332
+ # Focus: (beta + gamma) dominance
333
+ focus = ((beta + gamma) / total * 100) if total > 0 else 50
334
+
335
+ # Hemispheric balance from per-channel alpha + beta
336
+ left_power = sum(
337
+ ch_bands.get(c, {}).get("alpha", 0) + ch_bands.get(c, {}).get("beta", 0)
338
+ for c in ["TP9", "AF7"]
339
+ )
340
+ right_power = sum(
341
+ ch_bands.get(c, {}).get("alpha", 0) + ch_bands.get(c, {}).get("beta", 0)
342
+ for c in ["AF8", "TP10"]
343
+ )
344
+ total_lr = left_power + right_power
345
+ balance = (right_power - left_power) / total_lr if total_lr > 0 else 0
346
+
347
+ # Signal stability: how consistent are the model predictions?
348
+ transitions = 0
349
+ for i in range(1, len(predictions)):
350
+ if predictions[i].predicted_class != predictions[i - 1].predicted_class:
351
+ transitions += 1
352
+ stability = (1 - transitions / max(1, len(predictions) - 1)) * 100 if len(predictions) > 1 else 100
353
+
354
+ return {
355
+ "relaxation_index": max(0, min(100, relaxation)),
356
+ "cognitive_engagement": max(0, min(100, engagement)),
357
+ "hemispheric_balance": max(-1, min(1, balance)),
358
+ "signal_stability": max(0, min(100, stability)),
359
+ "focus_level": max(0, min(100, focus)),
360
+ }
361
+
362
+
363
+ def _detect_anomalies(bands, ch_bands, metrics):
364
+ anomalies = []
365
+ total = sum(bands.values())
366
+ if total <= 0:
367
+ return anomalies
368
+
369
+ delta_r = bands["delta"] / total
370
+ theta_r = bands["theta"] / total
371
+ alpha_r = bands["alpha"] / total
372
+ beta_r = bands["beta"] / total
373
+ gamma_r = bands["gamma"] / total
374
+
375
+ # Thresholds calibrated for consumer-grade EEG (Muse 2 dry electrodes).
376
+ # Clinical EEG uses tighter thresholds; Muse data inherently has more
377
+ # delta/theta due to electrode impedance and limited spatial resolution.
378
+
379
+ if delta_r > 0.55:
380
+ anomalies.append({
381
+ "type": "abnormalSlowing",
382
+ "description": f"Elevated delta power ({delta_r * 100:.1f}% of total). "
383
+ "May indicate drowsiness, fatigue, or poor electrode contact.",
384
+ "severity": min(1.0, (delta_r - 0.50) * 2.5),
385
+ "channel": "Global",
386
+ })
387
+
388
+ if alpha_r < 0.03:
389
+ anomalies.append({
390
+ "type": "reducedAlpha",
391
+ "description": f"Reduced alpha power ({alpha_r * 100:.1f}% of total). "
392
+ "Low alpha may indicate high alertness, anxiety, or "
393
+ "difficulty achieving a relaxed state.",
394
+ "severity": min(1.0, (0.05 - alpha_r) * 10),
395
+ "channel": "Global",
396
+ })
397
+
398
+ if beta_r > 0.45:
399
+ anomalies.append({
400
+ "type": "excessiveBeta",
401
+ "description": f"Elevated beta power ({beta_r * 100:.1f}% of total). "
402
+ "May indicate anxiety, stress, or excessive cognitive load.",
403
+ "severity": min(1.0, (beta_r - 0.40) * 2.5),
404
+ "channel": "Global",
405
+ })
406
+
407
+ if theta_r > 0.40:
408
+ anomalies.append({
409
+ "type": "abnormalSlowing",
410
+ "description": f"Elevated theta power ({theta_r * 100:.1f}% of total). "
411
+ "May indicate drowsiness, inattention, or emotional processing.",
412
+ "severity": min(1.0, (theta_r - 0.35) * 2.5),
413
+ "channel": "Global",
414
+ })
415
+
416
+ # Hemispheric asymmetry β€” Muse has limited spatial resolution,
417
+ # so mild asymmetry is common and not clinically significant.
418
+ bal = abs(metrics["hemispheric_balance"])
419
+ if bal > 0.35:
420
+ side = "left" if metrics["hemispheric_balance"] < 0 else "right"
421
+ anomalies.append({
422
+ "type": "asymmetry",
423
+ "description": f"Notable {side} hemispheric asymmetry (balance: "
424
+ f"{metrics['hemispheric_balance']:.2f}). With consumer EEG, "
425
+ "this may reflect electrode fit rather than a clinical finding.",
426
+ "severity": min(1.0, (bal - 0.30) * 2),
427
+ "channel": f"{'TP9/AF7' if side == 'left' else 'AF8/TP10'}",
428
+ })
429
+
430
+ # Per-channel anomalies β€” raised threshold for dry-electrode noise
431
+ for ch_name, ch_bp in ch_bands.items():
432
+ ch_total = sum(ch_bp.values())
433
+ if ch_total <= 0:
434
+ continue
435
+ ch_delta_r = ch_bp["delta"] / ch_total
436
+ if ch_delta_r > 0.70:
437
+ anomalies.append({
438
+ "type": "abnormalSlowing",
439
+ "description": f"Focal delta excess on {ch_name} ({ch_delta_r * 100:.1f}%). "
440
+ "May indicate poor electrode contact on this channel.",
441
+ "severity": min(1.0, (ch_delta_r - 0.65) * 3),
442
+ "channel": ch_name,
443
+ })
444
+
445
+ return anomalies
446
+
447
+
448
+ def _compute_anomaly_score(anomalies, metrics):
449
+ if not anomalies:
450
+ base = 5.0
451
+ else:
452
+ base = sum(a["severity"] * 15 for a in anomalies)
453
+ base += (100 - metrics["signal_stability"]) * 0.15
454
+ return max(0, min(100, base))
455
+
456
+
457
+ def _build_summary(predictions, metrics, anomaly_score):
458
+ if not predictions:
459
+ return "Insufficient data for classification."
460
+
461
+ class_counts = {}
462
+ for p in predictions:
463
+ class_counts[p.label] = class_counts.get(p.label, 0) + 1
464
+ dominant = max(class_counts, key=class_counts.get)
465
+ pct = class_counts[dominant] / len(predictions) * 100
466
+ avg_conf = sum(p.confidence for p in predictions) / len(predictions) * 100
467
+
468
+ parts = [f"Dominant state: {dominant} ({pct:.0f}% of recording, "
469
+ f"avg confidence {avg_conf:.0f}%)."]
470
+
471
+ relax = metrics["relaxation_index"]
472
+ engage = metrics["cognitive_engagement"]
473
+ focus = metrics["focus_level"]
474
+
475
+ if relax > 30:
476
+ parts.append(f"Alpha activity indicates moderate relaxation ({relax:.0f}/100).")
477
+ if engage > 40:
478
+ parts.append(f"Beta/theta ratio shows cognitive engagement ({engage:.0f}/100).")
479
+ if focus > 30:
480
+ parts.append(f"High-frequency activity suggests attentional focus ({focus:.0f}/100).")
481
+
482
+ if anomaly_score > 40:
483
+ parts.append(f"Some patterns warrant clinical attention (score: {anomaly_score:.0f}/100).")
484
+ elif anomaly_score > 15:
485
+ parts.append(f"Minor irregularities noted (score: {anomaly_score:.0f}/100).")
486
+ else:
487
+ parts.append(f"Brain activity appears within normal range (score: {anomaly_score:.0f}/100).")
488
+
489
+ return " ".join(parts)
490
+
491
+
492
def _build_details(predictions, metrics, bands):
    """Build the multi-section plain-text report: classification stats,
    state distribution, band-power breakdown, and cognitive metrics."""
    lines = ["=== Motor Imagery Classification ==="]
    lines.append(f"Total windows analyzed: {len(predictions)}")

    if predictions:
        avg_conf = sum(p.confidence for p in predictions) / len(predictions)
        lines.append(f"Average confidence: {avg_conf * 100:.1f}%")

    transitions = sum(
        1 for prev, cur in zip(predictions, predictions[1:])
        if prev.predicted_class != cur.predicted_class
    )
    lines.append(f"State transitions: {transitions}")

    tallies = {}
    for p in predictions:
        tallies[p.label] = tallies.get(p.label, 0) + 1
    lines.append("")
    lines.append("=== State Distribution ===")
    total = len(predictions) or 1  # guard against division by zero
    for label, count in sorted(tallies.items(), key=lambda kv: -kv[1]):
        lines.append(f" {label}: {count} windows ({count / total * 100:.1f}%)")

    lines.append("")
    lines.append("=== Band Power Distribution ===")
    bp_total = sum(bands.values())
    if bp_total > 0:
        for band in BAND_NAMES:
            pct = bands[band] / bp_total * 100
            lines.append(f" {band.capitalize():8s}: {bands[band]:.6f} ({pct:.1f}%)")

    lines.append("")
    lines.append("=== Cognitive Metrics ===")
    lines.append(f" Relaxation Index: {metrics['relaxation_index']:.1f}/100")
    lines.append(f" Cognitive Engagement: {metrics['cognitive_engagement']:.1f}/100")
    lines.append(f" Focus Level: {metrics['focus_level']:.1f}/100")
    lines.append(f" Signal Stability: {metrics['signal_stability']:.1f}/100")
    lines.append(f" Hemispheric Balance: {metrics['hemispheric_balance']:.3f}")

    return "\n".join(lines)
533
+
534
+ # ═══════════════════════════════════════════════════════════════
535
+ # MRI Brain Tumor Analysis
536
+ # ═══════════════════════════════════════════════════════════════
537
+
538
class MriResponse(BaseModel):
    """Response payload for /api/mri/analyze: classification plus an
    optional base64-encoded GradCAM overlay and area estimate."""
    classification: str
    confidence: float
    segmentation_image_base64: str | None = None
    tumor_area_percent: float | None = None
    region_scores: dict[str, float]
    summary: str
    details: str
    is_placeholder: bool = False
    model_name: str = "NeuroGuard MRI ViT + GradCAM (HF Hub)"
549
+
550
+ @app.post("/api/mri/analyze")
551
+ async def analyze_mri(image: UploadFile = File(...)):
552
+ from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
553
+
554
+ try:
555
+ load_mri_classifier()
556
+ except Exception as e:
557
+ print(f"[MRI] Failed to load classifier: {e}")
558
+ return MriResponse(
559
+ classification="unknown",
560
+ confidence=0,
561
+ region_scores={},
562
+ summary="MRI model could not be loaded. Check server logs.",
563
+ details=str(e),
564
+ is_placeholder=True,
565
+ )
566
+
567
+ clf = mri_classifier
568
+ model = clf["model"]
569
+ processor = clf["processor"]
570
+ cam = clf["cam"]
571
+ labels = clf["labels"]
572
+ torch = clf["torch"]
573
+
574
+ contents = await image.read()
575
+ print(f"[MRI] Received image: {image.filename}, {len(contents)} bytes")
576
+
577
+ nparr = np.frombuffer(contents, np.uint8)
578
+ img_bgr = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
579
+ if img_bgr is None:
580
+ return MriResponse(
581
+ classification="unknown",
582
+ confidence=0,
583
+ region_scores={},
584
+ summary="Could not decode image.",
585
+ details="The uploaded file could not be read as an image.",
586
+ )
587
+
588
+ img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
589
+ pil_img = Image.fromarray(img_rgb)
590
+ print(f"[MRI] Image decoded: {pil_img.size[0]}x{pil_img.size[1]}")
591
+
592
+ # ── ViT Classification ───────────────────────────────────────
593
+ inputs = processor(images=pil_img, return_tensors="pt")
594
+
595
+ with torch.no_grad():
596
+ outputs = model(**inputs)
597
+ logits = outputs.logits
598
+ probs = torch.nn.functional.softmax(logits, dim=1)[0]
599
+
600
+ class_idx = int(probs.argmax().item())
601
+ confidence = float(probs[class_idx].item())
602
+ classification = labels.get(class_idx, "unknown")
603
+
604
+ print(f"[MRI] ViT prediction: {classification} (class {class_idx}, conf {confidence:.3f})")
605
+ print(f"[MRI] Softmax: {probs.tolist()}")
606
+
607
+ region_scores = {}
608
+ for idx, prob_val in enumerate(probs.tolist()):
609
+ label = labels.get(idx, f"class_{idx}")
610
+ region_scores[label] = round(prob_val, 4)
611
+
612
+ # ── GradCAM Heatmap ──────────────────────────────────────────
613
+ seg_image_b64 = None
614
+ tumor_area_pct = None
615
+
616
+ try:
617
+ targets = [ClassifierOutputTarget(class_idx)]
618
+ grayscale_cam = cam(input_tensor=inputs["pixel_values"], targets=targets)
619
+ heatmap = grayscale_cam[0, :]
620
+
621
+ img_resized = cv2.resize(img_rgb, (224, 224))
622
+ rgb_normalized = np.float32(img_resized) / 255.0
623
+
624
+ heatmap_color = cv2.applyColorMap(
625
+ np.uint8(255 * heatmap), cv2.COLORMAP_JET
626
+ )
627
+ heatmap_color = cv2.cvtColor(heatmap_color, cv2.COLOR_BGR2RGB)
628
+ overlay = np.uint8(rgb_normalized * 255 * 0.5 + heatmap_color * 0.5)
629
+
630
+ pil_overlay = Image.fromarray(overlay)
631
+ buf = io.BytesIO()
632
+ pil_overlay.save(buf, format="PNG")
633
+ seg_image_b64 = base64.b64encode(buf.getvalue()).decode("utf-8")
634
+
635
+ if classification != "noTumor":
636
+ tumor_mask = heatmap > 0.5
637
+ tumor_area_pct = round(float(tumor_mask.sum()) / tumor_mask.size * 100, 2)
638
+ else:
639
+ tumor_area_pct = 0.0
640
+
641
+ print(f"[MRI] GradCAM generated, tumor area estimate: {tumor_area_pct}%")
642
+ except Exception as e:
643
+ print(f"[MRI] GradCAM error: {e}")
644
+
645
+ summary = _build_mri_summary(classification, confidence, tumor_area_pct)
646
+ details = _build_mri_details(classification, confidence, region_scores, tumor_area_pct)
647
+
648
+ return MriResponse(
649
+ classification=classification,
650
+ confidence=confidence,
651
+ segmentation_image_base64=seg_image_b64,
652
+ tumor_area_percent=tumor_area_pct,
653
+ region_scores=region_scores,
654
+ summary=summary,
655
+ details=details,
656
+ )
657
+
658
+
659
+ def _build_mri_summary(classification: str, confidence: float, tumor_area: float | None) -> str:
660
+ pct = f"{confidence * 100:.1f}"
661
+ labels_map = {
662
+ "noTumor": f"No tumor detected with {pct}% confidence.",
663
+ "glioma": f"Glioma detected with {pct}% confidence.",
664
+ "meningioma": f"Meningioma detected with {pct}% confidence.",
665
+ "pituitary": f"Pituitary tumor detected with {pct}% confidence.",
666
+ "unknown": f"Scan inconclusive ({pct}% confidence). Image may not be a standard brain MRI.",
667
+ }
668
+ text = labels_map.get(classification, f"Classification: {classification} ({pct}% confidence).")
669
+
670
+ if tumor_area is not None and tumor_area > 0:
671
+ text += f" Tumor region covers approximately {tumor_area:.1f}% of the scan area."
672
+
673
+ return text
674
+
675
+
676
+ def _build_mri_details(classification: str, confidence: float,
677
+ scores: dict, tumor_area: float | None) -> str:
678
+ lines = ["=== Brain Tumor Classification ==="]
679
+ lines.append(f"Predicted class: {classification}")
680
+ lines.append(f"Confidence: {confidence * 100:.1f}%")
681
+ lines.append("")
682
+ lines.append("=== Class Probabilities ===")
683
+ for label, prob in sorted(scores.items(), key=lambda x: -x[1]):
684
+ lines.append(f" {label:15s}: {prob * 100:.1f}%")
685
+ if tumor_area is not None:
686
+ lines.append("")
687
+ lines.append("=== Segmentation ===")
688
+ if tumor_area > 0:
689
+ lines.append(f" Tumor area: {tumor_area:.1f}% of scan")
690
+ else:
691
+ lines.append(" No tumor region segmented")
692
+ return "\n".join(lines)
693
+
694
+
695
+ if __name__ == "__main__":
696
+ import uvicorn
697
+ uvicorn.run(app, host="0.0.0.0", port=PORT, timeout_keep_alive=300)
models/best_eeg_model_200.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0b4eb97b3e77d8b00c8eff7f261420bde3ac1117cb7d7805d63d546831834ccb
3
+ size 442369157
requirements-cloud.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ --extra-index-url https://download.pytorch.org/whl/cpu
2
+ fastapi
3
+ uvicorn
4
+ numpy
5
+ scikit-learn
6
+ tensorflow-cpu
7
+ transformers
8
+ torch
9
+ grad-cam
10
+ opencv-python-headless
11
+ Pillow
12
+ python-multipart