ryu34 committed on
Commit
cae68c5
·
verified ·
1 Parent(s): 84989fa

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +36 -20
app.py CHANGED
@@ -197,7 +197,7 @@ class ModelManager:
197
  except Exception as e:
198
  logger.warning(f"ROI annotations load failed: {e}")
199
 
200
- # Load brain encoder
201
  try:
202
  model_path = hf_hub_download(repo_id=MODEL_REPO, filename="best_model.pt")
203
  checkpoint = torch.load(model_path, map_location=self.device, weights_only=False)
@@ -215,10 +215,13 @@ class ModelManager:
215
  if self.roi_annotations is not None:
216
  self.brain_encoder.set_roi_assignments(self.roi_annotations)
217
 
 
 
 
218
  logger.info("Brain encoder loaded successfully")
219
  except Exception as e:
220
- logger.error(f"Brain encoder load failed: {e}")
221
- raise
222
 
223
  # Load ridge model
224
  try:
@@ -404,33 +407,46 @@ class ModelManager:
404
  pred_np = predictions.cpu().numpy().flatten()
405
 
406
  # ── Deep encoder for intermediates and uncertainty ──
407
- with torch.no_grad():
408
- deep_pred, intermediates = self.brain_encoder(input_features, return_intermediates=True)
409
-
410
- # Compute modality contributions using ridge (more reliable)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
411
  modality_contributions = {}
412
  if self.ridge_model is not None:
 
413
  for key in ['image_multi_layer', 'text_multi_layer', 'audio_multi_layer']:
414
  if key in features_dict:
415
  modality_name = key.split('_')[0]
416
  feat_np = features_dict[key].cpu().numpy()
417
  X_n = (feat_np - ridge['feat_mean']) / ridge['feat_std']
418
  mp = (ridge['model'].predict(X_n) * ridge['fmri_std'] + ridge['fmri_mean']).flatten()
419
- mp = np.clip(mp, -clip_val, clip_val)
 
420
  modality_contributions[modality_name] = mp
421
 
422
- # Compute uncertainty via dropout MC (deep encoder)
423
- self.brain_encoder.train()
424
- mc_predictions = []
425
- for _ in range(10):
426
- with torch.no_grad():
427
- mc_pred = self.brain_encoder(input_features)
428
- mc_predictions.append(mc_pred.cpu().numpy().flatten())
429
- self.brain_encoder.eval()
430
-
431
- mc_predictions = np.array(mc_predictions)
432
- uncertainty = np.std(mc_predictions, axis=0)
433
-
434
  # Compute ROI summaries using z-scored per-voxel predictions
435
  # This shows which regions are MORE or LESS activated compared to baseline
436
  if self.ridge_model is not None:
 
197
  except Exception as e:
198
  logger.warning(f"ROI annotations load failed: {e}")
199
 
200
+ # Load brain encoder (optional - ridge is primary)
201
  try:
202
  model_path = hf_hub_download(repo_id=MODEL_REPO, filename="best_model.pt")
203
  checkpoint = torch.load(model_path, map_location=self.device, weights_only=False)
 
215
  if self.roi_annotations is not None:
216
  self.brain_encoder.set_roi_assignments(self.roi_annotations)
217
 
218
+ # Free checkpoint memory
219
+ del checkpoint
220
+
221
  logger.info("Brain encoder loaded successfully")
222
  except Exception as e:
223
+ logger.warning(f"Brain encoder load failed (will use ridge only): {e}")
224
+ self.brain_encoder = None
225
 
226
  # Load ridge model
227
  try:
 
407
  pred_np = predictions.cpu().numpy().flatten()
408
 
409
  # ── Deep encoder for intermediates and uncertainty ──
410
+ intermediates = {}
411
+ if self.brain_encoder is not None:
412
+ with torch.no_grad():
413
+ deep_pred, intermediates = self.brain_encoder(input_features, return_intermediates=True)
414
+
415
+ # Compute uncertainty via dropout MC (deep encoder)
416
+ self.brain_encoder.train()
417
+ mc_predictions = []
418
+ for _ in range(10):
419
+ with torch.no_grad():
420
+ mc_pred = self.brain_encoder(input_features)
421
+ mc_predictions.append(mc_pred.cpu().numpy().flatten())
422
+ self.brain_encoder.eval()
423
+ mc_predictions = np.array(mc_predictions)
424
+ uncertainty = np.std(mc_predictions, axis=0)
425
+ else:
426
+ # Estimate uncertainty from ridge prediction variance across feature perturbation
427
+ mc_predictions = []
428
+ for _ in range(10):
429
+ noise = np.random.normal(0, 0.01, size=input_features_np.shape)
430
+ X_noisy = (input_features_np + noise - ridge['feat_mean']) / ridge['feat_std']
431
+ mp = ridge['model'].predict(X_noisy).flatten()
432
+ mc_predictions.append(mp)
433
+ mc_predictions = np.array(mc_predictions)
434
+ uncertainty = np.std(mc_predictions, axis=0)
435
+
436
+ # Compute modality contributions using ridge
437
  modality_contributions = {}
438
  if self.ridge_model is not None:
439
+ ridge = self.ridge_model
440
  for key in ['image_multi_layer', 'text_multi_layer', 'audio_multi_layer']:
441
  if key in features_dict:
442
  modality_name = key.split('_')[0]
443
  feat_np = features_dict[key].cpu().numpy()
444
  X_n = (feat_np - ridge['feat_mean']) / ridge['feat_std']
445
  mp = (ridge['model'].predict(X_n) * ridge['fmri_std'] + ridge['fmri_mean']).flatten()
446
+ clip_val_mod = np.percentile(np.abs(mp), 99.5)
447
+ mp = np.clip(mp, -clip_val_mod, clip_val_mod)
448
  modality_contributions[modality_name] = mp
449
 
 
 
 
 
 
 
 
 
 
 
 
 
450
  # Compute ROI summaries using z-scored per-voxel predictions
451
  # This shows which regions are MORE or LESS activated compared to baseline
452
  if self.ridge_model is not None: