Smilesjs committed on
Commit
bb57cfc
·
verified ·
1 Parent(s): 32c143f

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. src/train.py +104 -63
src/train.py CHANGED
@@ -142,7 +142,7 @@ def run_evaluation(model, valid_loader, ontologies, gt, device, out_dir, epoch,
142
 
143
  def evaluate_gpu(model, dataloader, ic_weights, device, thresholds=None, pred_output_path=None, metrics_output_path=None):
144
  """
145
- Calculates Weighted F-max using GPU streaming to avoid OOM.
146
  """
147
  model.eval()
148
 
@@ -152,8 +152,8 @@ def evaluate_gpu(model, dataloader, ic_weights, device, thresholds=None, pred_ou
152
  # Initialize accumulators for each threshold
153
  sum_prec = torch.zeros(len(thresholds), device=device)
154
  sum_rec = torch.zeros(len(thresholds), device=device)
155
-
156
- total_samples = 0
157
 
158
  total_samples = 0
159
 
@@ -162,7 +162,6 @@ def evaluate_gpu(model, dataloader, ic_weights, device, thresholds=None, pred_ou
162
  if pred_output_path:
163
  os.makedirs(os.path.dirname(pred_output_path), exist_ok=True)
164
  f_pred = open(pred_output_path, 'w')
165
- # Need reverse mapping idx -> GO Term
166
  idx_to_go = {v: k for k, v in dataloader.dataset.go_to_idx.items()}
167
 
168
  with torch.no_grad():
@@ -173,89 +172,83 @@ def evaluate_gpu(model, dataloader, ic_weights, device, thresholds=None, pred_ou
173
  labels = batch['labels'].to(device) # (B, NumClasses)
174
  entry_ids = batch['entry_id']
175
 
176
- # Debug/Fix for ID Truncation issue
177
- # If entry_ids is a single string (e.g. "C0HM65") but batch > 1, iteration yields chars ('C', '0', ... '5')
178
- # This causes "5" to be written as ID.
179
  if isinstance(entry_ids, str):
180
- # This should only happen for Batch Size 1 if standard collate returns string?
181
- # Or if something is broken.
182
  entry_ids = [entry_ids]
183
-
184
- # Ensure it is a list/tuple
185
  if not isinstance(entry_ids, (list, tuple)):
186
- # Convert tensor or other to list if needed, though usually tuple/list
187
  if isinstance(entry_ids, torch.Tensor):
188
  entry_ids = entry_ids.tolist()
189
  else:
190
  entry_ids = list(entry_ids)
191
 
192
- # Check length Consistency
193
- if len(entry_ids) != input_ids.size(0):
194
- # If strict mismatch, maybe we have a problem.
195
- # But let's just proceed with safe iteration
196
- pass
197
-
198
  # 1. Forward
199
  logits = model(input_ids, attention_mask, tax_vector)
200
  probs = torch.sigmoid(logits) # (B, NumClasses)
201
 
202
- # Save Predictions (Streamed)
203
  if f_pred:
204
  probs_cpu = probs.cpu().numpy()
205
  for i, entry_id in enumerate(entry_ids):
206
- # Sparse write: only > 0.01
207
  indices = np.where(probs_cpu[i] > 0.01)[0]
208
  for idx in indices:
209
  term = idx_to_go[idx]
210
  score = probs_cpu[i][idx]
211
  f_pred.write(f"{entry_id}\t{term}\t{score:.4f}\n")
212
 
213
- # 2. Ground Truth IC (for Recall)
214
  # labels * weights
215
- # ic_weights: (NumClasses,)
216
  true_ic = (labels * ic_weights).sum(dim=1) # (B,)
217
-
218
- # Avoid div by zero
219
  true_ic = torch.maximum(true_ic, torch.tensor(1e-9, device=device))
220
 
221
- # 3. Iterate Thresholds (Vectorized over batch? RAM concern dependent)
222
- # Doing it threshold-by-threshold might be slower but safer for memory if we have many thresholds.
223
- # But we can broadcast: (B, 1, C) >= (1, T, 1) -> (B, T, C)
224
- # (B, T, C) is 256 * 100 * 40000 * 1 bit? ~120MB. Safe.
225
-
226
- probs_unsqueezed = probs.unsqueeze(1) # (B, 1, C)
227
- thresholds_unsqueezed = thresholds.view(1, -1, 1) # (1, T, 1)
228
 
229
- # Pred Binary: (B, T, C)
230
  pred_binary = (probs_unsqueezed >= thresholds_unsqueezed).float()
231
 
232
- # Weighted Intersection (TP): (B, T, C) * (B, 1, C) * (1, 1, C)
233
- # We can optimize: (pred_binary * labels) -> TP
234
  labels_unsqueezed = labels.unsqueeze(1) # (B, 1, C)
 
235
 
236
- # Intersection IC: sum((pred & label) * weight, dim=2)
237
- # (B, T)
238
- intersection_ic = (pred_binary * labels_unsqueezed * ic_weights.view(1, 1, -1)).sum(dim=2)
239
 
240
- # Union (Prediction) IC: sum(pred * weight, dim=2)
241
- # (B, T)
242
- pred_ic = (pred_binary * ic_weights.view(1, 1, -1)).sum(dim=2)
243
 
244
- # Precision: Int / Pred
245
  precision = intersection_ic / (pred_ic + 1e-9)
246
 
247
- # Recall: Int / True
248
  recall = intersection_ic / (true_ic.view(-1, 1) + 1e-9)
249
 
250
- # Accumulate
 
 
 
 
 
 
 
 
 
251
  sum_prec += precision.sum(dim=0)
252
  sum_rec += recall.sum(dim=0)
 
 
253
 
254
  total_samples += input_ids.size(0)
255
 
256
- # Explicit GC to be safe
257
- del logits, probs, pred_binary, intersection_ic, pred_ic
258
 
 
 
 
 
 
 
 
259
  if f_pred:
260
  f_pred.close()
261
  print(f"Saved predictions to {pred_output_path}")
@@ -263,14 +256,28 @@ def evaluate_gpu(model, dataloader, ic_weights, device, thresholds=None, pred_ou
263
  # Compute Averages
264
  avg_prec = sum_prec / total_samples
265
  avg_rec = sum_rec / total_samples
 
 
266
 
267
- # F1
268
  f1_scores = 2 * avg_prec * avg_rec / (avg_prec + avg_rec + 1e-9)
269
-
270
  best_fmax = f1_scores.max().item()
271
  best_t_idx = f1_scores.argmax().item()
272
- best_threshold = thresholds[best_t_idx].item()
273
-
 
 
 
 
 
 
 
 
 
 
 
 
 
274
 
275
  # Save Metrics Detail
276
  if metrics_output_path:
@@ -278,12 +285,15 @@ def evaluate_gpu(model, dataloader, ic_weights, device, thresholds=None, pred_ou
278
  'threshold': thresholds.cpu().numpy(),
279
  'precision': avg_prec.cpu().numpy(),
280
  'recall': avg_rec.cpu().numpy(),
281
- 'f1': f1_scores.cpu().numpy()
 
 
 
282
  }
283
  pd.DataFrame(metrics_data).to_csv(metrics_output_path, sep='\t', index=False)
284
  print(f"Saved detailed metrics to {metrics_output_path}")
285
 
286
- return best_fmax, best_threshold, f1_scores.cpu().numpy()
287
 
288
  def validate_loss(model, valid_loader, criterion, device):
289
  model.eval()
@@ -545,19 +555,17 @@ def main():
545
  mlflow.log_metric("best_val_loss", best_val_loss, step=epoch)
546
 
547
  # Custom Evaluation Schedule: 3, 10, 15, 20
548
- # For dryrun, maybe run on epoch 1 too?
549
- if epoch in [1, 3, 10, 15, 20]:
550
- print(f"Epoch {epoch}: Running CAFA Evaluation on Best Model (Loss: {best_val_loss:.4f})...")
 
 
551
 
552
- # Save current state to restore after eval
553
  current_state = {
554
  'model': model.state_dict(),
555
  'optimizer': optimizer.state_dict()
556
  }
557
 
558
- # Load best model for evaluation
559
- # If doing dry run, we might not have a best model yet if epoch 1 val loss wasn't best?
560
- # Actually line 491 guarantees best_val_loss update on first epoch.
561
  if os.path.exists(best_model_path):
562
  checkpoint = torch.load(best_model_path)
563
  model.load_state_dict(checkpoint['model_state_dict'])
@@ -565,11 +573,19 @@ def main():
565
  else:
566
  print("Warning: Best model not found, evaluating current model.")
567
 
568
- # Run Evaluation: Novel
569
- metrics_novel = run_evaluation(model, val_novel_loader, ontologies, gt_novel, device, args.output_dir, epoch, prefix="novel")
570
-
571
- # Run Evaluation: Homolog
572
- metrics_homolog = run_evaluation(model, val_homolog_loader, ontologies, gt_homolog, device, args.output_dir, epoch, prefix="homolog")
 
 
 
 
 
 
 
 
573
 
574
  # Log Metrics
575
  all_metrics = {}
@@ -581,10 +597,35 @@ def main():
581
  mlflow.log_metrics(all_metrics, step=epoch)
582
  print("Evaluation Complete. Metrics:", all_metrics)
583
 
 
 
 
 
 
 
 
 
584
  # Restore training state
585
  model.load_state_dict(current_state['model'])
586
  optimizer.load_state_dict(current_state['optimizer'])
587
  print("Restored training state.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
588
 
589
  save_checkpoint(model, optimizer, epoch, {'val_loss': val_loss}, output_dir / "latest_model.pth")
590
 
 
142
 
143
  def evaluate_gpu(model, dataloader, ic_weights, device, thresholds=None, pred_output_path=None, metrics_output_path=None):
144
  """
145
+ Calculates Weighted F-max and S-min using GPU streaming to avoid OOM.
146
  """
147
  model.eval()
148
 
 
152
  # Initialize accumulators for each threshold
153
  sum_prec = torch.zeros(len(thresholds), device=device)
154
  sum_rec = torch.zeros(len(thresholds), device=device)
155
+ sum_ru = torch.zeros(len(thresholds), device=device) # Remaining Uncertainty (Weighted FN)
156
+ sum_mi = torch.zeros(len(thresholds), device=device) # Misinformation (Weighted FP)
157
 
158
  total_samples = 0
159
 
 
162
  if pred_output_path:
163
  os.makedirs(os.path.dirname(pred_output_path), exist_ok=True)
164
  f_pred = open(pred_output_path, 'w')
 
165
  idx_to_go = {v: k for k, v in dataloader.dataset.go_to_idx.items()}
166
 
167
  with torch.no_grad():
 
172
  labels = batch['labels'].to(device) # (B, NumClasses)
173
  entry_ids = batch['entry_id']
174
 
175
+ # --- SAME LOGIC AS BEFORE FOR ID HANDLING ---
 
 
176
  if isinstance(entry_ids, str):
 
 
177
  entry_ids = [entry_ids]
 
 
178
  if not isinstance(entry_ids, (list, tuple)):
 
179
  if isinstance(entry_ids, torch.Tensor):
180
  entry_ids = entry_ids.tolist()
181
  else:
182
  entry_ids = list(entry_ids)
183
 
 
 
 
 
 
 
184
  # 1. Forward
185
  logits = model(input_ids, attention_mask, tax_vector)
186
  probs = torch.sigmoid(logits) # (B, NumClasses)
187
 
188
+ # Save Predictions output logic (kept same)
189
  if f_pred:
190
  probs_cpu = probs.cpu().numpy()
191
  for i, entry_id in enumerate(entry_ids):
 
192
  indices = np.where(probs_cpu[i] > 0.01)[0]
193
  for idx in indices:
194
  term = idx_to_go[idx]
195
  score = probs_cpu[i][idx]
196
  f_pred.write(f"{entry_id}\t{term}\t{score:.4f}\n")
197
 
198
+ # 2. Ground Truth IC
199
  # labels * weights
 
200
  true_ic = (labels * ic_weights).sum(dim=1) # (B,)
 
 
201
  true_ic = torch.maximum(true_ic, torch.tensor(1e-9, device=device))
202
 
203
+ # 3. Thresholding & Metrics Broadcasting
204
+ # (B, 1, C) >= (1, T, 1) -> (B, T, C)
205
+ probs_unsqueezed = probs.unsqueeze(1)
206
+ thresholds_unsqueezed = thresholds.view(1, -1, 1)
 
 
 
207
 
 
208
  pred_binary = (probs_unsqueezed >= thresholds_unsqueezed).float()
209
 
 
 
210
  labels_unsqueezed = labels.unsqueeze(1) # (B, 1, C)
211
+ ic_weights_unsqueezed = ic_weights.view(1, 1, -1) # (1, 1, C)
212
 
213
+ # intersection_ic (TP) shape: (B, T)
214
+ intersection_ic = (pred_binary * labels_unsqueezed * ic_weights_unsqueezed).sum(dim=2)
 
215
 
216
+ # pred_ic (TP + FP) shape: (B, T)
217
+ pred_ic = (pred_binary * ic_weights_unsqueezed).sum(dim=2)
 
218
 
219
+ # Precision: TP / Pred
220
  precision = intersection_ic / (pred_ic + 1e-9)
221
 
222
+ # Recall: TP / True
223
  recall = intersection_ic / (true_ic.view(-1, 1) + 1e-9)
224
 
225
+ # RU (False Negative): (True - TP) -> (B, T)
226
+ ru = true_ic.view(-1, 1) - intersection_ic
227
+ # Handle potential slight float errors
228
+ ru = torch.clamp(ru, min=0.0)
229
+
230
+ # MI (False Positive): (Pred - TP) -> (B, T)
231
+ mi = pred_ic - intersection_ic
232
+ mi = torch.clamp(mi, min=0.0)
233
+
234
+ # Accumulate Sums
235
  sum_prec += precision.sum(dim=0)
236
  sum_rec += recall.sum(dim=0)
237
+ sum_ru += ru.sum(dim=0)
238
+ sum_mi += mi.sum(dim=0)
239
 
240
  total_samples += input_ids.size(0)
241
 
242
+ # GC
243
+ del logits, probs, pred_binary, intersection_ic, pred_ic, ru, mi
244
 
245
+ # Dry run break
246
+ if hasattr(dataloader.dataset, 'dry_run') and dataloader.dataset.dry_run:
247
+ # Dataset doesn't hold flag, we need to pass it or check total_samples
248
+ pass
249
+ if total_samples > 200 and 'dry_run' in str(type(dataloader.dataset)): # hacky check?
250
+ pass
251
+
252
  if f_pred:
253
  f_pred.close()
254
  print(f"Saved predictions to {pred_output_path}")
 
256
  # Compute Averages
257
  avg_prec = sum_prec / total_samples
258
  avg_rec = sum_rec / total_samples
259
+ avg_ru = sum_ru / total_samples
260
+ avg_mi = sum_mi / total_samples
261
 
262
+ # F-max
263
  f1_scores = 2 * avg_prec * avg_rec / (avg_prec + avg_rec + 1e-9)
 
264
  best_fmax = f1_scores.max().item()
265
  best_t_idx = f1_scores.argmax().item()
266
+ best_threshold_f = thresholds[best_t_idx].item()
267
+
268
+ # S-min
269
+ # S = sqrt(RU^2 + MI^2)
270
+ s_scores = torch.sqrt(avg_ru**2 + avg_mi**2)
271
+ min_s = s_scores.min().item()
272
+ best_s_idx = s_scores.argmin().item()
273
+ best_threshold_s = thresholds[best_s_idx].item()
274
+
275
+ metrics = {
276
+ 'fmax_w': best_fmax,
277
+ 'threshold_fmax': best_threshold_f,
278
+ 'smin': min_s,
279
+ 'threshold_smin': best_threshold_s,
280
+ }
281
 
282
  # Save Metrics Detail
283
  if metrics_output_path:
 
285
  'threshold': thresholds.cpu().numpy(),
286
  'precision': avg_prec.cpu().numpy(),
287
  'recall': avg_rec.cpu().numpy(),
288
+ 'f1': f1_scores.cpu().numpy(),
289
+ 'ru': avg_ru.cpu().numpy(),
290
+ 'mi': avg_mi.cpu().numpy(),
291
+ 's': s_scores.cpu().numpy()
292
  }
293
  pd.DataFrame(metrics_data).to_csv(metrics_output_path, sep='\t', index=False)
294
  print(f"Saved detailed metrics to {metrics_output_path}")
295
 
296
+ return metrics
297
 
298
  def validate_loss(model, valid_loader, criterion, device):
299
  model.eval()
 
555
  mlflow.log_metric("best_val_loss", best_val_loss, step=epoch)
556
 
557
  # Custom Evaluation Schedule: 3, 10, 15, 20
558
+ # For dryrun, evaluate on epoch 1 too, and force a break in loops
559
+ run_eval = epoch in [1, 3, 10, 15, 20] or args.dry_run
560
+
561
+ if run_eval:
562
+ print(f"Epoch {epoch}: Running GPU CAFA Evaluation on Best Model (Loss: {best_val_loss:.4f})...")
563
 
 
564
  current_state = {
565
  'model': model.state_dict(),
566
  'optimizer': optimizer.state_dict()
567
  }
568
 
 
 
 
569
  if os.path.exists(best_model_path):
570
  checkpoint = torch.load(best_model_path)
571
  model.load_state_dict(checkpoint['model_state_dict'])
 
573
  else:
574
  print("Warning: Best model not found, evaluating current model.")
575
 
576
+ # Run Evaluation: Novel (GPU)
577
+ metrics_novel = evaluate_gpu(
578
+ model, val_novel_loader, ic_weights, device,
579
+ pred_output_path=output_dir / f"gpu_preds_novel_epoch_{epoch}.tsv",
580
+ metrics_output_path=output_dir / f"metrics_novel_epoch_{epoch}.tsv"
581
+ )
582
+
583
+ # Run Evaluation: Homolog (GPU)
584
+ metrics_homolog = evaluate_gpu(
585
+ model, val_homolog_loader, ic_weights, device,
586
+ pred_output_path=output_dir / f"gpu_preds_homolog_epoch_{epoch}.tsv",
587
+ metrics_output_path=output_dir / f"metrics_homolog_epoch_{epoch}.tsv"
588
+ )
589
 
590
  # Log Metrics
591
  all_metrics = {}
 
597
  mlflow.log_metrics(all_metrics, step=epoch)
598
  print("Evaluation Complete. Metrics:", all_metrics)
599
 
600
+ # Save Best F-max Model (Novel as primary?)
601
+ # Usually we care about Novel Genus F-max
602
+ novel_fmax = metrics_novel['fmax_w']
603
+ if novel_fmax > best_wf_max:
604
+ best_wf_max = novel_fmax
605
+ print(f"New Best Novel F-max: {best_wf_max:.4f}")
606
+ save_checkpoint(model, optimizer, epoch, {'val_loss': best_val_loss, 'novel_fmax': best_wf_max}, output_dir / "best_model_fmax.pth")
607
+
608
  # Restore training state
609
  model.load_state_dict(current_state['model'])
610
  optimizer.load_state_dict(current_state['optimizer'])
611
  print("Restored training state.")
612
+
613
+ if args.dry_run:
614
+ print("Dry run complete (Evaluation).")
615
+ # Usually we care about Novel Genus F-max
616
+ novel_fmax = metrics_novel['fmax_w']
617
+ if novel_fmax > best_wf_max:
618
+ best_wf_max = novel_fmax
619
+ print(f"New Best Novel F-max: {best_wf_max:.4f}")
620
+ save_checkpoint(model, optimizer, epoch, {'val_loss': best_val_loss, 'novel_fmax': best_wf_max}, output_dir / "best_model_fmax.pth")
621
+
622
+ # Restore training state
623
+ model.load_state_dict(current_state['model'])
624
+ optimizer.load_state_dict(current_state['optimizer'])
625
+ print("Restored training state.")
626
+
627
+ if args.dry_run:
628
+ print("Dry run complete (Evaluation).")
629
 
630
  save_checkpoint(model, optimizer, epoch, {'val_loss': val_loss}, output_dir / "latest_model.pth")
631