# ta-ESM2/src/resume_eval.py
import argparse
import sys
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
from tqdm import tqdm
from pathlib import Path
from transformers import AutoTokenizer
# Make sibling modules importable when the script is run from the repo root
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from dataset import ProteinTaxonomyDataset
from model import TaxonomyAwareESM
from asymmetric_loss import load_ia_weights
def evaluate_gpu(model, dataloader, ic_weights, device, thresholds=None, pred_output_path=None, metrics_output_path=None):
"""
Calculates Weighted F-max and S-min using GPU streaming to avoid OOM.
(Copied from train.py)
"""
model.eval()
if thresholds is None:
        thresholds = torch.linspace(0, 1, 101, device=device)  # 0.00, 0.01, ..., 1.00
# Initialize accumulators for each threshold
sum_prec = torch.zeros(len(thresholds), device=device)
sum_rec = torch.zeros(len(thresholds), device=device)
sum_ru = torch.zeros(len(thresholds), device=device) # Remaining Uncertainty (Weighted FN)
sum_mi = torch.zeros(len(thresholds), device=device) # Misinformation (Weighted FP)
total_samples = 0
    # Prepare prediction output, creating the parent directory if there is one
    f_pred = None
    if pred_output_path:
        pred_dir = os.path.dirname(str(pred_output_path))
        if pred_dir:
            os.makedirs(pred_dir, exist_ok=True)
        f_pred = open(pred_output_path, 'w')
idx_to_go = {v: k for k, v in dataloader.dataset.go_to_idx.items()}
with torch.no_grad():
for batch in tqdm(dataloader, desc="GPU Eval"):
input_ids = batch['input_ids'].to(device)
attention_mask = batch['attention_mask'].to(device)
tax_vector = batch['tax_vector'].to(device)
labels = batch['labels'].to(device) # (B, NumClasses)
entry_ids = batch['entry_id']
            # --- ID HANDLING ---
            # Default collate can yield a bare string, a tuple/list of strings,
            # or a tensor; normalize to a plain list of ids.
            if isinstance(entry_ids, str):
                entry_ids = [entry_ids]
            elif isinstance(entry_ids, torch.Tensor):
                entry_ids = entry_ids.tolist()
            elif not isinstance(entry_ids, (list, tuple)):
                entry_ids = list(entry_ids)
# 1. Forward
logits = model(input_ids, attention_mask, tax_vector)
probs = torch.sigmoid(logits) # (B, NumClasses)
            # Save predictions above a small score floor (entry_id <TAB> GO term <TAB> score)
if f_pred:
probs_cpu = probs.cpu().numpy()
for i, entry_id in enumerate(entry_ids):
indices = np.where(probs_cpu[i] > 0.01)[0]
for idx in indices:
term = idx_to_go[idx]
score = probs_cpu[i][idx]
f_pred.write(f"{entry_id}\t{term}\t{score:.4f}\n")
            # 2. Ground-truth information content per protein: the sum of IA
            #    weights over the true terms, clamped away from zero so recall
            #    stays well-defined for proteins with no weighted annotations.
            true_ic = (labels * ic_weights).sum(dim=1)  # (B,)
            true_ic = torch.clamp(true_ic, min=1e-9)
# 3. Thresholding & Metrics Broadcasting
# (B, 1, C) >= (1, T, 1) -> (B, T, C)
probs_unsqueezed = probs.unsqueeze(1)
thresholds_unsqueezed = thresholds.view(1, -1, 1)
pred_binary = (probs_unsqueezed >= thresholds_unsqueezed).float()
labels_unsqueezed = labels.unsqueeze(1) # (B, 1, C)
ic_weights_unsqueezed = ic_weights.view(1, 1, -1) # (1, 1, C)
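            # Shape sketch with toy sizes (B=2 proteins, T=3 thresholds, C=4 terms):
            #   probs      (2, 4) -> unsqueeze(1)   -> (2, 1, 4)
            #   thresholds (3,)   -> view(1, -1, 1) -> (1, 3, 1)
            #   '>=' broadcasts both to (2, 3, 4): one binary prediction
            #   matrix per threshold, computed in a single vectorized op.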
# intersection_ic (TP) shape: (B, T)
intersection_ic = (pred_binary * labels_unsqueezed * ic_weights_unsqueezed).sum(dim=2)
# pred_ic (TP + FP) shape: (B, T)
pred_ic = (pred_binary * ic_weights_unsqueezed).sum(dim=2)
# Precision: TP / Pred
precision = intersection_ic / (pred_ic + 1e-9)
# Recall: TP / True
recall = intersection_ic / (true_ic.view(-1, 1) + 1e-9)
# RU (False Negative): (True - TP) -> (B, T)
ru = true_ic.view(-1, 1) - intersection_ic
ru = torch.clamp(ru, min=0.0)
# MI (False Positive): (Pred - TP) -> (B, T)
mi = pred_ic - intersection_ic
mi = torch.clamp(mi, min=0.0)
# Accumulate Sums
sum_prec += precision.sum(dim=0)
sum_rec += recall.sum(dim=0)
sum_ru += ru.sum(dim=0)
sum_mi += mi.sum(dim=0)
total_samples += input_ids.size(0)
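            # Free the (B, T, C) intermediates eagerly; only the O(T)
            # accumulators persist across batches, so peak memory scales with
            # batch size and threshold count rather than dataset size.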
del logits, probs, pred_binary, intersection_ic, pred_ic, ru, mi
if f_pred:
f_pred.close()
print(f"Saved predictions to {pred_output_path}")
# Compute Averages
avg_prec = sum_prec / total_samples
avg_rec = sum_rec / total_samples
avg_ru = sum_ru / total_samples
avg_mi = sum_mi / total_samples
# F-max
f1_scores = 2 * avg_prec * avg_rec / (avg_prec + avg_rec + 1e-9)
best_fmax = f1_scores.max().item()
best_t_idx = f1_scores.argmax().item()
best_threshold_f = thresholds[best_t_idx].item()
# S-min
s_scores = torch.sqrt(avg_ru**2 + avg_mi**2)
min_s = s_scores.min().item()
best_s_idx = s_scores.argmin().item()
best_threshold_s = thresholds[best_s_idx].item()
metrics = {
'fmax_w': best_fmax,
'threshold_fmax': best_threshold_f,
'smin': min_s,
'threshold_smin': best_threshold_s,
}
# Save Metrics Detail
if metrics_output_path:
metrics_data = {
'threshold': thresholds.cpu().numpy(),
'precision': avg_prec.cpu().numpy(),
'recall': avg_rec.cpu().numpy(),
'f1': f1_scores.cpu().numpy(),
'ru': avg_ru.cpu().numpy(),
'mi': avg_mi.cpu().numpy(),
's': s_scores.cpu().numpy()
}
pd.DataFrame(metrics_data).to_csv(metrics_output_path, sep='\t', index=False)
print(f"Saved detailed metrics to {metrics_output_path}")
return metrics
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data_path", type=str, required=True, help="Path to mounted dataset")
parser.add_argument("--output_dir", type=str, default="outputs", help="Directory for checkpoints and predictions")
parser.add_argument("--checkpoint", type=str, default="latest_model.pth", help="Checkpoint filename to load (in output_dir)")
parser.add_argument("--epoch", type=int, default=3, help="Epoch to associate with predictions")
parser.add_argument("--esm_model_name", type=str, default="facebook/esm2_t33_650M_UR50D", help="ESM model name")
parser.add_argument("--force_novel", action="store_true", help="Force re-evaluation of novel dataset")
args = parser.parse_args()
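    # Example invocation (paths are placeholders):
    #   python src/resume_eval.py --data_path /mnt/cafa_data \
    #       --output_dir outputs --checkpoint latest_model.pth --epoch 3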
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Using device: {device}")
output_dir = Path(args.output_dir)
data_path = Path(args.data_path)
# Paths
val_novel_fasta = data_path / "validation_superset" / "validation_novel" / "validation_novel.fasta"
val_novel_term = data_path / "validation_superset" / "validation_novel" / "validation_novel_terms.tsv"
val_homolog_fasta = data_path / "validation_superset" / "validation_homolog" / "validation_homolog.fasta"
val_homolog_term = data_path / "validation_superset" / "validation_homolog" / "validation_homolog_terms.tsv"
species_vec = data_path / "taxon_embedding" / "species_vectors.tsv"
    # GO vocabulary: prefer the copy under src/, fall back to the working directory
    go_vocab_path = "src/go_terms.json"
    if not os.path.exists(go_vocab_path):
        go_vocab_path = "go_terms.json"
ia_path = data_path / "IA.tsv"
go_matrix_path = data_path / "go_info" / "go_ancestor_matrix.npz"
go_mapping_path = data_path / "go_info" / "go_term_mappings.pkl"
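    # Expected layout under --data_path (inferred from the paths above):
    #   validation_superset/validation_{novel,homolog}/*.{fasta,tsv}
    #   taxon_embedding/species_vectors.tsv
    #   IA.tsv
    #   go_info/{go_ancestor_matrix.npz, go_term_mappings.pkl}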
# Load Tokenizer
print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(args.esm_model_name)
    # Build the novel validation dataset first; besides serving as an eval
    # split, it carries the GO vocabulary and taxonomy vocab sizes, which must
    # match the ones used during training.
    print("Initializing novel validation dataset...")
    val_novel_dataset = ProteinTaxonomyDataset(
        val_novel_fasta, val_novel_term, species_vec, go_vocab_path, max_len=1024, esm_tokenizer=tokenizer,
        go_matrix_path=str(go_matrix_path), go_mapping_path=str(go_mapping_path)
    )
print("Initializing Model...")
    model = TaxonomyAwareESM(
        num_classes=val_novel_dataset.num_classes,
        pretrained_model_name=args.esm_model_name,
        use_lora=True,
        lora_rank=512,  # assumes the rank used during training
        vocab_sizes=val_novel_dataset.vocab_sizes
    ).to(device)
# Load Checkpoint
ckpt_path = output_dir / args.checkpoint
if not ckpt_path.exists():
print(f"Error: Checkpoint not found at {ckpt_path}")
return
print(f"Loading checkpoint from {ckpt_path}...")
checkpoint = torch.load(ckpt_path, map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
print(f"Model loaded (Epoch {checkpoint['epoch']})")
# Load IC Weights
print("Loading IC weights...")
    ic_weights = load_ia_weights(
        str(ia_path) if ia_path.exists() else "IA.tsv",
        val_novel_dataset.go_to_idx,
        val_novel_dataset.num_classes
    ).to(device)
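    # IA (information accretion) weights emphasize specific, information-rich
    # GO terms over shallow, frequently annotated ones when scoring.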
# --- EVALUATION ---
# 1. Novel Dataset
novel_preds_path = output_dir / f"gpu_preds_novel_epoch_{args.epoch}.tsv"
novel_metrics_path = output_dir / f"metrics_novel_epoch_{args.epoch}.tsv"
if novel_metrics_path.exists() and not args.force_novel:
print(f"Novel metrics already exist at {novel_metrics_path}. Skipping.")
with open(novel_metrics_path, 'r') as f:
print(f.read())
else:
print("Evaluating Novel Dataset...")
        val_novel_loader = DataLoader(val_novel_dataset, batch_size=32, shuffle=False, num_workers=4, pin_memory=True)
metrics_novel = evaluate_gpu(
model, val_novel_loader, ic_weights, device,
pred_output_path=novel_preds_path,
metrics_output_path=novel_metrics_path
)
print("Novel Metrics:", metrics_novel)
# 2. Homolog Dataset
homolog_preds_path = output_dir / f"gpu_preds_homolog_epoch_{args.epoch}.tsv"
homolog_metrics_path = output_dir / f"metrics_homolog_epoch_{args.epoch}.tsv"
print("Evaluating Homolog Dataset...")
val_homolog_dataset = ProteinTaxonomyDataset(
val_homolog_fasta, val_homolog_term, species_vec, go_vocab_path, max_len=1024, esm_tokenizer=tokenizer,
go_matrix_path=str(go_matrix_path), go_mapping_path=str(go_mapping_path)
)
val_homolog_loader = DataLoader(val_homolog_dataset, batch_size=32, shuffle=False, num_workers=4, pin_memory=True)
metrics_homolog = evaluate_gpu(
model, val_homolog_loader, ic_weights, device,
pred_output_path=homolog_preds_path,
metrics_output_path=homolog_metrics_path
)
print("Homolog Metrics:", metrics_homolog)
print("Evaluation All Done.")
if __name__ == "__main__":
main()