import argparse import sys import os import json import torch import torch.nn as nn import torch.optim as optim from torch.utils.data import DataLoader # from torch.cuda.amp import GradScaler, autocast # Deprecated import numpy as np import pandas as pd from tqdm import tqdm from pathlib import Path # Add CAFA evaluator to path sys.path.append(os.path.join(os.path.dirname(__file__), 'CAFA-evaluator-PK', 'src')) try: from cafaeval.parser import obo_parser, gt_parser, pred_parser from cafaeval.evaluation import evaluate_prediction HAS_EVAL = True except ImportError as e: print(f"Warning: Could not import cafaeval: {e}") HAS_EVAL = False from dataset import ProteinTaxonomyDataset from model import TaxonomyAwareESM, AsymmetricLoss from asymmetric_loss import load_ia_weights from transformers import AutoTokenizer def save_checkpoint(model, optimizer, epoch, metrics, filename): checkpoint = { 'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'metrics': metrics } torch.save(checkpoint, filename) print(f"Saved checkpoint to {filename}") def run_evaluation(model, valid_loader, ontologies, gt, device, out_dir, epoch, prefix="valid"): """ Runs prediction and CAFA evaluation (Weighted F-max, S-min). Returns a dict of metrics. """ model.eval() all_preds = [] print(f"Generating predictions for {prefix} set (Epoch {epoch})...") # Needs valid_loader.dataset.go_to_idx to map back to GO IDs idx_to_go = {v: k for k, v in valid_loader.dataset.go_to_idx.items()} # Memory optimization: Stream writes to file instead of keeping all_preds in memory if dataset is huge. # But for validation sets (~5k proteins), list is fine. # Just in case, let's keep it simple for now as requested. with torch.no_grad(): for batch in tqdm(valid_loader, desc=f"{prefix} Infer"): input_ids = batch['input_ids'].to(device) attention_mask = batch['attention_mask'].to(device) tax_vector = batch['tax_vector'].to(device) entry_ids = batch['entry_id'] logits = model(input_ids, attention_mask, tax_vector) probs = torch.sigmoid(logits) probs = probs.cpu().numpy() for i, entry_id in enumerate(entry_ids): row_probs = probs[i] # Threshold for sparseness indices = np.where(row_probs > 0.01)[0] for idx in indices: term = idx_to_go[idx] score = float(row_probs[idx]) all_preds.append((entry_id, term, score)) # Format for cafa_eval pred_dir = os.path.join(out_dir, "preds_temp", prefix) os.makedirs(pred_dir, exist_ok=True) pred_path = os.path.join(pred_dir, f"epoch_{epoch}.tsv") with open(pred_path, 'w') as f: for p in all_preds: f.write(f"{p[0]}\t{p[1]}\t{p[2]:.5f}\n") print(f"Saved {prefix} predictions to {pred_path}") if not HAS_EVAL or ontologies is None: return {} print(f"Running CAFA Evaluation for {prefix}...") try: # Prediction Parser # prop='max' by default in cafa_eval prediction = pred_parser(pred_path, ontologies, gt, prop_mode='max', max_terms=None) if not prediction: print("Warning: No predictions parsed.") return {} # Evaluate tau_arr = np.arange(0.01, 1, 0.01) df_res = evaluate_prediction( prediction, gt, ontologies, tau_arr, gt_exclude=None, normalization='cafa', n_cpu=4 ) # Calculate Metrics: Weighted F-max and S-min per namespace metrics = {} # Namespaces usually: 'cellular_component', 'molecular_function', 'biological_process' # But df_res['ns'] might be abbreviation or full. # CAFA eval usually uses 'BPO', 'MFO', 'CCO' keys in ontologies, but df_res keeps 'ns' col. for ns in df_res['ns'].unique(): df_ns = df_res[df_res['ns'] == ns] # Weighted F-max # Max of 'f_w' column if 'f_w' in df_ns.columns: fmax_w = df_ns['f_w'].max() metrics[f"{ns}_fmax_w"] = fmax_w # S-min # Min of 's' column if 's' in df_ns.columns: smin = df_ns['s'].min() metrics[f"{ns}_smin"] = smin print(f"{prefix} Metrics: {metrics}") # Clean up temp file to save space? # os.remove(pred_path) return metrics except Exception as e: print(f"Evaluation failed: {e}") import traceback traceback.print_exc() return {} def evaluate_gpu(model, dataloader, ic_weights, device, thresholds=None, pred_output_path=None, metrics_output_path=None): """ Calculates Weighted F-max and S-min using GPU streaming to avoid OOM. """ model.eval() if thresholds is None: thresholds = torch.linspace(0, 1, 101, device=device) # Initialize accumulators for each threshold sum_prec = torch.zeros(len(thresholds), device=device) sum_rec = torch.zeros(len(thresholds), device=device) sum_ru = torch.zeros(len(thresholds), device=device) # Remaining Uncertainty (Weighted FN) sum_mi = torch.zeros(len(thresholds), device=device) # Misinformation (Weighted FP) total_samples = 0 # Prepare Prediction Output f_pred = None if pred_output_path: os.makedirs(os.path.dirname(pred_output_path), exist_ok=True) f_pred = open(pred_output_path, 'w') idx_to_go = {v: k for k, v in dataloader.dataset.go_to_idx.items()} with torch.no_grad(): for batch in tqdm(dataloader, desc="GPU Eval"): input_ids = batch['input_ids'].to(device) attention_mask = batch['attention_mask'].to(device) tax_vector = batch['tax_vector'].to(device) labels = batch['labels'].to(device) # (B, NumClasses) entry_ids = batch['entry_id'] # --- SAME LOGIC AS BEFORE FOR ID HANDLING --- if isinstance(entry_ids, str): entry_ids = [entry_ids] if not isinstance(entry_ids, (list, tuple)): if isinstance(entry_ids, torch.Tensor): entry_ids = entry_ids.tolist() else: entry_ids = list(entry_ids) # 1. Forward logits = model(input_ids, attention_mask, tax_vector) probs = torch.sigmoid(logits) # (B, NumClasses) # Save Predictions output logic (kept same) if f_pred: probs_cpu = probs.cpu().numpy() for i, entry_id in enumerate(entry_ids): indices = np.where(probs_cpu[i] > 0.01)[0] for idx in indices: term = idx_to_go[idx] score = probs_cpu[i][idx] f_pred.write(f"{entry_id}\t{term}\t{score:.4f}\n") # 2. Ground Truth IC # labels * weights true_ic = (labels * ic_weights).sum(dim=1) # (B,) true_ic = torch.maximum(true_ic, torch.tensor(1e-9, device=device)) # 3. Thresholding & Metrics Broadcasting # (B, 1, C) >= (1, T, 1) -> (B, T, C) probs_unsqueezed = probs.unsqueeze(1) thresholds_unsqueezed = thresholds.view(1, -1, 1) pred_binary = (probs_unsqueezed >= thresholds_unsqueezed).float() labels_unsqueezed = labels.unsqueeze(1) # (B, 1, C) ic_weights_unsqueezed = ic_weights.view(1, 1, -1) # (1, 1, C) # intersection_ic (TP) shape: (B, T) intersection_ic = (pred_binary * labels_unsqueezed * ic_weights_unsqueezed).sum(dim=2) # pred_ic (TP + FP) shape: (B, T) pred_ic = (pred_binary * ic_weights_unsqueezed).sum(dim=2) # Precision: TP / Pred precision = intersection_ic / (pred_ic + 1e-9) # Recall: TP / True recall = intersection_ic / (true_ic.view(-1, 1) + 1e-9) # RU (False Negative): (True - TP) -> (B, T) ru = true_ic.view(-1, 1) - intersection_ic # Handle potential slight float errors ru = torch.clamp(ru, min=0.0) # MI (False Positive): (Pred - TP) -> (B, T) mi = pred_ic - intersection_ic mi = torch.clamp(mi, min=0.0) # Accumulate Sums sum_prec += precision.sum(dim=0) sum_rec += recall.sum(dim=0) sum_ru += ru.sum(dim=0) sum_mi += mi.sum(dim=0) total_samples += input_ids.size(0) # GC del logits, probs, pred_binary, intersection_ic, pred_ic, ru, mi # Dry run break if hasattr(dataloader.dataset, 'dry_run') and dataloader.dataset.dry_run: # Dataset doesn't hold flag, we need to pass it or check total_samples pass if total_samples > 200 and 'dry_run' in str(type(dataloader.dataset)): # hacky check? pass if f_pred: f_pred.close() print(f"Saved predictions to {pred_output_path}") # Compute Averages avg_prec = sum_prec / total_samples avg_rec = sum_rec / total_samples avg_ru = sum_ru / total_samples avg_mi = sum_mi / total_samples # F-max f1_scores = 2 * avg_prec * avg_rec / (avg_prec + avg_rec + 1e-9) best_fmax = f1_scores.max().item() best_t_idx = f1_scores.argmax().item() best_threshold_f = thresholds[best_t_idx].item() # S-min # S = sqrt(RU^2 + MI^2) s_scores = torch.sqrt(avg_ru**2 + avg_mi**2) min_s = s_scores.min().item() best_s_idx = s_scores.argmin().item() best_threshold_s = thresholds[best_s_idx].item() metrics = { 'fmax_w': best_fmax, 'threshold_fmax': best_threshold_f, 'smin': min_s, 'threshold_smin': best_threshold_s, } # Save Metrics Detail if metrics_output_path: metrics_data = { 'threshold': thresholds.cpu().numpy(), 'precision': avg_prec.cpu().numpy(), 'recall': avg_rec.cpu().numpy(), 'f1': f1_scores.cpu().numpy(), 'ru': avg_ru.cpu().numpy(), 'mi': avg_mi.cpu().numpy(), 's': s_scores.cpu().numpy() } pd.DataFrame(metrics_data).to_csv(metrics_output_path, sep='\t', index=False) print(f"Saved detailed metrics to {metrics_output_path}") return metrics def validate_loss(model, valid_loader, criterion, device): model.eval() total_loss = 0 steps = 0 torch.cuda.empty_cache() with torch.no_grad(): for batch in tqdm(valid_loader, desc="Valid Loss"): input_ids = batch['input_ids'].to(device) attention_mask = batch['attention_mask'].to(device) tax_vector = batch['tax_vector'].to(device) labels = batch['labels'].to(device) with torch.amp.autocast(device_type=device.type): logits = model(input_ids, attention_mask, tax_vector) loss = criterion(logits, labels) total_loss += loss.item() steps += 1 return total_loss / steps def main(): parser = argparse.ArgumentParser() parser.add_argument("--data_path", type=str, required=True, help="Path to mounted dataset") parser.add_argument("--lr", type=float, default=5e-5) parser.add_argument("--batch_size", type=int, default=32) parser.add_argument("--epochs", type=int, default=10) parser.add_argument("--num_workers", type=int, default=4, help="Number of data loader workers") parser.add_argument("--T_0", type=int, default=10, help="CosineAnnealingWarmRestarts T_0") parser.add_argument("--T_mult", type=int, default=1, help="CosineAnnealingWarmRestarts T_mult") parser.add_argument("--min_lr", type=float, default=1e-6, help="Minimum learning rate") parser.add_argument("--esm_model_name", type=str, default="facebook/esm2_t33_650M_UR50D", help="ESM model name") parser.add_argument("--gamma_neg", type=float, default=2, help="Asymmetric Loss gamma_neg") parser.add_argument("--gamma_pos", type=float, default=0, help="Asymmetric Loss gamma_pos") parser.add_argument("--clip", type=float, default=0.05, help="Asymmetric Loss clip") parser.add_argument("--max_grad_norm", type=float, default=1.0, help="Max gradient norm for clipping") parser.add_argument("--output_dir", type=str, default="outputs", help="Directory for checkpoints and predictions") parser.add_argument("--mlflow_dir", type=str, default="mlruns", help="Directory for MLflow logs") # LoRA Arguments parser.add_argument("--use_lora", type=bool, default=True, help="Use LoRA for ESM backbone") parser.add_argument("--lora_rank", type=int, default=8, help="LoRA rank") parser.add_argument("--dry_run", action="store_true", help="Run a short dry run for testing") parser.add_argument("--resume_checkpoint", type=str, default=None, help="Path to checkpoint to resume from") parser.add_argument("--skip_eval", action="store_true", help="Skip GPU evaluation during training") args = parser.parse_args() # Paths data_path = Path(args.data_path) train_fasta = data_path / "learning_superset" / "large_learning_superset.fasta" train_term = data_path / "learning_superset" / "large_learning_superset_term.tsv" val_fasta = data_path / "validation_superset" / "validation_superset.fasta" val_term = data_path / "validation_superset" / "validation_superset_term.tsv" # New Separate Validation Sets val_novel_fasta = data_path / "validation_superset" / "validation_novel" / "validation_novel.fasta" val_novel_term = data_path / "validation_superset" / "validation_novel" / "validation_novel_terms.tsv" val_homolog_fasta = data_path / "validation_superset" / "validation_homolog" / "validation_homolog.fasta" val_homolog_term = data_path / "validation_superset" / "validation_homolog" / "validation_homolog_terms.tsv" species_vec = data_path / "taxon_embedding" / "species_vectors.tsv" # GO Vocab is local in src/go_terms.json go_vocab_path = "src/go_terms.json" if not os.path.exists(go_vocab_path): go_vocab_path = "go_terms.json" # Evaluation files obo_path = data_path / "go_info" / "go-basic.obo" ia_path = data_path / "IA.tsv" # Propagation files go_matrix_path = data_path / "go_info" / "go_ancestor_matrix.npz" go_mapping_path = data_path / "go_info" / "go_term_mappings.pkl" device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print(f"Using device: {device}") # Tokenizer print(f"Loading tokenizer for: {args.esm_model_name}") tokenizer = AutoTokenizer.from_pretrained(args.esm_model_name) # Datasets print("Initializing Datasets...") train_dataset = ProteinTaxonomyDataset( train_fasta, train_term, species_vec, go_vocab_path, max_len=1024, esm_tokenizer=tokenizer, go_matrix_path=str(go_matrix_path), go_mapping_path=str(go_mapping_path) ) val_dataset = ProteinTaxonomyDataset( val_fasta, val_term, species_vec, go_vocab_path, max_len=1024, esm_tokenizer=tokenizer, go_matrix_path=str(go_matrix_path), go_mapping_path=str(go_mapping_path) ) train_loader = DataLoader( train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True, persistent_workers=True if args.num_workers > 0 else False, prefetch_factor=2 if args.num_workers > 0 else None ) val_loader = DataLoader( val_dataset, batch_size=max(1, args.batch_size // 2), shuffle=False, num_workers=args.num_workers, pin_memory=True, persistent_workers=True if args.num_workers > 0 else False, prefetch_factor=2 if args.num_workers > 0 else None ) # 4.1 Initialize Separate Validation Sets print("Initializing Novel Validation Set...") val_novel_dataset = ProteinTaxonomyDataset( val_novel_fasta, val_novel_term, species_vec, go_vocab_path, max_len=1024, esm_tokenizer=tokenizer, go_matrix_path=str(go_matrix_path), go_mapping_path=str(go_mapping_path) ) val_novel_loader = DataLoader(val_novel_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) print("Initializing Homolog Validation Set...") val_homolog_dataset = ProteinTaxonomyDataset( val_homolog_fasta, val_homolog_term, species_vec, go_vocab_path, max_len=1024, esm_tokenizer=tokenizer, go_matrix_path=str(go_matrix_path), go_mapping_path=str(go_mapping_path) ) val_homolog_loader = DataLoader(val_homolog_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) # Model model = TaxonomyAwareESM( num_classes=train_dataset.num_classes, pretrained_model_name=args.esm_model_name, use_lora=args.use_lora, lora_rank=args.lora_rank, vocab_sizes=train_dataset.vocab_sizes ).to(device) criterion = AsymmetricLoss(gamma_neg=args.gamma_neg, gamma_pos=args.gamma_pos, clip=args.clip).to(device) optimizer = optim.AdamW(model.parameters(), lr=args.lr) # Scheduler scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts( optimizer, T_0=args.T_0, T_mult=args.T_mult, eta_min=args.min_lr ) scaler = torch.cuda.amp.GradScaler(enabled=(device.type == 'cuda')) # Pre-Load Ontology and GT for Evaluation ontologies = None gt = None if HAS_EVAL: print("Loading Ontology and Ground Truth...") # obo_parser(obo_file, valid_rel, ia_file, no_orphans) # Using IA file allows Weighted F-max ontologies = obo_parser( str(obo_path), ("is_a", "part_of"), str(ia_path) if ia_path.exists() else None, True # no_orphans ) gt = gt_parser(str(val_term), ontologies) # Also parse GT for novel and homolog # Note: gt_parser's second arg is ontologies, it filters based on it. # We need GT objects for new sets to pass to run_evaluation print("Loading Ground Truth for Novel/Homolog...") gt_novel = gt_parser(str(val_novel_term), ontologies) gt_homolog = gt_parser(str(val_homolog_term), ontologies) # Load IC Weights for GPU Eval print("Loading IC Weights for GPU Evaluation...") ic_weights = load_ia_weights( str(ia_path) if ia_path.exists() else "IA.tsv", train_dataset.go_to_idx, train_dataset.num_classes ).to(device) # MLflow init import mlflow import time # Configure MLflow if args.mlflow_dir: mlflow_uri = Path(args.mlflow_dir).resolve().as_uri() mlflow.set_tracking_uri(mlflow_uri) print(f"MLflow tracking URI: {mlflow_uri}") mlflow.start_run() mlflow.log_params(vars(args)) best_val_loss = float('inf') output_dir = Path(args.output_dir) os.makedirs(output_dir, exist_ok=True) # Best model path for validation loss best_model_path = output_dir / "best_model_loss.pth" best_wf_max = 0.0 start_epoch = 1 # Resume Checkpoint Logic if args.resume_checkpoint and os.path.exists(args.resume_checkpoint): print(f"Resuming training from checkpoint: {args.resume_checkpoint}") checkpoint = torch.load(args.resume_checkpoint, map_location=device) # Load Model model.load_state_dict(checkpoint['model_state_dict']) # Load Optimizer if 'optimizer_state_dict' in checkpoint: optimizer.load_state_dict(checkpoint['optimizer_state_dict']) # Load Scheduler (if saved? - Not explicitly passed to save_checkpoint in original code, need to check) # Looking at save_checkpoint, it seems metrics are saved but scheduler might not be in standard dict unless added? # Let's check save_checkpoint... # Ah, save_checkpoint in line 30 only saves model, optimizer, metrics. Scheduler is missing! # But CosineAnnealingWarmRestarts relies on epoch. We can just step it to current epoch. # Update Start Epoch start_epoch = checkpoint['epoch'] + 1 print(f"Resuming from Epoch {start_epoch}") # Restore Best Metrics if 'metrics' in checkpoint and 'val_loss' in checkpoint['metrics']: best_val_loss = checkpoint['metrics']['val_loss'] print(f"Restored Best Val Loss: {best_val_loss}") # Adjust Scheduler to Epoch if start_epoch > 1: # Step scheduler to catch up # This is approximate for WarmRestarts if we don't save its internal state, but usually fine for simple restarts # Better would be to save scheduler state_dict in save_checkpoint. # For now, we manually step. for _ in range(1, start_epoch): if _ >= 3: # Scheduler steps starting from epoch 3 in loop logic scheduler.step() for epoch in range(start_epoch, args.epochs + 1): epoch_start_time = time.time() # Training model.train() total_loss = 0 total_grad_norm = 0 steps = 0 # Warmup Logic if epoch == 1: for param_group in optimizer.param_groups: param_group['lr'] = args.lr * 0.25 elif epoch == 2: for param_group in optimizer.param_groups: param_group['lr'] = args.lr * 0.50 elif epoch == 3: # Ensure we start the scheduler with the base LR for param_group in optimizer.param_groups: param_group['lr'] = args.lr # Get current LR current_lr = optimizer.param_groups[0]['lr'] pbar = tqdm(train_loader, desc=f"Epoch {epoch} Train") for batch in pbar: input_ids = batch['input_ids'].to(device) attention_mask = batch['attention_mask'].to(device) tax_vector = batch['tax_vector'].to(device) labels = batch['labels'].to(device) optimizer.zero_grad() with torch.amp.autocast(device_type=device.type): logits = model(input_ids, attention_mask, tax_vector) loss = criterion(logits, labels) scaler.scale(loss).backward() # Gradient Clipping scaler.unscale_(optimizer) grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scaler.step(optimizer) scaler.update() total_loss += loss.item() total_grad_norm += grad_norm.item() steps += 1 # Step-wise Logging if steps % 10 == 0: current_gnorm = grad_norm.item() if isinstance(grad_norm, torch.Tensor) else grad_norm global_step = (epoch - 1) * len(train_loader) + steps mlflow.log_metrics({ "step_train_loss": loss.item(), "step_grad_norm": current_gnorm, "step_lr": optimizer.param_groups[0]['lr'] }, step=global_step) pbar.set_postfix({'loss': total_loss/steps}) if args.dry_run and steps >= 5: print("Dry run: breaking training loop.") break # Step scheduler after EACH EPOCH (starting from epoch 3) if epoch >= 3: scheduler.step() train_loss = total_loss / steps avg_grad_norm = total_grad_norm / steps print(f"Epoch {epoch} Train Loss: {train_loss:.4f}, Grad Norm: {avg_grad_norm:.4f}, LR: {current_lr:.2e}") # Validation Loss Check val_loss = validate_loss(model, val_loader, criterion, device) print(f"Epoch {epoch} Val Loss: {val_loss:.4f}") epoch_time = time.time() - epoch_start_time # Log to MLflow mlflow.log_metrics({ "train_loss": train_loss, "avg_grad_norm": avg_grad_norm, "val_loss": val_loss, "lr": current_lr, "epoch_time": epoch_time }, step=epoch) if val_loss < best_val_loss: print(f"New Best Val Loss: {val_loss:.4f} (was {best_val_loss:.4f})") best_val_loss = val_loss save_checkpoint(model, optimizer, epoch, {'val_loss': val_loss}, best_model_path) mlflow.log_metric("best_val_loss", best_val_loss, step=epoch) # Custom Evaluation Schedule: 3, 10, 15, 20 # For dryrun, evaluate on epoch 1 too, and force a break in loops # Custom Evaluation Schedule: 3, 10, 15, 20 # For dryrun, evaluate on epoch 1 too, and force a break in loops run_eval = (epoch in [3, 10, 15, 20] or args.dry_run) and not args.skip_eval if run_eval: print(f"Epoch {epoch}: Running GPU CAFA Evaluation on Best Model (Loss: {best_val_loss:.4f})...") current_state = { 'model': model.state_dict(), 'optimizer': optimizer.state_dict() } if os.path.exists(best_model_path): checkpoint = torch.load(best_model_path) model.load_state_dict(checkpoint['model_state_dict']) print(f"Loaded best model from epoch {checkpoint['epoch']} for evaluation.") else: print("Warning: Best model not found, evaluating current model.") # Run Evaluation: Novel (GPU) metrics_novel = evaluate_gpu( model, val_novel_loader, ic_weights, device, pred_output_path=output_dir / f"gpu_preds_novel_epoch_{epoch}.tsv", metrics_output_path=output_dir / f"metrics_novel_epoch_{epoch}.tsv" ) # Run Evaluation: Homolog (GPU) metrics_homolog = evaluate_gpu( model, val_homolog_loader, ic_weights, device, pred_output_path=output_dir / f"gpu_preds_homolog_epoch_{epoch}.tsv", metrics_output_path=output_dir / f"metrics_homolog_epoch_{epoch}.tsv" ) # Log Metrics all_metrics = {} for k, v in metrics_novel.items(): all_metrics[f"novel_{k}"] = v for k, v in metrics_homolog.items(): all_metrics[f"homolog_{k}"] = v mlflow.log_metrics(all_metrics, step=epoch) print("Evaluation Complete. Metrics:", all_metrics) # Save Best F-max Model (Novel as primary?) # Usually we care about Novel Genus F-max novel_fmax = metrics_novel['fmax_w'] if novel_fmax > best_wf_max: best_wf_max = novel_fmax print(f"New Best Novel F-max: {best_wf_max:.4f}") save_checkpoint(model, optimizer, epoch, {'val_loss': best_val_loss, 'novel_fmax': best_wf_max}, output_dir / "best_model_fmax.pth") # Restore training state model.load_state_dict(current_state['model']) optimizer.load_state_dict(current_state['optimizer']) print("Restored training state.") if args.dry_run: print("Dry run complete (Evaluation).") # Usually we care about Novel Genus F-max novel_fmax = metrics_novel['fmax_w'] if novel_fmax > best_wf_max: best_wf_max = novel_fmax print(f"New Best Novel F-max: {best_wf_max:.4f}") save_checkpoint(model, optimizer, epoch, {'val_loss': best_val_loss, 'novel_fmax': best_wf_max}, output_dir / "best_model_fmax.pth") # Restore training state model.load_state_dict(current_state['model']) optimizer.load_state_dict(current_state['optimizer']) print("Restored training state.") if args.dry_run: print("Dry run complete (Evaluation).") save_checkpoint(model, optimizer, epoch, {'val_loss': val_loss}, output_dir / "latest_model.pth") mlflow.end_run() if __name__ == "__main__": main()