""" Training and evaluation entry point for Anisotropic Schrödinger Bridge (SB). Simplified from grn_svd: no latent stream, no sparse cache, no SVD dict. Single-stage generation with SDE (or PF-ODE ablation). """ import sys import os _PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _PROJECT_ROOT) import _bootstrap_scdfm # noqa: F401 import copy import csv import torch import tyro import tqdm import numpy as np import pandas as pd import anndata as ad from torch.utils.data import DataLoader from tqdm import trange from accelerate import Accelerator, DistributedDataParallelKwargs from torch.optim.lr_scheduler import LinearLR, CosineAnnealingLR, SequentialLR from torch.utils.tensorboard import SummaryWriter from config.config_sb import SBConfig as Config from src.data.data import get_data_classes from src.model.model import SBModel from src.denoiser import SBDenoiser from src.utils import ( save_checkpoint, load_checkpoint, pick_eval_score, process_vocab, set_requires_grad_for_p_only, GeneVocab, ) from cell_eval import MetricsEvaluator _REPO_ROOT = os.path.normpath(os.path.join(_PROJECT_ROOT, "..", "..", "transfer", "code")) @torch.inference_mode() def test(data_sampler, denoiser, accelerator, config, vocab, data_manager, batch_size=32, path_dir="./"): """Evaluate: generate predictions and compute cell-eval metrics.""" device = accelerator.device gene_ids_test = vocab.encode(list(data_sampler.adata.var_names)) gene_ids_test = torch.tensor(gene_ids_test, dtype=torch.long, device=device) perturbation_name_list = data_sampler._perturbation_covariates control_data = data_sampler.get_control_data() inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()} all_pred = [control_data["src_cell_data"]] obs_pred = ["control"] * control_data["src_cell_data"].shape[0] all_real = [control_data["src_cell_data"]] obs_real = ["control"] * control_data["src_cell_data"].shape[0] for pert_name in perturbation_name_list: pert_data = data_sampler.get_perturbation_data(pert_name) target = pert_data["tgt_cell_data"] pert_id = pert_data["condition_id"].to(device) source = control_data["src_cell_data"].to(device) if config.perturbation_function == "crisper": pert_name_crisper = [ inverse_dict[int(p)] for p in pert_id[0].cpu().numpy() ] pert_id = torch.tensor( vocab.encode(pert_name_crisper), dtype=torch.long, device=device ).repeat(source.shape[0], 1) idx = torch.randperm(source.shape[0]) source = source[idx][:128] preds = [] for i in trange(0, 128, batch_size, desc=pert_name): bs = source[i:i+batch_size] bp = pert_id[0].repeat(bs.shape[0], 1).to(device) model = denoiser.module if hasattr(denoiser, "module") else denoiser pred = model.generate( bs, bp, gene_ids_test, steps=config.sde_steps if config.use_sde_inference else config.ode_steps, method="sde" if config.use_sde_inference else "ode", ) preds.append(pred) preds = torch.cat(preds, 0).cpu().numpy() all_pred.append(preds) all_real.append(target) obs_pred.extend([pert_name] * preds.shape[0]) obs_real.extend([pert_name] * target.shape[0]) all_pred = np.concatenate(all_pred, 0) all_real = np.concatenate(all_real, 0) pred_adata = ad.AnnData(X=all_pred, obs=pd.DataFrame({"perturbation": obs_pred})) real_adata = ad.AnnData(X=all_real, obs=pd.DataFrame({"perturbation": obs_real})) eval_score = None if accelerator.is_main_process: evaluator = MetricsEvaluator( adata_pred=pred_adata, adata_real=real_adata, control_pert="control", pert_col="perturbation", num_threads=32, ) results, agg_results = 
    if accelerator.is_main_process:
        evaluator = MetricsEvaluator(
            adata_pred=pred_adata,
            adata_real=real_adata,
            control_pert="control",
            pert_col="perturbation",
            num_threads=32,
        )
        results, agg_results = evaluator.compute()
        results.write_csv(os.path.join(path_dir, "results.csv"))
        agg_results.write_csv(os.path.join(path_dir, "agg_results.csv"))
        pred_adata.write_h5ad(os.path.join(path_dir, "pred.h5ad"))
        real_adata.write_h5ad(os.path.join(path_dir, "real.h5ad"))

        # Report the first aggregated metric that is present and non-NaN.
        df = agg_results.to_pandas()
        for m in ("mse", "pearson_delta", "pr_auc"):
            if m in df.columns and df[m].notna().any():
                eval_score = float(df[m].iloc[0])
                break
        if eval_score is not None:
            print(f"Eval score: {eval_score:.4f}")
    return eval_score


if __name__ == "__main__":
    config = tyro.cli(Config)
    ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
    if accelerator.is_main_process:
        print(config)
    save_path = config.make_path()
    os.makedirs(save_path, exist_ok=True)
    device = accelerator.device

    # === Data loading ===
    Data, PerturbationDataset, TrainSampler, TestDataset = get_data_classes()
    scdfm_data_path = os.path.join(_REPO_ROOT, "scDFM", "data")
    data_manager = Data(scdfm_data_path)
    data_manager.load_data(config.data_name)
    # Remap Ensembl IDs to gene symbols when a gene_name column is available.
    if "gene_name" in data_manager.adata.var.columns and data_manager.adata.var_names[0].startswith("ENSG"):
        data_manager.adata.var_names = data_manager.adata.var["gene_name"].values
        data_manager.adata.var_names_make_unique()
    data_manager.process_data(
        n_top_genes=config.n_top_genes,
        split_method=config.split_method,
        fold=config.fold,
        use_negative_edge=config.use_negative_edge,
        k=config.topk,
    )
    train_sampler, valid_sampler, _ = data_manager.load_flow_data(batch_size=config.batch_size)

    # === Mask path ===
    if config.use_negative_edge:
        mask_path = os.path.join(
            data_manager.data_path,
            data_manager.data_name,
            f"mask_fold_{config.fold}topk_{config.topk}{config.split_method}_negative_edge.pt",
        )
    else:
        mask_path = os.path.join(
            data_manager.data_path,
            data_manager.data_name,
            f"mask_fold_{config.fold}topk_{config.topk}{config.split_method}.pt",
        )

    # === Vocab ===
    # process_vocab expects to run from the scDFM directory, so chdir around it.
    orig_cwd = os.getcwd()
    os.chdir(os.path.join(_REPO_ROOT, "scDFM"))
    vocab = process_vocab(data_manager, config)
    os.chdir(orig_cwd)
    gene_ids = vocab.encode(list(data_manager.adata.var_names))
    gene_ids = torch.tensor(gene_ids, dtype=torch.long, device=device)

    # === Build SBModel ===
    vf = SBModel(
        ntoken=len(vocab),
        d_model=config.d_model,
        nhead=config.nhead,
        d_hid=config.d_hid,
        nlayers=config.nlayers,
        fusion_method=config.fusion_method,
        perturbation_function=config.perturbation_function,
        mask_path=mask_path,
        sigma_min=config.sigma_min,
        sigma_max=config.sigma_max,
        sigma_init=config.sigma_init,
        sigma_hidden_dim=config.sigma_hidden_dim,
        sigma_num_layers=config.sigma_num_layers,
        score_head_depth=config.score_head_depth,
        use_score=config.use_score,
    )

    # === Simple PerturbationDataset (no sparse cache needed) ===
    base_dataset = PerturbationDataset(train_sampler, config.batch_size)
    dataloader = DataLoader(
        base_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
        persistent_workers=True,
    )
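    # PerturbationDataset yields pre-batched tensors (config.batch_size cells per
    # item), so the DataLoader uses batch_size=1 and the training loop strips the
    # extra leading dimension with .squeeze(0).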

    # === Build SBDenoiser ===
    denoiser = SBDenoiser(
        model=vf,
        noise_type=config.noise_type,
        use_mmd_loss=config.use_mmd_loss,
        gamma=config.gamma,
        poisson_alpha=config.poisson_alpha,
        poisson_target_sum=config.poisson_target_sum,
        score_weight=config.score_weight,
        score_t_clip=config.score_t_clip,
        use_score=config.use_score,
        sigma_base=config.sigma_base,
        sigma_sparse_weight=config.sigma_sparse_weight,
        sigma_volume_weight=config.sigma_volume_weight,
        ot_method=config.ot_method,
        ot_reg=config.ot_reg,
        ot_use_sigma=config.ot_use_sigma,
        sigma_min=config.sigma_min,
        t_sample_mode=config.t_sample_mode,
        t_mean=config.t_mean,
        t_std=config.t_std,
        sde_steps=config.sde_steps,
        use_sde_inference=config.use_sde_inference,
        source_anchored=config.source_anchored,
    )

    # === EMA model ===
    ema_model = copy.deepcopy(vf).to(device)
    ema_model.eval()
    ema_model.requires_grad_(False)

    # === Optimizer & Scheduler ===
    optimizer = torch.optim.Adam(vf.parameters(), lr=config.lr)
    warmup_scheduler = LinearLR(optimizer, start_factor=1e-3, end_factor=1.0, total_iters=config.warmup_steps)
    cosine_scheduler = CosineAnnealingLR(optimizer, T_max=max(config.steps - config.warmup_steps, 1), eta_min=config.eta_min)
    scheduler = SequentialLR(optimizer, [warmup_scheduler, cosine_scheduler], milestones=[config.warmup_steps])

    start_iteration = 0
    if config.checkpoint_path != "":
        start_iteration, _ = load_checkpoint(config.checkpoint_path, vf, optimizer, scheduler)
        ema_model.load_state_dict(vf.state_dict())

    # === Prepare with accelerator ===
    denoiser = accelerator.prepare(denoiser)
    optimizer, scheduler, dataloader = accelerator.prepare(optimizer, scheduler, dataloader)
    inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()}

    # === Test-only mode ===
    if config.test_only:
        eval_path = os.path.join(save_path, "eval_only")
        os.makedirs(eval_path, exist_ok=True)
        eval_score = test(
            valid_sampler,
            denoiser,
            accelerator,
            config,
            vocab,
            data_manager,
            batch_size=config.eval_batch_size,
            path_dir=eval_path,
        )
        sys.exit(0)

    # === Loss logging ===
    if accelerator.is_main_process:
        csv_path = os.path.join(save_path, "loss_curve.csv")
        # Append when resuming from a checkpoint; otherwise start a fresh file.
        csv_file = open(csv_path, "a" if start_iteration > 0 and os.path.exists(csv_path) else "w", newline="")
        csv_writer = csv.writer(csv_file)
        if start_iteration == 0 or not os.path.exists(csv_path):
            csv_writer.writerow([
                "iteration", "loss", "loss_v", "loss_s", "loss_mmd",
                "loss_sparse", "loss_volume", "sigma_mean", "sigma_std", "lr",
            ])
        tb_writer = SummaryWriter(log_dir=os.path.join(save_path, "tb_logs"))

    # === Training loop ===
    pbar = tqdm.tqdm(total=config.steps, initial=start_iteration)
    iteration = start_iteration
    while iteration < config.steps:
        for batch_data in dataloader:
            source = batch_data["src_cell_data"].squeeze(0).to(device)
            target = batch_data["tgt_cell_data"].squeeze(0).to(device)
            perturbation_id = batch_data["condition_id"].squeeze(0).to(device)

            # Random gene subset (same as scDFM)
            G_full = source.shape[-1]
            input_gene_ids_pos = torch.randperm(G_full, device=device)[:config.infer_top_gene]
            source_sub = source[:, input_gene_ids_pos]
            target_sub = target[:, input_gene_ids_pos]
            gene_ids_sub = gene_ids[input_gene_ids_pos]

            if config.perturbation_function == "crisper":
                pert_name = [inverse_dict[int(p)] for p in perturbation_id[0].cpu().numpy()]
                perturbation_id = torch.tensor(
                    vocab.encode(pert_name), dtype=torch.long, device=device
                ).repeat(source_sub.shape[0], 1)

            base_denoiser = denoiser.module if hasattr(denoiser, "module") else denoiser
            base_denoiser.model.train()
            B = source_sub.shape[0]
            gene_input = gene_ids_sub.unsqueeze(0).expand(B, -1)
            loss_dict = base_denoiser.train_step(source_sub, target_sub, perturbation_id, gene_input)
            loss = loss_dict["loss"]

            optimizer.zero_grad(set_to_none=True)
            accelerator.backward(loss)
            optimizer.step()
            scheduler.step()

            # EMA update
            with torch.no_grad():
                for ema_p, model_p in zip(ema_model.parameters(), vf.parameters()):
                    ema_p.lerp_(model_p.data, 1 - config.ema_decay)
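            # Tensor.lerp_(end, w) computes self + w * (end - self) in place, so
            # the update above is ema <- ema_decay * ema + (1 - ema_decay) * model,
            # the standard exponential moving average of the weights.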

            # Checkpoint & eval
            if iteration % config.print_every == 0:
                save_path_ = os.path.join(save_path, f"iteration_{iteration}")
                os.makedirs(save_path_, exist_ok=True)
                if accelerator.is_main_process:
                    # Checkpoints store the EMA weights, which are also what we evaluate.
                    save_checkpoint(
                        model=ema_model,
                        optimizer=optimizer,
                        scheduler=scheduler,
                        iteration=iteration,
                        eval_score=None,
                        save_path=save_path_,
                        is_best=False,
                    )
                # Run the full evaluation only on the final checkpoint window.
                if iteration + config.print_every >= config.steps:
                    # Temporarily swap the EMA weights into vf (which denoiser
                    # wraps), evaluate, then restore the training weights.
                    orig_state = copy.deepcopy(vf.state_dict())
                    vf.load_state_dict(ema_model.state_dict())
                    eval_score = test(
                        valid_sampler,
                        denoiser,
                        accelerator,
                        config,
                        vocab,
                        data_manager,
                        batch_size=config.eval_batch_size,
                        path_dir=save_path_,
                    )
                    vf.load_state_dict(orig_state)
                    if accelerator.is_main_process and eval_score is not None:
                        tb_writer.add_scalar("eval/score", eval_score, iteration)

            # Logging
            if accelerator.is_main_process:
                lr = scheduler.get_last_lr()[0]
                csv_writer.writerow([
                    iteration, loss.item(), loss_dict["loss_v"].item(),
                    loss_dict["loss_s"].item(), loss_dict["loss_mmd"].item(),
                    loss_dict["loss_sparse"].item(), loss_dict["loss_volume"].item(),
                    loss_dict["sigma_mean"].item(), loss_dict["sigma_std"].item(), lr,
                ])
                if iteration % 100 == 0:
                    csv_file.flush()
                tb_writer.add_scalar("loss/total", loss.item(), iteration)
                tb_writer.add_scalar("loss/velocity", loss_dict["loss_v"].item(), iteration)
                tb_writer.add_scalar("loss/score", loss_dict["loss_s"].item(), iteration)
                tb_writer.add_scalar("loss/mmd", loss_dict["loss_mmd"].item(), iteration)
                tb_writer.add_scalar("sigma/mean", loss_dict["sigma_mean"].item(), iteration)
                tb_writer.add_scalar("sigma/std", loss_dict["sigma_std"].item(), iteration)
                tb_writer.add_scalar("lr", lr, iteration)

            accelerator.wait_for_everyone()
            pbar.update(1)
            pbar.set_description(
                f"L={loss.item():.4f} v={loss_dict['loss_v'].item():.3f} "
                f"s={loss_dict['loss_s'].item():.3f} σ={loss_dict['sigma_mean'].item():.3f}"
            )
            iteration += 1
            if iteration >= config.steps:
                break

    if accelerator.is_main_process:
        csv_file.close()
        tb_writer.close()
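
# Typical launch (illustrative; the script filename below is a placeholder, and
# tyro exposes SBConfig fields as kebab-case flags):
#   accelerate launch train_sb.py --data-name <dataset>
#   accelerate launch train_sb.py --test-only --checkpoint-path <ckpt_dir>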