| """ |
| RegFMModel: scDFM backbone + RegulatoryHead + VelocityGate. |
| |
| Parameter names for backbone components match ori_scDFM exactly, |
| enabling direct weight loading from scDFM baseline checkpoints. |
| """ |

from typing import Optional

import torch
import torch.nn as nn

from src._scdfm_imports import (
    GeneEncoder,
    ContinuousValueEncoder,
    GeneadaLN,
    BatchLabelEncoder,
    TimestepEmbedder,
    ExprDecoder,
    DiffPerceiverBlock,
    DifferentialTransformerBlock,
    PerceiverBlock,
)
from src.model.layers import RegulatoryHead, VelocityGate
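

# The module docstring promises backbone parameter names identical to
# ori_scDFM, so baseline weights can be loaded directly. A minimal sketch of
# that step (the helper name is ours; it assumes the checkpoint is a plain
# state_dict saved with torch.save):
def load_scdfm_backbone(model: nn.Module, ckpt_path: str) -> nn.Module:
    """Load matching scDFM backbone weights; RegFM-only modules are skipped."""
    state = torch.load(ckpt_path, map_location="cpu")
    # strict=False tolerates keys that exist only on one side; here the
    # RegFM heads (reg_head, velocity_gate) are absent from the baseline.
    incompatible = model.load_state_dict(state, strict=False)
    # incompatible.missing_keys should list only the RegFM-specific heads.
    return model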


class RegFMModel(nn.Module):
    """
    Regulatory Flow Matching model.

    Forward returns:
        v: (B, G) — final velocity = α·v_reg + (1-α)·v_int
        R: (B, G, G) — predicted interaction matrix (for L_reg supervision)
    """

    def __init__(
        self,
        ntoken: int = 512,
        d_model: int = 128,
        nhead: int = 8,
        d_hid: int = 512,  # kept for scDFM signature compatibility (unused here)
        nlayers: int = 4,
        dropout: float = 0.1,
        fusion_method: str = "differential_perceiver",
        perturbation_function: str = "crisper",
        use_perturbation_interaction: bool = True,
        mask_path: Optional[str] = None,
        # RegFM additions (not part of the ori_scDFM signature):
        d_r: int = 32,
        gate_init_bias: float = -3.0,
    ):
        super().__init__()
        self.perturbation_function = perturbation_function
        self._gate_init_bias = gate_init_bias

        # scDFM backbone: gene / expression-value encoders and fusion MLP.
        self.encoder = GeneEncoder(
            ntoken, d_model,
            use_perturbation_interaction=use_perturbation_interaction,
            mask_path=mask_path,
        )
        self.value_encoder_1 = ContinuousValueEncoder(d_model, dropout)
        self.value_encoder_2 = ContinuousValueEncoder(d_model, dropout)
        self.fusion_layer = nn.Sequential(
            nn.Linear(2 * d_model, d_model),
            nn.GELU(),
            nn.Linear(d_model, d_model),
            nn.LayerNorm(d_model),
        )
        self.t_embedder = TimestepEmbedder(d_model)
        self.perturbation_embedder = BatchLabelEncoder(ntoken, d_model)

        # Fusion trunk: stack of attention blocks selected by fusion_method.
        if fusion_method == "differential_perceiver":
            self.blocks = nn.ModuleList(
                [DiffPerceiverBlock(d_model, nhead, i, mlp_ratio=4.0) for i in range(nlayers)]
            )
        elif fusion_method == "differential_transformer":
            self.blocks = nn.ModuleList(
                [DifferentialTransformerBlock(d_model, nhead, i, mlp_ratio=4.0) for i in range(nlayers)]
            )
        elif fusion_method == "perceiver":
            self.blocks = nn.ModuleList(
                [PerceiverBlock(d_model, d_model, heads=nhead, mlp_ratio=4.0, dropout=dropout) for _ in range(nlayers)]
            )
        else:
            raise ValueError(f"Unknown fusion_method: {fusion_method}")

        # Per-layer gene conditioning (adaLN) and perturbation adapters.
        self.gene_adaLN = nn.ModuleList(
            [GeneadaLN(d_model, dropout) for _ in range(nlayers)]
        )
        self.adapter_layer = nn.ModuleList([
            nn.Sequential(
                nn.Linear(2 * d_model, d_model),
                nn.LeakyReLU(),
                nn.Dropout(dropout),
                nn.Linear(d_model, d_model),
                nn.LeakyReLU(),
            )
            for _ in range(nlayers)
        ])

        # Masked-perturbation embedding and projection head ("predict_p" mode).
        self.p_mask_embed = nn.Parameter(torch.randn(d_model))
        self.p_head = nn.Sequential(nn.LayerNorm(d_model), nn.Linear(d_model, d_model))

        # Expression decoder producing the integrator velocity v_int.
        self.final_layer = ExprDecoder(d_model, explicit_zero_prob=False, use_batch_labels=True)

        # RegFM additions: regulatory head (v_reg, R) and velocity gate (α).
        self.reg_head = RegulatoryHead(d_model, d_r)
        self.velocity_gate = VelocityGate(d_model, gate_init_bias)

        self.initialize_weights()

    def initialize_weights(self):
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

        # Zero the gate's output layer and bias it negative so α starts small
        # and the model initially relies on the integrator branch v_int.
        nn.init.zeros_(self.velocity_gate.mlp[-1].weight)
        nn.init.constant_(self.velocity_gate.mlp[-1].bias, self._gate_init_bias)

    def get_perturbation_emb(self, perturbation_id=None, perturbation_emb=None,
                             cell_1=None, use_mask: bool = False):
        """Identical to scDFM model.get_perturbation_emb for compatibility."""
        if use_mask:
            B = cell_1.size(0)
            return self.p_mask_embed[None, :].expand(B, -1).to(cell_1.device, dtype=cell_1.dtype)

        # At most one of perturbation_id / perturbation_emb may be provided.
        assert perturbation_emb is None or perturbation_id is None
        if perturbation_id is not None:
            if self.perturbation_function == "crisper":  # spelling matches ori_scDFM
                perturbation_emb = self.encoder(perturbation_id)
            else:
                perturbation_emb = self.perturbation_embedder(perturbation_id)
            perturbation_emb = perturbation_emb.mean(1)
        elif perturbation_emb is not None:
            perturbation_emb = perturbation_emb.to(cell_1.device, dtype=cell_1.dtype)
            if perturbation_emb.dim() == 1:
                perturbation_emb = perturbation_emb.unsqueeze(0)
            if perturbation_emb.size(0) == 1:
                perturbation_emb = perturbation_emb.expand(cell_1.shape[0], -1).contiguous()
            perturbation_emb = self.perturbation_embedder.enc_norm(perturbation_emb)
        return perturbation_emb

    def forward(self, gene_id, cell_1, t, cell_2,
                perturbation_id=None, gene_id_all=None,
                perturbation_emb=None, mode="predict_y"):
        """
        Args:
            gene_id: (B, G) vocab-encoded gene IDs
            cell_1: (B, G) noised target expression x_t
            t: (B,) or scalar — flow timestep
            cell_2: (B, G) source/control expression
            perturbation_id: (B, 2) perturbation condition IDs
            gene_id_all: unused (kept for API compatibility)
            perturbation_emb: optional precomputed perturbation embedding
            mode: "predict_y" (default) or "predict_p"
        Returns:
            if mode == "predict_y": (v, R)
                v: (B, G) — gated velocity: α·v_reg + (1-α)·v_int
                R: (B, G, G) — predicted interaction matrix
            if mode == "predict_p": (B, d_model)
        """
        # Accept a Python scalar or 0-dim tensor for t and broadcast to (B,).
        if not torch.is_tensor(t):
            t = torch.as_tensor(t, device=cell_1.device, dtype=cell_1.dtype)
        if t.dim() == 0:
            t = t.repeat(cell_1.size(0))

        # Embed gene IDs and both expression states, then fuse per gene.
        gene_emb = self.encoder(gene_id)
        value_emb_1 = self.value_encoder_1(cell_1) + gene_emb
        value_emb_2 = self.value_encoder_2(cell_2) + gene_emb
        value_emb = torch.cat([value_emb_1, value_emb_2], dim=-1)
        value_emb = self.fusion_layer(value_emb)

        t_emb = self.t_embedder(t)
        pert_emb = self.get_perturbation_emb(perturbation_id, perturbation_emb, cell_1)

        # Trunk: per-layer gene conditioning, perturbation injection, attention.
        x = value_emb
        for i, block in enumerate(self.blocks):
            x = self.gene_adaLN[i](gene_emb, x)
            # Broadcast the perturbation embedding across all gene tokens.
            pert_exp = pert_emb[:, None, :].expand(-1, x.size(1), -1)
            x = torch.cat([x, pert_exp], dim=-1)
            x = self.adapter_layer[i](x)
            x = block(x, value_emb_2, t_emb)

        h = x

        # Auxiliary mode: pooled representation through the perturbation head.
        if mode == "predict_p":
            return self.p_head(h.mean(dim=1))

        # Integrator branch: decode v_int from h conditioned on perturbation.
        x_dec = torch.cat([h, pert_emb[:, None, :].expand(-1, h.size(1), -1)], dim=-1)
        v_int = self.final_layer(x_dec)["pred"]

        # Regulatory branch: per-gene velocity plus interaction matrix R.
        v_reg, R = self.reg_head(h)

        # Gate the two velocity fields: v = α·v_reg + (1-α)·v_int.
        alpha = self.velocity_gate(h, pert_emb, t_emb)
        v = alpha * v_reg + (1.0 - alpha) * v_int

        return v, R
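

# Hedged usage sketch (ours, not part of the original module): a shape-only
# smoke test with toy sizes, assuming the scDFM components imported above
# are available in the environment.
if __name__ == "__main__":
    B, G = 2, 16
    model = RegFMModel(ntoken=32, d_model=64, nhead=8, nlayers=2)
    gene_id = torch.randint(0, 32, (B, G))
    cell_1 = torch.randn(B, G)   # noised target expression x_t
    cell_2 = torch.randn(B, G)   # source / control expression
    t = torch.rand(B)            # flow timesteps
    pert_id = torch.randint(0, 32, (B, 2))
    v, R = model(gene_id, cell_1, t, cell_2, perturbation_id=pert_id)
    print(v.shape, R.shape)  # expected: (B, G) and (B, G, G)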