"""
ASB layers: AnisotropicSigmaNet and ScoreDecoder.
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .._scdfm_imports import TimestepEmbedder


class AnisotropicSigmaNet(nn.Module):
"""
Predicts per-gene anisotropic diffusion coefficient Οƒ_g(perturbation, t).
Input: pert_emb (B, d_model), t (B,), gene_emb (B, G, d_model)
Output: sigma_g (B, G) in [sigma_min, sigma_max]
Architecture: condition c = pert_emb + t_emb β†’ c + gene_emb β†’ MLP β†’ sigmoid β†’ [min, max]
Does NOT depend on x_t β€” can be called before bridge sampling.
"""
def __init__(
self,
d_model: int = 128,
hidden_dim: int = 256,
num_layers: int = 2,
sigma_min: float = 0.01,
sigma_max: float = 2.0,
sigma_init: float = 0.5,
):
super().__init__()
self.sigma_min = sigma_min
self.sigma_max = sigma_max
self.t_embedder = TimestepEmbedder(d_model)
layers = []
in_dim = d_model
for i in range(num_layers):
layers.append(nn.Linear(in_dim if i == 0 else hidden_dim, hidden_dim))
layers.append(nn.SiLU())
        layers.append(nn.Linear(hidden_dim, 1))  # per-gene scalar head
self.mlp = nn.Sequential(*layers)
        self._init_bias(sigma_init)

    def _init_bias(self, sigma_init):
"""Initialize final bias so sigmoid output maps to sigma_init."""
target = (sigma_init - self.sigma_min) / (self.sigma_max - self.sigma_min)
target = max(min(target, 0.999), 0.001)
bias_val = math.log(target / (1 - target)) # logit
nn.init.constant_(self.mlp[-1].bias, bias_val)
nn.init.zeros_(self.mlp[-1].weight)
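
    # Worked example of the zero-init trick above, using the default
    # hyperparameters (a sketch; the numbers are illustrative, not from the source):
    #   sigma_init=0.5, sigma_min=0.01, sigma_max=2.0
    #   target   = (0.5 - 0.01) / (2.0 - 0.01) ≈ 0.2462
    #   bias_val = log(0.2462 / 0.7538)        ≈ -1.119
    # Because the final Linear weight is zeroed, raw == bias_val for every
    # gene, so sigma = 0.01 + 1.99 * sigmoid(-1.119) ≈ 0.5 at initialization:
    # the net starts as a flat sigma_init schedule and learns per-gene
    # deviations from there.
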
def forward(self, pert_emb: torch.Tensor, t: torch.Tensor,
gene_emb: torch.Tensor) -> torch.Tensor:
"""
Args:
pert_emb: (B, d_model) perturbation embedding
t: (B,) timestep
gene_emb: (B, G, d_model) gene embeddings
Returns:
sigma_g: (B, G) in [sigma_min, sigma_max]
"""
t_emb = self.t_embedder(t) # (B, d_model)
c = pert_emb + t_emb # (B, d_model)
c_exp = c.unsqueeze(1).expand_as(gene_emb) # (B, G, d_model)
h = gene_emb + c_exp # (B, G, d_model)
raw = self.mlp(h).squeeze(-1) # (B, G)
sigma = self.sigma_min + (self.sigma_max - self.sigma_min) * torch.sigmoid(raw)
return sigma
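
# Downstream, a per-gene sigma typically scales the injected noise of the
# bridge SDE step, which is why sigma_g is shaped (B, G). A hedged sketch
# (NOT this repo's sampler, just an illustrative Euler-Maruyama update):
#   x_next = x_t + drift * dt + sigma_g * math.sqrt(dt) * torch.randn_like(x_t)
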
class ScoreDecoder(nn.Module):
"""
Decodes backbone hidden states to score function prediction.
Input: backbone output (B, G, d_model), pert_emb (B, d_model)
Output: score prediction (B, G)
"""
def __init__(self, d_model: int = 128, depth: int = 2):
super().__init__()
self.proj = nn.Linear(d_model * 2, d_model) # concat with pert_emb
blocks = []
for _ in range(depth):
blocks.extend([
nn.LayerNorm(d_model),
nn.Linear(d_model, d_model),
nn.SiLU(),
])
blocks.append(nn.Linear(d_model, 1))
        self.mlp = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor, pert_emb: torch.Tensor) -> torch.Tensor:
"""
Args:
x: (B, G, d_model) backbone output
pert_emb: (B, d_model) perturbation embedding
Returns:
score: (B, G)
"""
x_with_pert = torch.cat(
[x, pert_emb[:, None, :].expand(-1, x.size(1), -1)], dim=-1
)
h = self.proj(x_with_pert)
return self.mlp(h).squeeze(-1)
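

if __name__ == "__main__":
    # Minimal shape-check sketch, not part of the model code. It assumes
    # TimestepEmbedder maps (B,) -> (B, d_model), as forward() above requires,
    # and must be run as a module inside the package so the relative import
    # resolves.
    B, G, d_model = 4, 16, 128
    pert_emb = torch.randn(B, d_model)
    t = torch.rand(B)
    gene_emb = torch.randn(B, G, d_model)

    sigma_net = AnisotropicSigmaNet(d_model=d_model)
    sigma = sigma_net(pert_emb, t, gene_emb)
    assert sigma.shape == (B, G)
    # The zero-initialized head pins sigma to sigma_init (0.5) at the start.
    assert torch.allclose(sigma, torch.full_like(sigma, 0.5), atol=1e-4)

    decoder = ScoreDecoder(d_model=d_model)
    score = decoder(torch.randn(B, G, d_model), pert_emb)
    assert score.shape == (B, G)
    print("shape checks passed:", tuple(sigma.shape), tuple(score.shape))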