"""
Latent layers for grn_svd: LatentEmbedder, LatentDecoderBlock, LatentDecoder.
Adapted from GRN/grn_ccfm/src/model/layers.py for SVD-projected 128-dim latent space.
"""
from typing import Optional

import torch
import torch.nn as nn


class LatentEmbedder(nn.Module):
"""
Projects z_t (B, G, latent_dim) to (B, G, d_model).
When latent_dim == d_model: LayerNorm + Linear (identity-like).
When latent_dim != d_model: Linear bottleneck without LayerNorm
(LayerNorm on dim=1 destroys the signal).
"""
def __init__(self, latent_dim: int = 128, d_model: int = 128):
super().__init__()
if latent_dim == d_model:
self.proj = nn.Sequential(
nn.LayerNorm(latent_dim),
nn.Linear(latent_dim, d_model),
)
else:
self.proj = nn.Sequential(
nn.Linear(latent_dim, d_model),
nn.GELU(),
nn.Linear(d_model, d_model),
)

    def forward(self, z: torch.Tensor) -> torch.Tensor:
"""z: (B, G, latent_dim) -> (B, G, d_model)"""
return self.proj(z)
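
# Usage sketch (illustrative, not from the original repo; B=2, G=16 are assumed):
# with latent_dim != d_model the embedder takes the MLP branch, e.g.
#   emb = LatentEmbedder(latent_dim=64, d_model=128)
#   z = torch.randn(2, 16, 64)    # (B, G, latent_dim)
#   x = emb(z)                    # (B, G, 128)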


class LatentDecoderBlock(nn.Module):
"""
AdaLN-conditioned transformer block for latent decoder head.
6-way modulation: shift/scale/gate for self-attention and MLP.
Copied from CCFM (GRN/grn_ccfm/src/model/layers.py).
"""
    def __init__(self, hidden_size: int, num_heads: int = 4, mlp_ratio: float = 4.0,
                 hidden_size_c: Optional[int] = None):
super().__init__()
hidden_size_c = hidden_size_c or hidden_size
self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.attn = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True)
self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
mlp_hidden = int(hidden_size * mlp_ratio)
self.mlp = nn.Sequential(
nn.Linear(hidden_size, mlp_hidden),
nn.GELU(),
nn.Linear(mlp_hidden, hidden_size),
)
self.adaLN_modulation = nn.Sequential(
nn.SiLU(),
nn.Linear(hidden_size_c, 6 * hidden_size, bias=True),
)

    def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
"""
x: (B, G, hidden_size)
c: (B, hidden_size_c) — conditioning vector (t_expr + t_latent + pert_emb)
"""
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
self.adaLN_modulation(c).chunk(6, dim=1)
)
# Self-attention with AdaLN
h = self.norm1(x)
h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
h = self.attn(h, h, h)[0]
x = x + gate_msa.unsqueeze(1) * h
# MLP with AdaLN
h = self.norm2(x)
h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
h = self.mlp(h)
x = x + gate_mlp.unsqueeze(1) * h
return x
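
# AdaLN mechanics (hedged note; shapes assume hidden_size == H, hidden_size_c == C):
#   c: (B, C) -> adaLN_modulation(c): (B, 6H) -> chunk(6, dim=1): six (B, H) tensors.
#   Each shift/scale/gate is unsqueezed to (B, 1, H) so it broadcasts over the
#   G token axis of x: h * (1 + scale) + shift, then x + gate * h (gated residual).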


class LatentDecoder(nn.Module):
"""
Decodes backbone output (B, G, d_model) to latent velocity (B, G, latent_dim).
Uses AdaLN blocks conditioned on c for timestep/perturbation awareness.
"""
    def __init__(self, d_model: int = 128, latent_dim: int = 128,
                 dh_depth: int = 2, num_heads: int = 4,
                 hidden_size_c: Optional[int] = None):
super().__init__()
hidden_size_c = hidden_size_c or d_model
self.dh_proj = nn.Linear(d_model, d_model)
        # range(dh_depth) is empty when dh_depth == 0, leaving an empty ModuleList
        self.dh_blocks = nn.ModuleList([
            LatentDecoderBlock(d_model, num_heads=num_heads, hidden_size_c=hidden_size_c)
            for _ in range(dh_depth)
        ])
self.final = nn.Sequential(
nn.LayerNorm(d_model),
nn.Linear(d_model, d_model),
nn.GELU(),
nn.Linear(d_model, latent_dim),
)

    def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
"""
x: (B, G, d_model) — backbone output
        c: (B, hidden_size_c) — conditioning vector (hidden_size_c defaults to d_model)
        Returns: (B, G, latent_dim) — predicted latent velocity
"""
h = self.dh_proj(x)
for block in self.dh_blocks:
h = block(h, c)
return self.final(h)
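

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the original pipeline): checks
    # shapes end to end with random tensors. B=2 and G=16 are arbitrary; the
    # dims are the module defaults (latent_dim = d_model = 128).
    B, G, latent_dim, d_model = 2, 16, 128, 128
    z = torch.randn(B, G, latent_dim)
    c = torch.randn(B, d_model)

    x = LatentEmbedder(latent_dim, d_model)(z)
    assert x.shape == (B, G, d_model)

    block = LatentDecoderBlock(hidden_size=d_model, num_heads=4)
    assert block(x, c).shape == (B, G, d_model)

    v = LatentDecoder(d_model=d_model, latent_dim=latent_dim, dh_depth=2)(x, c)
    assert v.shape == (B, G, latent_dim)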