import torch
import torch.nn as nn

class ModLN(nn.Module):
    """
    Modulation with adaLN.

    References:
    DiT: https://github.com/facebookresearch/DiT/blob/main/models.py#L101
    """
    def __init__(self, inner_dim: int, mod_dim: int, eps: float):
        super().__init__()
        self.norm = nn.LayerNorm(inner_dim, eps=eps)
        self.mlp = nn.Sequential(
            nn.SiLU(),
            nn.Linear(mod_dim, inner_dim * 2),
        )
    @staticmethod
    def modulate(x, shift, scale):
        # x: [N, L, D]
        # shift, scale: [N, D]
        return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
    def forward(self, x, cond):
        shift, scale = self.mlp(cond).chunk(2, dim=-1)  # [N, D] each
        return self.modulate(self.norm(x), shift, scale)  # [N, L, D]

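# Usage sketch (the dimensions below are illustrative, not from the original
# configuration): LayerNorm the tokens, then shift/scale them per-channel with
# parameters predicted from the conditioning vector.
#   mod_ln = ModLN(inner_dim=512, mod_dim=128, eps=1e-6)
#   x = torch.randn(2, 64, 512)   # [N, L, D] token sequence
#   cond = torch.randn(2, 128)    # [N, D_mod] modulation vector
#   y = mod_ln(x, cond)           # [N, L, D], same shape as x
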
class ConditionModulationBlock(nn.Module):
    """
    Transformer block that takes in a cross-attention condition and another modulation vector applied to sub-blocks.
    """
    # The block contains a cross-attention layer, a self-attention layer, and an MLP,
    # each preceded by a modulated LayerNorm (ModLN).
    def __init__(self, inner_dim: int, cond_dim: int, mod_dim: int, num_heads: int, eps: float,
                 attn_drop: float = 0., attn_bias: bool = False,
                 mlp_ratio: float = 4., mlp_drop: float = 0.):
        super().__init__()
        self.norm1 = ModLN(inner_dim, mod_dim, eps)
        self.cross_attn = nn.MultiheadAttention(
            embed_dim=inner_dim, num_heads=num_heads, kdim=cond_dim, vdim=cond_dim,
            dropout=attn_drop, bias=attn_bias, batch_first=True)
        self.norm2 = ModLN(inner_dim, mod_dim, eps)
        self.self_attn = nn.MultiheadAttention(
            embed_dim=inner_dim, num_heads=num_heads,
            dropout=attn_drop, bias=attn_bias, batch_first=True)
        self.norm3 = ModLN(inner_dim, mod_dim, eps)
        self.mlp = nn.Sequential(
            nn.Linear(inner_dim, int(inner_dim * mlp_ratio)),
            nn.GELU(),
            nn.Dropout(mlp_drop),
            nn.Linear(int(inner_dim * mlp_ratio), inner_dim),
            nn.Dropout(mlp_drop),
        )
    def forward(self, x, cond, mod):
        # x: [N, L, D]
        # cond: [N, L_cond, D_cond]
        # mod: [N, D_mod]
        x = x + self.cross_attn(self.norm1(x, mod), cond, cond, need_weights=False)[0]
        before_sa = self.norm2(x, mod)
        x = x + self.self_attn(before_sa, before_sa, before_sa, need_weights=False)[0]
        x = x + self.mlp(self.norm3(x, mod))
        return x

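# Usage sketch for a single block (illustrative dimensions): triplane tokens
# attend to image features via cross-attention, then to themselves, with every
# LayerNorm modulated by the camera embedding.
#   block = ConditionModulationBlock(inner_dim=512, cond_dim=768, mod_dim=128,
#                                    num_heads=8, eps=1e-6)
#   x = torch.randn(2, 3072, 512)    # [N, L, D] triplane tokens
#   cond = torch.randn(2, 196, 768)  # [N, L_cond, D_cond] image features
#   mod = torch.randn(2, 128)        # [N, D_mod] camera embedding
#   y = block(x, cond, mod)          # [N, L, D]
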
class TriplaneTransformer(nn.Module):
    """
    Transformer with condition and modulation that generates a triplane representation.

    Reference:
    Timm: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py#L486
    """
    def __init__(self, inner_dim: int, image_feat_dim: int, camera_embed_dim: int,
                 triplane_low_res: int, triplane_high_res: int, triplane_dim: int,
                 num_layers: int, num_heads: int,
                 eps: float = 1e-6):
        super().__init__()

        # attributes
        self.triplane_low_res = triplane_low_res
        self.triplane_high_res = triplane_high_res
        self.triplane_dim = triplane_dim

        # modules
        # learnable positional embedding for 3 planes of low_res**2 tokens each,
        # initialized with std 1/sqrt(inner_dim)
        self.pos_embed = nn.Parameter(torch.randn(1, 3*triplane_low_res**2, inner_dim) * (1. / inner_dim) ** 0.5)
        self.layers = nn.ModuleList([
            ConditionModulationBlock(
                inner_dim=inner_dim, cond_dim=image_feat_dim, mod_dim=camera_embed_dim, num_heads=num_heads, eps=eps)
            for _ in range(num_layers)
        ])
        self.norm = nn.LayerNorm(inner_dim, eps=eps)
        # kernel_size=2, stride=2 doubles each plane's resolution, so
        # triplane_high_res is expected to be 2 * triplane_low_res
        self.deconv = nn.ConvTranspose2d(inner_dim, triplane_dim, kernel_size=2, stride=2, padding=0)

    def forward(self, image_feats, camera_embeddings):
        # image_feats: [N, L_cond, D_cond]
        # camera_embeddings: [N, D_mod]

        assert image_feats.shape[0] == camera_embeddings.shape[0], \
            f"Mismatched batch size: {image_feats.shape[0]} vs {camera_embeddings.shape[0]}"

        N = image_feats.shape[0]
        H = W = self.triplane_low_res
        L = 3 * H * W

        x = self.pos_embed.repeat(N, 1, 1)  # [N, L, D]
        for layer in self.layers:
            x = layer(x, image_feats, camera_embeddings)
        x = self.norm(x)

        # separate each plane and apply deconv
        x = x.view(N, 3, H, W, -1)
        x = torch.einsum('nihwd->indhw', x)  # [3, N, D, H, W]
        x = x.contiguous().view(3*N, -1, H, W)  # [3*N, D, H, W]
        x = self.deconv(x)  # [3*N, D', H', W']
        x = x.view(3, N, *x.shape[-3:])  # [3, N, D', H', W']
        x = torch.einsum('indhw->nidhw', x)  # [N, 3, D', H', W']
        x = x.contiguous()

        assert self.triplane_high_res == x.shape[-2], \
            f"Output triplane resolution does not match with expected: {x.shape[-2]} vs {self.triplane_high_res}"
        assert self.triplane_dim == x.shape[-3], \
            f"Output triplane dimension does not match with expected: {x.shape[-3]} vs {self.triplane_dim}"

        return x
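

# Minimal smoke test; a sketch only, with illustrative dimensions rather than
# the original configuration. triplane_high_res must be 2 * triplane_low_res
# because the deconv upsamples each plane by a factor of 2.
if __name__ == "__main__":
    model = TriplaneTransformer(
        inner_dim=512, image_feat_dim=768, camera_embed_dim=128,
        triplane_low_res=32, triplane_high_res=64, triplane_dim=40,
        num_layers=2, num_heads=8,
    )
    image_feats = torch.randn(2, 196, 768)   # [N, L_cond, D_cond]
    camera_embeddings = torch.randn(2, 128)  # [N, D_mod]
    planes = model(image_feats, camera_embeddings)
    print(planes.shape)  # torch.Size([2, 3, 40, 64, 64])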