import torch
import torch.nn as nn


class BasicTransformerBlock(nn.Module):
    """
    Transformer block that cross-attends to an external condition, then applies
    self-attention and an MLP, each as a pre-norm residual sub-block.
    """

    def __init__(
        self,
        inner_dim: int,
        cond_dim: int,
        num_heads: int,
        eps: float,
        attn_drop: float = 0.,
        attn_bias: bool = False,
        mlp_ratio: float = 4.,
        mlp_drop: float = 0.,
    ):
        super().__init__()

        self.norm1 = nn.LayerNorm(inner_dim, eps=eps)
        self.cross_attn = nn.MultiheadAttention(
            embed_dim=inner_dim, num_heads=num_heads, kdim=cond_dim, vdim=cond_dim,
            dropout=attn_drop, bias=attn_bias, batch_first=True)
        self.norm2 = nn.LayerNorm(inner_dim, eps=eps)
        self.self_attn = nn.MultiheadAttention(
            embed_dim=inner_dim, num_heads=num_heads,
            dropout=attn_drop, bias=attn_bias, batch_first=True)
        self.norm3 = nn.LayerNorm(inner_dim, eps=eps)
        self.mlp = nn.Sequential(
            nn.Linear(inner_dim, int(inner_dim * mlp_ratio)),
            nn.GELU(),
            nn.Dropout(mlp_drop),
            nn.Linear(int(inner_dim * mlp_ratio), inner_dim),
            nn.Dropout(mlp_drop),
        )

    def forward(self, x, cond):
        # x: [N, L, D] query tokens; cond: [N, L_cond, D_cond] condition tokens
        x = x + self.cross_attn(self.norm1(x), cond, cond)[0]
        before_sa = self.norm2(x)
        x = x + self.self_attn(before_sa, before_sa, before_sa)[0]
        x = x + self.mlp(self.norm3(x))
        return x
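
# Illustrative usage of BasicTransformerBlock (a sketch; the dimensions below are
# arbitrary assumptions, not values from any released config):
#
#   block = BasicTransformerBlock(inner_dim=64, cond_dim=32, num_heads=4, eps=1e-6)
#   x = torch.randn(2, 48, 64)       # [N, L, D] query tokens
#   cond = torch.randn(2, 10, 32)    # [N, L_cond, D_cond] condition tokens
#   out = block(x, cond)             # residual sub-blocks preserve shape: [2, 48, 64]
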

class TriplaneTransformer(nn.Module):
    """
    Transformer with condition that generates a triplane representation.

    Reference:
        Timm: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py#L486
    """
    def __init__(
        self,
        inner_dim: int,
        image_feat_dim: int,
        triplane_low_res: int,
        triplane_high_res: int,
        triplane_dim: int,
        num_layers: int,
        num_heads: int,
        eps: float = 1e-6,
    ):
        super().__init__()

        self.triplane_low_res = triplane_low_res
        # the single 2x deconv below implies triplane_high_res == 2 * triplane_low_res
        self.triplane_high_res = triplane_high_res
        self.triplane_dim = triplane_dim

        # learnable positional embedding doubles as the query tokens: one token per
        # cell of the three low-res planes, scaled by 1/sqrt(inner_dim)
        self.pos_embed = nn.Parameter(torch.randn(1, 3*triplane_low_res**2, inner_dim) * (1. / inner_dim) ** 0.5)
        self.layers = nn.ModuleList([
            BasicTransformerBlock(
                inner_dim=inner_dim, cond_dim=image_feat_dim, num_heads=num_heads, eps=eps)
            for _ in range(num_layers)
        ])
        self.norm = nn.LayerNorm(inner_dim, eps=eps)
        # 2x upsampling from triplane_low_res to triplane_high_res
        self.deconv = nn.ConvTranspose2d(inner_dim, triplane_dim, kernel_size=2, stride=2, padding=0)

    def forward(self, image_feats):
        # image_feats: [N, L_cond, D_cond] flattened image feature tokens
        N = image_feats.shape[0]
        H = W = self.triplane_low_res
        L = 3 * H * W  # total number of plane tokens; matches pos_embed's length

        x = self.pos_embed.repeat(N, 1, 1)  # [N, L, D]
        for layer in self.layers:
            x = layer(x, image_feats)
        x = self.norm(x)

        # separate the three planes and upsample each with the shared deconv
        x = x.view(N, 3, H, W, -1)
        x = torch.einsum('nihwd->indhw', x)     # [3, N, D, H, W]
        x = x.contiguous().view(3*N, -1, H, W)  # [3*N, D, H, W]
        x = self.deconv(x)                      # [3*N, D', H', W']
        x = x.view(3, N, *x.shape[-3:])         # [3, N, D', H', W']
        x = torch.einsum('indhw->nidhw', x)     # [N, 3, D', H', W']
        x = x.contiguous()

        return x
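
if __name__ == "__main__":
    # Minimal smoke test (a sketch; every hyperparameter below is an illustrative
    # assumption, not a value from any released config).
    model = TriplaneTransformer(
        inner_dim=64, image_feat_dim=32,
        triplane_low_res=4, triplane_high_res=8, triplane_dim=16,
        num_layers=2, num_heads=4,
    )
    image_feats = torch.randn(2, 10, 32)  # [N, L_cond, D_cond]
    planes = model(image_feats)
    # the 2x deconv maps 4x4 plane tokens to three 8x8 planes per sample
    assert planes.shape == (2, 3, 16, 8, 8)
    print(planes.shape)  # torch.Size([2, 3, 16, 8, 8])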