from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..modules.utils import convert_module_to_f16, convert_module_to_f32, convert_module_to_bf16
from ..modules.transformer import AbsolutePositionEmbedder, ModulatedTransformerCrossBlock, ModulatedTransformerCrossBlock_woT
from ..modules.spatial import patchify, unpatchify


class TimestepEmbedder(nn.Module):
| """ |
| Embeds scalar timesteps into vector representations. |
| """ |
| def __init__(self, hidden_size, frequency_embedding_size=256): |
| super().__init__() |
| self.mlp = nn.Sequential( |
| nn.Linear(frequency_embedding_size, hidden_size, bias=True), |
| nn.SiLU(), |
| nn.Linear(hidden_size, hidden_size, bias=True), |
| ) |
| self.frequency_embedding_size = frequency_embedding_size |

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        """
        Create sinusoidal timestep embeddings.

        Args:
            t: a 1-D Tensor of N indices, one per batch element.
                These may be fractional.
            dim: the dimension of the output.
            max_period: controls the minimum frequency of the embeddings.

        Returns:
            an (N, D) Tensor of positional embeddings.
        """
        half = dim // 2
        freqs = torch.exp(
            -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
        ).to(device=t.device)
        args = t[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
        return embedding

    def forward(self, t):
        t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
        t_emb = self.mlp(t_freq)
        return t_emb

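
# Quick shape check for TimestepEmbedder (illustrative only; the values below are arbitrary):
#   emb = TimestepEmbedder(hidden_size=1024)
#   t = torch.rand(8) * 1000   # timesteps may be fractional
#   emb(t).shape               # -> torch.Size([8, 1024])
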
class SparseStructureFlowModel(nn.Module):
    """
    Transformer backbone for sparse-structure flow matching: the input volume is
    patchified into tokens, processed by transformer blocks that are modulated by a
    timestep embedding and cross-attend to external condition tokens, and finally
    unpatchified back to a volume of the same resolution.
    """
    def __init__(
        self,
        resolution: int,
        in_channels: int,
        model_channels: int,
        cond_channels: int,
        out_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4,
        patch_size: int = 2,
        pe_mode: Literal["ape", "rope"] = "ape",
        use_fp16: bool = False,
        use_bf16: bool = False,
        use_checkpoint: bool = False,
        share_mod: bool = False,
        qk_rms_norm: bool = False,
        qk_rms_norm_cross: bool = False,
    ):
        super().__init__()
        self.resolution = resolution
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.cond_channels = cond_channels
        self.out_channels = out_channels
        self.num_blocks = num_blocks
        self.num_heads = num_heads or model_channels // num_head_channels
        self.mlp_ratio = mlp_ratio
        self.patch_size = patch_size
        self.pe_mode = pe_mode
        self.use_fp16 = use_fp16
        self.use_bf16 = use_bf16
        self.use_checkpoint = use_checkpoint
        self.share_mod = share_mod
        self.qk_rms_norm = qk_rms_norm
        self.qk_rms_norm_cross = qk_rms_norm_cross
        if use_fp16:
            self.dtype = torch.float16
        elif use_bf16:
            self.dtype = torch.bfloat16
        else:
            self.dtype = torch.float32

        self.t_embedder = TimestepEmbedder(model_channels)
        if share_mod:
            # Shared adaLN head: maps the timestep embedding to the 6 * model_channels
            # modulation parameters reused by every block.
            self.adaLN_modulation = nn.Sequential(
                nn.SiLU(),
                nn.Linear(model_channels, 6 * model_channels, bias=True)
            )

        if pe_mode == "ape":
            # Precompute absolute position embeddings for every patch position on the
            # (resolution // patch_size)^3 grid; stored as a buffer, not a parameter.
            self.pos_embedder = AbsolutePositionEmbedder(model_channels, 3)
            coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution // patch_size] * 3], indexing='ij')
            coords = torch.stack(coords, dim=-1).reshape(-1, 3)
            pos_emb = self.pos_embedder(coords)
            self.register_buffer("pos_emb", pos_emb)

        self.input_layer = nn.Linear(in_channels * patch_size**3, model_channels)

        self.blocks = nn.ModuleList([
            ModulatedTransformerCrossBlock(
                model_channels,
                cond_channels,
                num_heads=self.num_heads,
                mlp_ratio=self.mlp_ratio,
                attn_mode='full',
                use_checkpoint=self.use_checkpoint,
                use_rope=(pe_mode == "rope"),
                share_mod=share_mod,
                qk_rms_norm=self.qk_rms_norm,
                qk_rms_norm_cross=self.qk_rms_norm_cross,
            )
            for _ in range(num_blocks)
        ])

        self.out_layer = nn.Linear(model_channels, out_channels * patch_size**3)

        self.initialize_weights()
        if use_fp16:
            self.convert_to_fp16()
        elif use_bf16:
            self.convert_to_bf16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.use_fp16 = True
        self.use_bf16 = False
        self.dtype = torch.float16
        self.blocks.apply(convert_module_to_f16)

    def convert_to_bf16(self) -> None:
        """
        Convert the torso of the model to bfloat16.
        """
        self.use_fp16 = False
        self.use_bf16 = True
        self.dtype = torch.bfloat16
        self.blocks.apply(convert_module_to_bf16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.use_fp16 = False
        self.use_bf16 = False
        self.dtype = torch.float32
        self.blocks.apply(convert_module_to_f32)

    def initialize_weights(self) -> None:
        # Initialize transformer layers:
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

        # Initialize timestep embedding MLP:
        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)

        # Zero-out adaLN modulation layers:
        if self.share_mod:
            nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
            nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
        else:
            for block in self.blocks:
                nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
                nn.init.constant_(block.adaLN_modulation[-1].bias, 0)

        # Zero-out output layer:
        nn.init.constant_(self.out_layer.weight, 0)
        nn.init.constant_(self.out_layer.bias, 0)

    def forward(self, x: torch.Tensor, t: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        assert [*x.shape] == [x.shape[0], self.in_channels, *[self.resolution] * 3], \
            f"Input shape mismatch, got {x.shape}, expected {[x.shape[0], self.in_channels, *[self.resolution] * 3]}"

        # Patchify the volume into a token sequence:
        # (B, C, R, R, R) -> (B, L, C * patch_size^3), with L = (R // patch_size)^3.
        h = patchify(x, self.patch_size)
        h = h.view(*h.shape[:2], -1).permute(0, 2, 1).contiguous()

        h = self.input_layer(h)
        h = h + self.pos_emb[None]
        t_emb = self.t_embedder(t)
        if self.share_mod:
            t_emb = self.adaLN_modulation(t_emb)
        t_emb = t_emb.type(self.dtype)
        h = h.type(self.dtype)
        if isinstance(cond, list):
            # Multiple condition sets: run the full block stack once per condition tensor.
            for i in range(len(cond)):
                cond_tmp = cond[i].type(self.dtype)
                for block in self.blocks:
                    h = block(h, t_emb, cond_tmp)
        else:
            cond = cond.type(self.dtype)
            for block in self.blocks:
                h = block(h, t_emb, cond)
        h = h.type(x.dtype)
        h = F.layer_norm(h, h.shape[-1:])
        h = self.out_layer(h)

        # Reshape tokens back to a grid and unpatchify:
        # (B, L, out_channels * patch_size^3) -> (B, out_channels, R, R, R).
        h = h.permute(0, 2, 1).view(h.shape[0], h.shape[2], *[self.resolution // self.patch_size] * 3)
        h = unpatchify(h, self.patch_size).contiguous()

        return h

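
# Shape sketch for SparseStructureFlowModel (illustrative values, not necessarily the
# configuration used elsewhere in this repo):
#   model = SparseStructureFlowModel(resolution=16, in_channels=8, model_channels=1024,
#                                    cond_channels=1024, out_channels=8, num_blocks=24)
#   x = torch.randn(2, 8, 16, 16, 16)   # latent volume (B, in_channels, R, R, R)
#   t = torch.rand(2) * 1000            # one timestep per sample
#   cond = torch.randn(2, 77, 1024)     # condition tokens (B, L_cond, cond_channels)
#   model(x, t, cond).shape             # -> torch.Size([2, 8, 16, 16, 16])
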
class ModulatedMultiViewCond(nn.Module):
    """
    Condenses multi-view conditioning features into a fixed set of learnable tokens:
    the tokens cross-attend to aggregated features taken from several intermediate
    layers of the conditioning backbone, one cross block per selected layer.
    """
    def __init__(
        self,
        channels: int,
        ctx_channels: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        attn_mode: Literal["full", "windowed"] = "full",
        window_size: Optional[int] = None,
        shift_window: Optional[Tuple[int, int, int]] = None,
        use_checkpoint: bool = False,
        use_rope: bool = False,
        qk_rms_norm: bool = False,
        qk_rms_norm_cross: bool = False,
        qkv_bias: bool = True,
        share_mod: bool = False,
        num_init_tokens: int = 4096,
        dtype: Optional[torch.dtype] = torch.float32,
        use_fp16: bool = False,
    ):
        super().__init__()
        # One cross-attention block per intermediate layer listed in `intermediate_layer_idx`.
        self.cond_blocks = nn.ModuleList([
            ModulatedTransformerCrossBlock_woT(
                channels,
                ctx_channels,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                attn_mode=attn_mode,
                use_checkpoint=use_checkpoint,
                use_rope=use_rope,
                share_mod=share_mod,
                qk_rms_norm=qk_rms_norm,
                qk_rms_norm_cross=qk_rms_norm_cross,
            )
            for _ in range(4)
        ])
        self.use_fp16 = use_fp16
        if use_fp16:
            self.dtype = torch.float16
        else:
            self.dtype = dtype
        # Learnable query tokens that collect the multi-view conditioning information.
        self.multiview_cond_tokens = nn.Parameter(torch.randn(1, num_init_tokens, channels).to(dtype))
        nn.init.normal_(self.multiview_cond_tokens, std=1e-6)
        self.intermediate_layer_idx = [4, 11, 17, 23]
        if use_fp16:
            self.convert_to_fp16()

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.use_fp16 = True
        self.dtype = torch.float16
        self.cond_blocks.apply(convert_module_to_f16)
        self.multiview_cond_tokens = nn.Parameter(self.multiview_cond_tokens.data.to(self.dtype))

    def forward(self, aggregated_tokens_list: List, image_cond: torch.Tensor):
        b = aggregated_tokens_list[0].shape[0]
        patch_start_idx = 5
        idx = 0
        cond = self.multiview_cond_tokens.repeat(b, 1, 1)
        for layer_idx in self.intermediate_layer_idx:
            # Keep only the patch tokens; the first `patch_start_idx` tokens of each view are skipped.
            x = aggregated_tokens_list[layer_idx][:, :, patch_start_idx:]
            # Concatenate the aggregated tokens (2048-dim) with the image condition features (1024-dim)
            # along the channel dimension, then refine the learnable tokens by cross-attention.
            x = torch.cat([x.reshape(b, -1, 2048), image_cond.reshape(b, -1, 1024)], dim=-1).to(self.dtype)
            cond = self.cond_blocks[idx](cond, x)
            idx = idx + 1
        return cond
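
# Shape assumptions for ModulatedMultiViewCond.forward (inferred from the reshapes above,
# not guaranteed by this file):
#   aggregated_tokens_list[i]: (B, V, N, 2048) intermediate tokens from a multi-view
#       feature extractor; the first `patch_start_idx` tokens of each view are skipped.
#   image_cond: per-view image features that flatten to (B, V * (N - patch_start_idx), 1024),
#       so the channel-wise concatenation produces ctx_channels = 2048 + 1024 = 3072.
#   Returns conditioning tokens of shape (B, num_init_tokens, channels).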