import math
import numpy as np
from inspect import isfunction
from typing import Optional, Any, List

import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat

from diffusers.configuration_utils import ConfigMixin
from diffusers.models.modeling_utils import ModelMixin

import xformers
import xformers.ops

from kiui.cam import orbit_camera


def get_camera(
    num_frames, elevation=15, azimuth_start=0, azimuth_span=360, blender_coord=True, extra_view=False,
):
    """
    Generate a ring of camera poses around the object.
    :param num_frames: number of views, evenly spaced in azimuth.
    :param elevation: camera elevation in degrees.
    :param extra_view: if True, append one all-zero pose as a placeholder view.
    :return: a [num_frames(+1) x 16] Tensor of flattened 4x4 camera poses.
    """
    angle_gap = azimuth_span / num_frames
    cameras = []
    for azimuth in np.arange(azimuth_start, azimuth_span + azimuth_start, angle_gap):

        # kiui's orbit_camera uses the opposite elevation sign, hence the negation
        pose = orbit_camera(-elevation, azimuth, radius=1)

        # convert to Blender's world coordinates: negate z, then swap the y and z rows
        if blender_coord:
            pose[2] *= -1
            pose[[1, 2]] = pose[[2, 1]]

        cameras.append(pose.flatten())

    if extra_view:
        cameras.append(np.zeros_like(cameras[0]))

    return torch.from_numpy(np.stack(cameras, axis=0)).float()
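

# Minimal usage sketch (values assumed, not a fixed config): four views on a
# ring at 0 degrees elevation, matching what MultiViewUNetModel.forward below
# expects as its `camera` argument:
#   cameras = get_camera(num_frames=4, elevation=0)
#   cameras.shape  # torch.Size([4, 16])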


def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :param repeat_only: if True, skip the sinusoids and just tile the raw
                        timesteps across the embedding dimension.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    if not repeat_only:
        half = dim // 2
        freqs = torch.exp(
            -math.log(max_period)
            * torch.arange(start=0, end=half, dtype=torch.float32)
            / half
        ).to(device=timesteps.device)
        args = timesteps[:, None] * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:  # pad odd dims with a zero column
            embedding = torch.cat(
                [embedding, torch.zeros_like(embedding[:, :1])], dim=-1
            )
    else:
        embedding = repeat(timesteps, "b -> b d", d=dim)

    return embedding
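

# Sketch: timestep_embedding(torch.tensor([0.0, 500.0]), dim=320) returns a
# [2, 320] tensor whose first 160 columns are cosines and last 160 are sines.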


def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module


def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")


def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")


def default(val, d):
    """
    Return val if it is not None, otherwise d (called if it is a function).
    """
    if val is not None:
        return val
    return d() if isfunction(d) else d


class GEGLU(nn.Module):
    # gated GELU variant from "GLU Variants Improve Transformer" (Shazeer, 2020)
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = (
            nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
            if not glu
            else GEGLU(dim, inner_dim)
        )

        self.net = nn.Sequential(
            project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out)
        )

    def forward(self, x):
        return self.net(x)
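

# Sketch (sizes assumed): FeedForward(dim=320, glu=True) maps [B, L, 320]
# tensors through a GEGLU expansion to 4 * 320 hidden units and back:
#   ff = FeedForward(320, glu=True)
#   ff(torch.randn(2, 77, 320)).shape  # torch.Size([2, 77, 320])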


class MemoryEfficientCrossAttention(nn.Module):
    """
    xformers-backed cross-attention with optional image-prompt (IP) tokens.
    When ip_dim > 0, the last ip_dim context tokens get their own key/value
    projections and their attention output is blended in with weight ip_weight.
    """

    def __init__(
        self,
        query_dim,
        context_dim=None,
        heads=8,
        dim_head=64,
        dropout=0.0,
        ip_dim=0,
        ip_weight=1,
    ):
        super().__init__()

        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.ip_dim = ip_dim
        self.ip_weight = ip_weight

        if self.ip_dim > 0:
            self.to_k_ip = nn.Linear(context_dim, inner_dim, bias=False)
            self.to_v_ip = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
        )
        self.attention_op: Optional[Any] = None

    def forward(self, x, context=None):
        q = self.to_q(x)
        context = default(context, x)

        if self.ip_dim > 0:
            # split the trailing image-prompt tokens off from the text tokens
            token_len = context.shape[1]
            context_ip = context[:, -self.ip_dim :, :]
            k_ip = self.to_k_ip(context_ip)
            v_ip = self.to_v_ip(context_ip)
            context = context[:, : (token_len - self.ip_dim), :]

        k = self.to_k(context)
        v = self.to_v(context)

        b, _, _ = q.shape
        # fold the heads into the batch dimension: [B, L, H*D] -> [B*H, L, D]
        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(b, t.shape[1], self.heads, self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b * self.heads, t.shape[1], self.dim_head)
            .contiguous(),
            (q, k, v),
        )

        out = xformers.ops.memory_efficient_attention(
            q, k, v, attn_bias=None, op=self.attention_op
        )

        if self.ip_dim > 0:
            k_ip, v_ip = map(
                lambda t: t.unsqueeze(3)
                .reshape(b, t.shape[1], self.heads, self.dim_head)
                .permute(0, 2, 1, 3)
                .reshape(b * self.heads, t.shape[1], self.dim_head)
                .contiguous(),
                (k_ip, v_ip),
            )
            # attend to the image-prompt tokens separately, then blend
            out_ip = xformers.ops.memory_efficient_attention(
                q, k_ip, v_ip, attn_bias=None, op=self.attention_op
            )
            out = out + self.ip_weight * out_ip

        # unfold the heads: [B*H, L, D] -> [B, L, H*D]
        out = (
            out.unsqueeze(0)
            .reshape(b, self.heads, out.shape[1], self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b, out.shape[1], self.heads * self.dim_head)
        )
        return self.to_out(out)
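

# Shape sketch (values assumed): 77 text tokens plus ip_dim=16 image tokens.
# xformers' memory_efficient_attention generally requires a CUDA device.
#   attn = MemoryEfficientCrossAttention(query_dim=320, context_dim=1024, ip_dim=16).cuda()
#   x = torch.randn(2, 4096, 320, device="cuda")
#   ctx = torch.randn(2, 77 + 16, 1024, device="cuda")
#   attn(x, ctx).shape  # torch.Size([2, 4096, 320])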


class BasicTransformerBlock3D(nn.Module):

    def __init__(
        self,
        dim,
        n_heads,
        d_head,
        context_dim,
        dropout=0.0,
        gated_ff=True,
        ip_dim=0,
        ip_weight=1,
    ):
        super().__init__()

        # self-attention (joint over all views)
        self.attn1 = MemoryEfficientCrossAttention(
            query_dim=dim,
            context_dim=None,
            heads=n_heads,
            dim_head=d_head,
            dropout=dropout,
        )
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
        # cross-attention to the text (and optional image-prompt) context
        self.attn2 = MemoryEfficientCrossAttention(
            query_dim=dim,
            context_dim=context_dim,
            heads=n_heads,
            dim_head=d_head,
            dropout=dropout,
            ip_dim=ip_dim,
            ip_weight=ip_weight,
        )
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.norm3 = nn.LayerNorm(dim)

    def forward(self, x, context=None, num_frames=1):
        # concatenate the tokens of all frames so self-attention spans every view
        x = rearrange(x, "(b f) l c -> b (f l) c", f=num_frames).contiguous()
        x = self.attn1(self.norm1(x), context=None) + x
        # back to per-frame sequences for cross-attention against the context
        x = rearrange(x, "b (f l) c -> (b f) l c", f=num_frames).contiguous()
        x = self.attn2(self.norm2(x), context=context) + x
        x = self.ff(self.norm3(x)) + x
        return x


class SpatialTransformer3D(nn.Module):

    def __init__(
        self,
        in_channels,
        n_heads,
        d_head,
        context_dim,
        depth=1,
        dropout=0.0,
        ip_dim=0,
        ip_weight=1,
    ):
        super().__init__()

        if not isinstance(context_dim, list):
            context_dim = [context_dim] * depth

        self.in_channels = in_channels

        inner_dim = n_heads * d_head
        self.norm = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock3D(
                    inner_dim,
                    n_heads,
                    d_head,
                    context_dim=context_dim[d],
                    dropout=dropout,
                    ip_dim=ip_dim,
                    ip_weight=ip_weight,
                )
                for d in range(depth)
            ]
        )

        # zero-initialized so the block starts out as an identity mapping
        self.proj_out = zero_module(nn.Linear(inner_dim, in_channels))

    def forward(self, x, context=None, num_frames=1):
        # one context entry per transformer block
        if not isinstance(context, list):
            context = [context] * len(self.transformer_blocks)
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        x = rearrange(x, "b c h w -> b (h w) c").contiguous()
        x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            x = block(x, context=context[i], num_frames=num_frames)
        x = self.proj_out(x)
        x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w).contiguous()

        return x + x_in
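

# Sketch (illustrative sizes): joint multi-view attention over a [B*F, C, H, W]
# feature map; requires CUDA for the xformers kernels.
#   st = SpatialTransformer3D(in_channels=320, n_heads=8, d_head=40, context_dim=1024).cuda()
#   feats = torch.randn(4, 320, 32, 32, device="cuda")   # B=1, F=4 views
#   ctx = torch.randn(4, 77, 1024, device="cuda")        # per-view text context
#   st(feats, context=ctx, num_frames=4).shape           # torch.Size([4, 320, 32, 32])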


class PerceiverAttention(nn.Module):
    def __init__(self, *, dim, dim_head=64, heads=8):
        super().__init__()
        self.dim_head = dim_head
        self.heads = heads
        inner_dim = dim_head * heads

        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

    def forward(self, x, latents):
        """
        Args:
            x (torch.Tensor): image features
                shape (b, n1, D)
            latents (torch.Tensor): latent features
                shape (b, n2, D)
        """
        x = self.norm1(x)
        latents = self.norm2(latents)

        b, l, _ = latents.shape

        # the latents attend to the image features and to themselves
        q = self.to_q(latents)
        kv_input = torch.cat((x, latents), dim=-2)
        k, v = self.to_kv(kv_input).chunk(2, dim=-1)

        # [b, n, H*D] -> [b, H, n, D]
        q, k, v = map(
            lambda t: t.reshape(b, t.shape[1], self.heads, -1)
            .transpose(1, 2)
            .contiguous(),
            (q, k, v),
        )

        # scale q and k each by dim_head^-0.25 before the matmul; more stable
        # in low precision than dividing the product afterwards
        scale = 1 / math.sqrt(math.sqrt(self.dim_head))
        weight = (q * scale) @ (k * scale).transpose(-2, -1)
        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
        out = weight @ v

        out = out.permute(0, 2, 1, 3).reshape(b, l, -1)

        return self.to_out(out)


class Resampler(nn.Module):
    """
    Perceiver-style resampler: maps a variable number of image-embedding
    tokens to num_queries learned latent tokens (as in IP-Adapter).
    """

    def __init__(
        self,
        dim=1024,
        depth=8,
        dim_head=64,
        heads=16,
        num_queries=8,
        embedding_dim=768,
        output_dim=1024,
        ff_mult=4,
    ):
        super().__init__()
        self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim ** 0.5)
        self.proj_in = nn.Linear(embedding_dim, dim)
        self.proj_out = nn.Linear(dim, output_dim)
        self.norm_out = nn.LayerNorm(output_dim)

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
                        nn.Sequential(
                            nn.LayerNorm(dim),
                            nn.Linear(dim, dim * ff_mult, bias=False),
                            nn.GELU(),
                            nn.Linear(dim * ff_mult, dim, bias=False),
                        ),
                    ]
                )
            )

    def forward(self, x):
        latents = self.latents.repeat(x.size(0), 1, 1)
        x = self.proj_in(x)
        for attn, ff in self.layers:
            latents = attn(x, latents) + latents
            latents = ff(latents) + latents

        latents = self.proj_out(latents)
        return self.norm_out(latents)
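

# Sketch (values assumed): resample 257 CLIP image tokens of width 1280 down
# to 16 query tokens in the text-context width, mirroring how self.image_embed
# is configured in MultiViewUNetModel below:
#   resampler = Resampler(dim=1024, depth=4, heads=12, num_queries=16,
#                         embedding_dim=1280, output_dim=1024)
#   resampler(torch.randn(2, 257, 1280)).shape  # torch.Size([2, 16, 1024])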


class CondSequential(nn.Sequential):
    """
    A sequential module that passes timestep embeddings (and, for attention
    layers, the context and frame count) to the children that support them
    as extra inputs.
    """

    def forward(self, x, emb, context=None, num_frames=1):
        for layer in self:
            if isinstance(layer, ResBlock):
                x = layer(x, emb)
            elif isinstance(layer, SpatialTransformer3D):
                x = layer(x, context, num_frames=num_frames)
            else:
                x = layer(x)
        return x


class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        upsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(
                dims, self.channels, self.out_channels, 3, padding=padding
            )

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            x = F.interpolate(
                x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
            )
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        if self.use_conv:
            x = self.conv(x)
        return x


class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(
                dims,
                self.channels,
                self.out_channels,
                3,
                stride=stride,
                padding=padding,
            )
        else:
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)


class ResBlock(nn.Module):
    """
    A residual block that can optionally change the number of channels.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_scale_shift_norm = use_scale_shift_norm

        self.in_layers = nn.Sequential(
            nn.GroupNorm(32, channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down

        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            nn.Linear(
                emb_channels,
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        self.out_layers = nn.Sequential(
            nn.GroupNorm(32, self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        if self.updown:
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]  # broadcast over the spatial dims
        if self.use_scale_shift_norm:
            # FiLM-style conditioning: normalize, then scale and shift by the embedding
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = torch.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
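

# Sketch of the use_scale_shift_norm=True path (FiLM-style conditioning,
# h = norm(h) * (1 + scale) + shift), with assumed sizes:
#   rb = ResBlock(channels=64, emb_channels=256, dropout=0.0, use_scale_shift_norm=True)
#   rb(torch.randn(2, 64, 16, 16), torch.randn(2, 256)).shape  # torch.Size([2, 64, 16, 16])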


class MultiViewUNetModel(ModelMixin, ConfigMixin):
    """
    The full multi-view UNet model with attention, timestep embedding and camera embedding.
    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_head_channels: if specified, ignore num_heads and instead use
        a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number
        of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param camera_dim: dimensionality of the (flattened) camera pose input.
    :param ip_dim: number of image-prompt tokens appended to the context.
    :param ip_weight: weight of the image-prompt attention branch.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        num_heads=-1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        transformer_depth=1,
        context_dim=None,
        n_embed=None,
        num_attention_blocks=None,
        adm_in_channels=None,
        camera_dim=None,
        ip_dim=0,
        ip_weight=1.0,
        **kwargs,
    ):
        super().__init__()
        assert context_dim is not None

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        if num_heads == -1:
            assert (
                num_head_channels != -1
            ), "Either num_heads or num_head_channels has to be set"

        if num_head_channels == -1:
            assert (
                num_heads != -1
            ), "Either num_heads or num_head_channels has to be set"

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        if isinstance(num_res_blocks, int):
            self.num_res_blocks = len(channel_mult) * [num_res_blocks]
        else:
            if len(num_res_blocks) != len(channel_mult):
                raise ValueError(
                    "provide num_res_blocks either as an int (globally constant) or "
                    "as a list/tuple (per-level) with the same length as channel_mult"
                )
            self.num_res_blocks = num_res_blocks

        if num_attention_blocks is not None:
            assert len(num_attention_blocks) == len(self.num_res_blocks)
            assert all(
                map(
                    lambda i: self.num_res_blocks[i] >= num_attention_blocks[i],
                    range(len(num_attention_blocks)),
                )
            )
            print(
                f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
                f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
                f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
                f"attention will still not be set."
            )

        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        self.ip_dim = ip_dim
        self.ip_weight = ip_weight

        if self.ip_dim > 0:
            # image-prompt resampler: maps image embeddings to ip_dim context tokens
            self.image_embed = Resampler(
                dim=context_dim,
                depth=4,
                dim_head=64,
                heads=12,
                num_queries=ip_dim,
                embedding_dim=1280,
                output_dim=context_dim,
                ff_mult=4,
            )

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            nn.Linear(model_channels, time_embed_dim),
            nn.SiLU(),
            nn.Linear(time_embed_dim, time_embed_dim),
        )

        if camera_dim is not None:
            time_embed_dim = model_channels * 4
            self.camera_embed = nn.Sequential(
                nn.Linear(camera_dim, time_embed_dim),
                nn.SiLU(),
                nn.Linear(time_embed_dim, time_embed_dim),
            )

        if self.num_classes is not None:
            if isinstance(self.num_classes, int):
                self.label_emb = nn.Embedding(self.num_classes, time_embed_dim)
            elif self.num_classes == "continuous":
                self.label_emb = nn.Linear(1, time_embed_dim)
            elif self.num_classes == "sequential":
                assert adm_in_channels is not None
                self.label_emb = nn.Sequential(
                    nn.Sequential(
                        nn.Linear(adm_in_channels, time_embed_dim),
                        nn.SiLU(),
                        nn.Linear(time_embed_dim, time_embed_dim),
                    )
                )
            else:
                raise ValueError(f"unsupported num_classes: {self.num_classes}")

        self.input_blocks = nn.ModuleList(
            [
                CondSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1
        for level, mult in enumerate(channel_mult):
            for nr in range(self.num_res_blocks[level]):
                layers: List[Any] = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels

                    if num_attention_blocks is None or nr < num_attention_blocks[level]:
                        layers.append(
                            SpatialTransformer3D(
                                ch,
                                num_heads,
                                dim_head,
                                context_dim=context_dim,
                                depth=transformer_depth,
                                ip_dim=self.ip_dim,
                                ip_weight=self.ip_weight,
                            )
                        )
                self.input_blocks.append(CondSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    CondSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels

        self.middle_block = CondSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            SpatialTransformer3D(
                ch,
                num_heads,
                dim_head,
                context_dim=context_dim,
                depth=transformer_depth,
                ip_dim=self.ip_dim,
                ip_weight=self.ip_weight,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch

        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(self.num_res_blocks[level] + 1):
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=model_channels * mult,
                        dims=dims,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels

                    if num_attention_blocks is None or i < num_attention_blocks[level]:
                        layers.append(
                            SpatialTransformer3D(
                                ch,
                                num_heads,
                                dim_head,
                                context_dim=context_dim,
                                depth=transformer_depth,
                                ip_dim=self.ip_dim,
                                ip_weight=self.ip_weight,
                            )
                        )
                if level and i == self.num_res_blocks[level]:
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(CondSequential(*layers))
                self._feature_size += ch

        self.out = nn.Sequential(
            nn.GroupNorm(32, ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
        )
        if self.predict_codebook_ids:
            self.id_predictor = nn.Sequential(
                nn.GroupNorm(32, ch),
                conv_nd(dims, model_channels, n_embed, 1),
            )

    def forward(
        self,
        x,
        timesteps=None,
        context=None,
        y=None,
        camera=None,
        num_frames=1,
        ip=None,
        ip_img=None,
        **kwargs,
    ):
        """
        Apply the model to an input batch.
        :param x: an [(N x F) x C x ...] Tensor of inputs. F is the number of frames (views).
        :param timesteps: a 1-D batch of timesteps.
        :param context: conditioning plugged in via cross-attention.
        :param y: an [N] Tensor of labels, if class-conditional.
        :param camera: an [(N x F) x camera_dim] Tensor of flattened camera poses.
        :param num_frames: an integer indicating the number of frames for tensor reshaping.
        :param ip: image-prompt embeddings fed through the Resampler, if ip_dim > 0.
        :param ip_img: latent of the conditioning image; overwrites the last frame of each object.
        :return: an [(N x F) x C x ...] Tensor of outputs. F is the number of frames (views).
        """
        assert (
            x.shape[0] % num_frames == 0
        ), "input batch size must be divisible by num_frames!"
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"

        hs = []

        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(x.dtype)

        emb = self.time_embed(t_emb)

        if self.num_classes is not None:
            assert y is not None
            assert y.shape[0] == x.shape[0]
            emb = emb + self.label_emb(y)

        # the camera pose embedding is simply added to the timestep embedding
        if camera is not None:
            emb = emb + self.camera_embed(camera)

        if self.ip_dim > 0:
            # overwrite the last frame of each object with the conditioning image
            # latent, and append the resampled image tokens to the context
            x[(num_frames - 1) :: num_frames, :, :, :] = ip_img
            ip_emb = self.image_embed(ip)
            context = torch.cat((context, ip_emb), 1)

        h = x
        for module in self.input_blocks:
            h = module(h, emb, context, num_frames=num_frames)
            hs.append(h)
        h = self.middle_block(h, emb, context, num_frames=num_frames)
        for module in self.output_blocks:
            h = torch.cat([h, hs.pop()], dim=1)
            h = module(h, emb, context, num_frames=num_frames)
        h = h.type(x.dtype)
        if self.predict_codebook_ids:
            return self.id_predictor(h)
        else:
            return self.out(h)
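

if __name__ == "__main__":
    # Smoke test (sketch): the hyper-parameters below are illustrative only,
    # not a released MVDream/ImageDream configuration, and the xformers
    # attention kernels generally require a CUDA device.
    device = "cuda"
    model = MultiViewUNetModel(
        image_size=32,
        in_channels=4,
        model_channels=64,
        out_channels=4,
        num_res_blocks=1,
        attention_resolutions=[2],
        channel_mult=(1, 2),
        num_heads=4,
        context_dim=64,
        camera_dim=16,
    ).to(device)
    num_frames = 4
    x = torch.randn(num_frames, 4, 32, 32, device=device)
    t = torch.randint(0, 1000, (num_frames,), device=device)
    context = torch.randn(num_frames, 77, 64, device=device)
    camera = get_camera(num_frames).to(device)  # [4, 16]
    out = model(x, timesteps=t, context=context, camera=camera, num_frames=num_frames)
    print(out.shape)  # torch.Size([4, 4, 32, 32])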