# Adapted from https://github.com/Stability-AI/generative-models/blob/main/sgm/modules/diffusionmodules/video_model.py
from functools import partial
from typing import List, Optional, Union

from einops import rearrange

from models.cam.conditioning import ConditionalModel
from models.svd.sgm.modules.diffusionmodules.openaimodel import *
from models.svd.sgm.modules.diffusionmodules.util import AlphaBlender
from models.svd.sgm.modules.video_attention import SpatialVideoTransformer
from models.svd.sgm.util import default

class VideoResBlock(ResBlock):
    """Spatial ResBlock followed by a temporal ResBlock, blended by an AlphaBlender."""

    def __init__(
        self,
        channels: int,
        emb_channels: int,
        dropout: float,
        video_kernel_size: Union[int, List[int]] = 3,
        merge_strategy: str = "fixed",
        merge_factor: float = 0.5,
        out_channels: Optional[int] = None,
        use_conv: bool = False,
        use_scale_shift_norm: bool = False,
        dims: int = 2,
        use_checkpoint: bool = False,
        up: bool = False,
        down: bool = False,
    ):
        super().__init__(
            channels,
            emb_channels,
            dropout,
            out_channels=out_channels,
            use_conv=use_conv,
            use_scale_shift_norm=use_scale_shift_norm,
            dims=dims,
            use_checkpoint=use_checkpoint,
            up=up,
            down=down,
        )
        # 3D ResBlock that convolves along the time axis.
        self.time_stack = ResBlock(
            default(out_channels, channels),
            emb_channels,
            dropout=dropout,
            dims=3,
            out_channels=default(out_channels, channels),
            use_scale_shift_norm=False,
            use_conv=False,
            up=False,
            down=False,
            kernel_size=video_kernel_size,
            use_checkpoint=use_checkpoint,
            exchange_temb_dims=True,
        )
        # Blends the spatial and temporal branches with a fixed or learned alpha.
        self.time_mixer = AlphaBlender(
            alpha=merge_factor,
            merge_strategy=merge_strategy,
            rearrange_pattern="b t -> b 1 t 1 1",
        )

    def forward(
        self,
        x: th.Tensor,
        emb: th.Tensor,
        num_video_frames: int,
        image_only_indicator: Optional[th.Tensor] = None,
    ) -> th.Tensor:
        # Spatial pass: all frames are processed as a batch of independent images.
        x = super().forward(x, emb)

        # Keep the spatial result (x_mix) and run the temporal stack on a copy.
        x_mix = rearrange(x, "(b t) c h w -> b c t h w", t=num_video_frames)
        x = rearrange(x, "(b t) c h w -> b c t h w", t=num_video_frames)

        x = self.time_stack(
            x, rearrange(emb, "(b t) ... -> b t ...", t=num_video_frames)
        )
        x = self.time_mixer(
            x_spatial=x_mix, x_temporal=x, image_only_indicator=image_only_indicator
        )
        x = rearrange(x, "b c t h w -> (b t) c h w")
        return x
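
# Shape walk-through for VideoResBlock.forward (the numbers are illustrative
# assumptions, not taken from the original source): with batch b=2, t=14 frames
# and c=320 channels at 40x64, the input arrives flattened as (2*14, 320, 40, 64).
# The spatial ResBlock treats every frame as an image; the temporal stack then
# convolves over the t axis of the (b, c, t, h, w) view, and AlphaBlender
# interpolates per element between the two branches:
#
#     out = alpha * x_spatial + (1 - alpha) * x_temporal
#
# where alpha comes from merge_strategy ("fixed" uses merge_factor directly, and
# learned strategies can fall back to the spatial branch for frames flagged by
# image_only_indicator).
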
class VideoUNet(nn.Module):
    '''
    Adapted from the vanilla SVD model. We add "cross_attention_merger_input_blocks"
    and "cross_attention_merger_mid_block" to incorporate the CAM control features.
    '''
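    # CAM fusion sketch (hypothetical flow, mirroring forward() below): with a
    # merging_mode such as "attention_cross_attention" (an assumed config
    # string), each merger in cross_attention_merger_input_blocks is a
    # ConditionalModel that fuses one encoder skip feature with the CAM feature
    # of the same resolution:
    #
    #     merged = merger(h_base, h_cam, num_frames=F, num_conditional_frames=N)
    #
    # cross_attention_merger_mid_block does the same for the middle block.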
    def __init__(
        self,
        in_channels: int,
        model_channels: int,
        out_channels: int,
        num_res_blocks: int,
        num_conditional_frames: int,
        attention_resolutions: Union[List[int], int],
        dropout: float = 0.0,
        channel_mult: List[int] = (1, 2, 4, 8),
        conv_resample: bool = True,
        dims: int = 2,
        num_classes: Optional[Union[int, str]] = None,
        use_checkpoint: bool = False,
        num_heads: int = -1,
        num_head_channels: int = -1,
        num_heads_upsample: int = -1,
        use_scale_shift_norm: bool = False,
        resblock_updown: bool = False,
        transformer_depth: Union[List[int], int] = 1,
        transformer_depth_middle: Optional[int] = None,
        context_dim: Optional[int] = None,
        time_downup: bool = False,
        time_context_dim: Optional[int] = None,
        extra_ff_mix_layer: bool = False,
        use_spatial_context: bool = False,
        merge_strategy: str = "fixed",
        merge_factor: float = 0.5,
        spatial_transformer_attn_type: str = "softmax",
        video_kernel_size: Union[int, List[int]] = 3,
        use_linear_in_transformer: bool = False,
        adm_in_channels: Optional[int] = None,
        disable_temporal_crossattention: bool = False,
        max_ddpm_temb_period: int = 10000,
        merging_mode: str = "addition",
        controlnet_mode: bool = False,
        use_apm: bool = False,
    ):
        super().__init__()
        assert context_dim is not None

        self.controlnet_mode = controlnet_mode
        if controlnet_mode:
            assert merging_mode.startswith(
                "attention"), "other merging modes not implemented"
            AttentionCondModel = partial(
                ConditionalModel, conditional_model=merging_mode.split("attention_")[1])
            self.cross_attention_merger_input_blocks = nn.ModuleList([])

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads
        if num_heads == -1:
            assert num_head_channels != -1
        if num_head_channels == -1:
            assert num_heads != -1

        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        if isinstance(transformer_depth, int):
            transformer_depth = len(channel_mult) * [transformer_depth]
        transformer_depth_middle = default(
            transformer_depth_middle, transformer_depth[-1]
        )
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.dims = dims
        self.use_scale_shift_norm = use_scale_shift_norm
        self.resblock_updown = resblock_updown
        self.transformer_depth = transformer_depth
        self.transformer_depth_middle = transformer_depth_middle
        self.context_dim = context_dim
        self.time_downup = time_downup
        self.time_context_dim = time_context_dim
        self.extra_ff_mix_layer = extra_ff_mix_layer
        self.use_spatial_context = use_spatial_context
        self.merge_strategy = merge_strategy
        self.merge_factor = merge_factor
        self.spatial_transformer_attn_type = spatial_transformer_attn_type
        self.video_kernel_size = video_kernel_size
        self.use_linear_in_transformer = use_linear_in_transformer
        self.adm_in_channels = adm_in_channels
        self.disable_temporal_crossattention = disable_temporal_crossattention
        self.max_ddpm_temb_period = max_ddpm_temb_period

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        if self.num_classes is not None:
            if isinstance(self.num_classes, int):
                self.label_emb = nn.Embedding(num_classes, time_embed_dim)
            elif self.num_classes == "continuous":
                print("setting up linear c_adm embedding layer")
                self.label_emb = nn.Linear(1, time_embed_dim)
            elif self.num_classes == "timestep":
                self.label_emb = nn.Sequential(
                    Timestep(model_channels),
                    nn.Sequential(
                        linear(model_channels, time_embed_dim),
                        nn.SiLU(),
                        linear(time_embed_dim, time_embed_dim),
                    ),
                )
            elif self.num_classes == "sequential":
                assert adm_in_channels is not None
                self.label_emb = nn.Sequential(
                    nn.Sequential(
                        linear(adm_in_channels, time_embed_dim),
                        nn.SiLU(),
                        linear(time_embed_dim, time_embed_dim),
                    )
                )
            else:
                raise ValueError()

        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1
        if controlnet_mode and merging_mode.startswith("attention"):
            self.cross_attention_merger_input_blocks.append(
                AttentionCondModel(input_channels=ch))
        def get_attention_layer(
            ch,
            num_heads,
            dim_head,
            depth=1,
            context_dim=None,
            use_checkpoint=False,
            disabled_sa=False,
            use_apm: bool = False,
        ):
            return SpatialVideoTransformer(
                ch,
                num_heads,
                dim_head,
                depth=depth,
                context_dim=context_dim,
                time_context_dim=time_context_dim,
                dropout=dropout,
                ff_in=extra_ff_mix_layer,
                use_spatial_context=use_spatial_context,
                merge_strategy=merge_strategy,
                merge_factor=merge_factor,
                checkpoint=use_checkpoint,
                use_linear=use_linear_in_transformer,
                attn_mode=spatial_transformer_attn_type,
                disable_self_attn=disabled_sa,
                disable_temporal_crossattention=disable_temporal_crossattention,
                max_time_embed_period=max_ddpm_temb_period,
                use_apm=use_apm,
            )

        def get_resblock(
            merge_factor,
            merge_strategy,
            video_kernel_size,
            ch,
            time_embed_dim,
            dropout,
            out_ch,
            dims,
            use_checkpoint,
            use_scale_shift_norm,
            down=False,
            up=False,
        ):
            return VideoResBlock(
                merge_factor=merge_factor,
                merge_strategy=merge_strategy,
                video_kernel_size=video_kernel_size,
                channels=ch,
                emb_channels=time_embed_dim,
                dropout=dropout,
                out_channels=out_ch,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
                down=down,
                up=up,
            )
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    get_resblock(
                        merge_factor=merge_factor,
                        merge_strategy=merge_strategy,
                        video_kernel_size=video_kernel_size,
                        ch=ch,
                        time_embed_dim=time_embed_dim,
                        dropout=dropout,
                        out_ch=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    layers.append(
                        get_attention_layer(
                            ch,
                            num_heads,
                            dim_head,
                            depth=transformer_depth[level],
                            context_dim=context_dim,
                            use_checkpoint=use_checkpoint,
                            disabled_sa=False,
                            use_apm=use_apm,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                if controlnet_mode and merging_mode.startswith("attention"):
                    self.cross_attention_merger_input_blocks.append(
                        AttentionCondModel(input_channels=ch))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                ds *= 2
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        get_resblock(
                            merge_factor=merge_factor,
                            merge_strategy=merge_strategy,
                            video_kernel_size=video_kernel_size,
                            ch=ch,
                            time_embed_dim=time_embed_dim,
                            dropout=dropout,
                            out_ch=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch,
                            conv_resample,
                            dims=dims,
                            out_channels=out_ch,
                            third_down=time_downup,
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                if controlnet_mode and merging_mode.startswith("attention"):
                    self.cross_attention_merger_input_blocks.append(
                        AttentionCondModel(input_channels=ch))
                self._feature_size += ch

        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels

        self.middle_block = TimestepEmbedSequential(
            get_resblock(
                merge_factor=merge_factor,
                merge_strategy=merge_strategy,
                video_kernel_size=video_kernel_size,
                ch=ch,
                time_embed_dim=time_embed_dim,
                out_ch=None,
                dropout=dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            get_attention_layer(
                ch,
                num_heads,
                dim_head,
                depth=transformer_depth_middle,
                context_dim=context_dim,
                use_checkpoint=use_checkpoint,
                use_apm=use_apm,
            ),
            get_resblock(
                merge_factor=merge_factor,
                merge_strategy=merge_strategy,
                video_kernel_size=video_kernel_size,
                ch=ch,
                out_ch=None,
                time_embed_dim=time_embed_dim,
                dropout=dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch
        if controlnet_mode and merging_mode.startswith("attention"):
            self.cross_attention_merger_mid_block = AttentionCondModel(
                input_channels=ch)

        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks + 1):
                ich = input_block_chans.pop()
                layers = [
                    get_resblock(
                        merge_factor=merge_factor,
                        merge_strategy=merge_strategy,
                        video_kernel_size=video_kernel_size,
                        ch=ch + ich,
                        time_embed_dim=time_embed_dim,
                        dropout=dropout,
                        out_ch=model_channels * mult,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    layers.append(
                        get_attention_layer(
                            ch,
                            num_heads,
                            dim_head,
                            depth=transformer_depth[level],
                            context_dim=context_dim,
                            use_checkpoint=use_checkpoint,
                            disabled_sa=False,
                            use_apm=use_apm,
                        )
                    )
                if level and i == num_res_blocks:
                    out_ch = ch
                    ds //= 2
                    layers.append(
                        get_resblock(
                            merge_factor=merge_factor,
                            merge_strategy=merge_strategy,
                            video_kernel_size=video_kernel_size,
                            ch=ch,
                            time_embed_dim=time_embed_dim,
                            dropout=dropout,
                            out_ch=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(
                            ch,
                            conv_resample,
                            dims=dims,
                            out_channels=out_ch,
                            third_up=time_downup,
                        )
                    )
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch

        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, model_channels,
                        out_channels, 3, padding=1)),
        )
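        # Worked example of the channel bookkeeping above (illustrative
        # assumption: model_channels=320 and channel_mult=(1, 2, 4, 4) as in
        # public SVD configs): the encoder levels produce 320, 640, 1280 and
        # 1280 channels, `ds` enters the levels as 1, 2, 4, 8, and attention
        # layers are inserted wherever ds appears in attention_resolutions
        # (e.g. [4, 2, 1] enables attention on all but the last level). The
        # decoder pops input_block_chans in reverse, so each output block sees
        # ch + ich channels from the skip connection before projecting back to
        # model_channels * mult.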
    def forward(
        self,
        # (B F) (2 C) H W, e.g. [28, 8, 72, 128] = concat([z_t, <cond_frames>])
        x: th.Tensor,
        timesteps: th.Tensor,  # (B F), e.g. [28]
        # (B F) 1 T, e.g. [28, 1, 1024]; cross-attention context from the CLIP
        # image encoder, <cond_frames_without_noise>
        context: Optional[th.Tensor] = None,
        # (B F) T, e.g. [28, 768]; concat([<fps_id>, <motion_bucket_id>, <cond_aug>])
        y: Optional[th.Tensor] = None,
        time_context: Optional[th.Tensor] = None,  # None
        num_video_frames: Optional[int] = None,  # e.g. 14
        num_conditional_frames: Optional[int] = None,  # e.g. 8
        # zeros of shape (B, F), e.g. [2, 14]
        image_only_indicator: Optional[th.Tensor] = None,
        # CAM features, one per input block
        hs_control_input: Optional[List[th.Tensor]] = None,
        # CAM features for the middle block
        hs_control_mid: Optional[th.Tensor] = None,
    ):
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        hs = []
        t_emb = timestep_embedding(
            timesteps, self.model_channels, repeat_only=False).to(x.dtype)
        emb = self.time_embed(t_emb)

        if self.num_classes is not None:
            assert y.shape[0] == x.shape[0]
            emb = emb + self.label_emb(y)

        h = x
        for module in self.input_blocks:
            h = module(
                h,
                emb,
                context=context,
                image_only_indicator=image_only_indicator,
                time_context=time_context,
                num_video_frames=num_video_frames,
            )
            hs.append(h)

        # Fusion of CAM control features with the base encoder features.
        if hs_control_input is not None:
            new_hs = []
            assert len(hs) == len(hs_control_input) and len(
                hs) == len(self.cross_attention_merger_input_blocks)
            for h_no_ctrl, h_ctrl, merger in zip(hs, hs_control_input, self.cross_attention_merger_input_blocks):
                merged_h = merger(h_no_ctrl, h_ctrl, num_frames=num_video_frames,
                                  num_conditional_frames=num_conditional_frames)
                new_hs.append(merged_h)
            hs = new_hs

        h = self.middle_block(
            h,
            emb,
            context=context,
            image_only_indicator=image_only_indicator,
            time_context=time_context,
            num_video_frames=num_video_frames,
        )

        # Fusion of CAM control features with the base middle-block features.
        if hs_control_mid is not None:
            h = self.cross_attention_merger_mid_block(
                h, hs_control_mid, num_frames=num_video_frames, num_conditional_frames=num_conditional_frames)

        for module in self.output_blocks:
            h = th.cat([h, hs.pop()], dim=1)
            h = module(
                h,
                emb,
                context=context,
                image_only_indicator=image_only_indicator,
                time_context=time_context,
                num_video_frames=num_video_frames,
            )
        h = h.type(x.dtype)
        return self.out(h)
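
# Usage sketch (a minimal, hypothetical configuration; the constructor
# arguments below are illustrative assumptions, not the exact SVD checkpoint
# values; only the tensor shapes follow the inline comments in forward()):
#
#     unet = VideoUNet(
#         in_channels=8, model_channels=320, out_channels=4,
#         num_res_blocks=2, num_conditional_frames=8,
#         attention_resolutions=[4, 2, 1], channel_mult=(1, 2, 4, 4),
#         transformer_depth=1, context_dim=1024,
#         num_classes="sequential", adm_in_channels=768,
#     )
#     B, F = 2, 14
#     out = unet(
#         x=th.randn(B * F, 8, 72, 128),            # concat([z_t, <cond_frames>])
#         timesteps=th.randint(0, 1000, (B * F,)),
#         context=th.randn(B * F, 1, 1024),         # CLIP image embedding
#         y=th.randn(B * F, 768),                   # fps / motion bucket / aug
#         num_video_frames=F,
#         image_only_indicator=th.zeros(B, F),
#     )                                             # -> (B*F, 4, 72, 128)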