| from typing import Any, Callable |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
|
|
| from ..utils import deprecate, logging |
| from ..utils.import_utils import is_torch_npu_available, is_torch_xla_available, is_xformers_available |
| from ..utils.torch_utils import maybe_allow_in_graph |
| from .activations import GEGLU, GELU, ApproximateGELU, FP32SiLU, LinearActivation, SwiGLU |
| from .attention_processor import Attention, AttentionProcessor, JointAttnProcessor2_0 |
| from .embeddings import SinusoidalPositionalEmbedding |
| from .normalization import AdaLayerNorm, AdaLayerNormContinuous, AdaLayerNormZero, RMSNorm, SD35AdaLayerNormZeroX |
|
|
|
|
| if is_xformers_available(): |
| import xformers.ops as xops |
| else: |
| xops = None |
|
|
|
|
| logger = logging.get_logger(__name__) |
|
|
|
|
| class AttentionMixin: |
| @property |
| def attn_processors(self) -> dict[str, AttentionProcessor]: |
| r""" |
| Returns: |
| `dict` of attention processors: A dictionary containing all attention processors used in the model, |
| indexed by their weight names. |
| """ |
| |
| processors = {} |
|
|
| def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: dict[str, AttentionProcessor]): |
| if hasattr(module, "get_processor"): |
| processors[f"{name}.processor"] = module.get_processor() |
|
|
| for sub_name, child in module.named_children(): |
| fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) |
|
|
| return processors |
|
|
| for name, module in self.named_children(): |
| fn_recursive_add_processors(name, module, processors) |
|
|
| return processors |
|
|
| def set_attn_processor(self, processor: AttentionProcessor | dict[str, AttentionProcessor]): |
| r""" |
| Sets the attention processor to use to compute attention. |
| |
| Parameters: |
| processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): |
| The instantiated processor class or a dictionary of processor classes that will be set as the processor |
| for **all** `Attention` layers. |
| |
| If `processor` is a dict, the key needs to define the path to the corresponding cross attention |
| processor. This is strongly recommended when setting trainable attention processors. |
| |
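| Example (a minimal sketch; assumes `model` is a loaded diffusers model exposing this mixin and that |
| `AttnProcessor2_0` is appropriate for its attention layers): |
| |
| ```py |
| from diffusers.models.attention_processor import AttnProcessor2_0 |
| |
| # One processor instance shared by every `Attention` layer. |
| model.set_attn_processor(AttnProcessor2_0()) |
| |
| # Or a dict keyed by the paths returned by `model.attn_processors`. |
| model.set_attn_processor({name: AttnProcessor2_0() for name in model.attn_processors}) |
| ``` |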
| """ |
| count = len(self.attn_processors.keys()) |
|
|
| if isinstance(processor, dict) and len(processor) != count: |
| raise ValueError( |
| f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" |
| f" number of attention layers: {count}. Please make sure to pass {count} processor classes." |
| ) |
|
|
| def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): |
| if hasattr(module, "set_processor"): |
| if not isinstance(processor, dict): |
| module.set_processor(processor) |
| else: |
| module.set_processor(processor.pop(f"{name}.processor")) |
|
|
| for sub_name, child in module.named_children(): |
| fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) |
|
|
| for name, module in self.named_children(): |
| fn_recursive_attn_processor(name, module, processor) |
|
|
| def fuse_qkv_projections(self): |
| """ |
| Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) |
| are fused. For cross-attention modules, key and value projection matrices are fused. |
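| |
| Example (a hedged sketch; assumes the model's attention modules support QKV fusion): |
| |
| ```py |
| model.fuse_qkv_projections()    # fold to_q/to_k/to_v into a single linear per attention module |
| # ... run inference ... |
| model.unfuse_qkv_projections()  # drop the fused weights and fall back to the separate projections |
| ``` |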
| """ |
| for _, attn_processor in self.attn_processors.items(): |
| if "Added" in str(attn_processor.__class__.__name__): |
| raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") |
|
|
| for module in self.modules(): |
| if isinstance(module, AttentionModuleMixin) and module._supports_qkv_fusion: |
| module.fuse_projections() |
|
|
| def unfuse_qkv_projections(self): |
| """Disables the fused QKV projection if enabled. |
| |
| > [!WARNING] > This API is 🧪 experimental. |
| """ |
| for module in self.modules(): |
| if isinstance(module, AttentionModuleMixin) and module._supports_qkv_fusion: |
| module.unfuse_projections() |
|
|
|
|
| class AttentionModuleMixin: |
| _default_processor_cls = None |
| _available_processors = [] |
| _supports_qkv_fusion = True |
| fused_projections = False |
|
|
| def set_processor(self, processor: AttentionProcessor) -> None: |
| """ |
| Set the attention processor to use. |
| |
| Args: |
| processor (`AttnProcessor`): |
| The attention processor to use. |
| """ |
| |
| |
| if ( |
| hasattr(self, "processor") |
| and isinstance(self.processor, torch.nn.Module) |
| and not isinstance(processor, torch.nn.Module) |
| ): |
| logger.info(f"You are removing possibly trained weights of {self.processor} with {processor}") |
| self._modules.pop("processor") |
|
|
| self.processor = processor |
|
|
| def get_processor(self, return_deprecated_lora: bool = False) -> "AttentionProcessor": |
| """ |
| Get the attention processor in use. |
| |
| Args: |
| return_deprecated_lora (`bool`, *optional*, defaults to `False`): |
| Set to `True` to return the deprecated LoRA attention processor. |
| |
| Returns: |
| "AttentionProcessor": The attention processor in use. |
| """ |
| if not return_deprecated_lora: |
| return self.processor |
|
|
| def set_attention_backend(self, backend: str): |
| from .attention_dispatch import AttentionBackendName |
|
|
| available_backends = {x.value for x in AttentionBackendName.__members__.values()} |
| if backend not in available_backends: |
| raise ValueError(f"`{backend=}` must be one of the following: " + ", ".join(available_backends)) |
|
|
| backend = AttentionBackendName(backend.lower()) |
| self.processor._attention_backend = backend |
|
|
| def set_use_npu_flash_attention(self, use_npu_flash_attention: bool) -> None: |
| """ |
| Set whether to use NPU flash attention from `torch_npu` or not. |
| |
| Args: |
| use_npu_flash_attention (`bool`): Whether to use NPU flash attention or not. |
| """ |
|
|
| if use_npu_flash_attention: |
| if not is_torch_npu_available(): |
| raise ImportError("torch_npu is not available") |
|
|
| self.set_attention_backend("_native_npu") |
|
|
| def set_use_xla_flash_attention( |
| self, |
| use_xla_flash_attention: bool, |
| partition_spec: tuple[str | None, ...] | None = None, |
| is_flux=False, |
| ) -> None: |
| """ |
| Set whether to use XLA flash attention from `torch_xla` or not. |
| |
| Args: |
| use_xla_flash_attention (`bool`): |
| Whether to use the Pallas flash attention kernel from `torch_xla` or not. |
| partition_spec (`tuple`, *optional*): |
| The partition specification to use when running under SPMD. Otherwise `None`. |
| is_flux (`bool`, *optional*, defaults to `False`): |
| Whether the model is a Flux model. |
| """ |
| if use_xla_flash_attention: |
| if not is_torch_xla_available(): |
| raise ImportError("torch_xla is not available") |
|
|
| self.set_attention_backend("_native_xla") |
|
|
| def set_use_memory_efficient_attention_xformers( |
| self, use_memory_efficient_attention_xformers: bool, attention_op: Callable | None = None |
| ) -> None: |
| """ |
| Set whether to use memory efficient attention from `xformers` or not. |
| |
| Args: |
| use_memory_efficient_attention_xformers (`bool`): |
| Whether to use memory efficient attention from `xformers` or not. |
| attention_op (`Callable`, *optional*): |
| The attention operation to use. Defaults to `None` which uses the default attention operation from |
| `xformers`. |
| """ |
| if use_memory_efficient_attention_xformers: |
| if not is_xformers_available(): |
| raise ModuleNotFoundError( |
| "Refer to https://github.com/facebookresearch/xformers for more information on how to install xformers", |
| name="xformers", |
| ) |
| elif not torch.cuda.is_available(): |
| raise ValueError( |
| "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is" |
| " only available for GPU " |
| ) |
| else: |
| try: |
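| # Run a tiny dummy attention call to make sure xformers' memory-efficient attention actually |
| # works on this setup before switching the backend. |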
| |
| if is_xformers_available(): |
| dtype = None |
| if attention_op is not None: |
| op_fw, op_bw = attention_op |
| dtype, *_ = op_fw.SUPPORTED_DTYPES |
| q = torch.randn((1, 2, 40), device="cuda", dtype=dtype) |
| _ = xops.memory_efficient_attention(q, q, q) |
| except Exception as e: |
| raise e |
|
|
| self.set_attention_backend("xformers") |
|
|
| @torch.no_grad() |
| def fuse_projections(self): |
| """ |
| Fuse the query, key, and value projections into a single projection for efficiency. |
| """ |
| |
| |
| if not self._supports_qkv_fusion: |
| logger.debug( |
| f"{self.__class__.__name__} does not support fusing QKV projections, so `fuse_projections` will no-op." |
| ) |
| return |
|
|
| |
| if getattr(self, "fused_projections", False): |
| return |
|
|
| device = self.to_q.weight.data.device |
| dtype = self.to_q.weight.data.dtype |
|
|
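| # Cross-attention only fuses the key/value projections (queries come from a different stream); |
| # self-attention fuses query, key, and value into a single projection. |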
| if hasattr(self, "is_cross_attention") and self.is_cross_attention: |
| |
| concatenated_weights = torch.cat([self.to_k.weight.data, self.to_v.weight.data]) |
| in_features = concatenated_weights.shape[1] |
| out_features = concatenated_weights.shape[0] |
|
|
| self.to_kv = nn.Linear(in_features, out_features, bias=self.use_bias, device=device, dtype=dtype) |
| self.to_kv.weight.copy_(concatenated_weights) |
| if hasattr(self, "use_bias") and self.use_bias: |
| concatenated_bias = torch.cat([self.to_k.bias.data, self.to_v.bias.data]) |
| self.to_kv.bias.copy_(concatenated_bias) |
| else: |
| |
| concatenated_weights = torch.cat([self.to_q.weight.data, self.to_k.weight.data, self.to_v.weight.data]) |
| in_features = concatenated_weights.shape[1] |
| out_features = concatenated_weights.shape[0] |
|
|
| self.to_qkv = nn.Linear(in_features, out_features, bias=self.use_bias, device=device, dtype=dtype) |
| self.to_qkv.weight.copy_(concatenated_weights) |
| if hasattr(self, "use_bias") and self.use_bias: |
| concatenated_bias = torch.cat([self.to_q.bias.data, self.to_k.bias.data, self.to_v.bias.data]) |
| self.to_qkv.bias.copy_(concatenated_bias) |
|
|
| |
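| # Models with an extra conditioning stream (e.g. joint/MMDiT-style attention) also fuse their |
| # added Q/K/V projections into `to_added_qkv`. |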
| if ( |
| getattr(self, "add_q_proj", None) is not None |
| and getattr(self, "add_k_proj", None) is not None |
| and getattr(self, "add_v_proj", None) is not None |
| ): |
| concatenated_weights = torch.cat( |
| [self.add_q_proj.weight.data, self.add_k_proj.weight.data, self.add_v_proj.weight.data] |
| ) |
| in_features = concatenated_weights.shape[1] |
| out_features = concatenated_weights.shape[0] |
|
|
| self.to_added_qkv = nn.Linear( |
| in_features, out_features, bias=self.added_proj_bias, device=device, dtype=dtype |
| ) |
| self.to_added_qkv.weight.copy_(concatenated_weights) |
| if self.added_proj_bias: |
| concatenated_bias = torch.cat( |
| [self.add_q_proj.bias.data, self.add_k_proj.bias.data, self.add_v_proj.bias.data] |
| ) |
| self.to_added_qkv.bias.copy_(concatenated_bias) |
|
|
| self.fused_projections = True |
|
|
| @torch.no_grad() |
| def unfuse_projections(self): |
| """ |
| Unfuse the query, key, and value projections back to separate projections. |
| """ |
| |
| |
| if not self._supports_qkv_fusion: |
| return |
|
|
| |
| if not getattr(self, "fused_projections", False): |
| return |
|
|
| |
| if hasattr(self, "to_qkv"): |
| delattr(self, "to_qkv") |
|
|
| if hasattr(self, "to_kv"): |
| delattr(self, "to_kv") |
|
|
| if hasattr(self, "to_added_qkv"): |
| delattr(self, "to_added_qkv") |
|
|
| self.fused_projections = False |
|
|
| def set_attention_slice(self, slice_size: int) -> None: |
| """ |
| Set the slice size for attention computation. |
| |
| Args: |
| slice_size (`int`): |
| The slice size for attention computation. |
| """ |
| if hasattr(self, "sliceable_head_dim") and slice_size is not None and slice_size > self.sliceable_head_dim: |
| raise ValueError(f"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.") |
|
|
| processor = None |
|
|
| |
| if slice_size is not None: |
| processor = self._get_compatible_processor("sliced") |
|
|
| |
| if processor is None: |
| processor = self._default_processor_cls() |
|
|
| self.set_processor(processor) |
|
|
| def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor: |
| """ |
| Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. |
| |
| Args: |
| tensor (`torch.Tensor`): The tensor to reshape. |
| |
| Returns: |
| `torch.Tensor`: The reshaped tensor. |
| """ |
| head_size = self.heads |
| batch_size, seq_len, dim = tensor.shape |
| tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) |
| tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size) |
| return tensor |
|
|
| def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor: |
| """ |
| Reshape the tensor for multi-head attention processing. |
| |
| Args: |
| tensor (`torch.Tensor`): The tensor to reshape. |
| out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. |
| |
| Returns: |
| `torch.Tensor`: The reshaped tensor. |
| """ |
| head_size = self.heads |
| if tensor.ndim == 3: |
| batch_size, seq_len, dim = tensor.shape |
| extra_dim = 1 |
| else: |
| batch_size, extra_dim, seq_len, dim = tensor.shape |
| tensor = tensor.reshape(batch_size, seq_len * extra_dim, head_size, dim // head_size) |
| tensor = tensor.permute(0, 2, 1, 3) |
|
|
| if out_dim == 3: |
| tensor = tensor.reshape(batch_size * head_size, seq_len * extra_dim, dim // head_size) |
|
|
| return tensor |
|
|
| def get_attention_scores( |
| self, query: torch.Tensor, key: torch.Tensor, attention_mask: torch.Tensor | None = None |
| ) -> torch.Tensor: |
| """ |
| Compute the attention scores. |
| |
| Args: |
| query (`torch.Tensor`): The query tensor. |
| key (`torch.Tensor`): The key tensor. |
| attention_mask (`torch.Tensor`, *optional*): The attention mask to use. |
| |
| Returns: |
| `torch.Tensor`: The attention probabilities/scores. |
| """ |
| dtype = query.dtype |
| if self.upcast_attention: |
| query = query.float() |
| key = key.float() |
|
|
| if attention_mask is None: |
| baddbmm_input = torch.empty( |
| query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device |
| ) |
| beta = 0 |
| else: |
| baddbmm_input = attention_mask |
| beta = 1 |
|
|
| attention_scores = torch.baddbmm( |
| baddbmm_input, |
| query, |
| key.transpose(-1, -2), |
| beta=beta, |
| alpha=self.scale, |
| ) |
| del baddbmm_input |
|
|
| if self.upcast_softmax: |
| attention_scores = attention_scores.float() |
|
|
| attention_probs = attention_scores.softmax(dim=-1) |
| del attention_scores |
|
|
| attention_probs = attention_probs.to(dtype) |
|
|
| return attention_probs |
|
|
| def prepare_attention_mask( |
| self, attention_mask: torch.Tensor, target_length: int, batch_size: int, out_dim: int = 3 |
| ) -> torch.Tensor: |
| """ |
| Prepare the attention mask for the attention computation. |
| |
| Args: |
| attention_mask (`torch.Tensor`): The attention mask to prepare. |
| target_length (`int`): The target length of the attention mask. |
| batch_size (`int`): The batch size for repeating the attention mask. |
| out_dim (`int`, *optional*, defaults to `3`): Output dimension. |
| |
| Returns: |
| `torch.Tensor`: The prepared attention mask. |
| """ |
| head_size = self.heads |
| if attention_mask is None: |
| return attention_mask |
|
|
| current_length: int = attention_mask.shape[-1] |
| if current_length != target_length: |
| if attention_mask.device.type == "mps": |
| |
| |
| padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length) |
| padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device) |
| attention_mask = torch.cat([attention_mask, padding], dim=2) |
| else: |
| |
| |
| |
| |
| attention_mask = F.pad(attention_mask, (0, target_length), value=0.0) |
|
|
| if out_dim == 3: |
| if attention_mask.shape[0] < batch_size * head_size: |
| attention_mask = attention_mask.repeat_interleave(head_size, dim=0) |
| elif out_dim == 4: |
| attention_mask = attention_mask.unsqueeze(1) |
| attention_mask = attention_mask.repeat_interleave(head_size, dim=1) |
|
|
| return attention_mask |
|
|
| def norm_encoder_hidden_states(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: |
| """ |
| Normalize the encoder hidden states. |
| |
| Args: |
| encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder. |
| |
| Returns: |
| `torch.Tensor`: The normalized encoder hidden states. |
| """ |
| assert self.norm_cross is not None, "self.norm_cross must be defined to call self.norm_encoder_hidden_states" |
| if isinstance(self.norm_cross, nn.LayerNorm): |
| encoder_hidden_states = self.norm_cross(encoder_hidden_states) |
| elif isinstance(self.norm_cross, nn.GroupNorm): |
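| # GroupNorm normalizes along the channel dimension and expects channels at dim 1, so move the |
| # channels next to the batch dimension, normalize, then move them back. |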
| |
| |
| |
| |
| |
| encoder_hidden_states = encoder_hidden_states.transpose(1, 2) |
| encoder_hidden_states = self.norm_cross(encoder_hidden_states) |
| encoder_hidden_states = encoder_hidden_states.transpose(1, 2) |
| else: |
| assert False |
|
|
| return encoder_hidden_states |
|
|
|
|
| def _chunked_feed_forward(ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int): |
| |
| if hidden_states.shape[chunk_dim] % chunk_size != 0: |
| raise ValueError( |
| f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." |
| ) |
|
|
| num_chunks = hidden_states.shape[chunk_dim] // chunk_size |
| ff_output = torch.cat( |
| [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)], |
| dim=chunk_dim, |
| ) |
| return ff_output |
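| |
| # A hedged usage sketch (not part of the original module): blocks below that expose `set_chunk_feed_forward` |
| # route their feed-forward through `_chunked_feed_forward`, trading a little speed for lower peak memory: |
| # |
| #     block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40) |
| #     block.set_chunk_feed_forward(chunk_size=256, dim=1)  # sequence length must be divisible by 256 |
| #     out = block(torch.randn(1, 1024, 320)) |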
|
|
|
|
| @maybe_allow_in_graph |
| class GatedSelfAttentionDense(nn.Module): |
| r""" |
| A gated self-attention dense layer that combines visual features and object features. |
| |
| Parameters: |
| query_dim (`int`): The number of channels in the query. |
| context_dim (`int`): The number of channels in the context. |
| n_heads (`int`): The number of heads to use for attention. |
| d_head (`int`): The number of channels in each head. |
| """ |
|
|
| def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int): |
| super().__init__() |
|
|
| |
| self.linear = nn.Linear(context_dim, query_dim) |
|
|
| self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head) |
| self.ff = FeedForward(query_dim, activation_fn="geglu") |
|
|
| self.norm1 = nn.LayerNorm(query_dim) |
| self.norm2 = nn.LayerNorm(query_dim) |
|
|
| self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0))) |
| self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0))) |
|
|
| self.enabled = True |
|
|
| def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor: |
| if not self.enabled: |
| return x |
|
|
| n_visual = x.shape[1] |
| objs = self.linear(objs) |
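| |
| # GLIGEN-style gating: attend over the concatenation of visual and object tokens, keep only the |
| # visual slice, and blend it back in through learnable, zero-initialized tanh gates. |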
|
|
| x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :] |
| x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x)) |
|
|
| return x |
|
|
|
|
| @maybe_allow_in_graph |
| class JointTransformerBlock(nn.Module): |
| r""" |
| A Transformer block following the MMDiT architecture, introduced in Stable Diffusion 3. |
| |
| Reference: https://huggingface.co/papers/2403.03206 |
| |
| Parameters: |
| dim (`int`): The number of channels in the input and output. |
| num_attention_heads (`int`): The number of heads to use for multi-head attention. |
| attention_head_dim (`int`): The number of channels in each head. |
| context_pre_only (`bool`): Boolean to determine if we should add some blocks associated with the |
| processing of `context` conditions. |
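| |
| Example (a minimal sketch with toy sizes; real SD3 checkpoints configure these values for you): |
| |
| ```py |
| import torch |
| |
| block = JointTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16) |
| image_tokens = torch.randn(1, 16, 64) |
| text_tokens = torch.randn(1, 8, 64) |
| temb = torch.randn(1, 64)  # pooled timestep/conditioning embedding |
| text_out, image_out = block(image_tokens, text_tokens, temb) |
| ``` |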
| """ |
|
|
| def __init__( |
| self, |
| dim: int, |
| num_attention_heads: int, |
| attention_head_dim: int, |
| context_pre_only: bool = False, |
| qk_norm: str | None = None, |
| use_dual_attention: bool = False, |
| ): |
| super().__init__() |
|
|
| self.use_dual_attention = use_dual_attention |
| self.context_pre_only = context_pre_only |
| context_norm_type = "ada_norm_continous" if context_pre_only else "ada_norm_zero" |
|
|
| if use_dual_attention: |
| self.norm1 = SD35AdaLayerNormZeroX(dim) |
| else: |
| self.norm1 = AdaLayerNormZero(dim) |
|
|
| if context_norm_type == "ada_norm_continous": |
| self.norm1_context = AdaLayerNormContinuous( |
| dim, dim, elementwise_affine=False, eps=1e-6, bias=True, norm_type="layer_norm" |
| ) |
| elif context_norm_type == "ada_norm_zero": |
| self.norm1_context = AdaLayerNormZero(dim) |
| else: |
| raise ValueError( |
| f"Unknown context_norm_type: {context_norm_type}, currently only support `ada_norm_continous`, `ada_norm_zero`" |
| ) |
|
|
| if hasattr(F, "scaled_dot_product_attention"): |
| processor = JointAttnProcessor2_0() |
| else: |
| raise ValueError( |
| "The current PyTorch version does not support the `scaled_dot_product_attention` function." |
| ) |
|
|
| self.attn = Attention( |
| query_dim=dim, |
| cross_attention_dim=None, |
| added_kv_proj_dim=dim, |
| dim_head=attention_head_dim, |
| heads=num_attention_heads, |
| out_dim=dim, |
| context_pre_only=context_pre_only, |
| bias=True, |
| processor=processor, |
| qk_norm=qk_norm, |
| eps=1e-6, |
| ) |
|
|
| if use_dual_attention: |
| self.attn2 = Attention( |
| query_dim=dim, |
| cross_attention_dim=None, |
| dim_head=attention_head_dim, |
| heads=num_attention_heads, |
| out_dim=dim, |
| bias=True, |
| processor=processor, |
| qk_norm=qk_norm, |
| eps=1e-6, |
| ) |
| else: |
| self.attn2 = None |
|
|
| self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) |
| self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") |
|
|
| if not context_pre_only: |
| self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) |
| self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") |
| else: |
| self.norm2_context = None |
| self.ff_context = None |
|
|
| |
| self._chunk_size = None |
| self._chunk_dim = 0 |
|
|
| |
| def set_chunk_feed_forward(self, chunk_size: int | None, dim: int = 0): |
| |
| self._chunk_size = chunk_size |
| self._chunk_dim = dim |
|
|
| def forward( |
| self, |
| hidden_states: torch.FloatTensor, |
| encoder_hidden_states: torch.FloatTensor, |
| temb: torch.FloatTensor, |
| joint_attention_kwargs: dict[str, Any] | None = None, |
| ) -> tuple[torch.Tensor, torch.Tensor]: |
| joint_attention_kwargs = joint_attention_kwargs or {} |
| if self.use_dual_attention: |
| norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp, norm_hidden_states2, gate_msa2 = self.norm1( |
| hidden_states, emb=temb |
| ) |
| else: |
| norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) |
|
|
| if self.context_pre_only: |
| norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states, temb) |
| else: |
| norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( |
| encoder_hidden_states, emb=temb |
| ) |
|
|
| |
| attn_output, context_attn_output = self.attn( |
| hidden_states=norm_hidden_states, |
| encoder_hidden_states=norm_encoder_hidden_states, |
| **joint_attention_kwargs, |
| ) |
|
|
| |
| attn_output = gate_msa.unsqueeze(1) * attn_output |
| hidden_states = hidden_states + attn_output |
|
|
| if self.use_dual_attention: |
| attn_output2 = self.attn2(hidden_states=norm_hidden_states2, **joint_attention_kwargs) |
| attn_output2 = gate_msa2.unsqueeze(1) * attn_output2 |
| hidden_states = hidden_states + attn_output2 |
|
|
| norm_hidden_states = self.norm2(hidden_states) |
| norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] |
| if self._chunk_size is not None: |
| |
| ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) |
| else: |
| ff_output = self.ff(norm_hidden_states) |
| ff_output = gate_mlp.unsqueeze(1) * ff_output |
|
|
| hidden_states = hidden_states + ff_output |
|
|
| |
| if self.context_pre_only: |
| encoder_hidden_states = None |
| else: |
| context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output |
| encoder_hidden_states = encoder_hidden_states + context_attn_output |
|
|
| norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) |
| norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] |
| if self._chunk_size is not None: |
| |
| context_ff_output = _chunked_feed_forward( |
| self.ff_context, norm_encoder_hidden_states, self._chunk_dim, self._chunk_size |
| ) |
| else: |
| context_ff_output = self.ff_context(norm_encoder_hidden_states) |
| encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output |
|
|
| return encoder_hidden_states, hidden_states |
|
|
|
|
| @maybe_allow_in_graph |
| class BasicTransformerBlock(nn.Module): |
| r""" |
| A basic Transformer block. |
| |
| Parameters: |
| dim (`int`): The number of channels in the input and output. |
| num_attention_heads (`int`): The number of heads to use for multi-head attention. |
| attention_head_dim (`int`): The number of channels in each head. |
| dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. |
| cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. |
| activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. |
| num_embeds_ada_norm (`int`, *optional*): |
| The number of diffusion steps used during training. See `Transformer2DModel`. |
| attention_bias (`bool`, *optional*, defaults to `False`): |
| Whether the attention layers should contain a bias parameter. |
| only_cross_attention (`bool`, *optional*): |
| Whether to use only cross-attention layers. In this case two cross attention layers are used. |
| double_self_attention (`bool`, *optional*): |
| Whether to use two self-attention layers. In this case no cross attention layers are used. |
| upcast_attention (`bool`, *optional*): |
| Whether to upcast the attention computation to float32. This is useful for mixed precision training. |
| norm_elementwise_affine (`bool`, *optional*, defaults to `True`): |
| Whether to use learnable elementwise affine parameters for normalization. |
| norm_type (`str`, *optional*, defaults to `"layer_norm"`): |
| The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`. |
| final_dropout (`bool` *optional*, defaults to False): |
| Whether to apply a final dropout after the last feed-forward layer. |
| attention_type (`str`, *optional*, defaults to `"default"`): |
| The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`. |
| positional_embeddings (`str`, *optional*, defaults to `None`): |
| The type of positional embeddings to apply. |
| num_positional_embeddings (`int`, *optional*, defaults to `None`): |
| The maximum number of positional embeddings to apply. |
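| |
| Example (an illustrative sketch with toy sizes, not tied to any released checkpoint): |
| |
| ```py |
| import torch |
| |
| block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40, cross_attention_dim=768) |
| hidden_states = torch.randn(2, 64, 320) |
| encoder_hidden_states = torch.randn(2, 77, 768) |
| out = block(hidden_states, encoder_hidden_states=encoder_hidden_states)  # shape (2, 64, 320) |
| ``` |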
| """ |
|
|
| def __init__( |
| self, |
| dim: int, |
| num_attention_heads: int, |
| attention_head_dim: int, |
| dropout=0.0, |
| cross_attention_dim: int | None = None, |
| activation_fn: str = "geglu", |
| num_embeds_ada_norm: int | None = None, |
| attention_bias: bool = False, |
| only_cross_attention: bool = False, |
| double_self_attention: bool = False, |
| upcast_attention: bool = False, |
| norm_elementwise_affine: bool = True, |
| norm_type: str = "layer_norm", |
| norm_eps: float = 1e-5, |
| final_dropout: bool = False, |
| attention_type: str = "default", |
| positional_embeddings: str | None = None, |
| num_positional_embeddings: int | None = None, |
| ada_norm_continous_conditioning_embedding_dim: int | None = None, |
| ada_norm_bias: int | None = None, |
| ff_inner_dim: int | None = None, |
| ff_bias: bool = True, |
| attention_out_bias: bool = True, |
| ): |
| super().__init__() |
| self.dim = dim |
| self.num_attention_heads = num_attention_heads |
| self.attention_head_dim = attention_head_dim |
| self.dropout = dropout |
| self.cross_attention_dim = cross_attention_dim |
| self.activation_fn = activation_fn |
| self.attention_bias = attention_bias |
| self.double_self_attention = double_self_attention |
| self.norm_elementwise_affine = norm_elementwise_affine |
| self.positional_embeddings = positional_embeddings |
| self.num_positional_embeddings = num_positional_embeddings |
| self.only_cross_attention = only_cross_attention |
|
|
| |
| self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero" |
| self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" |
| self.use_ada_layer_norm_single = norm_type == "ada_norm_single" |
| self.use_layer_norm = norm_type == "layer_norm" |
| self.use_ada_layer_norm_continuous = norm_type == "ada_norm_continuous" |
|
|
| if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: |
| raise ValueError( |
| f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" |
| f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." |
| ) |
|
|
| self.norm_type = norm_type |
| self.num_embeds_ada_norm = num_embeds_ada_norm |
|
|
| if positional_embeddings and (num_positional_embeddings is None): |
| raise ValueError( |
| "If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined." |
| ) |
|
|
| if positional_embeddings == "sinusoidal": |
| self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings) |
| else: |
| self.pos_embed = None |
|
|
| |
| |
| if norm_type == "ada_norm": |
| self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) |
| elif norm_type == "ada_norm_zero": |
| self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm) |
| elif norm_type == "ada_norm_continuous": |
| self.norm1 = AdaLayerNormContinuous( |
| dim, |
| ada_norm_continous_conditioning_embedding_dim, |
| norm_elementwise_affine, |
| norm_eps, |
| ada_norm_bias, |
| "rms_norm", |
| ) |
| else: |
| self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) |
|
|
| self.attn1 = Attention( |
| query_dim=dim, |
| heads=num_attention_heads, |
| dim_head=attention_head_dim, |
| dropout=dropout, |
| bias=attention_bias, |
| cross_attention_dim=cross_attention_dim if only_cross_attention else None, |
| upcast_attention=upcast_attention, |
| out_bias=attention_out_bias, |
| ) |
|
|
| |
| if cross_attention_dim is not None or double_self_attention: |
| |
| |
| |
| if norm_type == "ada_norm": |
| self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm) |
| elif norm_type == "ada_norm_continuous": |
| self.norm2 = AdaLayerNormContinuous( |
| dim, |
| ada_norm_continous_conditioning_embedding_dim, |
| norm_elementwise_affine, |
| norm_eps, |
| ada_norm_bias, |
| "rms_norm", |
| ) |
| else: |
| self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) |
|
|
| self.attn2 = Attention( |
| query_dim=dim, |
| cross_attention_dim=cross_attention_dim if not double_self_attention else None, |
| heads=num_attention_heads, |
| dim_head=attention_head_dim, |
| dropout=dropout, |
| bias=attention_bias, |
| upcast_attention=upcast_attention, |
| out_bias=attention_out_bias, |
| ) |
| else: |
| if norm_type == "ada_norm_single": |
| self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) |
| else: |
| self.norm2 = None |
| self.attn2 = None |
|
|
| |
| if norm_type == "ada_norm_continuous": |
| self.norm3 = AdaLayerNormContinuous( |
| dim, |
| ada_norm_continous_conditioning_embedding_dim, |
| norm_elementwise_affine, |
| norm_eps, |
| ada_norm_bias, |
| "layer_norm", |
| ) |
|
|
| elif norm_type in ["ada_norm_zero", "ada_norm", "layer_norm"]: |
| self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) |
| elif norm_type == "layer_norm_i2vgen": |
| self.norm3 = None |
|
|
| self.ff = FeedForward( |
| dim, |
| dropout=dropout, |
| activation_fn=activation_fn, |
| final_dropout=final_dropout, |
| inner_dim=ff_inner_dim, |
| bias=ff_bias, |
| ) |
|
|
| |
| if attention_type == "gated" or attention_type == "gated-text-image": |
| self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim) |
|
|
| |
| if norm_type == "ada_norm_single": |
| self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5) |
|
|
| |
| self._chunk_size = None |
| self._chunk_dim = 0 |
|
|
| def set_chunk_feed_forward(self, chunk_size: int | None, dim: int = 0): |
| |
| self._chunk_size = chunk_size |
| self._chunk_dim = dim |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| attention_mask: torch.Tensor | None = None, |
| encoder_hidden_states: torch.Tensor | None = None, |
| encoder_attention_mask: torch.Tensor | None = None, |
| timestep: torch.LongTensor | None = None, |
| cross_attention_kwargs: dict[str, Any] = None, |
| class_labels: torch.LongTensor | None = None, |
| added_cond_kwargs: dict[str, torch.Tensor] | None = None, |
| ) -> torch.Tensor: |
| if cross_attention_kwargs is not None: |
| if cross_attention_kwargs.get("scale", None) is not None: |
| logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") |
|
|
| |
| |
| batch_size = hidden_states.shape[0] |
|
|
| if self.norm_type == "ada_norm": |
| norm_hidden_states = self.norm1(hidden_states, timestep) |
| elif self.norm_type == "ada_norm_zero": |
| norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1( |
| hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype |
| ) |
| elif self.norm_type in ["layer_norm", "layer_norm_i2vgen"]: |
| norm_hidden_states = self.norm1(hidden_states) |
| elif self.norm_type == "ada_norm_continuous": |
| norm_hidden_states = self.norm1(hidden_states, added_cond_kwargs["pooled_text_emb"]) |
| elif self.norm_type == "ada_norm_single": |
| shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ( |
| self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1) |
| ).chunk(6, dim=1) |
| norm_hidden_states = self.norm1(hidden_states) |
| norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa |
| else: |
| raise ValueError("Incorrect norm used") |
|
|
| if self.pos_embed is not None: |
| norm_hidden_states = self.pos_embed(norm_hidden_states) |
|
|
| |
| cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} |
| gligen_kwargs = cross_attention_kwargs.pop("gligen", None) |
|
|
| attn_output = self.attn1( |
| norm_hidden_states, |
| encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, |
| attention_mask=attention_mask, |
| **cross_attention_kwargs, |
| ) |
|
|
| if self.norm_type == "ada_norm_zero": |
| attn_output = gate_msa.unsqueeze(1) * attn_output |
| elif self.norm_type == "ada_norm_single": |
| attn_output = gate_msa * attn_output |
|
|
| hidden_states = attn_output + hidden_states |
| if hidden_states.ndim == 4: |
| hidden_states = hidden_states.squeeze(1) |
|
|
| |
| if gligen_kwargs is not None: |
| hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"]) |
|
|
| |
| if self.attn2 is not None: |
| if self.norm_type == "ada_norm": |
| norm_hidden_states = self.norm2(hidden_states, timestep) |
| elif self.norm_type in ["ada_norm_zero", "layer_norm", "layer_norm_i2vgen"]: |
| norm_hidden_states = self.norm2(hidden_states) |
| elif self.norm_type == "ada_norm_single": |
| |
| |
| norm_hidden_states = hidden_states |
| elif self.norm_type == "ada_norm_continuous": |
| norm_hidden_states = self.norm2(hidden_states, added_cond_kwargs["pooled_text_emb"]) |
| else: |
| raise ValueError("Incorrect norm") |
|
|
| if self.pos_embed is not None and self.norm_type != "ada_norm_single": |
| norm_hidden_states = self.pos_embed(norm_hidden_states) |
|
|
| attn_output = self.attn2( |
| norm_hidden_states, |
| encoder_hidden_states=encoder_hidden_states, |
| attention_mask=encoder_attention_mask, |
| **cross_attention_kwargs, |
| ) |
| hidden_states = attn_output + hidden_states |
|
|
| |
| |
| if self.norm_type == "ada_norm_continuous": |
| norm_hidden_states = self.norm3(hidden_states, added_cond_kwargs["pooled_text_emb"]) |
| elif not self.norm_type == "ada_norm_single": |
| norm_hidden_states = self.norm3(hidden_states) |
|
|
| if self.norm_type == "ada_norm_zero": |
| norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] |
|
|
| if self.norm_type == "ada_norm_single": |
| norm_hidden_states = self.norm2(hidden_states) |
| norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp |
|
|
| if self._chunk_size is not None: |
| |
| ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) |
| else: |
| ff_output = self.ff(norm_hidden_states) |
|
|
| if self.norm_type == "ada_norm_zero": |
| ff_output = gate_mlp.unsqueeze(1) * ff_output |
| elif self.norm_type == "ada_norm_single": |
| ff_output = gate_mlp * ff_output |
|
|
| hidden_states = ff_output + hidden_states |
| if hidden_states.ndim == 4: |
| hidden_states = hidden_states.squeeze(1) |
|
|
| return hidden_states |
|
|
|
|
| class LuminaFeedForward(nn.Module): |
| r""" |
| A feed-forward layer. |
| |
| Parameters: |
| dim (`int`): |
| The dimensionality of the input and output. This parameter determines the width of the model's |
| hidden representations. |
| inner_dim (`int`): The intermediate dimension of the feed-forward layer. |
| multiple_of (`int`, *optional*, defaults to 256): Value to ensure the hidden dimension is a multiple |
| of this value. |
| ffn_dim_multiplier (`float`, *optional*): Custom multiplier for the hidden |
| dimension. Defaults to `None`. |
| """ |
|
|
| def __init__( |
| self, |
| dim: int, |
| inner_dim: int, |
| multiple_of: int | None = 256, |
| ffn_dim_multiplier: float | None = None, |
| ): |
| super().__init__() |
| |
| if ffn_dim_multiplier is not None: |
| inner_dim = int(ffn_dim_multiplier * inner_dim) |
| inner_dim = multiple_of * ((inner_dim + multiple_of - 1) // multiple_of) |
|
|
| self.linear_1 = nn.Linear( |
| dim, |
| inner_dim, |
| bias=False, |
| ) |
| self.linear_2 = nn.Linear( |
| inner_dim, |
| dim, |
| bias=False, |
| ) |
| self.linear_3 = nn.Linear( |
| dim, |
| inner_dim, |
| bias=False, |
| ) |
| self.silu = FP32SiLU() |
|
|
| def forward(self, x): |
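| # SwiGLU-style gating: SiLU(W1 x) * (W3 x), projected back down by W2. |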
| return self.linear_2(self.silu(self.linear_1(x)) * self.linear_3(x)) |
|
|
|
|
| @maybe_allow_in_graph |
| class TemporalBasicTransformerBlock(nn.Module): |
| r""" |
| A basic Transformer block for video like data. |
| |
| Parameters: |
| dim (`int`): The number of channels in the input and output. |
| time_mix_inner_dim (`int`): The number of channels for temporal attention. |
| num_attention_heads (`int`): The number of heads to use for multi-head attention. |
| attention_head_dim (`int`): The number of channels in each head. |
| cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. |
| """ |
|
|
| def __init__( |
| self, |
| dim: int, |
| time_mix_inner_dim: int, |
| num_attention_heads: int, |
| attention_head_dim: int, |
| cross_attention_dim: int | None = None, |
| ): |
| super().__init__() |
| self.is_res = dim == time_mix_inner_dim |
|
|
| self.norm_in = nn.LayerNorm(dim) |
|
|
| |
| |
| self.ff_in = FeedForward( |
| dim, |
| dim_out=time_mix_inner_dim, |
| activation_fn="geglu", |
| ) |
|
|
| self.norm1 = nn.LayerNorm(time_mix_inner_dim) |
| self.attn1 = Attention( |
| query_dim=time_mix_inner_dim, |
| heads=num_attention_heads, |
| dim_head=attention_head_dim, |
| cross_attention_dim=None, |
| ) |
|
|
| |
| if cross_attention_dim is not None: |
| |
| |
| |
| self.norm2 = nn.LayerNorm(time_mix_inner_dim) |
| self.attn2 = Attention( |
| query_dim=time_mix_inner_dim, |
| cross_attention_dim=cross_attention_dim, |
| heads=num_attention_heads, |
| dim_head=attention_head_dim, |
| ) |
| else: |
| self.norm2 = None |
| self.attn2 = None |
|
|
| |
| self.norm3 = nn.LayerNorm(time_mix_inner_dim) |
| self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu") |
|
|
| |
| self._chunk_size = None |
| self._chunk_dim = None |
|
|
| def set_chunk_feed_forward(self, chunk_size: int | None, **kwargs): |
| |
| self._chunk_size = chunk_size |
| |
| self._chunk_dim = 1 |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| num_frames: int, |
| encoder_hidden_states: torch.Tensor | None = None, |
| ) -> torch.Tensor: |
| |
| |
| batch_size = hidden_states.shape[0] |
|
|
| batch_frames, seq_length, channels = hidden_states.shape |
| batch_size = batch_frames // num_frames |
|
|
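| # Fold the spatial positions into the batch so attention runs across frames: |
| # (batch * frames, seq, channels) -> (batch * seq, frames, channels). |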
| hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels) |
| hidden_states = hidden_states.permute(0, 2, 1, 3) |
| hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels) |
|
|
| residual = hidden_states |
| hidden_states = self.norm_in(hidden_states) |
|
|
| if self._chunk_size is not None: |
| hidden_states = _chunked_feed_forward(self.ff_in, hidden_states, self._chunk_dim, self._chunk_size) |
| else: |
| hidden_states = self.ff_in(hidden_states) |
|
|
| if self.is_res: |
| hidden_states = hidden_states + residual |
|
|
| norm_hidden_states = self.norm1(hidden_states) |
| attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None) |
| hidden_states = attn_output + hidden_states |
|
|
| |
| if self.attn2 is not None: |
| norm_hidden_states = self.norm2(hidden_states) |
| attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states) |
| hidden_states = attn_output + hidden_states |
|
|
| |
| norm_hidden_states = self.norm3(hidden_states) |
|
|
| if self._chunk_size is not None: |
| ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) |
| else: |
| ff_output = self.ff(norm_hidden_states) |
|
|
| if self.is_res: |
| hidden_states = ff_output + hidden_states |
| else: |
| hidden_states = ff_output |
|
|
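| # Undo the fold: (batch * seq, frames, channels) -> (batch * frames, seq, channels). |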
| hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels) |
| hidden_states = hidden_states.permute(0, 2, 1, 3) |
| hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels) |
|
|
| return hidden_states |
|
|
|
|
| class SkipFFTransformerBlock(nn.Module): |
| def __init__( |
| self, |
| dim: int, |
| num_attention_heads: int, |
| attention_head_dim: int, |
| kv_input_dim: int, |
| kv_input_dim_proj_use_bias: bool, |
| dropout=0.0, |
| cross_attention_dim: int | None = None, |
| attention_bias: bool = False, |
| attention_out_bias: bool = True, |
| ): |
| super().__init__() |
| if kv_input_dim != dim: |
| self.kv_mapper = nn.Linear(kv_input_dim, dim, kv_input_dim_proj_use_bias) |
| else: |
| self.kv_mapper = None |
|
|
| self.norm1 = RMSNorm(dim, 1e-06) |
|
|
| self.attn1 = Attention( |
| query_dim=dim, |
| heads=num_attention_heads, |
| dim_head=attention_head_dim, |
| dropout=dropout, |
| bias=attention_bias, |
| cross_attention_dim=cross_attention_dim, |
| out_bias=attention_out_bias, |
| ) |
|
|
| self.norm2 = RMSNorm(dim, 1e-06) |
|
|
| self.attn2 = Attention( |
| query_dim=dim, |
| cross_attention_dim=cross_attention_dim, |
| heads=num_attention_heads, |
| dim_head=attention_head_dim, |
| dropout=dropout, |
| bias=attention_bias, |
| out_bias=attention_out_bias, |
| ) |
|
|
| def forward(self, hidden_states, encoder_hidden_states, cross_attention_kwargs): |
| cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} |
|
|
| if self.kv_mapper is not None: |
| encoder_hidden_states = self.kv_mapper(F.silu(encoder_hidden_states)) |
|
|
| norm_hidden_states = self.norm1(hidden_states) |
|
|
| attn_output = self.attn1( |
| norm_hidden_states, |
| encoder_hidden_states=encoder_hidden_states, |
| **cross_attention_kwargs, |
| ) |
|
|
| hidden_states = attn_output + hidden_states |
|
|
| norm_hidden_states = self.norm2(hidden_states) |
|
|
| attn_output = self.attn2( |
| norm_hidden_states, |
| encoder_hidden_states=encoder_hidden_states, |
| **cross_attention_kwargs, |
| ) |
|
|
| hidden_states = attn_output + hidden_states |
|
|
| return hidden_states |
|
|
|
|
| @maybe_allow_in_graph |
| class FreeNoiseTransformerBlock(nn.Module): |
| r""" |
| A FreeNoise Transformer block. |
| |
| Parameters: |
| dim (`int`): |
| The number of channels in the input and output. |
| num_attention_heads (`int`): |
| The number of heads to use for multi-head attention. |
| attention_head_dim (`int`): |
| The number of channels in each head. |
| dropout (`float`, *optional*, defaults to 0.0): |
| The dropout probability to use. |
| cross_attention_dim (`int`, *optional*): |
| The size of the encoder_hidden_states vector for cross attention. |
| activation_fn (`str`, *optional*, defaults to `"geglu"`): |
| Activation function to be used in feed-forward. |
| num_embeds_ada_norm (`int`, *optional*): |
| The number of diffusion steps used during training. See `Transformer2DModel`. |
| attention_bias (`bool`, defaults to `False`): |
| Configure if the attentions should contain a bias parameter. |
| only_cross_attention (`bool`, defaults to `False`): |
| Whether to use only cross-attention layers. In this case two cross attention layers are used. |
| double_self_attention (`bool`, defaults to `False`): |
| Whether to use two self-attention layers. In this case no cross attention layers are used. |
| upcast_attention (`bool`, defaults to `False`): |
| Whether to upcast the attention computation to float32. This is useful for mixed precision training. |
| norm_elementwise_affine (`bool`, defaults to `True`): |
| Whether to use learnable elementwise affine parameters for normalization. |
| norm_type (`str`, defaults to `"layer_norm"`): |
| The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`. |
| final_dropout (`bool` defaults to `False`): |
| Whether to apply a final dropout after the last feed-forward layer. |
| attention_type (`str`, defaults to `"default"`): |
| The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`. |
| positional_embeddings (`str`, *optional*): |
| The type of positional embeddings to apply. |
| num_positional_embeddings (`int`, *optional*, defaults to `None`): |
| The maximum number of positional embeddings to apply. |
| ff_inner_dim (`int`, *optional*): |
| Hidden dimension of feed-forward MLP. |
| ff_bias (`bool`, defaults to `True`): |
| Whether or not to use bias in feed-forward MLP. |
| attention_out_bias (`bool`, defaults to `True`): |
| Whether or not to use bias in attention output project layer. |
| context_length (`int`, defaults to `16`): |
| The maximum number of frames that the FreeNoise block processes at once. |
| context_stride (`int`, defaults to `4`): |
| The number of frames to be skipped before starting to process a new batch of `context_length` frames. |
| weighting_scheme (`str`, defaults to `"pyramid"`): |
| The weighting scheme to use for weighted averaging of processed latent frames. As described in |
| Equation 9 of the [FreeNoise](https://huggingface.co/papers/2310.15169) paper, "pyramid" is the default |
| setting used. |
| """ |
|
|
| def __init__( |
| self, |
| dim: int, |
| num_attention_heads: int, |
| attention_head_dim: int, |
| dropout: float = 0.0, |
| cross_attention_dim: int | None = None, |
| activation_fn: str = "geglu", |
| num_embeds_ada_norm: int | None = None, |
| attention_bias: bool = False, |
| only_cross_attention: bool = False, |
| double_self_attention: bool = False, |
| upcast_attention: bool = False, |
| norm_elementwise_affine: bool = True, |
| norm_type: str = "layer_norm", |
| norm_eps: float = 1e-5, |
| final_dropout: bool = False, |
| positional_embeddings: str | None = None, |
| num_positional_embeddings: int | None = None, |
| ff_inner_dim: int | None = None, |
| ff_bias: bool = True, |
| attention_out_bias: bool = True, |
| context_length: int = 16, |
| context_stride: int = 4, |
| weighting_scheme: str = "pyramid", |
| ): |
| super().__init__() |
| self.dim = dim |
| self.num_attention_heads = num_attention_heads |
| self.attention_head_dim = attention_head_dim |
| self.dropout = dropout |
| self.cross_attention_dim = cross_attention_dim |
| self.activation_fn = activation_fn |
| self.attention_bias = attention_bias |
| self.double_self_attention = double_self_attention |
| self.norm_elementwise_affine = norm_elementwise_affine |
| self.positional_embeddings = positional_embeddings |
| self.num_positional_embeddings = num_positional_embeddings |
| self.only_cross_attention = only_cross_attention |
|
|
| self.set_free_noise_properties(context_length, context_stride, weighting_scheme) |
|
|
| |
| self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero" |
| self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" |
| self.use_ada_layer_norm_single = norm_type == "ada_norm_single" |
| self.use_layer_norm = norm_type == "layer_norm" |
| self.use_ada_layer_norm_continuous = norm_type == "ada_norm_continuous" |
|
|
| if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: |
| raise ValueError( |
| f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" |
| f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." |
| ) |
|
|
| self.norm_type = norm_type |
| self.num_embeds_ada_norm = num_embeds_ada_norm |
|
|
| if positional_embeddings and (num_positional_embeddings is None): |
| raise ValueError( |
| "If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined." |
| ) |
|
|
| if positional_embeddings == "sinusoidal": |
| self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings) |
| else: |
| self.pos_embed = None |
|
|
| |
| |
| self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) |
|
|
| self.attn1 = Attention( |
| query_dim=dim, |
| heads=num_attention_heads, |
| dim_head=attention_head_dim, |
| dropout=dropout, |
| bias=attention_bias, |
| cross_attention_dim=cross_attention_dim if only_cross_attention else None, |
| upcast_attention=upcast_attention, |
| out_bias=attention_out_bias, |
| ) |
|
|
| |
| if cross_attention_dim is not None or double_self_attention: |
| self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) |
|
|
| self.attn2 = Attention( |
| query_dim=dim, |
| cross_attention_dim=cross_attention_dim if not double_self_attention else None, |
| heads=num_attention_heads, |
| dim_head=attention_head_dim, |
| dropout=dropout, |
| bias=attention_bias, |
| upcast_attention=upcast_attention, |
| out_bias=attention_out_bias, |
| ) |
|
|
| |
| self.ff = FeedForward( |
| dim, |
| dropout=dropout, |
| activation_fn=activation_fn, |
| final_dropout=final_dropout, |
| inner_dim=ff_inner_dim, |
| bias=ff_bias, |
| ) |
|
|
| self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) |
|
|
| |
| self._chunk_size = None |
| self._chunk_dim = 0 |
|
|
| def _get_frame_indices(self, num_frames: int) -> list[tuple[int, int]]: |
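| # Sliding windows over the frame axis. Illustrative example (not in the original source): with |
| # num_frames=24, context_length=16 and context_stride=4 this yields [(0, 16), (4, 20), (8, 24)]. |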
| frame_indices = [] |
| for i in range(0, num_frames - self.context_length + 1, self.context_stride): |
| window_start = i |
| window_end = min(num_frames, i + self.context_length) |
| frame_indices.append((window_start, window_end)) |
| return frame_indices |
|
|
| def _get_frame_weights(self, num_frames: int, weighting_scheme: str = "pyramid") -> list[float]: |
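| # Per-frame blending weights for overlapping windows. For example, "pyramid" with num_frames=5 gives |
| # [1, 2, 3, 2, 1] and with num_frames=4 gives [1, 2, 2, 1]; "flat" weights every frame equally. |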
| if weighting_scheme == "flat": |
| weights = [1.0] * num_frames |
|
|
| elif weighting_scheme == "pyramid": |
| if num_frames % 2 == 0: |
| |
| mid = num_frames // 2 |
| weights = list(range(1, mid + 1)) |
| weights = weights + weights[::-1] |
| else: |
| |
| mid = (num_frames + 1) // 2 |
| weights = list(range(1, mid)) |
| weights = weights + [mid] + weights[::-1] |
|
|
| elif weighting_scheme == "delayed_reverse_sawtooth": |
| if num_frames % 2 == 0: |
| |
| mid = num_frames // 2 |
| weights = [0.01] * (mid - 1) + [mid] |
| weights = weights + list(range(mid, 0, -1)) |
| else: |
| |
| mid = (num_frames + 1) // 2 |
| weights = [0.01] * mid |
| weights = weights + list(range(mid, 0, -1)) |
| else: |
| raise ValueError(f"Unsupported value for weighting_scheme={weighting_scheme}") |
|
|
| return weights |
|
|
| def set_free_noise_properties( |
| self, context_length: int, context_stride: int, weighting_scheme: str = "pyramid" |
| ) -> None: |
| self.context_length = context_length |
| self.context_stride = context_stride |
| self.weighting_scheme = weighting_scheme |
|
|
| def set_chunk_feed_forward(self, chunk_size: int | None, dim: int = 0) -> None: |
| |
| self._chunk_size = chunk_size |
| self._chunk_dim = dim |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| attention_mask: torch.Tensor | None = None, |
| encoder_hidden_states: torch.Tensor | None = None, |
| encoder_attention_mask: torch.Tensor | None = None, |
| cross_attention_kwargs: dict[str, Any] = None, |
| *args, |
| **kwargs, |
| ) -> torch.Tensor: |
| if cross_attention_kwargs is not None: |
| if cross_attention_kwargs.get("scale", None) is not None: |
| logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") |
|
|
| cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} |
|
|
| |
| device = hidden_states.device |
| dtype = hidden_states.dtype |
|
|
| num_frames = hidden_states.size(1) |
| frame_indices = self._get_frame_indices(num_frames) |
| frame_weights = self._get_frame_weights(self.context_length, self.weighting_scheme) |
| frame_weights = torch.tensor(frame_weights, device=device, dtype=dtype).unsqueeze(0).unsqueeze(-1) |
| is_last_frame_batch_complete = frame_indices[-1][1] == num_frames |
|
|
| |
| |
| |
| if not is_last_frame_batch_complete: |
| if num_frames < self.context_length: |
| raise ValueError(f"Expected {num_frames=} to be greater or equal than {self.context_length=}") |
| last_frame_batch_length = num_frames - frame_indices[-1][1] |
| frame_indices.append((num_frames - self.context_length, num_frames)) |
|
|
| num_times_accumulated = torch.zeros((1, num_frames, 1), device=device) |
| accumulated_values = torch.zeros_like(hidden_states) |
|
|
| for i, (frame_start, frame_end) in enumerate(frame_indices): |
| |
| |
| |
| weights = torch.ones_like(num_times_accumulated[:, frame_start:frame_end]) |
| weights *= frame_weights |
|
|
| hidden_states_chunk = hidden_states[:, frame_start:frame_end] |
|
|
| |
| |
| norm_hidden_states = self.norm1(hidden_states_chunk) |
|
|
| if self.pos_embed is not None: |
| norm_hidden_states = self.pos_embed(norm_hidden_states) |
|
|
| attn_output = self.attn1( |
| norm_hidden_states, |
| encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, |
| attention_mask=attention_mask, |
| **cross_attention_kwargs, |
| ) |
|
|
| hidden_states_chunk = attn_output + hidden_states_chunk |
| if hidden_states_chunk.ndim == 4: |
| hidden_states_chunk = hidden_states_chunk.squeeze(1) |
|
|
| |
| if self.attn2 is not None: |
| norm_hidden_states = self.norm2(hidden_states_chunk) |
|
|
| if self.pos_embed is not None and self.norm_type != "ada_norm_single": |
| norm_hidden_states = self.pos_embed(norm_hidden_states) |
|
|
| attn_output = self.attn2( |
| norm_hidden_states, |
| encoder_hidden_states=encoder_hidden_states, |
| attention_mask=encoder_attention_mask, |
| **cross_attention_kwargs, |
| ) |
| hidden_states_chunk = attn_output + hidden_states_chunk |
|
|
| if i == len(frame_indices) - 1 and not is_last_frame_batch_complete: |
| accumulated_values[:, -last_frame_batch_length:] += ( |
| hidden_states_chunk[:, -last_frame_batch_length:] * weights[:, -last_frame_batch_length:] |
| ) |
| num_times_accumulated[:, -last_frame_batch_length:] += weights[:, -last_frame_batch_length:] |
| else: |
| accumulated_values[:, frame_start:frame_end] += hidden_states_chunk * weights |
| num_times_accumulated[:, frame_start:frame_end] += weights |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
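| # Blend the overlapping windows: divide each frame's weighted sum by how many times it was visited, |
| # guarding against division by zero for frames that were never accumulated. |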
| hidden_states = torch.cat( |
| [ |
| torch.where(num_times_split > 0, accumulated_split / num_times_split, accumulated_split) |
| for accumulated_split, num_times_split in zip( |
| accumulated_values.split(self.context_length, dim=1), |
| num_times_accumulated.split(self.context_length, dim=1), |
| ) |
| ], |
| dim=1, |
| ).to(dtype) |
|
|
| |
| norm_hidden_states = self.norm3(hidden_states) |
|
|
| if self._chunk_size is not None: |
| ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) |
| else: |
| ff_output = self.ff(norm_hidden_states) |
|
|
| hidden_states = ff_output + hidden_states |
| if hidden_states.ndim == 4: |
| hidden_states = hidden_states.squeeze(1) |
|
|
| return hidden_states |
|
|
|
|
| class FeedForward(nn.Module): |
| r""" |
| A feed-forward layer. |
| |
| Parameters: |
| dim (`int`): The number of channels in the input. |
| dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`. |
| mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension. |
| dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. |
| activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. |
| final_dropout (`bool` *optional*, defaults to False): Apply a final dropout. |
| bias (`bool`, defaults to True): Whether to use a bias in the linear layer. |
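| |
| Example (a small sketch; any of the supported `activation_fn` strings can be swapped in): |
| |
| ```py |
| import torch |
| |
| ff = FeedForward(dim=320, mult=4, activation_fn="geglu") |
| out = ff(torch.randn(2, 64, 320))  # output keeps the trailing dimension: (2, 64, 320) |
| ``` |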
| """ |
|
|
| def __init__( |
| self, |
| dim: int, |
| dim_out: int | None = None, |
| mult: int = 4, |
| dropout: float = 0.0, |
| activation_fn: str = "geglu", |
| final_dropout: bool = False, |
| inner_dim=None, |
| bias: bool = True, |
| ): |
| super().__init__() |
| if inner_dim is None: |
| inner_dim = int(dim * mult) |
| dim_out = dim_out if dim_out is not None else dim |
|
|
| if activation_fn == "gelu": |
| act_fn = GELU(dim, inner_dim, bias=bias) |
| if activation_fn == "gelu-approximate": |
| act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias) |
| elif activation_fn == "geglu": |
| act_fn = GEGLU(dim, inner_dim, bias=bias) |
| elif activation_fn == "geglu-approximate": |
| act_fn = ApproximateGELU(dim, inner_dim, bias=bias) |
| elif activation_fn == "swiglu": |
| act_fn = SwiGLU(dim, inner_dim, bias=bias) |
| elif activation_fn == "linear-silu": |
| act_fn = LinearActivation(dim, inner_dim, bias=bias, activation="silu") |
|
|
| self.net = nn.ModuleList([]) |
| |
| self.net.append(act_fn) |
| |
| self.net.append(nn.Dropout(dropout)) |
| |
| self.net.append(nn.Linear(inner_dim, dim_out, bias=bias)) |
| |
| if final_dropout: |
| self.net.append(nn.Dropout(dropout)) |
|
|
| def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor: |
| if len(args) > 0 or kwargs.get("scale", None) is not None: |
| deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." |
| deprecate("scale", "1.0.0", deprecation_message) |
| for module in self.net: |
| hidden_states = module(hidden_states) |
| return hidden_states |
|
|