from typing import Any, Dict, Optional

import torch
from torch import nn

from ..utils import USE_PEFT_BACKEND
from ..utils.torch_utils import maybe_allow_in_graph
from .activations import GEGLU, GELU, ApproximateGELU
from .attention_processor import Attention
from .embeddings import SinusoidalPositionalEmbedding
from .lora import LoRACompatibleLinear
from .normalization import AdaLayerNorm, AdaLayerNormZero

@maybe_allow_in_graph
class GatedSelfAttentionDense(nn.Module):
    r"""
    A gated self-attention dense layer that combines visual features and object features.

    Parameters:
        query_dim (`int`): The number of channels in the query.
        context_dim (`int`): The number of channels in the context.
        n_heads (`int`): The number of heads to use for attention.
        d_head (`int`): The number of channels in each head.
    """

    def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int):
        super().__init__()

        # project the object features so they can be concatenated with the visual features
        self.linear = nn.Linear(context_dim, query_dim)

        self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head)
        self.ff = FeedForward(query_dim, activation_fn="geglu")

        self.norm1 = nn.LayerNorm(query_dim)
        self.norm2 = nn.LayerNorm(query_dim)

        self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0)))
        self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0)))

        self.enabled = True

    def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor:
        if not self.enabled:
            return x

        n_visual = x.shape[1]
        objs = self.linear(objs)

        x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :]
        x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x))

        return x
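

# Usage sketch for `GatedSelfAttentionDense` (illustrative only; the feature
# dimensions and sequence lengths below are assumptions chosen for demonstration,
# not values used by the library):
#
#     fuser = GatedSelfAttentionDense(query_dim=320, context_dim=768, n_heads=8, d_head=40)
#     visual = torch.randn(2, 64, 320)   # (batch, n_visual_tokens, query_dim)
#     objs = torch.randn(2, 30, 768)     # (batch, n_object_tokens, context_dim)
#     fused = fuser(visual, objs)        # -> (2, 64, 320), same shape as `visual`
#
# Because `alpha_attn` and `alpha_dense` are initialized to 0 and passed through
# tanh, the module acts as an identity mapping on the visual features at
# initialization and only contributes once the gates are trained away from zero.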


@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block.

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (`int`, *optional*):
            The number of diffusion steps used during training. See `Transformer2DModel`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Configure if the attentions should contain a bias parameter.
        only_cross_attention (`bool`, *optional*):
            Whether to use only cross-attention layers. In this case two cross attention layers are used.
        double_self_attention (`bool`, *optional*):
            Whether to use two self-attention layers. In this case no cross attention layers are used.
        upcast_attention (`bool`, *optional*):
            Whether to upcast the attention computation to float32. This is useful for mixed precision training.
        norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
            Whether to use learnable elementwise affine parameters for normalization.
        norm_type (`str`, *optional*, defaults to `"layer_norm"`):
            The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`.
        norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon to use for the normalization layers.
        final_dropout (`bool`, *optional*, defaults to `False`):
            Whether to apply a final dropout after the last feed-forward layer.
        attention_type (`str`, *optional*, defaults to `"default"`):
            The type of attention to use. Can be `"default"`, `"gated"` or `"gated-text-image"`.
        positional_embeddings (`str`, *optional*, defaults to `None`):
            The type of positional embeddings to apply.
        num_positional_embeddings (`int`, *optional*, defaults to `None`):
            The maximum number of positional embeddings to apply.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout: float = 0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        norm_eps: float = 1e-5,
        final_dropout: bool = False,
        attention_type: str = "default",
        positional_embeddings: Optional[str] = None,
        num_positional_embeddings: Optional[int] = None,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        self.use_ada_layer_norm_single = norm_type == "ada_norm_single"
        self.use_layer_norm = norm_type == "layer_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        if positional_embeddings and (num_positional_embeddings is None):
            raise ValueError(
                "If `positional_embeddings` type is defined, `num_positional_embeddings` must also be defined."
            )

        if positional_embeddings == "sinusoidal":
            self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings)
        else:
            self.pos_embed = None

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)

        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attention
        if cross_attention_dim is not None or double_self_attention:
            # AdaLayerNormZero is only used for the self-attention block, so the
            # cross-attention normalization is either AdaLayerNorm or a plain LayerNorm.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attention if encoder_hidden_states is None
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        if not self.use_ada_layer_norm_single:
            self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)

        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # 4. Fuser (gated self-attention used for GLIGEN-style grounded generation)
        if attention_type == "gated" or attention_type == "gated-text-image":
            self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim)

        # 5. Scale-shift table for the `ada_norm_single` (PixArt-Alpha) variant
        if self.use_ada_layer_norm_single:
            self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunked feed-forward: the feed-forward layer is applied to `chunk_size`-sized
        # slices along dimension `dim` to reduce peak memory usage.
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 0. Self-Attention
        batch_size = hidden_states.shape[0]

        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        elif self.use_layer_norm:
            norm_hidden_states = self.norm1(hidden_states)
        elif self.use_ada_layer_norm_single:
            shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
                self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1)
            ).chunk(6, dim=1)
            norm_hidden_states = self.norm1(hidden_states)
            norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
            norm_hidden_states = norm_hidden_states.squeeze(1)
        else:
            raise ValueError("Incorrect norm used")

        if self.pos_embed is not None:
            norm_hidden_states = self.pos_embed(norm_hidden_states)

        # 1. Retrieve lora scale.
        lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0

        # 2. Prepare GLIGEN inputs
        cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
        gligen_kwargs = cross_attention_kwargs.pop("gligen", None)

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        elif self.use_ada_layer_norm_single:
            attn_output = gate_msa * attn_output

        hidden_states = attn_output + hidden_states
        if hidden_states.ndim == 4:
            hidden_states = hidden_states.squeeze(1)

        # 2.5 GLIGEN Control
        if gligen_kwargs is not None:
            hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])

        # 3. Cross-Attention
        if self.attn2 is not None:
            if self.use_ada_layer_norm:
                norm_hidden_states = self.norm2(hidden_states, timestep)
            elif self.use_ada_layer_norm_zero or self.use_layer_norm:
                norm_hidden_states = self.norm2(hidden_states)
            elif self.use_ada_layer_norm_single:
                # For the `ada_norm_single` (PixArt) variant no extra normalization is applied
                # before cross-attention; the hidden states are attended to directly.
                norm_hidden_states = hidden_states
            else:
                raise ValueError("Incorrect norm")

            if self.pos_embed is not None and self.use_ada_layer_norm_single is False:
                norm_hidden_states = self.pos_embed(norm_hidden_states)

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 4. Feed-forward
        if not self.use_ada_layer_norm_single:
            norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self.use_ada_layer_norm_single:
            norm_hidden_states = self.norm2(hidden_states)
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [
                    self.ff(hid_slice, scale=lora_scale)
                    for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)
                ],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states, scale=lora_scale)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        elif self.use_ada_layer_norm_single:
            ff_output = gate_mlp * ff_output

        hidden_states = ff_output + hidden_states
        if hidden_states.ndim == 4:
            hidden_states = hidden_states.squeeze(1)

        return hidden_states
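

# Usage sketch for `BasicTransformerBlock` (illustrative only; the dimensions and
# sequence lengths below are assumptions chosen for demonstration, not values
# mandated by the library). With the default `norm_type="layer_norm"` no timestep
# conditioning is required:
#
#     block = BasicTransformerBlock(
#         dim=320,
#         num_attention_heads=8,
#         attention_head_dim=40,
#         cross_attention_dim=768,
#     )
#     hidden_states = torch.randn(2, 64, 320)            # (batch, seq_len, dim)
#     encoder_hidden_states = torch.randn(2, 77, 768)    # e.g. text-encoder output
#     out = block(hidden_states, encoder_hidden_states=encoder_hidden_states)
#     # out has the same shape as `hidden_states`: (2, 64, 320)
#
#     # Optionally apply the feed-forward layer in slices to reduce peak memory:
#     block.set_chunk_feed_forward(chunk_size=32, dim=1)  # 64 tokens -> 2 chunks of 32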


class FeedForward(nn.Module):
    r"""
    A feed-forward layer.

    Parameters:
        dim (`int`): The number of channels in the input.
        dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
        mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        final_dropout (`bool`, *optional*, defaults to `False`): Apply a final dropout.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(linear_cls(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. can have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
        compatible_cls = (GEGLU,) if USE_PEFT_BACKEND else (GEGLU, LoRACompatibleLinear)
        for module in self.net:
            if isinstance(module, compatible_cls):
                hidden_states = module(hidden_states, scale)
            else:
                hidden_states = module(hidden_states)
        return hidden_states
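

# Usage sketch for `FeedForward` (illustrative only; the sizes below are
# assumptions chosen for demonstration, not library defaults beyond `mult=4`):
#
#     ff = FeedForward(dim=320, activation_fn="geglu")   # hidden size = 4 * 320 = 1280
#     x = torch.randn(2, 64, 320)                        # (batch, seq_len, dim)
#     y = ff(x)                                          # -> (2, 64, 320)
#
# With the GEGLU activation the input is projected to 2 * inner_dim and split into
# a value/gate pair, so the first projection weight has shape (2 * 1280, 320).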