import torch
import torch.nn as nn
import torch.nn.functional as F
from diffusers.utils.import_utils import is_xformers_available

if is_xformers_available():
    import xformers
    import xformers.ops
else:
    xformers = None


class HairAttnProcessor(nn.Module):
    r"""
    Attention processor that runs the UNet's regular text cross-attention and an additional
    cross-attention over reference features through the `to_k_SSR` / `to_v_SSR` projections,
    adding the two outputs together with weight `scale`.
    """

    def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, use_resampler=False):
        super().__init__()

        self.hidden_size = hidden_size
        self.cross_attention_dim = cross_attention_dim
        self.scale = scale
        self.use_resampler = use_resampler
        if self.use_resampler:
            # Resampler is expected to be defined/imported elsewhere in the project.
            self.resampler = Resampler(query_dim=hidden_size)
        self.to_k_SSR = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
        self.to_v_SSR = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)

    def __call__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        # split the conditioning into text tokens (first half) and reference tokens (second half)
        split_num = encoder_hidden_states.shape[1] // 2
        encoder_hidden_states, _hidden_states = (
            encoder_hidden_states[:, :split_num, :],
            encoder_hidden_states[:, split_num:, :],
        )

        if self.use_resampler:
            _hidden_states = self.resampler(_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        # regular text cross-attention
        attention_probs = attn.get_attention_scores(query, key, attention_mask)
        hidden_states = torch.bmm(attention_probs, value)
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # reference cross-attention through the SSR projections
        _key = self.to_k_SSR(_hidden_states)
        _value = self.to_v_SSR(_hidden_states)

        _key = attn.head_to_batch_dim(_key)
        _value = attn.head_to_batch_dim(_value)

        _attention_probs = attn.get_attention_scores(query, _key, None)
        _hidden_states = torch.bmm(_attention_probs, _value)
        _hidden_states = attn.batch_to_head_dim(_hidden_states)

        hidden_states = hidden_states + self.scale * _hidden_states

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states
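

# Note on the conditioning layout assumed by the Hair* processors in this module:
# `encoder_hidden_states` is expected to be the text embeddings and the reference (hair)
# embeddings concatenated along the sequence dimension, with equal sequence lengths,
# e.g. (hypothetical names):
#
#     encoder_hidden_states = torch.cat([text_embeds, ref_embeds], dim=1)
#
# Each processor splits this tensor in half (`split_num = seq_len // 2`), runs the UNet's
# regular cross-attention on the first half, runs a second attention pass over the second
# half through the `to_k_SSR` / `to_v_SSR` projections, and adds the result back scaled
# by `self.scale`.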


class HairAttnProcessor2_0(torch.nn.Module):
    r"""
    Variant of `HairAttnProcessor` that uses PyTorch 2.0's `F.scaled_dot_product_attention`.
    """

    def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, use_resampler=False):
        super().__init__()

        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("HairAttnProcessor2_0 requires PyTorch 2.0; to use it, please upgrade PyTorch to 2.0.")

        self.hidden_size = hidden_size
        self.cross_attention_dim = cross_attention_dim
        self.scale = scale
        self.use_resampler = use_resampler
        if self.use_resampler:
            # Resampler is expected to be defined/imported elsewhere in the project.
            self.resampler = Resampler(query_dim=hidden_size)
        self.to_k_SSR = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
        self.to_v_SSR = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)

    def __call__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects the mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        # split the conditioning into text tokens (first half) and reference tokens (second half)
        split_num = encoder_hidden_states.shape[1] // 2
        encoder_hidden_states, _hidden_states = (
            encoder_hidden_states[:, :split_num, :],
            encoder_hidden_states[:, split_num:, :],
        )

        if self.use_resampler:
            _hidden_states = self.resampler(_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        # regular text cross-attention; the output of sdpa is (batch, heads, seq_len, head_dim)
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # reference cross-attention through the SSR projections
        _hidden_states = _hidden_states.to(self.to_k_SSR.weight.dtype)
        _key = self.to_k_SSR(_hidden_states)
        _value = self.to_v_SSR(_hidden_states)

        _key = _key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        _value = _value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        _hidden_states = F.scaled_dot_product_attention(
            query.to(self.to_k_SSR.weight.dtype), _key, _value, attn_mask=None, dropout_p=0.0, is_causal=False
        )

        _hidden_states = _hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        _hidden_states = _hidden_states.to(query.dtype)

        hidden_states = hidden_states + self.scale * _hidden_states

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


class AttnProcessor(nn.Module):
    r"""
    Default processor for performing attention-related computations.
    """

    def __init__(
        self,
        hidden_size=None,
        cross_attention_dim=None,
    ):
        super().__init__()

    def __call__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        attention_probs = attn.get_attention_scores(query, key, attention_mask)
        hidden_states = torch.bmm(attention_probs, value)
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


class AttnProcessor2_0(torch.nn.Module):
    r"""
    Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
    """

    def __init__(
        self,
        hidden_size=None,
        cross_attention_dim=None,
    ):
        super().__init__()
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("AttnProcessor2_0 requires PyTorch 2.0; to use it, please upgrade PyTorch to 2.0.")

    def __call__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects the mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        # the output of scaled_dot_product_attention is (batch, heads, seq_len, head_dim)
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states
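

# A minimal, hypothetical helper (not part of the original module) sketching how these
# processors might be installed on a diffusers Stable Diffusion UNet, following the common
# IP-Adapter-style pattern: self-attention layers ("attn1") keep the default processor and
# cross-attention layers ("attn2") get the hair processor. The helper name, its arguments,
# and the block-size inference below are assumptions, not part of the original code.
def set_hair_attn_processors(unet, scale=1.0, use_resampler=False):
    attn_procs = {}
    for name in unet.attn_processors.keys():
        # attn1 is self-attention (no cross_attention_dim); attn2 is cross-attention
        cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
        # infer the hidden size of this attention block from its position in the UNet
        if name.startswith("mid_block"):
            hidden_size = unet.config.block_out_channels[-1]
        elif name.startswith("up_blocks"):
            block_id = int(name[len("up_blocks.")])
            hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
        elif name.startswith("down_blocks"):
            block_id = int(name[len("down_blocks.")])
            hidden_size = unet.config.block_out_channels[block_id]
        if cross_attention_dim is None:
            attn_procs[name] = AttnProcessor2_0()
        else:
            attn_procs[name] = HairAttnProcessor2_0(
                hidden_size=hidden_size,
                cross_attention_dim=cross_attention_dim,
                scale=scale,
                use_resampler=use_resampler,
            )
    unet.set_attn_processor(attn_procs)
    return unet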