import torch
import torch.nn as nn
import torch.nn.functional as F

from diffusers.models.lora import LoRALinearLayer


class LoRAAttnProcessor(nn.Module):
    r"""
    Processor for implementing attention with LoRA (low-rank) updates on the
    query, key, value, and output projections.
    """

    def __init__(
        self,
        hidden_size=None,
        cross_attention_dim=None,
        rank=4,
        network_alpha=None,
        lora_scale=1.0,
    ):
        super().__init__()

        self.rank = rank
        self.lora_scale = lora_scale

        self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
        self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
        self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
        self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)

    def __call__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states) + self.lora_scale * self.to_q_lora(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states) + self.lora_scale * self.to_k_lora(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states) + self.lora_scale * self.to_v_lora(encoder_hidden_states)

        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        attention_probs = attn.get_attention_scores(query, key, attention_mask)
        hidden_states = torch.bmm(attention_probs, value)
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # linear proj (with LoRA update)
        hidden_states = attn.to_out[0](hidden_states) + self.lora_scale * self.to_out_lora(hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


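# Editor's sketch (not part of the original module): each
# `LoRALinearLayer(in_features, out_features, rank, network_alpha)` above adds
# a trainable low-rank residual to a frozen base projection, so the processors
# compute roughly `W @ x + lora_scale * (network_alpha / rank) * up(down(x))`.
# The hypothetical helper below spells that math out on plain linear layers.
def _lora_delta_sketch(x, down, up, rank, network_alpha=None, lora_scale=1.0):
    """Illustrative only: the low-rank update added on top of a base projection."""
    delta = up(down(x))  # in_features -> rank -> out_features
    if network_alpha is not None:
        delta = delta * (network_alpha / rank)  # Kohya-style alpha rescaling
    return lora_scale * delta

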
class LoRAIPAttnProcessor(nn.Module):
    r"""
    Attention processor for IP-Adapter, with LoRA layers on the text
    cross-attention projections.

    Args:
        hidden_size (`int`):
            The hidden size of the attention layer.
        cross_attention_dim (`int`, *optional*):
            The number of channels in the `encoder_hidden_states`.
        rank (`int`, defaults to 4):
            The dimension of the LoRA update matrices.
        network_alpha (`int`, *optional*):
            Equivalent to `alpha` but its usage is specific to Kohya (A1111) style LoRAs.
        lora_scale (`float`, defaults to 1.0):
            The weight of the LoRA update relative to the base projection.
        scale (`float`, defaults to 1.0):
            The weight scale of the image prompt.
        num_tokens (`int`, defaults to 4):
            The context length of the image features (should be 16 for IP-Adapter Plus).
    """

    def __init__(
        self,
        hidden_size,
        cross_attention_dim=None,
        rank=4,
        network_alpha=None,
        lora_scale=1.0,
        scale=1.0,
        num_tokens=4,
    ):
        super().__init__()

        self.rank = rank
        self.lora_scale = lora_scale

        self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
        self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
        self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
        self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)

        self.hidden_size = hidden_size
        self.cross_attention_dim = cross_attention_dim
        self.scale = scale
        self.num_tokens = num_tokens

        # separate key/value projections for the image-prompt tokens
        self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
        self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)

    def __call__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states) + self.lora_scale * self.to_q_lora(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        else:
            # split the sequence into text tokens and trailing image-prompt tokens
            end_pos = encoder_hidden_states.shape[1] - self.num_tokens
            encoder_hidden_states, ip_hidden_states = (
                encoder_hidden_states[:, :end_pos, :],
                encoder_hidden_states[:, end_pos:, :],
            )
            if attn.norm_cross:
                encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states) + self.lora_scale * self.to_k_lora(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states) + self.lora_scale * self.to_v_lora(encoder_hidden_states)

        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        attention_probs = attn.get_attention_scores(query, key, attention_mask)
        hidden_states = torch.bmm(attention_probs, value)
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # for ip-adapter: a second attention pass over the image-prompt tokens
        ip_key = self.to_k_ip(ip_hidden_states)
        ip_value = self.to_v_ip(ip_hidden_states)

        ip_key = attn.head_to_batch_dim(ip_key)
        ip_value = attn.head_to_batch_dim(ip_value)

        ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
        # cache the image-token attention map for later inspection/visualization
        self.attn_map = ip_attention_probs
        ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
        ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states)

        # merge text and image attention outputs, weighted by the image-prompt scale
        hidden_states = hidden_states + self.scale * ip_hidden_states

        # linear proj (with LoRA update)
        hidden_states = attn.to_out[0](hidden_states) + self.lora_scale * self.to_out_lora(hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


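# Editor's sketch (not part of the original module): the IP-Adapter processors
# above expect `encoder_hidden_states` to carry the text embeddings with the
# image-prompt tokens concatenated at the end; they peel the last `num_tokens`
# entries back off. Names like `text_embeds` / `image_embeds` are placeholders.
def _concat_prompt_embeds_sketch(text_embeds, image_embeds):
    """Illustrative only: [B, 77, D] text + [B, num_tokens, D] image -> [B, 77 + num_tokens, D]."""
    return torch.cat([text_embeds, image_embeds], dim=1)

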
class LoRAAttnProcessor2_0(nn.Module):
    r"""
    Processor for implementing attention with LoRA updates, using PyTorch 2.0's
    memory-efficient `F.scaled_dot_product_attention`.
    """

    def __init__(
        self,
        hidden_size=None,
        cross_attention_dim=None,
        rank=4,
        network_alpha=None,
        lora_scale=1.0,
    ):
        super().__init__()

        self.rank = rank
        self.lora_scale = lora_scale

        self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
        self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
        self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
        self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)

    def __call__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects the mask shaped
            # (batch, heads, query_len, key_len)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states) + self.lora_scale * self.to_q_lora(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states) + self.lora_scale * self.to_k_lora(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states) + self.lora_scale * self.to_v_lora(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        # reshape to (batch, heads, seq_len, head_dim) for scaled_dot_product_attention
        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        # the output of sdpa has shape (batch, heads, seq_len, head_dim)
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj (with LoRA update)
        hidden_states = attn.to_out[0](hidden_states) + self.lora_scale * self.to_out_lora(hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


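# Editor's sketch (not part of the original module): the *2_0 processors swap
# the explicit softmax(Q K^T / sqrt(d)) @ V computation for torch's fused
# kernel. With no mask and no dropout the two paths should agree up to
# numerical noise, as the hypothetical check below illustrates.
def _sdpa_matches_manual_sketch(q, k, v):
    """Illustrative only: compare fused SDPA against the explicit attention formula."""
    manual = torch.softmax(q @ k.transpose(-1, -2) / (q.shape[-1] ** 0.5), dim=-1) @ v
    fused = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
    return torch.allclose(manual, fused, atol=1e-5)

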
class LoRAIPAttnProcessor2_0(nn.Module):
    r"""
    Attention processor for IP-Adapter with LoRA layers, using PyTorch 2.0's
    `F.scaled_dot_product_attention`.

    Args:
        hidden_size (`int`):
            The hidden size of the attention layer.
        cross_attention_dim (`int`, *optional*):
            The number of channels in the `encoder_hidden_states`.
        rank (`int`, defaults to 4):
            The dimension of the LoRA update matrices.
        network_alpha (`int`, *optional*):
            Equivalent to `alpha` but its usage is specific to Kohya (A1111) style LoRAs.
        lora_scale (`float`, defaults to 1.0):
            The weight of the LoRA update relative to the base projection.
        scale (`float`, defaults to 1.0):
            The weight scale of the image prompt.
        num_tokens (`int`, defaults to 4):
            The context length of the image features (should be 16 for IP-Adapter Plus).
    """

    def __init__(
        self,
        hidden_size,
        cross_attention_dim=None,
        rank=4,
        network_alpha=None,
        lora_scale=1.0,
        scale=1.0,
        num_tokens=4,
    ):
        super().__init__()

        self.rank = rank
        self.lora_scale = lora_scale
        self.num_tokens = num_tokens

        self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
        self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
        self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
        self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)

        self.hidden_size = hidden_size
        self.cross_attention_dim = cross_attention_dim
        self.scale = scale

        # separate key/value projections for the image-prompt tokens
        self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
        self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)

    def __call__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects the mask shaped
            # (batch, heads, query_len, key_len)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states) + self.lora_scale * self.to_q_lora(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        else:
            # split the sequence into text tokens and trailing image-prompt tokens
            end_pos = encoder_hidden_states.shape[1] - self.num_tokens
            encoder_hidden_states, ip_hidden_states = (
                encoder_hidden_states[:, :end_pos, :],
                encoder_hidden_states[:, end_pos:, :],
            )
            if attn.norm_cross:
                encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states) + self.lora_scale * self.to_k_lora(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states) + self.lora_scale * self.to_v_lora(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        # reshape to (batch, heads, seq_len, head_dim) for scaled_dot_product_attention
        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        # the output of sdpa has shape (batch, heads, seq_len, head_dim)
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # for ip-adapter: a second attention pass over the image-prompt tokens
        ip_key = self.to_k_ip(ip_hidden_states)
        ip_value = self.to_v_ip(ip_hidden_states)

        ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        # the output of sdpa has shape (batch, heads, seq_len, head_dim)
        ip_hidden_states = F.scaled_dot_product_attention(
            query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
        )

        ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        ip_hidden_states = ip_hidden_states.to(query.dtype)

        # merge text and image attention outputs, weighted by the image-prompt scale
        hidden_states = hidden_states + self.scale * ip_hidden_states

        # linear proj (with LoRA update)
        hidden_states = attn.to_out[0](hidden_states) + self.lora_scale * self.to_out_lora(hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states

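
if __name__ == "__main__":
    # Minimal smoke test (an editor's sketch, not part of the original module):
    # wire LoRAIPAttnProcessor into a stand-alone diffusers `Attention` block
    # and push random tensors through it. All dimensions are illustrative.
    from diffusers.models.attention_processor import Attention

    hidden_size, cross_dim, num_image_tokens = 320, 768, 4

    attn = Attention(query_dim=hidden_size, cross_attention_dim=cross_dim, heads=8, dim_head=40)
    attn.set_processor(
        LoRAIPAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_dim, num_tokens=num_image_tokens)
    )

    hidden_states = torch.randn(2, 64, hidden_size)
    # 77 text tokens followed by the trailing image-prompt tokens, as the processor expects
    encoder_hidden_states = torch.randn(2, 77 + num_image_tokens, cross_dim)

    out = attn(hidden_states, encoder_hidden_states=encoder_hidden_states)
    print(out.shape)  # expected: torch.Size([2, 64, 320])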