"""PyTorch Siglip model."""

import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn.init import _calculate_fan_in_and_fan_out
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    logging,
    replace_return_docstrings,
)

logger = logging.get_logger(__name__)

class SiglipVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SiglipVisionModel`]. It is used to instantiate a
    Siglip vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip
    [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    Example:

    ```python
    >>> from transformers import SiglipVisionConfig, SiglipVisionModel

    >>> # Initializing a SiglipVisionConfig with google/siglip-base-patch16-224 style configuration
    >>> configuration = SiglipVisionConfig()

    >>> # Initializing a SiglipVisionModel (with random weights) from the google/siglip-base-patch16-224 style configuration
    >>> model = SiglipVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "siglip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # When loading from a full SiglipConfig, keep only the nested vision config.
        if config_dict.get("model_type") == "siglip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


_CHECKPOINT_FOR_DOC = "google/siglip-base-patch16-224"

SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/siglip-base-patch16-224",
    # See all SigLIP models at https://huggingface.co/models?filter=siglip
]

if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input

def _get_unpad_data(attention_mask):
    # From a (batch, seq_len) 0/1 padding mask, compute everything the
    # flash-attn varlen kernels need: the flat indices of the non-padding
    # tokens, the cumulative sequence lengths, and the longest sequence.
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )
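
# Illustrative sanity check (not executed as part of the model): for a batch of
# two sequences with real lengths 2 and 3 padded to length 4,
#
#   mask = torch.tensor([[1, 1, 0, 0],
#                        [1, 1, 1, 0]])
#   indices, cu_seqlens, max_len = _get_unpad_data(mask)
#
# indices == tensor([0, 1, 4, 5, 6]) are the flat positions of real tokens,
# cu_seqlens == tensor([0, 2, 5], dtype=torch.int32) are the offsets consumed
# by the flash-attn varlen kernels, and max_len == 3.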


def _trunc_normal_(tensor, mean, std, a, b):
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes the standard normal cumulative distribution function
        return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
            "The distribution of values may be incorrect.",
            stacklevel=2,
        )

    # Values are generated by using a truncated uniform distribution and
    # then using the inverse CDF for the normal distribution.
    # Get upper and lower cdf values
    l = norm_cdf((a - mean) / std)
    u = norm_cdf((b - mean) / std)

    # Uniformly fill tensor with values from [l, u], then translate to
    # [2l - 1, 2u - 1].
    tensor.uniform_(2 * l - 1, 2 * u - 1)

    # Use the inverse cdf transform for the normal distribution to get the
    # truncated standard normal.
    if tensor.dtype in [torch.float16, torch.bfloat16]:
        # The `erfinv_` op is not defined for float16 on CPU or bfloat16 on
        # GPU, so compute in float32 and copy the result back in place
        # (rebinding `tensor` here would silently drop the result).
        tensor_fp32 = tensor.to(torch.float32)
        tensor_fp32.erfinv_()
        tensor.copy_(tensor_fp32)
    else:
        tensor.erfinv_()

    # Transform to the requested mean and std
    tensor.mul_(std * math.sqrt(2.0))
    tensor.add_(mean)

    # Clamp to ensure the values are in the proper range
    if tensor.dtype == torch.float16:
        # Same float16-on-CPU limitation as above for `clamp_`.
        tensor_fp32 = tensor.to(torch.float32)
        tensor_fp32.clamp_(min=a, max=b)
        tensor.copy_(tensor_fp32)
    else:
        tensor.clamp_(min=a, max=b)

def trunc_normal_tf_(
    tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
) -> torch.Tensor:
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    NOTE: this 'tf' variant behaves closer to the TensorFlow / JAX implementations, where the
    bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0,
    and the result is subsequently scaled and shifted by the mean and std args.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
    """
    with torch.no_grad():
        _trunc_normal_(tensor, 0, 1.0, a, b)
        tensor.mul_(std).add_(mean)
    return tensor
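
# Example usage (illustrative): fill a weight with draws from N(0, 0.02^2)
# truncated at +/-2 standard deviations *before* the scale/shift, matching the
# TF/JAX convention described in the docstring:
#
#   w = torch.empty(768, 768)
#   trunc_normal_tf_(w, std=0.02)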


def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == "fan_in":
        denom = fan_in
    elif mode == "fan_out":
        denom = fan_out
    elif mode == "fan_avg":
        denom = (fan_in + fan_out) / 2
    else:
        raise ValueError(f"invalid mode {mode}")

    variance = scale / denom

    if distribution == "truncated_normal":
        # The constant is the stddev of a standard normal truncated to (-2, 2)
        trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
    elif distribution == "normal":
        with torch.no_grad():
            tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        with torch.no_grad():
            tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")


def lecun_normal_(tensor):
    variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")


def default_flax_embed_init(tensor):
    variance_scaling_(tensor, mode="fan_in", distribution="normal")
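
# Orientation note (shapes assumed for illustration): for an nn.Linear weight
# of shape (out_features, in_features) = (1024, 768), fan_in is 768, so
# `lecun_normal_` draws from a truncated normal whose post-truncation std is
# sqrt(1 / 768), while `default_flax_embed_init` uses a plain normal with the
# same variance.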


@dataclass
class SiglipVisionModelOutput(ModelOutput):
    """
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class SiglipVisionEmbeddings(nn.Module):
    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            padding="valid",
        )

        self.num_patches_per_side = self.image_size // self.patch_size
        self.num_patches = self.num_patches_per_side**2
        self.num_positions = self.num_patches
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
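        # With the default config (image_size=224, patch_size=16) this gives
        # 14 * 14 = 196 patch positions and one learned embedding per position.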

    def forward(
        self,
        pixel_values: torch.FloatTensor,
        patch_attention_mask: torch.BoolTensor,
        tgt_sizes: Optional[torch.IntTensor] = None,
    ) -> torch.Tensor:
        batch_size = pixel_values.size(0)

        patch_embeds = self.patch_embedding(pixel_values)
        embeddings = patch_embeds.flatten(2).transpose(1, 2)

        max_im_h, max_im_w = pixel_values.size(2), pixel_values.size(3)
        max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size
        boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side)
        position_ids = torch.full(
            size=(
                batch_size,
                max_nb_patches_h * max_nb_patches_w,
            ),
            fill_value=0,
        )

        for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
            if tgt_sizes is not None:
                nb_patches_h = tgt_sizes[batch_idx][0]
                nb_patches_w = tgt_sizes[batch_idx][1]
            else:
                nb_patches_h = p_attn_mask[:, 0].sum()
                nb_patches_w = p_attn_mask[0].sum()

            # Map each valid patch's fractional coordinate onto the full
            # position grid, so images smaller than the maximum resolution
            # still cover the whole learned embedding table.
            fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
            fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)

            bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True)
            bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True)

            pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten()
            position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids

        position_ids = position_ids.to(self.position_embedding.weight.device)

        embeddings = embeddings + self.position_embedding(position_ids)
        return embeddings
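
# Worked example of the position-id bucketing above (illustrative): with
# num_patches_per_side = 4, boundaries = [0.25, 0.50, 0.75]. An image whose
# valid region spans only 2 patch rows yields fractional_coords_h = [0.0, 0.5],
# and torch.bucketize(..., right=True) maps these to bucket rows [0, 2]: a
# smaller image is spread across the full 4 x 4 position grid instead of being
# packed into its top-left corner.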


class SiglipAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        batch_size, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)

        k_v_seq_len = key_states.shape[-2]
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale

        if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        # Upcast the attention weights to fp32 for a numerically stable softmax
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights
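
# Shape walkthrough (illustrative, default config): hidden_states of shape
# (2, 196, 768) is projected and split into q/k/v of shape (2, 12, 196, 64);
# attn_weights is (2, 12, 196, 196) after the softmax; attn_output is
# (2, 12, 196, 64), then transposed and reshaped back to (2, 196, 768) before
# out_proj.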


class SiglipFlashAttention2(SiglipAttention):
    """
    Siglip flash attention module. This module inherits from `SiglipAttention` as the weights of the module stay
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_causal = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # Flash attention does not return the attention weights;
        # `position_ids` and `use_cache` are accepted for interface
        # compatibility but unused by this vision encoder.
        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            # Vestigial decoder-style branch: the vision encoder never passes a
            # KV cache and `self.layer_idx` is not set on this module, so this
            # path is effectively dead code.
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)

        # The flash attention kernels expect the layout
        # (batch_size, seq_len, num_heads, head_dim), so transpose back.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        dropout_rate = self.dropout if self.training else 0.0

        # In PEFT, the layer norms are usually cast to float32 for training
        # stability, which silently upcasts the hidden states as well. Flash
        # attention only supports fp16/bf16, so cast back to the dtype the
        # rest of the module runs in.
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                "The input hidden states seem to be silently cast to float32; this might be related to the fact"
                " that you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = self._flash_attention_forward(
            query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
        )

        attn_output = attn_output.reshape(bsz, q_len, self.embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights

    def _flash_attention_forward(
        self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
    ):
        """
        Calls the forward method of Flash Attention. If the input hidden states contain at least one padding token,
        the input is first unpadded, attention is computed on the packed sequences, and the output is padded back to
        the original layout.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to the Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to the Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to the Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
        """

        causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            attn_output = flash_attn_func(
                query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
            )

        return attn_output

    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(
            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        value_layer = index_first_axis(
            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, which is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -query_length: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )
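
# Note on the varlen layout (illustrative): after `_upad_input`, q/k/v are
# packed as (total_tokens, num_heads, head_dim) with batch boundaries carried
# solely by the cu_seqlens offsets - e.g. two sequences of lengths 2 and 3
# become a 5-token pack with cu_seqlens = [0, 2, 5] - and `pad_input` later
# scatters the attention output back to (batch_size, seq_len, ...) via
# indices_q.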


class SiglipMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states
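
# E.g. with the default config the MLP maps each patch token independently
# through 768 -> 3072 -> gelu_pytorch_tanh -> 768.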


class SiglipEncoderLayer(nn.Module):
    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
        self.self_attn = SiglipAttention(config) if not self._use_flash_attention_2 else SiglipFlashAttention2(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = SiglipMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                Input to the layer of shape `(batch, seq_len, embed_dim)`.
            attention_mask (`torch.FloatTensor`):
                Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class SiglipPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SiglipVisionConfig
    base_model_prefix = "siglip"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, SiglipVisionEmbeddings):
            width = self.config.hidden_size
            nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
        elif isinstance(module, nn.Embedding):
            default_flax_embed_init(module.weight)
        elif isinstance(module, SiglipAttention):
            nn.init.normal_(module.q_proj.weight)
            nn.init.normal_(module.k_proj.weight)
            nn.init.normal_(module.v_proj.weight)
            nn.init.normal_(module.out_proj.weight)
            nn.init.zeros_(module.q_proj.bias)
            nn.init.zeros_(module.k_proj.bias)
            nn.init.zeros_(module.v_proj.bias)
            nn.init.zeros_(module.out_proj.bias)
        elif isinstance(module, SiglipMLP):
            nn.init.normal_(module.fc1.weight)
            nn.init.normal_(module.fc2.weight)
            nn.init.normal_(module.fc1.bias, std=1e-6)
            nn.init.normal_(module.fc2.bias, std=1e-6)
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            lecun_normal_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


SIGLIP_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`SiglipVisionConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

SIGLIP_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

class SiglipEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`SiglipEncoderLayer`].

    Args:
        config: SiglipVisionConfig
    """

    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)


@add_start_docstrings("""The vision model from SigLIP without any head or projection on top.""", SIGLIP_START_DOCSTRING)
class SiglipVisionTransformer(SiglipPreTrainedModel):
    config_class = SiglipVisionConfig
    main_input_name = "pixel_values"
    _supports_flash_attn_2 = True
    _no_split_modules = []

    def __init__(self, config: SiglipVisionConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = SiglipVisionEmbeddings(config)
        self.encoder = SiglipEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.embeddings.patch_embedding

    @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipVisionConfig)
    def forward(
        self,
        pixel_values,
        patch_attention_mask: Optional[torch.BoolTensor] = None,
        tgt_sizes: Optional[torch.IntTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        batch_size = pixel_values.size(0)
        if patch_attention_mask is None:
            patch_attention_mask = torch.ones(
                size=(
                    batch_size,
                    pixel_values.size(2) // self.config.patch_size,
                    pixel_values.size(3) // self.config.patch_size,
                ),
                dtype=torch.bool,
                device=pixel_values.device,
            )

        hidden_states = self.embeddings(
            pixel_values=pixel_values, patch_attention_mask=patch_attention_mask, tgt_sizes=tgt_sizes
        )

        patch_attention_mask = patch_attention_mask.view(batch_size, -1)
        # The call to `_upad_input` in `_flash_attention_forward` is expensive,
        # so when `patch_attention_mask` is all ones (i.e. attending to the
        # whole sequence), skip the attention mask entirely, which is equivalent.
        if not torch.any(~patch_attention_mask):
            attention_mask = None
        else:
            attention_mask = (
                _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype)
                if not self._use_flash_attention_2
                else patch_attention_mask
            )

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.post_layernorm(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, None) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=None,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
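
# Minimal usage sketch (illustrative; the model below is randomly initialized,
# not pretrained):
#
#   config = SiglipVisionConfig()
#   model = SiglipVisionTransformer(config)
#   pixel_values = torch.randn(1, 3, 224, 224)
#   outputs = model(pixel_values)
#   outputs.last_hidden_state.shape  # torch.Size([1, 196, 768])
#
# Loading released SigLIP weights normally goes through
# `from_pretrained("google/siglip-base-patch16-224")`; whether that checkpoint
# maps 1:1 onto this vision-only wrapper depends on the surrounding repo, so
# it is not shown here.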