diff --git a/.gitattributes b/.gitattributes
index 55aee17f80e61bf73d6c64cca33a3e6991bf01f0..212fac85c84290fcb19246f7cf1237ad2d5bbf48 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1161,3 +1161,4 @@ llava_next/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs
vlmpy310/lib/python3.10/site-packages/tokenizers/tokenizers.abi3.so filter=lfs diff=lfs merge=lfs -text
vlmpy310/lib/python3.10/site-packages/av.libs/libavformat-071c54bd.so.61.7.100 filter=lfs diff=lfs merge=lfs -text
vlmpy310/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/modeling_oneformer.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+vlmpy310/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/modeling_seamless_m4t.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/bamba/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/bamba/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3cf577de26b413aff58f21be1b70dcd7219d7615
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/bamba/__pycache__/__init__.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/bamba/__pycache__/convert_mamba_ssm_checkpoint.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/bamba/__pycache__/convert_mamba_ssm_checkpoint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d06e95740fcc2e260b4e2be9daad80a7b515fa67
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/bamba/__pycache__/convert_mamba_ssm_checkpoint.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/bamba/modeling_bamba.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/bamba/modeling_bamba.py
new file mode 100644
index 0000000000000000000000000000000000000000..30017181738eeebb9fc548e1dedc372745ca030c
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/bamba/modeling_bamba.py
@@ -0,0 +1,1611 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/bamba/modular_bamba.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_bamba.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# coding=utf-8
+# Copyright 2024 IBM and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Callable, Optional, Tuple, Union
+
+import torch
+from torch import nn
+
+import transformers.models.jamba.modeling_jamba as modeling_jamba
+from transformers.activations import ACT2FN
+
+from ...cache_utils import Cache # we need __iter__ and __len__ of pkv
+from ...generation import GenerationMixin
+from ...modeling_attn_mask_utils import AttentionMaskConverter
+from ...modeling_flash_attention_utils import FlashAttentionKwargs
+from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS
+from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+from ...processing_utils import Unpack
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from ...utils.import_utils import (
+ is_causal_conv1d_available,
+ is_mamba_2_ssm_available,
+)
+from .configuration_bamba import BambaConfig
+
+
+if is_mamba_2_ssm_available():
+ from mamba_ssm.ops.triton.selective_state_update import selective_state_update
+ from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined
+else:
+ selective_state_update = None
+
+if is_causal_conv1d_available():
+ from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
+else:
+ causal_conv1d_update, causal_conv1d_fn = None, None
+
+
+logger = logging.get_logger(__name__)
+_CONFIG_FOR_DOC = "BambaConfig"
+
+
+# Adapted from transformers.models.jamba.modeling_jamba.HybridMambaAttentionDynamicCache for the v2 mixer
+class HybridMambaAttentionDynamicCache(modeling_jamba.HybridMambaAttentionDynamicCache):
+ """
+ A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
+ (which has a constant shape regardless of seq_len).
+
+ This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
+ and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape of each tensor depends on the layer type:
+ For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
+ while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
+ For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
+ while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
+ and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
+ """
+
+ def __init__(self, config: BambaConfig, batch_size, dtype=torch.float16, device=None):
+ super().__init__(config, batch_size, dtype, device)
+ self.layers_block_type = config.layers_block_type
+ self.has_previous_state = False # only used by mamba
+ conv_kernel_size = config.mamba_d_conv
+ ssm_state_size = config.mamba_d_state
+
+ self.conv_states = []
+ self.ssm_states = []
+ self.transformer_layers = []
+ for i in range(config.num_hidden_layers):
+ if self.layers_block_type[i] == "mamba":
+ self.conv_states += [
+ torch.zeros(
+ batch_size,
+ (config.mamba_expand * config.hidden_size + 2 * config.mamba_n_groups * ssm_state_size),
+ conv_kernel_size,
+ device=device,
+ dtype=dtype,
+ )
+ ]
+ self.ssm_states += [
+ torch.zeros(
+ batch_size,
+ config.mamba_n_heads,
+ config.mamba_d_head,
+ ssm_state_size,
+ device=device,
+ dtype=dtype,
+ )
+ ]
+ else:
+ self.conv_states += [torch.tensor([[]] * batch_size, device=device)]
+ self.ssm_states += [torch.tensor([[]] * batch_size, device=device)]
+ self.transformer_layers.append(i)
+
+ self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
+ self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
+
+
+class BambaRotaryEmbedding(nn.Module):
+ def __init__(self, config: BambaConfig, device=None):
+ super().__init__()
+ # BC: "rope_type" was originally "type"
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
+ else:
+ self.rope_type = "default"
+ self.max_seq_len_cached = config.max_position_embeddings
+ self.original_max_seq_len = config.max_position_embeddings
+
+ self.config = config
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+ self.original_inv_freq = self.inv_freq
+
+ def _dynamic_frequency_update(self, position_ids, device):
+ """
+ dynamic RoPE layers should recompute `inv_freq` in the following situations:
+ 1 - growing beyond the cached sequence length (allow scaling)
+ 2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
+ """
+ seq_len = torch.max(position_ids) + 1
+ if seq_len > self.max_seq_len_cached: # growth
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
+ self.max_seq_len_cached = seq_len
+
+ if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
+ # This .to() is needed if the model has been moved to a device after being initialized (because
+ # the buffer is automatically moved, but not the original copy)
+ self.original_inv_freq = self.original_inv_freq.to(device)
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
+ self.max_seq_len_cached = self.original_max_seq_len
+
+ @torch.no_grad()
+ def forward(self, x, position_ids):
+ if "dynamic" in self.rope_type:
+ self._dynamic_frequency_update(position_ids, device=x.device)
+
+ # Core RoPE block
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
+ position_ids_expanded = position_ids[:, None, :].float()
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
+ device_type = x.device.type
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
+ with torch.autocast(device_type=device_type, enabled=False):
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos()
+ sin = emb.sin()
+
+ # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
+ cos = cos * self.attention_scaling
+ sin = sin * self.attention_scaling
+
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+def eager_attention_forward(
+ module: nn.Module,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ attention_mask: Optional[torch.Tensor],
+ scaling: float,
+ dropout: float = 0.0,
+ **kwargs,
+):
+ key_states = repeat_kv(key, module.num_key_value_groups)
+ value_states = repeat_kv(value, module.num_key_value_groups)
+
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
+ if attention_mask is not None:
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+ attn_weights = attn_weights + causal_mask
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+ attn_output = attn_output.transpose(1, 2).contiguous()
+
+ return attn_output, attn_weights
+
+
+# Adapted from transformers.models.glm.modular_glm.apply_rotary_pos_emb
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Removes the interleaving of cos and sin from GLM
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`, *optional*):
+ Deprecated and unused.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos.unsqueeze(unsqueeze_dim)
+ sin = sin.unsqueeze(unsqueeze_dim)
+
+ # Keep half or full tensor for later concatenation
+ rotary_dim = cos.shape[-1]
+ q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
+ k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
+
+ # Apply rotary embeddings on the first half or full tensor
+ q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
+ k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
+
+ # Concatenate back to full shape
+ q_embed = torch.cat([q_embed, q_pass], dim=-1)
+ k_embed = torch.cat([k_embed, k_pass], dim=-1)
+ return q_embed, k_embed
+
+
+class BambaAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config: BambaConfig, layer_idx: int):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
+ self.scaling = self.head_dim**-0.5
+ self.attention_dropout = config.attention_dropout
+ self.is_causal = True
+
+ self.q_proj = nn.Linear(
+ config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
+ )
+ self.k_proj = nn.Linear(
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
+ )
+ self.v_proj = nn.Linear(
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
+ )
+ self.o_proj = nn.Linear(
+ config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
+ )
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+ attention_mask: Optional[torch.Tensor],
+ past_key_value: Optional[Cache] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ input_shape = hidden_states.shape[:-1]
+ hidden_shape = (*input_shape, -1, self.head_dim)
+
+ query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+ key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+
+ cos, sin = position_embeddings
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
+ logger.warning_once(
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ else:
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ attn_output, attn_weights = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ scaling=self.scaling,
+ **kwargs,
+ )
+
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+ attn_output = self.o_proj(attn_output)
+ return attn_output, attn_weights
+
+
+class BambaRMSNormGated(torch.nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states, gate=None):
+ input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+
+ if gate is not None:
+ hidden_states = hidden_states * nn.functional.silu(gate.to(torch.float32))
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+
+ return self.weight * hidden_states.to(input_dtype)
+
+
+# Helper methods for segment sum computation
+
+
+def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int):
+ """
+ Padding x tensor with `pad_size` on the seq_len dim (dim=1)
+
+ Assumes that we only have tensors of either size 4 or 3
+ """
+ pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0)
+
+ return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0)
+
+
+def reshape_into_chunks(input_tensor, pad_size, chunk_size):
+ """
+ Padding input_tensor with `pad_size` on the seq_len dim (dim=1) and
+ simultaneously splitting it into chunk sequences.
+
+ Assumes that we only have tensors of either size 4 or 3
+ """
+ # [bsz, seq_len, ...] -> [bsz, seq_len multiple of chunk_size, ...]
+ input_tensor = pad_tensor_by_size(input_tensor, pad_size)
+
+ if len(input_tensor.shape) == 3:
+ # [bsz, seq_len multiple of chunk_size, num_heads] -> [bsz, -1, chunk_size, num_heads]
+ return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2])
+ else:
+ # [bsz, seq_len multiple of chunk_size, num_heads, head_dim or state_size] -> [bsz, -1, chunk_size, num_heads, head_dim or state_size]
+ return input_tensor.reshape(
+ input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3]
+ )
+
+
+def segment_sum(input_tensor):
+ """
+ More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions.
+ """
+ chunk_size = input_tensor.size(-1)
+ # 1. expand input tensor to have an additional dimension and repeat along that dimension
+ # [..., chunk_size] -> [..., chunk_size, chunk_size]
+ input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size)
+ # 2. create a lower triangular mask with the diagonal set to 0 to 0 out elements above diag
+ mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=-1)
+ input_tensor = input_tensor.masked_fill(~mask, 0)
+ # 3. compute actual cumsum
+ tensor_segsum = torch.cumsum(input_tensor, dim=-2)
+
+ # 4. apply mask to keep only the lower triangular part of the cumulative sum result (incl diagonal this time)
+ mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=0)
+ tensor_segsum = tensor_segsum.masked_fill(~mask, -torch.inf)
+ return tensor_segsum
+
+
+is_fast_path_available = all((selective_state_update, causal_conv1d_fn, causal_conv1d_update))
+
+
+def apply_mask_to_padding_states(hidden_states, attention_mask):
+ """
+ Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66
+ """
+ if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
+ dtype = hidden_states.dtype
+ hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
+
+ return hidden_states
+
+
+# Adapted from transformers.models.mamba2.modeling_mamba2.Mamba2Mixer
+class BambaMixer(nn.Module):
+ """
+ Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
+ A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
+ ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
+ and is why Mamba is called **selective** state spaces)
+
+    There are a few differences between this and Mamba2Mixer:
+ - The variable use_precomputed_states is slightly different due to the HybridCache structure
+ - There's a few non-obvious bugs fixed with batching in the slow path that exist in main
+ - Some extra variables that our layer doesn't need have been removed
+ - We ported most of the refactors in https://github.com/huggingface/transformers/pull/35154, which is (as of Dec 18, 2024) unmerged
+ """
+
+ def __init__(self, config: BambaConfig, layer_idx: int):
+ super().__init__()
+ self.num_heads = config.mamba_n_heads
+ self.hidden_size = config.hidden_size
+ self.ssm_state_size = config.mamba_d_state
+ self.conv_kernel_size = config.mamba_d_conv
+ self.intermediate_size = int(config.mamba_expand * self.hidden_size)
+ self.layer_idx = layer_idx
+ self.use_conv_bias = config.mamba_conv_bias
+ self.activation = config.hidden_act
+ self.act = ACT2FN[config.hidden_act]
+ self.use_bias = config.mamba_proj_bias
+
+ self.layer_norm_epsilon = config.rms_norm_eps
+
+ self.n_groups = config.mamba_n_groups
+ self.head_dim = config.mamba_d_head
+ self.chunk_size = config.mamba_chunk_size
+
+ # FIXME:
+ self.time_step_limit = (0.0, float("inf"))
+ self.time_step_min = 0.001
+ self.time_step_max = 0.1
+
+ self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
+ self.conv1d = nn.Conv1d(
+ in_channels=self.conv_dim,
+ out_channels=self.conv_dim,
+ bias=config.mamba_conv_bias,
+ kernel_size=self.conv_kernel_size,
+ groups=self.conv_dim,
+ padding=self.conv_kernel_size - 1,
+ )
+
+ # projection of the input hidden states
+ projection_size = self.intermediate_size + self.conv_dim + self.num_heads
+ self.in_proj = nn.Linear(
+ self.hidden_size,
+ projection_size,
+ bias=self.use_bias,
+ )
+ # selective projection used to make dt, B and C input dependant
+
+ # time step projection (discretization)
+ # instantiate once and copy inv_dt in init_weights of PretrainedModel
+ self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
+
+ # S4D real initialization. These are not discretized!
+ # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
+ A = torch.arange(1, self.num_heads + 1)
+ self.A_log = nn.Parameter(torch.log(A))
+ self.A_log._no_weight_decay = True
+ self.norm = BambaRMSNormGated(self.intermediate_size, eps=self.layer_norm_epsilon)
+ self.D = nn.Parameter(torch.ones(self.num_heads))
+ self.D._no_weight_decay = True
+
+ self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias)
+
+ if not is_fast_path_available:
+ logger.warning_once(
+ "The fast path is not available because on of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`"
+ " is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
+ " https://github.com/Dao-AILab/causal-conv1d"
+ )
+ else:
+ logger.warning_once("The fast path for Bamba will be used when running the model on a GPU")
+
+ def cuda_kernels_forward(
+ self,
+ hidden_states: torch.Tensor,
+ cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ ):
+ # 1. Gated MLP's linear projection
+ hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
+ projected_states = self.in_proj(hidden_states)
+
+ # Set up dimensions for reshapes later
+ batch_size, seq_len, _ = hidden_states.shape
+ groups_time_state_size = self.n_groups * self.ssm_state_size
+
+ use_precomputed_states = (
+ cache_params is not None
+ and cache_params.has_previous_state
+ and seq_len == 1
+ and cache_params.conv_states[self.layer_idx].shape[0]
+ == cache_params.ssm_states[self.layer_idx].shape[0]
+ == batch_size
+ and cache_position is not None
+ and cache_position[0] > 0
+ )
+
+ # getting projected states from cache if it exists
+ if use_precomputed_states:
+ gate, hidden_states_B_C, dt = projected_states.squeeze(1).split(
+ [self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
+ )
+
+ # 2. Convolution sequence transformation
+ hidden_states_B_C = causal_conv1d_update(
+ hidden_states_B_C,
+ cache_params.conv_states[self.layer_idx],
+ self.conv1d.weight.squeeze(1),
+ self.conv1d.bias,
+ self.activation,
+ )
+
+ hidden_states, B, C = torch.split(
+ hidden_states_B_C,
+ [self.intermediate_size, groups_time_state_size, groups_time_state_size],
+ dim=-1,
+ )
+
+ # 3. SSM transformation
+ A = -torch.exp(self.A_log.float()) # (nheads,)
+ A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
+ dt = dt[:, :, None].expand(-1, -1, self.head_dim)
+ dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
+ D = self.D[:, None, ...].expand(-1, self.head_dim)
+ B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
+ C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
+ hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
+ hidden_states = selective_state_update(
+ cache_params.ssm_states[self.layer_idx],
+ hidden_states_reshaped,
+ dt,
+ A,
+ B,
+ C,
+ D,
+ z=None,
+ dt_bias=dt_bias,
+ dt_softplus=True,
+ )
+ hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
+ hidden_states = self.norm(hidden_states, gate)
+
+ # 4. Final linear projection
+ out = self.out_proj(hidden_states)[:, None, ...]
+ # Fused calculations or step by step if no initialized cache is found
+ else:
+ A = -torch.exp(self.A_log.float()) # (num_heads) or (intermediate_size, state_size)
+ dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit}
+
+ # 2-4. Fused kernel for conv1d, SSM, and the final projection
+ if self.training and cache_params is None:
+ out = mamba_split_conv1d_scan_combined(
+ projected_states,
+ self.conv1d.weight.squeeze(1),
+ self.conv1d.bias,
+ self.dt_bias,
+ A,
+ D=self.D,
+ chunk_size=self.chunk_size,
+ seq_idx=None, # was seq_idx
+ activation=self.activation,
+ rmsnorm_weight=self.norm.weight,
+ rmsnorm_eps=self.norm.variance_epsilon,
+ outproj_weight=self.out_proj.weight,
+ outproj_bias=self.out_proj.bias,
+ headdim=self.head_dim,
+ ngroups=self.n_groups,
+ norm_before_gate=False,
+ return_final_states=False,
+ **dt_limit_kwargs,
+ )
+
+ else:
+ gate, hidden_states_B_C, dt = projected_states.split(
+ [self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
+ )
+
+ # 2. Convolution sequence transformation
+ # Init cache
+ if cache_params is not None:
+ # storing the states
+ # If we just take xBC[:, :, -self.d_conv :], it will error if seqlen < self.d_conv
+ # Instead F.pad will pad with zeros if seqlen < self.d_conv, and truncate otherwise.
+ hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
+ conv_states = nn.functional.pad(
+ hidden_states_B_C_transposed,
+ (self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0),
+ )
+ cache_params.conv_states[self.layer_idx].copy_(conv_states)
+
+ if self.activation not in ["silu", "swish"]:
+ hidden_states_B_C = self.act(
+ self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2)
+ )
+ else:
+ hidden_states_B_C = causal_conv1d_fn(
+ x=hidden_states_B_C.transpose(1, 2),
+ weight=self.conv1d.weight.squeeze(1),
+ bias=self.conv1d.bias,
+ activation=self.activation,
+ ).transpose(1, 2)
+
+ hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
+ hidden_states, B, C = torch.split(
+ hidden_states_B_C,
+ [self.intermediate_size, groups_time_state_size, groups_time_state_size],
+ dim=-1,
+ )
+
+ # 3. SSM transformation
+ scan_output, ssm_state = mamba_chunk_scan_combined(
+ hidden_states.view(batch_size, seq_len, -1, self.head_dim),
+ dt,
+ A,
+ B.view(batch_size, seq_len, self.n_groups, -1),
+ C.view(batch_size, seq_len, self.n_groups, -1),
+ chunk_size=self.chunk_size,
+ D=self.D,
+ z=None,
+ seq_idx=None,
+ return_final_states=True,
+ dt_bias=self.dt_bias,
+ dt_softplus=True,
+ **dt_limit_kwargs,
+ )
+
+ # Init cache
+ if ssm_state is not None and cache_params is not None:
+ cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
+
+ scan_output = scan_output.view(batch_size, seq_len, -1)
+ # Multiply "gate" branch and apply extra normalization layer
+ scan_output = self.norm(scan_output, gate)
+
+ # 4. Final linear projection
+ out = self.out_proj(scan_output)
+ return out
+
+ # fmt: off
+ def torch_forward(
+ self,
+ input_states,
+ cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ ):
+ batch_size, seq_len, _ = input_states.shape
+ dtype = input_states.dtype
+
+ # 1. Gated MLP's linear projection
+ input_states = apply_mask_to_padding_states(input_states, attention_mask)
+ projected_states = self.in_proj(input_states)
+ gate, hidden_states_B_C, dt = projected_states.split(
+ [self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
+ )
+
+ use_precomputed_states = (
+ cache_params is not None
+ and cache_params.has_previous_state
+ and seq_len == 1
+ and cache_params.conv_states[self.layer_idx].shape[0]
+ == cache_params.ssm_states[self.layer_idx].shape[0]
+ == batch_size
+ and cache_position is not None
+ and cache_position[0] > 0
+ )
+
+ # 2. Convolution sequence transformation
+ if use_precomputed_states:
+ cache_params.conv_states[self.layer_idx] = cache_params.conv_states[self.layer_idx].roll(shifts=-1, dims=-1)
+ cache_params.conv_states[self.layer_idx][:, :, -1] = hidden_states_B_C[:, 0, :].to(cache_params.conv_states[self.layer_idx].device)
+
+ # We need to guarantee that anything regarding the cache is on the same device
+ conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device)
+
+ hidden_states_B_C = torch.sum(
+ conv_states * self.conv1d.weight.squeeze(1), dim=-1
+ )
+ if self.use_conv_bias:
+ hidden_states_B_C = hidden_states_B_C + self.conv1d.bias
+ hidden_states_B_C = self.act(hidden_states_B_C)
+ else:
+ # Init cache
+ if cache_params is not None:
+ hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
+ conv_states = nn.functional.pad(
+ hidden_states_B_C_transposed, (self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0)
+ )
+ cache_params.conv_states[self.layer_idx].copy_(conv_states)
+
+ hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2))
+
+ hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
+ hidden_states, B, C = torch.split(
+ hidden_states_B_C,
+ [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size],
+ dim=-1
+ )
+
+ # 3. SSM transformation
+ A = -torch.exp(self.A_log.float()) # [num_heads]
+ if use_precomputed_states:
+ # We need to guarantee that anything regarding the cache is on the same device
+ cache_device = cache_params.ssm_states[self.layer_idx].device
+
+ # Note: there is no need to pad parameter matrices here, as there is just one new token
+ # for batched generation
+ dt = dt[:, 0, :][:, None, ...]
+ dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
+ # [num_heads] -> [num_heads, head_dim]
+ dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
+
+ dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
+ dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
+ A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
+ # [bsz, num_heads, head_dim, state_size]
+ dA = (torch.exp(dt[..., None] * A)).to(device=cache_device)
+
+ # Discretize B
+ # [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] ->
+ # -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size]
+ B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
+ B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
+ B = B.reshape(batch_size, -1, B.shape[-1])
+ # [bsz, num_heads, head_dim, state_size]
+ dB = dt[..., None] * B[..., None, :]
+
+ # Discretize x into dB
+ # [bsz, intermediate_size] -> [bsz, num_heads, head_dim]
+ hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
+ dBx = (dB * hidden_states[..., None]).to(device=cache_device)
+
+ # State calculation
+ cache_params.ssm_states[self.layer_idx].copy_(
+ cache_params.ssm_states[self.layer_idx] * dA + dBx
+ )
+
+ # Subsequent output
+ # [bsz, n_groups * state_size] -> [bsz, num_heads, state_size]
+ C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
+ C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
+ C = C.reshape(batch_size, -1, C.shape[-1])
+ # [bsz, num_heads, head_dim]
+
+ ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype) # Shape: [b, h, d, n]
+ # Reshape ssm_states to merge the first two dimensions
+ ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size) # Shape: [b*h, d, n]
+ C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1) # Shape: [b*h, n, 1]
+ y = torch.bmm(ssm_states_reshaped, C_reshaped)
+ y = y.view(batch_size, self.num_heads, self.head_dim)
+
+ # D skip connection
+ # [num_heads] -> [num_heads, head_dim]
+ D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
+ y = (y + hidden_states * D).to(y.dtype)
+
+ # [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size]
+ y = y.reshape(batch_size, -1)[:, None, ...]
+ else:
+ # begin ssd naive implementation without einsums
+ dt = nn.functional.softplus(dt + self.dt_bias)
+ dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
+ hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
+ B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
+ C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
+ B = B.repeat(1, 1, self.num_heads // self.n_groups, 1)
+ C = C.repeat(1, 1, self.num_heads // self.n_groups, 1)
+ pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
+
+ D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
+
+ # Discretize x and A
+ hidden_states = hidden_states * dt[..., None]
+ A = A.to(hidden_states.dtype) * dt
+
+ # Rearrange into blocks/chunks
+ hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
+
+ # [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size]
+ A = A.permute(0, 3, 1, 2)
+ A_cumsum = torch.cumsum(A, dim=-1)
+
+ # 1. Compute the output for each intra-chunk (diagonal blocks)
+ # This is the analog of a causal mask
+ L = torch.exp(segment_sum(A))
+
+ # Contraction of C and B to get G (attention-weights like)
+ G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :] # shape: (b, c, l, s, h, n)
+ G = G_intermediate.sum(dim=-1) # shape: (b, c, l, s, h)
+
+ # Compute M, equivalent to applying attention mask to weights
+ M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
+ M = M_intermediate.sum(dim=-1)
+
+ # Compute Y_diag (apply to values)
+ Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3)
+
+ # 2. Compute the state for each intra-chunk
+ # (right term of low-rank factorization of off-diagonal blocks; B terms)
+ decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum))
+ B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None]
+ states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2)
+
+ # 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries
+ # (middle term of factorization of off-diag blocks; A terms)
+ if use_precomputed_states:
+ previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device)
+ else:
+ previous_states = torch.zeros_like(states[:, :1])
+ states = torch.cat([previous_states, states], dim=1)
+ decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
+ decay_chunk = decay_chunk.transpose(1, 3)
+ new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1)
+ states, ssm_state = new_states[:, :-1], new_states[:, -1]
+
+ # 4. Compute state -> output conversion per chunk
+ # (left term of low-rank factorization of off-diagonal blocks; C terms)
+ state_decay_out = torch.exp(A_cumsum)
+ C_times_states = (C[..., None, :] * states[:, :, None, ...])
+ state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
+ Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None])
+
+ # Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks)
+ y = Y_diag + Y_off
+ # [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim]
+ y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
+
+ y = y + D_residual
+ # Cutting off padded chunks
+ if pad_size > 0:
+ y = y[:, :seq_len, :, :]
+ y = y.reshape(batch_size, seq_len, -1)
+
+ # Init cache
+ if ssm_state is not None and cache_params is not None:
+ cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
+
+ scan_output = self.norm(y, gate)
+
+ # end ssd naive
+
+ # 4. Final linear projection
+ contextualized_states = self.out_proj(scan_output.to(dtype)) # [batch, seq_len, hidden_size]
+ return contextualized_states
+ # fmt: on
+
+ def forward(
+ self,
+ hidden_states,
+ cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ ):
+ if is_fast_path_available and "cuda" in self.in_proj.weight.device.type:
+ return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)
+ dtype = hidden_states.dtype
+ if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
+ # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
+ hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
+
+ return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask)
+
+
class BambaMLP(nn.Module):
    """Gated feed-forward block: ``down_proj(act(gate_proj(x)) * up_proj(x))``."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        # Two parallel up-projections (gate + value) and one down-projection back to hidden size.
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        # Activation is applied to the gate branch only, then modulates the value branch.
        gated = self.act_fn(self.gate_proj(x))
        return self.down_proj(gated * self.up_proj(x))
+
+
class BambaRMSNorm(nn.Module):
    """Root-mean-square LayerNorm (equivalent to T5LayerNorm): no mean subtraction, no bias."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        # Learnable per-channel scale; the normalization itself is parameter-free.
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Normalize in float32 for numerical stability, then cast back to the input dtype.
        orig_dtype = hidden_states.dtype
        x = hidden_states.to(torch.float32)
        mean_square = x.pow(2).mean(-1, keepdim=True)
        x = x * torch.rsqrt(mean_square + self.variance_epsilon)
        return self.weight * x.to(orig_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
+
+
class BambaDecoderLayer(nn.Module):
    """A single hybrid decoder layer.

    Depending on `layer_type`, the token-mixing sub-block is either a Bamba (mamba-v2)
    mixer or standard self-attention; it is followed by a gated MLP. Both sub-blocks
    use pre-RMSNorm and a residual connection.
    """

    def __init__(self, config: BambaConfig, layer_idx: int, layer_type: str = "mamba"):
        super().__init__()

        # Bamba only supports a dense feed-forward ("one expert"). The previous code
        # selected the FFN class via `BambaMLP if num_experts == 1 else None`, which
        # crashes with an opaque `TypeError: 'NoneType' object is not callable` for any
        # other value; fail with an explicit error instead. Behavior for the supported
        # case (num_experts == 1) is identical.
        num_experts = 1
        if num_experts != 1:
            raise ValueError(f"Bamba only supports a dense feed-forward (num_experts=1), got {num_experts}")
        self.feed_forward = BambaMLP(config)
        self.input_layernorm = BambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.pre_ff_layernorm = BambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.layer_type = layer_type
        if layer_type == "mamba":
            self.mamba = BambaMixer(config=config, layer_idx=layer_idx)
        elif layer_type == "attention":
            self.self_attn = BambaAttention(config, layer_idx)
        else:
            raise ValueError("Invalid layer_type")

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            past_key_value (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail. Mamba layers always report `None` attention weights.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence.
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
                into the model
        """

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # this is a hybrid decoder layer: dispatch to the mixer matching this layer's type.
        # `__init__` validated `layer_type`, so exactly one branch always runs.
        if self.layer_type == "mamba":
            hidden_states = self.mamba(
                hidden_states=hidden_states,
                cache_params=past_key_value,
                cache_position=cache_position,
                attention_mask=attention_mask,
            )
            self_attn_weights = None
        elif self.layer_type == "attention":
            hidden_states, self_attn_weights = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        # residual connection after attention
        hidden_states = residual + hidden_states

        # feed-forward
        residual = hidden_states
        hidden_states = self.pre_ff_layernorm(hidden_states)
        hidden_states = self.feed_forward(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs
+
+
# Shared docstring fragment injected by the `add_start_docstrings` decorators below.
# The string is user-facing documentation rendered by the docs tooling, so its content
# is left untouched.
BAMBA_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`BambaConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
+
+
@add_start_docstrings(
    "The bare BambaModel outputting raw hidden-states without any specific head on top.",
    BAMBA_START_DOCSTRING,
)
class BambaPreTrainedModel(PreTrainedModel):
    """Base class wiring Bamba modules into the `PreTrainedModel` machinery."""

    config_class = BambaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["BambaDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True  # Note: only supports HybridMambaAttentionDynamicCache
    _is_stateful = True

    def _init_weights(self, module):
        """Initialize linear/conv/embedding weights from N(0, initializer_range)."""
        std = self.config.initializer_range
        if isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            # Keep the padding embedding at exactly zero after the random init.
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
+
+
# Forward-pass argument documentation, injected into `forward` methods via
# `add_start_docstrings_to_model_forward`. Rendered by the docs tooling, so the
# string content is left untouched.
BAMBA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`HybridMambaAttentionDynamicCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            A HybridMambaAttentionDynamicCache object containing pre-computed hidden-states (keys and values in the
            self-attention blocks and convolution and ssm states in the mamba blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
            Key and value cache tensors have shape `(batch_size, num_heads, seq_len, head_dim)`.
            Convolution and ssm states tensors have shape `(batch_size, d_inner, d_conv)` and
            `(batch_size, d_inner, d_state)` respectively.
            See the `HybridMambaAttentionDynamicCache` class for more details.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        output_router_logits (`bool`, *optional*):
            Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
            should not be returned during inference.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
            the complete sequence length.
"""
+
+
@add_start_docstrings(
    "The bare Bamba Model outputting raw hidden-states without any specific head on top.",
    BAMBA_START_DOCSTRING,
)
# Adapted from transformers.models.jamba.modeling_jamba.JambaModel
class BambaModel(BambaPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`BambaDecoderLayer`]

    Args:
        config: BambaConfig
    """

    def __init__(self, config: BambaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        # Layer i is either a mamba mixer or self-attention, as selected by
        # `config.layers_block_type[i]` — this is what makes the model "hybrid".
        decoder_layers = []
        for i in range(config.num_hidden_layers):
            decoder_layers.append(BambaDecoderLayer(config, layer_idx=i, layer_type=config.layers_block_type[i]))
        self.layers = nn.ModuleList(decoder_layers)

        self._attn_implementation = config._attn_implementation
        self.final_layernorm = BambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # Rotary embeddings are computed once per forward and shared by all attention layers.
        self.rotary_emb = BambaRotaryEmbedding(config=config)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(BAMBA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[HybridMambaAttentionDynamicCache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        """Run the hybrid decoder stack and return the last hidden states (plus optional cache/attentions)."""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        hidden_states = inputs_embeds

        # Unlike most models, the cache cannot be created lazily here: mamba conv/ssm state
        # tensors are sized from the config, so the caller must pass an initialized cache.
        if use_cache and past_key_values is None:
            logger.warning_once(
                "Bamba requires an initialized `HybridMambaAttentionDynamicCache` to return a cache. None was "
                "provided, so no cache will be returned."
            )

        if cache_position is None:
            cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device)
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # Attention layers consume a 4D causal mask; mamba layers only need the 2D padding mask.
        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )
        mamba_mask = self._update_mamba_mask(attention_mask, cache_position)

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers:
            # Depending on the layer type we opt for 2D base attention mask (Mamba) or 4D causal mask (Attention)
            layer_mask = mamba_mask if decoder_layer.layer_type == "mamba" else causal_mask

            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    layer_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                    position_embeddings,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=layer_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                    position_embeddings=position_embeddings,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                if layer_outputs[1] is not None:
                    # append attentions only of attention layers. Mamba layers return `None` as the attention weights
                    all_self_attns += (layer_outputs[1],)

        hidden_states = self.final_layernorm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        # After the first full pass the cache holds valid conv/ssm states; mark it so the
        # mixers can take the single-token decode path on subsequent calls.
        if past_key_values and not past_key_values.has_previous_state:
            past_key_values.has_previous_state = True

        next_cache = None if not use_cache else past_key_values

        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: HybridMambaAttentionDynamicCache,
        output_attentions: bool,
    ):
        """Build (or skip) the 4D causal mask appropriate for the active attention implementation."""
        # flash-attention-2 handles causality internally; it only needs the raw 2D padding mask.
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        sequence_length = input_tensor.shape[1]
        target_length = (
            attention_mask.shape[-1]
            if isinstance(attention_mask, torch.Tensor)
            else past_seen_tokens + sequence_length + 1
        )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type == "cuda"
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            # Start from a fully masked (min_dtype) square, then carve out the allowed region.
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            # Zero (i.e. allow) every key position at or before the query's cache position.
            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_attention_mask = (attention_mask[:, None, None, :] == attention_mask[:, None, :, None])[
                    :, :, -sequence_length:, :
                ].to(dtype)
                padding_mask = causal_mask[:, :, :, :mask_length] + padding_attention_mask
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask

    def _update_mamba_mask(self, attention_mask, cache_position):
        """
        No need for zeroing states when
            1. Cached forward
            2. Attending to all inputs
        """
        mamba_mask = attention_mask
        if cache_position[0] > 0 or (attention_mask is not None and torch.all(attention_mask == 1)):
            mamba_mask = None
        return mamba_mask
+
+
class BambaForCausalLM(BambaPreTrainedModel, GenerationMixin):
    """Bamba decoder with a tied language-modeling head on top."""

    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}

    def __init__(self, config):
        super().__init__(config)
        self.model = BambaModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(BAMBA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[HybridMambaAttentionDynamicCache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        num_logits_to_keep: int = 0,
        **kwargs,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

            num_logits_to_keep (`int` or `None`, *optional*):
                Calculate logits for the last `num_logits_to_keep` tokens. If `None`, calculate logits for all
                `input_ids`. Only last token logits are needed for generation, and calculating them only for that token
                can save memory, which becomes pretty significant for long sequences.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, BambaForCausalLM

        >>> model = BambaForCausalLM.from_pretrained("...")
        >>> tokenizer = AutoTokenizer.from_pretrained("...")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        # (note: num_logits_to_keep == 0 keeps ALL positions, since `[:, -0:, :]` is `[:, 0:, :]`)
        logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        **kwargs,
    ):
        # Overwritten -- has a unique cache type, `HybridMambaAttentionDynamicCache`

        empty_past_kv = past_key_values is None

        # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
        # Exception 1: when passing input_embeds, input_ids may be missing entries
        # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
        if not empty_past_kv:
            if inputs_embeds is not None:  # Exception 1
                input_ids = input_ids[:, -cache_position.shape[0] :]
            elif input_ids.shape[1] != cache_position.shape[0]:  # Default case (the "else", a no op, is Exception 2)
                input_ids = input_ids[:, cache_position]
        else:
            # First generation step: the hybrid cache must be pre-allocated from the config.
            past_key_values = HybridMambaAttentionDynamicCache(
                self.config, input_ids.shape[0], self.dtype, device=self.device
            )

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if not empty_past_kv:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and empty_past_kv:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids.contiguous()}  # `contiguous()` needed for compilation use cases

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": use_cache,
                "attention_mask": attention_mask,
                "num_logits_to_keep": self.config.num_logits_to_keep,
                "cache_position": cache_position,
            }
        )
        return model_inputs
+
+
# Public API of this module, consumed by the package's lazy-import machinery.
__all__ = ["BambaModel", "BambaForCausalLM", "BambaPreTrainedModel"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__init__.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d54ee86aecef2cbe5b9bfdee321a0375d977880
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    # For static type checkers and IDEs: expose the full public API eagerly so
    # symbols resolve without executing the lazy-loading path below.
    from .configuration_clap import *
    from .feature_extraction_clap import *
    from .modeling_clap import *
    from .processing_clap import *
else:
    import sys

    # At runtime, replace this module object with a lazy proxy: submodules are
    # only imported on first attribute access, keeping `import transformers` fast.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3763a655248cf25e6770f1c0cf99a9b483e37ae2
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/__init__.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/configuration_clap.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/configuration_clap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3660a2a84ecf452b7d2188774f9c844f37932ff4
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/configuration_clap.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/convert_clap_original_pytorch_to_hf.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/convert_clap_original_pytorch_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f77580cc06417f8783b0bdd1b0b5ab430a2c5d4a
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/convert_clap_original_pytorch_to_hf.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/feature_extraction_clap.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/feature_extraction_clap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36e0207be29ee1a0c19e7fd90fe46090755d2cdf
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/feature_extraction_clap.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/modeling_clap.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/modeling_clap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e3b971ccaa508192572c2ce12a64b2aaf9429dbd
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/modeling_clap.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/processing_clap.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/processing_clap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f1b0ff6dab34d44cb3795e21c7a4843aec0e9ff
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/__pycache__/processing_clap.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/configuration_clap.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/configuration_clap.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5b7d3b7a21a96ca93707e64858edc5584ae9303
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/configuration_clap.py
@@ -0,0 +1,394 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""CLAP model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
# Module-level logger following the transformers convention (named after the module).
logger = logging.get_logger(__name__)
+
+
class ClapTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ClapTextModel`]. It is used to instantiate a CLAP
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the CLAP
    [clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the CLAP model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`ClapTextModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 514):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 1):
            The vocabulary size of the `token_type_ids` passed when calling [`ClapTextModel`].
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimension of the projection head of the `ClapTextModelWithProjection`.
        pad_token_id (`int`, *optional*, defaults to 1):
            Id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 0):
            Id of the beginning-of-sequence token.
        eos_token_id (`int`, *optional*, defaults to 2):
            Id of the end-of-sequence token.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        projection_hidden_act (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the projection layer. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.

    Examples:

    ```python
    >>> from transformers import ClapTextConfig, ClapTextModel

    >>> # Initializing a CLAP text configuration
    >>> configuration = ClapTextConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = ClapTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "clap_text_model"
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_factor=1.0,
        layer_norm_eps=1e-12,
        projection_dim=512,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        projection_hidden_act="relu",
        **kwargs,
    ):
        # Special token ids are handled by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        # NOTE: projection_dim/projection_hidden_act may be overwritten by ClapConfig
        # so both sub-configs share one projection setting — see ClapConfig.__init__.
        self.projection_hidden_act = projection_hidden_act
        self.projection_dim = projection_dim
+
+
class ClapAudioConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ClapAudioModel`]. It is used to instantiate a
    CLAP audio encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the audio encoder of the CLAP
    [laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        window_size (`int`, *optional*, defaults to 8):
            Image size of the spectrogram
        num_mel_bins (`int`, *optional*, defaults to 64):
            Number of mel features used per frames. Should correspond to the value used in the `ClapProcessor` class.
        spec_size (`int`, *optional*, defaults to 256):
            Desired input size of the spectrogram that the model supports. It can be different from the output of the
            `ClapFeatureExtractor`, in which case the input features will be resized. Corresponds to the `image_size`
            of the audio models.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        patch_size (`int`, *optional*, defaults to 4):
            Patch size for the audio spectrogram
        patch_stride (`list`, *optional*, defaults to `[4, 4]`):
            Patch stride for the audio spectrogram
        num_classes (`int`, *optional*, defaults to 527):
            Number of classes used for the head training
        hidden_size (`int`, *optional*, defaults to 768):
            Hidden size of the output of the audio encoder. Correspond to the dimension of the penultimate layer's
            output, which is sent to the projection MLP layer.
        projection_dim (`int`, *optional*, defaults to 512):
            Hidden size of the projection layer.
        depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`):
            Depths used for the Swin Layers of the audio model
        num_attention_heads (`list`, *optional*, defaults to `[4, 8, 16, 32]`):
            Number of attention heads used for the Swin Layers of the audio model
        enable_fusion (`bool`, *optional*, defaults to `False`):
            Whether or not to enable patch fusion. This is the main contribution of the authors, and should give the
            best results.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the encoder.
        fusion_type (`str`, *optional*):
            Fusion type used for the patch fusion.
        patch_embed_input_channels (`int`, *optional*, defaults to 1):
            Number of channels used for the input spectrogram
        flatten_patch_embeds (`bool`, *optional*, defaults to `True`):
            Whether or not to flatten the patch embeddings
        patch_embeds_hidden_size (`int`, *optional*, defaults to 96):
            Hidden size of the patch embeddings. It is used as the number of output channels.
        enable_patch_layer_norm (`bool`, *optional*, defaults to `True`):
            Whether or not to enable layer normalization for the patch embeddings
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Drop path rate for the patch fusion
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to add a bias to the query, key, value projections.
        mlp_ratio (`float`, *optional*, defaults to 4.0):
            Ratio of the mlp hidden dim to embedding dim.
        aff_block_r (`int`, *optional*, defaults to 4):
            downsize_ratio used in the AudioFF block
        num_hidden_layers (`int`, *optional*, defaults to 4):
            Number of hidden layers in the Transformer encoder.
        projection_hidden_act (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the projection layer. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).

    Example:

    ```python
    >>> from transformers import ClapAudioConfig, ClapAudioModel

    >>> # Initializing a ClapAudioConfig with laion/clap-htsat-fused style configuration
    >>> configuration = ClapAudioConfig()

    >>> # Initializing a ClapAudioModel (with random weights) from the laion/clap-htsat-fused style configuration
    >>> model = ClapAudioModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "clap_audio_model"
    base_config_key = "audio_config"

    def __init__(
        self,
        window_size=8,
        num_mel_bins=64,
        spec_size=256,
        hidden_act="gelu",
        patch_size=4,
        patch_stride=[4, 4],  # noqa: B006 -- list defaults kept for config (de)serialization compat; treated as read-only
        num_classes=527,
        hidden_size=768,
        projection_dim=512,
        depths=[2, 2, 6, 2],  # noqa: B006
        num_attention_heads=[4, 8, 16, 32],  # noqa: B006
        enable_fusion=False,
        hidden_dropout_prob=0.1,
        fusion_type=None,
        patch_embed_input_channels=1,
        flatten_patch_embeds=True,
        patch_embeds_hidden_size=96,
        enable_patch_layer_norm=True,
        drop_path_rate=0.0,
        attention_probs_dropout_prob=0.0,
        qkv_bias=True,
        mlp_ratio=4.0,
        aff_block_r=4,
        num_hidden_layers=4,
        projection_hidden_act="relu",
        layer_norm_eps=1e-5,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Fixed: `self.window_size` was previously assigned twice (duplicate line removed).
        self.window_size = window_size
        self.num_mel_bins = num_mel_bins
        self.spec_size = spec_size
        self.patch_size = patch_size
        self.patch_stride = patch_stride
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.depths = depths
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.enable_fusion = enable_fusion
        self.fusion_type = fusion_type
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        # NOTE: projection_dim/projection_hidden_act may be overwritten by ClapConfig
        # so both sub-configs share one projection setting — see ClapConfig.__init__.
        self.projection_dim = projection_dim
        self.flatten_patch_embeds = flatten_patch_embeds
        self.patch_embeds_hidden_size = patch_embeds_hidden_size
        self.enable_patch_layer_norm = enable_patch_layer_norm
        self.drop_path_rate = drop_path_rate
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.qkv_bias = qkv_bias
        self.mlp_ratio = mlp_ratio
        self.patch_embed_input_channels = patch_embed_input_channels
        self.aff_block_r = aff_block_r
        self.layer_norm_eps = layer_norm_eps
        self.initializer_factor = initializer_factor
        self.projection_hidden_act = projection_hidden_act
+
+
class ClapConfig(PretrainedConfig):
    r"""
    [`ClapConfig`] is the configuration class to store the configuration of a [`ClapModel`]. It is used to instantiate
    a CLAP model according to the specified arguments, defining the text model and audio model configs. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the CLAP
    [laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`ClapTextConfig`].
        audio_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`ClapAudioConfig`].
        logit_scale_init_value (`float`, *optional*, defaults to 14.29):
            The initial value of the *logit_scale* parameter. Default is used as per the original CLAP implementation.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and audio projection layers. Propagated into both sub-configs.
        projection_hidden_act (`str`, *optional*, defaults to `"relu"`):
            Activation function for the projection layers. Propagated into both sub-configs.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            Factor to scale the initialization of the model weights.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import ClapConfig, ClapModel

    >>> # Initializing a ClapConfig with laion-ai/base style configuration
    >>> configuration = ClapConfig()

    >>> # Initializing a ClapModel (with random weights) from the laion-ai/base style configuration
    >>> model = ClapModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a ClapConfig from a ClapTextConfig and a ClapAudioConfig
    >>> from transformers import ClapTextConfig, ClapAudioConfig

    >>> # Initializing a ClapText and ClapAudioConfig configuration
    >>> config_text = ClapTextConfig()
    >>> config_audio = ClapAudioConfig()

    >>> config = ClapConfig.from_text_audio_configs(config_text, config_audio)
    ```"""

    model_type = "clap"
    sub_configs = {"text_config": ClapTextConfig, "audio_config": ClapAudioConfig}

    def __init__(
        self,
        text_config=None,
        audio_config=None,
        logit_scale_init_value=(1 / 0.07),
        projection_dim=512,
        projection_hidden_act="relu",
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Missing sub-config dicts fall back to the sub-config defaults.
        if text_config is None:
            logger.info("text_config is None. Initializing the ClapTextConfig with default values.")
            text_config = {}
        if audio_config is None:
            logger.info("audio_config is None. initializing the ClapAudioConfig with default values.")
            audio_config = {}

        self.text_config = ClapTextConfig(**text_config)
        self.audio_config = ClapAudioConfig(**audio_config)

        # The projection settings are shared: overwrite them on both sub-configs.
        for sub_config in (self.text_config, self.audio_config):
            sub_config.projection_dim = projection_dim
            sub_config.projection_hidden_act = projection_hidden_act

        self.projection_dim = projection_dim
        self.projection_hidden_act = projection_hidden_act
        self.hidden_size = self.text_config.hidden_size
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = initializer_factor
        self.num_hidden_layers = self.text_config.num_hidden_layers + len(self.audio_config.depths)

    @classmethod
    def from_text_audio_configs(cls, text_config: ClapTextConfig, audio_config: ClapAudioConfig, **kwargs):
        r"""
        Instantiate a [`ClapConfig`] (or a derived class) from clap text model configuration and clap audio model
        configuration.

        Returns:
            [`ClapConfig`]: An instance of a configuration object
        """
        return cls(text_config=text_config.to_dict(), audio_config=audio_config.to_dict(), **kwargs)
+
+
# Public API of this module, consumed by transformers' lazy-import machinery.
__all__ = ["ClapAudioConfig", "ClapConfig", "ClapTextConfig"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/convert_clap_original_pytorch_to_hf.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/convert_clap_original_pytorch_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..d422bc45ab3de00cd6df4de21ff6c7012ebb6559
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/convert_clap_original_pytorch_to_hf.py
@@ -0,0 +1,133 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import re
+
+from laion_clap import CLAP_Module
+
+from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
+
+
# Substring renames from the original laion_clap key names to the transformers
# naming scheme; applied by `rename_state_dict` below via `str.replace`.
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

# NOTE(review): this triggers a Hub download at import time, and `processor` is
# never referenced in the visible code of this script — presumably kept for
# interactive use; consider moving it inside a function. TODO confirm.
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
+
+
def init_clap(checkpoint_path, model_type, enable_fusion=False):
    """Build a ``laion_clap.CLAP_Module`` and load the given checkpoint into it.

    Args:
        checkpoint_path: Path to the original CLAP checkpoint file.
        model_type: Audio model architecture name passed as ``amodel``.
        enable_fusion: Whether the checkpoint was trained with patch fusion.

    Returns:
        The ``CLAP_Module`` with weights loaded.
    """
    clap = CLAP_Module(amodel=model_type, enable_fusion=enable_fusion)
    clap.load_ckpt(checkpoint_path)
    return clap
+
+
def get_config_from_original(clap_model):
    """Derive a `ClapConfig` whose dimensions match the original model's modules."""
    original = clap_model.model
    audio_config = {
        "patch_embeds_hidden_size": original.audio_branch.embed_dim,
        "depths": original.audio_branch.depths,
        # Input dim of the first audio projection layer == audio encoder hidden size.
        "hidden_size": original.audio_projection[0].in_features,
    }
    # Text hidden size is read off the pooler's dense layer.
    text_config = {"hidden_size": original.text_branch.pooler.dense.in_features}
    return ClapConfig(audio_config=audio_config, text_config=text_config)
+
+
def rename_state_dict(state_dict):
    """Rename original laion_clap state-dict keys to the transformers naming scheme.

    Applies the substring renames in `KEYS_TO_MODIFY_MAPPING`, rewrites
    `nn.Sequential` index-based keys to layer-based keys, and splits fused
    audio `qkv` weights into separate query/key/value tensors.

    Args:
        state_dict: Original checkpoint state dict (key -> tensor).

    Returns:
        A new dict with renamed keys (and qkv tensors split three-way).
    """
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # Apply the plain substring renames first.
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        # Hoisted: match once instead of matching the same pattern twice per key.
        sequential_match = re.match(sequential_layers_pattern, key)
        if sequential_match:
            # Replace sequential layers with list-style layer indices (3 modules per layer).
            sequential_layer = sequential_match.group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        else:
            projection_match = re.match(text_projection_pattern, key)
            if projection_match:
                projection_layer = int(projection_match.group(1))
                # Because in CLAP they use `nn.Sequential`...
                transformers_projection_layer = 1 if projection_layer == 0 else 2
                key = key.replace(
                    f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}."
                )

        # BUG FIX: the original condition was `if "audio" and "qkv" in key:` — the
        # constant "audio" is always truthy, so it degenerated to `"qkv" in key`.
        # Restrict the qkv split to audio-branch keys as intended.
        if "audio" in key and "qkv" in key:
            # Split fused qkv into separate query, key and value tensors.
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            model_state_dict[key.replace("qkv", "query")] = mixed_qkv[:qkv_dim]
            model_state_dict[key.replace("qkv", "key")] = mixed_qkv[qkv_dim : qkv_dim * 2]
            model_state_dict[key.replace("qkv", "value")] = mixed_qkv[qkv_dim * 2 :]
        else:
            model_state_dict[key] = value

    return model_state_dict
+
+
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, model_type, enable_fusion=False):
    """Convert an original laion_clap checkpoint and save it as a transformers `ClapModel`.

    Args:
        checkpoint_path: Path to the original CLAP checkpoint.
        pytorch_dump_folder_path: Output directory for the converted model/config.
        config_path: NOTE(review) — accepted but never used in this function;
            the config is always derived from the original model instead. TODO confirm intent.
        model_type: Audio architecture name (e.g. "HTSAT-tiny") forwarded to `init_clap`.
        enable_fusion: Whether the checkpoint was trained with patch fusion.
    """
    clap_model = init_clap(checkpoint_path, model_type, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = get_config_from_original(clap_model)
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
+
+
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    # Fixed: the help text was copy-pasted from --enable_fusion; this flag selects
    # the audio tower architecture, it has nothing to do with fusion.
    parser.add_argument("--model_type", default="HTSAT-tiny", type=str, help="Audio model architecture (amodel), e.g. HTSAT-tiny")
    args = parser.parse_args()

    convert_clap_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.model_type, args.enable_fusion
    )
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py
new file mode 100644
index 0000000000000000000000000000000000000000..42d3646065ece72e25be6359875ce84d91f9d5f6
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py
@@ -0,0 +1,365 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for CLAP."""
+
+import copy
+from typing import Any, Dict, List, Optional, Union
+
+import numpy as np
+import torch
+
+from ...audio_utils import mel_filter_bank, spectrogram, window_function
+from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
+from ...feature_extraction_utils import BatchFeature
+from ...utils import TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+
class ClapFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a CLAP feature extractor.

    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
    most of the main methods. Users should refer to this superclass for more information regarding those methods.

    This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the *Short Time
    Fourier Transform* (STFT) which should match pytorch's `torch.stft` equivalent.

    Args:
        feature_size (`int`, *optional*, defaults to 64):
            The feature dimension of the extracted Mel spectrograms. This corresponds to the number of mel filters
            (`n_mels`).
        sampling_rate (`int`, *optional*, defaults to 48000):
            The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). This only serves
            to warn users if the audio fed to the feature extractor does not have the same sampling rate.
        hop_length (`int`,*optional*, defaults to 480):
            Length of the overlapping windows for the STFT used to obtain the Mel Spectrogram. The audio will be split
            in smaller `frames` with a step of `hop_length` between each frame.
        max_length_s (`int`, *optional*, defaults to 10):
            The maximum input length of the model in seconds. This is used to pad the audio.
        fft_window_size (`int`, *optional*, defaults to 1024):
            Size of the window (in samples) on which the Fourier transform is applied. This controls the frequency
            resolution of the spectrogram. 400 means that the Fourier transform is computed on windows of 400 samples.
        padding_value (`float`, *optional*, defaults to 0.0):
            Padding value used to pad the audio. Should correspond to silences.
        return_attention_mask (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return the attention masks corresponding to the input.
        frequency_min (`float`, *optional*, defaults to 0):
            The lowest frequency of interest. The STFT will not be computed for values below this.
        frequency_max (`float`, *optional*, defaults to 14000):
            The highest frequency of interest. The STFT will not be computed for values above this.
        top_db (`float`, *optional*):
            The highest decibel value used to convert the mel spectrogram to the log scale. For more details see the
            `audio_utils.power_to_db` function
        truncation (`str`, *optional*, defaults to `"fusion"`):
            Truncation pattern for long audio inputs. Two patterns are available:
            - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and a
              downsampled version of the entire mel spectrogram.
            If `config.fusion` is set to True, shorter audios also need to return 4 mels, which will just be a copy
            of the original mel obtained from the padded audio.
            - `rand_trunc` will select a random crop of the mel spectrogram.
        padding (`str`, *optional*, defaults to `"repeatpad"`):
            Padding pattern for shorter audio inputs. Three patterns were originally implemented:
            - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`.
            - `repeat`: the audio is repeated and then cut to fit the `max_length`
            - `pad`: the audio is padded.
    """

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: Optional[int] = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # Number of bins of a one-sided spectrum: fft_window_size // 2 + 1.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        # Maximum number of raw samples the model accepts.
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # HTK-style filter bank (torchaudio defaults), used when `truncation="fusion"`.
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        # Slaney-style filter bank (librosa defaults), used for the non-fusion paths.
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.

        Returns:
            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, except for the
            mel filter banks, which do not need to be saved or printed as they are too long.
        """
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.ndarray, mel_filters: Optional[np.ndarray] = None) -> np.ndarray:
        """
        Compute the log-mel spectrogram of the provided `waveform` using the Hann window. In CLAP, two different filter
        banks are used depending on the truncation pattern:
            - `self.mel_filters`: they correspond to the default parameters of `torchaudio` which can be obtained from
              calling `torchaudio.transforms.MelSpectrogram().mel_scale.fb`. These filters are used when `truncation`
              is set to `"fusion"`.
            - `self.mel_filters_slaney` : they correspond to the default parameters of `librosa` which used
              `librosa.filters.mel` when computing the mel spectrogram. These filters were only used in the original
              implementation when the truncation mode is not `"fusion"`.

        Returns a `(num_frames, num_mel_filters)` array (time first, hence the transpose).
        """
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Stack 3 random `chunk_frames`-long crops plus a downsampled full mel into a 4-channel array."""
        # Split the valid crop start positions into three contiguous thirds
        # (front / middle / back of the spectrogram).
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        # Downsample the full spectrogram to `chunk_frames` frames via bilinear interpolation.
        # NOTE(review): the target mel dimension 64 is hard-coded here — presumably it must
        # equal `feature_size`; confirm before using a different feature size.
        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.ndarray, max_length, truncation, padding) -> np.ndarray:
        """
        Extracts the mel spectrogram and prepares it for the mode based on the `truncation` and `padding` arguments.
        Four different path are possible:
            - `truncation="fusion"` and the length of the waveform is greater than the max length: the mel spectrogram
              will be computed on the entire audio. 3 random crops and a downsampled version of the full mel spectrogram
              are then stacked together. They will later be used for `feature_fusion`.
            - `truncation="rand_trunc"` and the length of the waveform is smaller than the max length: the audio is
              padded based on `padding`.
            - `truncation="fusion"` and the length of the waveform is smaller than the max length: the audio is padded
              based on `padding`, and is repeated `4` times.
            - `truncation="rand_trunc"` and the length of the waveform is greater than the max length: the mel
              spectrogram will be computed on a random crop of the waveform.

        Returns a `(mel, longer)` pair where `longer` flags whether the audio exceeded `max_length`.
        """
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")

        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.tile(waveform, n_repeat + 1)[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.tile(waveform, n_repeat)
                # Zero-pad (silence) up to max_length; a no-op for `padding="repeat"`.
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                # Short audio: the 4 fusion channels are copies of the same mel.
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Main method to featurize and prepare for the model one or several sequence(s).

        Args:
            raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
                The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
                stereo, i.e. single float per timestep.
            truncation (`str`, *optional*):
                Truncation pattern for long audio inputs. Two patterns are available:
                - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and
                  a downsampled version of the entire mel spectrogram.
                If `config.fusion` is set to True, shorter audios also need to return 4 mels, which will just be a
                copy of the original mel obtained from the padded audio.
                - `rand_trunc` will select a random crop of the mel spectrogram.
            padding (`str`, *optional*):
                Padding pattern for shorter audio inputs. Three patterns were originally implemented:
                - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`.
                - `repeat`: the audio is repeated and then cut to fit the `max_length`
                - `pad`: the audio is padded.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition
                pipeline.
        """
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        # A 2D numpy array is interpreted as a batch of mono waveforms; 3D+ is rejected.
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            # NOTE(review): this branch is a no-op — the array is already float64 before the cast.
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
+
+
+__all__ = ["ClapFeatureExtractor"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/modeling_clap.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/modeling_clap.py
new file mode 100644
index 0000000000000000000000000000000000000000..5792257e026d7dfad72d783a242482771155a441
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/modeling_clap.py
@@ -0,0 +1,2314 @@
+# coding=utf-8
+# Copyright 2023 The LAION-AI Team and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch CLAP model."""
+
+import collections
+import math
+from dataclasses import dataclass
+from typing import Any, List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPooling,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+ torch_int,
+)
+from .configuration_clap import ClapAudioConfig, ClapConfig, ClapTextConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "laion/clap-htsat-fused"
+
+
+# Adapted from: https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/utils.py#L191
def interpolate(hidden_states, ratio):
    """
    Upsample `hidden_states` along the time axis by an integer `ratio` by repeating each
    frame `ratio` times. Used to compensate the resolution reduction caused by downsampling
    in a CNN.

    Args:
        hidden_states (`torch.FloatTensor` of shape (batch_size, time_length, classes_num)):
            Input hidden states
        ratio (`int`):
            The ratio of the length of the output to the length of the input.
    """
    batch_size, time_length, classes_num = hidden_states.shape
    # Insert a repeat axis after time, then fold it back into the time dimension.
    repeated = hidden_states.unsqueeze(2).expand(-1, -1, ratio, -1)
    return repeated.reshape(batch_size, time_length * ratio, classes_num)
+
+
+# Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/htsat.py#L249
def window_partition(hidden_states, window_size):
    """
    Split a feature map into non-overlapping square windows. The output shape is
    `(batch_size * num_windows, window_size, window_size, num_channels)`.

    Args:
        hidden_states (`torch.FloatTensor` of shape `(batch_size, height, width, num_channels)`):
            Input hidden states
        window_size (`int`):
            Window size
    """
    batch_size, height, width, num_channels = hidden_states.shape
    windows_per_col = height // window_size
    windows_per_row = width // window_size

    # (B, H, W, C) -> (B, H/ws, ws, W/ws, ws, C) -> (B, H/ws, W/ws, ws, ws, C)
    tiled = hidden_states.view(
        batch_size, windows_per_col, window_size, windows_per_row, window_size, num_channels
    )
    tiled = tiled.permute(0, 1, 3, 2, 4, 5).contiguous()
    # Collapse batch and window-grid dims into a single leading dim.
    return tiled.view(-1, window_size, window_size, num_channels)
+
+
+# Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/htsat.py#L263
def window_reverse(windows, window_size, height, width):
    """
    Merge windows back into a full-resolution feature map (inverse of window partitioning).

    Args:
        windows (`torch.FloatTensor` of shape `(num_windows * batch_size, window_size, window_size, num_channels)`):
            Input windows
        window_size (`int`):
            Window size
        height (`int`):
            Height of the resized audio
        width (`int`):
            Width of the resized audio
    """
    num_channels = windows.shape[-1]
    grid_h = height // window_size
    grid_w = width // window_size

    # (B*nW, ws, ws, C) -> (B, H/ws, W/ws, ws, ws, C) -> (B, H/ws, ws, W/ws, ws, C)
    merged = windows.view(-1, grid_h, grid_w, window_size, window_size, num_channels)
    merged = merged.permute(0, 1, 3, 2, 4, 5).contiguous()
    return merged.view(-1, height, width, num_channels)
+
+
+# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        x: torch.Tensor x:

    Returns: torch.Tensor
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    not_padding = input_ids.ne(padding_idx).int()
    # Running count of non-padding tokens gives 1-based positions; multiplying by the
    # mask zeroes the slots that correspond to padding again.
    positions = (torch.cumsum(not_padding, dim=1).type_as(not_padding) + past_key_values_length) * not_padding
    return positions.long() + padding_idx
+
+
+# contrastive loss function, adapted from
+# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html#CLIP-loss-function
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    """Cross-entropy over a similarity matrix where row i's target class is i (the matched pair)."""
    targets = torch.arange(logits.shape[0], device=logits.device)
    return F.cross_entropy(logits, targets)
+
+
@dataclass
# Copied from transformers.models.clip.modeling_clip.CLIPTextModelOutput with CLIP->Clap
class ClapTextModelOutput(ModelOutput):
    """
    Base class for text model's outputs that also contains a pooling of the last hidden states.

    Args:
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
            The text embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    text_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
@dataclass
class ClapAudioModelOutput(ModelOutput):
    """
    ClapAudio model output to mimic the output of the original implementation.

    Args:
        audio_embeds (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            The Audio embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    audio_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
@dataclass
# Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->Clap, vision->audio, Vision->Audio, image->audio
class ClapOutput(ModelOutput):
    """
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for audio-text similarity.
        logits_per_audio (`torch.FloatTensor` of shape `(audio_batch_size, text_batch_size)`):
            The scaled dot product scores between `audio_embeds` and `text_embeds`. This represents the audio-text
            similarity scores.
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, audio_batch_size)`):
            The scaled dot product scores between `text_embeds` and `audio_embeds`. This represents the text-audio
            similarity scores.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`ClapTextModel`].
        audio_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
            The audio embeddings obtained by applying the projection layer to the pooled output of [`ClapAudioModel`].
        text_model_output (`BaseModelOutputWithPooling`):
            The output of the [`ClapTextModel`].
        audio_model_output (`BaseModelOutputWithPooling`):
            The output of the [`ClapAudioModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_audio: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    audio_embeds: Optional[torch.FloatTensor] = None
    text_model_output: Optional[BaseModelOutputWithPooling] = None
    audio_model_output: Optional[BaseModelOutputWithPooling] = None

    def to_tuple(self) -> Tuple[Any]:
        # The two `*_model_output` entries are nested ModelOutputs; convert them to
        # plain tuples so the whole result is tuple-like.
        return tuple(
            self[k] if k not in ["text_model_output", "audio_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )
+
+
+# Adapted from transformers.models.swin.modeling_swin.SwinDropPath
class ClapDropPath(nn.Module):
    """
    Stochastic depth ("drop path"): zeroes whole samples in the batch with probability
    `drop_prob` during training, scaling the survivors by `1 / keep_prob` so the expected
    value is unchanged. Identity in eval mode. Slightly refactored from `SwinDropPath`.
    """

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states):
        # Identity when not training or when no drop probability is configured.
        if not self.training or self.drop_prob == 0.0:
            return hidden_states

        keep_prob = 1 - self.drop_prob
        # One random value per sample, broadcast across every remaining dimension.
        mask_shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
        binary_mask = keep_prob + torch.rand(mask_shape, dtype=hidden_states.dtype, device=hidden_states.device)
        binary_mask.floor_()  # each entry becomes 0 or 1
        return hidden_states.div(keep_prob) * binary_mask
+
+
+# Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/feature_fusion.py#L133
class ClapAudioAFFBlock(nn.Module):
    r"""
    Attentional Feature Fusion block from CLAP. Since CLAP always operates in 2D mode,
    only the 2D variant is implemented (the 1D version is not needed).
    """

    def __init__(self, config: ClapAudioConfig):
        super().__init__()
        channels = config.patch_embeds_hidden_size
        bottleneck = int(channels // config.aff_block_r)

        # Point-wise bottleneck applied per spatial position.
        self.local_att = nn.Sequential(
            nn.Conv2d(channels, bottleneck, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(bottleneck),
            nn.ReLU(inplace=True),
            nn.Conv2d(bottleneck, channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(channels),
        )
        # Same bottleneck applied to a globally average-pooled descriptor.
        self.global_att = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, bottleneck, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(bottleneck),
            nn.ReLU(inplace=True),
            nn.Conv2d(bottleneck, channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(channels),
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, hidden_states, residual):
        summed = hidden_states + residual
        # Gate in (0, 1) combining local (per-position) and global attention signals.
        gate = self.sigmoid(self.local_att(summed) + self.global_att(summed))
        # Gated blend of the two inputs, scaled by 2 (so equal inputs pass through doubled).
        return 2 * hidden_states * gate + 2 * residual * (1 - gate)
+
+
class ClapAudioPatchEmbed(nn.Module):
    """
    This module converts the hidden states reshaped as an image to patch embeddings ready to be passed to the
    Transformer block.
    """

    def __init__(self, config: ClapAudioConfig):
        super().__init__()
        # Normalize scalar config values into (height, width) pairs.
        img_size = (config.spec_size, config.spec_size) if isinstance(config.spec_size, int) else config.spec_size
        patch_size = (
            (config.patch_size, config.patch_size) if isinstance(config.patch_size, int) else config.patch_size
        )
        patch_stride = (
            (config.patch_stride, config.patch_stride) if isinstance(config.patch_stride, int) else config.patch_stride
        )

        self.img_size = img_size
        self.patch_stride = patch_stride

        # Number of patches along each axis, and in total.
        self.grid_size = (img_size[0] // patch_stride[0], img_size[1] // patch_stride[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]

        self.flatten = config.flatten_patch_embeds
        self.enable_fusion = config.enable_fusion

        # "Same"-style padding for the patch projection convolution.
        padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2)

        # With channel-map fusion the input carries 4 stacked mel channels.
        scale_factor = 4 if (self.enable_fusion) and (config.fusion_type == "channel_map") else 1

        self.proj = nn.Conv2d(
            config.patch_embed_input_channels * scale_factor,
            config.patch_embeds_hidden_size,
            kernel_size=patch_size,
            stride=patch_stride,
            padding=padding,
        )

        self.norm = nn.LayerNorm(config.patch_embeds_hidden_size) if config.enable_patch_layer_norm else nn.Identity()
        if self.enable_fusion:
            self.fusion_model = ClapAudioAFFBlock(config)
            # Kernel/stride widened 3x along the last axis for the 3 local mel chunks.
            self.mel_conv2d = nn.Conv2d(
                config.patch_embed_input_channels,
                config.patch_embeds_hidden_size,
                kernel_size=(patch_size[0], patch_size[1] * 3),
                stride=(patch_stride[0], patch_stride[1] * 3),
                padding=padding,
            )

    def forward(self, hidden_states, is_longer_idx=None):
        """
        Project the input into patch embeddings. `hidden_states` is `(batch, channels, height, width)`;
        `is_longer_idx` holds the batch indices whose extra mel channels must be fused in
        (only consulted when fusion is enabled).
        """
        if self.enable_fusion:
            # retrieve the last mel as we have transposed the input
            global_hidden_states = hidden_states[:, 0:1, :, :]

            # global processing
            batch_size, num_channels, height, width = global_hidden_states.shape

            if height != self.img_size[0] or width != self.img_size[1]:
                raise ValueError(
                    f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
                )

            global_hidden_states = self.proj(global_hidden_states)
            output_width = global_hidden_states.size(-1)
            if len(is_longer_idx) > 0:
                # local processing
                local_hidden_states = hidden_states[is_longer_idx, 1:, :, :].contiguous()
                batch_size, num_channels, height, width = local_hidden_states.shape
                # Fold the 3 local mel channels into the batch dimension for the conv.
                local_hidden_states = local_hidden_states.view(batch_size * num_channels, 1, height, width)

                local_hidden_states = self.mel_conv2d(local_hidden_states)

                _, features, height, width = local_hidden_states.shape
                local_hidden_states = local_hidden_states.view(batch_size, num_channels, features, height, width)
                # Concatenate the per-channel outputs along the width axis.
                local_hidden_states = local_hidden_states.permute((0, 2, 3, 1, 4)).contiguous().flatten(3)

                local_width = local_hidden_states.size(-1)
                # Right-pad with zeros so local and global widths match before fusing.
                local_hidden_states = torch.nn.functional.pad(
                    local_hidden_states, (0, output_width - local_width), "constant", 0
                )

                # Fuse the local features back into the matching "longer" batch entries.
                global_hidden_states[is_longer_idx] = self.fusion_model(
                    global_hidden_states[is_longer_idx], local_hidden_states
                )
            hidden_states = global_hidden_states
        else:
            _, _, height, width = hidden_states.shape
            if height != self.img_size[0] or width != self.img_size[1]:
                raise ValueError(
                    f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
                )
            hidden_states = self.proj(hidden_states)

        if self.flatten:
            # (batch, embed_dim, H', W') -> (batch, H'*W', embed_dim)
            hidden_states = hidden_states.flatten(2).transpose(1, 2)
        hidden_states = self.norm(hidden_states)
        return hidden_states
+
+
# Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->ClapAudio
class ClapAudioSelfAttention(nn.Module):
    """Window-local multi-head self-attention with a learned relative position bias
    (Swin-style). Expects inputs of shape `(num_windows * batch, window_len, dim)`."""

    def __init__(self, config, dim, num_heads, window_size):
        super().__init__()
        if dim % num_heads != 0:
            raise ValueError(
                f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
            )

        self.num_attention_heads = num_heads
        self.attention_head_size = int(dim / num_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.window_size = (
            window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
        )

        # one learnable bias per (relative offset, head); offsets span
        # [-(window-1), window-1] along each axis
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
        )

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
        coords_flatten = torch.flatten(coords, 1)
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()
        # shift the offsets to be non-negative, then flatten the 2D offset into a
        # single index into relative_position_bias_table
        relative_coords[:, :, 0] += self.window_size[0] - 1
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)
        self.register_buffer("relative_position_index", relative_position_index)

        self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head_size) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        batch_size, dim, num_channels = hidden_states.shape
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # add the learned relative position bias, broadcast over the batch
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
        relative_position_bias = relative_position_bias.view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
        )

        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
        attention_scores = attention_scores + relative_position_bias.unsqueeze(0)

        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in ClapAudioModel forward() function)
            mask_shape = attention_mask.shape[0]
            attention_scores = attention_scores.view(
                batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
            )
            attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
            attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
+
+
# Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->ClapAudio
class ClapAudioSelfOutput(nn.Module):
    """Post-attention projection: a dense layer followed by dropout.

    `input_tensor` is accepted for interface compatibility with other self-output
    modules but is not used here (no residual connection in this module).
    """

    def __init__(self, config, dim):
        super().__init__()
        # attribute names `dense` / `dropout` are part of the checkpoint layout
        self.dense = nn.Linear(dim, dim)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        return self.dropout(self.dense(hidden_states))
+
+
# Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->ClapAudio
class ClapAudioAttention(nn.Module):
    """Windowed self-attention plus its output projection, with support for
    pruning individual attention heads."""

    def __init__(self, config, dim, num_heads, window_size):
        super().__init__()
        # attribute names (`self`, `output`) are fixed by the checkpoint layout
        self.self = ClapAudioSelfAttention(config, dim, num_heads, window_size)
        self.output = ClapAudioSelfOutput(config, dim)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given heads and shrink the q/k/v and output projections accordingly."""
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Drop the pruned rows from q/k/v and the matching columns from the output dense.
        for name in ("query", "key", "value"):
            setattr(self.self, name, prune_linear_layer(getattr(self.self, name), index))
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Book-keeping: fewer heads, smaller combined head size, remember what was pruned.
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        inner_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
        projected = self.output(inner_outputs[0], hidden_states)
        # forward any attention probabilities the inner module returned
        return (projected,) + inner_outputs[1:]
+
+
# Copied from transformers.models.swin.modeling_swin.SwinIntermediate with Swin->ClapAudio
class ClapAudioIntermediate(nn.Module):
    """First MLP layer of a transformer block: expand `dim` by `mlp_ratio`,
    then apply the configured activation."""

    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
        # `hidden_act` is either a key into ACT2FN or a callable supplied directly
        self.intermediate_act_fn = (
            ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
+
+
# Copied from transformers.models.swin.modeling_swin.SwinOutput with Swin->ClapAudio
class ClapAudioOutput(nn.Module):
    """Second MLP layer of a transformer block: project back from
    `mlp_ratio * dim` to `dim`, then apply dropout."""

    def __init__(self, config, dim):
        super().__init__()
        # attribute names `dense` / `dropout` are part of the checkpoint layout
        self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.dropout(self.dense(hidden_states))
+
+
# Copied from transformers.models.swin.modeling_swin.SwinLayer with SwinDropPath->ClapDropPath, Swin->ClapAudio
class ClapAudioLayer(nn.Module):
    """One Swin transformer block: (shifted-)window attention and an MLP, each with
    pre-LayerNorm, a residual connection, and optional stochastic depth."""

    def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.shift_size = shift_size
        self.window_size = config.window_size
        self.input_resolution = input_resolution
        self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.attention = ClapAudioAttention(config, dim, num_heads, window_size=self.window_size)
        # stochastic depth on the residual branch; identity when the rate is 0
        self.drop_path = ClapDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.intermediate = ClapAudioIntermediate(config, dim)
        self.output = ClapAudioOutput(config, dim)

    def set_shift_and_window_size(self, input_resolution):
        # When the window would not fit the input, attend over the whole input
        # and disable the cyclic shift.
        if min(input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = torch_int(0)
            self.window_size = (
                torch.min(torch.tensor(input_resolution)) if torch.jit.is_tracing() else min(input_resolution)
            )

    def get_attn_mask(self, height, width, dtype, device):
        """Attention mask that keeps shifted windows from attending across the
        cyclic-shift wrap-around boundary; `None` when no shift is applied."""
        if self.shift_size > 0:
            # calculate attention mask for SW-MSA
            img_mask = torch.zeros((1, height, width, 1), dtype=dtype, device=device)
            height_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            width_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            # label each of the 9 regions with a distinct id; tokens from different
            # regions must not attend to each other
            count = 0
            for height_slice in height_slices:
                for width_slice in width_slices:
                    img_mask[:, height_slice, width_slice, :] = count
                    count += 1

            mask_windows = window_partition(img_mask, self.window_size)
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            # -100 acts as -inf before softmax for cross-region pairs
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None
        return attn_mask

    def maybe_pad(self, hidden_states, height, width):
        # pad bottom/right so height and width are multiples of the window size
        pad_right = (self.window_size - width % self.window_size) % self.window_size
        pad_bottom = (self.window_size - height % self.window_size) % self.window_size
        pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
        hidden_states = nn.functional.pad(hidden_states, pad_values)
        return hidden_states, pad_values

    def forward(
        self,
        hidden_states: torch.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        always_partition: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Apply (shifted-)window attention + MLP to `hidden_states` of shape
        `(batch, height * width, channels)` where `(height, width) == input_dimensions`."""
        if not always_partition:
            self.set_shift_and_window_size(input_dimensions)
        else:
            pass
        height, width = input_dimensions
        batch_size, _, channels = hidden_states.size()
        shortcut = hidden_states

        hidden_states = self.layernorm_before(hidden_states)

        hidden_states = hidden_states.view(batch_size, height, width, channels)

        # pad hidden_states to multiples of window size
        hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)

        _, height_pad, width_pad, _ = hidden_states.shape
        # cyclic shift
        if self.shift_size > 0:
            shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_hidden_states = hidden_states

        # partition windows
        hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
        hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
        attn_mask = self.get_attn_mask(
            height_pad, width_pad, dtype=hidden_states.dtype, device=hidden_states_windows.device
        )

        attention_outputs = self.attention(
            hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
        )

        attention_output = attention_outputs[0]

        attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
        shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)

        # reverse cyclic shift
        if self.shift_size > 0:
            attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            attention_windows = shifted_windows

        # drop the rows/columns added by maybe_pad
        was_padded = pad_values[3] > 0 or pad_values[5] > 0
        if was_padded:
            attention_windows = attention_windows[:, :height, :width, :].contiguous()

        attention_windows = attention_windows.view(batch_size, height * width, channels)

        # residual connection around attention
        hidden_states = shortcut + self.drop_path(attention_windows)

        # MLP with its own residual connection
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)
        layer_output = hidden_states + self.output(layer_output)

        layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
        return layer_outputs
+
+
# Copied from transformers.models.swin.modeling_swin.SwinStage with Swin->ClapAudio
class ClapAudioStage(nn.Module):
    """One resolution stage of the audio Swin encoder: `depth` transformer layers
    (alternating plain / shifted windows) followed by an optional patch-merging
    downsample."""

    def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
        super().__init__()
        self.config = config
        self.dim = dim
        layer_list = []
        for i in range(depth):
            # odd-indexed layers use shifted windows (shift = window_size // 2)
            layer_list.append(
                ClapAudioLayer(
                    config=config,
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    drop_path_rate=drop_path[i],
                    shift_size=config.window_size // 2 if i % 2 else 0,
                )
            )
        self.blocks = nn.ModuleList(layer_list)

        # patch merging layer
        if downsample is None:
            self.downsample = None
        else:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)

        self.pointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        always_partition: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        """Returns `(hidden_states, hidden_states_before_downsampling, output_dimensions)`
        plus attention tensors when `output_attentions` is set."""
        height, width = input_dimensions
        for i, block in enumerate(self.blocks):
            mask_for_layer = None if head_mask is None else head_mask[i]
            layer_outputs = block(hidden_states, input_dimensions, mask_for_layer, output_attentions, always_partition)
            hidden_states = layer_outputs[0]

        hidden_states_before_downsampling = hidden_states
        if self.downsample is None:
            output_dimensions = (height, width, height, width)
        else:
            # patch merging halves each spatial dimension, rounding up
            down_height, down_width = (height + 1) // 2, (width + 1) // 2
            output_dimensions = (height, width, down_height, down_width)
            hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)

        stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)

        if output_attentions:
            stage_outputs += layer_outputs[1:]
        return stage_outputs
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinPatchMerging with Swin->ClapAudio
+class ClapAudioPatchMerging(nn.Module):
+ """
+ Patch Merging Layer.
+
+ Args:
+ input_resolution (`Tuple[int]`):
+ Resolution of input feature.
+ dim (`int`):
+ Number of input channels.
+ norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
+ Normalization layer class.
+ """
+
+ def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
+ super().__init__()
+ self.input_resolution = input_resolution
+ self.dim = dim
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
+ self.norm = norm_layer(4 * dim)
+
+ def maybe_pad(self, input_feature, height, width):
+ should_pad = (height % 2 == 1) or (width % 2 == 1)
+ if should_pad:
+ pad_values = (0, 0, 0, width % 2, 0, height % 2)
+ input_feature = nn.functional.pad(input_feature, pad_values)
+
+ return input_feature
+
+ def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:
+ height, width = input_dimensions
+ # `dim` is height * width
+ batch_size, dim, num_channels = input_feature.shape
+
+ input_feature = input_feature.view(batch_size, height, width, num_channels)
+ # pad input to be disible by width and height, if needed
+ input_feature = self.maybe_pad(input_feature, height, width)
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_0 = input_feature[:, 0::2, 0::2, :]
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_1 = input_feature[:, 1::2, 0::2, :]
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_2 = input_feature[:, 0::2, 1::2, :]
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_3 = input_feature[:, 1::2, 1::2, :]
+ # batch_size height/2 width/2 4*num_channels
+ input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
+ input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C
+
+ input_feature = self.norm(input_feature)
+ input_feature = self.reduction(input_feature)
+
+ return input_feature
+
+
class ClapAudioEncoder(nn.Module):
    """Swin-style hierarchical encoder over log-mel spectrograms.

    Pipeline: per-mel-bin batch norm -> reshape the spectrogram to the swin input
    shape -> patch embedding (optionally fusing local crops for long clips) ->
    `len(config.depths)` stages with patch merging in between -> LayerNorm and
    adaptive average pooling into a single latent vector.
    """

    def __init__(self, config):
        super().__init__()
        self.num_layers = len(config.depths)

        self.config = config
        self.patch_embed = ClapAudioPatchEmbed(config)
        self.enable_fusion = config.enable_fusion
        self.patch_stride = self.patch_embed.patch_stride
        self.spec_size = config.spec_size
        self.freq_ratio = config.spec_size // config.num_mel_bins

        # channel width of the deepest stage (doubles at every patch merging)
        self.num_features = int(config.patch_embeds_hidden_size * 2 ** (self.num_layers - 1))

        # linearly increasing stochastic-depth rate across all blocks
        drop_path_rate = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # spatial resolution halves at each stage
        grid_size = self.patch_embed.grid_size
        self.input_resolutions = [(grid_size[0] // (2**i), grid_size[1] // (2**i)) for i in range(self.num_layers)]

        self.layers = nn.ModuleList(
            [
                ClapAudioStage(
                    config=config,
                    dim=int(config.patch_embeds_hidden_size * 2**i_layer),
                    input_resolution=self.input_resolutions[i_layer],
                    depth=config.depths[i_layer],
                    num_heads=config.num_attention_heads[i_layer],
                    drop_path=drop_path_rate[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
                    downsample=ClapAudioPatchMerging if (i_layer < self.num_layers - 1) else None,
                )
                for i_layer in range(self.num_layers)
            ]
        )

        self.gradient_checkpointing = False

        # batch norm runs over the mel-bin axis (input is transposed before/after in forward)
        self.batch_norm = nn.BatchNorm2d(config.num_mel_bins)
        self.norm = nn.LayerNorm(self.num_features)
        self.depths = config.depths
        self.avgpool = nn.AdaptiveAvgPool1d(1)

    def reshape_mel2img(self, normalized_input_features):
        """
        The input is 4 normalized log mel spectrograms. It is reshape to the common shape of images. Each channel
        should represent 1 of the 4 crops of the spectrogram. For more details, refer to the [`ClapFeatureExtractor`].
        """
        _, _, time_length, freq_length = normalized_input_features.shape

        spec_width = int(self.spec_size * self.freq_ratio)
        spec_heigth = self.spec_size // self.freq_ratio

        if time_length > spec_width or freq_length > spec_heigth:
            raise ValueError("the wav size should be less than or equal to the swin input size")

        # to avoid bicubic zero error
        if time_length < spec_width:
            normalized_input_features = nn.functional.interpolate(
                normalized_input_features, (spec_width, freq_length), mode="bicubic", align_corners=True
            )
        if freq_length < spec_heigth:
            normalized_input_features = nn.functional.interpolate(
                normalized_input_features, (time_length, spec_heigth), mode="bicubic", align_corners=True
            )

        batch, channels, time, freq = normalized_input_features.shape

        # batch_size, channels, spec_width, spec_heigth --> batch_size, channels, spec_heigth * freq_ratio, spec_width // freq_ratio
        # fold `freq_ratio` chunks of the time axis into the frequency axis
        normalized_input_features = normalized_input_features.reshape(
            batch, channels * self.freq_ratio, time // self.freq_ratio, freq
        )
        normalized_input_features = normalized_input_features.permute(0, 1, 3, 2).contiguous()
        normalized_input_features = normalized_input_features.reshape(
            batch, channels, freq * self.freq_ratio, time // self.freq_ratio
        )

        return normalized_input_features

    def forward(
        self,
        input_features,
        is_longer: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        output_hidden_states_before_downsampling: Optional[bool] = False,
        always_partition: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, ClapAudioModelOutput]:
        """Encode `input_features` `(batch, channels, time, mel_bins)` into a pooled
        latent (`pooler_output`) and a reshaped `last_hidden_state`."""
        # batch-norm over mel bins: move the mel axis into BatchNorm2d's channel slot
        input_features = input_features.transpose(1, 3)
        normalized_input_features = self.batch_norm(input_features)
        normalized_input_features = normalized_input_features.transpose(1, 3)

        # indices of clips flagged as longer than max_length; only used for fusion.
        # NOTE(review): `is_longer` must be non-None when fusion is enabled — confirm callers.
        is_longer_list_idx = None
        if self.enable_fusion:
            is_longer_list = is_longer.to(input_features.device)
            is_longer_list_idx = torch.where(is_longer_list == 1)[0]

        hidden_states = self.reshape_mel2img(normalized_input_features)

        frames_num = hidden_states.shape[2]

        hidden_states = self.patch_embed(hidden_states, is_longer_list_idx)

        all_hidden_states = () if output_hidden_states else None
        all_reshaped_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        input_dimensions = self.input_resolutions[0]

        if output_hidden_states:
            batch_size, _, hidden_size = hidden_states.shape
            # rearrange batch_size (height width) channels -> batch_size channel height width
            reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
            reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
            all_hidden_states += (hidden_states,)
            all_reshaped_hidden_states += (reshaped_hidden_state,)

        for i, layer_module in enumerate(self.layers):
            layer_head_mask = head_mask[i] if head_mask is not None else None

            input_dimensions = self.input_resolutions[i]

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__, hidden_states, input_dimensions, layer_head_mask, output_attentions
                )
            else:
                layer_outputs = layer_module(
                    hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
                )

            hidden_states = layer_outputs[0]

            hidden_states_before_downsampling = layer_outputs[1]
            output_dimensions = layer_outputs[2]

            input_dimensions = (output_dimensions[-2], output_dimensions[-1])

            if output_hidden_states and output_hidden_states_before_downsampling:
                batch_size, _, hidden_size = hidden_states_before_downsampling.shape
                # rearrange batch_size (height width) channels -> batch_size channel height width
                # here we use the original (not downsampled) height and width
                reshaped_hidden_state = hidden_states_before_downsampling.view(
                    batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
                )
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states_before_downsampling,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)
            elif output_hidden_states and not output_hidden_states_before_downsampling:
                batch_size, _, hidden_size = hidden_states.shape
                # rearrange batch_size (height width) channels -> batch_size channel height width
                reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)

            if output_attentions:
                all_self_attentions += layer_outputs[3:]

        last_hidden_state = self.norm(hidden_states)

        batch_size, _, n_channels = last_hidden_state.shape

        # recover the 2D (freq, time) layout of the final feature map
        freq_shape = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0]
        temporal_shape = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1]

        last_hidden_state = (
            last_hidden_state.permute(0, 2, 1).contiguous().reshape(batch_size, n_channels, freq_shape, temporal_shape)
        )

        batch_size, n_channels, n_frequencies, n_temp = last_hidden_state.shape
        # group 2D CNN
        # undo the reshape_mel2img frequency folding before pooling
        c_freq_bin = n_frequencies // self.freq_ratio
        last_hidden_state = last_hidden_state.reshape(
            batch_size, n_channels, n_frequencies // c_freq_bin, c_freq_bin, n_temp
        )
        last_hidden_state = (
            last_hidden_state.permute(0, 1, 3, 2, 4).contiguous().reshape(batch_size, n_channels, c_freq_bin, -1)
        )
        # average-pool over all spatial positions to a single latent vector
        latent_output = self.avgpool(torch.flatten(last_hidden_state, 2))
        latent_output = torch.flatten(latent_output, 1)

        if not return_dict:
            return tuple(
                v
                for v in [
                    last_hidden_state,
                    latent_output,
                    all_reshaped_hidden_states,
                    all_self_attentions,
                ]
                if v is not None
            )

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=latent_output,
            hidden_states=all_reshaped_hidden_states,
            attentions=all_self_attentions,
        )
+
+
# NOTE: these module-level docstring constants are user-facing documentation;
# typos here ("returnes", "all its model") were fixed in place.
CLAP_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`ClapConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CLAP_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

CLAP_AUDIO_INPUTS_DOCSTRING = r"""
    Args:
        input_features (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Input audio features. This should be returned by the [`ClapFeatureExtractor`] class that you can also
            retrieve from [`AutoFeatureExtractor`]. See [`ClapFeatureExtractor.__call__`] for details.
        is_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*):
            Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance
            the features.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

CLAP_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        input_features (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Input audio features. This should be returned by the [`ClapFeatureExtractor`] class that you can also
            retrieve from [`AutoFeatureExtractor`]. See [`ClapFeatureExtractor.__call__`] for details.
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
+
+
class ClapProjectionLayer(nn.Module):
    """Two-layer MLP that maps encoder outputs into the shared CLAP embedding space:
    `hidden_size -> projection_dim -> projection_dim` with the configured activation
    in between."""

    def __init__(self, config: Union[ClapAudioConfig, ClapTextConfig]):
        super().__init__()
        self.config = config
        # attribute names `linear1` / `linear2` are part of the checkpoint layout
        self.linear1 = nn.Linear(config.hidden_size, config.projection_dim)
        self.activation = ACT2FN[config.projection_hidden_act]
        self.linear2 = nn.Linear(config.projection_dim, config.projection_dim)

    def forward(self, hidden_states):
        return self.linear2(self.activation(self.linear1(hidden_states)))
+
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->ClapText, persistent=False->persistent=True
+class ClapTextEmbeddings(nn.Module):
+ """
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
+ """
+
    # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
    def __init__(self, config):
        """Build word / position / token-type embedding tables plus LayerNorm and dropout.

        Uses RoBERTa-style position ids: `position_embeddings` is re-created below with
        `padding_idx=config.pad_token_id` so padded positions get a zero embedding.
        """
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=True
        )
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=True
        )

        # End copy
        # Deliberately overrides the `position_embeddings` created above: the RoBERTa-style
        # table additionally ties the padding index so padding positions are not trained.
        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )
+
+ def forward(
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
+ ):
+ if position_ids is None:
+ if input_ids is not None:
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
+ else:
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
+
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
+ # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
+ # issue #5664
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
+ """
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
+
+ Args:
+ inputs_embeds: torch.Tensor
+
+ Returns: torch.Tensor
+ """
+ input_shape = inputs_embeds.size()[:-1]
+ sequence_length = input_shape[1]
+
+ position_ids = torch.arange(
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
+ )
+ return position_ids.unsqueeze(0).expand(input_shape)
+
+
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ClapText
class ClapTextSelfAttention(nn.Module):
    """
    BERT-style multi-head self-attention for the CLAP text encoder. Supports absolute or
    relative position embeddings and, when `config.is_decoder` is set, key/value caching
    and cross-attention over encoder hidden states.
    """

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        # Hidden size must split evenly across heads unless the config declares `embedding_size`.
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # Explicit constructor argument wins over the config setting; default is "absolute".
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            # One embedding per possible (query - key) distance in [-(max-1), max-1].
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        # (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Uni-directional self-attention with cache: append new K/V along the sequence axis.
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        use_cache = past_key_value is not None
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                # With a cache, only the newest query position (key_length - 1) is attended from.
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
                    -1, 1
                )
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r

            # Shift distances into [0, 2*max-2] so they index the distance embedding table.
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in ClapTextModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        # (batch, heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
+
+
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class ClapTextSelfOutput(nn.Module):
    """Post-attention projection: dense, dropout, then residual add + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # Project the attention output, regularize, then normalize together with the residual.
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
+
+
# Maps `config._attn_implementation` to the self-attention implementation class.
# Only the eager (pure PyTorch) implementation is registered for the CLAP text encoder.
CLAP_TEXT_SELF_ATTENTION_CLASSES = {
    "eager": ClapTextSelfAttention,
}
+
+
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ClapText,BERT->CLAP_TEXT
class ClapTextAttention(nn.Module):
    """
    Combines self-attention and its output projection, and supports pruning
    individual attention heads.
    """

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        # Attention implementation is selected via `config._attn_implementation` ("eager" only).
        self.self = CLAP_TEXT_SELF_ATTENTION_CLASSES[config._attn_implementation](
            config, position_embedding_type=position_embedding_type
        )
        self.output = ClapTextSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads from the Q/K/V and output projections."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        # Residual projection over the attention context; keep any extra outputs
        # (attention probs, cache) from the self-attention call.
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
+
+
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class ClapTextIntermediate(nn.Module):
    """First half of the transformer feed-forward block: up-projection plus activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # String activation names are resolved through ACT2FN; callables are used directly.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
+
+
# Copied from transformers.models.bert.modeling_bert.BertOutput
class ClapTextOutput(nn.Module):
    """Second half of the feed-forward block: down-projection, dropout, residual add + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # Project back down to hidden_size, regularize, then normalize with the residual.
        down_projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(down_projected + input_tensor)
+
+
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ClapText
class ClapTextLayer(nn.Module):
    """
    A single transformer layer: self-attention, optional cross-attention (decoder only),
    and a chunked feed-forward block.
    """

    def __init__(self, config):
        super().__init__()
        # chunk_size_feed_forward > 0 trades memory for speed in the FFN via apply_chunking_to_forward.
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ClapTextAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            # Cross-attention always uses absolute position embeddings.
            self.crossattention = ClapTextAttention(config, position_embedding_type="absolute")
        self.intermediate = ClapTextIntermediate(config)
        self.output = ClapTextOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # Feed-forward applied in chunks along the sequence dimension (dim 1) to bound memory.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        # One FFN pass over a chunk: up-project + activate, then down-project with residual norm.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
+
+
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ClapText
class ClapTextEncoder(nn.Module):
    """
    Stack of `config.num_hidden_layers` ClapTextLayer modules with optional gradient
    checkpointing, key/value caching, and collection of per-layer hidden states/attentions.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ClapTextLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        # Accumulators stay None when their output is not requested.
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        # Checkpointing recomputes activations in backward, which is incompatible with caching.
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            # Hidden states are recorded *before* each layer; the final state is appended after the loop.
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                # The layer's present key/value cache is always its last output.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            # Tuple output: drop the entries that were not requested (None accumulators).
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
+
+
# Copied from transformers.models.bert.modeling_bert.BertPooler
class ClapTextPooler(nn.Module):
    """Pools a sequence by passing the first token's hidden state through a dense + tanh."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # "Pooling" = transform only the hidden state of the first token of each sequence.
        first_token_state = hidden_states[:, 0]
        return self.activation(self.dense(first_token_state))
+
+
class ClapPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ClapConfig
    base_model_prefix = "clap"
    # NOTE(review): ClapTextEncoder carries gradient-checkpointing logic, yet this is
    # disabled here — confirm whether gradient checkpointing should be supported.
    supports_gradient_checkpointing = False

    def _init_weights(self, module):
        """Initialize the weights"""
        # All stds are scaled by the configurable initializer factor.
        factor = self.config.initializer_factor

        # Order matters: the specific module types must be matched before the generic
        # nn.Embedding / nn.Linear branches below (children are visited separately by apply()).
        if isinstance(module, ClapTextEmbeddings):
            module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 0.02)
            module.token_type_embeddings.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, ClapModel):
            # Learnable temperature parameters for the audio/text contrastive logits.
            nn.init.normal_(module.logit_scale_a, std=factor * 0.02)
            nn.init.normal_(module.logit_scale_t, std=factor * 0.02)
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=factor * 0.02)

        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, (nn.Conv2d, nn.Linear)):
            # Depth-scaled init for projection layers (GPT-2 style scaling by layer count).
            in_proj_std = (self.config.hidden_size**-0.5) * ((2 * self.config.num_hidden_layers) ** -0.5) * factor
            nn.init.normal_(module.weight, std=in_proj_std)
            if module.bias is not None:
                module.bias.data.zero_()
+
+
class ClapAudioModel(ClapPreTrainedModel):
    """Audio tower of CLAP: a thin wrapper around `ClapAudioEncoder` that forwards all inputs."""

    config_class = ClapAudioConfig
    main_input_name = "input_features"

    def __init__(self, config: ClapAudioConfig):
        super().__init__(config)
        self.audio_encoder = ClapAudioEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        # The patch-embedding convolution acts as the "input embedding" for audio features.
        return self.audio_encoder.patch_embed.proj

    @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ClapAudioConfig)
    def forward(
        self,
        input_features: Optional[torch.FloatTensor] = None,
        is_longer: Optional[torch.BoolTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from datasets import load_dataset
        >>> from transformers import AutoProcessor, ClapAudioModel

        >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
        >>> audio_sample = dataset["train"]["audio"][0]["array"]

        >>> model = ClapAudioModel.from_pretrained("laion/clap-htsat-fused")
        >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-fused")

        >>> inputs = processor(audios=audio_sample, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        ```"""
        # Fall back to config defaults for any unspecified output flag.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        return self.audio_encoder(
            input_features=input_features,
            is_longer=is_longer,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
+
+
class ClapTextModel(ClapPreTrainedModel):
    """

    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in *Attention is
    all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
    Kaiser and Illia Polosukhin.

    To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.

    .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762

    """

    config_class = ClapTextConfig

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = ClapTextEmbeddings(config)
        self.encoder = ClapTextEncoder(config)

        # Pooler is optional so the model can also be used for token-level outputs only.
        self.pooler = ClapTextPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        r"""
        encoder_hidden_states  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        # Resolve output flags against config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Caching is only meaningful for decoder configurations.
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        # Default attention mask covers the cached prefix plus the current tokens.
        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)

        # Use the registered all-zeros buffer when token_type_ids are not provided (tracing-friendly).
        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
+
+
@add_start_docstrings(CLAP_START_DOCSTRING)
class ClapModel(ClapPreTrainedModel):
    config_class = ClapConfig

    def __init__(self, config: ClapConfig):
        super().__init__(config)

        if not isinstance(config.text_config, ClapTextConfig):
            raise TypeError(
                "config.text_config is expected to be of type ClapTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.audio_config, ClapAudioConfig):
            raise TypeError(
                "config.audio_config is expected to be of type ClapAudioConfig but is of type"
                f" {type(config.audio_config)}."
            )

        text_config = config.text_config
        audio_config = config.audio_config

        # Learnable contrastive temperatures, stored in log space so `exp()` keeps the
        # effective scale strictly positive. One per direction (audio->text, text->audio).
        self.logit_scale_a = nn.Parameter(torch.tensor(math.log(config.logit_scale_init_value)))
        self.logit_scale_t = nn.Parameter(torch.tensor(math.log(config.logit_scale_init_value)))

        self.projection_dim = config.projection_dim

        # Text and audio towers, each followed by a projection into the shared embedding space.
        self.text_model = ClapTextModel(text_config)
        self.text_projection = ClapProjectionLayer(text_config)

        self.audio_model = ClapAudioModel(audio_config)
        self.audio_projection = ClapProjectionLayer(audio_config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CLAP_TEXT_INPUTS_DOCSTRING)
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`ClapTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, ClapModel

        >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
        >>> tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused")

        >>> inputs = tokenizer(["the sound of a cat", "the sound of a dog"], padding=True, return_tensors="pt")
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        # Use CLAP model's config for some fields (if specified) instead of those of audio & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # The pooled output is the second element of the tuple output and the `pooler_output`
        # attribute of the dict-like output. The previous `return_dict is not None` test was
        # always true (return_dict is defaulted above) and only worked because `ModelOutput`
        # supports integer indexing; use the same condition as the other accessors.
        pooled_output = text_outputs[1] if not return_dict else text_outputs.pooler_output
        text_features = self.text_projection(pooled_output)
        # L2-normalize so that dot products between modalities are cosine similarities.
        text_features = F.normalize(text_features, dim=-1)

        return text_features

    @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING)
    def get_audio_features(
        self,
        input_features: Optional[torch.Tensor] = None,
        is_longer: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            audio_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The audio embeddings obtained by
            applying the projection layer to the pooled output of [`ClapAudioModel`].

        Examples:

        ```python
        >>> from transformers import AutoFeatureExtractor, ClapModel
        >>> import torch

        >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
        >>> random_audio = torch.rand((16_000))
        >>> inputs = feature_extractor(random_audio, return_tensors="pt")
        >>> audio_features = model.get_audio_features(**inputs)
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Forward the resolved output flags as well (they were previously computed but
        # silently dropped), mirroring what `forward` does with the audio tower.
        audio_outputs = self.audio_model(
            input_features=input_features,
            is_longer=is_longer,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = audio_outputs[1] if not return_dict else audio_outputs.pooler_output

        audio_features = self.audio_projection(pooled_output)
        # L2-normalize so that dot products between modalities are cosine similarities.
        audio_features = F.normalize(audio_features, dim=-1)

        return audio_features

    @add_start_docstrings_to_model_forward(CLAP_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ClapOutput, config_class=ClapConfig)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        input_features: Optional[torch.FloatTensor] = None,
        is_longer: Optional[torch.BoolTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ClapOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from datasets import load_dataset
        >>> from transformers import AutoProcessor, ClapModel

        >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
        >>> audio_sample = dataset["train"]["audio"][0]["array"]

        >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
        >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-unfused")

        >>> input_text = ["Sound of a dog", "Sound of vacuum cleaner"]

        >>> inputs = processor(text=input_text, audios=audio_sample, return_tensors="pt", padding=True)

        >>> outputs = model(**inputs)
        >>> logits_per_audio = outputs.logits_per_audio  # this is the audio-text similarity score
        >>> probs = logits_per_audio.softmax(dim=-1)  # we can take the softmax to get the label probabilities
        ```"""
        # Use CLAP model's config for some fields (if specified) instead of those of audio & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        audio_outputs = self.audio_model(
            input_features=input_features,
            is_longer=is_longer,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        audio_embeds = audio_outputs[1] if not return_dict else audio_outputs.pooler_output
        audio_embeds = self.audio_projection(audio_embeds)

        text_embeds = text_outputs[1] if not return_dict else text_outputs.pooler_output
        text_embeds = self.text_projection(text_embeds)

        # normalized features
        audio_embeds = audio_embeds / audio_embeds.norm(p=2, dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)

        # cosine similarity as logits, scaled by the per-direction learned temperatures
        logit_scale_text = self.logit_scale_t.exp()
        logit_scale_audio = self.logit_scale_a.exp()
        logits_per_text = torch.matmul(text_embeds, audio_embeds.t()) * logit_scale_text
        logits_per_audio = torch.matmul(audio_embeds, text_embeds.t()) * logit_scale_audio

        loss = None
        if return_loss:
            # Symmetric InfoNCE: average of the text->audio and audio->text contrastive losses.
            caption_loss = contrastive_loss(logits_per_text)
            audio_loss = contrastive_loss(logits_per_audio.t())
            loss = (caption_loss + audio_loss) / 2.0

        if not return_dict:
            output = (logits_per_audio, logits_per_text, text_embeds, audio_embeds, text_outputs, audio_outputs)
            return ((loss,) + output) if loss is not None else output

        return ClapOutput(
            loss=loss,
            logits_per_audio=logits_per_audio,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            audio_embeds=audio_embeds,
            text_model_output=text_outputs,
            audio_model_output=audio_outputs,
        )
+
+
@add_start_docstrings(
    """
    CLAP Text Model with a projection layer on top (a linear layer on top of the pooled output).
    """,
    CLAP_START_DOCSTRING,
)
class ClapTextModelWithProjection(ClapPreTrainedModel):
    config_class = ClapTextConfig

    def __init__(self, config: ClapTextConfig):
        super().__init__(config)
        # Text backbone followed by a projection into the joint audio-text embedding space.
        self.text_model = ClapTextModel(config)
        self.text_projection = ClapProjectionLayer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.text_model.embeddings.word_embeddings = value

    @add_start_docstrings_to_model_forward(CLAP_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ClapTextModelOutput, config_class=ClapTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ClapTextModelOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, ClapTextModelWithProjection

        >>> model = ClapTextModelWithProjection.from_pretrained("laion/clap-htsat-unfused")
        >>> tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused")

        >>> inputs = tokenizer(["a sound of a cat", "a sound of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> text_embeds = outputs.text_embeds
        ```"""
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Pooled representation: attribute access for dict-like outputs, index 1 for tuples.
        pooled = encoder_outputs.pooler_output if return_dict else encoder_outputs[1]
        text_embeds = self.text_projection(pooled)

        if return_dict:
            return ClapTextModelOutput(
                text_embeds=text_embeds,
                last_hidden_state=encoder_outputs.last_hidden_state,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )

        # Tuple output: drop any `None` entries, mirroring the dict-like output's semantics.
        raw = (text_embeds, encoder_outputs[0]) + encoder_outputs[2:]
        return tuple(item for item in raw if item is not None)
+
+
@add_start_docstrings(
    """
    CLAP Audio Model with a projection layer on top (a linear layer on top of the pooled output).
    """,
    CLAP_START_DOCSTRING,
)
class ClapAudioModelWithProjection(ClapPreTrainedModel):
    config_class = ClapAudioConfig
    main_input_name = "input_features"

    def __init__(self, config: ClapAudioConfig):
        super().__init__(config)
        # Audio backbone followed by a projection into the joint audio-text embedding space.
        self.audio_model = ClapAudioModel(config)
        self.audio_projection = ClapProjectionLayer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.audio_model.audio_encoder.patch_embed.proj

    @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ClapAudioModelOutput, config_class=ClapAudioConfig)
    def forward(
        self,
        input_features: Optional[torch.FloatTensor] = None,
        is_longer: Optional[torch.BoolTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ClapAudioModelOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from datasets import load_dataset
        >>> from transformers import ClapAudioModelWithProjection, ClapProcessor

        >>> model = ClapAudioModelWithProjection.from_pretrained("laion/clap-htsat-fused")
        >>> processor = ClapProcessor.from_pretrained("laion/clap-htsat-fused")

        >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
        >>> audio_sample = dataset["train"]["audio"][0]["array"]

        >>> inputs = processor(audios=audio_sample, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> audio_embeds = outputs.audio_embeds
        ```"""
        # Fall back to the config for any output flag the caller did not set explicitly.
        if return_dict is None:
            return_dict = self.config.use_return_dict
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states

        encoder_outputs = self.audio_model(
            input_features=input_features,
            is_longer=is_longer,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Pooled representation: attribute access for dict-like outputs, index 1 for tuples.
        pooled = encoder_outputs.pooler_output if return_dict else encoder_outputs[1]
        audio_embeds = self.audio_projection(pooled)

        if return_dict:
            return ClapAudioModelOutput(
                audio_embeds=audio_embeds,
                last_hidden_state=encoder_outputs.last_hidden_state,
                attentions=encoder_outputs.attentions,
                hidden_states=encoder_outputs.hidden_states,
            )

        # Tuple output: drop any `None` entries, mirroring the dict-like output's semantics.
        raw = (audio_embeds, encoder_outputs[0]) + encoder_outputs[2:]
        return tuple(item for item in raw if item is not None)
+
+
# Public symbols re-exported by `transformers.models.clap` (consumed by the lazy module loader).
__all__ = [
    "ClapModel",
    "ClapPreTrainedModel",
    "ClapTextModel",
    "ClapTextModelWithProjection",
    "ClapAudioModel",
    "ClapAudioModelWithProjection",
]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py
new file mode 100644
index 0000000000000000000000000000000000000000..6df9d4aa3961d0899f1b3de85dd0fee1d23397bc
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py
@@ -0,0 +1,120 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Audio/Text processor class for CLAP
+"""
+
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding
+
+
class ClapProcessor(ProcessorMixin):
    r"""
    Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.

    [`ClapProcessor`] offers all the functionalities of [`ClapFeatureExtractor`] and [`RobertaTokenizerFast`]. See the
    [`~ClapProcessor.__call__`] and [`~ClapProcessor.decode`] for more information.

    Args:
        feature_extractor ([`ClapFeatureExtractor`]):
            The audio processor is a required input.
        tokenizer ([`RobertaTokenizerFast`]):
            The tokenizer is a required input.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """
        Main method to prepare one or several sequence(s) and audio(s) for the model. If `text` is not `None`, the
        `text` and `kwargs` arguments are forwarded to RobertaTokenizerFast's [`~RobertaTokenizerFast.__call__`] to
        encode the text. If `audios` is not `None`, the `audios` and `kwargs` arguments are forwarded to
        ClapFeatureExtractor's [`~ClapFeatureExtractor.__call__`]. Please refer to the docstring of the above two
        methods for more information.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            audios (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The audio or batch of audios to be prepared. Each audio can be NumPy array or PyTorch tensor. In case
                of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is a number of channels,
                and T the sample length of the audio.

            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **audio_features** -- Audio features to be fed to a model. Returned when `audios` is not `None`.
        """
        # `sampling_rate` is meant for the feature extractor only; remove it before
        # forwarding the remaining kwargs to the tokenizer.
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        encoding = None
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        audio_features = None
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        # Text only -> tokenizer output; audio only -> wrapped feature-extractor output;
        # both -> tokenizer output merged with the audio features.
        if encoding is None:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
        if audio_features is not None:
            encoding.update(audio_features)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer
        to the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Deduplicated union of the tokenizer's and feature extractor's input names, order preserved."""
        combined = self.tokenizer.model_input_names + self.feature_extractor.model_input_names
        return list(dict.fromkeys(combined))
+
+
# Public symbols re-exported by `transformers.models.clap` (consumed by the lazy module loader).
__all__ = ["ClapProcessor"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d6fae2e0236e7619988f0cfa3502ed49d0f90b0
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
# At type-checking time, import every submodule eagerly so static analyzers see all symbols.
if TYPE_CHECKING:
    from .configuration_distilbert import *
    from .modeling_distilbert import *
    from .modeling_flax_distilbert import *
    from .modeling_tf_distilbert import *
    from .tokenization_distilbert import *
    from .tokenization_distilbert_fast import *
else:
    import sys

    # At runtime, replace this module in `sys.modules` with a lazy proxy so the heavy
    # backends (torch/tf/flax) are only imported when one of their symbols is accessed.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47c1d95640feaedfa3679ad06d56478ef5c8463a
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b2c2827075bf2c54c6f470c4f128a9605247c1fc
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..02e08470745591efc1271d19f5d96a7f0a77aa4c
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7e27be03f3286df67a030897af74189fd5af905a
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..999969641d461c6c1257c30c09a23fa7f2ab6f4d
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0e80448464d5d4f9c5f82c340a7d8ef43a905934
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a83ee07b4b57016b4453bfdb076f9a88e17a3e20
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a28c8e5d03d029d344c3f9f3294c9298a9fd808
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py
@@ -0,0 +1,141 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""DistilBERT model configuration"""
+
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
class DistilBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DistilBertModel`] or a [`TFDistilBertModel`]. It
    is used to instantiate a DistilBERT model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the DistilBERT
    [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the DistilBERT model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`DistilBertModel`] or [`TFDistilBertModel`].
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        sinusoidal_pos_embds (`boolean`, *optional*, defaults to `False`):
            Whether to use sinusoidal positional embeddings.
        n_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        n_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        dim (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        hidden_dim (`int`, *optional*, defaults to 3072):
            The size of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        activation (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        qa_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probabilities used in the question answering model [`DistilBertForQuestionAnswering`].
        seq_classif_dropout (`float`, *optional*, defaults to 0.2):
            The dropout probabilities used in the sequence classification and the multiple choice model
            [`DistilBertForSequenceClassification`].
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the token used for padding; forwarded to [`PretrainedConfig`].

    Examples:

    ```python
    >>> from transformers import DistilBertConfig, DistilBertModel

    >>> # Initializing a DistilBERT configuration
    >>> configuration = DistilBertConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = DistilBertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "distilbert"
    # Map the BERT-style attribute names used throughout the library onto DistilBERT's
    # historical field names so e.g. `config.hidden_size` resolves to `config.dim`.
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        # Remaining kwargs (plus the padding token id) are handled by the base config.
        super().__init__(**kwargs, pad_token_id=pad_token_id)
+
+
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the ONNX input spec, mapping each input name to its dynamic axes."""
        # Multiple-choice inputs carry an extra `choice` dimension between batch and sequence.
        if self.task == "multiple-choice":
            axes = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            axes = {0: "batch", 1: "sequence"}
        return OrderedDict([("input_ids", axes), ("attention_mask", axes)])
+
+
# Public symbols re-exported by `transformers.models.distilbert` (consumed by the lazy module loader).
__all__ = ["DistilBertConfig", "DistilBertOnnxConfig"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..6aa50397d42cbe4ee1011b38a1e1b2804365a0aa
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py
@@ -0,0 +1,1378 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in
+part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert)
+"""
+
+import math
+from typing import Dict, List, Optional, Set, Tuple, Union
+
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import get_activation
+from ...configuration_utils import PretrainedConfig
+from ...integrations.deepspeed import is_deepspeed_zero3_enabled
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa
+from ...modeling_outputs import (
+ BaseModelOutput,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import (
+ apply_chunking_to_forward,
+ find_pruneable_heads_and_indices,
+ is_torch_greater_or_equal_than_2_2,
+ prune_linear_layer,
+)
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_distilbert import DistilBertConfig
+
+
+if is_flash_attn_2_available():
+ from ...modeling_flash_attention_utils import _flash_attention_forward
+
+
+logger = logging.get_logger(__name__)
+_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
+_CONFIG_FOR_DOC = "DistilBertConfig"
+
+
+# UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE #
+
+
def create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
    """Fill `out` with sinusoidal position encodings, DeepSpeed-ZeRO3-aware.

    Under ZeRO stage 3 the (possibly sharded) parameter is gathered first and
    only rank 0 writes, so every shard ends up with identical values.
    """
    if not is_deepspeed_zero3_enabled():
        _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
        return

    import deepspeed

    with deepspeed.zero.GatheredParameters(out, modifier_rank=0):
        if torch.distributed.get_rank() == 0:
            _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
+
+
+def _create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
+ position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
+ out.requires_grad = False
+ out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
+ out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
+ out.detach_()
+
+
class Embeddings(nn.Module):
    """Token embeddings plus position embeddings, followed by LayerNorm and dropout.

    Unlike BERT, no token-type embeddings are used.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)

        self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)
        self.dropout = nn.Dropout(config.dropout)
        # Pre-computed position ids, kept as a non-persistent buffer so the model
        # can be traced without passing position ids (cf. issue #5664).
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor] = None) -> torch.Tensor:
        """
        Parameters:
            input_ids (torch.Tensor):
                torch.tensor(bs, max_seq_length) The token ids to embed, or `None`.
            input_embeds (*optional*, torch.Tensor):
                Pre-computed word embeddings. Only used when `input_ids` is `None`.

        Returns: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position
        embeddings, no token_type embeddings).
        """
        if input_ids is not None:
            input_embeds = self.word_embeddings(input_ids)  # (bs, max_seq_length, dim)

        seq_length = input_embeds.size(1)

        # Prefer the registered buffer (tracing-friendly); otherwise build the
        # position ids on the fly from the input.
        if hasattr(self, "position_ids"):
            position_ids = self.position_ids[:, :seq_length]
        else:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)  # (max_seq_length)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)  # (bs, max_seq_length)

        pos_embeds = self.position_embeddings(position_ids)  # (bs, max_seq_length, dim)

        summed = input_embeds + pos_embeds  # (bs, max_seq_length, dim)
        return self.dropout(self.LayerNorm(summed))  # (bs, max_seq_length, dim)
+
+
class MultiHeadSelfAttention(nn.Module):
    """Eager (pure-PyTorch) multi-head self-attention with support for head pruning.

    The flash/SDPA subclasses reuse these projection weights and only override
    `forward`.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.config = config

        self.n_heads = config.n_heads
        self.dim = config.dim
        self.dropout = nn.Dropout(p=config.attention_dropout)
        # Encoder self-attention is bidirectional, never causal.
        self.is_causal = False

        # Have an even number of multi heads that divide the dimensions
        if self.dim % self.n_heads != 0:
            # Raise value errors for even multi-head attention nodes
            raise ValueError(f"self.n_heads: {self.n_heads} must divide self.dim: {self.dim} evenly")

        self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)

        self.pruned_heads: Set[int] = set()
        # Fixed at construction so remaining heads keep their width after pruning.
        self.attention_head_size = self.dim // self.n_heads

    def prune_heads(self, heads: List[int]):
        """Remove the given heads by shrinking the q/k/v/out projections in place."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.n_heads, self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.q_lin = prune_linear_layer(self.q_lin, index)
        self.k_lin = prune_linear_layer(self.k_lin, index)
        self.v_lin = prune_linear_layer(self.v_lin, index)
        self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
        # Update hyper params
        self.n_heads = self.n_heads - len(heads)
        self.dim = self.attention_head_size * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
        """
        Parameters:
            query: torch.tensor(bs, seq_length, dim)
            key: torch.tensor(bs, seq_length, dim)
            value: torch.tensor(bs, seq_length, dim)
            mask: torch.tensor(bs, seq_length) — positions equal to 0 are masked out

        Returns:
            weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
            seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
        """
        bs, q_length, dim = query.size()
        k_length = key.size(1)
        # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
        # assert key.size() == value.size()

        dim_per_head = self.dim // self.n_heads

        mask_reshp = (bs, 1, 1, k_length)

        def shape(x: torch.Tensor) -> torch.Tensor:
            """separate heads"""
            return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)

        def unshape(x: torch.Tensor) -> torch.Tensor:
            """group heads"""
            return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)

        q = shape(self.q_lin(query))  # (bs, n_heads, q_length, dim_per_head)
        k = shape(self.k_lin(key))  # (bs, n_heads, k_length, dim_per_head)
        v = shape(self.v_lin(value))  # (bs, n_heads, k_length, dim_per_head)

        # Scaling the queries is mathematically equivalent to scaling the scores.
        q = q / math.sqrt(dim_per_head)  # (bs, n_heads, q_length, dim_per_head)
        scores = torch.matmul(q, k.transpose(2, 3))  # (bs, n_heads, q_length, k_length)
        # Positions where the padding mask is 0 get the most negative representable
        # value so they contribute (effectively) zero weight after the softmax.
        mask = (mask == 0).view(mask_reshp).expand_as(scores)  # (bs, n_heads, q_length, k_length)
        scores = scores.masked_fill(
            mask, torch.tensor(torch.finfo(scores.dtype).min)
        )  # (bs, n_heads, q_length, k_length)

        weights = nn.functional.softmax(scores, dim=-1)  # (bs, n_heads, q_length, k_length)
        weights = self.dropout(weights)  # (bs, n_heads, q_length, k_length)

        # Mask heads if we want to
        if head_mask is not None:
            weights = weights * head_mask

        context = torch.matmul(weights, v)  # (bs, n_heads, q_length, dim_per_head)
        context = unshape(context)  # (bs, q_length, dim)
        context = self.out_lin(context)  # (bs, q_length, dim)

        if output_attentions:
            return (context, weights)
        else:
            return (context,)
+
+
class DistilBertFlashAttention2(MultiHeadSelfAttention):
    """
    DistilBert flash attention module. This module inherits from `MultiHeadSelfAttention` as the weights of the module
    stays untouched. The only required change would be on the forward pass where it needs to correctly call the public
    API of flash attention and deal with padding tokens in case the input contains any of them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
        """
        Parameters:
            query: torch.tensor(bs, seq_length, dim)
            key: torch.tensor(bs, seq_length, dim)
            value: torch.tensor(bs, seq_length, dim)
            mask: torch.tensor(bs, seq_length)

        Returns:
            weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
            seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
        """
        batch_size, q_length, dim = query.size()

        dim_per_head = self.dim // self.n_heads

        def reshape(x: torch.Tensor) -> torch.Tensor:
            """separate heads"""
            return x.view(batch_size, -1, self.n_heads, dim_per_head)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # (note: heads stay in dim 2, i.e. NOT transposed as in the eager path)
        query_states = reshape(self.q_lin(query))
        key_states = reshape(self.k_lin(key))
        value_states = reshape(self.v_lin(value))

        # Dropout is only applied during training.
        attn_dropout = self.config.attention_dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (LlamaRMSNorm handles it correctly)

        if query_states.dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_lin.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        # Despite the name, this is the attention *output* (context), not the weights.
        attn_weights = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            mask,
            q_length,
            dropout=attn_dropout,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
            is_causal=self.is_causal,
        )

        # Re-merge the heads and project back to the model dimension.
        attn_weights_reshaped = attn_weights.reshape(batch_size, q_length, self.n_heads * dim_per_head)
        attn_output = self.out_lin(attn_weights_reshaped)

        if output_attentions:
            # NOTE(review): flash attention does not materialize per-head weights;
            # this returns the reshaped context tensor, not true attention weights.
            return (attn_output, attn_weights)
        else:
            return (attn_output,)
+
+
class DistilBertSdpaAttention(MultiHeadSelfAttention):
    """Self-attention backed by `torch.nn.functional.scaled_dot_product_attention`.

    Falls back to the eager implementation when `output_attentions` or a head
    mask is requested, since SDPA supports neither.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__(config=config)
        self.dropout_prob = config.attention_dropout
        # torch < 2.2 needs contiguous q/k/v on CUDA with a custom mask (see below).
        self.require_contiguous_qkv = not is_torch_greater_or_equal_than_2_2

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
        """
        Parameters:
            query: torch.tensor(bs, seq_length, dim)
            key: torch.tensor(bs, seq_length, dim)
            value: torch.tensor(bs, seq_length, dim)
            mask: torch.tensor(bs, seq_length)

        Returns:
            weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
            seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
        """
        if output_attentions or head_mask is not None:
            logger.warning_once(
                "DistilBertSdpaAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support"
                " `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying"
                " the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be"
                ' removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            # Eager fallback: the parent class supports both features.
            return super().forward(
                query,
                key,
                value,
                mask,
                head_mask,
                output_attentions,
            )

        batch_size, _, _ = query.size()
        dim_per_head = self.dim // self.n_heads

        def shape(x: torch.Tensor) -> torch.Tensor:
            """separate heads"""
            return x.view(batch_size, -1, self.n_heads, dim_per_head).transpose(1, 2)

        def unshape(x: torch.Tensor) -> torch.Tensor:
            """group heads"""
            return x.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * dim_per_head)

        q = shape(self.q_lin(query))  # (bs, n_heads, q_length, dim_per_head)
        k = shape(self.k_lin(key))  # (bs, n_heads, k_length, dim_per_head)
        v = shape(self.v_lin(value))  # (bs, n_heads, k_length, dim_per_head)

        # SDPA with memory-efficient backend is broken in torch==2.1.2 when using non-contiguous inputs and a custom
        # attn_mask, so we need to call `.contiguous()` here. This was fixed in torch==2.2.0.
        # Reference: https://github.com/pytorch/pytorch/issues/112577
        if self.require_contiguous_qkv and q.device.type == "cuda" and mask is not None:
            q = q.contiguous()
            k = k.contiguous()
            v = v.contiguous()

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            q,
            k,
            v,
            attn_mask=mask,
            dropout_p=self.dropout_prob if self.training else 0.0,
            is_causal=False,
        )

        attn_output = unshape(attn_output)
        attn_output = self.out_lin(attn_output)

        return (attn_output,)
+
+
class FFN(nn.Module):
    """Position-wise two-layer feed-forward network with activation and dropout.

    The forward pass is chunked along the sequence dimension via
    `apply_chunking_to_forward` to trade compute for memory.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.dropout = nn.Dropout(p=config.dropout)
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)
        self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)
        self.activation = get_activation(config.activation)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Delegate to ff_chunk, applied chunk-by-chunk along the sequence axis.
        return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)

    def ff_chunk(self, input: torch.Tensor) -> torch.Tensor:
        """Apply lin1 -> activation -> lin2 -> dropout to one chunk."""
        hidden = self.activation(self.lin1(input))
        return self.dropout(self.lin2(hidden))
+
+
# Maps `config._attn_implementation` to the attention module class to instantiate.
DISTILBERT_ATTENTION_CLASSES = {
    "eager": MultiHeadSelfAttention,
    "flash_attention_2": DistilBertFlashAttention2,
    "sdpa": DistilBertSdpaAttention,
}
+
+
class TransformerBlock(nn.Module):
    """One DistilBERT layer: self-attention and FFN, each with residual + LayerNorm."""

    def __init__(self, config: PretrainedConfig):
        super().__init__()

        # The head count must divide the hidden size evenly.
        if config.dim % config.n_heads != 0:
            raise ValueError(f"config.n_heads {config.n_heads} must divide config.dim {config.dim} evenly")

        self.attention = DISTILBERT_ATTENTION_CLASSES[config._attn_implementation](config)
        self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)

        self.ffn = FFN(config)
        self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)

    def forward(
        self,
        x: torch.Tensor,
        attn_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
        """
        Parameters:
            x: torch.tensor(bs, seq_length, dim)
            attn_mask: torch.tensor(bs, seq_length)

        Returns:
            `(ffn_output,)`, or `(sa_weights, ffn_output)` when `output_attentions=True`,
            where sa_weights is torch.tensor(bs, n_heads, seq_length, seq_length) and
            ffn_output is torch.tensor(bs, seq_length, dim).
        """
        # Self-Attention
        attn_outputs = self.attention(
            query=x,
            key=x,
            value=x,
            mask=attn_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
        )
        sa_weights = None
        if output_attentions:
            attn_output, sa_weights = attn_outputs  # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
        else:
            # The attention modules always return tuples; anything else is a bug.
            if type(attn_outputs) is not tuple:
                raise TypeError(f"sa_output must be a tuple but it is {type(attn_outputs)} type")
            attn_output = attn_outputs[0]
        hidden = self.sa_layer_norm(attn_output + x)  # residual + norm, (bs, seq_length, dim)

        # Feed Forward Network, with its own residual + norm
        ffn_output: torch.Tensor = self.output_layer_norm(self.ffn(hidden) + hidden)  # (bs, seq_length, dim)

        return (sa_weights, ffn_output) if output_attentions else (ffn_output,)
+
+
class Transformer(nn.Module):
    """Stack of `TransformerBlock`s with optional gradient checkpointing."""

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.n_layers = config.n_layers
        self.layer = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)])
        # Toggled externally (PreTrainedModel.gradient_checkpointing_enable).
        self.gradient_checkpointing = False

    def forward(
        self,
        x: torch.Tensor,
        attn_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:  # docstyle-ignore
        """
        Parameters:
            x: torch.tensor(bs, seq_length, dim) Input sequence embedded.
            attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence.
            head_mask: per-layer head masks, indexable by layer (as produced by
                `get_head_mask` in the calling model).

        Returns:
            hidden_state: torch.tensor(bs, seq_length, dim) Sequence of hidden states in the last (top)
            layer all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]
                Tuple of length n_layers with the hidden states from each layer.
                Optional: only if output_hidden_states=True
            all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
                Tuple of length n_layers with the attention weights from each layer
                Optional: only if output_attentions=True
        """
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_state = x
        for i, layer_module in enumerate(self.layer):
            # Record the input of each layer (i.e. all states *before* the last layer).
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_state,)

            if self.gradient_checkpointing and self.training:
                # Recompute activations in the backward pass to save memory.
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_state,
                    attn_mask,
                    head_mask[i],
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_state,
                    attn_mask,
                    head_mask[i],
                    output_attentions,
                )

            # The hidden state is always the last element of the layer's tuple.
            hidden_state = layer_outputs[-1]

            if output_attentions:
                if len(layer_outputs) != 2:
                    raise ValueError(f"The length of the layer_outputs should be 2, but it is {len(layer_outputs)}")

                attentions = layer_outputs[0]
                all_attentions = all_attentions + (attentions,)
            else:
                if len(layer_outputs) != 1:
                    raise ValueError(f"The length of the layer_outputs should be 1, but it is {len(layer_outputs)}")

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
        )
+
+
+# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
class DistilBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = DistilBertConfig
    # No TensorFlow checkpoint conversion is supported for this model.
    load_tf_weights = None
    base_model_prefix = "distilbert"
    supports_gradient_checkpointing = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def _init_weights(self, module: nn.Module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            # The padding token embedding is kept at zero.
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, Embeddings) and self.config.sinusoidal_pos_embds:
            # Overwrite the (already randomly initialized) position table with
            # fixed sinusoids when the config requests them.
            create_sinusoidal_embeddings(
                self.config.max_position_embeddings, self.config.dim, module.position_embeddings.weight
            )
+
+
+DISTILBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DISTILBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
@add_start_docstrings(
    "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
    DISTILBERT_START_DOCSTRING,
)
class DistilBertModel(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.embeddings = Embeddings(config)  # Embeddings
        self.transformer = Transformer(config)  # Encoder
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
        self._use_sdpa = config._attn_implementation == "sdpa"

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.embeddings.position_embeddings

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embedding matrix. If position embeddings are learned, increasing the size
                will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
                end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
                size will add correct vectors at the end following the position encoding algorithm, whereas reducing
                the size will remove vectors from the end.
        """
        num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings

        # no resizing needs to be done if the length stays the same
        if num_position_embeds_diff == 0:
            return

        logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
        self.config.max_position_embeddings = new_num_position_embeddings

        old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone()

        self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim)

        if self.config.sinusoidal_pos_embds:
            # Fix: the position-embedding table lives on `self.embeddings`;
            # referencing `self.position_embeddings` here (as before) raised
            # AttributeError because DistilBertModel has no such submodule.
            create_sinusoidal_embeddings(
                n_pos=self.config.max_position_embeddings,
                dim=self.config.dim,
                out=self.embeddings.position_embeddings.weight,
            )
        else:
            with torch.no_grad():
                if num_position_embeds_diff > 0:
                    # Growing: copy the old vectors, keep the new tail randomly initialized.
                    self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter(
                        old_position_embeddings_weight
                    )
                else:
                    # Shrinking: keep only the leading vectors of the old table.
                    self.embeddings.position_embeddings.weight = nn.Parameter(
                        old_position_embeddings_weight[:num_position_embeds_diff]
                    )
        # move position_embeddings to correct device
        self.embeddings.position_embeddings.to(self.device)

    def get_input_embeddings(self) -> nn.Embedding:
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings: nn.Embedding):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune: Dict[int, List[List[int]]]):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.transformer.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # Remember whether the caller passed a head mask *before* it is expanded,
        # since the SDPA fast path below is only valid without one.
        head_mask_is_none = head_mask is None
        # Prepare head mask if needed
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embeddings = self.embeddings(input_ids, inputs_embeds)  # (bs, seq_length, dim)

        if self._use_flash_attention_2:
            # Flash attention consumes the 2D padding mask directly; drop it when
            # there is no padding at all so the unpadded fast path is taken.
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        else:
            if attention_mask is None:
                attention_mask = torch.ones(input_shape, device=device)  # (bs, seq_length)

            if self._use_sdpa and head_mask_is_none and not output_attentions:
                # Expand the 2D padding mask to the additive 4D form expected by SDPA.
                attention_mask = _prepare_4d_attention_mask_for_sdpa(
                    attention_mask, embeddings.dtype, tgt_len=input_shape[1]
                )

        return self.transformer(
            x=embeddings,
            attn_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
+
+
@add_start_docstrings(
    """DistilBert Model with a `masked language modeling` head on top.""",
    DISTILBERT_START_DOCSTRING,
)
class DistilBertForMaskedLM(DistilBertPreTrainedModel):
    # The vocabulary projection weight is tied to the input word-embedding matrix.
    _tied_weights_keys = ["vocab_projector.weight"]

    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.activation = get_activation(config.activation)

        self.distilbert = DistilBertModel(config)
        # MLM head: dense transform -> activation -> LayerNorm -> projection to vocab logits.
        self.vocab_transform = nn.Linear(config.dim, config.dim)
        self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
        self.vocab_projector = nn.Linear(config.dim, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

        self.mlm_loss_fct = nn.CrossEntropyLoss()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embedding matrix. If position embeddings are learned, increasing the size
                will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
                end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
                size will add correct vectors at the end following the position encoding algorithm, whereas reducing
                the size will remove vectors from the end.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    def get_output_embeddings(self) -> nn.Module:
        return self.vocab_projector

    def set_output_embeddings(self, new_embeddings: nn.Module):
        self.vocab_projector = new_embeddings

    # Fix: this head consumes plain sequences, so the inputs-docstring placeholder is
    # "batch_size, sequence_length"; "batch_size, num_choices" was copied from the
    # multiple-choice model and produced a wrong generated docstring.
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[MaskedLMOutput, Tuple[torch.Tensor, ...]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size - 1]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        dlbrt_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = dlbrt_output[0]  # (bs, seq_length, dim)
        prediction_logits = self.vocab_transform(hidden_states)  # (bs, seq_length, dim)
        prediction_logits = self.activation(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_layer_norm(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_projector(prediction_logits)  # (bs, seq_length, vocab_size)

        mlm_loss = None
        if labels is not None:
            # CrossEntropyLoss ignores positions labelled -100 by default.
            mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))

        if not return_dict:
            # Tuple output: (loss?, logits, hidden_states?, attentions?)
            output = (prediction_logits,) + dlbrt_output[1:]
            return ((mlm_loss,) + output) if mlm_loss is not None else output

        return MaskedLMOutput(
            loss=mlm_loss,
            logits=prediction_logits,
            hidden_states=dlbrt_output.hidden_states,
            attentions=dlbrt_output.attentions,
        )
+
+
@add_start_docstrings(
    """
    DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DISTILBERT_START_DOCSTRING,
)
class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.distilbert = DistilBertModel(config)
        # Classification head applied to the first token's hidden state (see `forward`).
        self.pre_classifier = nn.Linear(config.dim, config.dim)
        self.classifier = nn.Linear(config.dim, config.num_labels)
        self.dropout = nn.Dropout(config.seq_classif_dropout)

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embedding matrix. If position embeddings are learned, increasing the size
                will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
                end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
                size will add correct vectors at the end following the position encoding algorithm, whereas reducing
                the size will remove vectors from the end.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor, ...]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        distilbert_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # DistilBert has no pooler layer: "pool" by taking the first token's hidden state.
        hidden_state = distilbert_output[0]  # (bs, seq_len, dim)
        pooled_output = hidden_state[:, 0]  # (bs, dim)
        pooled_output = self.pre_classifier(pooled_output)  # (bs, dim)
        pooled_output = nn.ReLU()(pooled_output)  # (bs, dim)
        pooled_output = self.dropout(pooled_output)  # (bs, dim)
        logits = self.classifier(pooled_output)  # (bs, num_labels)

        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels and the label dtype, then cache it
            # on the config so every later call uses the same loss function.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            # Tuple output: (loss?, logits, hidden_states?, attentions?)
            output = (logits,) + distilbert_output[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )
+
+
@add_start_docstrings(
    """
    DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
    linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    DISTILBERT_START_DOCSTRING,
)
class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.distilbert = DistilBertModel(config)
        # Two logits per token: span start and span end.
        self.qa_outputs = nn.Linear(config.dim, config.num_labels)
        if config.num_labels != 2:
            raise ValueError(f"config.num_labels should be 2, but it is {config.num_labels}")

        self.dropout = nn.Dropout(config.qa_dropout)

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embedding matrix. If position embeddings are learned, increasing the size
                will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
                end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
                size will add correct vectors at the end following the position encoding algorithm, whereas reducing
                the size will remove vectors from the end.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    # Fix: this head consumes plain sequences, so the inputs-docstring placeholder is
    # "batch_size, sequence_length"; "batch_size, num_choices" was copied from the
    # multiple-choice model and produced a wrong generated docstring.
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[QuestionAnsweringModelOutput, Tuple[torch.Tensor, ...]]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        distilbert_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = distilbert_output[0]  # (bs, max_query_len, dim)

        hidden_states = self.dropout(hidden_states)  # (bs, max_query_len, dim)
        logits = self.qa_outputs(hidden_states)  # (bs, max_query_len, 2)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()  # (bs, max_query_len)
        end_logits = end_logits.squeeze(-1).contiguous()  # (bs, max_query_len)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            # Out-of-range targets were clamped to `ignored_index` (== seq_len), which the
            # loss is configured to skip; the final loss averages start and end losses.
            loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            # Tuple output: (loss?, start_logits, end_logits, hidden_states?, attentions?)
            output = (start_logits, end_logits) + distilbert_output[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )
+
+
@add_start_docstrings(
    """
    DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    DISTILBERT_START_DOCSTRING,
)
class DistilBertForTokenClassification(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)
        self.num_labels = config.num_labels

        # Encoder plus a per-token classification head.
        self.distilbert = DistilBertModel(config)
        self.dropout = nn.Dropout(config.dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embedding matrix. If position embeddings are learned, increasing the size
                will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
                end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
                size will add correct vectors at the end following the position encoding algorithm, whereas reducing
                the size will remove vectors from the end.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[TokenClassifierOutput, Tuple[torch.Tensor, ...]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.distilbert(
            input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # One logit vector per token: dropout on the encoder states, then the linear head.
        token_states = self.dropout(encoder_outputs[0])
        logits = self.classifier(token_states)

        loss = None
        if labels is not None:
            # Flatten batch and sequence dimensions for the cross-entropy computation.
            loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))

        if return_dict:
            return TokenClassifierOutput(
                loss=loss,
                logits=logits,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )

        # Tuple output: (loss?, logits, hidden_states?, attentions?)
        output = (logits,) + encoder_outputs[1:]
        return output if loss is None else (loss,) + output
+
+
@add_start_docstrings(
    """
    DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
    a softmax) e.g. for RocStories/SWAG tasks.
    """,
    DISTILBERT_START_DOCSTRING,
)
class DistilBertForMultipleChoice(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.distilbert = DistilBertModel(config)
        # Head producing one score per choice; scores are compared across choices.
        self.pre_classifier = nn.Linear(config.dim, config.dim)
        self.classifier = nn.Linear(config.dim, 1)
        self.dropout = nn.Dropout(config.seq_classif_dropout)

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`)
                The number of new position embeddings. If position embeddings are learned, increasing the size will add
                newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
                position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
                add correct vectors at the end following the position encoding algorithm, whereas reducing the size
                will remove vectors from the end.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    @add_start_docstrings_to_model_forward(
        DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[MultipleChoiceModelOutput, Tuple[torch.Tensor, ...]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, DistilBertForMultipleChoice
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
        >>> model = DistilBertForMultipleChoice.from_pretrained("distilbert-base-cased")

        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> choice0 = "It is eaten with a fork and a knife."
        >>> choice1 = "It is eaten while held in the hand."
        >>> labels = torch.tensor(0).unsqueeze(0)  # choice0 is correct (according to Wikipedia ;)), batch size 1

        >>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors="pt", padding=True)
        >>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels)  # batch size is 1

        >>> # the linear classifier still needs to be trained
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Inputs are (bs, num_choices, seq_len); the choice count comes from the second axis.
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Flatten the choices axis into the batch axis so the encoder sees 2-D inputs.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.distilbert(
            input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Pool by taking the first token's hidden state of each flattened (example, choice) row.
        hidden_state = outputs[0]  # (bs * num_choices, seq_len, dim)
        pooled_output = hidden_state[:, 0]  # (bs * num_choices, dim)
        pooled_output = self.pre_classifier(pooled_output)  # (bs * num_choices, dim)
        pooled_output = nn.ReLU()(pooled_output)  # (bs * num_choices, dim)
        pooled_output = self.dropout(pooled_output)  # (bs * num_choices, dim)
        logits = self.classifier(pooled_output)  # (bs * num_choices, 1)

        # Un-flatten: one score per choice, softmaxed across choices inside the loss.
        reshaped_logits = logits.view(-1, num_choices)  # (bs, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            # Tuple output: (loss?, reshaped_logits, hidden_states?, attentions?)
            output = (reshaped_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
+
+
# Public symbols re-exported by the distilbert model package.
__all__ = [
    "DistilBertForMaskedLM",
    "DistilBertForMultipleChoice",
    "DistilBertForQuestionAnswering",
    "DistilBertForSequenceClassification",
    "DistilBertForTokenClassification",
    "DistilBertModel",
    "DistilBertPreTrainedModel",
]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1cf0faaed3f6a423a05902b626d6206e645a62e
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py
@@ -0,0 +1,906 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+from typing import Callable, Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+
+from ...modeling_flax_outputs import (
+ FlaxBaseModelOutput,
+ FlaxMaskedLMOutput,
+ FlaxMultipleChoiceModelOutput,
+ FlaxQuestionAnsweringModelOutput,
+ FlaxSequenceClassifierOutput,
+ FlaxTokenClassifierOutput,
+)
+from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_distilbert import DistilBertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
+_CONFIG_FOR_DOC = "DistilBertConfig"
+
+
+FLAX_DISTILBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading, saving and converting weights from PyTorch models)
+
+ This model is also a
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
+ a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and
+ behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DISTILBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`numpy.ndarray` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
def get_angles(pos, i, d_model):
    """Return the angle arguments `pos / 10000^(2*(i//2) / d_model)` for sinusoidal encodings."""
    # Dimensions 2k and 2k+1 share the same inverse frequency 1 / 10000^(2k / d_model).
    exponent = (2 * (i // 2)) / np.float32(d_model)
    inverse_frequencies = 1 / np.power(10000, exponent)
    return pos * inverse_frequencies
+
+
def positional_encoding(position, d_model):
    """Build the sinusoidal positional-encoding table of shape `(1, position, d_model)`."""
    # Angle arguments for every (position, dimension) pair.
    angles = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model)

    # Even dimensions (2i) take the sine, odd dimensions (2i + 1) the cosine.
    table = np.empty_like(angles)
    table[:, 0::2] = np.sin(angles[:, 0::2])
    table[:, 1::2] = np.cos(angles[:, 1::2])

    # Prepend a batch axis and hand the constant table over to JAX.
    return jnp.array(table[np.newaxis, ...])
+
+
class FlaxEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.word_embeddings = nn.Embed(
            self.config.vocab_size,
            self.config.dim,
            embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        # Either learned position embeddings or a fixed sinusoidal table, per config.
        if not self.config.sinusoidal_pos_embds:
            self.position_embeddings = nn.Embed(
                self.config.max_position_embeddings,
                self.config.dim,
                embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
            )
        else:
            # Precomputed constant table; not a trainable parameter.
            self.pos_encoding = positional_encoding(self.config.max_position_embeddings, self.config.dim)
        self.LayerNorm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.dropout)

    def __call__(self, input_ids, deterministic: bool = True):
        # Embed
        batch_size, seq_length = input_ids.shape
        inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
        if not self.config.sinusoidal_pos_embds:
            # Positions are simply 0..seq_length-1, broadcast over the batch.
            position_ids = jnp.arange(seq_length).astype("i4")
            position_ids = jnp.broadcast_to(position_ids, shape=(batch_size, seq_length))
            position_embeds = self.position_embeddings(position_ids.astype("i4"))
        else:
            position_embeds = self.pos_encoding[:, :seq_length, :]
            # explicitly cast the positions here, since self.embed_positions are not registered as parameters
            position_embeds = position_embeds.astype(inputs_embeds.dtype)

        # Sum all embeddings
        hidden_states = inputs_embeds + position_embeds

        # Layer Norm
        hidden_states = self.LayerNorm(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        return hidden_states
+
+
class FlaxMultiHeadSelfAttention(nn.Module):
    """Multi-head self-attention with separate q/k/v/output projections."""

    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.n_heads = self.config.n_heads
        self.dim = self.config.dim
        self.dropout = nn.Dropout(rate=self.config.attention_dropout)

        # The hidden size is split evenly across heads.
        if not (self.dim % self.n_heads == 0):
            raise ValueError(f"Hidden size {self.dim} not dividable by number of heads {self.n_heads}")

        self.q_lin = nn.Dense(
            self.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.k_lin = nn.Dense(
            self.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.v_lin = nn.Dense(
            self.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.out_lin = nn.Dense(
            self.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )

    def __call__(
        self,
        query,
        key,
        value,
        mask,
        deterministic: bool = True,
        output_attentions: bool = False,
    ):
        bs, q_len, dim = query.shape
        k_len = key.shape[1]
        # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
        # assert key.size() == value.size()

        dim_per_head = self.dim // self.n_heads

        # Shape the (bs, k_len) mask can be broadcast to against the (bs, n_heads, q_len, k_len) scores.
        mask_reshp = (bs, 1, 1, k_len)

        def shape(x):
            """separate heads"""
            return x.reshape(bs, -1, self.n_heads, dim_per_head).transpose(0, 2, 1, 3)

        def unshape(x):
            """group heads"""
            return x.transpose(0, 2, 1, 3).reshape(bs, -1, self.n_heads * dim_per_head)

        q = shape(self.q_lin(query))  # (bs, n_heads, q_len, dim_per_head)
        k = shape(self.k_lin(key))  # (bs, n_heads, k_len, dim_per_head)
        v = shape(self.v_lin(value))  # (bs, n_heads, k_len, dim_per_head)

        # Scaled dot-product: scale q by 1/sqrt(head_dim) before the matmul.
        q = q / math.sqrt(dim_per_head)  # (bs, n_heads, q_len, dim_per_head)
        scores = jnp.matmul(q, k.transpose(0, 1, 3, 2))  # (bs, n_heads, q_len, k_len)
        mask = jnp.reshape(mask, mask_reshp)

        # Additive masking: positions with mask == 0 get a large negative score,
        # which softmax maps to ~0 attention weight.
        mask = mask.astype(scores.dtype)
        scores = scores - 1e30 * (1.0 - mask)

        weights = nn.softmax(scores, axis=-1)  # (bs, n_heads, q_len, k_len)
        weights = self.dropout(weights, deterministic=deterministic)

        context = jnp.matmul(weights, v)  # (bs, n_heads, q_len, dim_per_head)
        context = unshape(context)  # (bs, q_len, dim)
        context = self.out_lin(context)  # (bs, q_len, dim)

        if output_attentions:
            return (context, weights)
        else:
            return (context,)
+
+
class FlaxFFN(nn.Module):
    """Position-wise feed-forward network: dense -> activation -> dense -> dropout."""

    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dropout = nn.Dropout(rate=self.config.dropout)
        self.chunk_size_feed_forward = self.config.chunk_size_feed_forward
        self.seq_len_dim = 1
        # Expand to hidden_dim, then project back to the model dimension.
        self.lin1 = nn.Dense(
            self.config.hidden_dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.lin2 = nn.Dense(
            self.config.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )

        self.activation = ACT2FN[self.config.activation]

    def __call__(self, hidden_states, deterministic: bool = True):
        hidden_states = self.lin1(hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = self.lin2(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        return hidden_states
+
+
class FlaxTransformerBlock(nn.Module):
    """One encoder layer: self-attention + FFN, each followed by a residual LayerNorm."""

    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        assert (
            self.config.dim % self.config.n_heads == 0
        ), f"Hidden size {self.config.dim} not dividable by number of heads {self.config.n_heads}"

        self.attention = FlaxMultiHeadSelfAttention(self.config, dtype=self.dtype)
        self.sa_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)

        self.ffn = FlaxFFN(self.config, dtype=self.dtype)
        self.output_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)

    def __call__(
        self,
        hidden_states,
        attn_mask,
        output_attentions: bool = False,
        deterministic: bool = True,
    ):
        # Self-Attention
        sa_output = self.attention(
            query=hidden_states,
            key=hidden_states,
            value=hidden_states,
            mask=attn_mask,
            output_attentions=output_attentions,
            deterministic=deterministic,
        )
        # The attention module returns (context,) or (context, weights).
        if output_attentions:
            sa_output, sa_weights = sa_output
        else:
            assert type(sa_output) is tuple
            sa_output = sa_output[0]
        # Residual connection + LayerNorm (post-norm).
        sa_output = self.sa_layer_norm(sa_output + hidden_states)

        # Feed Forward Network
        ffn_output = self.ffn(sa_output, deterministic=deterministic)
        ffn_output = self.output_layer_norm(ffn_output + sa_output)
        # Output tuple: (weights?, hidden_states) -- the hidden state is always last.
        output = (ffn_output,)
        if output_attentions:
            output = (sa_weights,) + output
        return output
+
+
+class FlaxTransformer(nn.Module):
+    """Stack of `n_layers` FlaxTransformerBlock modules run sequentially."""
+
+    config: DistilBertConfig
+    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
+
+    def setup(self):
+        # Layers are named "0", "1", ... to match the checkpoint parameter tree.
+        self.layers = [
+            FlaxTransformerBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.n_layers)
+        ]
+
+    def __call__(
+        self,
+        hidden_states,
+        attention_mask,
+        output_attentions: bool = False,
+        output_hidden_states: bool = False,
+        deterministic: bool = True,
+        return_dict: bool = False,
+    ):
+        """Run all blocks; return a FlaxBaseModelOutput or, when `return_dict=False`, a tuple.
+
+        NOTE: the tuple order here is (hidden_states, all_attentions, all_hidden_states),
+        with None entries dropped — see the `tuple(...)` construction below.
+        """
+        all_hidden_states = () if output_hidden_states else None
+        all_attentions = () if output_attentions else None
+
+        for layer_module in self.layers:
+            if output_hidden_states:
+                # Record the input of each layer (i.e. the previous layer's output).
+                all_hidden_states = all_hidden_states + (hidden_states,)
+
+            layer_outputs = layer_module(
+                hidden_states=hidden_states,
+                attn_mask=attention_mask,
+                output_attentions=output_attentions,
+                deterministic=deterministic,
+            )
+            # The block returns (ffn_output,) or (sa_weights, ffn_output); the last item is the new state.
+            hidden_states = layer_outputs[-1]
+
+            if output_attentions:
+                assert len(layer_outputs) == 2
+                attentions = layer_outputs[0]
+                all_attentions = all_attentions + (attentions,)
+            else:
+                assert len(layer_outputs) == 1
+
+        # Add last layer
+        if output_hidden_states:
+            all_hidden_states = all_hidden_states + (hidden_states,)
+
+        if not return_dict:
+            return tuple(v for v in [hidden_states, all_attentions, all_hidden_states] if v is not None)
+        return FlaxBaseModelOutput(
+            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+        )
+
+
+class FlaxTransformerEncoder(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layer = FlaxTransformer(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ deterministic: bool = True,
+ return_dict: bool = False,
+ ):
+ return self.layer(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ deterministic=deterministic,
+ return_dict=return_dict,
+ )
+
+
+class FlaxDistilBertLMDecoder(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
+
+ def setup(self):
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
+
+ def __call__(self, inputs, kernel):
+ inputs = jnp.asarray(inputs, self.dtype)
+ kernel = jnp.asarray(kernel, self.dtype)
+ y = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ())))
+ bias = jnp.asarray(self.bias, self.dtype)
+ y = y + bias
+ return y
+
+
+class FlaxDistilBertPreTrainedModel(FlaxPreTrainedModel):
+    """
+    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+    models.
+    """
+
+    config_class = DistilBertConfig
+    base_model_prefix = "distilbert"
+    module_class: nn.Module = None  # set by concrete subclasses
+
+    def __init__(
+        self,
+        config: DistilBertConfig,
+        input_shape: Tuple = (1, 1),
+        seed: int = 0,
+        dtype: jnp.dtype = jnp.float32,
+        _do_init: bool = True,
+        **kwargs,
+    ):
+        module = self.module_class(config=config, dtype=dtype, **kwargs)
+        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+        """Initialize module parameters; when `params` is given, only fill in the keys listed in `self._missing_keys`."""
+        # init input tensors
+        input_ids = jnp.zeros(input_shape, dtype="i4")
+        attention_mask = jnp.ones_like(input_ids)
+
+        params_rng, dropout_rng = jax.random.split(rng)
+        rngs = {"params": params_rng, "dropout": dropout_rng}
+
+        random_params = self.module.init(rngs, input_ids, attention_mask, return_dict=False)["params"]
+
+        if params is not None:
+            # Merge freshly-initialized values for any keys missing from the loaded checkpoint.
+            random_params = flatten_dict(unfreeze(random_params))
+            params = flatten_dict(unfreeze(params))
+            for missing_key in self._missing_keys:
+                params[missing_key] = random_params[missing_key]
+            self._missing_keys = set()
+            return freeze(unflatten_dict(params))
+        else:
+            return random_params
+
+    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    def __call__(
+        self,
+        input_ids,
+        attention_mask=None,
+        head_mask=None,  # NOTE: accepted for API compatibility but not forwarded to the module below
+        params: dict = None,
+        dropout_rng: jax.random.PRNGKey = None,
+        train: bool = False,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ):
+        # Fall back to config defaults for any unspecified output flags.
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+        if attention_mask is None:
+            attention_mask = jnp.ones_like(input_ids)
+
+        # Handle any PRNG if needed
+        rngs = {}
+        if dropout_rng is not None:
+            rngs["dropout"] = dropout_rng
+
+        # Positional args map onto the module's __call__:
+        # (input_ids, attention_mask, deterministic, output_attentions, output_hidden_states, return_dict)
+        return self.module.apply(
+            {"params": params or self.params},
+            jnp.array(input_ids, dtype="i4"),
+            jnp.array(attention_mask, dtype="i4"),
+            not train,
+            output_attentions,
+            output_hidden_states,
+            return_dict,
+            rngs=rngs,
+        )
+
+
+class FlaxDistilBertModule(nn.Module):
+    """Bare DistilBERT encoder: embeddings followed by the transformer stack."""
+
+    config: DistilBertConfig
+    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
+
+    def setup(self):
+        self.embeddings = FlaxEmbeddings(self.config, dtype=self.dtype)
+        self.transformer = FlaxTransformerEncoder(self.config, dtype=self.dtype)
+
+    def __call__(
+        self,
+        input_ids,
+        attention_mask,
+        deterministic: bool = True,
+        output_attentions: bool = False,
+        output_hidden_states: bool = False,
+        return_dict: bool = True,
+    ):
+        # Fall back to config defaults for unspecified output flags (defaults here are
+        # booleans, so these branches only matter when a caller passes None explicitly).
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+        input_embeds = self.embeddings(input_ids, deterministic=deterministic)
+        return self.transformer(
+            hidden_states=input_embeds,
+            attention_mask=attention_mask,
+            deterministic=deterministic,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+
+@add_start_docstrings(
+    "The bare DistilBert Model transformer outputting raw hidden-states without any specific head on top.",
+    FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertModel(FlaxDistilBertPreTrainedModel):
+    # Bare encoder: shared pretrained-model plumbing wired to FlaxDistilBertModule.
+    module_class = FlaxDistilBertModule
+
+
+append_call_sample_docstring(FlaxDistilBertModel, _CHECKPOINT_FOR_DOC, None, _CONFIG_FOR_DOC)
+
+
+class FlaxDistilBertForMaskedLMModule(nn.Module):
+    """DistilBERT encoder with the masked-language-modeling head (transform, LayerNorm, vocab projection)."""
+
+    config: DistilBertConfig
+    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
+
+    def setup(self):
+        self.distilbert = FlaxDistilBertModule(self.config, dtype=self.dtype)
+        self.vocab_transform = nn.Dense(
+            self.config.dim,
+            dtype=self.dtype,
+            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+        )
+        self.vocab_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
+        if self.config.tie_word_embeddings:
+            # Tied head: reuses the word-embedding matrix as the output kernel (see __call__).
+            self.vocab_projector = FlaxDistilBertLMDecoder(
+                self.config,
+                dtype=self.dtype,
+            )
+        else:
+            self.vocab_projector = nn.Dense(
+                self.config.vocab_size,
+                dtype=self.dtype,
+                kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+            )
+
+    def __call__(
+        self,
+        input_ids,
+        attention_mask,
+        deterministic: bool = True,
+        output_attentions: bool = False,
+        output_hidden_states: bool = False,
+        return_dict: bool = True,
+    ):
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        dlbrt_output = self.distilbert(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            deterministic=deterministic,
+            return_dict=return_dict,
+        )
+        hidden_states = dlbrt_output[0]
+        prediction_logits = self.vocab_transform(hidden_states)
+        prediction_logits = ACT2FN[self.config.activation](prediction_logits)
+        prediction_logits = self.vocab_layer_norm(prediction_logits)
+
+        if self.config.tie_word_embeddings:
+            # Pull the embedding table out of the submodule's variables and use its
+            # transpose as the projection kernel (weight tying).
+            shared_embedding = self.distilbert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
+            prediction_logits = self.vocab_projector(prediction_logits, shared_embedding.T)
+        else:
+            prediction_logits = self.vocab_projector(prediction_logits)
+
+        if not return_dict:
+            output = (prediction_logits,) + dlbrt_output[1:]
+            return output
+
+        return FlaxMaskedLMOutput(
+            logits=prediction_logits,
+            hidden_states=dlbrt_output.hidden_states,
+            attentions=dlbrt_output.attentions,
+        )
+
+
+@add_start_docstrings("""DistilBert Model with a `language modeling` head on top.""", FLAX_DISTILBERT_START_DOCSTRING)
+class FlaxDistilBertForMaskedLM(FlaxDistilBertPreTrainedModel):
+    # Masked-LM model: shared plumbing wired to FlaxDistilBertForMaskedLMModule.
+    module_class = FlaxDistilBertForMaskedLMModule
+
+
+append_call_sample_docstring(FlaxDistilBertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC)
+
+
+class FlaxDistilBertForSequenceClassificationModule(nn.Module):
+    """DistilBERT encoder with a sequence-classification head on the first ([CLS]) token."""
+
+    config: DistilBertConfig
+    dtype: jnp.dtype = jnp.float32
+
+    def setup(self):
+        self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+        self.pre_classifier = nn.Dense(
+            self.config.dim,
+            dtype=self.dtype,
+            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+        )
+        self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
+        self.classifier = nn.Dense(
+            self.config.num_labels,
+            dtype=self.dtype,
+        )
+
+    def __call__(
+        self,
+        input_ids,
+        attention_mask,
+        deterministic: bool = True,
+        output_attentions: bool = False,
+        output_hidden_states: bool = False,
+        return_dict: bool = True,
+    ):
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+        # Model
+        distilbert_output = self.distilbert(
+            input_ids,
+            attention_mask,
+            deterministic=deterministic,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        hidden_state = distilbert_output[0]  # (bs, seq_len, dim)
+        # DistilBERT has no pooler; the first token's hidden state serves as the pooled representation.
+        pooled_output = hidden_state[:, 0]  # (bs, dim)
+        pooled_output = self.pre_classifier(pooled_output)  # (bs, dim)
+        pooled_output = ACT2FN["relu"](pooled_output)
+        pooled_output = self.dropout(pooled_output, deterministic=deterministic)
+        logits = self.classifier(pooled_output)  # (bs, dim)
+
+        if not return_dict:
+            return (logits,) + distilbert_output[1:]
+
+        return FlaxSequenceClassifierOutput(
+            logits=logits,
+            hidden_states=distilbert_output.hidden_states,
+            attentions=distilbert_output.attentions,
+        )
+
+
+@add_start_docstrings(
+    """
+    DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+    pooled output) e.g. for GLUE tasks.
+    """,
+    FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForSequenceClassification(FlaxDistilBertPreTrainedModel):
+    # Sequence-classification model wired to its task-specific module.
+    module_class = FlaxDistilBertForSequenceClassificationModule
+
+
+append_call_sample_docstring(
+    FlaxDistilBertForSequenceClassification,
+    _CHECKPOINT_FOR_DOC,
+    FlaxSequenceClassifierOutput,
+    _CONFIG_FOR_DOC,
+)
+
+
+class FlaxDistilBertForMultipleChoiceModule(nn.Module):
+    """DistilBERT encoder with a multiple-choice head: choices are flattened into the batch, scored, then reshaped."""
+
+    config: DistilBertConfig
+    dtype: jnp.dtype = jnp.float32
+
+    def setup(self):
+        self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+        self.pre_classifier = nn.Dense(
+            self.config.dim,
+            dtype=self.dtype,
+            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+        )
+        self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
+        # One scalar score per (example, choice) pair.
+        self.classifier = nn.Dense(
+            1,
+            dtype=self.dtype,
+        )
+
+    def __call__(
+        self,
+        input_ids,
+        attention_mask,
+        deterministic: bool = True,
+        output_attentions: bool = False,
+        output_hidden_states: bool = False,
+        return_dict: bool = True,
+    ):
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+        # Inputs arrive as (batch, num_choices, seq_len); fold choices into the batch axis.
+        num_choices = input_ids.shape[1]
+        input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
+        attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
+
+        # Model
+        outputs = self.distilbert(
+            input_ids,
+            attention_mask,
+            deterministic=deterministic,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        hidden_state = outputs[0]
+        pooled_output = hidden_state[:, 0]
+        pooled_output = self.pre_classifier(pooled_output)
+        pooled_output = ACT2FN["relu"](pooled_output)
+        pooled_output = self.dropout(pooled_output, deterministic=deterministic)
+        logits = self.classifier(pooled_output)
+
+        # Unfold: (batch * num_choices, 1) -> (batch, num_choices)
+        reshaped_logits = logits.reshape(-1, num_choices)
+
+        if not return_dict:
+            # NOTE(review): other heads in this file slice `outputs[1:]`; the `[2:]` here
+            # drops the first extra output when return_dict=False — confirm intended.
+            return (reshaped_logits,) + outputs[2:]
+
+        return FlaxMultipleChoiceModelOutput(
+            logits=reshaped_logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+
+@add_start_docstrings(
+    """
+    DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+    a softmax) e.g. for RocStories/SWAG tasks.
+    """,
+    FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForMultipleChoice(FlaxDistilBertPreTrainedModel):
+    # Multiple-choice model wired to its task-specific module.
+    module_class = FlaxDistilBertForMultipleChoiceModule
+
+
+# Multiple choice uses a 3-D input shape, so its input docstring is overwritten.
+overwrite_call_docstring(
+    FlaxDistilBertForMultipleChoice, DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+)
+append_call_sample_docstring(
+    FlaxDistilBertForMultipleChoice,
+    _CHECKPOINT_FOR_DOC,
+    FlaxMultipleChoiceModelOutput,
+    _CONFIG_FOR_DOC,
+)
+
+
+class FlaxDistilBertForTokenClassificationModule(nn.Module):
+    """DistilBERT encoder with a per-token classification head (dropout + linear over every position)."""
+
+    config: DistilBertConfig
+    dtype: jnp.dtype = jnp.float32
+
+    def setup(self):
+        self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+        self.dropout = nn.Dropout(rate=self.config.dropout)
+        self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
+
+    def __call__(
+        self,
+        input_ids,
+        attention_mask,
+        deterministic: bool = True,
+        output_attentions: bool = False,
+        output_hidden_states: bool = False,
+        return_dict: bool = True,
+    ):
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+        # Model
+        outputs = self.distilbert(
+            input_ids,
+            attention_mask,
+            deterministic=deterministic,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        hidden_states = outputs[0]
+        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+        # Classify every token position independently: (bs, seq_len, num_labels).
+        logits = self.classifier(hidden_states)
+
+        if not return_dict:
+            return (logits,) + outputs[1:]
+
+        return FlaxTokenClassifierOutput(
+            logits=logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+
+@add_start_docstrings(
+    """
+    DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+    for Named-Entity-Recognition (NER) tasks.
+    """,
+    FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForTokenClassification(FlaxDistilBertPreTrainedModel):
+    # Token-classification model wired to its task-specific module.
+    module_class = FlaxDistilBertForTokenClassificationModule
+
+
+append_call_sample_docstring(
+    FlaxDistilBertForTokenClassification,
+    _CHECKPOINT_FOR_DOC,
+    FlaxTokenClassifierOutput,
+    _CONFIG_FOR_DOC,
+)
+
+
+class FlaxDistilBertForQuestionAnsweringModule(nn.Module):
+    """DistilBERT encoder with a span-extraction QA head producing start/end logits per token."""
+
+    config: DistilBertConfig
+    dtype: jnp.dtype = jnp.float32
+
+    def setup(self):
+        self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+        self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
+        # The head emits exactly two logits per token: span start and span end.
+        assert self.config.num_labels == 2
+        self.dropout = nn.Dropout(rate=self.config.qa_dropout)
+
+    def __call__(
+        self,
+        input_ids,
+        attention_mask,
+        deterministic: bool = True,
+        output_attentions: bool = False,
+        output_hidden_states: bool = False,
+        return_dict: bool = True,
+    ):
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # Model
+        distilbert_output = self.distilbert(
+            input_ids,
+            attention_mask,
+            deterministic=deterministic,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        hidden_states = distilbert_output[0]
+
+        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+        logits = self.qa_outputs(hidden_states)
+        # Split the last axis into num_labels (=2) pieces, then drop the trailing singleton dim.
+        start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
+        start_logits = start_logits.squeeze(-1)
+        end_logits = end_logits.squeeze(-1)
+
+        if not return_dict:
+            return (start_logits, end_logits) + distilbert_output[1:]
+
+        return FlaxQuestionAnsweringModelOutput(
+            start_logits=start_logits,
+            end_logits=end_logits,
+            hidden_states=distilbert_output.hidden_states,
+            attentions=distilbert_output.attentions,
+        )
+
+
+@add_start_docstrings(
+    """
+    DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+    linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+    """,
+    FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForQuestionAnswering(FlaxDistilBertPreTrainedModel):
+    # Question-answering model wired to its task-specific module.
+    module_class = FlaxDistilBertForQuestionAnsweringModule
+
+
+append_call_sample_docstring(
+    FlaxDistilBertForQuestionAnswering,
+    _CHECKPOINT_FOR_DOC,
+    FlaxQuestionAnsweringModelOutput,
+    _CONFIG_FOR_DOC,
+)
+
+
+# Public API of this module.
+__all__ = [
+    "FlaxDistilBertForMaskedLM",
+    "FlaxDistilBertForMultipleChoice",
+    "FlaxDistilBertForQuestionAnswering",
+    "FlaxDistilBertForSequenceClassification",
+    "FlaxDistilBertForTokenClassification",
+    "FlaxDistilBertModel",
+    "FlaxDistilBertPreTrainedModel",
+]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..09b14b89c563b6b34f64ada2df9fc5404121da9f
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py
@@ -0,0 +1,1147 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+TF 2.0 DistilBERT model
+"""
+
+from __future__ import annotations
+
+import warnings
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFMaskedLMOutput,
+ TFMultipleChoiceModelOutput,
+ TFQuestionAnsweringModelOutput,
+ TFSequenceClassifierOutput,
+ TFTokenClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFMultipleChoiceLoss,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_distilbert import DistilBertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
+_CONFIG_FOR_DOC = "DistilBertConfig"
+
+
+class TFEmbeddings(keras.layers.Layer):
+    """Construct the embeddings from word, position and token_type embeddings."""
+
+    def __init__(self, config, **kwargs):
+        super().__init__(**kwargs)
+        self.config = config
+        self.dim = config.dim
+        self.initializer_range = config.initializer_range
+        self.max_position_embeddings = config.max_position_embeddings
+        self.LayerNorm = keras.layers.LayerNormalization(epsilon=1e-12, name="LayerNorm")
+        self.dropout = keras.layers.Dropout(rate=config.dropout)
+
+    def build(self, input_shape=None):
+        # Weight/scope names here must match the pretrained checkpoint layout.
+        with tf.name_scope("word_embeddings"):
+            self.weight = self.add_weight(
+                name="weight",
+                shape=[self.config.vocab_size, self.dim],
+                initializer=get_initializer(initializer_range=self.initializer_range),
+            )
+
+        with tf.name_scope("position_embeddings"):
+            self.position_embeddings = self.add_weight(
+                name="embeddings",
+                shape=[self.max_position_embeddings, self.dim],
+                initializer=get_initializer(initializer_range=self.initializer_range),
+            )
+
+        # NOTE(review): the embedding weights above are created before this built guard,
+        # so only the LayerNorm build below is skipped on re-entry — confirm intended.
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "LayerNorm", None) is not None:
+            with tf.name_scope(self.LayerNorm.name):
+                self.LayerNorm.build([None, None, self.config.dim])
+
+    def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):
+        """
+        Applies embedding based on inputs tensor.
+
+        Returns:
+            final_embeddings (`tf.Tensor`): output embedding tensor.
+        """
+        assert not (input_ids is None and inputs_embeds is None)
+
+        if input_ids is not None:
+            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+        input_shape = shape_list(inputs_embeds)[:-1]
+
+        if position_ids is None:
+            # Default position ids: 0 .. seq_len-1, broadcast over the batch.
+            position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
+
+        position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
+        final_embeddings = inputs_embeds + position_embeds
+        final_embeddings = self.LayerNorm(inputs=final_embeddings)
+        final_embeddings = self.dropout(inputs=final_embeddings, training=training)
+
+        return final_embeddings
+
+
+class TFMultiHeadSelfAttention(keras.layers.Layer):
+    """Multi-head scaled dot-product self-attention for TF DistilBERT."""
+
+    def __init__(self, config, **kwargs):
+        super().__init__(**kwargs)
+
+        self.n_heads = config.n_heads
+        self.dim = config.dim
+        self.dropout = keras.layers.Dropout(config.attention_dropout)
+        self.output_attentions = config.output_attentions
+
+        assert self.dim % self.n_heads == 0, f"Hidden size {self.dim} not dividable by number of heads {self.n_heads}"
+
+        self.q_lin = keras.layers.Dense(
+            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="q_lin"
+        )
+        self.k_lin = keras.layers.Dense(
+            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="k_lin"
+        )
+        self.v_lin = keras.layers.Dense(
+            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="v_lin"
+        )
+        self.out_lin = keras.layers.Dense(
+            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="out_lin"
+        )
+
+        self.pruned_heads = set()
+        self.config = config
+
+    def prune_heads(self, heads):
+        # Head pruning is not supported in the TF implementation.
+        raise NotImplementedError
+
+    def call(self, query, key, value, mask, head_mask, output_attentions, training=False):
+        """
+        Parameters:
+            query: tf.Tensor(bs, seq_length, dim)
+            key: tf.Tensor(bs, seq_length, dim)
+            value: tf.Tensor(bs, seq_length, dim)
+            mask: tf.Tensor(bs, seq_length)
+
+        Returns:
+            weights: tf.Tensor(bs, n_heads, seq_length, seq_length) Attention weights context: tf.Tensor(bs,
+            seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
+        """
+        bs, q_length, dim = shape_list(query)
+        k_length = shape_list(key)[1]
+        # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
+        # assert key.size() == value.size()
+        dim_per_head = int(self.dim / self.n_heads)
+        dim_per_head = tf.cast(dim_per_head, dtype=tf.int32)
+        mask_reshape = [bs, 1, 1, k_length]
+
+        def shape(x):
+            """separate heads"""
+            return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))
+
+        def unshape(x):
+            """group heads"""
+            return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))
+
+        q = shape(self.q_lin(query))  # (bs, n_heads, q_length, dim_per_head)
+        k = shape(self.k_lin(key))  # (bs, n_heads, k_length, dim_per_head)
+        v = shape(self.v_lin(value))  # (bs, n_heads, k_length, dim_per_head)
+        # Scale queries by 1/sqrt(dim_per_head) in float32 for numerical stability.
+        q = tf.cast(q, dtype=tf.float32)
+        q = tf.multiply(q, tf.math.rsqrt(tf.cast(dim_per_head, dtype=tf.float32)))
+        k = tf.cast(k, dtype=q.dtype)
+        scores = tf.matmul(q, k, transpose_b=True)  # (bs, n_heads, q_length, k_length)
+        mask = tf.reshape(mask, mask_reshape)  # (bs, n_heads, qlen, klen)
+        # scores.masked_fill_(mask, -float('inf'))   # (bs, n_heads, q_length, k_length)
+
+        mask = tf.cast(mask, dtype=scores.dtype)
+        # Additive masking: masked positions (mask==0) get a large negative score
+        # so they vanish after the softmax.
+        scores = scores - 1e30 * (1.0 - mask)
+        weights = stable_softmax(scores, axis=-1)  # (bs, n_heads, qlen, klen)
+        weights = self.dropout(weights, training=training)  # (bs, n_heads, qlen, klen)
+
+        # Mask heads if we want to
+        if head_mask is not None:
+            weights = weights * head_mask
+
+        context = tf.matmul(weights, v)  # (bs, n_heads, qlen, dim_per_head)
+        context = unshape(context)  # (bs, q_length, dim)
+        context = self.out_lin(context)  # (bs, q_length, dim)
+
+        if output_attentions:
+            return (context, weights)
+        else:
+            return (context,)
+
+    def build(self, input_shape=None):
+        # Explicitly build the four projection layers with known shapes so weight
+        # creation does not depend on the first call's input.
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "q_lin", None) is not None:
+            with tf.name_scope(self.q_lin.name):
+                self.q_lin.build([None, None, self.config.dim])
+        if getattr(self, "k_lin", None) is not None:
+            with tf.name_scope(self.k_lin.name):
+                self.k_lin.build([None, None, self.config.dim])
+        if getattr(self, "v_lin", None) is not None:
+            with tf.name_scope(self.v_lin.name):
+                self.v_lin.build([None, None, self.config.dim])
+        if getattr(self, "out_lin", None) is not None:
+            with tf.name_scope(self.out_lin.name):
+                self.out_lin.build([None, None, self.config.dim])
+
+
+class TFFFN(keras.layers.Layer):
+    """Position-wise feed-forward sub-layer: lin1 -> activation -> lin2 -> dropout."""
+
+    def __init__(self, config, **kwargs):
+        super().__init__(**kwargs)
+        self.dropout = keras.layers.Dropout(config.dropout)
+        self.lin1 = keras.layers.Dense(
+            config.hidden_dim, kernel_initializer=get_initializer(config.initializer_range), name="lin1"
+        )
+        self.lin2 = keras.layers.Dense(
+            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="lin2"
+        )
+        self.activation = get_tf_activation(config.activation)
+        self.config = config
+
+    def call(self, input, training=False):
+        """Expand to hidden_dim, apply the configured activation, project back to dim, then dropout."""
+        x = self.lin1(input)
+        x = self.activation(x)
+        x = self.lin2(x)
+        x = self.dropout(x, training=training)
+        return x
+
+    def build(self, input_shape=None):
+        # lin1 consumes dim-sized inputs; lin2 consumes hidden_dim-sized inputs.
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "lin1", None) is not None:
+            with tf.name_scope(self.lin1.name):
+                self.lin1.build([None, None, self.config.dim])
+        if getattr(self, "lin2", None) is not None:
+            with tf.name_scope(self.lin2.name):
+                self.lin2.build([None, None, self.config.hidden_dim])
+
+
+class TFTransformerBlock(keras.layers.Layer):
+    """One DistilBERT encoder block: self-attention + residual/LayerNorm, then FFN + residual/LayerNorm."""
+
+    def __init__(self, config, **kwargs):
+        super().__init__(**kwargs)
+
+        self.n_heads = config.n_heads
+        self.dim = config.dim
+        self.hidden_dim = config.hidden_dim
+        self.dropout = keras.layers.Dropout(config.dropout)
+        self.activation = config.activation
+        self.output_attentions = config.output_attentions
+
+        assert (
+            config.dim % config.n_heads == 0
+        ), f"Hidden size {config.dim} not dividable by number of heads {config.n_heads}"
+
+        self.attention = TFMultiHeadSelfAttention(config, name="attention")
+        self.sa_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="sa_layer_norm")
+
+        self.ffn = TFFFN(config, name="ffn")
+        self.output_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="output_layer_norm")
+        self.config = config
+
+    def call(self, x, attn_mask, head_mask, output_attentions, training=False):  # removed: src_enc=None, src_len=None
+        """
+        Parameters:
+            x: tf.Tensor(bs, seq_length, dim)
+            attn_mask: tf.Tensor(bs, seq_length)
+
+        Outputs: sa_weights: tf.Tensor(bs, n_heads, seq_length, seq_length) The attention weights ffn_output:
+        tf.Tensor(bs, seq_length, dim) The output of the transformer block contextualization.
+        """
+        # Self-Attention
+        sa_output = self.attention(x, x, x, attn_mask, head_mask, output_attentions, training=training)
+        if output_attentions:
+            sa_output, sa_weights = sa_output  # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
+        else:  # To handle these `output_attentions` or `output_hidden_states` cases returning tuples
+            # assert type(sa_output) == tuple
+            sa_output = sa_output[0]
+        sa_output = self.sa_layer_norm(sa_output + x)  # (bs, seq_length, dim)
+
+        # Feed Forward Network
+        ffn_output = self.ffn(sa_output, training=training)  # (bs, seq_length, dim)
+        ffn_output = self.output_layer_norm(ffn_output + sa_output)  # (bs, seq_length, dim)
+
+        # Return (ffn_output,) or, when attentions are requested, (sa_weights, ffn_output).
+        output = (ffn_output,)
+        if output_attentions:
+            output = (sa_weights,) + output
+        return output
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "attention", None) is not None:
+            with tf.name_scope(self.attention.name):
+                self.attention.build(None)
+        if getattr(self, "sa_layer_norm", None) is not None:
+            with tf.name_scope(self.sa_layer_norm.name):
+                self.sa_layer_norm.build([None, None, self.config.dim])
+        if getattr(self, "ffn", None) is not None:
+            with tf.name_scope(self.ffn.name):
+                self.ffn.build(None)
+        if getattr(self, "output_layer_norm", None) is not None:
+            with tf.name_scope(self.output_layer_norm.name):
+                self.output_layer_norm.build([None, None, self.config.dim])
+
+
+class TFTransformer(keras.layers.Layer):
+    """Stack of `n_layers` TFTransformerBlock layers run sequentially."""
+
+    def __init__(self, config, **kwargs):
+        super().__init__(**kwargs)
+        self.n_layers = config.n_layers
+        self.output_hidden_states = config.output_hidden_states
+        self.output_attentions = config.output_attentions
+
+        # Layer names ("layer_._{i}") must match the pretrained checkpoint layout.
+        self.layer = [TFTransformerBlock(config, name=f"layer_._{i}") for i in range(config.n_layers)]
+
+    def call(self, x, attn_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False):
+        # docstyle-ignore
+        """
+        Parameters:
+            x: tf.Tensor(bs, seq_length, dim) Input sequence embedded.
+            attn_mask: tf.Tensor(bs, seq_length) Attention mask on the sequence.
+
+        Returns:
+            hidden_state: tf.Tensor(bs, seq_length, dim)
+                Sequence of hidden states in the last (top) layer
+            all_hidden_states: Tuple[tf.Tensor(bs, seq_length, dim)]
+                Tuple of length n_layers with the hidden states from each layer.
+                Optional: only if output_hidden_states=True
+            all_attentions: Tuple[tf.Tensor(bs, n_heads, seq_length, seq_length)]
+                Tuple of length n_layers with the attention weights from each layer
+                Optional: only if output_attentions=True
+
+        NOTE: when return_dict=False the tuple order is (hidden_state, all_hidden_states,
+        all_attentions) with None entries dropped — see the `tuple(...)` construction below.
+        """
+        all_hidden_states = () if output_hidden_states else None
+        all_attentions = () if output_attentions else None
+
+        hidden_state = x
+        for i, layer_module in enumerate(self.layer):
+            if output_hidden_states:
+                all_hidden_states = all_hidden_states + (hidden_state,)
+
+            layer_outputs = layer_module(hidden_state, attn_mask, head_mask[i], output_attentions, training=training)
+            # The block returns (ffn_output,) or (sa_weights, ffn_output); the last item is the new state.
+            hidden_state = layer_outputs[-1]
+
+            if output_attentions:
+                assert len(layer_outputs) == 2
+                attentions = layer_outputs[0]
+                all_attentions = all_attentions + (attentions,)
+            else:
+                assert len(layer_outputs) == 1, f"Incorrect number of outputs {len(layer_outputs)} instead of 1"
+
+        # Add last layer
+        if output_hidden_states:
+            all_hidden_states = all_hidden_states + (hidden_state,)
+
+        if not return_dict:
+            return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
+        return TFBaseModelOutput(
+            last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
+        )
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "layer", None) is not None:
+            for layer in self.layer:
+                with tf.name_scope(layer.name):
+                    layer.build(None)
+
+
@keras_serializable
class TFDistilBertMainLayer(keras.layers.Layer):
    """Core DistilBERT encoder: token embeddings followed by the transformer stack.

    Shared (under the attribute name ``distilbert``) by the bare model and by
    every task-specific model in this file.
    """

    config_class = DistilBertConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.num_hidden_layers = config.num_hidden_layers
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.return_dict = config.use_return_dict

        self.embeddings = TFEmbeddings(config, name="embeddings")  # Embeddings
        self.transformer = TFTransformer(config, name="transformer")  # Encoder

    def get_input_embeddings(self):
        # Embedding layer accessor used by the weight-tying / resizing machinery.
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings.weight = value
        self.embeddings.vocab_size = value.shape[0]

    def _prune_heads(self, heads_to_prune):
        # Head pruning is not implemented in the TF port of DistilBERT.
        raise NotImplementedError

    @unpack_inputs
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        # Exactly one of `input_ids` / `inputs_embeds` must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if attention_mask is None:
            attention_mask = tf.ones(input_shape)  # (bs, seq_length)

        attention_mask = tf.cast(attention_mask, dtype=tf.float32)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            # Per-head masking is not supported here; only the all-None default works.
            raise NotImplementedError
        else:
            head_mask = [None] * self.num_hidden_layers

        embedding_output = self.embeddings(input_ids, inputs_embeds=inputs_embeds)  # (bs, seq_length, dim)
        tfmr_output = self.transformer(
            embedding_output,
            attention_mask,
            head_mask,
            output_attentions,
            output_hidden_states,
            return_dict,
            training=training,
        )

        return tfmr_output  # last-layer hidden-state, (all hidden_states), (all attentions)

    def build(self, input_shape=None):
        # Build sub-layers inside their own name scopes so checkpoint weight
        # names stay stable across save/load.
        if self.built:
            return
        self.built = True
        if getattr(self, "embeddings", None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, "transformer", None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
+
+
# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
class TFDistilBertPreTrainedModel(TFPreTrainedModel):
    """
    Abstract base shared by all TF DistilBERT models. It wires the configuration
    class and checkpoint prefix into the generic weight-initialization and
    pretrained-checkpoint loading machinery of [`TFPreTrainedModel`].
    """

    config_class = DistilBertConfig
    base_model_prefix = "distilbert"
+
+
# Class-level docstring fragment injected by @add_start_docstrings on every model below.
DISTILBERT_START_DOCSTRING = r"""

    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
    behavior.



    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!



    Parameters:
        config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

# Per-`call` input docstring; the `{0}` placeholder is .format()-ed with the
# expected input shape (e.g. "batch_size, sequence_length") by each model.
DISTILBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""
+
+
@add_start_docstrings(
    "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertModel(TFDistilBertPreTrainedModel):
    """Bare DistilBERT model: a thin wrapper around `TFDistilBertMainLayer`."""

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")  # Embeddings

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        # Pure pass-through: all of the work happens in the shared main layer.
        outputs = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        return outputs

    def build(self, input_shape=None):
        # Build the main layer under its own name scope for stable weight names.
        if self.built:
            return
        self.built = True
        if getattr(self, "distilbert", None) is not None:
            with tf.name_scope(self.distilbert.name):
                self.distilbert.build(None)
+
+
class TFDistilBertLMHead(keras.layers.Layer):
    """Masked-LM output head projecting hidden states onto the vocabulary.

    The projection reuses (ties) the input embedding matrix; the only variable
    owned by this layer is a per-token output bias.
    """

    def __init__(self, config, input_embeddings, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.dim = config.dim

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.input_embeddings = input_embeddings

    def build(self, input_shape):
        # Single trainable variable: the per-vocabulary-entry output bias.
        self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
        super().build(input_shape)

    def get_output_embeddings(self):
        return self.input_embeddings

    def set_output_embeddings(self, value):
        self.input_embeddings.weight = value
        self.input_embeddings.vocab_size = shape_list(value)[0]

    def get_bias(self):
        return {"bias": self.bias}

    def set_bias(self, value):
        self.bias = value["bias"]
        self.config.vocab_size = shape_list(value["bias"])[0]

    def call(self, hidden_states):
        # Flatten to 2-D, multiply by the (tied) embedding matrix transposed,
        # then restore the (bs, seq, vocab) shape and add the bias.
        seq_length = shape_list(hidden_states)[1]
        projected = tf.reshape(hidden_states, [-1, self.dim])
        projected = tf.matmul(projected, self.input_embeddings.weight, transpose_b=True)
        projected = tf.reshape(projected, [-1, seq_length, self.config.vocab_size])
        return tf.nn.bias_add(projected, self.bias)
+
+
@add_start_docstrings(
    """DistilBert Model with a `masked language modeling` head on top.""",
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel, TFMaskedLanguageModelingLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.config = config

        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
        # MLM head: dense transform -> activation -> layer norm -> tied-embedding projection.
        self.vocab_transform = keras.layers.Dense(
            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="vocab_transform"
        )
        self.act = get_tf_activation(config.activation)
        self.vocab_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="vocab_layer_norm")
        # Projection shares weights with the input embeddings (weight tying).
        self.vocab_projector = TFDistilBertLMHead(config, self.distilbert.embeddings, name="vocab_projector")

    def get_lm_head(self):
        return self.vocab_projector

    def get_prefix_bias_name(self):
        warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
        return self.name + "/" + self.vocab_projector.name

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        distilbert_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        hidden_states = distilbert_output[0]  # (bs, seq_length, dim)
        prediction_logits = self.vocab_transform(hidden_states)  # (bs, seq_length, dim)
        prediction_logits = self.act(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_layer_norm(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_projector(prediction_logits)

        # Loss is only computed when labels are provided.
        loss = None if labels is None else self.hf_compute_loss(labels, prediction_logits)

        if not return_dict:
            output = (prediction_logits,) + distilbert_output[1:]
            return ((loss,) + output) if loss is not None else output

        return TFMaskedLMOutput(
            loss=loss,
            logits=prediction_logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )

    def build(self, input_shape=None):
        # Build sub-layers under their own name scopes to keep weight names stable.
        if self.built:
            return
        self.built = True
        if getattr(self, "distilbert", None) is not None:
            with tf.name_scope(self.distilbert.name):
                self.distilbert.build(None)
        if getattr(self, "vocab_transform", None) is not None:
            with tf.name_scope(self.vocab_transform.name):
                self.vocab_transform.build([None, None, self.config.dim])
        if getattr(self, "vocab_layer_norm", None) is not None:
            with tf.name_scope(self.vocab_layer_norm.name):
                self.vocab_layer_norm.build([None, None, self.config.dim])
        if getattr(self, "vocab_projector", None) is not None:
            with tf.name_scope(self.vocab_projector.name):
                self.vocab_projector.build(None)
+
+
@add_start_docstrings(
    """
    DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels

        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
        self.pre_classifier = keras.layers.Dense(
            config.dim,
            kernel_initializer=get_initializer(config.initializer_range),
            activation="relu",
            name="pre_classifier",
        )
        self.classifier = keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )
        self.dropout = keras.layers.Dropout(config.seq_classif_dropout)
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        distilbert_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        hidden_state = distilbert_output[0]  # (bs, seq_len, dim)
        # "Pooling" is simply taking the hidden state of the first token.
        pooled_output = hidden_state[:, 0]  # (bs, dim)
        pooled_output = self.pre_classifier(pooled_output)  # (bs, dim)
        pooled_output = self.dropout(pooled_output, training=training)  # (bs, dim)
        logits = self.classifier(pooled_output)  # (bs, dim)

        loss = None if labels is None else self.hf_compute_loss(labels, logits)

        if not return_dict:
            output = (logits,) + distilbert_output[1:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )

    def build(self, input_shape=None):
        # Build sub-layers under their own name scopes to keep weight names stable.
        if self.built:
            return
        self.built = True
        if getattr(self, "distilbert", None) is not None:
            with tf.name_scope(self.distilbert.name):
                self.distilbert.build(None)
        if getattr(self, "pre_classifier", None) is not None:
            with tf.name_scope(self.pre_classifier.name):
                self.pre_classifier.build([None, None, self.config.dim])
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.dim])
+
+
@add_start_docstrings(
    """
    DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel, TFTokenClassificationLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels

        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
        self.dropout = keras.layers.Dropout(config.dropout)
        self.classifier = keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFTokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        outputs = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output, training=training)
        # Per-token logits: one classification over num_labels for every position.
        logits = self.classifier(sequence_output)
        loss = None if labels is None else self.hf_compute_loss(labels, logits)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFTokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "distilbert", None) is not None:
            with tf.name_scope(self.distilbert.name):
                self.distilbert.build(None)
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                # NOTE(review): sibling heads use `self.config.dim` here; this relies
                # on DistilBertConfig aliasing `hidden_size` to `dim` — confirm.
                self.classifier.build([None, None, self.config.hidden_size])
+
+
@add_start_docstrings(
    """
    DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
    a softmax) e.g. for RocStories/SWAG tasks.
    """,
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForMultipleChoice(TFDistilBertPreTrainedModel, TFMultipleChoiceLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
        self.dropout = keras.layers.Dropout(config.seq_classif_dropout)
        self.pre_classifier = keras.layers.Dense(
            config.dim,
            kernel_initializer=get_initializer(config.initializer_range),
            activation="relu",
            name="pre_classifier",
        )
        # One score per choice; logits are reshaped to (bs, num_choices) in call().
        self.classifier = keras.layers.Dense(
            1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(
        DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
            where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
        """
        if input_ids is not None:
            num_choices = shape_list(input_ids)[1]
            seq_length = shape_list(input_ids)[2]
        else:
            num_choices = shape_list(inputs_embeds)[1]
            seq_length = shape_list(inputs_embeds)[2]

        # Fold the choice dimension into the batch: (bs, num_choices, seq) -> (bs * num_choices, seq).
        flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
        flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
        flat_inputs_embeds = (
            tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
            if inputs_embeds is not None
            else None
        )
        distilbert_output = self.distilbert(
            flat_input_ids,
            flat_attention_mask,
            head_mask,
            flat_inputs_embeds,
            output_attentions,
            output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        hidden_state = distilbert_output[0]  # (bs, seq_len, dim)
        pooled_output = hidden_state[:, 0]  # (bs, dim)
        pooled_output = self.pre_classifier(pooled_output)  # (bs, dim)
        pooled_output = self.dropout(pooled_output, training=training)  # (bs, dim)
        logits = self.classifier(pooled_output)
        # Un-fold the choice dimension: (bs * num_choices, 1) -> (bs, num_choices).
        reshaped_logits = tf.reshape(logits, (-1, num_choices))

        loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)

        if not return_dict:
            output = (reshaped_logits,) + distilbert_output[1:]
            return ((loss,) + output) if loss is not None else output

        return TFMultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )

    def build(self, input_shape=None):
        # Build sub-layers under their own name scopes to keep weight names stable.
        if self.built:
            return
        self.built = True
        if getattr(self, "distilbert", None) is not None:
            with tf.name_scope(self.distilbert.name):
                self.distilbert.build(None)
        if getattr(self, "pre_classifier", None) is not None:
            with tf.name_scope(self.pre_classifier.name):
                self.pre_classifier.build([None, None, self.config.dim])
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.dim])
+
+
@add_start_docstrings(
    """
    DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
    linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel, TFQuestionAnsweringLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
        # Two outputs per token: start-of-span and end-of-span logits.
        self.qa_outputs = keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
        )
        assert config.num_labels == 2, f"Incorrect number of labels {config.num_labels} instead of 2"
        self.dropout = keras.layers.Dropout(config.qa_dropout)
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        start_positions: np.ndarray | tf.Tensor | None = None,
        end_positions: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
        r"""
        start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        distilbert_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        hidden_states = distilbert_output[0]  # (bs, max_query_len, dim)
        hidden_states = self.dropout(hidden_states, training=training)  # (bs, max_query_len, dim)
        logits = self.qa_outputs(hidden_states)  # (bs, max_query_len, 2)
        # Split the 2-channel output into separate start/end logit tensors.
        start_logits, end_logits = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)

        # Loss requires both span endpoints to be labelled.
        loss = None
        if start_positions is not None and end_positions is not None:
            labels = {"start_position": start_positions}
            labels["end_position"] = end_positions
            loss = self.hf_compute_loss(labels, (start_logits, end_logits))

        if not return_dict:
            output = (start_logits, end_logits) + distilbert_output[1:]
            return ((loss,) + output) if loss is not None else output

        return TFQuestionAnsweringModelOutput(
            loss=loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )

    def build(self, input_shape=None):
        # Build sub-layers under their own name scopes to keep weight names stable.
        if self.built:
            return
        self.built = True
        if getattr(self, "distilbert", None) is not None:
            with tf.name_scope(self.distilbert.name):
                self.distilbert.build(None)
        if getattr(self, "qa_outputs", None) is not None:
            with tf.name_scope(self.qa_outputs.name):
                self.qa_outputs.build([None, None, self.config.dim])
+
+
# Public API of this module, re-exported by transformers.models.distilbert.
__all__ = [
    "TFDistilBertForMaskedLM",
    "TFDistilBertForMultipleChoice",
    "TFDistilBertForQuestionAnswering",
    "TFDistilBertForSequenceClassification",
    "TFDistilBertForTokenClassification",
    "TFDistilBertMainLayer",
    "TFDistilBertModel",
    "TFDistilBertPreTrainedModel",
]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..c894211a2e0acf2bd82858186e88f7e3e99f672e
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py
@@ -0,0 +1,522 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for DistilBERT."""
+
+import collections
+import os
+import unicodedata
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
+
+
+# Copied from transformers.models.bert.tokenization_bert.load_vocab
def load_vocab(vocab_file):
    """Read a newline-delimited vocabulary file into an ordered token -> id mapping."""
    with open(vocab_file, "r", encoding="utf-8") as reader:
        lines = reader.readlines()
    # The 0-based line number becomes the token id; only the trailing newline
    # is stripped so tokens containing other whitespace survive intact.
    return collections.OrderedDict(
        (line.rstrip("\n"), idx) for idx, line in enumerate(lines)
    )
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
def whitespace_tokenize(text):
    """Strip surrounding whitespace and split *text* on runs of whitespace."""
    stripped = text.strip()
    # An empty or all-whitespace input must yield [] rather than [""].
    if not stripped:
        return []
    return stripped.split()
+
+
class DistilBertTokenizer(PreTrainedTokenizer):
    r"""
    Construct a DistilBERT tokenizer. Based on WordPiece.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        do_basic_tokenize (`bool`, *optional*, defaults to `True`):
            Whether or not to do basic tokenization before WordPiece.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
            Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
            extra spaces.
    """

    # File names this tokenizer reads/writes when loaded or saved locally.
    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        clean_up_tokenization_spaces=True,
        **kwargs,
    ):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = DistilBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        # NOTE: the vocab and sub-tokenizers are set up BEFORE super().__init__,
        # presumably so the base class can tokenize special tokens during its
        # own setup — keep this ordering.
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping (id -> token), used when decoding.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))

        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

    @property
    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
    def do_lower_case(self):
        """Whether the basic tokenizer lowercases input text."""
        return self.basic_tokenizer.do_lower_case

    @property
    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.vocab)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
    def get_vocab(self):
        """Return the full vocabulary (base vocab plus added tokens) as a dict."""
        return dict(self.vocab, **self.added_tokens_encoder)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
    def _tokenize(self, text, split_special_tokens=False):
        """Split `text` into wordpiece sub-tokens, optionally after basic tokenization."""
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(
                text, never_split=self.all_special_tokens if not split_special_tokens else None
            ):
                # If the token is part of the never_split set
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        # Undo the wordpiece "##" continuation markers when joining.
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # 1 marks the positions where [CLS]/[SEP] will be inserted.
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary, one token per line in id order, and return the file path."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            # `save_directory` may also be a full file path; use it directly.
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            # Line number encodes the token id, so warn if ids are not consecutive.
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
class BasicTokenizer:
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        do_split_on_punc (`bool`, *optional*, defaults to `True`):
            In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
            the full context of the words, such as contractions.
    """

    def __init__(
        self,
        do_lower_case=True,
        never_split=None,
        tokenize_chinese_chars=True,
        strip_accents=None,
        do_split_on_punc=True,
    ):
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        # Stored as a set for O(1) membership tests in tokenize().
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        self.do_split_on_punc = do_split_on_punc

    def tokenize(self, text, never_split=None):
        """
        Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.

        Args:
            never_split (`List[str]`, *optional*)
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                [`PreTrainedTokenizer.tokenize`]) List of token not to split.
        """
        # union() returns a new set by concatenating the two sets.
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # prevents treating the same character with different unicode codepoints as different characters
        unicode_normalized_text = unicodedata.normalize("NFC", text)
        orig_tokens = whitespace_tokenize(unicode_normalized_text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    # When strip_accents is left unset (None), lowercasing also
                    # strips accents — this matches the original BERT behavior.
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            # "Mn" = nonspacing combining mark, i.e. the accent itself after
            # NFD decomposition; drop it and keep the base character.
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        # Each punctuation character becomes its own token; runs of
        # non-punctuation characters are grouped together.
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL bytes, the Unicode replacement character, and control chars.
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
+
+
+# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
class WordpieceTokenizer:
    """Runs WordPiece tokenization (greedy longest-match-first)."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        # vocab maps known pieces to ids; continuation pieces carry a "##" prefix.
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through *BasicTokenizer*.

        Returns:
            A list of wordpiece tokens.
        """
        output_tokens = []
        for word in whitespace_tokenize(text):
            # Overly long words are mapped to the unknown token wholesale.
            if len(word) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            sub_tokens = []
            start = 0
            failed = False
            while start < len(word):
                # Greedy longest match: try the longest remaining substring
                # first and shrink the candidate from the right.
                end = len(word)
                match = None
                while start < end:
                    candidate = word[start:end]
                    if start > 0:
                        # Non-initial pieces are looked up with the "##" prefix.
                        candidate = "##" + candidate
                    if candidate in self.vocab:
                        match = candidate
                        break
                    end -= 1
                if match is None:
                    # No vocab entry covers this position: the whole word is unknown.
                    failed = True
                    break
                sub_tokens.append(match)
                start = end

            output_tokens.extend([self.unk_token] if failed else sub_tokens)
        return output_tokens
+
+
# Public API of this module.
__all__ = ["DistilBertTokenizer"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3829763d5e7ab8e2a338c53a0f7dd50c3e4b737
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py
@@ -0,0 +1,179 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for DistilBERT."""
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import normalizers
+
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_distilbert import DistilBertTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
+
+
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        clean_text (`bool`, *optional*, defaults to `True`):
            Whether or not to clean the text before tokenization by removing any control characters and replacing all
            whitespaces by the classic one.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
            issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
            The prefix for subwords.
    """

    # File names this tokenizer reads/writes when loaded or saved locally.
    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    # Python (slow) counterpart used for slow<->fast conversion.
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # The backend normalizer was deserialized from the tokenizer file; if
        # its recorded options disagree with the arguments requested here,
        # rebuild it so the normalizer and the arguments stay in sync.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
+
+
# Public API of this module.
__all__ = ["DistilBertTokenizerFast"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/__init__.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab009d931204a853fe6d3e45093d94d97ac571e7
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
if TYPE_CHECKING:
    # For static type checkers / IDEs: expose the submodule symbols eagerly.
    from .configuration_levit import *
    from .feature_extraction_levit import *
    from .image_processing_levit import *
    from .modeling_levit import *
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy submodules
    # are only imported on first attribute access.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/configuration_levit.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/configuration_levit.py
new file mode 100644
index 0000000000000000000000000000000000000000..b15cc11226aaa45169bdb8b2adeb2898d6cf4d69
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/configuration_levit.py
@@ -0,0 +1,144 @@
+# coding=utf-8
+# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""LeViT model configuration"""
+
+from collections import OrderedDict
+from typing import Mapping
+
+from packaging import version
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
class LevitConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LevitModel`]. It is used to instantiate a LeViT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the LeViT
    [facebook/levit-128S](https://huggingface.co/facebook/levit-128S) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`int`, *optional*, defaults to 224):
            The size of the input image.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input image.
        kernel_size (`int`, *optional*, defaults to 3):
            The kernel size for the initial convolution layers of patch embedding.
        stride (`int`, *optional*, defaults to 2):
            The stride size for the initial convolution layers of patch embedding.
        padding (`int`, *optional*, defaults to 1):
            The padding size for the initial convolution layers of patch embedding.
        patch_size (`int`, *optional*, defaults to 16):
            The patch size for embeddings.
        hidden_sizes (`List[int]`, *optional*, defaults to `[128, 256, 384]`):
            Dimension of each of the encoder blocks.
        num_attention_heads (`List[int]`, *optional*, defaults to `[4, 8, 12]`):
            Number of attention heads for each attention layer in each block of the Transformer encoder.
        depths (`List[int]`, *optional*, defaults to `[4, 4, 4]`):
            The number of layers in each encoder block.
        key_dim (`List[int]`, *optional*, defaults to `[16, 16, 16]`):
            The size of key in each of the encoder blocks.
        drop_path_rate (`float`, *optional*, defaults to 0):
            The dropout probability for stochastic depths, used in the blocks of the Transformer encoder.
        mlp_ratio (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
            Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
            encoder blocks.
        attention_ratio (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
            Ratio of the size of the output dimension compared to input dimension of attention layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers import LevitConfig, LevitModel

    >>> # Initializing a LeViT levit-128S style configuration
    >>> configuration = LevitConfig()

    >>> # Initializing a model (with random weights) from the levit-128S style configuration
    >>> model = LevitModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=None,
        num_attention_heads=None,
        depths=None,
        key_dim=None,
        drop_path_rate=0,
        mlp_ratio=None,
        attention_ratio=None,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        # The per-stage list arguments use None sentinels instead of mutable
        # list defaults: with `hidden_sizes=[128, 256, 384]` in the signature,
        # a caller mutating e.g. `config.hidden_sizes` in place would corrupt
        # the shared default for every subsequently created LevitConfig.
        self.hidden_sizes = [128, 256, 384] if hidden_sizes is None else hidden_sizes
        self.num_attention_heads = [4, 8, 12] if num_attention_heads is None else num_attention_heads
        self.depths = [4, 4, 4] if depths is None else depths
        self.key_dim = [16, 16, 16] if key_dim is None else key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = [2, 2, 2] if attention_ratio is None else attention_ratio
        self.mlp_ratio = [2, 2, 2] if mlp_ratio is None else mlp_ratio
        self.initializer_range = initializer_range
        # Downsampling ("Subsample") operations inserted between encoder stages.
        self.down_ops = [
            ["Subsample", self.key_dim[0], self.hidden_sizes[0] // self.key_dim[0], 4, 2, 2],
            ["Subsample", self.key_dim[0], self.hidden_sizes[1] // self.key_dim[0], 4, 2, 2],
        ]
+
+
+# Copied from transformers.models.vit.configuration_vit.ViTOnnxConfig
class LevitOnnxConfig(OnnxConfig):
    """ONNX export configuration for LeViT (single `pixel_values` image input)."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # All four axes of the image tensor are dynamic at export time.
        dynamic_axes = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        return OrderedDict([("pixel_values", dynamic_axes)])

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported outputs against PyTorch.
        return 1e-4
+
+
# Public API of this module.
__all__ = ["LevitConfig", "LevitOnnxConfig"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/convert_levit_timm_to_pytorch.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/convert_levit_timm_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..afef3f73de6c8469ee9403cbc9da68869a6357a3
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/convert_levit_timm_to_pytorch.py
@@ -0,0 +1,180 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert LeViT checkpoints from timm."""
+
+import argparse
+import json
+from collections import OrderedDict
+from functools import partial
+from pathlib import Path
+
+import timm
+import torch
+from huggingface_hub import hf_hub_download
+
+from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger()
+
+
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    """
    Convert one pretrained timm LeViT checkpoint into a HuggingFace
    `LevitForImageClassificationWithTeacher` checkpoint, verify the logits match, and save the
    model plus an image processor under `save_directory / name`.

    Args:
        hidden_sizes: First-stage hidden size identifying the timm variant (128, 192, 256 or 384).
        name: Target checkpoint name (e.g. "levit-128S"); a trailing "S" selects `levit_128s`.
        config: Fully populated `LevitConfig` for the target architecture.
        save_directory: Root directory the converted checkpoint is written into.
        push_to_hub: When True, save the converted model and image processor to disk.

    Raises:
        ValueError: If `hidden_sizes` does not correspond to a known LeViT variant.
    """
    print(f"Converting {name}...")

    with torch.no_grad():
        # Map the requested width to the matching pretrained timm model. The original
        # non-exclusive `if` chain left `from_model` unbound for unknown sizes, crashing
        # later with UnboundLocalError instead of a clear error.
        if hidden_sizes == 128:
            timm_model_name = "levit_128s" if name[-1] == "S" else "levit_128"
        elif hidden_sizes == 192:
            timm_model_name = "levit_192"
        elif hidden_sizes == 256:
            timm_model_name = "levit_256"
        elif hidden_sizes == 384:
            timm_model_name = "levit_384"
        else:
            raise ValueError(f"Unsupported hidden_sizes for LeViT conversion: {hidden_sizes}")
        from_model = timm.create_model(timm_model_name, pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()

        # The two state dicts enumerate tensors in the same order, so weights are copied
        # positionally from the timm keys onto the HuggingFace keys.
        weights = from_model.state_dict()
        og_keys = list(weights.keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        huggingface_weights = OrderedDict(
            (new_key, weights[og_key]) for og_key, new_key in zip(og_keys, new_keys)
        )
        our_model.load_state_dict(huggingface_weights)

        # Sanity check: both models must agree on random input before saving anything.
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

        assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)

        print(f"Pushed {checkpoint_name}")
+
+
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """
    Convert one (or all) LeViT checkpoints from timm and save them under `save_directory`.

    Args:
        save_directory: Root output directory for converted checkpoints.
        model_name: Optional single variant to convert (e.g. "levit-256"); when None,
            every known variant is converted.
        push_to_hub: Forwarded to `convert_weight_and_push`.

    Returns:
        `(config, expected_shape)` where `config` is the last converted `LevitConfig` and
        `expected_shape` is the classifier output shape `(1, 1000)`.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    # Load the ImageNet-1k id->label mapping from the Hub; use a context manager so the
    # file handle is closed (the original `json.load(open(...))` leaked it).
    with open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r") as label_file:
        id2label = json.load(label_file)
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Partial that bakes the ImageNet label metadata into every config below.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 6, 8],
            depths=[2, 3, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 8, 12],
            depths=[4, 4, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384],
            num_attention_heads=[3, 5, 6],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512],
            num_attention_heads=[4, 6, 8],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768],
            num_attention_heads=[6, 9, 12],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0.1,
        ),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    # `config` is now bound in both branches; previously the single-model path could
    # reach a return with `config` unbound.
    return config, expected_shape
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--model_name",
+ default=None,
+ type=str,
+ help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default="levit-dump-folder/",
+ type=Path,
+ required=False,
+ help="Path to the output PyTorch model directory.",
+ )
+ parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
+ parser.add_argument(
+ "--no-push_to_hub",
+ dest="push_to_hub",
+ action="store_false",
+ help="Do not push model and image processor to the hub",
+ )
+
+ args = parser.parse_args()
+ pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
+ pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
+ convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/feature_extraction_levit.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/feature_extraction_levit.py
new file mode 100644
index 0000000000000000000000000000000000000000..41301a5171bac9d48981e9417b5ec8e3f18bcb4a
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/feature_extraction_levit.py
@@ -0,0 +1,36 @@
+# coding=utf-8
+# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for LeViT."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_levit import LevitImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
class LevitFeatureExtractor(LevitImageProcessor):
    """Deprecated alias of `LevitImageProcessor`; warns on instantiation and delegates everything."""

    def __init__(self, *args, **kwargs) -> None:
        deprecation_message = (
            "The class LevitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use LevitImageProcessor instead."
        )
        warnings.warn(deprecation_message, FutureWarning)
        super().__init__(*args, **kwargs)
+
+
+__all__ = ["LevitFeatureExtractor"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/image_processing_levit.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/image_processing_levit.py
new file mode 100644
index 0000000000000000000000000000000000000000..b20a08e20bf4622ef8915812f1171c28e6807ecf
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/image_processing_levit.py
@@ -0,0 +1,309 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for LeViT."""
+
+from typing import Dict, Iterable, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+ get_resize_output_image_size,
+ resize,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ IMAGENET_DEFAULT_MEAN,
+ IMAGENET_DEFAULT_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, filter_out_non_signature_kwargs, logging
+
+
+logger = logging.get_logger(__name__)
+
+
class LevitImageProcessor(BaseImageProcessor):
    r"""
    Constructs a LeViT image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the shortest edge of the input to int(256/224 *`size`). Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
            Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will
            be resized to `(size["height"], size["width"])`. If size is a dict with key "shortest_edge", the shortest
            edge value `c` is rescaled to `int(c * (256/224))`. The smaller edge of the image will be matched to this
            value i.e, if height > width, then image will be rescaled to `(size["shortest_edge"] * height / width,
            size["shortest_edge"])`. Can be overridden by the `size` parameter in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
            `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether or not to center crop the input to `(crop_size["height"], crop_size["width"])`. Can be overridden
            by the `do_center_crop` parameter in the `preprocess` method.
        crop_size (`Dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
            Desired image size after `center_crop`. Can be overridden by the `crop_size` parameter in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
            `preprocess` method.
        image_mean (`List[int]`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`List[int]`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Normalize the size dicts once here so downstream methods see canonical keys.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image.

        If size is a dict with keys "width" and "height", the image will be resized to `(size["height"],
        size["width"])`.

        If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to `int(c * (256/224))`.
        The smaller edge of the image will be matched to this value i.e, if height > width, then image will be rescaled
        to `(size["shortest_edge"] * height / width, size["shortest_edge"])`.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
                will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value
                `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value
                i.e, if height > width, then image will be rescaled to (size * height / width, size).
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.

        Raises:
            ValueError: If `size` contains neither ("height", "width") nor "shortest_edge".
        """
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            # LeViT resizes the short side to 256/224 of the requested value (ImageNet crop convention),
            # keeping the aspect ratio.
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(
                image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format
            )
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image,
            size=(size_dict["height"], size_dict["width"]),
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> BatchFeature:
        """
        Preprocess an image or batch of images to be used as input to a LeViT model.

        Args:
            images (`ImageInput`):
                Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
                from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
                will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value
                `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value
                i.e, if height > width, then image will be rescaled to (size * height / width, size).
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the output image after center cropping. Crops images to (crop_size["height"],
                crop_size["width"]).
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image pixel values by `rescaling_factor` - typical to values between 0 and 1.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Factor to rescale the image pixel values by.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image pixel values by `image_mean` and `image_std`.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Mean to normalize the image pixel values by.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Standard deviation to normalize the image pixel values by.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`str` or `ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        # Resolve every unset argument from the instance defaults configured in __init__.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        # Pipeline order: resize -> center crop -> rescale -> normalize -> layout conversion.
        if do_resize:
            images = [self.resize(image, size, resample, input_data_format=input_data_format) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size, input_data_format=input_data_format) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]

        if do_normalize:
            images = [
                self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
            ]

        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
        ]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
+
+
+__all__ = ["LevitImageProcessor"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/modeling_levit.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/modeling_levit.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1825f7a3693279cce441dde4eab46ff97628f52
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/levit/modeling_levit.py
@@ -0,0 +1,743 @@
+# coding=utf-8
+# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch LeViT model."""
+
+import itertools
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...modeling_outputs import (
+ BaseModelOutputWithNoAttention,
+ BaseModelOutputWithPoolingAndNoAttention,
+ ImageClassifierOutputWithNoAttention,
+ ModelOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_levit import LevitConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "LevitConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "facebook/levit-128S"
+_EXPECTED_OUTPUT_SHAPE = [1, 16, 384]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "facebook/levit-128S"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+
@dataclass
class LevitForImageClassificationWithTeacherOutput(ModelOutput):
    """
    Output type of [`LevitForImageClassificationWithTeacher`].

    Args:
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Prediction scores as the average of the `cls_logits` and `distillation_logits`.
        cls_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the
            class token).
        distillation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
            distillation token).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
    """

    # All fields default to None so partially populated outputs are representable.
    logits: Optional[torch.FloatTensor] = None
    cls_logits: Optional[torch.FloatTensor] = None
    distillation_logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
class LevitConvEmbeddings(nn.Module):
    """
    Convolution + BatchNorm building block used by the LeViT patch-embedding stem.
    """

    def __init__(
        self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bn_weight_init=1
    ):
        super().__init__()
        # The convolution is bias-free: the following BatchNorm provides the affine shift.
        # NOTE(review): bn_weight_init is accepted but not used in this block — presumably
        # consumed by an external weight-init routine; confirm against the full file.
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size, stride, padding, dilation=dilation, groups=groups, bias=False
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)

    def forward(self, embeddings):
        return self.batch_norm(self.convolution(embeddings))
+
+
class LevitPatchEmbeddings(nn.Module):
    """
    LeViT patch embeddings, for final embeddings to be passed to transformer blocks. It consists of multiple
    `LevitConvEmbeddings`.
    """

    def __init__(self, config):
        super().__init__()
        # Channel schedule of the 4-stage convolutional stem: C/8 -> C/4 -> C/2 -> C.
        stem_width = config.hidden_sizes[0]
        conv_args = (config.kernel_size, config.stride, config.padding)

        self.embedding_layer_1 = LevitConvEmbeddings(config.num_channels, stem_width // 8, *conv_args)
        self.activation_layer_1 = nn.Hardswish()

        self.embedding_layer_2 = LevitConvEmbeddings(stem_width // 8, stem_width // 4, *conv_args)
        self.activation_layer_2 = nn.Hardswish()

        self.embedding_layer_3 = LevitConvEmbeddings(stem_width // 4, stem_width // 2, *conv_args)
        self.activation_layer_3 = nn.Hardswish()

        self.embedding_layer_4 = LevitConvEmbeddings(stem_width // 2, stem_width, *conv_args)
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        if pixel_values.shape[1] != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden = self.activation_layer_1(self.embedding_layer_1(pixel_values))
        hidden = self.activation_layer_2(self.embedding_layer_2(hidden))
        hidden = self.activation_layer_3(self.embedding_layer_3(hidden))
        hidden = self.embedding_layer_4(hidden)
        # (batch, channels, height, width) -> (batch, height*width, channels) token sequence.
        return hidden.flatten(2).transpose(1, 2)
+
+
class MLPLayerWithBN(nn.Module):
    """Bias-free linear projection followed by BatchNorm1d over the flattened tokens."""

    def __init__(self, input_dim, output_dim, bn_weight_init=1):
        super().__init__()
        self.linear = nn.Linear(in_features=input_dim, out_features=output_dim, bias=False)
        self.batch_norm = nn.BatchNorm1d(output_dim)

    def forward(self, hidden_state):
        projected = self.linear(hidden_state)
        # BatchNorm1d expects (N, C): merge batch and sequence dims, then restore the shape.
        normalized = self.batch_norm(projected.flatten(0, 1))
        return normalized.reshape_as(projected)
+
+
class LevitSubsample(nn.Module):
    """Subsample a token sequence by keeping every `stride`-th token along each spatial axis."""

    def __init__(self, stride, resolution):
        super().__init__()
        self.stride = stride
        self.resolution = resolution

    def forward(self, hidden_state):
        batch_size, _, channels = hidden_state.shape
        # Reinterpret the flat token sequence as a 2D grid, keep every stride-th
        # row and column, then flatten back to a (shorter) token sequence.
        grid = hidden_state.view(batch_size, self.resolution, self.resolution, channels)
        sampled = grid[:, :: self.stride, :: self.stride]
        return sampled.reshape(batch_size, -1, channels)
+
+
class LevitAttention(nn.Module):
    """
    LeViT self-attention with a learned, offset-dependent attention bias.

    One bias value is stored per unique (|dx|, |dy|) offset between grid points and gathered
    via the `attention_bias_idxs` buffer; in eval mode the gathered tensor is cached per device.
    """

    def __init__(self, hidden_sizes, key_dim, num_attention_heads, attention_ratio, resolution):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.scale = key_dim**-0.5
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        # Fused projection width: values (ratio * key_dim) plus queries and keys (key_dim each).
        self.out_dim_keys_values = attention_ratio * key_dim * num_attention_heads + key_dim * num_attention_heads * 2
        self.out_dim_projection = attention_ratio * key_dim * num_attention_heads

        self.queries_keys_values = MLPLayerWithBN(hidden_sizes, self.out_dim_keys_values)
        self.activation = nn.Hardswish()
        self.projection = MLPLayerWithBN(self.out_dim_projection, hidden_sizes, bn_weight_init=0)

        # Enumerate every grid position and index each (query, key) pair by its absolute offset.
        points = [(row, col) for row in range(resolution) for col in range(resolution)]
        len_points = len(points)
        attention_offsets, indices = {}, []
        for first in points:
            for second in points:
                offset = (abs(first[0] - second[0]), abs(first[1] - second[1]))
                indices.append(attention_offsets.setdefault(offset, len(attention_offsets)))

        self.attention_bias_cache = {}
        self.attention_biases = torch.nn.Parameter(torch.zeros(num_attention_heads, len(attention_offsets)))
        self.register_buffer(
            "attention_bias_idxs", torch.LongTensor(indices).view(len_points, len_points), persistent=False
        )

    @torch.no_grad()
    def train(self, mode=True):
        super().train(mode)
        # Switching back to training invalidates the per-device bias cache.
        if mode and self.attention_bias_cache:
            self.attention_bias_cache = {}  # clear ab cache

    def get_attention_biases(self, device):
        if self.training:
            return self.attention_biases[:, self.attention_bias_idxs]
        device_key = str(device)
        if device_key not in self.attention_bias_cache:
            self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
        return self.attention_bias_cache[device_key]

    def forward(self, hidden_state):
        batch_size, seq_length, _ = hidden_state.shape
        qkv = self.queries_keys_values(hidden_state).view(batch_size, seq_length, self.num_attention_heads, -1)
        query, key, value = qkv.split([self.key_dim, self.key_dim, self.attention_ratio * self.key_dim], dim=3)
        # (batch, seq, heads, dim) -> (batch, heads, seq, dim)
        query = query.permute(0, 2, 1, 3)
        key = key.permute(0, 2, 1, 3)
        value = value.permute(0, 2, 1, 3)

        scores = query @ key.transpose(-2, -1) * self.scale + self.get_attention_biases(hidden_state.device)
        probs = scores.softmax(dim=-1)
        context = (probs @ value).transpose(1, 2).reshape(batch_size, seq_length, self.out_dim_projection)
        return self.projection(self.activation(context))
+
+
+class LevitAttentionSubsample(nn.Module):
    def __init__(
        self,
        input_dim,
        output_dim,
        key_dim,
        num_attention_heads,
        attention_ratio,
        stride,
        resolution_in,
        resolution_out,
    ):
        # Downsampling attention stage: queries come from a strided subsample of the
        # input grid (resolution_in -> resolution_out), keys/values from the full grid.
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.scale = key_dim**-0.5
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        # Fused keys+values projection width; queries get their own projection below.
        self.out_dim_keys_values = attention_ratio * key_dim * num_attention_heads + key_dim * num_attention_heads
        self.out_dim_projection = attention_ratio * key_dim * num_attention_heads
        self.resolution_out = resolution_out
        # resolution_in is the initial resolution, resolution_out is the final resolution after downsampling
        self.keys_values = MLPLayerWithBN(input_dim, self.out_dim_keys_values)
        self.queries_subsample = LevitSubsample(stride, resolution_in)
        self.queries = MLPLayerWithBN(input_dim, key_dim * num_attention_heads)
        self.activation = nn.Hardswish()
        self.projection = MLPLayerWithBN(self.out_dim_projection, output_dim)

        # Per-device cache for the gathered attention biases (eval mode only).
        self.attention_bias_cache = {}

        # Index each (subsampled query point, full-grid key point) pair by its absolute offset;
        # one learnable bias is shared per unique offset.
        points = list(itertools.product(range(resolution_in), range(resolution_in)))
        points_ = list(itertools.product(range(resolution_out), range(resolution_out)))
        len_points, len_points_ = len(points), len(points_)
        attention_offsets, indices = {}, []
        for p1 in points_:
            for p2 in points:
                size = 1
                offset = (abs(p1[0] * stride - p2[0] + (size - 1) / 2), abs(p1[1] * stride - p2[1] + (size - 1) / 2))
                if offset not in attention_offsets:
                    attention_offsets[offset] = len(attention_offsets)
                indices.append(attention_offsets[offset])

        self.attention_biases = torch.nn.Parameter(torch.zeros(num_attention_heads, len(attention_offsets)))
        self.register_buffer(
            "attention_bias_idxs", torch.LongTensor(indices).view(len_points_, len_points), persistent=False
        )
+
+ @torch.no_grad()
+ def train(self, mode=True):
+ super().train(mode)
+ if mode and self.attention_bias_cache:
+ self.attention_bias_cache = {} # clear ab cache
+
+ def get_attention_biases(self, device):
+ if self.training:
+ return self.attention_biases[:, self.attention_bias_idxs]
+ else:
+ device_key = str(device)
+ if device_key not in self.attention_bias_cache:
+ self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
+ return self.attention_bias_cache[device_key]
+
+ def forward(self, hidden_state):
+ batch_size, seq_length, _ = hidden_state.shape
+ key, value = (
+ self.keys_values(hidden_state)
+ .view(batch_size, seq_length, self.num_attention_heads, -1)
+ .split([self.key_dim, self.attention_ratio * self.key_dim], dim=3)
+ )
+ key = key.permute(0, 2, 1, 3)
+ value = value.permute(0, 2, 1, 3)
+
+ query = self.queries(self.queries_subsample(hidden_state))
+ query = query.view(batch_size, self.resolution_out**2, self.num_attention_heads, self.key_dim).permute(
+ 0, 2, 1, 3
+ )
+
+ attention = query @ key.transpose(-2, -1) * self.scale + self.get_attention_biases(hidden_state.device)
+ attention = attention.softmax(dim=-1)
+ hidden_state = (attention @ value).transpose(1, 2).reshape(batch_size, -1, self.out_dim_projection)
+ hidden_state = self.projection(self.activation(hidden_state))
+ return hidden_state
+
+
class LevitMLPLayer(nn.Module):
    """
    Feed-forward block used by LeViT: expand, apply Hardswish, project back.
    Uses a `2X` expansion in contrast to ViT's `4X`.
    """

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.linear_up = MLPLayerWithBN(input_dim, hidden_dim)
        self.activation = nn.Hardswish()
        self.linear_down = MLPLayerWithBN(hidden_dim, input_dim)

    def forward(self, hidden_state):
        # expand -> non-linearity -> contract back to the input width
        return self.linear_down(self.activation(self.linear_up(hidden_state)))
+
+
class LevitResidualLayer(nn.Module):
    """
    Residual Block for LeViT, with optional stochastic depth while training.
    """

    def __init__(self, module, drop_rate):
        super().__init__()
        self.module = module
        self.drop_rate = drop_rate

    def forward(self, hidden_state):
        if not (self.training and self.drop_rate > 0):
            # Plain residual connection (inference, or drop_rate == 0).
            return hidden_state + self.module(hidden_state)
        # Stochastic depth: zero the residual branch for a random subset of the
        # batch, rescaling survivors so the expected value is unchanged.
        keep_mask = torch.rand(hidden_state.size(0), 1, 1, device=hidden_state.device)
        keep_mask = keep_mask.ge_(self.drop_rate).div(1 - self.drop_rate).detach()
        return hidden_state + self.module(hidden_state) * keep_mask
+
+
class LevitStage(nn.Module):
    """
    LeViT Stage consisting of `LevitMLPLayer` and `LevitAttention` layers,
    optionally terminated by a `LevitAttentionSubsample` downsampling block.
    """

    def __init__(
        self,
        config,
        idx,
        hidden_sizes,
        key_dim,
        depths,
        num_attention_heads,
        attention_ratio,
        mlp_ratio,
        down_ops,
        resolution_in,
    ):
        super().__init__()
        self.layers = []
        self.config = config
        self.resolution_in = resolution_in
        # resolution_in is the initial resolution, resolution_out is final resolution after downsampling
        for _ in range(depths):
            # Each depth step: residual attention, then (if mlp_ratio > 0) a residual MLP.
            self.layers.append(
                LevitResidualLayer(
                    LevitAttention(hidden_sizes, key_dim, num_attention_heads, attention_ratio, resolution_in),
                    self.config.drop_path_rate,
                )
            )
            if mlp_ratio > 0:
                hidden_dim = hidden_sizes * mlp_ratio
                self.layers.append(
                    LevitResidualLayer(LevitMLPLayer(hidden_sizes, hidden_dim), self.config.drop_path_rate)
                )

        # down_ops appears to be positional: ["Subsample", key_dim, num_heads,
        # attention_ratio, mlp_ratio, stride] — TODO confirm against LevitConfig.
        if down_ops[0] == "Subsample":
            # Ceil-style division: the strided subsample keeps the border row/col.
            self.resolution_out = (self.resolution_in - 1) // down_ops[5] + 1
            self.layers.append(
                LevitAttentionSubsample(
                    *self.config.hidden_sizes[idx : idx + 2],
                    key_dim=down_ops[1],
                    num_attention_heads=down_ops[2],
                    attention_ratio=down_ops[3],
                    stride=down_ops[5],
                    resolution_in=resolution_in,
                    resolution_out=self.resolution_out,
                )
            )
            self.resolution_in = self.resolution_out
            if down_ops[4] > 0:
                hidden_dim = self.config.hidden_sizes[idx + 1] * down_ops[4]
                self.layers.append(
                    LevitResidualLayer(
                        LevitMLPLayer(self.config.hidden_sizes[idx + 1], hidden_dim), self.config.drop_path_rate
                    )
                )

        self.layers = nn.ModuleList(self.layers)

    def get_resolution(self):
        # Spatial resolution after any downsampling performed by this stage.
        return self.resolution_in

    def forward(self, hidden_state):
        # Sequentially apply all residual / subsample layers of the stage.
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
+
+
class LevitEncoder(nn.Module):
    """
    LeViT Encoder consisting of multiple `LevitStage` stages.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Spatial resolution of the patch grid fed to the first stage.
        resolution = self.config.image_size // self.config.patch_size
        self.stages = []
        # Sentinel so the last stage sees a "no downsampling" down_ops entry.
        # NOTE(review): this mutates the config object in place, so building a
        # second encoder from the same config appends a second sentinel — confirm intended.
        self.config.down_ops.append([""])

        for stage_idx in range(len(config.depths)):
            stage = LevitStage(
                config,
                stage_idx,
                config.hidden_sizes[stage_idx],
                config.key_dim[stage_idx],
                config.depths[stage_idx],
                config.num_attention_heads[stage_idx],
                config.attention_ratio[stage_idx],
                config.mlp_ratio[stage_idx],
                config.down_ops[stage_idx],
                resolution,
            )
            # A stage may shrink the resolution; propagate it to the next stage.
            resolution = stage.get_resolution()
            self.stages.append(stage)

        self.stages = nn.ModuleList(self.stages)

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        """Run all stages; optionally collect the hidden state before each stage and after the last."""
        all_hidden_states = () if output_hidden_states else None

        for stage in self.stages:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_state,)
            hidden_state = stage(hidden_state)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_state,)
        if not return_dict:
            # Tuple output: drop the None entries.
            return tuple(v for v in [hidden_state, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=all_hidden_states)
+
+
class LevitClassificationLayer(nn.Module):
    """
    LeViT Classification Layer: 1D BatchNorm over the pooled features followed
    by a linear projection onto the label space.
    """

    def __init__(self, input_dim, output_dim):
        super().__init__()
        self.batch_norm = nn.BatchNorm1d(input_dim)
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, hidden_state):
        normalized = self.batch_norm(hidden_state)
        return self.linear(normalized)
+
+
class LevitPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = LevitConfig
    base_model_prefix = "levit"
    main_input_name = "pixel_values"
    # Keep each residual block on a single device when sharding the model.
    _no_split_modules = ["LevitResidualLayer"]

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
            # BatchNorm starts as identity: zero shift, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
+
+
+LEVIT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`LevitConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+LEVIT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`LevitImageProcessor.__call__`] for details.
+
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
@add_start_docstrings(
    "The bare Levit model outputting raw features without any specific head on top.",
    LEVIT_START_DOCSTRING,
)
class LevitModel(LevitPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        # Convolutional stem that turns pixels into a flattened patch sequence.
        self.patch_embeddings = LevitPatchEmbeddings(config)
        self.encoder = LevitEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(LEVIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: torch.FloatTensor = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
        # Fall back to config-level defaults when flags are not given explicitly.
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embeddings = self.patch_embeddings(pixel_values)
        encoder_outputs = self.encoder(
            embeddings,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]

        # global average pooling, (batch_size, seq_length, hidden_sizes) -> (batch_size, hidden_sizes)
        pooled_output = last_hidden_state.mean(dim=1)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
+
+
@add_start_docstrings(
    """
    Levit Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    LEVIT_START_DOCSTRING,
)
class LevitForImageClassification(LevitPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.num_labels = config.num_labels
        self.levit = LevitModel(config)

        # Classifier head (identity when the config declares no labels)
        self.classifier = (
            LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels)
            if config.num_labels > 0
            else torch.nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(LEVIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: torch.FloatTensor = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.levit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        sequence_output = outputs[0]
        # Mean-pool over the sequence dimension before classification.
        sequence_output = sequence_output.mean(1)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from the label dtype and label count,
            # then cache it on the config for subsequent calls.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
+
+
@add_start_docstrings(
    """
    LeViT Model transformer with image classification heads on top (a linear layer on top of the final hidden state and
    a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet. .. warning::
    This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet
    supported.
    """,
    LEVIT_START_DOCSTRING,
)
class LevitForImageClassificationWithTeacher(LevitPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.num_labels = config.num_labels
        self.levit = LevitModel(config)

        # Two heads: the regular classifier and the distillation classifier
        # (both identity when the config declares no labels).
        self.classifier = (
            LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels)
            if config.num_labels > 0
            else torch.nn.Identity()
        )
        self.classifier_distill = (
            LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels)
            if config.num_labels > 0
            else torch.nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(LEVIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=LevitForImageClassificationWithTeacherOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: torch.FloatTensor = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, LevitForImageClassificationWithTeacherOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.levit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        sequence_output = outputs[0]
        # Mean-pool over the sequence dimension before both heads.
        sequence_output = sequence_output.mean(1)
        cls_logits, distill_logits = self.classifier(sequence_output), self.classifier_distill(sequence_output)
        # Inference-time prediction: average of the two heads.
        logits = (cls_logits + distill_logits) / 2

        if not return_dict:
            output = (logits, cls_logits, distill_logits) + outputs[2:]
            return output

        return LevitForImageClassificationWithTeacherOutput(
            logits=logits,
            cls_logits=cls_logits,
            distillation_logits=distill_logits,
            hidden_states=outputs.hidden_states,
        )
+
+
# Public symbols re-exported from this module.
__all__ = [
    "LevitForImageClassification",
    "LevitForImageClassificationWithTeacher",
    "LevitModel",
    "LevitPreTrainedModel",
]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/olmoe/__pycache__/modeling_olmoe.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/olmoe/__pycache__/modeling_olmoe.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f118459d348be318ed5c9a707175c7c1c5d8d7c3
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/olmoe/__pycache__/modeling_olmoe.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/olmoe/configuration_olmoe.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/olmoe/configuration_olmoe.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f24d5523bafd0398e6985959ffdd223261cc5ca
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/olmoe/configuration_olmoe.py
@@ -0,0 +1,182 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""OLMoE model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...modeling_rope_utils import rope_config_validation
+
+
class OlmoeConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`OlmoeModel`]. It is used to instantiate an OLMoE
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the [allenai/OLMoE-1B-7B-0924](https://huggingface.co/allenai/OLMoE-1B-7B-0924).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 50304):
            Vocabulary size of the OLMoE model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`OlmoeModel`]
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 16):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 50279):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
            these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
            experimental feature, subject to breaking API changes in future versions.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        clip_qkv (`float`, *optional*):
            If not `None`, elements of query, key and value attention states are clipped so that their
            absolute value does not exceed this value.
        num_experts_per_tok (`int`, *optional*, defaults to 8):
            Number of selected experts.
        num_experts (`int`, *optional*, defaults to 64):
            Number of routed experts.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not the router logits should be returned by the model. Enabling this will also
            allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
        router_aux_loss_coef (`float`, *optional*, defaults to 0.01):
            The aux loss factor for the total loss.
        norm_topk_prob (`bool`, *optional*, defaults to `False`):
            Whether to normalize the topk probabilities.

    ```python
    >>> from transformers import OlmoeModel, OlmoeConfig

    >>> # Initializing a OLMoE 7B A1B style configuration
    >>> configuration = OlmoeConfig()

    >>> # Initializing a model from the OLMoE 7B A1B style configuration
    >>> model = OlmoeModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "olmoe"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=50304,
        hidden_size=2048,
        intermediate_size=2048,
        num_hidden_layers=16,
        num_attention_heads=16,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-05,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=None,
        eos_token_id=50279,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        clip_qkv=None,
        num_experts_per_tok=8,
        num_experts=64,
        output_router_logits=False,
        router_aux_loss_coef=0.01,
        norm_topk_prob=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.clip_qkv = clip_qkv
        self.num_experts_per_tok = num_experts_per_tok
        self.num_experts = num_experts
        self.output_router_logits = output_router_logits
        self.router_aux_loss_coef = router_aux_loss_coef
        self.norm_topk_prob = norm_topk_prob
        # Validate the correctness of rotary position embeddings parameters
        # BC: if there is a 'type' field, move it to 'rope_type'.
        if self.rope_scaling is not None and "type" in self.rope_scaling:
            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
        rope_config_validation(self)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
+
+
+__all__ = ["OlmoeConfig"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/rag/__init__.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/rag/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a8f135ba454f08f1773239837a2db627ce075c6
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/rag/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    # Eager imports here only so static type checkers and IDEs see the real symbols.
    from .configuration_rag import *
    from .modeling_rag import *
    from .modeling_tf_rag import *
    from .retrieval_rag import *
    from .tokenization_rag import *
else:
    import sys

    # At runtime, replace this package module with a lazy proxy so the heavy
    # framework-specific submodules are only imported on first attribute access.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/rag/configuration_rag.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/rag/configuration_rag.py
new file mode 100644
index 0000000000000000000000000000000000000000..c76926f21879aa7ea5ab3d55a4be861db9491e97
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/rag/configuration_rag.py
@@ -0,0 +1,186 @@
+# coding=utf-8
+# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""RAG model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import add_start_docstrings
+
+
+RAG_CONFIG_DOC = r"""
+ [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
+ can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ title_sep (`str`, *optional*, defaults to `" / "`):
+ Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
+ doc_sep (`str`, *optional*, defaults to `" // "`):
+ Separator inserted between the text of the retrieved document and the original input when calling
+ [`RagRetriever`].
+ n_docs (`int`, *optional*, defaults to 5):
+ Number of documents to retrieve.
+ max_combined_length (`int`, *optional*, defaults to 300):
+ Max length of contextualized input returned by [`~RagRetriever.__call__`].
+ retrieval_vector_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the document embeddings indexed by [`RagRetriever`].
+ retrieval_batch_size (`int`, *optional*, defaults to 8):
+ Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
+ [`RagRetriever`].
+ dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
+ A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
+ using `datasets.list_datasets()`).
+ dataset_split (`str`, *optional*, defaults to `"train"`)
+ Which split of the `dataset` to load.
+ index_name (`str`, *optional*, defaults to `"compressed"`)
+ The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
+ `"compressed"`.
+ index_path (`str`, *optional*)
+ The path to the serialized faiss index on disk.
+ passages_path (`str`, *optional*):
+ A path to text passages compatible with the faiss index. Required if using
+ [`~models.rag.retrieval_rag.LegacyIndex`]
+ use_dummy_dataset (`bool`, *optional*, defaults to `False`)
+ Whether to load a "dummy" variant of the dataset specified by `dataset`.
+ label_smoothing (`float`, *optional*, defaults to 0.0):
+ Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
+ in the loss calculation. If set to 0, no label smoothing is performed.
+ do_marginalize (`bool`, *optional*, defaults to `False`):
+ If `True`, the logits are marginalized over all documents by making use of
+ `torch.nn.functional.log_softmax`.
+ reduce_loss (`bool`, *optional*, defaults to `False`):
+ Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
+ do_deduplication (`bool`, *optional*, defaults to `True`):
+ Whether or not to deduplicate the generations from different context documents for a given input. Has to be
+ set to `False` if used while training with distributed backend.
+ exclude_bos_score (`bool`, *optional*, defaults to `False`):
+ Whether or not to disregard the BOS token when computing the loss.
+ output_retrieved(`bool`, *optional*, defaults to `False`):
+ If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
+ `context_attention_mask` are returned. See returned tensors for more detail.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ forced_eos_token_id (`int`, *optional*):
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
+ `eos_token_id`.
+"""
+
+
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    # Composite configuration: wraps a `question_encoder` sub-config and a
    # `generator` sub-config (see RAG_CONFIG_DOC for the full parameter list).
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        dataset_revision=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        # Both sub-configurations are mandatory; they arrive as plain dicts in
        # **kwargs (see `from_question_encoder_generator_configs`).
        if "question_encoder" not in kwargs or "generator" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"both `question_encoder` and `generator` sub-configurations were not passed, only {kwargs}"
            )
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto mapping.
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        # Loss-related options.
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        # Retrieval formatting options.
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        # Retrieval index / dataset options.
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.dataset_revision = dataset_revision

        self.output_retrieved = output_retrieved

        self.do_deduplication = do_deduplication

        self.use_cache = use_cache

        # Inherit the generator's forced EOS token when none was given explicitly.
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        r"""
        Instantiate a [`RagConfig`] (or a derived class) from a pre-trained question encoder model configuration and a
        generator model configuration.

        Returns:
            [`RagConfig`]: An instance of a configuration object
        """
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
+
+
+__all__ = ["RagConfig"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/rag/modeling_rag.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/rag/modeling_rag.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3ca787691c4af7ebb04376707572b8df21889ac
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/rag/modeling_rag.py
@@ -0,0 +1,1644 @@
+# coding=utf-8
+# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""RAG model implementation."""
+
+import copy
+from dataclasses import dataclass
+from typing import Callable, List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+
+from ...configuration_utils import PretrainedConfig
+from ...generation import BeamSearchScorer, GenerationConfig, LogitsProcessorList, StoppingCriteriaList
+from ...modeling_outputs import ModelOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_rag import RagConfig
+from .retrieval_rag import RagRetriever
+
+
+# Module-level logger, one per module per the transformers convention.
+logger = logging.get_logger(__name__)
+
+# Config class name substituted into docstrings by `replace_return_docstrings` below.
+_CONFIG_FOR_DOC = "RagConfig"
+
+
+@dataclass
+class RetrievAugLMMarginOutput(ModelOutput):
+    """
+    Base class for retriever augmented marginalized models outputs.
+
+    Args:
+        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+            Language modeling loss.
+        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+            Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
+            each vocabulary token.
+        doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
+            Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+            `question_encoder_last_hidden_state`.
+        past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+            List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+            num_heads, sequence_length, embed_size_per_head)`).
+
+            Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
+            (see `past_key_values` input) to speed up sequential decoding.
+        retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
+            Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
+            the `doc_scores`.
+        retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
+            The indexes of the embedded documents retrieved by the retriever.
+        context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+            Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
+        context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+            Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+            retriever.
+        question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Sequence of hidden states at the output of the last layer of the question encoder pooled output of the
+            model.
+        question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+            shape `(batch_size, sequence_length, hidden_size)`.
+
+            Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
+        question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
+            average in the self-attention heads.
+        generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
+        generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+            shape `(batch_size, sequence_length, hidden_size)`.
+
+            Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
+        generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
+            average in the self-attention heads.
+        generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+            shape `(batch_size, sequence_length, hidden_size)`.
+
+            Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
+        generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
+            average in the self-attention heads.
+        generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the
+            weighted average in the cross-attention heads.
+    """
+
+    # All fields default to `None` as required by `ModelOutput`; `logits` and `doc_scores`
+    # are annotated `Optional` to match that default (they are only `None` before assignment).
+    loss: Optional[torch.FloatTensor] = None
+    logits: Optional[torch.FloatTensor] = None
+    doc_scores: Optional[torch.FloatTensor] = None
+    past_key_values: Optional[List[torch.FloatTensor]] = None
+    retrieved_doc_embeds: Optional[torch.FloatTensor] = None
+    retrieved_doc_ids: Optional[torch.LongTensor] = None
+    context_input_ids: Optional[torch.LongTensor] = None
+    context_attention_mask: Optional[torch.LongTensor] = None
+    question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+    question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+    question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
+    generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class RetrievAugLMOutput(ModelOutput):
+    """
+    Base class for retriever augmented (non-marginalized) model outputs — identical to
+    [`RetrievAugLMMarginOutput`] except that no `loss` is carried.
+
+    Args:
+        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+            Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
+            each vocabulary token.
+        doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
+            Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+            `question_encoder_last_hidden_state`.
+        past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+            List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+            num_heads, sequence_length, embed_size_per_head)`).
+
+            Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
+            (see `past_key_values` input) to speed up sequential decoding.
+        retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
+            Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
+            the `doc_scores`.
+        retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
+            The indexes of the embedded documents retrieved by the retriever.
+        context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+            Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
+        context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+            Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+            retriever.
+        question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Sequence of hidden states at the output of the last layer of the question encoder pooled output of the
+            model.
+        question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+            shape `(batch_size, sequence_length, hidden_size)`.
+
+            Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
+        question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
+            average in the self-attention heads.
+        generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
+        generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+            shape `(batch_size, sequence_length, hidden_size)`.
+
+            Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
+        generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
+            average in the self-attention heads.
+        generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+            shape `(batch_size, sequence_length, hidden_size)`.
+
+            Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
+        generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
+            average in the self-attention heads.
+        generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the
+            weighted average in the cross-attention heads.
+    """
+
+    # `Optional` annotations match the `None` defaults required by `ModelOutput`.
+    logits: Optional[torch.FloatTensor] = None
+    doc_scores: Optional[torch.FloatTensor] = None
+    past_key_values: Optional[List[torch.FloatTensor]] = None
+    retrieved_doc_embeds: Optional[torch.FloatTensor] = None
+    retrieved_doc_ids: Optional[torch.LongTensor] = None
+    context_input_ids: Optional[torch.LongTensor] = None
+    context_attention_mask: Optional[torch.LongTensor] = None
+    question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+    question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+    question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
+    generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+class RagPreTrainedModel(PreTrainedModel):
+    r"""
+    RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP
+    Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.
+
+    RAG is a retriever augmented model and encapsulates three components: a question encoder, a dataset retriever and
+    a generator; the encoder and generator are trainable while the retriever is just an indexed dataset.
+
+    """
+
+    config_class = RagConfig
+    base_model_prefix = "rag"
+    # Attention-backend capability flags advertised for the composite model.
+    _supports_flash_attn_2 = True
+    _supports_sdpa = True
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        # At the moment fast initialization is not supported
+        # for composite models
+        kwargs["_fast_init"] = False
+        return super().from_pretrained(*args, **kwargs)
+
+    @classmethod
+    def from_pretrained_question_encoder_generator(
+        cls,
+        question_encoder_pretrained_model_name_or_path: str = None,
+        generator_pretrained_model_name_or_path: str = None,
+        retriever: RagRetriever = None,
+        **kwargs,
+    ) -> PreTrainedModel:
+        r"""
+        Instantiates a question encoder and a generator from one or two base classes of the library from pretrained
+        model checkpoints.
+
+        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
+        the model, you need to first set it back in training mode with `model.train()`.
+
+        Params:
+            question_encoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
+                Information necessary to initiate the question encoder. Can be either:
+
+                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+                    - A path to a *directory* containing model weights saved using
+                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+                    - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
+                      this case, `from_tf` should be set to `True` and a configuration object should be provided as
+                      `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
+                      PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
+
+            generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
+                Information necessary to initiate the generator. Can be either:
+
+                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+                    - A path to a *directory* containing model weights saved using
+                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+                    - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
+                      this case, `from_tf` should be set to `True` and a configuration object should be provided as
+                      `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
+                      PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
+
+            model_args (remaining positional arguments, *optional*):
+                All remaining positional arguments will be passed to the underlying model's `__init__` method.
+            retriever ([`RagRetriever`], *optional*):
+                The retriever to use.
+            kwargs (remaining dictionary of keyword arguments, *optional*):
+                Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
+                `output_attentions=True`).
+
+                - To update the question_encoder configuration, use the prefix *question_encoder_* for each
+                  configuration parameter.
+                - To update the generator configuration, use the prefix *generator_* for each configuration parameter.
+                - To update the parent model configuration, do not use a prefix for each configuration parameter.
+
+                Behaves differently depending on whether a `config` is provided or automatically loaded.
+
+        Example:
+
+        ```python
+        >>> from transformers import RagModel
+
+        >>> # initialize a RAG from two pretrained models.
+        >>> model = RagModel.from_pretrained_question_encoder_generator(
+        ...     "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small"
+        ... )
+        >>> # saving model after fine-tuning
+        >>> model.save_pretrained("./rag")
+        >>> # load fine-tuned model
+        >>> model = RagModel.from_pretrained("./rag")
+        ```"""
+
+        # Split kwargs into the sub-model namespaces by their prefixes.
+        kwargs_question_encoder = {
+            argument[len("question_encoder_") :]: value
+            for argument, value in kwargs.items()
+            if argument.startswith("question_encoder_")
+        }
+
+        kwargs_generator = {
+            argument[len("generator_") :]: value
+            for argument, value in kwargs.items()
+            if argument.startswith("generator_")
+        }
+
+        # remove question_encoder, generator kwargs from kwargs
+        for key in kwargs_question_encoder.keys():
+            del kwargs["question_encoder_" + key]
+        for key in kwargs_generator.keys():
+            del kwargs["generator_" + key]
+
+        # Load and initialize the question_encoder and generator
+        # The distinction between question_encoder and generator at the model level is made
+        # by the value of the flag `is_generator` that we need to set correctly.
+        question_encoder = kwargs_question_encoder.pop("model", None)
+        if question_encoder is None:
+            assert question_encoder_pretrained_model_name_or_path is not None, (
+                "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to"
+                " be defined"
+            )
+            # Lazy imports avoid a circular dependency with the auto classes.
+            from ..auto.modeling_auto import AutoModel
+
+            if "config" not in kwargs_question_encoder:
+                from ..auto.configuration_auto import AutoConfig
+
+                question_encoder_config, kwargs_question_encoder = AutoConfig.from_pretrained(
+                    question_encoder_pretrained_model_name_or_path,
+                    **kwargs_question_encoder,
+                    return_unused_kwargs=True,
+                )
+                kwargs_question_encoder["config"] = question_encoder_config
+
+            question_encoder = AutoModel.from_pretrained(
+                question_encoder_pretrained_model_name_or_path, **kwargs_question_encoder
+            )
+
+        generator = kwargs_generator.pop("model", None)
+        if generator is None:
+            assert generator_pretrained_model_name_or_path is not None, (
+                "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has"
+                " to be defined"
+            )
+            from ..auto.modeling_auto import AutoModelForSeq2SeqLM
+
+            if "config" not in kwargs_generator:
+                from ..auto.configuration_auto import AutoConfig
+
+                generator_config, kwargs_generator = AutoConfig.from_pretrained(
+                    generator_pretrained_model_name_or_path, **kwargs_generator, return_unused_kwargs=True
+                )
+
+                kwargs_generator["config"] = generator_config
+
+            generator = AutoModelForSeq2SeqLM.from_pretrained(
+                generator_pretrained_model_name_or_path, **kwargs_generator
+            )
+
+        # instantiate config with corresponding kwargs
+        config = kwargs.get("config", None)
+        if config is None:
+            config = RagConfig.from_question_encoder_generator_configs(
+                question_encoder.config, generator.config, **kwargs
+            )
+
+        return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)
+
+
+# Docstring template injected into the RAG model classes via `add_start_docstrings*`.
+# NOTE(review): this text is emitted verbatim into generated docs, so its minor upstream
+# typos ("Such contextualized inputs is passed", "is compatible any") are left untouched
+# here — fixing them would change a runtime string.
+RAG_START_DOCSTRING = r"""
+
+    RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward
+    pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context
+    documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.
+
+    The question encoder can be any *autoencoding* model, preferably [`DPRQuestionEncoder`], and the generator can be
+    any *seq2seq* model, preferably [`BartForConditionalGeneration`].
+
+    The model can be initialized with a [`RagRetriever`] for end-to-end generation or used in combination with the
+    outputs of a retriever in multiple steps---see examples for more details. The model is compatible any
+    *autoencoding* model as the `question_encoder` and any *seq2seq* model with language model head as the `generator`.
+    It has been tested with [`DPRQuestionEncoder`] as the `question_encoder` and [`BartForConditionalGeneration`] or
+    [`T5ForConditionalGeneration`] as the `generator`.
+
+    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+    etc.)
+
+    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+    and behavior.
+
+
+    Args:
+        config ([`RagConfig`]):
+            Model configuration class with all the parameters of the model. Initializing with a config file does not
+            load the weights associated with the model, only the configuration. Check out the
+            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+        question_encoder ([`PreTrainedModel`]):
+            An encoder model compatible with the faiss index encapsulated by the `retriever`.
+        generator ([`PreTrainedModel`]):
+            A seq2seq model used as the generator in the RAG architecture.
+        retriever ([`RagRetriever`]):
+            A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.
+"""
+
+
+# Docstring template for the shared `forward` arguments of the RAG model classes.
+# NOTE(review): runtime string kept byte-for-byte; known upstream typos it contains:
+# "``context_input_ids`" (unbalanced backticks), "If the model has is not initialized"
+# (twice), and a stray trailing backtick in "`config.n_docs``". Fix upstream, not here.
+RAG_FORWARD_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+            Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
+            which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to
+            obtain the indices.
+
+            [What are input IDs?](../glossary#input-ids)
+        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*)
+            Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
+            *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
+            sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the
+            generator's encoder.
+
+            Used by the ([`RagModel`]) model during decoding.
+        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            Provide for generation tasks. `None` by default, construct as per instructions for the generator model
+            you're using with your RAG instance.
+        decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+            be used by default.
+        past_key_values (`tuple(tuple(torch.FloatTensor))`):
+            Tuple consists of two elements: `encoder_outputs` of the RAG model (see `encoder_outputs`) and
+            `past_key_values` of the underlying generator. Can be used to speed up decoding. `past_key_values` are used
+            in the ([`RagTokenForGeneration`]) model during decoding.
+        doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
+            Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+            `question_encoder_last_hidden_state`. If the model has is not initialized with a `retriever` `doc_scores`
+            has to be provided to the forward pass. `doc_scores` can be computed via
+            `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.
+        context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+            Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
+            retriever. If the model was not initialized with a `retriever` ``context_input_ids` has to be provided to
+            the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
+        context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`,*optional*, returned when *output_retrieved=True*):
+            Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+            retriever. If the model has is not initialized with a `retriever` `context_attention_mask` has to be
+            provided to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`].
+        use_cache (`bool`, *optional*, defaults to `True`):
+            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+            `past_key_values`).
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+        output_retrieved(`bool`, *optional*):
+            Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
+            `context_attention_mask`. See returned tensors for more detail.
+        n_docs (`int`, *optional*, defaults to `config.n_docs``)
+            Number of documents to retrieve and/or number of documents for which to generate an answer.
+"""
+
+
+@add_start_docstrings_to_model_forward(RAG_START_DOCSTRING)
+class RagModel(RagPreTrainedModel):
+    def __init__(
+        self,
+        config: Optional[PretrainedConfig] = None,
+        question_encoder: Optional[PreTrainedModel] = None,
+        generator: Optional[PreTrainedModel] = None,
+        retriever: Optional[RagRetriever] = None,  # or maybe just use a `set_retriever(...)` method
+        **kwargs,
+    ):
+        """
+        Build a RAG model from either a full `config` or a (`question_encoder`, `generator`) pair.
+
+        Args:
+            config: Composite [`RagConfig`]; when `None`, it is derived from the two sub-model configs.
+            question_encoder: Model embedding the query; instantiated from `config.question_encoder` when omitted.
+            generator: Seq2seq model producing the output; instantiated from `config.generator` when omitted.
+            retriever: Optional [`RagRetriever`] used during `forward` to fetch context documents.
+            kwargs: Forwarded to [`RagConfig.from_question_encoder_generator_configs`] when `config` is `None`.
+        """
+        assert config is not None or (
+            question_encoder is not None and generator is not None
+        ), "Either a configuration or an question_encoder and a generator has to be provided."
+
+        if config is None:
+            # Derive the composite config from the two sub-model configs plus extra kwargs.
+            config = RagConfig.from_question_encoder_generator_configs(
+                question_encoder.config, generator.config, **kwargs
+            )
+        else:
+            assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}"
+        super().__init__(config)
+        if question_encoder is None:
+            # Lazy import to avoid a circular dependency with the auto classes.
+            from ..auto.modeling_auto import AutoModel
+
+            question_encoder = AutoModel.from_config(config.question_encoder)
+
+        if generator is None:
+            from ..auto.modeling_auto import AutoModelForSeq2SeqLM
+
+            generator = AutoModelForSeq2SeqLM.from_config(config.generator)
+
+        self.retriever = retriever
+        if self.retriever is not None:
+            assert isinstance(
+                retriever, RagRetriever
+            ), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`"
+            # NOTE(review): redundant re-assignment — `self.retriever` was already set above.
+            self.retriever = retriever
+
+        self.question_encoder = question_encoder
+        self.generator = generator
+
+        # Context-encoder training is disabled by default; `ctx_encoder` is presumably
+        # assigned externally before `context_encoder_training` is enabled — TODO confirm
+        # against the callers that toggle this flag.
+        self.ctx_encoder = None
+        self.context_encoder_training = False
+
+ @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=RetrievAugLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ doc_scores: Optional[torch.FloatTensor] = None,
+ context_input_ids: Optional[torch.LongTensor] = None,
+ context_attention_mask: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_retrieved: Optional[bool] = None,
+ n_docs: Optional[int] = None,
+ ) -> Union[Tuple[torch.Tensor], RetrievAugLMOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, RagRetriever, RagModel
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base")
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
+ ... )
+ >>> # initialize with RagRetriever to do everything in one forward call
+ >>> model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever)
+
+ >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
+ >>> outputs = model(input_ids=inputs["input_ids"])
+ ```"""
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ output_retrieved = output_retrieved if output_retrieved is not None else self.config.output_retrieved
+
+ # whether retriever has to be used
+ has_to_retrieve = (
+ self.retriever is not None
+ and (context_input_ids is None or context_attention_mask is None or doc_scores is None)
+ and encoder_outputs is None
+ )
+ # encoder_outputs are pre-computed during RAG-token generation
+ if encoder_outputs is None:
+ if has_to_retrieve:
+ question_enc_outputs = self.question_encoder(
+ input_ids, attention_mask=attention_mask, return_dict=True
+ )
+ question_encoder_last_hidden_state = question_enc_outputs[0] # hidden states of question encoder
+
+ retriever_outputs = self.retriever(
+ input_ids,
+ question_encoder_last_hidden_state.cpu().detach().to(torch.float32).numpy(),
+ prefix=self.generator.config.prefix,
+ n_docs=n_docs,
+ return_tensors="pt",
+ )
+ if self.context_encoder_training:
+ (
+ context_input_ids,
+ context_attention_mask,
+ retrieved_doc_embeds,
+ retrived_doc_input_ids,
+ retrived_doc_attention_mask,
+ retrieved_doc_ids,
+ ) = (
+ retriever_outputs["context_input_ids"],
+ retriever_outputs["context_attention_mask"],
+ retriever_outputs["retrieved_doc_embeds"],
+ retriever_outputs["tokenized_doc_ids"],
+ retriever_outputs["tokenized_doc_attention_mask"],
+ retriever_outputs["doc_ids"],
+ )
+
+ context_input_ids = context_input_ids.to(input_ids)
+ context_attention_mask = context_attention_mask.to(input_ids)
+
+ retrived_doc_input_ids = retrived_doc_input_ids.to(input_ids)
+ retrived_doc_attention_mask = retrived_doc_attention_mask.to(input_ids)
+ retrieved_doc_embeds = self.ctx_encoder(
+ retrived_doc_input_ids, attention_mask=retrived_doc_attention_mask, return_dict=True
+ ).pooler_output
+ retrieved_doc_embeds = retrieved_doc_embeds.view(
+ -1, n_docs, question_encoder_last_hidden_state.shape[1]
+ ) # reshaping
+
+ # compute doc_scores involving ctx_encoder
+ doc_scores = torch.bmm(
+ question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)
+ ).squeeze(1)
+
+ else:
+ context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = (
+ retriever_outputs["context_input_ids"],
+ retriever_outputs["context_attention_mask"],
+ retriever_outputs["retrieved_doc_embeds"],
+ retriever_outputs["doc_ids"],
+ )
+
+ # set to correct device
+ retrieved_doc_embeds = retrieved_doc_embeds.to(question_encoder_last_hidden_state)
+ context_input_ids = context_input_ids.to(input_ids)
+ context_attention_mask = context_attention_mask.to(input_ids)
+
+ # compute doc_scores
+ doc_scores = torch.bmm(
+ question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)
+ ).squeeze(1)
+ else:
+ assert context_input_ids is not None, (
+ "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can"
+ " set a retriever using the `set_retriever(...)` function."
+ )
+ assert context_attention_mask is not None, (
+ "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you"
+ " can set a retriever using the `set_retriever(...)` function."
+ )
+ assert doc_scores is not None, (
+ "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a"
+ " retriever using the `set_retriever(...)` function."
+ )
+
+ assert (
+ doc_scores is not None
+ ), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function."
+
+ assert (doc_scores.shape[1] % n_docs) == 0, (
+ f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is"
+ f" {context_input_ids.shape[0]}."
+ )
+
+ # Decoder input without context documents
+ if decoder_input_ids is not None:
+ decoder_input_ids = decoder_input_ids.repeat_interleave(n_docs, dim=0)
+
+ if decoder_attention_mask is not None:
+ decoder_attention_mask = decoder_attention_mask.repeat_interleave(n_docs, dim=0)
+
+ gen_outputs = self.generator(
+ input_ids=context_input_ids,
+ attention_mask=context_attention_mask,
+ encoder_outputs=encoder_outputs,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ return_dict=True,
+ )
+
+ if not has_to_retrieve:
+ question_encoder_last_hidden_state = None
+ question_enc_hidden_states = None
+ question_enc_attentions = None
+ retrieved_doc_embeds = None
+ retrieved_doc_ids = None
+ else:
+ question_enc_hidden_states = question_enc_outputs.hidden_states
+ question_enc_attentions = question_enc_outputs.attentions
+
+ if not has_to_retrieve or not output_retrieved:
+ # don't output retrieved docs
+ context_input_ids = (None,)
+ context_attention_mask = None
+ retrieved_doc_embeds = None
+ retrieved_doc_ids = None
+
+ return RetrievAugLMOutput(
+ logits=gen_outputs.logits,
+ doc_scores=doc_scores,
+ past_key_values=gen_outputs.past_key_values,
+ context_input_ids=context_input_ids,
+ context_attention_mask=context_attention_mask,
+ retrieved_doc_embeds=retrieved_doc_embeds,
+ retrieved_doc_ids=retrieved_doc_ids,
+ question_encoder_last_hidden_state=question_encoder_last_hidden_state,
+ question_enc_hidden_states=question_enc_hidden_states,
+ question_enc_attentions=question_enc_attentions,
+ generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state,
+ generator_enc_hidden_states=gen_outputs.encoder_hidden_states,
+ generator_enc_attentions=gen_outputs.encoder_attentions,
+ generator_dec_hidden_states=gen_outputs.decoder_hidden_states,
+ generator_dec_attentions=gen_outputs.decoder_attentions,
+ generator_cross_attentions=gen_outputs.cross_attentions,
+ )
+
+
@add_start_docstrings_to_model_forward(
    """
    A RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass.
    """,
    RAG_START_DOCSTRING,
)
class RagSequenceForGeneration(RagPreTrainedModel):
    """RAG-sequence head: wraps a `RagModel` and adds the sequence-level
    marginalized NLL loss (`get_nll`) plus "thorough" decoding (`generate`)."""

    def __init__(
        self,
        config: Optional[PretrainedConfig] = None,
        question_encoder: Optional[PreTrainedModel] = None,
        generator: Optional[PreTrainedModel] = None,
        retriever: Optional[RagRetriever] = None,
        **kwargs,
    ):
        # Either a ready-made config or both sub-models must be given; in the
        # latter case the RagConfig is derived from the sub-models' configs.
        assert config is not None or (
            question_encoder is not None and generator is not None
        ), "Either a configuration or an encoder and a generator has to be provided."

        if config is None:
            config = RagConfig.from_question_encoder_generator_configs(
                question_encoder.config, generator.config, **kwargs
            )
        super().__init__(config)

        # instantiate model
        self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)

    def set_retriever(self, retriever: RagRetriever):
        """Attach (or replace) the retriever used by the underlying `RagModel`."""
        self.rag.retriever = retriever

    def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel):
        """Enable end-to-end retrieval training by providing a trainable
        context/document encoder; doc scores are then recomputed through it."""
        self.rag.context_encoder_training = True
        self.rag.ctx_encoder = ctx_encoder

    @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        context_input_ids: Optional[torch.LongTensor] = None,
        context_attention_mask: Optional[torch.LongTensor] = None,
        doc_scores: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_retrieved: Optional[bool] = None,
        exclude_bos_score: Optional[bool] = None,
        reduce_loss: Optional[bool] = None,
        labels: Optional[torch.LongTensor] = None,
        n_docs: Optional[int] = None,
        **kwargs,  # needs kwargs for generation
    ) -> RetrievAugLMMarginOutput:
        r"""
        exclude_bos_score (`bool`, *optional*):
            Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing
            the loss.
        reduce_loss (`bool`, *optional*):
            Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum`
            operation.
        kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
            Legacy dictionary, which is required so that model can use *generate()* function.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, RagRetriever, RagSequenceForGeneration
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
        >>> retriever = RagRetriever.from_pretrained(
        ...     "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
        ... )
        >>> # initialize with RagRetriever to do everything in one forward call
        >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)

        >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
        >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt")
        >>> input_ids = inputs["input_ids"]
        >>> labels = targets["input_ids"]
        >>> outputs = model(input_ids=input_ids, labels=labels)

        >>> # or use retriever separately
        >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use_dummy_dataset=True)
        >>> # 1. Encode
        >>> question_hidden_states = model.question_encoder(input_ids)[0]
        >>> # 2. Retrieve
        >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
        >>> doc_scores = torch.bmm(
        ...     question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)
        ... ).squeeze(1)
        >>> # 3. Forward to generator
        >>> outputs = model(
        ...     context_input_ids=docs_dict["context_input_ids"],
        ...     context_attention_mask=docs_dict["context_attention_mask"],
        ...     doc_scores=doc_scores,
        ...     decoder_input_ids=labels,
        ... )
        ```"""
        # Resolve per-call overrides against config defaults.
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score
        reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss

        if labels is not None:
            # Labels double as decoder inputs for teacher forcing; caching is
            # disabled during training-style forward passes.
            if decoder_input_ids is None:
                decoder_input_ids = labels
            use_cache = False

        outputs = self.rag(
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_outputs=encoder_outputs,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            context_input_ids=context_input_ids,
            context_attention_mask=context_attention_mask,
            doc_scores=doc_scores,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_retrieved=output_retrieved,
            n_docs=n_docs,
        )

        loss = None
        if labels is not None:
            # Sequence-level marginalized NLL over the retrieved documents.
            loss = self.get_nll(
                outputs.logits,
                outputs.doc_scores,
                decoder_input_ids,
                reduce_loss=reduce_loss,
                epsilon=self.config.label_smoothing,
                exclude_bos_score=exclude_bos_score,
                n_docs=n_docs,
            )

        return RetrievAugLMMarginOutput(
            loss=loss,
            logits=outputs.logits,
            doc_scores=outputs.doc_scores,
            past_key_values=outputs.past_key_values,
            context_input_ids=outputs.context_input_ids,
            context_attention_mask=outputs.context_attention_mask,
            retrieved_doc_embeds=outputs.retrieved_doc_embeds,
            retrieved_doc_ids=outputs.retrieved_doc_ids,
            question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
            question_enc_hidden_states=outputs.question_enc_hidden_states,
            question_enc_attentions=outputs.question_enc_attentions,
            generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
            generator_enc_hidden_states=outputs.generator_enc_hidden_states,
            generator_enc_attentions=outputs.generator_enc_attentions,
            generator_dec_hidden_states=outputs.generator_dec_hidden_states,
            generator_dec_attentions=outputs.generator_dec_attentions,
            generator_cross_attentions=outputs.generator_cross_attentions,
        )

    @property
    def retriever(self):
        # Convenience pass-throughs to the wrapped RagModel's components.
        return self.rag.retriever

    @property
    def generator(self):
        return self.rag.generator

    @property
    def question_encoder(self):
        return self.rag.question_encoder

    @torch.no_grad()
    def generate(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        context_input_ids: Optional[torch.LongTensor] = None,
        context_attention_mask: Optional[torch.LongTensor] = None,
        doc_scores: Optional[torch.FloatTensor] = None,
        do_deduplication: Optional[bool] = None,  # defaults to True
        num_return_sequences: Optional[int] = None,  # defaults to 1
        num_beams: Optional[int] = None,  # defaults to 1
        n_docs: Optional[int] = None,
        **model_kwargs,
    ) -> torch.LongTensor:
        """
        Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`]` documentation
        for more information on how to set other generate input parameters.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                The sequence used as a prompt for the generation. If `input_ids` is not passed, then
                `context_input_ids` has to be provided.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
                Input IDs post-processed from the retrieved documents and the question encoder input_ids by the
                retriever.
            context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
                Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
                retriever.

                If the model is not initialized with a `retriever` or `input_ids` is not given, `context_input_ids` and
                `context_attention_mask` have to be provided to the forward pass. They are returned by
                [`~RagRetriever.__call__`].
            doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
                Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
                `question_encoder_last_hidden_state`.

                If the model is not initialized with a `retriever` or `input_ids` is not given, `doc_scores` has to be
                provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`].
            do_deduplication (`bool`, *optional*):
                Whether or not to deduplicate the generations from different context documents for a given input. Has
                to be set to `False` if used while training with distributed backend.
            num_return_sequences(`int`, *optional*, defaults to 1):
                The number of independently computed returned sequences for each element in the batch. Note that this
                is not the value we pass to the `generator`'s `[`~generation.GenerationMixin.generate`]` function,
                where we set `num_return_sequences` to `num_beams`.
            num_beams (`int`, *optional*, defaults to 1):
                Number of beams for beam search. 1 means no beam search.
            n_docs (`int`, *optional*, defaults to `config.n_docs`):
                Number of documents to retrieve and/or number of documents for which to generate an answer.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional kwargs will be passed to [`~generation.GenerationMixin.generate`].

        Return:
            `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated
            sequences. The second dimension (sequence length) is either equal to `max_length` or shorter if all batches
            finished early due to the `eos_token_id`.
        """

        # Resolve per-call overrides against config defaults.
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication
        num_doc_return_sequences = (
            num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
        )
        num_beams = num_beams if num_beams is not None else self.config.num_beams

        assert (
            input_ids is not None or context_input_ids is not None
        ), " At least one of input_ids or context_input_ids must be given"

        if self.retriever is not None and context_input_ids is None:
            # Retrieve documents for the question; only the document-augmented
            # input ids are needed here (scores are recomputed via forward below).
            question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
            context_input_ids = self.retriever(
                input_ids,
                question_hidden_states.cpu().detach().to(torch.float32).numpy(),
                prefix=self.generator.config.prefix,
                n_docs=n_docs,
                return_tensors="pt",
            )["context_input_ids"]

            # set to correct device
            context_input_ids = context_input_ids.to(input_ids)

        hypos = []
        # Generate num_beams candidates per document; re-score them afterwards.
        model_kwargs["num_beams"] = num_beams
        model_kwargs["num_return_sequences"] = num_beams
        model_kwargs["attention_mask"] = None

        batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs

        for index in range(batch_size):
            # first, generate beams from documents:
            generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs]  # (n_docs, max_len)

            output_sequences = self.generator.generate(
                generator_input_ids,
                **model_kwargs,
            )  # n_docs * n_beam, tgt_len
            if do_deduplication:
                # do_deduplication, max_output_len
                # keyed by token-list string so identical sequences collapse to one
                output_sequences = torch.stack(list({str(k.tolist()): k for k in output_sequences}.values()))

            num_candidates = output_sequences.shape[
                0
            ]  # after deduplication, this number can be less than n_docs*n_beam

            # then, run model forwards to get nll scores:
            if input_ids is not None:
                new_input_ids = input_ids[index : index + 1].repeat(num_candidates, 1)
                outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True)
            else:  # input_ids is None, need context_input_ids/mask and doc_scores
                assert context_attention_mask is not None, (
                    "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you"
                    " can set a retriever using the `set_retriever(...)` function."
                )
                assert doc_scores is not None, (
                    "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a"
                    " retriever using the `set_retriever(...)` function."
                )

                individual_input_ids = generator_input_ids.repeat(
                    num_candidates, 1
                )  # (num_candidates*n_docs, max_len)

                individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs]
                individual_attention_mask = individual_attention_mask.repeat(num_candidates, 1)

                individual_doc_scores = doc_scores[index : (index + 1), :]  # doc_scores.shape = [batch, n_docs]
                individual_doc_scores = individual_doc_scores.repeat(num_candidates, 1)  # [num_candidates, n_docs]

                outputs = self(
                    context_input_ids=individual_input_ids,
                    context_attention_mask=individual_attention_mask,
                    doc_scores=individual_doc_scores,
                    labels=output_sequences,
                    exclude_bos_score=True,
                )

            # lower loss == higher score; keep the top candidates per example
            top_cand_inds = (-outputs["loss"]).topk(num_doc_return_sequences)[1]

            # add hypothesis
            hypos.append(output_sequences[top_cand_inds])

        return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id)

    def get_nll(
        self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None
    ):
        """Compute the RAG-sequence marginalized NLL (optionally label-smoothed).

        `seq_logits` has shape `(batch*n_docs, tgt_len, vocab)`; `doc_scores` has
        shape `(batch, n_docs)`. The per-document sequence log-likelihoods are
        summed over tokens and then log-sum-exp'd over the document dimension.
        """
        # shift tokens left
        target = torch.cat(
            [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1
        )

        n_docs = n_docs if n_docs is not None else self.config.n_docs

        # bos_token_id is None for T5
        bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id
        use_bos = bos_token_id is not None and target[:, 0].eq(bos_token_id).all()

        def _mask_pads(ll, smooth_obj):
            # Zero out contributions from padding positions in both terms.
            pad_mask = target.eq(self.config.generator.pad_token_id)
            if pad_mask.any():
                ll.masked_fill_(pad_mask, 0.0)
                smooth_obj.masked_fill_(pad_mask, 0.0)
            return ll.squeeze(-1), smooth_obj.squeeze(-1)

        # seq_logits dim = (batch*n_docs, tgt_len , #vocabs)
        seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view(
            seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
        )  # batch_size x n_docs x tgt_len x #vocab_size
        doc_logprobs = nn.functional.log_softmax(doc_scores, dim=1).unsqueeze(-1).unsqueeze(-1)

        # RAG-sequence marginalization: the document log-prob is added at the
        # second token position so that the (optional) BOS score at position 0
        # can be excluded without dropping the document term.
        first_token_scores = seq_logprobs[:, :, :1, :]
        second_token_scores = seq_logprobs[:, :, 1:2, :]
        remainder = seq_logprobs[:, :, 2:, :]
        rag_logprobs = torch.cat([first_token_scores, second_token_scores + doc_logprobs, remainder], dim=2)

        # calculate loss
        target = target.unsqueeze(1).unsqueeze(-1).repeat(1, n_docs, 1, 1)
        assert target.dim() == rag_logprobs.dim()

        ll = rag_logprobs.gather(dim=-1, index=target)
        smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True)  # total sum of all (normalised) logits

        ll, smooth_obj = _mask_pads(ll, smooth_obj)

        # sum over tokens, exclude bos while scoring
        ll = ll[:, :, 1:].sum(2) if exclude_bos_score and use_bos else ll.sum(2)
        smooth_obj = smooth_obj.sum(2)
        ll = ll.logsumexp(1)  # logsumexp over docs
        smooth_obj = smooth_obj.logsumexp(1)

        nll_loss = -ll
        smooth_loss = -smooth_obj

        if reduce_loss:
            nll_loss = nll_loss.sum()
            smooth_loss = smooth_loss.sum()

        # standard label smoothing: mix the NLL with the uniform-distribution term
        eps_i = epsilon / rag_logprobs.size(-1)
        loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
        return loss

    @staticmethod
    def _cat_and_pad(tensors, pad_token_id):
        """Concatenate 2-D id tensors of differing widths along dim 0, right-padding
        shorter rows with `pad_token_id`."""
        output = (
            tensors[0].new(sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])).fill_(pad_token_id)
        )
        ind = 0
        for t in tensors:
            output[ind : ind + t.shape[0], : t.shape[1]] = t
            ind += t.shape[0]
        return output
+
+
+@add_start_docstrings_to_model_forward(
+ """
+ A RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass.
+ """,
+ RAG_START_DOCSTRING,
+)
+class RagTokenForGeneration(RagPreTrainedModel):
+ def __init__(
+ self,
+ config: Optional[PretrainedConfig] = None,
+ question_encoder: Optional[PreTrainedModel] = None,
+ generator: Optional[PreTrainedModel] = None,
+ retriever: Optional[RagRetriever] = None,
+ **kwargs,
+ ):
+ assert config is not None or (
+ question_encoder is not None and generator is not None
+ ), "Either a configuration or an encoder and a generator has to be provided."
+
+ if config is None:
+ config = RagConfig.from_question_encoder_generator_configs(
+ question_encoder.config, generator.config, **kwargs
+ )
+
+ super().__init__(config)
+
+ # instantiate model
+ self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)
+
+ def set_retriever(self, retriever: RagRetriever):
+ self.rag.retriever = retriever
+
+ def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel):
+ self.rag.context_encoder_training = True
+ self.rag.ctx_encoder = ctx_encoder
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ doc_scores=None,
+ n_docs=None,
+ **kwargs,
+ ):
+ # Overwritten -- `do_marginalize` is explicitly set in the output
+
+ if past_key_values is not None:
+ # if past is defined use only last decoder_input_ids
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_ids": None,
+ "encoder_outputs": encoder_outputs,
+ "doc_scores": doc_scores,
+ "context_attention_mask": attention_mask,
+ "decoder_input_ids": decoder_input_ids,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ "do_marginalize": True,
+ "n_docs": n_docs,
+ }
+
+ @property
+ def retriever(self):
+ return self.rag.retriever
+
+ @property
+ def generator(self):
+ return self.rag.generator
+
+ @property
+ def question_encoder(self):
+ return self.rag.question_encoder
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ """Reorders cache for generation. BART-inspired but we need to take care of the extra dimension for docs"""
+
+ def _reorder_stacked(hidden_states, new_order):
+ n_docs = hidden_states.shape[0] // new_order.shape[0]
+ hidden_states = hidden_states.view(-1, n_docs, *hidden_states.shape[1:])
+ hidden_states = hidden_states.index_select(0, new_order)
+ result = hidden_states.view(-1, *hidden_states.shape[2:])
+ return result
+
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # get the correct batch idx from decoder layer's batch dim for cross and self-attn
+ reordered_past += (
+ tuple(_reorder_stacked(past_state, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+
+ return reordered_past
+
+ def marginalize(self, seq_logits, doc_scores, n_docs=None):
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+
+ # RAG-token marginalization
+ seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view(
+ seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
+ )
+ doc_logprobs = torch.log_softmax(doc_scores, dim=1)
+ log_prob_sum = seq_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1)
+ return torch.logsumexp(log_prob_sum, dim=1)
+
+ @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ context_input_ids: Optional[torch.LongTensor] = None,
+ context_attention_mask: Optional[torch.LongTensor] = None,
+ doc_scores: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_retrieved: Optional[bool] = None,
+ do_marginalize: Optional[bool] = None,
+ reduce_loss: Optional[bool] = None,
+ labels: Optional[torch.LongTensor] = None,
+ n_docs: Optional[int] = None,
+ **kwargs, # needs kwargs for generation
+ ) -> RetrievAugLMMarginOutput:
+ r"""
+ do_marginalize (`bool`, *optional*):
+ If `True`, the logits are marginalized over all documents by making use of
+ `torch.nn.functional.log_softmax`.
+ reduce_loss (`bool`, *optional*):
+ Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum`
+ operation.
+ kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
+ Legacy dictionary, which is required so that model can use *generate()* function.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, RagRetriever, RagTokenForGeneration
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq")
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
+ ... )
+ >>> # initialize with RagRetriever to do everything in one forward call
+ >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
+
+ >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
+ >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt")
+ >>> input_ids = inputs["input_ids"]
+ >>> labels = targets["input_ids"]
+ >>> outputs = model(input_ids=input_ids, labels=labels)
+
+ >>> # or use retriever separately
+ >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", use_dummy_dataset=True)
+ >>> # 1. Encode
+ >>> question_hidden_states = model.question_encoder(input_ids)[0]
+ >>> # 2. Retrieve
+ >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
+ >>> doc_scores = torch.bmm(
+ ... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)
+ ... ).squeeze(1)
+ >>> # 3. Forward to generator
+ >>> outputs = model(
+ ... context_input_ids=docs_dict["context_input_ids"],
+ ... context_attention_mask=docs_dict["context_attention_mask"],
+ ... doc_scores=doc_scores,
+ ... decoder_input_ids=labels,
+ ... )
+
+ >>> # or directly generate
+ >>> generated = model.generate(
+ ... context_input_ids=docs_dict["context_input_ids"],
+ ... context_attention_mask=docs_dict["context_attention_mask"],
+ ... doc_scores=doc_scores,
+ ... )
+ >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
+ ```"""
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+ do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize
+ reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
+
+ if labels is not None:
+ if decoder_input_ids is None:
+ decoder_input_ids = labels
+ use_cache = False
+
+ outputs = self.rag(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_outputs=encoder_outputs,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ context_input_ids=context_input_ids,
+ context_attention_mask=context_attention_mask,
+ doc_scores=doc_scores,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_retrieved=output_retrieved,
+ n_docs=n_docs,
+ )
+
+ loss = None
+ logits = outputs.logits
+ if labels is not None:
+ assert decoder_input_ids is not None
+ loss = self.get_nll(
+ outputs.logits,
+ outputs.doc_scores,
+ labels,
+ reduce_loss=reduce_loss,
+ epsilon=self.config.label_smoothing,
+ n_docs=n_docs,
+ )
+
+ if do_marginalize:
+ logits = self.marginalize(logits, outputs.doc_scores, n_docs)
+
+ return RetrievAugLMMarginOutput(
+ loss=loss,
+ logits=logits,
+ doc_scores=outputs.doc_scores,
+ past_key_values=outputs.past_key_values,
+ context_input_ids=outputs.context_input_ids,
+ context_attention_mask=outputs.context_attention_mask,
+ retrieved_doc_embeds=outputs.retrieved_doc_embeds,
+ retrieved_doc_ids=outputs.retrieved_doc_ids,
+ question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
+ question_enc_hidden_states=outputs.question_enc_hidden_states,
+ question_enc_attentions=outputs.question_enc_attentions,
+ generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
+ generator_enc_hidden_states=outputs.generator_enc_hidden_states,
+ generator_enc_attentions=outputs.generator_enc_attentions,
+ generator_dec_hidden_states=outputs.generator_dec_hidden_states,
+ generator_dec_attentions=outputs.generator_dec_attentions,
+ generator_cross_attentions=outputs.generator_cross_attentions,
+ )
+
    @torch.no_grad()
    def generate(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        context_input_ids: Optional[torch.LongTensor] = None,
        context_attention_mask: Optional[torch.LongTensor] = None,
        doc_scores: Optional[torch.FloatTensor] = None,
        n_docs: Optional[int] = None,
        generation_config: Optional[GenerationConfig] = None,
        prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]] = None,
        # NOTE(review): mutable default arguments below are shared across calls; safe only
        # because the defaults are never mutated here — confirm `_get_logits_processor` /
        # `_get_stopping_criteria` do not mutate them in place.
        logits_processor: Optional[LogitsProcessorList] = LogitsProcessorList(),
        stopping_criteria: Optional[StoppingCriteriaList] = StoppingCriteriaList(),
        **kwargs,
    ) -> torch.LongTensor:
        """
        Implements RAG token decoding.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                The sequence used as a prompt for the generation. If `input_ids` is not passed, then
                `context_input_ids` has to be provided.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
                Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
                retriever.

                If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
                forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
            context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
                Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
                retriever.

                If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to the
                forward pass. `context_attention_mask` is returned by [`~RagRetriever.__call__`].
            doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
                Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
                `question_encoder_last_hidden_state`.

                If the model is not initialized with a `retriever`, `doc_scores` has to be provided to the
                forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`].
            n_docs (`int`, *optional*, defaults to `config.n_docs`):
                Number of documents to retrieve and/or number of documents for which to generate an answer.
            generation_config (`~generation.GenerationConfig`, *optional*):
                The generation configuration to be used as base parametrization for the generation call. `**kwargs`
                passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which has the following loading
                priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
                configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
                default values, whose documentation should be checked to parameterize generation.
            prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
                If provided, this function constraints the beam search to allowed tokens only at each step. If not
                provided no constraint is applied. This function takes 2 arguments `inputs_ids` and the batch ID
                `batch_id`. It has to return a list with the allowed tokens for the next generation step conditioned on
                the previously generated tokens `inputs_ids` and the batch ID `batch_id`. This argument is useful for
                constrained generation conditioned on the prefix, as described in [Autoregressive Entity
                Retrieval](https://arxiv.org/abs/2010.00904).
            logits_processor (`LogitsProcessorList`, *optional*):
                Custom logits processors that complement the default logits processors built from arguments and a
                model's config. If a logit processor is passed that is already created with the arguments or a model's
                config an error is thrown.
            stopping_criteria (`StoppingCriteriaList`, *optional*):
                Custom stopping criteria that complement the default stopping criteria built from arguments and a
                model's config. If a stopping criteria is passed that is already created with the arguments or a
                model's config an error is thrown.
            kwargs (`Dict[str, Any]`, *optional*):
                Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
                forwarded to the `forward` function of the model.

        Return:
            `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated
            sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches
            finished early due to the `eos_token_id`.
        """
        # Handle `generation_config` and kwargs that might update it
        if generation_config is None:
            generation_config = self.generation_config
        # Deep-copy so that kwarg overrides below never mutate the model-level config.
        generation_config = copy.deepcopy(generation_config)
        model_kwargs = generation_config.update(**kwargs)  # All unused kwargs must be model kwargs

        kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None
        self._prepare_special_tokens(generation_config, kwargs_has_attention_mask)

        # set default parameters
        n_docs = n_docs if n_docs is not None else self.config.n_docs

        # retrieve docs: only when a retriever is attached and the caller did not
        # already supply pre-retrieved contexts.
        if self.retriever is not None and context_input_ids is None:
            question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
            # The retriever works on CPU float32 numpy arrays regardless of model dtype/device.
            out = self.retriever(
                input_ids,
                question_hidden_states.cpu().detach().to(torch.float32).numpy(),
                prefix=self.generator.config.prefix,
                n_docs=n_docs,
                return_tensors="pt",
            )
            context_input_ids, context_attention_mask, retrieved_doc_embeds = (
                out["context_input_ids"],
                out["context_attention_mask"],
                out["retrieved_doc_embeds"],
            )

            # set to correct device
            retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
            context_input_ids = context_input_ids.to(input_ids)
            context_attention_mask = context_attention_mask.to(input_ids)

            # compute doc_scores: inner product between the question embedding and each
            # retrieved document embedding -> shape (batch_size, n_docs).
            doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(
                1
            )

        # NOTE(review): `assert` is stripped under `python -O`; input validation via an
        # explicit exception would be more robust — kept as-is to preserve behavior.
        assert (context_input_ids.shape[0] % n_docs) == 0, (
            f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is"
            f" {context_input_ids.shape[0]}."
        )

        # batch_size
        batch_size = context_input_ids.shape[0] // n_docs

        encoder = self.rag.generator.get_encoder()
        encoder_outputs = encoder(input_ids=context_input_ids, attention_mask=context_attention_mask, return_dict=True)

        # Decoder prompt: one BOS (decoder_start_token_id) per (batch, beam) pair.
        input_ids = torch.full(
            (batch_size * generation_config.num_beams, 1),
            generation_config.decoder_start_token_id,
            dtype=torch.long,
            device=next(self.parameters()).device,
        )
        input_ids_seq_length = input_ids.shape[-1]
        last_hidden_state = encoder_outputs["last_hidden_state"]

        def extend_enc_output(tensor, num_beams=None):
            # split into `batch_size`, `num_beams`, `num_docs`
            tensor = tensor[None, None, :].reshape((batch_size, 1, n_docs) + tensor.shape[1:])
            # repeat same last hidden states over `num_beams` dimension
            tensor = tensor.expand((batch_size, num_beams, n_docs) + tensor.shape[3:])
            # merge `batch_size`, `num_beams`, `num_docs` dims again
            return tensor.reshape((batch_size * num_beams * n_docs,) + tensor.shape[3:])

        # correctly extend last_hidden_state and attention mask
        context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams)
        encoder_outputs["last_hidden_state"] = extend_enc_output(
            last_hidden_state, num_beams=generation_config.num_beams
        )

        # doc_scores must match the beam-expanded batch dimension.
        doc_scores = doc_scores.repeat_interleave(generation_config.num_beams, dim=0)

        # define start_len & additional parameters
        model_kwargs["doc_scores"] = doc_scores
        model_kwargs["encoder_outputs"] = encoder_outputs
        model_kwargs["attention_mask"] = context_attention_mask
        model_kwargs["n_docs"] = n_docs

        pre_processor = self._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=input_ids_seq_length,
            encoder_input_ids=context_input_ids,
            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
            logits_processor=logits_processor,
            device=input_ids.device,
        )

        prepared_stopping_criteria = self._get_stopping_criteria(
            generation_config=generation_config, stopping_criteria=stopping_criteria
        )

        # Dispatch to greedy/sampling (num_beams == 1) or beam search (num_beams > 1).
        if generation_config.num_beams == 1:
            if generation_config.num_return_sequences > 1:
                raise ValueError(
                    f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing"
                    " greedy search."
                )
            return self._sample(
                input_ids,
                logits_processor=pre_processor,
                stopping_criteria=prepared_stopping_criteria,
                generation_config=generation_config,
                synced_gpus=False,
                streamer=None,
                **model_kwargs,
            )
        elif generation_config.num_beams > 1:
            if generation_config.num_return_sequences > generation_config.num_beams:
                raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.")
            beam_scorer = BeamSearchScorer(
                batch_size=batch_size,
                num_beams=generation_config.num_beams,
                device=self.device,
                length_penalty=generation_config.length_penalty,
                do_early_stopping=generation_config.early_stopping,
                num_beam_hyps_to_keep=generation_config.num_return_sequences,
                max_length=generation_config.max_length,
            )
            return self._beam_search(
                input_ids,
                beam_scorer,
                logits_processor=pre_processor,
                stopping_criteria=prepared_stopping_criteria,
                generation_config=generation_config,
                synced_gpus=False,
                **model_kwargs,
            )
        else:
            raise ValueError(
                f"`num_beams` has to be an integer strictly superior to 0 (≥ 1), but is {generation_config.num_beams}"
            )
+
+ def get_input_embeddings(self):
+ return self.rag.generator.get_input_embeddings()
+
+ def get_output_embeddings(self):
+ return self.rag.generator.get_output_embeddings()
+
+ def set_output_embeddings(self, new_embeddings):
+ return self.rag.generator.set_output_embeddings(new_embeddings)
+
+ def shift_tokens_right(self, input_ids, start_token_id=None):
+ """Shift input ids one token to the right, and pad with start_token_id"""
+ if start_token_id is None:
+ start_token_id = self.config.decoder_start_token_id
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+ shifted_input_ids[:, 0] = start_token_id
+ return shifted_input_ids
+
+ def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None):
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+ # shift tokens left
+ target = torch.cat(
+ [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1
+ )
+
+ def _mask_pads(ll, smooth_obj):
+ pad_mask = target.eq(self.config.generator.pad_token_id)
+ if pad_mask.any():
+ ll.masked_fill_(pad_mask, 0.0)
+ smooth_obj.masked_fill_(pad_mask, 0.0)
+ return ll.squeeze(-1), smooth_obj.squeeze(-1)
+
+ rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs)
+
+ target = target.unsqueeze(-1)
+ assert target.dim() == rag_logprobs.dim()
+
+ ll = rag_logprobs.gather(dim=-1, index=target)
+ smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits
+ ll, smooth_obj = _mask_pads(ll, smooth_obj)
+ ll = ll.sum(1) # sum over tokens
+ smooth_obj = smooth_obj.sum(1)
+
+ nll_loss = -ll
+ smooth_loss = -smooth_obj
+
+ if reduce_loss:
+ nll_loss = nll_loss.sum()
+ smooth_loss = smooth_loss.sum()
+
+ eps_i = epsilon / rag_logprobs.size(-1)
+ loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
+ return loss
+
+
+__all__ = ["RagModel", "RagPreTrainedModel", "RagSequenceForGeneration", "RagTokenForGeneration"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/rag/tokenization_rag.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/rag/tokenization_rag.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d0a994e766fcd4f6caf346641d20d89d29e074f
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/rag/tokenization_rag.py
@@ -0,0 +1,124 @@
+# coding=utf-8
+# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for RAG."""
+
+import os
+import warnings
+from typing import List, Optional
+
+from ...tokenization_utils_base import BatchEncoding
+from ...utils import logging
+from .configuration_rag import RagConfig
+
+
+logger = logging.get_logger(__name__)
+
+
class RagTokenizer:
    """Pairs a question-encoder tokenizer with a generator tokenizer behind one interface.

    Encoding calls (`__call__`) are routed to whichever sub-tokenizer is currently
    active (the question encoder by default); decoding always uses the generator
    tokenizer.
    """

    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        # Inputs (questions) are tokenized by the question encoder by default.
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        """Save both sub-tokenizers under dedicated subfolders of `save_directory`."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        for tokenizer, subfolder in (
            (self.question_encoder, "question_encoder_tokenizer"),
            (self.generator, "generator_tokenizer"),
        ):
            tokenizer.save_pretrained(os.path.join(save_directory, subfolder))

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load both sub-tokenizers from their respective subfolders via AutoTokenizer."""
        # dynamically import AutoTokenizer (avoids a circular import at module load)
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        """Tokenize with whichever sub-tokenizer is currently active."""
        active = self.current_tokenizer
        return active(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Decode a batch of token-id sequences with the generator tokenizer."""
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Decode a single token-id sequence with the generator tokenizer."""
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        # Questions are encoded with the question-encoder tokenizer.
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        # Targets (answers) are encoded with the generator tokenizer.
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """Deprecated: tokenize source (and optionally target) texts in one call."""
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
+
+
+__all__ = ["RagTokenizer"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__init__.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..68da4037a351b308e7301835795b6c68946b6f63
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    # For static type checkers only: expose the full public API eagerly so that
    # names resolve without triggering the lazy-loading machinery below.
    from .configuration_sam import *
    from .image_processing_sam import *
    from .modeling_sam import *
    from .modeling_tf_sam import *
    from .processing_sam import *
else:
    import sys

    # At runtime, replace this module object with a lazy proxy so that heavy
    # submodules (e.g. the torch/TF modeling code) are imported only on first
    # attribute access.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..21acb3c3922fdc7a8e21d202787b609aca766cf8
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/__init__.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/convert_sam_to_hf.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/convert_sam_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0bcc1d137759ded3e6c7c226a0ab0ebf2170de2c
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/convert_sam_to_hf.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/image_processing_sam.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/image_processing_sam.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04190db85c904d74635d9f87dd62985221043485
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/image_processing_sam.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_sam.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_sam.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4fbbf1e69bb8d0d37f87290e04b01a79d2274dac
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_sam.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_tf_sam.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_tf_sam.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1070d831319293b70925be5383d4dc519988f02c
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_tf_sam.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/configuration_sam.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/configuration_sam.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0a759dbf111b4d20d2c95dec89614f632baf6dd
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/configuration_sam.py
@@ -0,0 +1,319 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""SAM model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
class SamPromptEncoderConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SamPromptEncoder`]. The [`SamPromptEncoder`]
    module is used to encode the input 2D points and bounding boxes. Instantiating a configuration defaults will yield
    a similar configuration to that of the SAM-vit-h
    [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden states.
        image_size (`int`, *optional*, defaults to 1024):
            The expected output resolution of the image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        mask_input_channels (`int`, *optional*, defaults to 16):
            The number of channels to be fed to the `MaskDecoder` module.
        num_point_embeddings (`int`, *optional*, defaults to 4):
            The number of point embeddings to be used.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the encoder and pooler.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
    """

    base_config_key = "prompt_encoder_config"

    def __init__(
        self,
        hidden_size=256,
        image_size=1024,
        patch_size=16,
        mask_input_channels=16,
        num_point_embeddings=4,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.image_size = image_size
        self.patch_size = patch_size
        # Number of patches along one image side — also the spatial size of the image embedding.
        self.image_embedding_size = image_size // patch_size
        self.mask_input_channels = mask_input_channels
        self.num_point_embeddings = num_point_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
+
+
class SamMaskDecoderConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SamMaskDecoder`]. It is used to instantiate a SAM
    mask decoder to the specified arguments, defining the model architecture. Instantiating a configuration defaults
    will yield a similar configuration to that of the SAM-vit-h
    [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden states.
        hidden_act (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function used inside the `SamMaskDecoder` module.
        mlp_dim (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 2):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        attention_downsample_rate (`int`, *optional*, defaults to 2):
            The downsampling rate of the attention layer.
        num_multimask_outputs (`int`, *optional*, defaults to 3):
            The number of outputs from the `SamMaskDecoder` module. In the Segment Anything paper, this is set to 3.
        iou_head_depth (`int`, *optional*, defaults to 3):
            The number of layers in the IoU head module.
        iou_head_hidden_dim (`int`, *optional*, defaults to 256):
            The dimensionality of the hidden states in the IoU head module.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.

    """

    base_config_key = "mask_decoder_config"

    def __init__(
        self,
        hidden_size=256,
        hidden_act="relu",
        mlp_dim=2048,
        num_hidden_layers=2,
        num_attention_heads=8,
        attention_downsample_rate=2,
        num_multimask_outputs=3,
        iou_head_depth=3,
        iou_head_hidden_dim=256,
        layer_norm_eps=1e-6,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # All parameters are stored verbatim; no derived values are computed here.
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.mlp_dim = mlp_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.attention_downsample_rate = attention_downsample_rate
        self.num_multimask_outputs = num_multimask_outputs
        self.iou_head_depth = iou_head_depth
        self.iou_head_hidden_dim = iou_head_hidden_dim
        self.layer_norm_eps = layer_norm_eps
+
+
class SamVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SamVisionModel`]. It is used to instantiate a SAM
    vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
    defaults will yield a similar configuration to that of the SAM ViT-h
    [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        output_channels (`int`, *optional*, defaults to 256):
            Dimensionality of the output channels in the Patch Encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input image.
        image_size (`int`, *optional*, defaults to 1024):
            Expected resolution. Target size of the resized input image.
        patch_size (`int`, *optional*, defaults to 16):
            Size of the patches to be extracted from the input image.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string)
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 1e-10):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to query, key, value projections.
        mlp_ratio (`float`, *optional*, defaults to 4.0):
            Ratio of mlp hidden dim to embedding dim.
        use_abs_pos (`bool`, *optional*, defaults to `True`):
            Whether to use absolute position embedding.
        use_rel_pos (`bool`, *optional*, defaults to `True`):
            Whether to use relative position embedding.
        window_size (`int`, *optional*, defaults to 14):
            Window size for relative position.
        global_attn_indexes (`List[int]`, *optional*, defaults to `[2, 5, 8, 11]`):
            The indexes of the global attention layers.
        num_pos_feats (`int`, *optional*, defaults to 128):
            The dimensionality of the position embedding.
        mlp_dim (`int`, *optional*):
            The dimensionality of the MLP layer in the Transformer encoder. If `None`, defaults to `mlp_ratio *
            hidden_size`.
    """

    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=768,
        output_channels=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=1024,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-06,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        mlp_ratio=4.0,
        use_abs_pos=True,
        use_rel_pos=True,
        window_size=14,
        # Fixed: use a None sentinel instead of a mutable list default, which would be
        # shared (and mutable) across every instance created without this argument.
        global_attn_indexes=None,
        num_pos_feats=128,
        mlp_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.output_channels = output_channels
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.qkv_bias = qkv_bias
        self.mlp_ratio = mlp_ratio
        self.use_abs_pos = use_abs_pos
        self.use_rel_pos = use_rel_pos
        self.window_size = window_size
        # A fresh list per instance; default matches the documented [2, 5, 8, 11].
        self.global_attn_indexes = [2, 5, 8, 11] if global_attn_indexes is None else global_attn_indexes
        self.num_pos_feats = num_pos_feats
        # Derived: MLP width defaults to mlp_ratio * hidden_size unless given explicitly.
        self.mlp_dim = int(hidden_size * mlp_ratio) if mlp_dim is None else mlp_dim
+
+
class SamConfig(PretrainedConfig):
    r"""
    [`SamConfig`] is the configuration class to store the configuration of a [`SamModel`]. It is used to instantiate a
    SAM model according to the specified arguments, defining the vision model, prompt-encoder model and mask decoder
    configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    SAM-ViT-H [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (Union[`dict`, `SamVisionConfig`], *optional*):
            Dictionary of configuration options used to initialize [`SamVisionConfig`].
        prompt_encoder_config (Union[`dict`, `SamPromptEncoderConfig`], *optional*):
            Dictionary of configuration options used to initialize [`SamPromptEncoderConfig`].
        mask_decoder_config (Union[`dict`, `SamMaskDecoderConfig`], *optional*):
            Dictionary of configuration options used to initialize [`SamMaskDecoderConfig`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import (
    ...     SamVisionConfig,
    ...     SamPromptEncoderConfig,
    ...     SamMaskDecoderConfig,
    ...     SamModel,
    ... )

    >>> # Initializing a SamConfig with `"facebook/sam-vit-huge"` style configuration
    >>> configuration = SamConfig()

    >>> # Initializing a SamModel (with random weights) from the `"facebook/sam-vit-huge"` style configuration
    >>> model = SamModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a SamConfig from a SamVisionConfig, SamPromptEncoderConfig, and SamMaskDecoderConfig

    >>> # Initializing SAM vision, SAM Q-Former and language model configurations
    >>> vision_config = SamVisionConfig()
    >>> prompt_encoder_config = SamPromptEncoderConfig()
    >>> mask_decoder_config = SamMaskDecoderConfig()

    >>> config = SamConfig(vision_config, prompt_encoder_config, mask_decoder_config)
    ```"""

    model_type = "sam"
    sub_configs = {
        "prompt_encoder_config": SamPromptEncoderConfig,
        "mask_decoder_config": SamMaskDecoderConfig,
        "vision_config": SamVisionConfig,
    }

    def __init__(
        self,
        vision_config=None,
        prompt_encoder_config=None,
        mask_decoder_config=None,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Missing sub-configs fall back to empty dicts, i.e. the sub-config defaults.
        vision_config = vision_config if vision_config is not None else {}
        prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {}
        mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {}

        # Accept either config objects or plain dicts; normalize everything to dicts
        # so each sub-config is re-instantiated (a defensive copy) below.
        if isinstance(vision_config, SamVisionConfig):
            vision_config = vision_config.to_dict()
        if isinstance(prompt_encoder_config, SamPromptEncoderConfig):
            prompt_encoder_config = prompt_encoder_config.to_dict()
        if isinstance(mask_decoder_config, SamMaskDecoderConfig):
            mask_decoder_config = mask_decoder_config.to_dict()

        self.vision_config = SamVisionConfig(**vision_config)
        self.prompt_encoder_config = SamPromptEncoderConfig(**prompt_encoder_config)
        self.mask_decoder_config = SamMaskDecoderConfig(**mask_decoder_config)
        self.initializer_range = initializer_range
+
+
+__all__ = ["SamConfig", "SamMaskDecoderConfig", "SamPromptEncoderConfig", "SamVisionConfig"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/image_processing_sam.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/image_processing_sam.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1cace89fa1f7673af85a949157408babe6409c8
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/image_processing_sam.py
@@ -0,0 +1,1478 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for SAM."""
+
+import math
+from copy import deepcopy
+from itertools import product
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import convert_to_rgb, pad, resize, to_channel_dimension_format
+from ...image_utils import (
+ IMAGENET_DEFAULT_MEAN,
+ IMAGENET_DEFAULT_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ get_image_size,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_preprocess_arguments,
+)
+from ...utils import (
+ TensorType,
+ filter_out_non_signature_kwargs,
+ is_tf_available,
+ is_torch_available,
+ is_torchvision_available,
+ logging,
+ requires_backends,
+)
+
+
+if is_torch_available():
+ import torch
+ import torch.nn.functional as F
+
+if is_torchvision_available():
+ from torchvision.ops.boxes import batched_nms
+
+if is_tf_available():
+ import tensorflow as tf
+ from tensorflow.experimental import numpy as tnp
+
+ from ...tf_utils import flatten, shape_list
+
+logger = logging.get_logger(__name__)
+
+
+class SamImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a SAM image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
+ `do_resize` parameter in the `preprocess` method.
+ size (`dict`, *optional*, defaults to `{"longest_edge": 1024}`):
+ Size of the output image after resizing. Resizes the longest edge of the image to match
+ `size["longest_edge"]` while maintaining the aspect ratio. Can be overridden by the `size` parameter in the
+ `preprocess` method.
+ mask_size (`dict`, *optional*, defaults to `{"longest_edge": 256}`):
+ Size of the output segmentation map after resizing. Resizes the longest edge of the image to match
+ `size["longest_edge"]` while maintaining the aspect ratio. Can be overridden by the `mask_size` parameter
+ in the `preprocess` method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
+ `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
+ `do_rescale` parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
+ overridden by the `rescale_factor` parameter in the `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ do_pad (`bool`, *optional*, defaults to `True`):
+ Whether to pad the image to the specified `pad_size`. Can be overridden by the `do_pad` parameter in the
+ `preprocess` method.
+ pad_size (`dict`, *optional*, defaults to `{"height": 1024, "width": 1024}`):
+ Size of the output image after padding. Can be overridden by the `pad_size` parameter in the `preprocess`
+ method.
+ mask_pad_size (`dict`, *optional*, defaults to `{"height": 256, "width": 256}`):
+ Size of the output segmentation map after padding. Can be overridden by the `mask_pad_size` parameter in
+ the `preprocess` method.
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
+ Whether to convert the image to RGB.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ mask_size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_pad: bool = True,
+ pad_size: int = None,
+ mask_pad_size: int = None,
+ do_convert_rgb: bool = True,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"longest_edge": 1024}
+ size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size
+
+ pad_size = pad_size if pad_size is not None else {"height": 1024, "width": 1024}
+ pad_size = get_size_dict(pad_size, default_to_square=True)
+
+ mask_size = mask_size if mask_size is not None else {"longest_edge": 256}
+ mask_size = (
+ get_size_dict(max_size=mask_size, default_to_square=False)
+ if not isinstance(mask_size, dict)
+ else mask_size
+ )
+
+ mask_pad_size = mask_pad_size if mask_pad_size is not None else {"height": 256, "width": 256}
+ mask_pad_size = get_size_dict(mask_pad_size, default_to_square=True)
+
+ self.do_resize = do_resize
+ self.size = size
+ self.mask_size = mask_size
+ self.resample = resample
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
+ self.do_pad = do_pad
+ self.pad_size = pad_size
+ self.mask_pad_size = mask_pad_size
+ self.do_convert_rgb = do_convert_rgb
+
+ def pad_image(
+ self,
+ image: np.ndarray,
+ pad_size: Dict[str, int],
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Pad an image to `(pad_size["height"], pad_size["width"])` with zeros to the right and bottom.
+
+ Args:
+ image (`np.ndarray`):
+ Image to pad.
+ pad_size (`Dict[str, int]`):
+ Size of the output image after padding.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The data format of the image. Can be either "channels_first" or "channels_last". If `None`, the
+ `data_format` of the `image` will be used.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ output_height, output_width = pad_size["height"], pad_size["width"]
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
+
+ pad_width = output_width - input_width
+ pad_height = output_height - input_height
+
+ padded_image = pad(
+ image,
+ ((0, pad_height), (0, pad_width)),
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+ return padded_image
+
+ def _get_preprocess_shape(self, old_shape: Tuple[int, int], longest_edge: int):
+ """
+ Compute the output size given input size and target long side length.
+ """
+ oldh, oldw = old_shape
+ scale = longest_edge * 1.0 / max(oldh, oldw)
+ newh, neww = oldh * scale, oldw * scale
+ newh = int(newh + 0.5)
+ neww = int(neww + 0.5)
+ return (newh, neww)
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image to `(size["height"], size["width"])`.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary in the format `{"longest_edge": int}` specifying the size of the output image. The longest
+ edge of the image will be resized to the specified size, while the other edge will be resized to
+ maintain the aspect ratio.
+ resample:
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
+ image is used. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+
+ Returns:
+ `np.ndarray`: The resized image.
+ """
+ size = get_size_dict(size)
+ if "longest_edge" not in size:
+ raise ValueError(f"The `size` dictionary must contain the key `longest_edge`. Got {size.keys()}")
+ input_size = get_image_size(image, channel_dim=input_data_format)
+ output_height, output_width = self._get_preprocess_shape(input_size, size["longest_edge"])
+ return resize(
+ image,
+ size=(output_height, output_width),
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def _preprocess(
+ self,
+ image: ImageInput,
+ do_resize: bool,
+ do_rescale: bool,
+ do_normalize: bool,
+ size: Optional[Dict[str, int]] = None,
+ resample: PILImageResampling = None,
+ rescale_factor: Optional[float] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_pad: Optional[bool] = None,
+ pad_size: Optional[Dict[str, int]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ):
+ if do_resize:
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+ reshaped_input_size = get_image_size(image, channel_dim=input_data_format)
+
+ if do_rescale:
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+
+ if do_normalize:
+ image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+
+ if do_pad:
+ image = self.pad_image(image=image, pad_size=pad_size, input_data_format=input_data_format)
+
+ return image, reshaped_input_size
+
+ def _preprocess_image(
+ self,
+ image: ImageInput,
+ do_resize: Optional[bool] = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_rescale: bool = None,
+ rescale_factor: Optional[float] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_pad: Optional[bool] = None,
+ pad_size: Optional[Dict[str, int]] = None,
+ do_convert_rgb: Optional[bool] = None,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]]:
+ image = to_numpy_array(image)
+
+ # PIL RGBA images are converted to RGB
+ if do_convert_rgb:
+ image = convert_to_rgb(image)
+
+ # All transformations expect numpy arrays.
+ image = to_numpy_array(image)
+
+ if do_rescale and is_scaled_image(image):
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(image)
+
+ original_size = get_image_size(image, channel_dim=input_data_format)
+
+ image, reshaped_input_size = self._preprocess(
+ image=image,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_pad=do_pad,
+ pad_size=pad_size,
+ input_data_format=input_data_format,
+ )
+
+ if data_format is not None:
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+
+ return image, original_size, reshaped_input_size
+
+ def _preprocess_mask(
+ self,
+ segmentation_map: ImageInput,
+ do_resize: Optional[bool] = None,
+ mask_size: Dict[str, int] = None,
+ do_pad: Optional[bool] = None,
+ mask_pad_size: Optional[Dict[str, int]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ segmentation_map = to_numpy_array(segmentation_map)
+
+ # Add channel dimension if missing - needed for certain transformations
+ if segmentation_map.ndim == 2:
+ added_channel_dim = True
+ segmentation_map = segmentation_map[None, ...]
+ input_data_format = ChannelDimension.FIRST
+ else:
+ added_channel_dim = False
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
+
+ original_size = get_image_size(segmentation_map, channel_dim=input_data_format)
+
+ segmentation_map, _ = self._preprocess(
+ image=segmentation_map,
+ do_resize=do_resize,
+ size=mask_size,
+ resample=PILImageResampling.NEAREST,
+ do_rescale=False,
+ do_normalize=False,
+ do_pad=do_pad,
+ pad_size=mask_pad_size,
+ input_data_format=input_data_format,
+ )
+
+ # Remove extra channel dimension if added for processing
+ if added_channel_dim:
+ segmentation_map = segmentation_map.squeeze(0)
+ segmentation_map = segmentation_map.astype(np.int64)
+
+ return segmentation_map, original_size
+
+ @filter_out_non_signature_kwargs()
+ def preprocess(
+ self,
+ images: ImageInput,
+ segmentation_maps: Optional[ImageInput] = None,
+ do_resize: Optional[bool] = None,
+ size: Optional[Dict[str, int]] = None,
+ mask_size: Optional[Dict[str, int]] = None,
+ resample: Optional["PILImageResampling"] = None,
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[Union[int, float]] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_pad: Optional[bool] = None,
+ pad_size: Optional[Dict[str, int]] = None,
+ mask_pad_size: Optional[Dict[str, int]] = None,
+ do_convert_rgb: Optional[bool] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ):
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ segmentation_maps (`ImageInput`, *optional*):
+ Segmentation map to preprocess.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Controls the size of the image after `resize`. The longest edge of the image is resized to
+ `size["longest_edge"]` whilst preserving the aspect ratio.
+ mask_size (`Dict[str, int]`, *optional*, defaults to `self.mask_size`):
+ Controls the size of the segmentation map after `resize`. The longest edge of the image is resized to
+ `size["longest_edge"]` whilst preserving the aspect ratio.
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image pixel values by rescaling factor.
+ rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to apply to the image pixel values.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean to normalize the image by if `do_normalize` is set to `True`.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
+ Whether to pad the image.
+ pad_size (`Dict[str, int]`, *optional*, defaults to `self.pad_size`):
+ Controls the size of the padding applied to the image. The image is padded to `pad_size["height"]` and
+ `pad_size["width"]` if `do_pad` is set to `True`.
+ mask_pad_size (`Dict[str, int]`, *optional*, defaults to `self.mask_pad_size`):
+ Controls the size of the padding applied to the segmentation map. The image is padded to
+ `mask_pad_size["height"]` and `mask_pad_size["width"]` if `do_pad` is set to `True`.
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
+ Whether to convert the image to RGB.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ size = size if size is not None else self.size
+ size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size
+ mask_size = mask_size if mask_size is not None else self.mask_size
+ mask_size = (
+ get_size_dict(max_size=mask_size, default_to_square=False)
+ if not isinstance(mask_size, dict)
+ else mask_size
+ )
+ resample = resample if resample is not None else self.resample
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ do_pad = do_pad if do_pad is not None else self.do_pad
+ pad_size = pad_size if pad_size is not None else self.pad_size
+ pad_size = get_size_dict(pad_size, default_to_square=True)
+ mask_pad_size = mask_pad_size if mask_pad_size is not None else self.mask_pad_size
+ mask_pad_size = get_size_dict(mask_pad_size, default_to_square=True)
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
+
+ images = make_list_of_images(images)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ if segmentation_maps is not None:
+ segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)
+
+ if not valid_images(segmentation_maps):
+ raise ValueError(
+ "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_pad=do_pad,
+ size_divisibility=pad_size, # Here _preprocess needs do_pad and pad_size.
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ images, original_sizes, reshaped_input_sizes = zip(
+ *(
+ self._preprocess_image(
+ image=img,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_pad=do_pad,
+ pad_size=pad_size,
+ do_convert_rgb=do_convert_rgb,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ for img in images
+ )
+ )
+
+ data = {
+ "pixel_values": images,
+ "original_sizes": original_sizes,
+ "reshaped_input_sizes": reshaped_input_sizes,
+ }
+
+ if segmentation_maps is not None:
+ segmentation_maps, original_mask_sizes = zip(
+ *(
+ self._preprocess_mask(
+ segmentation_map=mask,
+ do_resize=do_resize,
+ mask_size=mask_size,
+ do_pad=do_pad,
+ mask_pad_size=mask_pad_size,
+ input_data_format=input_data_format,
+ )
+ for mask in segmentation_maps
+ )
+ )
+
+ # masks should start out the same size as input images
+ assert all(
+ original_im_size == original_mask_size
+ for original_im_size, original_mask_size in zip(original_sizes, original_mask_sizes)
+ ), "Segmentation maps should be the same size as input images."
+
+ data["labels"] = segmentation_maps
+
+ return BatchFeature(data=data, tensor_type=return_tensors)
+
+ def post_process_masks(
+ self,
+ masks,
+ original_sizes,
+ reshaped_input_sizes,
+ mask_threshold=0.0,
+ binarize=True,
+ pad_size=None,
+ return_tensors="pt",
+ ):
+ """
+ Remove padding and upscale masks to the original image size.
+
+ Args:
+ masks (`Union[List[torch.Tensor], List[np.ndarray], List[tf.Tensor]]`):
+ Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
+ original_sizes (`Union[torch.Tensor, tf.Tensor, List[Tuple[int,int]]]`):
+ The original sizes of each image before it was resized to the model's expected input shape, in (height,
+ width) format.
+ reshaped_input_sizes (`Union[torch.Tensor, tf.Tensor, List[Tuple[int,int]]]`):
+ The size of each image as it is fed to the model, in (height, width) format. Used to remove padding.
+ mask_threshold (`float`, *optional*, defaults to 0.0):
+ The threshold to use for binarizing the masks.
+ binarize (`bool`, *optional*, defaults to `True`):
+ Whether to binarize the masks.
+ pad_size (`int`, *optional*, defaults to `self.pad_size`):
+ The target size the images were padded to before being passed to the model. If None, the target size is
+ assumed to be the processor's `pad_size`.
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
+ If `"pt"`, return PyTorch tensors. If `"tf"`, return TensorFlow tensors.
+ Returns:
+ (`Union[torch.Tensor, tf.Tensor]`): Batched masks in batch_size, num_channels, height, width) format, where
+ (height, width) is given by original_size.
+ """
+ if return_tensors == "pt":
+ return self._post_process_masks_pt(
+ masks=masks,
+ original_sizes=original_sizes,
+ reshaped_input_sizes=reshaped_input_sizes,
+ mask_threshold=mask_threshold,
+ binarize=binarize,
+ pad_size=pad_size,
+ )
+ elif return_tensors == "tf":
+ return self._post_process_masks_tf(
+ masks=masks,
+ original_sizes=original_sizes,
+ reshaped_input_sizes=reshaped_input_sizes,
+ mask_threshold=mask_threshold,
+ binarize=binarize,
+ pad_size=pad_size,
+ )
+ else:
+ raise ValueError("return_tensors must be either 'pt' or 'tf'")
+
+ def _post_process_masks_pt(
+ self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None
+ ):
+ """
+ Remove padding and upscale masks to the original image size.
+
+ Args:
+ masks (`Union[List[torch.Tensor], List[np.ndarray]]`):
+ Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
+ original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
+ The original sizes of each image before it was resized to the model's expected input shape, in (height,
+ width) format.
+ reshaped_input_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
+ The size of each image as it is fed to the model, in (height, width) format. Used to remove padding.
+ mask_threshold (`float`, *optional*, defaults to 0.0):
+ The threshold to use for binarizing the masks.
+ binarize (`bool`, *optional*, defaults to `True`):
+ Whether to binarize the masks.
+ pad_size (`int`, *optional*, defaults to `self.pad_size`):
+ The target size the images were padded to before being passed to the model. If None, the target size is
+ assumed to be the processor's `pad_size`.
+ Returns:
+ (`torch.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width)
+ is given by original_size.
+ """
+ requires_backends(self, ["torch"])
+ pad_size = self.pad_size if pad_size is None else pad_size
+ target_image_size = (pad_size["height"], pad_size["width"])
+ if isinstance(original_sizes, (torch.Tensor, np.ndarray)):
+ original_sizes = original_sizes.tolist()
+ if isinstance(reshaped_input_sizes, (torch.Tensor, np.ndarray)):
+ reshaped_input_sizes = reshaped_input_sizes.tolist()
+ output_masks = []
+ for i, original_size in enumerate(original_sizes):
+ if isinstance(masks[i], np.ndarray):
+ masks[i] = torch.from_numpy(masks[i])
+ elif not isinstance(masks[i], torch.Tensor):
+ raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
+ interpolated_mask = F.interpolate(masks[i], target_image_size, mode="bilinear", align_corners=False)
+ interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]]
+ interpolated_mask = F.interpolate(interpolated_mask, original_size, mode="bilinear", align_corners=False)
+ if binarize:
+ interpolated_mask = interpolated_mask > mask_threshold
+ output_masks.append(interpolated_mask)
+
+ return output_masks
+
+ def _post_process_masks_tf(
+ self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None
+ ):
+ """
+ Remove padding and upscale masks to the original image size.
+
+ Args:
+ masks (`tf.Tensor`):
+ Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
+ original_sizes (`tf.Tensor`):
+ The original size of the images before resizing for input to the model, in (height, width) format.
+ reshaped_input_sizes (`tf.Tensor`):
+ The size of the image input to the model, in (height, width) format. Used to remove padding.
+ mask_threshold (`float`, *optional*, defaults to 0.0):
+ The threshold to use for binarizing the masks.
+ binarize (`bool`, *optional*, defaults to `True`):
+ Whether to binarize the masks.
+ pad_size (`int`, *optional*, defaults to `self.pad_size`):
+ The target size the images were padded to before being passed to the model. If None, the target size is
+ assumed to be the processor's `pad_size`.
+ Returns:
+ (`tf.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width) is
+ given by original_size.
+ """
+ requires_backends(self, ["tf"])
+ pad_size = self.pad_size if pad_size is None else pad_size
+ target_image_size = (pad_size["height"], pad_size["width"])
+
+ output_masks = []
+ for i, original_size in enumerate(original_sizes):
+ # tf.image expects NHWC, we transpose the NCHW inputs for it
+ mask = tf.transpose(masks[i], perm=[0, 2, 3, 1])
+ interpolated_mask = tf.image.resize(mask, target_image_size, method="bilinear")
+ interpolated_mask = interpolated_mask[:, : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1], :]
+ interpolated_mask = tf.image.resize(interpolated_mask, original_size, method="bilinear")
+ if binarize:
+ interpolated_mask = interpolated_mask > mask_threshold
+ # And then we transpose them back at the end
+ output_masks.append(tf.transpose(interpolated_mask, perm=[0, 3, 1, 2]))
+
+ return output_masks
+
+ def post_process_for_mask_generation(
+ self, all_masks, all_scores, all_boxes, crops_nms_thresh, return_tensors="pt"
+ ):
+ """
+ Post processes mask that are generated by calling the Non Maximum Suppression algorithm on the predicted masks.
+
+ Args:
+ all_masks (`Union[List[torch.Tensor], List[tf.Tensor]]`):
+ List of all predicted segmentation masks
+ all_scores (`Union[List[torch.Tensor], List[tf.Tensor]]`):
+ List of all predicted iou scores
+ all_boxes (`Union[List[torch.Tensor], List[tf.Tensor]]`):
+ List of all bounding boxes of the predicted masks
+ crops_nms_thresh (`float`):
+ Threshold for NMS (Non Maximum Suppression) algorithm.
+ return_tensors (`str`, *optional*, defaults to `pt`):
+ If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`.
+ """
+ if return_tensors == "pt":
+ return _postprocess_for_mg(all_masks, all_scores, all_boxes, crops_nms_thresh)
+ elif return_tensors == "tf":
+ return _postprocess_for_mg_tf(all_masks, all_scores, all_boxes, crops_nms_thresh)
+
+ def generate_crop_boxes(
+ self,
+ image,
+ target_size,
+ crop_n_layers: int = 0,
+ overlap_ratio: float = 512 / 1500,
+ points_per_crop: Optional[int] = 32,
+ crop_n_points_downscale_factor: Optional[List[int]] = 1,
+ device: Optional["torch.device"] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ return_tensors: str = "pt",
+ ):
+ """
+ Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.
+
+ Args:
+ image (`np.array`):
+ Input original image
+ target_size (`int`):
+ Target size of the resized image
+ crop_n_layers (`int`, *optional*, defaults to 0):
+ If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where
+ each layer has 2**i_layer number of image crops.
+ overlap_ratio (`float`, *optional*, defaults to 512/1500):
+ Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of
+ the image length. Later layers with more crops scale down this overlap.
+ points_per_crop (`int`, *optional*, defaults to 32):
+ Number of points to sample from each crop.
+ crop_n_points_downscale_factor (`List[int]`, *optional*, defaults to 1):
+ The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
+ device (`torch.device`, *optional*, defaults to None):
+ Device to use for the computation. If None, cpu will be used.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ return_tensors (`str`, *optional*, defaults to `pt`):
+ If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`.
+ """
+ crop_boxes, points_per_crop, cropped_images, input_labels = _generate_crop_boxes(
+ image,
+ target_size,
+ crop_n_layers,
+ overlap_ratio,
+ points_per_crop,
+ crop_n_points_downscale_factor,
+ input_data_format,
+ )
+ if return_tensors == "pt":
+ if device is None:
+ device = torch.device("cpu")
+ crop_boxes = torch.tensor(crop_boxes, device=device)
+ points_per_crop = torch.tensor(points_per_crop, device=device)
+ # cropped_images stays as np
+ input_labels = torch.tensor(input_labels, device=device)
+
+ elif return_tensors == "tf":
+ if device is not None:
+ raise ValueError("device is not a supported argument when return_tensors is tf!")
+ crop_boxes = tf.convert_to_tensor(crop_boxes)
+ points_per_crop = tf.convert_to_tensor(points_per_crop)
+ # cropped_images stays as np
+ input_labels = tf.convert_to_tensor(input_labels)
+ else:
+ raise ValueError("return_tensors must be either 'pt' or 'tf'.")
+ return crop_boxes, points_per_crop, cropped_images, input_labels
+
+ def filter_masks(
+ self,
+ masks,
+ iou_scores,
+ original_size,
+ cropped_box_image,
+ pred_iou_thresh=0.88,
+ stability_score_thresh=0.95,
+ mask_threshold=0,
+ stability_score_offset=1,
+ return_tensors="pt",
+ ):
+ """
+ Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being
+ that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability
+ score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to
+ bounding boxes and pad the predicted masks if necessary.
+
+ Args:
+ masks (`Union[torch.Tensor, tf.Tensor]`):
+ Input masks.
+ iou_scores (`Union[torch.Tensor, tf.Tensor]`):
+ List of IoU scores.
+ original_size (`Tuple[int,int]`):
+ Size of the orginal image.
+ cropped_box_image (`np.array`):
+ The cropped image.
+ pred_iou_thresh (`float`, *optional*, defaults to 0.88):
+ The threshold for the iou scores.
+ stability_score_thresh (`float`, *optional*, defaults to 0.95):
+ The threshold for the stability score.
+ mask_threshold (`float`, *optional*, defaults to 0):
+ The threshold for the predicted masks.
+ stability_score_offset (`float`, *optional*, defaults to 1):
+ The offset for the stability score used in the `_compute_stability_score` method.
+ return_tensors (`str`, *optional*, defaults to `pt`):
+ If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`.
+ """
+ if return_tensors == "pt":
+ return self._filter_masks_pt(
+ masks=masks,
+ iou_scores=iou_scores,
+ original_size=original_size,
+ cropped_box_image=cropped_box_image,
+ pred_iou_thresh=pred_iou_thresh,
+ stability_score_thresh=stability_score_thresh,
+ mask_threshold=mask_threshold,
+ stability_score_offset=stability_score_offset,
+ )
+ elif return_tensors == "tf":
+ return self._filter_masks_tf(
+ masks=masks,
+ iou_scores=iou_scores,
+ original_size=original_size,
+ cropped_box_image=cropped_box_image,
+ pred_iou_thresh=pred_iou_thresh,
+ stability_score_thresh=stability_score_thresh,
+ mask_threshold=mask_threshold,
+ stability_score_offset=stability_score_offset,
+ )
+
    def _filter_masks_pt(
        self,
        masks,
        iou_scores,
        original_size,
        cropped_box_image,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        """
        Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being
        that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability
        score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to
        bounding boxes and pad the predicted masks if necessary.

        Args:
            masks (`torch.Tensor`):
                Input masks.
            iou_scores (`torch.Tensor`):
                List of IoU scores.
            original_size (`Tuple[int,int]`):
                Size of the orginal image.
            cropped_box_image (`np.array`):
                The cropped image.
            pred_iou_thresh (`float`, *optional*, defaults to 0.88):
                The threshold for the iou scores.
            stability_score_thresh (`float`, *optional*, defaults to 0.95):
                The threshold for the stability score.
            mask_threshold (`float`, *optional*, defaults to 0):
                The threshold for the predicted masks.
            stability_score_offset (`float`, *optional*, defaults to 1):
                The offset for the stability score used in the `_compute_stability_score` method.

        Returns:
            `Tuple`: kept masks encoded as RLE dicts (see `_mask_to_rle_pytorch`), their IoU scores, and their
            bounding boxes in XYXY format (see `_batched_mask_to_box`).
        """
        requires_backends(self, ["torch"])
        original_height, original_width = original_size
        # Merge the two leading dims so filtering operates on one flat list of masks.
        # assumes masks is (batch, point_batch, height, width) and iou_scores is (batch, point_batch) — TODO confirm
        iou_scores = iou_scores.flatten(0, 1)
        masks = masks.flatten(0, 1)

        if masks.shape[0] != iou_scores.shape[0]:
            raise ValueError("masks and iou_scores must have the same batch size.")

        # Keep score comparisons on the same device as the masks.
        if masks.device != iou_scores.device:
            iou_scores = iou_scores.to(masks.device)

        batch_size = masks.shape[0]

        # Start by keeping everything; each criterion below ANDs in its own filter.
        keep_mask = torch.ones(batch_size, dtype=torch.bool, device=masks.device)

        if pred_iou_thresh > 0.0:
            keep_mask = keep_mask & (iou_scores > pred_iou_thresh)

        # compute stability score
        if stability_score_thresh > 0.0:
            stability_scores = _compute_stability_score_pt(masks, mask_threshold, stability_score_offset)
            keep_mask = keep_mask & (stability_scores > stability_score_thresh)

        scores = iou_scores[keep_mask]
        masks = masks[keep_mask]

        # binarize masks
        masks = masks > mask_threshold
        converted_boxes = _batched_mask_to_box(masks)

        # Drop masks whose box hugs the crop border but not the original image border —
        # such objects are likely truncated by the crop.
        keep_mask = ~_is_box_near_crop_edge(
            converted_boxes, cropped_box_image, [0, 0, original_width, original_height]
        )

        scores = scores[keep_mask]
        masks = masks[keep_mask]
        converted_boxes = converted_boxes[keep_mask]

        # Re-embed the crop-level masks into the full-image canvas.
        masks = _pad_masks(masks, cropped_box_image, original_height, original_width)
        # conversion to rle is necessary to run non-maximum suppresion
        masks = _mask_to_rle_pytorch(masks)

        return masks, scores, converted_boxes
+
    def _filter_masks_tf(
        self,
        masks,
        iou_scores,
        original_size,
        cropped_box_image,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        """
        Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being
        that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability
        score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to
        bounding boxes and pad the predicted masks if necessary.

        Args:
            masks (`tf.Tensor`):
                Input masks.
            iou_scores (`tf.Tensor`):
                List of IoU scores.
            original_size (`Tuple[int,int]`):
                Size of the orginal image.
            cropped_box_image (`np.array`):
                The cropped image.
            pred_iou_thresh (`float`, *optional*, defaults to 0.88):
                The threshold for the iou scores.
            stability_score_thresh (`float`, *optional*, defaults to 0.95):
                The threshold for the stability score.
            mask_threshold (`float`, *optional*, defaults to 0):
                The threshold for the predicted masks.
            stability_score_offset (`float`, *optional*, defaults to 1):
                The offset for the stability score used in the `_compute_stability_score` method.

        Returns:
            `Tuple`: kept masks encoded as RLE dicts (see `_mask_to_rle_tf`), their IoU scores, and their
            bounding boxes in XYXY format (see `_batched_mask_to_box_tf`).
        """
        requires_backends(self, ["tf"])
        original_height, original_width = original_size
        # Merge the two leading dims, mirroring the torch path's `flatten(0, 1)`.
        # NOTE(review): the target shape mixes an int with a TensorShape slice (`shape[2:]`);
        # `[shape[0] * shape[1], *shape[2:]]` looks like the intent — confirm tf.reshape accepts this form.
        iou_scores = tf.reshape(iou_scores, [iou_scores.shape[0] * iou_scores.shape[1], iou_scores.shape[2:]])
        masks = tf.reshape(masks, [masks.shape[0] * masks.shape[1], masks.shape[2:]])

        if masks.shape[0] != iou_scores.shape[0]:
            raise ValueError("masks and iou_scores must have the same batch size.")

        batch_size = masks.shape[0]

        # Start by keeping everything; each criterion below ANDs in its own filter.
        keep_mask = tf.ones(batch_size, dtype=tf.bool)

        if pred_iou_thresh > 0.0:
            keep_mask = keep_mask & (iou_scores > pred_iou_thresh)

        # compute stability score
        if stability_score_thresh > 0.0:
            stability_scores = _compute_stability_score_tf(masks, mask_threshold, stability_score_offset)
            keep_mask = keep_mask & (stability_scores > stability_score_thresh)

        # NOTE(review): numpy-style boolean-mask indexing on tf tensors — presumably relies on
        # TF's numpy-compatible __getitem__; `tf.boolean_mask` would be the explicit form. Verify.
        scores = iou_scores[keep_mask]
        masks = masks[keep_mask]

        # binarize masks
        masks = masks > mask_threshold
        converted_boxes = _batched_mask_to_box_tf(masks)

        # Drop masks whose box hugs the crop border but not the original image border —
        # such objects are likely truncated by the crop.
        keep_mask = ~_is_box_near_crop_edge_tf(
            converted_boxes, cropped_box_image, [0, 0, original_width, original_height]
        )

        scores = scores[keep_mask]
        masks = masks[keep_mask]
        converted_boxes = converted_boxes[keep_mask]

        # Re-embed the crop-level masks into the full-image canvas.
        masks = _pad_masks_tf(masks, cropped_box_image, original_height, original_width)
        # conversion to rle is necessary to run non-maximum suppresion
        masks = _mask_to_rle_tf(masks)

        return masks, scores, converted_boxes
+
+
+def _compute_stability_score_pt(masks: "torch.Tensor", mask_threshold: float, stability_score_offset: int):
+ # One mask is always contained inside the other.
+ # Save memory by preventing unnecesary cast to torch.int64
+ intersections = (
+ (masks > (mask_threshold + stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
+ )
+ unions = (masks > (mask_threshold - stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
+ stability_scores = intersections / unions
+ return stability_scores
+
+
def _compute_stability_score_tf(masks: "tf.Tensor", mask_threshold: float, stability_score_offset: int):
    """
    Stability score per mask: IoU between the mask binarized at a raised threshold and at a lowered one.
    A score near 1 means the mask barely changes with the threshold.
    """
    # Torch does Py3-style division but TF does floor division with ints. We cast to float32 in TF to make sure
    # we get the right division results.
    # `tf.count_nonzero` is the removed TF1-era name; `tf.math.count_nonzero` is the TF2 endpoint.
    intersections = tf.math.count_nonzero(
        masks > (mask_threshold + stability_score_offset), axis=[-1, -2], dtype=tf.float32
    )
    unions = tf.math.count_nonzero(
        masks > (mask_threshold - stability_score_offset), axis=[-1, -2], dtype=tf.float32
    )
    stability_scores = intersections / unions
    return stability_scores
+
+
+def _build_point_grid(n_per_side: int) -> np.ndarray:
+ """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
+ offset = 1 / (2 * n_per_side)
+ points_one_side = np.linspace(offset, 1 - offset, n_per_side)
+ points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
+ points_y = np.tile(points_one_side[:, None], (1, n_per_side))
+ points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
+ return points
+
+
+def _normalize_coordinates(
+ target_size: int, coords: np.ndarray, original_size: Tuple[int, int], is_bounding_box=False
+) -> np.ndarray:
+ """
+ Expects a numpy array of length 2 in the final dimension. Requires the original image size in (height, width)
+ format.
+ """
+ old_height, old_width = original_size
+
+ scale = target_size * 1.0 / max(old_height, old_width)
+ new_height, new_width = old_height * scale, old_width * scale
+ new_width = int(new_width + 0.5)
+ new_height = int(new_height + 0.5)
+
+ coords = deepcopy(coords).astype(float)
+
+ if is_bounding_box:
+ coords = coords.reshape(-1, 2, 2)
+
+ coords[..., 0] = coords[..., 0] * (new_width / old_width)
+ coords[..., 1] = coords[..., 1] * (new_height / old_height)
+
+ if is_bounding_box:
+ coords = coords.reshape(-1, 4)
+
+ return coords
+
+
def _generate_crop_boxes(
    image,
    target_size: int,  # Is it tuple here?
    crop_n_layers: int = 0,
    overlap_ratio: float = 512 / 1500,
    points_per_crop: Optional[int] = 32,
    crop_n_points_downscale_factor: Optional[List[int]] = 1,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> Tuple[List[List[int]], List[int]]:
    """
    Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.

    Args:
        image (Union[`numpy.ndarray`, `PIL.Image`, `torch.Tensor`]):
            Image to generate crops for.
        target_size (`int`):
            Size of the smallest crop.
        crop_n_layers (`int`, *optional*):
            If `crops_n_layers>0`, mask prediction will be run again on crops of the image. Sets the number of layers
            to run, where each layer has 2**i_layer number of image crops.
        overlap_ratio (`int`, *optional*):
            Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the
            image length. Later layers with more crops scale down this overlap.
        points_per_crop (`int`, *optional*):
            Number of points to sample per crop.
        crop_n_points_downscale_factor (`int`, *optional*):
            The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
        input_data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the input image. If not provided, it will be inferred.
    """
    if isinstance(image, list):
        raise ValueError("Only one image is allowed for crop generation.")
    image = to_numpy_array(image)
    original_size = get_image_size(image, input_data_format)

    # One unit-square point grid per layer; deeper layers use fewer points per side.
    points_grid = [
        _build_point_grid(int(points_per_crop / (crop_n_points_downscale_factor**layer)))
        for layer in range(crop_n_layers + 1)
    ]

    crop_boxes, layer_idxs = _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size)

    cropped_images, point_grid_per_crop = _generate_crop_images(
        crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format
    )

    crop_boxes = np.array(crop_boxes, dtype=np.float32)
    # Add a leading batch dim and swap the (crop, point) axes to (point, crop).
    points_per_crop = np.transpose(np.array([point_grid_per_crop]), axes=(0, 2, 1, 3))

    # Every sampled point is a positive ("foreground") prompt.
    input_labels = np.ones_like(points_per_crop[:, :, :, 0], dtype=np.int64)

    return crop_boxes, points_per_crop, cropped_images, input_labels
+
+
+def _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size):
+ """
+ Generates 2 ** (layers idx + 1) crops for each crop_n_layers. Crops are in the XYWH format : The XYWH format
+ consists of the following required indices:
+ - X: X coordinate of the top left of the bounding box
+ - Y: Y coordinate of the top left of the bounding box
+ - W: width of the bounding box
+ - H: height of the bounding box
+ """
+ crop_boxes, layer_idxs = [], []
+ im_height, im_width = original_size
+ short_side = min(im_height, im_width)
+
+ # Original image
+ crop_boxes.append([0, 0, im_width, im_height])
+ layer_idxs.append(0)
+ for i_layer in range(crop_n_layers):
+ n_crops_per_side = 2 ** (i_layer + 1)
+ overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
+
+ crop_width = int(math.ceil((overlap * (n_crops_per_side - 1) + im_width) / n_crops_per_side))
+ crop_height = int(math.ceil((overlap * (n_crops_per_side - 1) + im_height) / n_crops_per_side))
+
+ crop_box_x0 = [int((crop_width - overlap) * i) for i in range(n_crops_per_side)]
+ crop_box_y0 = [int((crop_height - overlap) * i) for i in range(n_crops_per_side)]
+
+ for left, top in product(crop_box_x0, crop_box_y0):
+ box = [left, top, min(left + crop_width, im_width), min(top + crop_height, im_height)]
+ crop_boxes.append(box)
+ layer_idxs.append(i_layer + 1)
+
+ return crop_boxes, layer_idxs
+
+
def _generate_crop_images(
    crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format=None
):
    """
    Takes as an input bounding boxes that are used to crop the image. Based in the crops, the corresponding points are
    also passed.

    Returns the list of cropped images and, for each crop, its sampled points mapped into the resized image frame.
    """
    cropped_images = []
    total_points_per_crop = []
    for i, crop_box in enumerate(crop_boxes):
        # Boxes are (left, top, right, bottom) pixel coordinates.
        left, top, right, bottom = crop_box

        # Slice along the correct axes depending on whether channels come first or last.
        channel_dim = infer_channel_dimension_format(image, input_data_format)
        if channel_dim == ChannelDimension.LAST:
            cropped_im = image[top:bottom, left:right, :]
        else:
            cropped_im = image[:, top:bottom, left:right]

        cropped_images.append(cropped_im)

        cropped_im_size = get_image_size(cropped_im, channel_dim)
        # get_image_size returns (height, width); reverse to (width, height) so it scales (x, y) points.
        points_scale = np.array(cropped_im_size)[None, ::-1]

        # Scale the layer's unit grid to this crop, then map into the resized image frame.
        points = points_grid[layer_idxs[i]] * points_scale
        normalized_points = _normalize_coordinates(target_size, points, original_size)
        total_points_per_crop.append(normalized_points)

    return cropped_images, total_points_per_crop
+
+
+def _pad_masks(masks, crop_box: List[int], orig_height: int, orig_width: int):
+ left, top, right, bottom = crop_box
+ if left == 0 and top == 0 and right == orig_width and bottom == orig_height:
+ return masks
+ # Coordinate transform masks
+ pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top)
+ pad = (left, pad_x - left, top, pad_y - top)
+ return torch.nn.functional.pad(masks, pad, value=0)
+
+
def _pad_masks_tf(masks, crop_box: List[int], orig_height: int, orig_width: int):
    """Zero-pad crop-level masks back onto the full (orig_height, orig_width) canvas."""
    left, top, right, bottom = crop_box
    # Crop already spans the whole image: nothing to pad.
    if left == 0 and top == 0 and right == orig_width and bottom == orig_height:
        return masks
    # Total padding needed along width (pad_x) and height (pad_y).
    pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top)
    # Unlike torch's flat pad tuple, tf.pad requires one [before, after] pair per
    # dimension; leading (batch/channel) dimensions are left unpadded.
    paddings = [[0, 0]] * (len(masks.shape) - 2) + [[top, pad_y - top], [left, pad_x - left]]
    return tf.pad(masks, paddings, constant_values=0)
+
+
+def _is_box_near_crop_edge(boxes, crop_box, orig_box, atol=20.0):
+ """Filter masks at the edge of a crop, but not at the edge of the original image."""
+ crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
+ orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
+
+ left, top, _, _ = crop_box
+ offset = torch.tensor([[left, top, left, top]], device=boxes.device)
+ # Check if boxes has a channel dimension
+ if len(boxes.shape) == 3:
+ offset = offset.unsqueeze(1)
+ boxes = (boxes + offset).float()
+
+ near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
+ near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
+ near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
+ return torch.any(near_crop_edge, dim=1)
+
+
def _is_box_near_crop_edge_tf(boxes, crop_box, orig_box, atol=20.0):
    """Filter masks at the edge of a crop, but not at the edge of the original image."""
    crop_box_tf = tf.convert_to_tensor(crop_box, dtype=tf.float32)
    orig_box_tf = tf.convert_to_tensor(orig_box, dtype=tf.float32)

    # Boxes are in crop-local coordinates; shift them back into the full-image frame.
    left, top, _, _ = crop_box
    offset = tf.convert_to_tensor([[left, top, left, top]])
    # Check if boxes has a channel dimension
    if len(boxes.shape) == 3:
        offset = tf.expand_dims(offset, 1)
    boxes = tf.cast(boxes + offset, tf.float32)

    # An edge counts only if it is within `atol` of the crop border without also being
    # within `atol` of the original image border.
    near_crop_edge = tnp.isclose(boxes, crop_box_tf[None, :], atol=atol, rtol=0)
    near_image_edge = tnp.isclose(boxes, orig_box_tf[None, :], atol=atol, rtol=0)
    near_crop_edge = tf.math.logical_and(near_crop_edge, ~near_image_edge)
    return tf.reduce_any(near_crop_edge, axis=1)
+
+
def _batched_mask_to_box(masks: "torch.Tensor"):
    """
    Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which
    corresponds the following required indices:
        - LEFT: left hand side of the bounding box
        - TOP: top of the bounding box
        - RIGHT: right of the bounding box
        - BOTTOM: bottom of the bounding box

    Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape
    is channel_1 x channel_2 x ... x 4.

    Args:
        - masks (`torch.Tensor` of shape `(batch, nb_mask, height, width)`)
    """
    # torch.max below raises an error on empty inputs, just skip in this case
    if torch.numel(masks) == 0:
        return torch.zeros(*masks.shape[:-2], 4, device=masks.device)

    # Normalize shape to Cxheightxwidth
    shape = masks.shape
    height, width = shape[-2:]

    # Get top and bottom edges
    # `in_height[..., y]` is truthy iff row y contains at least one mask pixel.
    in_height, _ = torch.max(masks, dim=-1)
    # Weight each occupied row by its index so the max picks the last occupied row.
    in_height_coords = in_height * torch.arange(height, device=in_height.device)[None, :]
    bottom_edges, _ = torch.max(in_height_coords, dim=-1)
    # Push empty rows past the image extent so the min picks the first occupied row.
    in_height_coords = in_height_coords + height * (~in_height)
    top_edges, _ = torch.min(in_height_coords, dim=-1)

    # Get left and right edges (same trick along the width axis)
    in_width, _ = torch.max(masks, dim=-2)
    in_width_coords = in_width * torch.arange(width, device=in_width.device)[None, :]
    right_edges, _ = torch.max(in_width_coords, dim=-1)
    in_width_coords = in_width_coords + width * (~in_width)
    left_edges, _ = torch.min(in_width_coords, dim=-1)

    # If the mask is empty the right edge will be to the left of the left edge.
    # Replace these boxes with [0, 0, 0, 0]
    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
    out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
    out = out * (~empty_filter).unsqueeze(-1)

    # Return to original shape
    out = out.reshape(*shape[:-2], 4)
    return out
+
+
def _batched_mask_to_box_tf(masks: "tf.Tensor"):
    """
    Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which
    corresponds the following required indices:
        - LEFT: left hand side of the bounding box
        - TOP: top of the bounding box
        - RIGHT: right of the bounding box
        - BOTTOM: bottom of the bounding box

    Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape
    is channel_1 x channel_2 x ... x 4.

    Args:
        - masks (`tf.Tensor` of shape `(batch, nb_mask, height, width)`)
    """
    if tf.size(masks) == 0:
        return tf.zeros([*masks.shape[:-2], 4])

    # Normalize shape to Cxheightxwidth
    shape = shape_list(masks)
    height, width = shape[-2:]

    # Get top and bottom edges.
    # Note: unlike torch.max/min, tf.reduce_max/reduce_min return a single tensor,
    # not a (values, indices) pair — the original tuple-unpacking would raise.
    in_height = tf.reduce_max(masks, axis=-1)
    # Weight each occupied row by its index so the max picks the last occupied row.
    in_height_coords = in_height * tf.range(height)[None, :]
    bottom_edges = tf.reduce_max(in_height_coords, axis=-1)
    # Push empty rows past the image extent so the min picks the first occupied row.
    in_height_coords = in_height_coords + height * (~in_height)
    top_edges = tf.reduce_min(in_height_coords, axis=-1)

    # Get left and right edges (same trick along the width axis)
    in_width = tf.reduce_max(masks, axis=-2)
    in_width_coords = in_width * tf.range(width)[None, :]
    right_edges = tf.reduce_max(in_width_coords, axis=-1)
    in_width_coords = in_width_coords + width * (~in_width)
    left_edges = tf.reduce_min(in_width_coords, axis=-1)

    # If the mask is empty the right edge will be to the left of the left edge.
    # Replace these boxes with [0, 0, 0, 0]
    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
    out = tf.stack([left_edges, top_edges, right_edges, bottom_edges], axis=-1)
    out = out * tf.expand_dims(~empty_filter, -1)

    # Return to original shape (tf.reshape takes the target shape as a single list,
    # not as varargs like the original `tf.reshape(out, *shape[:-2], 4)`)
    out = tf.reshape(out, [*shape[:-2], 4])
    return out
+
+
+def _mask_to_rle_pytorch(input_mask: "torch.Tensor"):
+ """
+ Encodes masks the run-length encoding (RLE), in the format expected by pycoco tools.
+ """
+ # Put in fortran order and flatten height and width
+ batch_size, height, width = input_mask.shape
+ input_mask = input_mask.permute(0, 2, 1).flatten(1)
+
+ # Compute change indices
+ diff = input_mask[:, 1:] ^ input_mask[:, :-1]
+ change_indices = diff.nonzero()
+
+ # Encode run length
+ out = []
+ for i in range(batch_size):
+ cur_idxs = change_indices[change_indices[:, 0] == i, 1] + 1
+ btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
+ counts = [] if input_mask[i, 0] == 0 else [0]
+ counts += [cur_idxs[0].item()] + btw_idxs.tolist() + [height * width - cur_idxs[-1]]
+ out.append({"size": [height, width], "counts": counts})
+ return out
+
+
def _mask_to_rle_tf(input_mask: "tf.Tensor"):
    """
    Encodes masks the run-length encoding (RLE), in the format expected by pycoco tools.
    """
    # Put in fortran order and flatten height and width
    batch_size, height, width = input_mask.shape
    input_mask = flatten(tf.transpose(input_mask, perm=(0, 2, 1)), 1)

    # Compute change indices
    diff = input_mask[:, 1:] ^ input_mask[:, :-1]
    change_indices = tf.where(diff)

    # Encode run length
    out = []
    for i in range(batch_size):
        # NOTE(review): `tensor[bool_mask, 1]` is numpy-style indexing on a tf tensor;
        # confirm TF's __getitem__ supports it here (`tf.boolean_mask` would be the explicit form).
        cur_idxs = change_indices[change_indices[:, 0] == i, 1] + 1
        btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
        # A leading 0 count signals that the mask starts with a foreground pixel.
        counts = [] if input_mask[i, 0] == 0 else [0]
        counts += [cur_idxs[0].item()] + btw_idxs.tolist() + [height * width - cur_idxs[-1]]
        out.append({"size": [height, width], "counts": counts})
    return out
+
+
+def _rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
+ """Compute a binary mask from an uncompressed RLE."""
+ height, width = rle["size"]
+ mask = np.empty(height * width, dtype=bool)
+ idx = 0
+ parity = False
+ for count in rle["counts"]:
+ mask[idx : idx + count] = parity
+ idx += count
+ parity = not parity
+ mask = mask.reshape(width, height)
+ return mask.transpose() # Reshape to original shape
+
+
def _postprocess_for_mg(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):
    """
    Perform NMS (Non Maximum Suppression) on the outputs.

    Args:
            rle_masks (`torch.Tensor`):
                binary masks in the RLE format
            iou_scores (`torch.Tensor` of shape (nb_masks, 1)):
                iou_scores predicted by the model
            mask_boxes (`torch.Tensor`):
                The bounding boxes corresponding to segmentation masks
            amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7):
                NMS threshold.

    Returns:
        `Tuple`: decoded binary masks, kept IoU scores, kept RLE masks and kept boxes.
    """
    # All-zero `idxs` puts every box in the same category, so this is plain single-class NMS.
    keep_by_nms = batched_nms(
        boxes=mask_boxes.float(),
        scores=iou_scores,
        idxs=torch.zeros(mask_boxes.shape[0]),
        iou_threshold=amg_crops_nms_thresh,
    )

    # Keep only the NMS survivors, preserving the RLE encodings alongside the tensors.
    iou_scores = iou_scores[keep_by_nms]
    rle_masks = [rle_masks[i] for i in keep_by_nms]
    mask_boxes = mask_boxes[keep_by_nms]
    # Decode the surviving RLEs back to binary masks for the caller.
    masks = [_rle_to_mask(rle) for rle in rle_masks]

    return masks, iou_scores, rle_masks, mask_boxes
+
+
def _postprocess_for_mg_tf(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):
    """
    Perform NMS (Non Maximum Suppression) on the outputs.

    Args:
            rle_masks (`tf.Tensor`):
                binary masks in the RLE format
            iou_scores (`tf.Tensor` of shape (nb_masks, 1)):
                iou_scores predicted by the model
            mask_boxes (`tf.Tensor`):
                The bounding boxes corresponding to segmentation masks
            amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7):
                NMS threshold.

    Returns:
        `Tuple`: decoded binary masks, kept IoU scores, kept RLE masks and kept boxes.
    """
    # The original body mixed torch calls (`mask_boxes.float()`, `torch.zeros`) into this TF
    # path and misused the multi-class `combined_non_max_suppression` API. Single-class NMS
    # over all boxes mirrors what the torch counterpart does with all-zero `idxs`.
    # tf.image.non_max_suppression documents [y1, x1, y2, x2] boxes; feeding XYXY swaps the
    # two axes consistently for every box, which leaves pairwise IoU values unchanged.
    keep_by_nms = tf.image.non_max_suppression(
        boxes=tf.cast(mask_boxes, tf.float32),
        scores=tf.reshape(tf.cast(iou_scores, tf.float32), [-1]),
        max_output_size=mask_boxes.shape[0],
        iou_threshold=amg_crops_nms_thresh,
    )

    # Keep only the NMS survivors, preserving the RLE encodings alongside the tensors.
    iou_scores = tf.gather(iou_scores, keep_by_nms)
    rle_masks = [rle_masks[int(i)] for i in keep_by_nms]
    mask_boxes = tf.gather(mask_boxes, keep_by_nms)
    # Decode the surviving RLEs back to binary masks for the caller.
    masks = [_rle_to_mask(rle) for rle in rle_masks]

    return masks, iou_scores, rle_masks, mask_boxes
+
+
+__all__ = ["SamImageProcessor"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/modeling_tf_sam.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/modeling_tf_sam.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee75b1bf4f213363286a68b12a0afaeab469f176
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/modeling_tf_sam.py
@@ -0,0 +1,1655 @@
+# coding=utf-8
+# Copyright 2023 The Meta AI Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+TensorFlow SAM model. This file was mostly generated by auto-translation from the PyTorch original. In the event of a
+discrepancy, the original file should be regarded as the 'reference' version.
+"""
+
+from __future__ import annotations
+
+import collections
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import ACT2FN
+from ...modeling_tf_outputs import TFBaseModelOutput
+from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, keras, shape_list, unpack_inputs
+from ...tf_utils import flatten, functional_layernorm
+from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_sam import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig
+
+
logger = logging.get_logger(__name__)  # Module-level logger named after this module

# Placeholders consumed by the docstring decorators used further down the file.
_CONFIG_FOR_DOC = "SamConfig"
_CHECKPOINT_FOR_DOC = "facebook/sam-vit-huge"
+
+
@dataclass
class TFSamVisionEncoderOutput(ModelOutput):
    """
    Base class for sam vision model's outputs that also contains image embeddings obtained by applying the projection
    layer to the pooler_output.

    Args:
        image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
            the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # All fields default to None until populated by the model, hence the `| None` annotations.
    image_embeds: tf.Tensor | None = None
    last_hidden_state: tf.Tensor | None = None
    hidden_states: Tuple[tf.Tensor, ...] | None = None
    attentions: Tuple[tf.Tensor, ...] | None = None
+
+
@dataclass
class TFSamImageSegmentationOutput(ModelOutput):
    """
    Base class for Segment-Anything model's output

    Args:
        iou_scores (`tf.Tensor` of shape `(batch_size, num_masks)`):
            The iou scores of the predicted masks.
        pred_masks (`tf.Tensor` of shape `(batch_size, num_masks, height, width)`):
            The predicted low resolutions masks. Needs to be post-processed by the processor
        vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
            the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the vision model at the output of each layer plus the optional initial embedding outputs.
        vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        mask_decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # All fields default to None until populated by the model, hence the `| None` annotations.
    iou_scores: tf.Tensor | None = None
    pred_masks: tf.Tensor | None = None
    vision_hidden_states: Tuple[tf.Tensor, ...] | None = None
    vision_attentions: Tuple[tf.Tensor, ...] | None = None
    mask_decoder_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
class TFSamPatchEmbeddings(keras.layers.Layer):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        image_size, patch_size = config.image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.hidden_size
        # Accept either a single int or an (h, w) pair for both sizes.
        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches

        # Non-overlapping convolution (stride == kernel size) implements the patch projection.
        self.projection = keras.layers.Conv2D(
            hidden_size, kernel_size=patch_size, strides=patch_size, name="projection"
        )

    def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
        # Input is channels-first: (batch, channels, height, width).
        batch_size, num_channels, height, width = shape_list(pixel_values)
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        if height != self.image_size[0] or width != self.image_size[1]:
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
            )
        # Conv2D defaults to channels-last, so transpose before projecting.
        embeddings = self.projection(tf.transpose(pixel_values, perm=[0, 2, 3, 1]))
        return embeddings

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "projection", None) is not None:
            # name_scope keeps variable names compatible with pretrained checkpoints.
            with tf.name_scope(self.projection.name):
                self.projection.build([None, None, None, self.num_channels])
+
+
class TFSamMLPBlock(keras.layers.Layer):
    """Feed-forward block: Dense(mlp_dim) -> activation -> Dense(hidden_size)."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        # Layer names are fixed so pretrained checkpoint weights resolve correctly.
        self.lin1 = keras.layers.Dense(config.mlp_dim, name="lin1")
        self.lin2 = keras.layers.Dense(config.hidden_size, name="lin2")
        self.act = ACT2FN[config.hidden_act]
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        # Project up, apply the configured nonlinearity, project back down.
        return self.lin2(self.act(self.lin1(hidden_states)))

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build each dense layer with its expected input width.
        for dense, in_width in (
            (getattr(self, "lin1", None), self.config.hidden_size),
            (getattr(self, "lin2", None), self.config.mlp_dim),
        ):
            if dense is not None:
                with tf.name_scope(dense.name):
                    dense.build([None, None, in_width])
+
+
class TFSamLayerNorm(keras.layers.Layer):
    r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
    The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
    width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
    """

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last", **kwargs):
        super().__init__(**kwargs)
        if data_format not in ["channels_last", "channels_first"]:
            raise NotImplementedError(f"Unsupported data format: {data_format}")
        self.eps = eps
        self.data_format = data_format
        self.normalized_shape = normalized_shape

    def build(self, input_shape):
        # Learnable affine parameters over the normalized shape.
        self.weight = self.add_weight(shape=self.normalized_shape, initializer="ones", name="weight")
        self.bias = self.add_weight(shape=self.normalized_shape, initializer="zeros", name="bias")
        super().build(input_shape)

    def call(self, x: tf.Tensor) -> tf.Tensor:
        # Normalize over the channel axis, wherever that axis lives for this data format.
        channel_axis = -1 if self.data_format == "channels_last" else 1
        return functional_layernorm(x, weight=self.weight, bias=self.bias, epsilon=self.eps, axis=channel_axis)
+
+
class TFSamAttention(keras.layers.Layer):
    """
    SAM's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and
    values.
    """

    def __init__(self, config, downsample_rate=None, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = config.hidden_size

        downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate

        # Q/K/V are projected down to hidden_size // downsample_rate; the output projects back up.
        self.internal_dim = config.hidden_size // downsample_rate
        self.num_attention_heads = config.num_attention_heads
        if self.internal_dim % config.num_attention_heads != 0:
            raise ValueError("num_attention_heads must divide hidden_size.")

        self.q_proj = keras.layers.Dense(self.internal_dim, name="q_proj")
        self.k_proj = keras.layers.Dense(self.internal_dim, name="k_proj")
        self.v_proj = keras.layers.Dense(self.internal_dim, name="v_proj")
        self.out_proj = keras.layers.Dense(self.hidden_size, name="out_proj")

    def _separate_heads(self, hidden_states: tf.Tensor, num_attention_heads: int) -> tf.Tensor:
        # (batch, point_batch, tokens, channel) -> (batch * point_batch, heads, tokens, channel_per_head)
        batch, point_batch_size, n_tokens, channel = shape_list(hidden_states)
        c_per_head = channel // num_attention_heads
        hidden_states = tf.reshape(
            hidden_states, (batch * point_batch_size, n_tokens, num_attention_heads, c_per_head)
        )
        return tf.transpose(hidden_states, perm=[0, 2, 1, 3])

    def _recombine_heads(self, hidden_states: tf.Tensor, point_batch_size: int) -> tf.Tensor:
        # Inverse of _separate_heads. The reduce_max presumably guards against a zero
        # point_batch_size producing a division by zero — TODO confirm against callers.
        batch, n_heads, n_tokens, c_per_head = shape_list(hidden_states)
        hidden_states = tf.transpose(hidden_states, perm=[0, 2, 1, 3])
        return tf.reshape(
            hidden_states,
            (batch // tf.reduce_max([1, point_batch_size]), point_batch_size, n_tokens, n_heads * c_per_head),
        )

    def call(self, query: tf.Tensor, key: tf.Tensor, value: tf.Tensor) -> tf.Tensor:
        # Input projections
        query = self.q_proj(query)
        key = self.k_proj(key)
        value = self.v_proj(value)

        point_batch_size = shape_list(query)[1]
        # Separate into heads
        query = self._separate_heads(query, self.num_attention_heads)
        key = self._separate_heads(key, self.num_attention_heads)
        value = self._separate_heads(value, self.num_attention_heads)

        # SamAttention: scaled dot-product attention over the head dimension.
        _, _, _, c_per_head = shape_list(query)
        attn = tf.matmul(
            query, tf.transpose(key, perm=[0, 1, 3, 2])
        )  # batch_size * point_batch_size x N_heads x N_tokens x N_tokens
        attn = attn / tf.math.sqrt(float(c_per_head))
        attn = tf.nn.softmax(attn, axis=-1)

        # Get output
        out = tf.matmul(attn, value)
        out = self._recombine_heads(out, point_batch_size)
        out = self.out_proj(out)

        return out

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # name_scope keeps variable names compatible with pretrained checkpoints.
        if getattr(self, "q_proj", None) is not None:
            with tf.name_scope(self.q_proj.name):
                self.q_proj.build([None, None, self.hidden_size])
        if getattr(self, "k_proj", None) is not None:
            with tf.name_scope(self.k_proj.name):
                self.k_proj.build([None, None, self.hidden_size])
        if getattr(self, "v_proj", None) is not None:
            with tf.name_scope(self.v_proj.name):
                self.v_proj.build([None, None, self.hidden_size])
        if getattr(self, "out_proj", None) is not None:
            with tf.name_scope(self.out_proj.name):
                self.out_proj.build([None, None, self.internal_dim])
+
+
class TFSamTwoWayAttentionBlock(keras.layers.Layer):
    def __init__(self, config, attention_downsample_rate: int = 2, skip_first_layer_pe: bool = False, **kwargs):
        """
        A transformer block with four layers:
        (1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on
        sparse inputs (4) cross attention of dense inputs -> sparse inputs

        Arguments:
            config (`SamMaskDecoderConfig`):
                The configuration file used to instantiate the block
            attention_downsample_rate (*optional*, int, defaults to 2):
                The downsample ratio of the block used to reduce the inner dim of the attention.
            skip_first_layer_pe (*optional*, bool, defaults to `False`):
                Whether or not to skip the addition of the query_point_embedding on the first layer.
        """
        super().__init__(**kwargs)

        self.hidden_size = config.hidden_size
        self.layer_norm_eps = config.layer_norm_eps

        # Self-attention runs at full width (downsample_rate=1); cross-attention is downsampled.
        self.self_attn = TFSamAttention(config, downsample_rate=1, name="self_attn")
        self.layer_norm1 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm1")

        self.cross_attn_token_to_image = TFSamAttention(
            config, downsample_rate=attention_downsample_rate, name="cross_attn_token_to_image"
        )
        self.layer_norm2 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm2")

        self.mlp = TFSamMLPBlock(config, name="mlp")
        self.layer_norm3 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm3")

        self.layer_norm4 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm4")
        self.cross_attn_image_to_token = TFSamAttention(
            config, downsample_rate=attention_downsample_rate, name="cross_attn_image_to_token"
        )

        self.skip_first_layer_pe = skip_first_layer_pe

    def call(
        self,
        queries: tf.Tensor,
        keys: tf.Tensor,
        query_point_embedding: tf.Tensor,
        key_point_embedding: tf.Tensor,
        output_attentions: bool = False,
    ):
        # Self attention block
        if self.skip_first_layer_pe:
            queries = self.self_attn(query=queries, key=queries, value=queries)
        else:
            query = queries + query_point_embedding
            attn_out = self.self_attn(query=query, key=query, value=queries)
            queries = queries + attn_out
        queries = self.layer_norm1(queries)

        # Cross attention block, tokens attending to image embedding
        query = queries + query_point_embedding
        key = keys + key_point_embedding

        attn_out = self.cross_attn_token_to_image(query=query, key=key, value=keys)
        queries = queries + attn_out

        queries = self.layer_norm2(queries)

        # MLP block
        mlp_out = self.mlp(queries)
        queries = queries + mlp_out
        queries = self.layer_norm3(queries)

        # Cross attention block, image embedding attending to tokens
        query = queries + query_point_embedding
        key = keys + key_point_embedding

        attn_out = self.cross_attn_image_to_token(query=key, key=query, value=queries)
        keys = keys + attn_out

        keys = self.layer_norm4(keys)

        outputs = (queries, keys)

        # NOTE(review): the third output is the last cross-attention *output* tensor, not
        # per-head attention probabilities.
        if output_attentions:
            outputs = outputs + (attn_out,)
        else:
            outputs = outputs + (None,)

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Sub-layers are built under their own name scopes for checkpoint compatibility.
        if getattr(self, "self_attn", None) is not None:
            with tf.name_scope(self.self_attn.name):
                self.self_attn.build(None)
        if getattr(self, "layer_norm1", None) is not None:
            with tf.name_scope(self.layer_norm1.name):
                self.layer_norm1.build([None, None, None, self.hidden_size])
        if getattr(self, "cross_attn_token_to_image", None) is not None:
            with tf.name_scope(self.cross_attn_token_to_image.name):
                self.cross_attn_token_to_image.build(None)
        if getattr(self, "layer_norm2", None) is not None:
            with tf.name_scope(self.layer_norm2.name):
                self.layer_norm2.build([None, None, None, self.hidden_size])
        if getattr(self, "mlp", None) is not None:
            with tf.name_scope(self.mlp.name):
                self.mlp.build(None)
        if getattr(self, "layer_norm3", None) is not None:
            with tf.name_scope(self.layer_norm3.name):
                self.layer_norm3.build([None, None, None, self.hidden_size])
        if getattr(self, "layer_norm4", None) is not None:
            with tf.name_scope(self.layer_norm4.name):
                self.layer_norm4.build([None, None, None, self.hidden_size])
        if getattr(self, "cross_attn_image_to_token", None) is not None:
            with tf.name_scope(self.cross_attn_image_to_token.name):
                self.cross_attn_image_to_token.build(None)
+
+
class TFSamTwoWayTransformer(keras.layers.Layer):
    """Stack of two-way attention blocks mixing prompt tokens and image embeddings."""

    def __init__(self, config: SamMaskDecoderConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config

        self.num_hidden_layers = config.num_hidden_layers
        self.layers = []

        for i in range(self.num_hidden_layers):
            # Only the very first block skips adding the point embedding to the queries.
            self.layers.append(TFSamTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0), name=f"layers_._{i}"))

        self.final_attn_token_to_image = TFSamAttention(config, name="final_attn_token_to_image")
        self.layer_norm_final_attn = keras.layers.LayerNormalization(
            epsilon=config.layer_norm_eps, name="layer_norm_final_attn"
        )

    def call(
        self,
        point_embeddings: tf.Tensor,
        image_embeddings: tf.Tensor,
        image_positional_embeddings: tf.Tensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TFBaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        all_attentions = ()

        if image_embeddings is None:
            raise ValueError("You have to specify an image_embedding")

        # Flatten the spatial dims and move channels last; `[:, None]` inserts the
        # point-batch axis expected by the attention blocks.
        image_embeddings = tf.transpose(flatten(image_embeddings, 2), perm=(0, 2, 1))[:, None]
        image_positional_embeddings = tf.transpose(flatten(image_positional_embeddings, 2), (0, 2, 1))[:, None]

        # Prepare queries
        queries = point_embeddings
        keys = image_embeddings

        # Apply transformer blocks and final layernorm
        for layer in self.layers:
            queries, keys, attention_outputs = layer(
                queries=queries,
                keys=keys,
                query_point_embedding=point_embeddings,
                key_point_embedding=image_positional_embeddings,
                output_attentions=output_attentions,
            )

            if output_attentions:
                all_attentions = all_attentions + (attention_outputs,)

        # Apply the final attention layer from the points to the image
        query = queries + point_embeddings
        key = keys + image_positional_embeddings

        attn_out = self.final_attn_token_to_image(query=query, key=key, value=keys)

        queries = queries + attn_out
        queries = self.layer_norm_final_attn(queries)
        return queries, keys, all_attentions

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "final_attn_token_to_image", None) is not None:
            with tf.name_scope(self.final_attn_token_to_image.name):
                self.final_attn_token_to_image.build(None)
        if getattr(self, "layer_norm_final_attn", None) is not None:
            with tf.name_scope(self.layer_norm_final_attn.name):
                self.layer_norm_final_attn.build([None, None, None, self.config.hidden_size])
        for layer in self.layers:
            with tf.name_scope(layer.name):
                layer.build(None)
+
+
class TFSamFeedForward(keras.layers.Layer):
    """
    Small MLP: an input projection, `num_layers - 2` hidden Dense layers, and an output projection,
    with ReLU between layers and an optional final sigmoid.
    """

    def __init__(
        self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, sigmoid_output: bool = False, **kwargs
    ):
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.activation = keras.layers.ReLU()
        # Fixed layer names keep pretrained weight loading stable.
        self.proj_in = keras.layers.Dense(hidden_dim, input_shape=(input_dim,), name="proj_in")
        self.proj_out = keras.layers.Dense(output_dim, input_shape=(hidden_dim,), name="proj_out")
        self.layers = [
            keras.layers.Dense(hidden_dim, input_shape=(hidden_dim,), name=f"layers_._{i}")
            for i in range(num_layers - 2)
        ]
        self.sigmoid_output = sigmoid_output
        self.hidden_dim = hidden_dim
        self.input_dim = input_dim

    def call(self, hidden_states):
        hidden_states = self.activation(self.proj_in(hidden_states))
        for dense in self.layers:
            hidden_states = self.activation(dense(hidden_states))
        hidden_states = self.proj_out(hidden_states)
        # Optionally squash the output into (0, 1).
        return tf.sigmoid(hidden_states) if self.sigmoid_output else hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build every Dense layer with its expected input width, under its own name scope.
        if getattr(self, "proj_in", None) is not None:
            with tf.name_scope(self.proj_in.name):
                self.proj_in.build([None, None, self.input_dim])
        if getattr(self, "proj_out", None) is not None:
            with tf.name_scope(self.proj_out.name):
                self.proj_out.build([None, None, self.hidden_dim])
        if getattr(self, "layers", None) is not None:
            for dense in self.layers:
                with tf.name_scope(dense.name):
                    dense.build([None, None, self.hidden_dim])
+
+
class TFSamMaskDecoder(keras.layers.Layer):
    """Decodes image embeddings plus prompt embeddings into segmentation masks and IoU predictions."""

    def __init__(self, config: SamMaskDecoderConfig, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = config.hidden_size

        self.num_multimask_outputs = config.num_multimask_outputs
        # One extra token for the single-mask output (token 0; see mask_slice in call()).
        self.num_mask_tokens = config.num_multimask_outputs + 1

        self.transformer = TFSamTwoWayTransformer(config, name="transformer")

        # Two transposed convolutions upscale the image embedding 4x for mask prediction.
        self.upscale_conv1 = keras.layers.Conv2DTranspose(
            self.hidden_size // 4, kernel_size=2, strides=2, name="upscale_conv1", data_format="channels_first"
        )
        self.upscale_conv2 = keras.layers.Conv2DTranspose(
            self.hidden_size // 8, kernel_size=2, strides=2, name="upscale_conv2", data_format="channels_first"
        )
        self.upscale_layer_norm = TFSamLayerNorm(
            self.hidden_size // 4, data_format="channels_first", name="upscale_layer_norm"
        )
        self.activation = tf.nn.gelu

        # One hypernetwork MLP per mask token.
        mlps_list = []
        for i in range(self.num_mask_tokens):
            mlps_list += [
                TFSamFeedForward(
                    self.hidden_size,
                    self.hidden_size,
                    self.hidden_size // 8,
                    3,
                    name=f"output_hypernetworks_mlps_._{i}",
                )
            ]
        self.output_hypernetworks_mlps = mlps_list

        self.iou_prediction_head = TFSamFeedForward(
            self.hidden_size,
            config.iou_head_hidden_dim,
            self.num_mask_tokens,
            config.iou_head_depth,
            name="iou_prediction_head",
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Learned token embeddings are created here (not in __init__) so their variable
        # names match pretrained checkpoints.
        self.iou_token = self.add_weight(shape=(1, self.hidden_size), name="iou_token.weight", trainable=True)
        self.mask_tokens = self.add_weight(
            shape=(self.num_mask_tokens, self.hidden_size), name="mask_tokens.weight", trainable=True
        )

        if getattr(self, "transformer", None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
        if getattr(self, "upscale_conv1", None) is not None:
            with tf.name_scope(self.upscale_conv1.name):
                self.upscale_conv1.build([None, self.hidden_size, None, None])
        if getattr(self, "upscale_conv2", None) is not None:
            with tf.name_scope(self.upscale_conv2.name):
                self.upscale_conv2.build([None, self.hidden_size // 4, None, None])
        if getattr(self, "upscale_layer_norm", None) is not None:
            with tf.name_scope(self.upscale_layer_norm.name):
                self.upscale_layer_norm.build(None)
        if getattr(self, "iou_prediction_head", None) is not None:
            with tf.name_scope(self.iou_prediction_head.name):
                self.iou_prediction_head.build(None)
        for mlp in self.output_hypernetworks_mlps:
            with tf.name_scope(mlp.name):
                mlp.build(None)

    def call(
        self,
        image_embeddings: tf.Tensor,
        image_positional_embeddings: tf.Tensor,
        sparse_prompt_embeddings: tf.Tensor,
        dense_prompt_embeddings: tf.Tensor,
        multimask_output: bool,
        output_attentions: Optional[bool] = None,
    ) -> Tuple[tf.Tensor, tf.Tensor]:
        batch_size, num_channels, height, width = shape_list(image_embeddings)
        # Guard against a zero-sized point batch.
        point_batch_size = tf.math.maximum(1, tf.shape(sparse_prompt_embeddings)[1])

        output_tokens = tf.concat([self.iou_token, self.mask_tokens], axis=0)  # Should be (1, 32) + (4, 32) = (5, 32)
        output_tokens = tf.tile(
            output_tokens[None, None, :], [batch_size, point_batch_size, 1, 1]
        )  # Should be (batch_size, point_size, 5, 32)

        # Matt: The original Torch code checked that the sum of sparse_prompt_embeddings equalled 0. However, this only
        # happens when the sparse prompt embeddings are an empty tensor with shape[1] == 0. I replaced
        # it with an explicit shape check to avoid data-dependent control flow which breaks XLA.
        if shape_list(sparse_prompt_embeddings)[1] != 0:
            tokens = tf.concat((output_tokens, sparse_prompt_embeddings), axis=2)
        else:
            tokens = output_tokens
        point_embeddings = tf.cast(tokens, self.iou_token.dtype)

        image_embeddings = image_embeddings + dense_prompt_embeddings
        # Replicate image data per point batch so the transformer sees one image per prompt group.
        image_embeddings = tf.repeat(image_embeddings, point_batch_size, axis=0)
        image_positional_embeddings = tf.repeat(image_positional_embeddings, point_batch_size, axis=0)

        point_embedding, image_embeddings, attentions = self.transformer(
            point_embeddings=point_embeddings,
            image_embeddings=image_embeddings,
            image_positional_embeddings=image_positional_embeddings,
            output_attentions=output_attentions,
        )
        # Token 0 predicts IoU; tokens 1..num_mask_tokens are the mask tokens.
        iou_token_out = point_embedding[:, :, 0, :]
        mask_tokens_out = point_embedding[:, :, 1 : (1 + self.num_mask_tokens), :]

        image_embeddings = tf.transpose(image_embeddings, perm=(0, 1, 3, 2))
        image_embeddings = tf.reshape(image_embeddings, [batch_size * point_batch_size, num_channels, height, width])

        upscaled_embedding = self.upscale_conv1(image_embeddings)
        upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding))
        upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding))

        # Each mask token is mapped by its own hypernetwork MLP to per-pixel weights.
        hyper_in_list = []
        for i in range(self.num_mask_tokens):
            current_mlp = self.output_hypernetworks_mlps[i]
            hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])]
        hyper_in = tf.stack(hyper_in_list, axis=2)

        _, num_channels, height, width = shape_list(upscaled_embedding)
        upscaled_embedding = tf.reshape(
            upscaled_embedding, [batch_size, point_batch_size, num_channels, height * width]
        )
        # Matrix product of the token weights with the upscaled features gives the low-res masks.
        masks = tf.reshape(hyper_in @ upscaled_embedding, [batch_size, point_batch_size, -1, height, width])

        iou_pred = self.iou_prediction_head(iou_token_out)

        # multimask: keep masks 1..N; otherwise keep only the single-mask output (mask 0).
        if multimask_output:
            mask_slice = slice(1, None)
        else:
            mask_slice = slice(0, 1)
        masks = masks[:, :, mask_slice, :, :]
        iou_pred = iou_pred[:, :, mask_slice]

        outputs = (masks, iou_pred)

        if output_attentions:
            outputs = outputs + (attentions,)
        else:
            outputs = outputs + (None,)

        return outputs
+
+
class TFSamPositionalEmbedding(keras.layers.Layer):
    """Random-feature positional encoding for 2D coordinates (sin/cos of a random projection)."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.scale = config.hidden_size // 2
        self.config = config

    def build(self, input_shape):
        # TODO Matt: What is going on here? Why is a non-trainable weight randomly initialized?
        # NOTE(review): presumably a fixed random Gaussian projection matrix that must stay
        # frozen so positional encodings are stable — confirm against the PyTorch original.
        self.positional_embedding = self.add_weight(
            name="positional_embedding",
            shape=(2, self.config.num_pos_feats),
            initializer=keras.initializers.RandomNormal(mean=0.0, stddev=self.scale),
            trainable=False,
        )
        super().build(input_shape)

    def call(self, input_coords, input_shape=None):
        """Positionally encode points that are normalized to [0,1]."""
        coordinates = tf.identity(input_coords)

        if input_shape is not None:
            # Normalize pixel coordinates to [0, 1] using (height, width) = input_shape.
            coordinates = tf.stack(
                [
                    tf.cast(coordinates[:, :, :, 0], tf.float32) / input_shape[1],
                    tf.cast(coordinates[:, :, :, 1], tf.float32) / input_shape[0],
                ],
                axis=-1,
            )

        # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
        coordinates = 2 * coordinates - 1  # map to [-1, 1]
        coordinates = tf.cast(coordinates, self.positional_embedding.dtype)
        coordinates = tf.matmul(coordinates, self.positional_embedding)
        coordinates = 2 * np.pi * coordinates
        # outputs d_1 x ... x d_n x channel shape
        return tf.concat([tf.sin(coordinates), tf.cos(coordinates)], axis=-1)
+
+
class TFSamMaskEmbedding(keras.layers.Layer):
    """Downscales an input mask prompt into a dense embedding via three convolutions."""

    def __init__(self, config: SamPromptEncoderConfig, **kwargs):
        super().__init__(**kwargs)
        self.mask_input_channels = config.mask_input_channels // 4
        self.activation = ACT2FN[config.hidden_act]
        self.conv1 = keras.layers.Conv2D(self.mask_input_channels, kernel_size=2, strides=2, name="conv1")
        self.conv2 = keras.layers.Conv2D(config.mask_input_channels, kernel_size=2, strides=2, name="conv2")
        self.conv3 = keras.layers.Conv2D(config.hidden_size, kernel_size=1, name="conv3")
        self.layer_norm1 = TFSamLayerNorm(self.mask_input_channels, config.layer_norm_eps, name="layer_norm1")
        self.layer_norm2 = TFSamLayerNorm(self.mask_input_channels * 4, config.layer_norm_eps, name="layer_norm2")
        self.config = config

    def call(self, masks):
        # Keras Conv2D defaults to channels-last, so transpose in and back out.
        masks = tf.transpose(masks, perm=(0, 2, 3, 1))  # Convert to channels-last
        hidden_states = self.conv1(masks)
        hidden_states = self.layer_norm1(hidden_states)
        hidden_states = self.activation(hidden_states)

        hidden_states = self.conv2(hidden_states)
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.activation(hidden_states)
        dense_embeddings = self.conv3(hidden_states)
        dense_embeddings = tf.transpose(dense_embeddings, perm=(0, 3, 1, 2))  # Convert back to channels-first
        return dense_embeddings

    def build(self, input_shape=None):
        # This class needs an explicit build method because it isn't called with the standard dummy inputs
        if self.built:
            return
        self.built = True
        # conv1 sees a single-channel mask input.
        with tf.name_scope("conv1"):
            self.conv1.build([None, None, None, 1])
        with tf.name_scope("conv2"):
            self.conv2.build([None, None, None, self.mask_input_channels])
        with tf.name_scope("conv3"):
            self.conv3.build([None, None, None, self.mask_input_channels * 4])
        with tf.name_scope("layer_norm1"):
            self.layer_norm1.build([None, None, None, self.mask_input_channels])
        with tf.name_scope("layer_norm2"):
            self.layer_norm2.build([None, None, None, self.mask_input_channels * 4])
+
+
+class TFSamPromptEncoder(keras.layers.Layer):
    def __init__(self, config: SamPromptEncoderConfig, shared_patch_embedding, **kwargs):
        super().__init__(**kwargs)
        # Positional-embedding layer shared with the rest of the model.
        self.shared_embedding = shared_patch_embedding
        self.mask_embed = TFSamMaskEmbedding(config, name="mask_embed")
        self.no_mask_embed = None  # created in build()

        self.image_embedding_size = (config.image_embedding_size, config.image_embedding_size)
        self.input_image_size = config.image_size

        self.point_embed = []  # created in build()
        self.hidden_size = config.hidden_size
        self.not_a_point_embed = None  # created in build()
        self.config = config
+
+ def build(self, input_shape=None):
+ self.no_mask_embed = self.add_weight(
+ name="no_mask_embed.weight",
+ shape=(1, self.hidden_size),
+ initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
+ trainable=True,
+ )
+ self.point_embed = [
+ self.add_weight(
+ name=f"point_embed_._{i}.weight",
+ shape=(1, self.hidden_size),
+ initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
+ trainable=True,
+ )
+ for i in range(self.config.num_point_embeddings)
+ ]
+ self.not_a_point_embed = self.add_weight(
+ name="not_a_point_embed.weight",
+ shape=(1, self.hidden_size),
+ initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
+ trainable=True,
+ )
+ with tf.name_scope("mask_embed"):
+ # We must explicitly build the mask embed because it isn't touched by the standard dummy inputs
+ self.mask_embed.build(
+ (None, self.config.mask_input_channels, self.config.image_size, self.config.image_size)
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mask_embed", None) is not None:
+ with tf.name_scope(self.mask_embed.name):
+ self.mask_embed.build(None)
+
    def _embed_points(self, points: tf.Tensor, labels: tf.Tensor, pad: bool) -> tf.Tensor:
        """Embeds point prompts."""
        points = points + 0.5  # Shift to center of pixel
        if pad:
            # Append one padding point per prompt, tagged with label -1.
            target_point_shape = (shape_list(points)[0], shape_list(points)[1], 1, shape_list(points)[-1])
            target_labels_shape = (shape_list(points)[0], shape_list(points)[1], 1)
            padding_point = tf.zeros(target_point_shape, dtype=points.dtype)
            padding_label = -tf.ones(target_labels_shape, dtype=labels.dtype)
            points = tf.concat([points, padding_point], axis=2)
            labels = tf.concat([labels, padding_label], axis=2)
        input_shape = (self.input_image_size, self.input_image_size)
        point_embedding = self.shared_embedding(points, input_shape)

        # Label -1 (padding): replace the positional encoding with the learned "not a point" embedding.
        point_embedding = tf.where(labels[..., None] == -1, self.not_a_point_embed[0], point_embedding)

        # Label -10: zero out the embedding entirely.
        point_embedding = tf.where(
            labels[..., None] != -10,
            point_embedding,
            tf.zeros_like(point_embedding),
        )
        # Labels 0 and 1 each add their own learned point embedding.
        point_embedding = tf.where(
            (labels == 0)[:, :, :, None], point_embedding + self.point_embed[0], point_embedding
        )
        point_embedding = tf.where(
            (labels == 1)[:, :, :, None], point_embedding + self.point_embed[1], point_embedding
        )
        return point_embedding
+
+ def _embed_boxes(self, boxes: tf.Tensor) -> tf.Tensor:
+ """Embeds box prompts."""
+ boxes = boxes + 0.5 # Shift to center of pixel
+ batch_size, nb_boxes = shape_list(boxes)[:2]
+ coords = tf.reshape(boxes, (batch_size, nb_boxes, 2, 2))
+ input_shape = (self.input_image_size, self.input_image_size)
+ corner_embedding = self.shared_embedding(coords, input_shape)
+ corner_embedding += tf.where(
+ tf.range(shape_list(corner_embedding)[2])[None, None, :, None] == 0,
+ self.point_embed[2][0],
+ self.point_embed[3][0],
+ )
+ return corner_embedding
+
+ def call(
+ self,
+ batch_size: Optional[int],
+ input_points: Optional[Tuple[tf.Tensor, tf.Tensor]],
+ input_labels: tf.Tensor | None,
+ input_boxes: tf.Tensor | None,
+ input_masks: tf.Tensor | None,
+ ) -> Tuple[tf.Tensor, tf.Tensor]:
+ """
+ Embeds different types of prompts, returning both sparse and dense embeddings.
+
+ Args:
+ points (`tf.Tensor`, *optional*):
+ point coordinates and labels to embed.
+ boxes (`tf.Tensor`, *optional*):
+ boxes to embed
+ masks (`tf.Tensor`, *optional*):
+ masks to embed
+ """
+ sparse_embeddings = None
+ if input_points is not None:
+ batch_size, point_batch_size = shape_list(input_points)[:2]
+ if input_labels is None:
+ raise ValueError("If points are provided, labels must also be provided.")
+ point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None))
+ sparse_embeddings = tf.zeros(
+ (batch_size, point_batch_size, 0, self.hidden_size), dtype=point_embeddings.dtype
+ )
+ sparse_embeddings = tf.concat([sparse_embeddings, point_embeddings], axis=2)
+ if input_boxes is not None:
+ batch_size = shape_list(input_boxes)[0]
+ box_embeddings = self._embed_boxes(input_boxes)
+ if sparse_embeddings is None:
+ sparse_embeddings = box_embeddings
+ else:
+ sparse_embeddings = tf.concat([sparse_embeddings, box_embeddings], axis=2)
+ if input_masks is not None:
+ dense_embeddings = self.mask_embed(input_masks)
+ else:
+ dense_embeddings = self.no_mask_embed[0]
+ dense_embeddings = tf.reshape(dense_embeddings, (1, -1, 1, 1))
+ dense_embeddings = tf.tile(
+ dense_embeddings, (batch_size, 1, self.image_embedding_size[0], self.image_embedding_size[1])
+ )
+ if sparse_embeddings is None:
+ sparse_embeddings = tf.zeros((batch_size, 0, 1, self.hidden_size), dtype=dense_embeddings.dtype)
+
+ return sparse_embeddings, dense_embeddings
+
+
class TFSamVisionAttention(keras.layers.Layer):
    """Multi-head Attention block with relative position embeddings."""

    def __init__(self, config, window_size, **kwargs):
        super().__init__(**kwargs)
        # window_size == 0 means global attention over the full patch grid.
        input_size = (
            (config.image_size // config.patch_size, config.image_size // config.patch_size)
            if window_size == 0
            else (window_size, window_size)
        )
        self.input_size = input_size

        self.num_attention_heads = config.num_attention_heads
        head_dim = config.hidden_size // config.num_attention_heads
        self.head_dim = head_dim
        self.scale = head_dim**-0.5  # 1/sqrt(head_dim) attention scaling
        self.dropout = config.attention_dropout

        # Fused query/key/value projection (3 * hidden_size outputs).
        self.qkv = keras.layers.Dense(config.hidden_size * 3, use_bias=config.qkv_bias, name="qkv")
        self.proj = keras.layers.Dense(config.hidden_size, name="proj")

        self.use_rel_pos = config.use_rel_pos
        if self.use_rel_pos:
            if input_size is None:
                raise ValueError("Input size must be provided if using relative positional encoding.")
        self.config = config

    def build(self, input_shape=None):
        # NOTE(review): the rel-pos weights are created before the `self.built` early-return,
        # so a repeated build() would try to re-create them; kept as-is to preserve behavior.
        if self.input_size is not None:
            # initialize relative positional embeddings
            self.rel_pos_h = self.add_weight(
                shape=(2 * self.input_size[0] - 1, self.head_dim), initializer="zeros", name="rel_pos_h"
            )
            self.rel_pos_w = self.add_weight(
                shape=(2 * self.input_size[1] - 1, self.head_dim), initializer="zeros", name="rel_pos_w"
            )

        if self.built:
            return
        self.built = True
        if getattr(self, "qkv", None) is not None:
            with tf.name_scope(self.qkv.name):
                self.qkv.build([None, None, self.config.hidden_size])
        if getattr(self, "proj", None) is not None:
            with tf.name_scope(self.proj.name):
                self.proj.build([None, None, self.config.hidden_size])

    def get_rel_pos(self, q_size: int, k_size: int, rel_pos: tf.Tensor) -> tf.Tensor:
        """
        Get relative positional embeddings according to the relative positions of
        query and key sizes.

        Args:
            q_size (int):
                size of the query.
            k_size (int):
                size of key k.
            rel_pos (`tf.Tensor`):
                relative position embeddings (L, channel).

        Returns:
            Extracted positional embeddings according to relative positions.
        """
        max_rel_dist = int(2 * max(q_size, k_size) - 1)
        # Interpolate rel pos if needed.
        if rel_pos.shape[0] != max_rel_dist:
            # Interpolate rel pos.
            rel_pos_resized = tf.image.resize(
                tf.reshape(rel_pos, (1, rel_pos.shape[0], -1)),
                size=(max_rel_dist, rel_pos.shape[1]),
                method="bilinear",
            )
            rel_pos_resized = tf.reshape(rel_pos_resized, (-1, max_rel_dist))
        else:
            rel_pos_resized = rel_pos

        # Scale the coords with short length if shapes for q and k are different.
        q_coords = tf.expand_dims(tf.range(q_size, dtype=tf.float32), 1) * max(k_size / q_size, 1.0)
        k_coords = tf.expand_dims(tf.range(k_size, dtype=tf.float32), 0) * max(q_size / k_size, 1.0)
        # Offset so relative coordinates index into [0, max_rel_dist).
        relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)

        return tf.gather(rel_pos_resized, tf.cast(relative_coords, tf.int32))

    def add_decomposed_rel_pos(
        self,
        attn: tf.Tensor,
        query: tf.Tensor,
        rel_pos_h: tf.Tensor,
        rel_pos_w: tf.Tensor,
        q_size: Tuple[int, int],
        k_size: Tuple[int, int],
    ) -> tf.Tensor:
        """
        Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
        https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py

        Args:
            attn (`tf.Tensor`):
                attention map.
            query (`tf.Tensor`):
                query q in the attention layer with shape (batch_size, query_height * query_width, channel).
            rel_pos_h (`tf.Tensor`):
                relative position embeddings (Lh, channel) for height axis.
            rel_pos_w (`tf.Tensor`):
                relative position embeddings (Lw, channel) for width axis.
            q_size (tuple):
                spatial sequence size of query q with (query_height, query_width).
            k_size (tuple):
                spatial sequence size of key k with (key_height, key_width).

        Returns:
            attn (`tf.Tensor`):
                attention map with added relative positional embeddings.
        """
        query_height, query_width = q_size
        key_height, key_width = k_size
        relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h)
        relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w)

        batch_size, _, dim = shape_list(query)
        reshaped_query = tf.reshape(query, (batch_size, query_height, query_width, dim))
        # Height/width contributions computed separately, then broadcast-added to the attention map.
        rel_h = tf.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height)
        rel_w = tf.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width)
        attn = tf.reshape(attn, (batch_size, query_height, query_width, key_height, key_width))
        attn = attn + tf.expand_dims(rel_h, axis=-1) + tf.expand_dims(rel_w, axis=-2)
        attn = tf.reshape(attn, (batch_size, query_height * query_width, key_height * key_width))
        return attn

    def call(self, hidden_states: tf.Tensor, output_attentions=False, training=False) -> tf.Tensor:
        batch_size, height, width, _ = shape_list(hidden_states)
        # qkv with shape (3, batch_size, nHead, height * width, channel)
        qkv = tf.reshape(self.qkv(hidden_states), (batch_size, height * width, 3, self.num_attention_heads, -1))
        qkv = tf.transpose(qkv, perm=(2, 0, 3, 1, 4))
        # q, k, v with shape (batch_size * nHead, height * width, channel)
        query, key, value = tf.unstack(
            tf.reshape(qkv, (3, batch_size * self.num_attention_heads, height * width, -1)), axis=0
        )
        attn_weights = tf.matmul(query * self.scale, key, transpose_b=True)

        if self.use_rel_pos:
            attn_weights = self.add_decomposed_rel_pos(
                attn_weights, query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width)
            )

        attn_weights = tf.nn.softmax(attn_weights, axis=-1)

        # Dropout is applied only at training time; the returned attentions are pre-dropout.
        if training:
            attn_probs = tf.nn.dropout(attn_weights, rate=self.dropout)
        else:
            attn_probs = attn_weights

        attn_output = tf.reshape(attn_probs @ value, (batch_size, self.num_attention_heads, height, width, -1))
        attn_output = tf.transpose(attn_output, perm=(0, 2, 3, 1, 4))
        attn_output = tf.reshape(attn_output, (batch_size, height, width, self.config.hidden_size))

        attn_output = self.proj(attn_output)

        if output_attentions:
            outputs = (attn_output, attn_weights)
        else:
            outputs = (attn_output, None)

        return outputs
+
+
class TFSamVisionLayer(keras.layers.Layer):
    """One pre-norm ViT block (attention + MLP) with optional windowed attention."""

    def __init__(self, config, window_size, **kwargs):
        super().__init__(**kwargs)
        self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1")
        self.attn = TFSamVisionAttention(config, window_size, name="attn")
        self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2")
        self.mlp = TFSamMLPBlock(config, name="mlp")
        self.window_size = window_size  # 0 disables windowing (global attention)
        self.config = config

    def window_partition(self, hidden_states: tf.Tensor, window_size: int) -> Tuple[tf.Tensor, Tuple[int, int]]:
        """Pads the feature map to a multiple of `window_size` and splits it into windows."""
        batch_size, height, width, channel = shape_list(hidden_states)

        pad_h = (window_size - height % window_size) % window_size
        pad_w = (window_size - width % window_size) % window_size
        if pad_h > 0 or pad_w > 0:
            hidden_states = tf.pad(hidden_states, [[0, 0], [0, pad_h], [0, pad_w], [0, 0]])
        pad_height, pad_width = height + pad_h, width + pad_w

        hidden_states = tf.reshape(
            hidden_states,
            [batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel],
        )
        # Flatten (batch, h-windows, w-windows) into one leading "windows" dimension.
        windows = tf.reshape(
            tf.transpose(hidden_states, perm=[0, 1, 3, 2, 4, 5]), [-1, window_size, window_size, channel]
        )
        return windows, (pad_height, pad_width)

    def window_unpartition(
        self, windows: tf.Tensor, window_size: int, padding_shape: Tuple[int, int], original_shape: Tuple[int, int]
    ) -> tf.Tensor:
        """Inverse of `window_partition`: reassembles windows and crops away the padding."""
        pad_height, pad_width = padding_shape
        height, width = original_shape
        batch_size = shape_list(windows)[0] // (pad_height * pad_width // window_size // window_size)
        hidden_states = tf.reshape(
            windows, [batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1]
        )
        hidden_states = tf.reshape(
            tf.transpose(hidden_states, perm=[0, 1, 3, 2, 4, 5]), [batch_size, pad_height, pad_width, -1]
        )

        if pad_height > height or pad_width > width:
            hidden_states = hidden_states[:, :height, :width, :]
        return hidden_states

    def call(
        self,
        hidden_states: tf.Tensor,
        output_attentions: Optional[bool] = False,
        training: Optional[bool] = False,
    ) -> Tuple[tf.Tensor]:
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        if self.window_size > 0:
            # Attention is computed window-by-window; remember the original spatial size.
            height, width = hidden_states.shape[1], hidden_states.shape[2]
            hidden_states, padding_shape = self.window_partition(hidden_states, self.window_size)

        hidden_states, attn_weights = self.attn(
            hidden_states=hidden_states,
            output_attentions=output_attentions,
            training=training,
        )
        if self.window_size > 0:
            hidden_states = self.window_unpartition(hidden_states, self.window_size, padding_shape, (height, width))

        # Residual connections around attention and MLP.
        hidden_states = residual + hidden_states
        layernorm_output = self.layer_norm2(hidden_states)
        hidden_states = hidden_states + self.mlp(layernorm_output)

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "layer_norm1", None) is not None:
            with tf.name_scope(self.layer_norm1.name):
                self.layer_norm1.build([None, None, None, self.config.hidden_size])
        if getattr(self, "attn", None) is not None:
            with tf.name_scope(self.attn.name):
                self.attn.build(None)
        if getattr(self, "layer_norm2", None) is not None:
            with tf.name_scope(self.layer_norm2.name):
                self.layer_norm2.build([None, None, None, self.config.hidden_size])
        if getattr(self, "mlp", None) is not None:
            with tf.name_scope(self.mlp.name):
                self.mlp.build(None)
+
+
class TFSamVisionNeck(keras.layers.Layer):
    """Projects vision-encoder features to `output_channels` via two conv + layer-norm stages."""

    def __init__(self, config: SamVisionConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config

        self.conv1 = keras.layers.Conv2D(config.output_channels, kernel_size=1, use_bias=False, name="conv1")
        self.layer_norm1 = TFSamLayerNorm(config.output_channels, name="layer_norm1")
        self.conv2 = keras.layers.Conv2D(
            config.output_channels,
            kernel_size=3,
            padding="same",
            use_bias=False,
            name="conv2",
        )
        self.layer_norm2 = TFSamLayerNorm(config.output_channels, name="layer_norm2")

    def call(self, hidden_states):
        # 1x1 conv + norm, then 3x3 conv + norm, finally NHWC -> NCHW for the decoder.
        features = self.layer_norm1(self.conv1(hidden_states))
        features = self.layer_norm2(self.conv2(features))
        return tf.transpose(features, perm=[0, 3, 1, 2])

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build each conv with its known input channel count, then its paired layer norm,
        # keeping the original conv1 -> norm1 -> conv2 -> norm2 order.
        stages = (
            (self.conv1, self.config.hidden_size, self.layer_norm1),
            (self.conv2, self.config.output_channels, self.layer_norm2),
        )
        for conv, in_channels, norm in stages:
            if conv is not None:
                with tf.name_scope(conv.name):
                    conv.build([None, None, None, in_channels])
            if norm is not None:
                with tf.name_scope(norm.name):
                    norm.build(None)
+
+
class TFSamVisionEncoder(keras.layers.Layer):
    """ViT-style image encoder: patch embedding, transformer layers, and a projection neck."""

    def __init__(self, config: SamVisionConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.image_size = config.image_size

        self.patch_embed = TFSamPatchEmbeddings(config, name="patch_embed")

        self.pos_embed = None  # absolute positional embedding, created in build() when enabled

        self.layers = []
        for i in range(config.num_hidden_layers):
            # Layers listed in `global_attn_indexes` use global attention (window_size=0);
            # all others use windowed attention.
            layer = TFSamVisionLayer(
                config,
                window_size=config.window_size if i not in config.global_attn_indexes else 0,
                name=f"layers_._{i}",
            )
            self.layers.append(layer)

        self.neck = TFSamVisionNeck(config, name="neck")

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if self.config.use_abs_pos:
            # Initialize absolute positional embedding with pretrain image size.
            self.pos_embed = self.add_weight(
                shape=[
                    1,
                    self.config.image_size // self.config.patch_size,
                    self.config.image_size // self.config.patch_size,
                    self.config.hidden_size,
                ],
                initializer="zeros",
                trainable=True,
                name="pos_embed",
            )

        if getattr(self, "patch_embed", None) is not None:
            with tf.name_scope(self.patch_embed.name):
                self.patch_embed.build(None)
        if getattr(self, "neck", None) is not None:
            with tf.name_scope(self.neck.name):
                self.neck.build(None)
        for layer in self.layers:
            with tf.name_scope(layer.name):
                layer.build(None)

    def get_input_embeddings(self):
        """Returns the patch-embedding layer (the model's input embeddings)."""
        return self.patch_embed

    def call(
        self,
        pixel_values: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFSamVisionEncoderOutput]:
        # Fall back to config defaults when the flags are not given explicitly.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.patch_embed(pixel_values)
        if self.pos_embed is not None:
            hidden_states = hidden_states + self.pos_embed

        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(hidden_states, output_attentions=output_attentions, training=training)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        hidden_states = self.neck(hidden_states)

        if not return_dict:
            outputs = (hidden_states,)
            if output_hidden_states:
                outputs = outputs + (all_hidden_states,)
            if output_attentions:
                outputs = outputs + (all_self_attentions,)
            return outputs

        return TFSamVisionEncoderOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
+
+
class TFSamPreTrainedModel(TFPreTrainedModel):
    """Base class wiring SAM models into the library's weight init / download / save machinery."""

    config_class = SamConfig
    base_model_prefix = "sam"
    main_input_name = "pixel_values"
+
+
# Shared class-level docstring, injected into model classes via `add_start_docstrings`.
SAM_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a TensorFlow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)
    subclass. Use it as a regular TensorFlow Model and refer to the TensorFlow documentation for all matter related to
    general usage and behavior.

    Parameters:
        config ([`SamConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
+
+
# Shared forward-pass docstring, injected into `call` via `add_start_docstrings_to_model_forward`.
# Fixes two typos in the user-facing text: "mask decder" -> "mask decoder", "botton right" -> "bottom right".
SAM_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`SamProcessor`]. See [`SamProcessor.__call__`] for
            details.
        input_points (`tf.Tensor` of shape `(batch_size, num_points, 2)`):
            Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields to much
            better results. The points can be obtained by passing a list of list of list to the processor that will
            create corresponding `tf` tensors of dimension 4. The first dimension is the image batch size, the second
            dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict per
            input point), the third dimension is the number of points per segmentation mask (it is possible to pass
            multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal)
            coordinates of the point. If a different number of points is passed either for each image, or for each
            mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the
            computation of the embedding will be skipped for these points using the labels.
        input_labels (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points)`):
            Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the
            official implementation, there are 3 types of labels

            - `1`: the point is a point that contains the object of interest
            - `0`: the point is a point that does not contain the object of interest
            - `-1`: the point corresponds to the background

            We added the label:

            - `-10`: the point is a padding point, thus should be ignored by the prompt encoder

            The padding labels should be automatically done by the processor.
        input_boxes (`tf.Tensor` of shape `(batch_size, num_boxes, 4)`):
            Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields to
            much better generated masks. The boxes can be obtained by passing a list of list of list to the processor,
            that will generate a `tf` tensor, with each dimension corresponding respectively to the image batch size,
            the number of boxes per image and the coordinates of the top left and bottom right point of the box. In the
            order (`x1`, `y1`, `x2`, `y2`):

            - `x1`: the x coordinate of the top left point of the input box
            - `y1`: the y coordinate of the top left point of the input box
            - `x2`: the x coordinate of the bottom right point of the input box
            - `y2`: the y coordinate of the bottom right point of the input box

        input_masks (`tf.Tensor` of shape `(batch_size, image_size, image_size)`):
            SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to
            generate a corresponding embedding, that will be fed later on to the mask decoder. These masks needs to be
            manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).

        image_embeddings (`tf.Tensor` of shape `(batch_size, output_channels, window_size, window_size)`):
            Image embeddings, this is used by the mask decoder to generate masks and iou scores. For more memory
            efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`
            method, and then feed them to the `call` method instead of feeding the `pixel_values`.
        multimask_output (`bool`, *optional*):
            In the original implementation and paper, the model always outputs 3 masks per image (or per point / per
            bounding box if relevant). However, it is possible to just output a single mask, that corresponds to the
            "best" mask, by specifying `multimask_output=False`.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
+
+
@add_start_docstrings(
    "Segment Anything Model (SAM) for generating segmentation masks, given an input image and ",
    " optional 2D location and bounding boxes.",
    SAM_START_DOCSTRING,
)
class TFSamModel(TFSamPreTrainedModel):
    # The shared positional embedding is re-created here, so the checkpoint key may be missing.
    _keys_to_ignore_on_load_missing = [r"prompt_encoder.shared_embedding.positional_embedding"]

    def __init__(self, config, **kwargs):
        super().__init__(config, **kwargs)
        # Positional embedding shared between the vision path and the prompt encoder.
        self.shared_image_embedding = TFSamPositionalEmbedding(config.vision_config, name="shared_image_embedding")

        self.vision_encoder = TFSamVisionEncoder(config.vision_config, name="vision_encoder")
        self.prompt_encoder = TFSamPromptEncoder(
            config.prompt_encoder_config, self.shared_image_embedding, name="prompt_encoder"
        )
        self.mask_decoder = TFSamMaskDecoder(config.mask_decoder_config, name="mask_decoder")
        self.config = config

    def get_input_embeddings(self):
        """Returns the vision encoder's patch-embedding layer."""
        return self.vision_encoder.get_input_embeddings()

    def get_image_wide_positional_embeddings(self):
        """Builds the positional encoding of the full image-embedding grid, shape (1, channels, size, size)."""
        size = self.config.prompt_encoder_config.image_embedding_size
        grid = tf.ones((size, size))
        # Pixel-center coordinates normalized to [0, 1].
        y_embed = tf.math.cumsum(grid, axis=0) - 0.5
        x_embed = tf.math.cumsum(grid, axis=1) - 0.5
        y_embed = y_embed / size
        x_embed = x_embed / size

        positional_embedding = self.shared_image_embedding(tf.stack([x_embed, y_embed], axis=-1))
        return tf.expand_dims(tf.transpose(positional_embedding, perm=[2, 0, 1]), axis=0)  # channel x height x width

    def get_image_embeddings(
        self,
        pixel_values,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        Returns the image embeddings by passing the pixel values through the vision encoder.

        Args:
            pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
                Input pixel values
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.TFModelOutput`] instead of a plain tuple.

        """
        vision_output = self.vision_encoder(
            pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Works for both tuple and ModelOutput returns: index 0 is the last hidden state.
        image_embeddings = vision_output[0]
        return image_embeddings

    def get_prompt_embeddings(
        self,
        input_points: tf.Tensor | None = None,
        input_labels: tf.Tensor | None = None,
        input_boxes: tf.Tensor | None = None,
        input_masks: tf.Tensor | None = None,
    ):
        r"""
        Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder.

        Args:
            input_points (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`):
                Optional input points for the prompt encoder. The padding of the point is automatically done by the
                processor. `point_batch_size` refers to the number of masks that we want the model to predict per
                point. The model will output `point_batch_size` times 3 masks in total.
            input_labels (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points_per_image)`):
                Optional input labels for the prompt encoder. The padding of the labels is automatically done by the
                processor, or can be fed by the user.
            input_boxes (`tf.Tensor` of shape `(batch_size, num_boxes_per_image, 4)`):
                Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the
                processor. users can also pass manually the input boxes.
            input_masks (`tf.Tensor` of shape `(batch_size, image_size, image_size)`):
                Optional input masks for the prompt encoder.
        """
        # NOTE(review): the prompt encoder's `batch_size` parameter is not forwarded here,
        # so this path relies on at least one prompt being provided — confirm against callers.
        prompt_output = self.prompt_encoder(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            input_masks=input_masks,
        )
        return prompt_output

    @unpack_inputs
    @add_start_docstrings_to_model_forward(SAM_INPUTS_DOCSTRING)
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        input_points: tf.Tensor | None = None,
        input_labels: tf.Tensor | None = None,
        input_boxes: tf.Tensor | None = None,
        input_masks: tf.Tensor | None = None,
        image_embeddings: tf.Tensor | None = None,
        multimask_output: bool = True,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        training: bool = False,
        **kwargs,
    ) -> TFSamImageSegmentationOutput | Tuple[tf.Tensor]:
        # Fall back to config defaults for the output flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Exactly one of pixel_values / image_embeddings must be supplied.
        if pixel_values is None and image_embeddings is None:
            raise ValueError("Either pixel_values or image_embeddings must be provided.")

        if pixel_values is not None and image_embeddings is not None:
            raise ValueError("Only one of pixel_values and image_embeddings can be provided.")

        # NOTE(review): these ValueErrors receive multiple string arguments (comma-separated),
        # so the message renders as a tuple rather than one concatenated string; kept as-is.
        if input_points is not None and len(input_points.shape) != 4:
            raise ValueError(
                "The input_points must be a 4D tensor. Of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.",
                " got {}.".format(input_points.shape),
            )
        if input_boxes is not None and len(input_boxes.shape) != 3:
            raise ValueError(
                "The input_points must be a 3D tensor. Of shape `batch_size`, `nb_boxes`, `4`.",
                " got {}.".format(input_boxes.shape),
            )
        if input_points is not None and input_boxes is not None:
            point_batch_size = shape_list(input_points)[1]
            box_batch_size = shape_list(input_boxes)[1]
            if point_batch_size != box_batch_size:
                raise ValueError(
                    "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
                        point_batch_size, box_batch_size
                    )
                )
        if pixel_values is not None:
            # Ensures that later checks pass even with an all-None shape from the serving signature
            pixel_values = tf.ensure_shape(
                pixel_values,
                [
                    None,
                    self.config.vision_config.num_channels,
                    self.config.vision_config.image_size,
                    self.config.vision_config.image_size,
                ],
            )
        image_positional_embeddings = self.get_image_wide_positional_embeddings()
        # repeat with batch size
        batch_size = shape_list(pixel_values)[0] if pixel_values is not None else shape_list(image_embeddings)[0]
        image_positional_embeddings = tf.repeat(image_positional_embeddings, batch_size, axis=0)

        vision_attentions = None
        vision_hidden_states = None

        if pixel_values is not None:
            vision_outputs = self.vision_encoder(
                pixel_values,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=True,
                training=training,
            )
            image_embeddings = vision_outputs["last_hidden_state"]

            if output_hidden_states:
                vision_hidden_states = vision_outputs["hidden_states"]
            if output_attentions:
                vision_attentions = vision_outputs["attentions"]

        # Points without labels default to label 1 ("contains the object of interest").
        if input_points is not None and input_labels is None:
            input_labels = tf.ones_like(input_points[:, :, :, 0], dtype=tf.int32)

        if input_points is not None and image_embeddings.shape[0] != input_points.shape[0]:
            raise ValueError(
                "The batch size of the image embeddings and the input points must be the same. ",
                "Got {} and {} respectively.".format(image_embeddings.shape[0], input_points.shape[0]),
                " if you want to pass multiple points for the same image, make sure that you passed ",
                " input_points of shape (batch_size, point_batch_size, num_points_per_image, 3) and ",
                " input_labels of shape (batch_size, point_batch_size, num_points_per_image)",
            )

        sparse_embeddings, dense_embeddings = self.prompt_encoder(
            batch_size=shape_list(image_embeddings)[0],
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            input_masks=input_masks,
        )

        low_res_masks, iou_predictions, mask_decoder_attentions = self.mask_decoder(
            image_embeddings=image_embeddings,
            image_positional_embeddings=image_positional_embeddings,
            sparse_prompt_embeddings=sparse_embeddings,
            dense_prompt_embeddings=dense_embeddings,
            multimask_output=multimask_output,
            output_attentions=output_attentions,
        )

        if not return_dict:
            output = (iou_predictions, low_res_masks)
            if output_hidden_states:
                output = output + (vision_hidden_states,)

            if output_attentions:
                output = output + (vision_attentions, mask_decoder_attentions)
            return output

        return TFSamImageSegmentationOutput(
            iou_scores=iou_predictions,
            pred_masks=low_res_masks,
            vision_hidden_states=vision_hidden_states,
            vision_attentions=vision_attentions,
            mask_decoder_attentions=mask_decoder_attentions,
        )

    def serving_output(self, output: TFSamImageSegmentationOutput) -> TFSamImageSegmentationOutput:
        """Converts tuple-valued outputs to tensors for the SavedModel serving signature."""
        # NOTE(review): the `if ... else None` in the constructor below is redundant — hs/attns
        # are already None when the corresponding config flag is off; kept as-is.
        hs = tf.convert_to_tensor(output.vision_hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.vision_attentions) if self.config.output_attentions else None

        return TFSamImageSegmentationOutput(
            iou_scores=output.iou_scores,
            pred_masks=output.pred_masks,
            vision_hidden_states=hs if self.config.output_hidden_states else None,
            vision_attentions=attns if self.config.output_attentions else None,
            mask_decoder_attentions=output.mask_decoder_attentions if self.config.output_attentions else None,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "shared_image_embedding", None) is not None:
            with tf.name_scope(self.shared_image_embedding.name):
                self.shared_image_embedding.build(None)
        if getattr(self, "vision_encoder", None) is not None:
            with tf.name_scope(self.vision_encoder.name):
                self.vision_encoder.build(None)
        if getattr(self, "prompt_encoder", None) is not None:
            with tf.name_scope(self.prompt_encoder.name):
                self.prompt_encoder.build(None)
        if getattr(self, "mask_decoder", None) is not None:
            with tf.name_scope(self.mask_decoder.name):
                self.mask_decoder.build(None)
+
+
# Public API of this module.
__all__ = ["TFSamModel", "TFSamPreTrainedModel"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/processing_sam.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/processing_sam.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f11d710f61114e5a023322d117942fa46439309
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/sam/processing_sam.py
@@ -0,0 +1,310 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for SAM.
+"""
+
+from copy import deepcopy
+from typing import List, Optional, Union
+
+import numpy as np
+
+from ...image_utils import ImageInput, VideoInput
+from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin
+from ...tokenization_utils_base import AudioInput, BatchEncoding, PreTokenizedInput, TextInput
+from ...utils import is_tf_available, is_torch_available
+
+
+if is_torch_available():
+ import torch
+
+if is_tf_available():
+ import tensorflow as tf
+
+
class SamImagesKwargs(ImagesKwargs):
    """Image-processor kwargs specific to SAM prompt inputs."""

    # Optional segmentation maps forwarded to the image processor.
    segmentation_maps: Optional[ImageInput]
    # 2D point prompts per image, (x, y) in the ORIGINAL image coordinate frame
    # (they are rescaled to the resized frame in `_normalize_coordinates`).
    input_points: Optional[List[List[float]]]
    # One label per prompt point (e.g. foreground/background) — TODO confirm semantics against SamModel docs.
    input_labels: Optional[List[List[int]]]
    # Bounding-box prompts per image; each box is 4 coords treated as two (x, y) corners.
    input_boxes: Optional[List[List[List[float]]]]
    # Fill value used when padding ragged point/label batches.
    point_pad_value: Optional[int]
+
+
class SamProcessorKwargs(ProcessingKwargs, total=False):
    """Merged processor kwargs for `SamProcessor._merge_kwargs`."""

    images_kwargs: SamImagesKwargs
    # Default pad value for ragged point prompts; see SamImagesKwargs.point_pad_value.
    _defaults = {
        "images_kwargs": {
            "point_pad_value": -10,
        }
    }
+
+
class SamProcessor(ProcessorMixin):
    r"""
    Constructs a SAM processor which wraps a SAM image processor and an 2D points & Bounding boxes processor into a
    single processor.

    [`SamProcessor`] offers all the functionalities of [`SamImageProcessor`]. See the docstring of
    [`~SamImageProcessor.__call__`] for more information.

    Args:
        image_processor (`SamImageProcessor`):
            An instance of [`SamImageProcessor`]. The image processor is a required input.
    """

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"
    # For backward compatibility. See transformers.processing_utils.ProcessorMixin.prepare_and_validate_optional_call_args for more details.
    optional_call_args = [
        "segmentation_maps",
        "input_points",
        "input_labels",
        "input_boxes",
    ]

    def __init__(self, image_processor):
        super().__init__(image_processor)
        # Longest edge the image processor resizes to; used by
        # `_normalize_coordinates` to map prompts from original to resized frame.
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        # The following is to capture `segmentation_maps`, `input_points`, `input_labels` and `input_boxes`
        # arguments that may be passed as a positional argument.
        # See transformers.processing_utils.ProcessorMixin.prepare_and_validate_optional_call_args for more details,
        # or this conversation for more context:
        # https://github.com/huggingface/transformers/pull/32544#discussion_r1720208116
        # This behavior is only needed for backward compatibility and will be removed in future versions.
        *args,  # to be deprecated
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        audio: Optional[AudioInput] = None,
        video: Optional[VideoInput] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        This method uses [`SamImageProcessor.__call__`] method to prepare image(s) for the model. It also prepares 2D
        points and bounding boxes for the model if they are provided.
        """
        # `text`/`audio`/`video` are accepted only for signature uniformity with
        # other processors; they are not used by SAM.
        output_kwargs = self._merge_kwargs(
            SamProcessorKwargs,
            tokenizer_init_kwargs={},
            **kwargs,
            **self.prepare_and_validate_optional_call_args(*args),
        )
        # Prompt inputs are handled here, not by the image processor, so pop them out.
        input_points = output_kwargs["images_kwargs"].pop("input_points", None)
        input_labels = output_kwargs["images_kwargs"].pop("input_labels", None)
        input_boxes = output_kwargs["images_kwargs"].pop("input_boxes", None)

        encoding_image_processor = self.image_processor(
            images,
            **output_kwargs["images_kwargs"],
        )

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=output_kwargs["common_kwargs"].get("return_tensors"),
            point_pad_value=output_kwargs["images_kwargs"].get("point_pad_value"),
        )

        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
        point_pad_value=-10,
    ):
        """Rescale prompts to the resized-image frame, pad ragged point sets,
        convert to the requested tensor framework and attach them to the encoding.
        """
        if input_points is not None:
            # If there are fewer/more original_sizes than point lists, every point
            # list is normalized against the FIRST image's size (single-image broadcast).
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                # NOTE(review): when shapes differ but `input_labels` is None, no padding
                # happens and `np.array` below gets a ragged list — confirm callers always
                # pass labels together with ragged points.
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(
                        input_points, input_labels, point_pad_value
                    )

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            # Same single-image broadcast rule as for points.
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels, point_pad_value):
        r"""
        The method pads the 2D points and labels to the maximum number of points in the batch.

        Padded point rows are filled with `point_pad_value`; the matching label entry
        gets `point_pad_value` too. NOTE(review): only ONE label is appended per padded
        entry even when several point rows are added — confirm against callers.
        """
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + point_pad_value], axis=0
                )
                # Mutates the caller-supplied labels list in place.
                input_labels[i] = np.append(input_labels[i], [point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(
        self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False
    ) -> np.ndarray:
        """
        Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.

        Coordinates are (x, y): index 0 scales with width, index 1 with height.
        Boxes (4 values) are temporarily viewed as two (x, y) corners.
        """
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        # deepcopy: never scale the caller's array in place.
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        r"""
        Check and preprocesses the 2D points, labels and bounding boxes. It checks if the input is valid and if they
        are, it converts the coordinates of the points and bounding boxes. If a user passes directly a `torch.Tensor`,
        it is converted to a `numpy.ndarray` and then to a `list`.

        Returns lists of per-image `np.ndarray`s (or None for absent inputs).
        NOTE(review): the `input_*[0]` validation below raises IndexError (not
        ValueError) on an empty list — confirm intended.
        """
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            # Boxes need one extra nesting level compared to points: batch -> boxes -> 4 coords.
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        """Names of the model inputs produced by this processor (deduplicated, order-preserving)."""
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        """Delegate mask post-processing to the wrapped image processor."""
        return self.image_processor.post_process_masks(*args, **kwargs)
+
+
# Public API of this module (consumed by transformers' lazy-import machinery).
__all__ = ["SamProcessor"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/modeling_seamless_m4t.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/modeling_seamless_m4t.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a30d094f7319a260879ef8ed550dedb259d9d145
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/modeling_seamless_m4t.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4eb414a48a67fac183f748bb1137de1b33a63d3beea693118ec548cee2dbc420
+size 123674
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__init__.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e07844d45c2d0f3c5bfba69b330c3beede0eab1
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__init__.py
@@ -0,0 +1,28 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
if TYPE_CHECKING:
    # Static imports so type checkers and IDEs see the full public API.
    from .configuration_speech_encoder_decoder import *
    from .modeling_flax_speech_encoder_decoder import *
    from .modeling_speech_encoder_decoder import *
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy framework
    # imports (torch / flax) only happen when an attribute is first accessed.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..92149a670d1326e1e19cac2283e42cc12385bc34
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/__init__.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/configuration_speech_encoder_decoder.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/configuration_speech_encoder_decoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9c7f0a491695690bed1cb582ecf57b42765a0ac
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/configuration_speech_encoder_decoder.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bf6957cc31a35867e327bd294aada39d263c9b3d
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e2c679dc5c615efe0eed85e46140b7c6171b42e5
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/modeling_flax_speech_encoder_decoder.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/modeling_flax_speech_encoder_decoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf118ab27afb1b0a5fa49ced0913c215ee27ecca
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/modeling_flax_speech_encoder_decoder.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/modeling_speech_encoder_decoder.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/modeling_speech_encoder_decoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d54be1137dcde5023e94b3ca13ac6df890a11d49
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__pycache__/modeling_speech_encoder_decoder.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..47312df27ea67d22b920bfabf07430336bfa4ee5
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py
@@ -0,0 +1,112 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ..auto.configuration_auto import AutoConfig
+
+
# Module-level logger, following the transformers logging convention.
logger = logging.get_logger(__name__)
+
+
class SpeechEncoderDecoderConfig(PretrainedConfig):
    r"""
    [`SpeechEncoderDecoderConfig`] is the configuration class to store the configuration of a
    [`SpeechEncoderDecoderModel`]. It is used to instantiate an Encoder Decoder model according to the specified
    arguments, defining the encoder and decoder configs.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        kwargs (*optional*):
            Dictionary of keyword arguments. Notably:

            - **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
              the encoder config.
            - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
              the decoder config.

    Examples:

    ```python
    >>> from transformers import BertConfig, Wav2Vec2Config, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel

    >>> # Initializing a Wav2Vec2 & BERT style configuration
    >>> config_encoder = Wav2Vec2Config()
    >>> config_decoder = BertConfig()

    >>> config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)

    >>> # Initializing a Wav2Vec2Bert model from a Wav2Vec2 & google-bert/bert-base-uncased style configurations
    >>> model = SpeechEncoderDecoderModel(config=config)

    >>> # Accessing the model configuration
    >>> config_encoder = model.config.encoder
    >>> config_decoder = model.config.decoder
    >>> # set decoder config to causal lm
    >>> config_decoder.is_decoder = True
    >>> config_decoder.add_cross_attention = True

    >>> # Saving the model, including its configuration
    >>> model.save_pretrained("my-model")

    >>> # loading model and config from pretrained folder
    >>> encoder_decoder_config = SpeechEncoderDecoderConfig.from_pretrained("my-model")
    >>> model = SpeechEncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
    ```"""

    model_type = "speech-encoder-decoder"
    sub_configs = {"encoder": AutoConfig, "decoder": AutoConfig}
    is_composition = True

    def __init__(self, **kwargs):
        """Build the composite config from `encoder` and `decoder` dict sub-configs.

        Raises:
            ValueError: if either the `encoder` or `decoder` sub-configuration is missing.
        """
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                # fix: "configuraton" -> "configuration" in the user-facing message
                f"A configuration of type {self.model_type} cannot be instantiated because not both `encoder` and"
                f" `decoder` sub-configurations are passed, but only {kwargs}"
            )

        # Pop the dict forms and re-materialize typed sub-configs via AutoConfig,
        # dispatching on each sub-config's own `model_type`.
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        r"""
        Instantiate a [`SpeechEncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model
        configuration and decoder model configuration.

        Returns:
            [`SpeechEncoderDecoderConfig`]: An instance of a configuration object
        """
        # The decoder must attend over encoder outputs; mutate a copy of its flags
        # before serializing both configs into the composite.
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
+
+
# Public API of this module (consumed by transformers' lazy-import machinery).
__all__ = ["SpeechEncoderDecoderConfig"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..874aa2e066f1a9d805f5396ca0c7856356a610eb
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py
@@ -0,0 +1,357 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Wav2Vec2 checkpoint."""
+
+import argparse
+
+import fairseq
+import torch
+from torch import nn
+
+from transformers import (
+ MBart50Tokenizer,
+ MBartConfig,
+ MBartForCausalLM,
+ SpeechEncoderDecoderConfig,
+ SpeechEncoderDecoderModel,
+ Wav2Vec2Config,
+ Wav2Vec2FeatureExtractor,
+ Wav2Vec2Model,
+ logging,
+)
+
+
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq wav2vec2 parameter-name fragments to their HF Transformers
# counterparts. "*" is replaced with the encoder layer index at load time
# (see recursively_load_weights_wav2vec2).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# HF parameter paths that live at the top level of the model rather than
# inside the wav2vec2 encoder submodule.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
+
+
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the HF module parameter addressed by dotted `key`.

    Args:
        hf_pointer: Root HF module to descend into.
        key: Dotted attribute path (e.g. "encoder.layers.0.attention.k_proj").
        value: Source tensor from the fairseq checkpoint.
        full_name: Original fairseq parameter name (for messages only).
        weight_type: One of "weight", "weight_g", "weight_v", "bias", or None
            (None means `hf_pointer` itself is the parameter to assign).
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    # Name the target once. The previous inline form
    # `key + '.' + weight_type if weight_type is not None else ''` bound the
    # conditional around the WHOLE concatenation, so the message dropped `key`
    # entirely whenever weight_type was None.
    target_name = f"{key}.{weight_type}" if weight_type is not None else key

    assert hf_shape == value.shape, (
        f"Shape of hf {target_name} is {hf_shape}, but should be {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{target_name} was initialized from {full_name}.")
+
+
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    """Copy all fairseq wav2vec2 weights into `hf_model`, warning about leftovers.

    Dispatches each checkpoint entry to the conv feature extractor, the adapter,
    or (via MAPPING) the transformer encoder; anything unmatched is collected and
    logged as unused.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Substring match, or exact match of the name's first segment
                # against the mapping key with any "w2v_model." prefix stripped.
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched fragment,
                        # e.g. "encoder.layers.7.self_attn.k_proj" -> "7".
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                # NOTE(review): `continue` (not `break`) means the remaining
                # MAPPING entries are still scanned after a match, so one name
                # could be assigned under several keys — confirm intentional.
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
+
+
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one fairseq conv-feature-extractor tensor into the HF feature extractor.

    `full_name` is expected to contain "conv_layers.<layer_id>.<type_id>...":
    type_id 0 addresses the conv itself, type_id 2 its layer norm. Names that
    match neither pattern are appended to `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    # Layer norms exist on every conv layer in "layer"-norm mode, but only on
    # layer 0 in "group"-norm mode.
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # fix: the failure messages previously indexed `feature_extractor[layer_id]`
            # (indexing a module), which would raise TypeError while formatting the
            # message instead of reporting the mismatched shapes.
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            # fix: this branch assigns the bias, so log "bias", not "weight".
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
+
+
def load_adapter(full_name, value, adapter, unused_weights):
    """Load one fairseq adapter/projection tensor into the HF adapter module.

    Handles three cases: the projection layer norm ("proj_ln"), the projection
    itself, and the numbered adapter conv layers ("adaptor.<i>..."). Names that
    match none of these are appended to `unused_weights`.
    """
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    # NOTE(review): assumes `name` has at least two dot-separated segments;
    # a single-segment name would raise IndexError here — confirm inputs.
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
                # fix: this assignment previously logged nothing, unlike every sibling branch.
                logger.info(f"Adapter proj layer norm weight was initialized from {full_name}.")
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            # fix: this branch assigns the weight, so log "weight", not "bias" (copy-paste error).
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
+
+
def make_linear_from_emb(emb):
    """Build a bias-free linear layer that shares its weight with `emb`.

    The returned layer maps embedding vectors (size `emb_size`) to vocabulary
    logits (size `vocab_size`), i.e. the usual weight-tied LM head.
    """
    vocab_size, emb_size = emb.weight.shape
    # in_features=emb_size, out_features=vocab_size: nn.Linear stores its weight
    # as (out_features, in_features), which matches emb.weight's (vocab, emb)
    # shape. The previous argument order (vocab_size, emb_size) produced a layer
    # whose in/out_features metadata disagreed with the weight assigned below.
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
+
+
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,  # NOTE(review): not used — add_adapter=True is hardcoded below; confirm.
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,  # NOTE(review): not used — eos is assigned below instead; confirm.
    encoder_output_dim,
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=True,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        token_token=True,  # NOTE(review): looks like a typo for `use_auth_token=True` — confirm.
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            # fairseq expects the data dir, i.e. the parent of the dict file.
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, token_token=True)  # NOTE(review): same suspected `use_auth_token` typo.

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)

    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    # Round-trip the composite config through a dict to inject tokenizer-derived
    # special-token ids and processor class names.
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004  # NOTE(review): hardcoded mBART-50 language id — presumably en_XX; verify.
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
+
+
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-xls-r-1b",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/mbart-large-50-one-to-many-mmt",
        type=str,
        help="Path to hf decoder checkpoint config",
    )
    # NOTE(review): argparse `type=bool` turns ANY non-empty string (including
    # "False") into True — a `store_true`/`store_false` action would be correct.
    parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
    parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
    parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
    parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
    parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..377288982087bacefac6ac35aa3c2cbb126c3388
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py
@@ -0,0 +1,316 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Wav2Vec2 checkpoint."""
+
+import argparse
+import json
+import os
+
+import fairseq
+import torch
+from torch import nn
+
+from transformers import (
+ Speech2Text2Config,
+ Speech2Text2ForCausalLM,
+ Speech2Text2Tokenizer,
+ SpeechEncoderDecoderConfig,
+ SpeechEncoderDecoderModel,
+ Wav2Vec2Config,
+ Wav2Vec2FeatureExtractor,
+ Wav2Vec2Model,
+ logging,
+)
+
+
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq parameter-name fragments to the corresponding HF Wav2Vec2
# attribute paths. A "*" placeholder is substituted with the encoder layer
# index at load time (see recursively_load_weights_wav2vec2).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# HF attribute paths that live on the top-level model rather than on the
# base wav2vec2 sub-module.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
+
+
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy a single fairseq tensor into the HF model.

    Walks ``hf_pointer`` along the dotted attribute path ``key``, verifies the
    shapes match, and assigns ``value`` into the selected parameter slot.

    Args:
        hf_pointer: Root HF module to descend into.
        key (str): Dotted attribute path, e.g. ``"encoder.layers.0.attention.k_proj"``.
        value (torch.Tensor): Tensor taken from the fairseq state dict.
        full_name (str): Original fairseq parameter name (logging only).
        weight_type (str or None): Slot to set (``"weight"``, ``"weight_g"``,
            ``"weight_v"``, ``"bias"``) or ``None`` to assign to the attribute itself.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Human-readable name of the exact parameter being set. Fix: the original
    # `key + '.' + weight_type if weight_type is not None else ''` fell back to
    # an empty string (not `key`) when weight_type was None, producing blank
    # names in log lines and assertion messages.
    target_name = f"{key}.{weight_type}" if weight_type is not None else key

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {target_name} is {hf_shape}, but should be {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{target_name} was initialized from {full_name}.")
+
+
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    """Load all fairseq encoder weights into the HF Wav2Vec2 model.

    Conv feature-extractor tensors are dispatched to `load_conv_layer`; all
    other tensors are matched against `MAPPING` and copied via
    `set_recursively`. Unmatched tensor names are collected and logged.

    Returns:
        The fairseq `proj` layer (encoder-to-decoder projection) if the
        checkpoint contains one, otherwise `None`.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            # Kept aside: later attached as the SpeechEncoderDecoderModel's
            # enc_to_dec_proj by the caller.
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits immediately before the matched
                        # fragment, e.g. "encoder.layers.11.self_attn.k_proj.weight" -> "11".
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                # NOTE(review): this `continue` is a no-op at loop-body level —
                # scanning keeps going even after a match. Presumably each name
                # matches at most one MAPPING entry; confirm before changing.
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
+
+
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv feature-extractor tensor into the HF feature extractor.

    Args:
        full_name (str): Full fairseq parameter name, containing
            ``"conv_layers.<layer_id>.<type_id>"``.
        value (torch.Tensor): Tensor to copy.
        feature_extractor: HF feature-extractor module holding ``conv_layers``.
        unused_weights (list): Mutated in place; collects names that were not loaded.
        use_group_norm (bool): True when the HF config uses group norm; in that
            case only layer 0 carries a norm.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    # type_id 0 -> conv weight/bias, type_id 2 -> layer-norm weight/bias
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # Fixed: the failure message previously indexed `feature_extractor[layer_id]`,
            # which would raise TypeError (module is not subscriptable) and mask
            # the actual shape mismatch.
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            # Fixed: this branch loads the layer-norm *bias*, not the weight.
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            # Fixed: same non-subscriptable `feature_extractor[layer_id]` in the message.
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
+
+
def make_linear_from_emb(emb):
    """Build a bias-free linear layer that shares the embedding's weight matrix.

    The returned layer maps hidden states of size `emb_size` to vocabulary
    logits of size `vocab_size` (the usual tied lm-head construction).

    Args:
        emb (nn.Embedding): Embedding whose weight (vocab_size, emb_size) is shared.

    Returns:
        nn.Linear: Layer with `in_features=emb_size`, `out_features=vocab_size`,
        whose weight tensor is `emb.weight.data` (shared, not copied).
    """
    vocab_size, emb_size = emb.weight.shape
    # Fixed: nn.Linear(in_features, out_features) stores its weight as
    # (out_features, in_features). The original called Linear(vocab_size, emb_size),
    # allocating a transposed weight and leaving in_features/out_features metadata
    # wrong; it only worked because the weight was immediately overwritten below.
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
+
+
def create_vocab_dict(dict_path):
    """Build an HF-style vocab mapping from a fairseq ``dict.txt`` file.

    Each line of *dict_path* is ``"<token> <count>"``; tokens are assigned ids
    starting at 4, after the four fairseq special tokens.

    Args:
        dict_path (str): Path to the fairseq dictionary file.

    Returns:
        dict[str, int]: Token-to-id mapping, special tokens first.
    """
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
    words = [line.split(" ")[0] for line in lines]

    # Fixed: the special tokens had been garbled into four duplicate "" keys,
    # which collapsed the dict and dropped ids 0-3. fairseq reserves ids 0-3
    # for these tokens, in this fixed order.
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, len(words) + 4))))
    return vocab_dict
+
+
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak model's weights to transformers design.

    Loads a fairseq speech-translation checkpoint and exports an HF
    `SpeechEncoderDecoderModel` (Wav2Vec2 encoder + Speech2Text2 decoder),
    together with the tokenizer vocab and the feature extractor, into
    `pytorch_dump_folder_path`.

    Args:
        checkpoint_path: Path to the fairseq checkpoint file.
        pytorch_dump_folder_path: Output directory for the converted model.
        dict_path: Path to the fairseq dictionary (dict.txt) of the model.
        encoder_config_path: HF hub id or path for the Wav2Vec2 encoder config.
        decoder_config_path: HF hub id or path for the Speech2Text2 decoder config.
        vocab_size: Decoder vocabulary size to override in the config.
        num_decoder_layers: Number of decoder layers to override in the config.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    # fairseq resolves the dictionary from its "data" directory, so pass the
    # directory that contains dict_path.
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    # input and output embeddings come from distinct fairseq tensors, so they
    # must not be tied in the exported model
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    # propagate tokenizer / feature-extractor metadata into the exported config
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
+
+
if __name__ == "__main__":
    # CLI entry point: path arguments default to None and must be supplied;
    # config / size arguments default to the checkpoint this script targets.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd837146d5ca7d80a2b7bbd3a86369a36b28a68a
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py
@@ -0,0 +1,929 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Classes to support Flax Speech-Encoder-Decoder architectures"""
+
+import os
+from typing import Optional, Tuple, Union
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+from jax.random import PRNGKey
+
+from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput
+from ...modeling_flax_utils import FlaxPreTrainedModel
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from ..auto.configuration_auto import AutoConfig
+from ..auto.modeling_flax_auto import FlaxAutoModel, FlaxAutoModelForCausalLM
+from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
+
+
logger = logging.get_logger(__name__)

# Config class name referenced by the docstring decorators below.
_CONFIG_FOR_DOC = "SpeechEncoderDecoderConfig"
+
# Class-level docstring template, injected onto the model class via
# `@add_start_docstrings` below. This is a runtime string, not a comment.
SPEECH_ENCODER_DECODER_START_DOCSTRING = r"""
    This class can be used to initialize a speech-sequence-to-text-sequence model with any pretrained speech
    autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is
    loaded via [`~AutoModel.from_pretrained`] function and the decoder is loaded via
    [`~AutoModelForCausalLM.from_pretrained`] function. Cross-attention layers are automatically added to the decoder
    and should be fine-tuned on a downstream generative task, like summarization.

    The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
    tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
    Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi
    Zhou, Wei Li, Peter J. Liu.

    Additionally, in [Large-Scale Self- and Semi-Supervised Learning for Speech
    Translation](https://arxiv.org/abs/2104.06678) it is shown how leveraging large pretrained speech models for speech
    translation yields a significant performance improvement.

    After such an Speech-Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other
    models (see the examples for more information).

    This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a Flax Linen
    [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
    regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.

    Parameters:
        config ([`SpeechEncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
        dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
            The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
            `jax.numpy.bfloat16` (on TPUs).

            This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
            specified all the computation will be performed with the given `dtype`.

            **Note that this only specifies the dtype of the computation and does not influence the dtype of model
            parameters.**

            If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
            [`~FlaxPreTrainedModel.to_bf16`].
"""
+
# Forward-pass argument documentation, attached to `__call__` via
# `@add_start_docstrings_to_model_forward`. Runtime string, not a comment.
SPEECH_ENCODER_DECODER_INPUTS_DOCSTRING = r"""
    Args:
        inputs (`jnp.ndarray` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, feature_dim)`, *optional*):
            Float values of input raw speech waveform or speech features. Values can be obtained by loading a `.flac`
            or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile
            library (`pip install soundfile`). To prepare the array into `inputs`, either the [`Wav2Vec2Processor`] or
            [`Speech2TextProcessor`] should be used for padding and conversion into a tensor of type
            `torch.FloatTensor`.
        attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be
            created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id`
            and prepending them with the `decoder_start_token_id`.
        decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
            range `[0, config.decoder.max_position_embeddings - 1]`.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            If set to `True`, the model will return a [`~utils.FlaxSeq2SeqLMOutput`] instead of a plain tuple.
"""
+
# Argument documentation for the standalone `encode` method. Runtime string.
SPEECH_ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING = r"""
    Args:
        inputs (`jnp.ndarray` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, feature_dim)`, *optional*):
            Float values of input raw speech waveform or speech features. Values can be obtained by loading a *.flac*
            or *.wav* audio file into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile
            library (*pip install soundfile*). To prepare the array into *inputs*, either the [`Wav2Vec2Processor`] or
            [`Speech2TextProcessor`] should be used for padding and conversion into a tensor of type
            *torch.FloatTensor*.
        attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            If set to `True`, the model will return a [`~utils.FlaxBaseModelOutput`] instead of a plain tuple.
"""
+
# Argument documentation for the standalone `decode` method. Runtime string.
SPEECH_ENCODER_DECODER_DECODE_INPUTS_DOCSTRING = r"""
    Args:
        decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be
            created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id`
            and prepending them with the `decoder_start_token_id`.
        encoder_outputs (`tuple(tuple(jnp.ndarray)`):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
        encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
            range `[0, config.decoder.max_position_embeddings - 1]`.
        past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
            Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
            auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            If set to `True`, the model will return a [`~utils.FlaxCausalLMOutputWithCrossAttentions`] instead of a
            plain tuple.
"""
+
+
class FlaxSpeechEncoderDecoderModule(nn.Module):
    """Inner Flax module pairing a speech encoder with an autoregressive text decoder.

    The concrete encoder/decoder module classes are resolved at setup time from
    the auto-model registries based on the sub-config classes.
    """

    # Combined config holding both `config.encoder` and `config.decoder`.
    config: SpeechEncoderDecoderConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        """Instantiate encoder/decoder sub-modules and the optional enc->dec projection."""
        encoder_config = self.config.encoder
        decoder_config = self.config.decoder

        # Copied from `modeling_hybrid_clip.py` with modifications.
        from ...models.auto.modeling_flax_auto import FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_MAPPING

        # Look up the inner nn.Module class (not the PreTrainedModel wrapper)
        # registered for each sub-config class.
        encoder_module = FLAX_MODEL_MAPPING[encoder_config.__class__].module_class
        decoder_module = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING[decoder_config.__class__].module_class

        self.encoder = encoder_module(encoder_config, dtype=self.dtype)
        self.decoder = decoder_module(decoder_config, dtype=self.dtype)

        # encoder outputs might need to be projected to different dimension for decoder
        if (
            self.encoder.config.hidden_size != self.decoder.config.hidden_size
            and self.decoder.config.cross_attention_hidden_size is None
        ):
            self.enc_to_dec_proj = nn.Dense(
                self.decoder.config.hidden_size,
                kernel_init=jax.nn.initializers.normal(self.decoder.config.initializer_range),
                dtype=self.dtype,
            )
        else:
            # Dimensions already match (or the decoder handles the mismatch via
            # cross_attention_hidden_size), so no projection is needed.
            self.enc_to_dec_proj = None

    def _get_feat_extract_output_lengths(
        self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None
    ):
        """
        Computes the output length of the convolutional layers
        """

        add_adapter = self.config.encoder.add_adapter if add_adapter is None else add_adapter

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula taken
            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return (input_length - kernel_size) // stride + 1

        for kernel_size, stride in zip(self.config.encoder.conv_kernel, self.config.encoder.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)

        if add_adapter:
            # adapter layers use kernel size 1, so only the stride shrinks the length
            for _ in range(self.config.encoder.num_adapter_layers):
                input_lengths = _conv_out_length(input_lengths, 1, self.config.encoder.adapter_stride)

        return input_lengths

    # Accessors used by the outer FlaxPreTrainedModel wrapper when calling
    # sub-modules selectively (encode / decode / init_cache).
    def _get_encoder_module(self):
        return self.encoder

    def _get_projection_module(self):
        return self.enc_to_dec_proj

    def _get_decoder_module(self):
        return self.decoder

    def __call__(
        self,
        inputs,
        attention_mask,
        decoder_input_ids,
        decoder_attention_mask,
        decoder_position_ids,
        encoder_outputs=None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
        freeze_feature_encoder: bool = False,
    ):
        """Run the encoder (unless `encoder_outputs` is precomputed), optionally
        project its hidden states, then run the decoder with cross-attention."""
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                inputs,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                deterministic=deterministic,
                freeze_feature_encoder=freeze_feature_encoder,
            )

        encoder_hidden_states = encoder_outputs[0]

        # optionally project encoder_hidden_states
        if self.enc_to_dec_proj is not None:
            encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)

        # compute correct encoder attention mask
        # (the conv feature extractor downsamples time, so the raw-waveform mask
        # must be shrunk to the encoder's output length)
        if attention_mask is not None:
            encoder_attention_mask = self.encoder._get_feature_vector_attention_mask(
                encoder_hidden_states.shape[1], attention_mask
            )
        else:
            encoder_attention_mask = None

        # flax script modeling_flax_wav2vec2.py
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            position_ids=decoder_position_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return FlaxSeq2SeqLMOutput(
            logits=decoder_outputs.logits,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_hidden_states,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
+
+
+@add_start_docstrings(SPEECH_ENCODER_DECODER_START_DOCSTRING)
+class FlaxSpeechEncoderDecoderModel(FlaxPreTrainedModel):
+ r"""
+ [`FlaxSpeechEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture
+ with the module (flax.nn.Module) of one of the base model classes of the library as encoder module and another one
+ as decoder module when created with the :meth*~transformers.FlaxAutoModel.from_pretrained* class method for the
+ encoder and :meth*~transformers.FlaxAutoModelForCausalLM.from_pretrained* class method for the decoder.
+ """
+
+ config_class = SpeechEncoderDecoderConfig
+ base_model_prefix: str = "speech_encoder_decoder"
+ module_class = FlaxSpeechEncoderDecoderModule
+
+ def __init__(
+ self,
+ config: SpeechEncoderDecoderConfig,
+ input_shape: Optional[Tuple] = None,
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ if not _do_init:
+ raise ValueError(
+ "`FlaxSpeechEncoderDecoderModel` cannot be created without initializing, `_do_init` must be `True`."
+ )
+
+ if config.decoder.cross_attention_hidden_size is not None:
+ # Raise ValueError or option to project enc to dec hidden_size (eg EncAdapterLayer)
+ if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
+ raise ValueError(
+ "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
+ f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
+ f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
+ " `config.encoder.hidden_size`."
+ )
+
+ # make sure input & output embeddings are not tied
+ config.tie_word_embeddings = False
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+
+ if input_shape is None:
+ # speech encoders almost always downsample the sequence length dimension
+ encoder_input_length = 1024
+ decoder_input_length = module._get_feat_extract_output_lengths(encoder_input_length)
+ input_shape = ((1, encoder_input_length), (1, decoder_input_length))
+
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ encoder_input_shape, decoder_input_shape = input_shape
+
+ # init input DeviceArrays
+ inputs = jnp.zeros(encoder_input_shape, dtype="f4")
+ attention_mask = jnp.ones_like(inputs, dtype="i4")
+ decoder_input_ids = jnp.zeros(decoder_input_shape, dtype="i4")
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+
+ batch_size, sequence_length = inputs.shape
+
+ decoder_batch_size, decoder_sequence_length = decoder_input_ids.shape
+ if not decoder_batch_size == batch_size:
+ raise ValueError(
+ f"The inputs of encoder and decoder should have the same batch size, but got {batch_size} for encoder"
+ f" and {decoder_batch_size} for decoder."
+ )
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(decoder_sequence_length)[None, :], (decoder_batch_size, decoder_sequence_length)
+ )
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ random_params = self.module.init(
+ rngs,
+ inputs,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ decoder_position_ids,
+ )["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
    def init_cache(self, batch_size, max_length, encoder_outputs):
        r"""
        Initialize the decoder's auto-regressive key/value cache for fast generation.

        Args:
            batch_size (`int`):
                batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
            max_length (`int`):
                maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
                cache.
            encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):
                `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
                `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*)
                is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
                cross-attention of the decoder.
        """
        # init input variables to retrieve cache
        decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
        decoder_attention_mask = jnp.ones_like(decoder_input_ids)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
        )

        def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
            # Run only the decoder submodule of the composite model.
            decoder_module = module._get_decoder_module()
            return decoder_module(
                input_ids=decoder_input_ids,
                attention_mask=decoder_attention_mask,
                position_ids=decoder_position_ids,
                **kwargs,
            )

        # A dummy forward pass with `init_cache=True` makes Flax materialize the
        # "cache" variable collection, which is then returned unfrozen for mutation.
        init_variables = self.module.init(
            jax.random.PRNGKey(0),
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            encoder_hidden_states=encoder_outputs[0],
            init_cache=True,
            method=_decoder_forward,  # we only need to call the decoder to init the cache
        )
        return unfreeze(init_variables["cache"])
+
    def _get_feat_extract_output_lengths(
        self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None
    ):
        """Return the encoder feature-extractor output lengths for raw-audio `input_lengths` (delegates to the module)."""
        return self.module._get_feat_extract_output_lengths(input_lengths, add_adapter=add_adapter)
+
    @add_start_docstrings(SPEECH_ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def encode(
        self,
        inputs: jnp.ndarray,
        attention_mask: Optional[jnp.ndarray] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        train: bool = False,
        freeze_feature_encoder: bool = False,
        params: dict = None,
        dropout_rng: PRNGKey = None,
    ):
        r"""
        Returns:

        Example:

        ```python
        >>> from transformers import FlaxSpeechEncoderDecoderModel

        >>> # initialize a wav2vec2-2-bart from pretrained wav2vec2 and bart models. Note that the cross-attention layers will be randomly initialized
        >>> model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
        ...     "facebook/wav2vec2-large-lv60", "facebook/bart-large"
        ... )

        >>> inputs = jnp.ones((2, 5000), dtype=jnp.float32)
        >>> encoder_outputs = model.encode(inputs)
        ```"""
        # Fall back to config defaults for any unspecified output flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # No mask given: attend to every input frame.
        if attention_mask is None:
            attention_mask = jnp.ones_like(inputs, dtype="i4")

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        def _encoder_forward(module, inputs, attention_mask, **kwargs):
            # Run only the encoder submodule of the composite model.
            encode_module = module._get_encoder_module()
            return encode_module(inputs, attention_mask, **kwargs)

        outputs = self.module.apply(
            {"params": params or self.params},
            inputs=jnp.array(inputs, dtype="f4"),
            attention_mask=jnp.array(attention_mask, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=not train,
            freeze_feature_encoder=freeze_feature_encoder,
            rngs=rngs,
            method=_encoder_forward,
        )

        # Normalize the encoder-specific output class to a plain FlaxBaseModelOutput.
        if return_dict:
            outputs = FlaxBaseModelOutput(
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )

        return outputs
+
    @add_start_docstrings(SPEECH_ENCODER_DECODER_DECODE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def decode(
        self,
        decoder_input_ids,
        encoder_outputs,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        decoder_attention_mask: Optional[jnp.ndarray] = None,
        decoder_position_ids: Optional[jnp.ndarray] = None,
        past_key_values: dict = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        train: bool = False,
        params: dict = None,
        dropout_rng: PRNGKey = None,
    ):
        r"""
        Returns:

        Example:

        ```python
        >>> from transformers import FlaxSpeechEncoderDecoderModel
        >>> import jax.numpy as jnp

        >>> # initialize a wav2vec2-2-bart from pretrained wav2vec2 and bart models. Note that the cross-attention layers will be randomly initialized
        >>> model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
        ...     "facebook/wav2vec2-large-lv60", "facebook/bart-large"
        ... )

        >>> inputs = jnp.ones((2, 5000), dtype=jnp.float32)
        >>> encoder_outputs = model.encode(inputs)

        >>> decoder_start_token_id = model.config.decoder.bos_token_id
        >>> decoder_input_ids = jnp.ones((inputs.shape[0], 1), dtype="i4") * decoder_start_token_id

        >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
        >>> logits = outputs.logits
        ```"""
        # Fall back to config defaults for any unspecified output flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # `encoder_outputs[0]` is the encoder's last hidden state (model-output tuple convention).
        encoder_hidden_states = encoder_outputs[0]
        if encoder_attention_mask is None:
            batch_size, sequence_length = encoder_hidden_states.shape[:2]
            encoder_attention_mask = jnp.ones((batch_size, sequence_length))

        batch_size, sequence_length = decoder_input_ids.shape
        if decoder_attention_mask is None:
            decoder_attention_mask = jnp.ones((batch_size, sequence_length))

        if decoder_position_ids is None:
            # With an existing cache we cannot infer absolute positions from shape alone.
            if past_key_values is not None:
                raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")

            decoder_position_ids = jnp.broadcast_to(
                jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
            )

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        params = {"params": params or self.params}

        # if past_key_values are passed then cache is already initialized a private flag init_cache has to be
        # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that
        # it can be changed by FlaxBartAttention module
        if past_key_values:
            params["cache"] = past_key_values
            mutable = ["cache"]
        else:
            mutable = False

        def _decoder_forward(
            module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states, **kwargs
        ):
            projection_module = module._get_projection_module()
            decoder_module = module._get_decoder_module()

            # optionally project encoder_hidden_states
            if projection_module is not None:
                encoder_hidden_states = projection_module(encoder_hidden_states)

            return decoder_module(
                decoder_input_ids,
                decoder_attention_mask,
                decoder_position_ids,
                encoder_hidden_states=encoder_hidden_states,
                **kwargs,
            )

        outputs = self.module.apply(
            params,
            decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
            decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
            decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=not train,
            rngs=rngs,
            mutable=mutable,
            method=_decoder_forward,
        )

        # add updated cache to model output
        # (with `mutable=["cache"]`, `apply` returns (outputs, mutated_variables))
        if past_key_values is not None and return_dict:
            outputs, past = outputs
            outputs["past_key_values"] = unfreeze(past["cache"])
            return outputs
        elif past_key_values is not None and not return_dict:
            outputs, past = outputs
            outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]

        return outputs
+
+ @add_start_docstrings_to_model_forward(SPEECH_ENCODER_DECODER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def __call__(
+ self,
+ inputs: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ decoder_input_ids: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_position_ids: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ freeze_feature_encoder: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import FlaxSpeechEncoderDecoderModel, AutoTokenizer
+
+ >>> # load a fine-tuned wav2vec2-2-bart model
+ >>> model = FlaxSpeechEncoderDecoderModel.from_pretrained("patrickvonplaten/wav2vec2-2-bart-large")
+ >>> # load output tokenizer
+ >>> tokenizer_output = AutoTokenizer.from_pretrained("facebook/bart-large")
+
+ >>> inputs = jnp.ones((2, 5000), dtype=jnp.float32)
+
+ >>> # use bart's special bos, pad and eos tokens
+ >>> model.config.decoder_start_token_id = model.decoder.config.bos_token_id
+ >>> model.config.pad_token_id = model.decoder.config.pad_token_id
+ >>> model.config.eos_token_id = model.decoder.config.eos_token_id
+
+ >>> outputs = model.generate(inputs)
+ # Assert something? More interesting input? dtype correct?
+ ```
+ """
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ # prepare encoder inputs
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(inputs, dtype="i4")
+
+ # prepare decoder inputs
+ if decoder_input_ids is None:
+ raise ValueError(
+ "`decoder_input_ids` cannot be `None`. For sequence to sequence training, `decoder_position_ids` must"
+ " be specified as an input argument."
+ )
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+ if decoder_position_ids is None:
+ batch_size, sequence_length = decoder_input_ids.shape
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
+ )
+
+ # Handle any PRNG if needed
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
+
+ return self.module.apply(
+ {"params": params or self.params},
+ inputs=jnp.array(inputs, dtype="f4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ freeze_feature_encoder=freeze_feature_encoder,
+ rngs=rngs,
+ )
+
    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        max_length,
        attention_mask: Optional[jax.Array] = None,
        decoder_attention_mask: Optional[jax.Array] = None,
        encoder_outputs=None,
        **kwargs,
    ):
        """Build the static decoder inputs (cache, extended mask, position ids) consumed by `generate`."""
        # initializing the cache
        batch_size, seq_length = decoder_input_ids.shape

        past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
        # Note that usually one would have to put 0's in the attention_mask for x > input.shape[-1] and x < cache_length.
        # But since the decoder uses a causal mask, those positions are masked anyways.
        # Thus we can create a single static attention_mask here, which is more efficient for compilation
        extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
        if decoder_attention_mask is not None:
            # Positions count only unmasked tokens: cumulative sum of the mask, zero-based.
            decoder_position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
            extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
        else:
            decoder_position_ids = jnp.broadcast_to(
                jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)
            )

        return {
            "past_key_values": past_key_values,
            "encoder_outputs": encoder_outputs,
            "encoder_attention_mask": attention_mask,
            "decoder_attention_mask": extended_attention_mask,
            "decoder_position_ids": decoder_position_ids,
        }
+
    def update_inputs_for_generation(self, model_outputs, model_kwargs):
        """Carry the updated cache forward and advance the (length-1) position ids by one decoding step."""
        model_kwargs["past_key_values"] = model_outputs.past_key_values
        model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
        return model_kwargs
+
+ @classmethod
+ def from_encoder_decoder_pretrained(
+ cls,
+ encoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
+ decoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
+ *model_args,
+ **kwargs,
+ ) -> FlaxPreTrainedModel:
+ r"""
+ Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
+ checkpoints.
+
+ Params:
+ encoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*):
+ Information necessary to initiate the encoder. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+
+ decoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*, defaults to `None`):
+ Information necessary to initiate the decoder. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+
+ model_args (remaining positional arguments, *optional*):
+ All remaning positional arguments will be passed to the underlying model's `__init__` method.
+
+ kwargs (remaining dictionary of keyword arguments, *optional*):
+ Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
+ `output_attentions=True`).
+
+ - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
+ - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
+
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
+
+ Example:
+
+ ```python
+ >>> from transformers import FlaxSpeechEncoderDecoderModel
+
+ >>> # initialize a wav2vec2-2-bart from pretrained wav2vec2 and bart models. Note that the cross-attention layers will be randomly initialized
+ >>> model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
+ ... "facebook/wav2vec2-large-lv60", "facebook/bart-large"
+ ... )
+ >>> # saving model after fine-tuning
+ >>> model.save_pretrained("./wav2vec2-2-bart-large")
+ >>> # load fine-tuned model
+ >>> model = FlaxSpeechEncoderDecoderModel.from_pretrained("./wav2vec2-2-bart-large")
+ ```"""
+
+ kwargs_encoder = {
+ argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
+ }
+
+ kwargs_decoder = {
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
+ }
+
+ # remove encoder, decoder kwargs from kwargs
+ for key in kwargs_encoder.keys():
+ del kwargs["encoder_" + key]
+ for key in kwargs_decoder.keys():
+ del kwargs["decoder_" + key]
+
+ # Load and initialize the encoder and decoder
+ # The distinction between encoder and decoder at the model level is made
+ # by the value of the flag `is_decoder` that we need to set correctly.
+ encoder = kwargs_encoder.pop("model", None)
+ if encoder is None:
+ if encoder_pretrained_model_name_or_path is None:
+ raise ValueError(
+ "If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
+ "to be defined."
+ )
+
+ if "config" not in kwargs_encoder:
+ encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
+ encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
+ )
+ if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
+ logger.info(
+ f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
+ "from a decoder model. Cross-attention and casual mask are disabled."
+ )
+ encoder_config.is_decoder = False
+ encoder_config.add_cross_attention = False
+
+ kwargs_encoder["config"] = encoder_config
+
+ encoder = FlaxAutoModel.from_pretrained(
+ encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder
+ )
+
+ decoder = kwargs_decoder.pop("model", None)
+ if decoder is None:
+ if decoder_pretrained_model_name_or_path is None:
+ raise ValueError(
+ "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
+ "to be defined."
+ )
+
+ if "config" not in kwargs_decoder:
+ decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
+ decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
+ )
+ if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
+ logger.info(
+ f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
+ f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
+ f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
+ )
+ decoder_config.is_decoder = True
+ decoder_config.add_cross_attention = True
+
+ kwargs_decoder["config"] = decoder_config
+
+ if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
+ logger.warning(
+ f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
+ f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
+ "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
+ "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
+ "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
+ )
+
+ decoder = FlaxAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
+
+ # instantiate config with corresponding kwargs
+ dtype = kwargs.pop("dtype", jnp.float32)
+ config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
+
+ # make sure input & output word embeddings are not tied
+ config.tie_word_embeddings = False
+
+ # init model
+ model = cls(config, dtype=dtype)
+ model.params["encoder"] = encoder.params
+ model.params["decoder"] = decoder.params
+
+ return model
+
+
# Public API of this module.
__all__ = ["FlaxSpeechEncoderDecoderModel"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fa099d19230d5e2768b3d0d3b60b1d4104f9f5d
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py
@@ -0,0 +1,598 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Classes to support Speech-Encoder-Text-Decoder architectures"""
+
+from typing import Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...configuration_utils import PretrainedConfig
+from ...generation import GenerationMixin
+from ...modeling_outputs import BaseModelOutput, Seq2SeqLMOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from ..auto.configuration_auto import AutoConfig
+from ..auto.modeling_auto import AutoModel, AutoModelForCausalLM
+from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
+
+
# Module-level logger (transformers' logging wrapper).
logger = logging.get_logger(__name__)

# Config class name substituted into the auto-generated docstrings below.
_CONFIG_FOR_DOC = "SpeechEncoderDecoderConfig"
+
# Class-level docstring injected via `@add_start_docstrings`.
# (Fixed grammar: "such an Speech-Encoder" -> "such a", "all its model" -> "all its models".)
SPEECH_ENCODER_DECODER_START_DOCSTRING = r"""
    This class can be used to initialize a speech-sequence-to-text-sequence model with any pretrained speech
    autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is
    loaded via [`~AutoModel.from_pretrained`] function and the decoder is loaded via
    [`~AutoModelForCausalLM.from_pretrained`] function. Cross-attention layers are automatically added to the decoder
    and should be fine-tuned on a downstream generative task, like summarization.

    The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
    tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
    Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi
    Zhou, Wei Li, Peter J. Liu.

    Additionally, in [Large-Scale Self- and Semi-Supervised Learning for Speech
    Translation](https://arxiv.org/abs/2104.06678) it is shown how leveraging large pretrained speech models for speech
    translation yields a significant performance improvement.

    After such a Speech-Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other
    models (see the examples for more information).

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`SpeechEncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
+
# Forward-pass argument docstring injected via `@add_start_docstrings_to_model_forward`.
SPEECH_ENCODER_DECODER_INPUTS_DOCSTRING = r"""
    Args:
        inputs (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, feature_dim)`, *optional*):
            Float values of input raw speech waveform or speech features. Values can be obtained by loading a `.flac`
            or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile
            library (`pip install soundfile`). To prepare the array into `inputs`, either the [`Wav2Vec2Processor`] or
            [`Speech2TextProcessor`] should be used for padding and conversion into a tensor of type
            `torch.FloatTensor`.
        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the
            right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`.
        decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        encoder_outputs (`tuple(torch.FloatTensor)`, *optional*):
            This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) is a tensor
            of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the
            decoder.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
            representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
            into associated vectors than the model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
            ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
            into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install
            soundfile*). To prepare the array into *input_values*, the [`Wav2Vec2Processor`] should be used for padding
            and conversion into a tensor of type *torch.FloatTensor*. See [`Wav2Vec2Processor.__call__`] for details.
        input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`, *optional*):
            Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained
            by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.*
            via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
            [`Speech2TextFeatureExtractor`] should be used for extracting the fbank features, padding and conversion
            into a tensor of type `torch.FloatTensor`. See [`~Speech2TextFeatureExtractor.__call__`]
        return_dict (`bool`, *optional*):
            If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.
        kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:

            - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
            - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.
"""
+
+
# Copied from transformers.models.encoder_decoder.modeling_encoder_decoder.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right.

    Produces decoder inputs from `labels`: prepends `decoder_start_token_id`, drops the
    last token, and replaces any -100 ignore-index values with `pad_token_id`.
    """
    # Copy everything one position to the right, leaving column 0 for the start token.
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
    if decoder_start_token_id is None:
        raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
    shifted_input_ids[:, 0] = decoder_start_token_id

    if pad_token_id is None:
        raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

    return shifted_input_ids
+
+
+@add_start_docstrings(SPEECH_ENCODER_DECODER_START_DOCSTRING)
+class SpeechEncoderDecoderModel(PreTrainedModel, GenerationMixin):
+ r"""
+ [`SpeechEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with
+ one of the base model classes of the library as encoder and another one as decoder when created with the
+ :meth*~transformers.AutoModel.from_pretrained* class method for the encoder and
+ :meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder.
+ """
+
    config_class = SpeechEncoderDecoderConfig
    base_model_prefix = "speech_encoder_decoder"
    # The model's primary input is raw speech `inputs`, not `input_ids`.
    main_input_name = "inputs"
    supports_gradient_checkpointing = True
    # NOTE(review): fast param/buffer assignment is disabled for this composite model — confirm against loading code.
    _supports_param_buffer_assignment = False
    _supports_flash_attn_2 = True
    _supports_sdpa = True
+
    def __init__(
        self,
        config: Optional[PretrainedConfig] = None,
        encoder: Optional[PreTrainedModel] = None,
        decoder: Optional[PreTrainedModel] = None,
    ):
        """Build the composite model from a config, from encoder/decoder instances, or both."""
        if config is None and (encoder is None or decoder is None):
            raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
        if config is None:
            # Derive the composite config from the two submodel configs.
            config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
        else:
            if not isinstance(config, self.config_class):
                raise ValueError(f"Config: {config} has to be of type {self.config_class}")

        if config.decoder.cross_attention_hidden_size is not None:
            # When the decoder declares its cross-attention width, it must match the encoder.
            if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
                raise ValueError(
                    "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
                    f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
                    f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
                    " `config.encoder.hidden_size`."
                )

        # initialize with config
        # make sure input & output embeddings is not tied
        config.tie_word_embeddings = False
        super().__init__(config)

        if encoder is None:
            encoder = AutoModel.from_config(config.encoder)

        if decoder is None:
            decoder = AutoModelForCausalLM.from_config(config.decoder)

        self.encoder = encoder
        self.decoder = decoder

        if self.encoder.config.to_dict() != self.config.encoder.to_dict():
            logger.warning(
                f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
                f" {self.config.encoder}"
            )
        if self.decoder.config.to_dict() != self.config.decoder.to_dict():
            logger.warning(
                f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
                f" {self.config.decoder}"
            )

        # make sure that the individual model's config refers to the shared config
        # so that the updates to the config will be synced
        self.config.encoder._attn_implementation = self.encoder.config._attn_implementation
        self.config.decoder._attn_implementation = self.decoder.config._attn_implementation
        self.encoder.config = self.config.encoder
        self.decoder.config = self.config.decoder

        # get encoder output hidden size
        self.encoder_output_dim = getattr(config.encoder, "output_hidden_size", config.encoder.hidden_size)
        if (
            self.encoder_output_dim != self.decoder.config.hidden_size
            and self.decoder.config.cross_attention_hidden_size is None
        ):
            # encoder outputs might need to be projected to different dimension for decoder
            # NOTE(review): in_features is `hidden_size`, not `encoder_output_dim` — confirm this is
            # intended when the encoder config defines a differing `output_hidden_size`.
            self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)

        if self.encoder.get_output_embeddings() is not None:
            raise ValueError(
                f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
            )
+
def get_encoder(self):
    """Return the wrapped speech encoder sub-model."""
    encoder_model = self.encoder
    return encoder_model
+
def get_decoder(self):
    """Return the wrapped text decoder sub-model."""
    decoder_model = self.decoder
    return decoder_model
+
def get_input_embeddings(self):
    """Return the decoder's input (token) embedding module."""
    decoder_model = self.decoder
    return decoder_model.get_input_embeddings()
+
def get_output_embeddings(self):
    """Return the decoder's output (LM head) embedding module."""
    decoder_model = self.decoder
    return decoder_model.get_output_embeddings()
+
def set_output_embeddings(self, new_embeddings):
    """Replace the decoder's output embeddings with ``new_embeddings``."""
    decoder_model = self.decoder
    return decoder_model.set_output_embeddings(new_embeddings)
+
def freeze_feature_encoder(self):
    """
    Disable gradient computation for the feature encoder of the wrapped speech
    encoder so that its parameters are not updated during training.
    """
    speech_encoder = self.encoder
    speech_encoder.freeze_feature_encoder()
+
@classmethod
def from_pretrained(cls, *args, **kwargs):
    """Load a pretrained composite model, forcing slow weight initialization.

    Fast initialization is not supported for composite encoder-decoder models,
    so `_fast_init` is forced to `False` before deferring to the parent class.
    """
    wants_fast_init = kwargs.get("_fast_init", False)
    if wants_fast_init:
        logger.warning(
            "Fast initialization is currently not supported for SpeechEncoderDecoderModel. "
            "Falling back to slow initialization..."
        )
        kwargs["_fast_init"] = False
    return super().from_pretrained(*args, **kwargs)
+
@classmethod
def from_encoder_decoder_pretrained(
    cls,
    encoder_pretrained_model_name_or_path: str = None,
    decoder_pretrained_model_name_or_path: str = None,
    *model_args,
    **kwargs,
) -> PreTrainedModel:
    r"""
    Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
    checkpoints.


    The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
    the model, you need to first set it back in training mode with `model.train()`.

    Params:
        encoder_pretrained_model_name_or_path (`str`, *optional*):
            Information necessary to initiate the encoder. Can be either:

            - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
            - A path to a *directory* containing model weights saved using
              [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
            - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
              this case, `from_tf` should be set to `True` and a configuration object should be provided as
              `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
              PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.

        decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
            Information necessary to initiate the decoder. Can be either:

            - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
            - A path to a *directory* containing model weights saved using
              [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
            - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
              this case, `from_tf` should be set to `True` and a configuration object should be provided as
              `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
              PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.

        model_args (remaining positional arguments, *optional*):
            All remaning positional arguments will be passed to the underlying model's `__init__` method.

        kwargs (remaining dictionary of keyword arguments, *optional*):
            Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
            `output_attentions=True`).

            - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
            - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
            - To update the parent model configuration, do not use a prefix for each configuration parameter.

            Behaves differently depending on whether a `config` is provided or automatically loaded.

    Example:

    ```python
    >>> from transformers import SpeechEncoderDecoderModel

    >>> # initialize a wav2vec2bert from a pretrained Wav2Vec2 and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized
    >>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    ...     "facebook/wav2vec2-base-960h", "google-bert/bert-base-uncased"
    ... )
    >>> # saving model after fine-tuning
    >>> model.save_pretrained("./wav2vec2bert")
    >>> # load fine-tuned model
    >>> model = SpeechEncoderDecoderModel.from_pretrained("./wav2vec2bert")
    ```"""

    # Split kwargs by prefix: `encoder_*` go to the encoder, `decoder_*` to the decoder.
    kwargs_encoder = {
        argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
    }

    kwargs_decoder = {
        argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
    }

    # remove encoder, decoder kwargs from kwargs so only shared-config kwargs remain
    for key in kwargs_encoder.keys():
        del kwargs["encoder_" + key]
    for key in kwargs_decoder.keys():
        del kwargs["decoder_" + key]

    # Load and initialize the encoder and decoder
    # The distinction between encoder and decoder at the model level is made
    # by the value of the flag `is_decoder` that we need to set correctly.
    encoder = kwargs_encoder.pop("model", None)
    if encoder is None:
        if encoder_pretrained_model_name_or_path is None:
            raise ValueError(
                "If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
                "to be defined."
            )

        if "config" not in kwargs_encoder:
            encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
                encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
            )

            if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
                # The checkpoint was configured as a decoder; repurpose it as an encoder.
                logger.info(
                    f"Initializing {encoder_pretrained_model_name_or_path} as an encoder model "
                    "from a decoder model. Cross-attention and causal mask are disabled."
                )
                encoder_config.is_decoder = False
                encoder_config.add_cross_attention = False

            kwargs_encoder["config"] = encoder_config

        encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)

    decoder = kwargs_decoder.pop("model", None)
    if decoder is None:
        if decoder_pretrained_model_name_or_path is None:
            raise ValueError(
                "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
                "to be defined."
            )

        if "config" not in kwargs_decoder:
            decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
                decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
            )

            if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
                logger.info(
                    f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
                    f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
                    f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
                )
                decoder_config.is_decoder = True
                decoder_config.add_cross_attention = True

            kwargs_decoder["config"] = decoder_config

        # Warn if a user-supplied decoder config still isn't decoder-shaped.
        if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
            logger.warning(
                f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
                f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
                "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
                "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
                "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
            )

        decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)

    # instantiate config with corresponding kwargs
    config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)

    # make sure input & output embeddings is not tied
    config.tie_word_embeddings = False
    return cls(encoder=encoder, decoder=decoder, config=config)
+
@add_start_docstrings_to_model_forward(SPEECH_ENCODER_DECODER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
    self,
    inputs: Optional[torch.FloatTensor] = None,
    attention_mask: Optional[torch.FloatTensor] = None,
    decoder_input_ids: Optional[torch.LongTensor] = None,
    decoder_attention_mask: Optional[torch.BoolTensor] = None,
    encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
    decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    input_values: Optional[torch.FloatTensor] = None,
    input_features: Optional[torch.FloatTensor] = None,
    return_dict: Optional[bool] = None,
    **kwargs,
) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
    r"""
    Run the speech encoder, optionally project its hidden states, and decode.

    Returns:

    Examples:

    ```python
    >>> from transformers import SpeechEncoderDecoderModel, AutoProcessor
    >>> from datasets import load_dataset
    >>> import torch

    >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")
    >>> model = SpeechEncoderDecoderModel.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")

    >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")

    >>> input_values = processor(ds[0]["audio"]["array"], return_tensors="pt").input_values
    >>> # Inference: Translate English speech to German
    >>> generated = model.generate(input_values)
    >>> decoded = processor.batch_decode(generated, skip_special_tokens=True)[0]
    >>> decoded
    'Mr. Quilter ist der Apostel der Mittelschicht und wir freuen uns, sein Evangelium willkommen heißen zu können.'

    >>> # Training: Train model on English transcription
    >>> labels = processor(text=ds[0]["text"], return_tensors="pt").input_ids

    >>> loss = model(input_values, labels=labels).loss
    >>> loss.backward()
    ```"""
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # Split generic kwargs: un-prefixed ones go to the encoder, `decoder_`-prefixed
    # ones (with the prefix stripped) go to the decoder.
    kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}

    kwargs_decoder = {
        argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
    }
    # `num_items_in_batch` is only consumed by the decoder's loss computation,
    # so it is moved out of the encoder kwargs.
    if "num_items_in_batch" in kwargs_encoder:
        kwargs_decoder["num_items_in_batch"] = kwargs_encoder.pop("num_items_in_batch", None)

    if encoder_outputs is None:
        if inputs is None:
            # Accept the audio either as `input_values` or `input_features`,
            # but never both at the same time.
            if input_values is not None and input_features is not None:
                raise ValueError("You cannot specify both input_values and input_features at the same time")
            elif input_values is not None:
                inputs = input_values
            elif input_features is not None:
                inputs = input_features
            else:
                raise ValueError("You have to specify either input_values or input_features")

        encoder_outputs = self.encoder(
            inputs,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs_encoder,
        )
    elif isinstance(encoder_outputs, tuple):
        # Normalize user-provided tuple outputs to a model-output object so that
        # attribute access below works uniformly.
        encoder_outputs = BaseModelOutput(*encoder_outputs)

    encoder_hidden_states = encoder_outputs[0]

    # optionally project encoder_hidden_states when the encoder and decoder widths
    # differ and the decoder has no cross-attention projection of its own
    if (
        self.encoder_output_dim != self.decoder.config.hidden_size
        and self.decoder.config.cross_attention_hidden_size is None
    ):
        encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)

    # compute correct encoder attention mask: the speech encoder downsamples the
    # time axis, so the raw audio mask must be mapped to the feature-vector length
    if attention_mask is not None:
        encoder_attention_mask = self.encoder._get_feature_vector_attention_mask(
            encoder_hidden_states.shape[1], attention_mask
        )
    else:
        encoder_attention_mask = None

    # Teacher forcing: derive decoder inputs by right-shifting the labels when
    # the caller did not supply decoder inputs explicitly.
    if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
        decoder_input_ids = shift_tokens_right(
            labels, self.config.pad_token_id, self.config.decoder_start_token_id
        )

    # Decode
    decoder_outputs = self.decoder(
        input_ids=decoder_input_ids,
        attention_mask=decoder_attention_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        inputs_embeds=decoder_inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        use_cache=use_cache,
        past_key_values=past_key_values,
        return_dict=return_dict,
        **kwargs_decoder,
    )

    # Compute loss independent from decoder (as some shift the logits inside them)
    loss = None
    if labels is not None:
        logits = decoder_outputs.logits if return_dict else decoder_outputs[0]
        loss_fct = CrossEntropyLoss()
        loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.reshape(-1))

    if not return_dict:
        # Legacy tuple return: decoder outputs first, then encoder outputs.
        if loss is not None:
            return (loss,) + decoder_outputs + encoder_outputs
        else:
            return decoder_outputs + encoder_outputs

    return Seq2SeqLMOutput(
        loss=loss,
        logits=decoder_outputs.logits,
        past_key_values=decoder_outputs.past_key_values,
        decoder_hidden_states=decoder_outputs.hidden_states,
        decoder_attentions=decoder_outputs.attentions,
        cross_attentions=decoder_outputs.cross_attentions,
        encoder_last_hidden_state=encoder_hidden_states,
        encoder_hidden_states=encoder_outputs.hidden_states,
        encoder_attentions=encoder_outputs.attentions,
    )
+
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
    """Build decoder input ids by right-shifting the label sequence."""
    pad_id = self.config.pad_token_id
    start_id = self.config.decoder_start_token_id
    return shift_tokens_right(labels, pad_id, start_id)
+
def resize_token_embeddings(self, *args, **kwargs):
    """Unsupported on the composite model; resize via the wrapped decoder instead."""
    message = (
        "Resizing the embedding layers via the SpeechEncoderDecoderModel directly is not supported. Please use the"
        " respective methods of the wrapped decoder object (model.decoder.resize_token_embeddings(...))"
    )
    raise NotImplementedError(message)
+
+ def _reorder_cache(self, past_key_values, beam_idx):
+ # apply decoder cache reordering here
+ return self.decoder._reorder_cache(past_key_values, beam_idx)
+
+
+__all__ = ["SpeechEncoderDecoderModel"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__init__.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..13b7f6e1bf1cb09f446df5cd8aada2e0ee942cbc
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
if TYPE_CHECKING:
    # Static type checkers see the real symbols directly.
    from .configuration_videomae import *
    from .feature_extraction_videomae import *
    from .image_processing_videomae import *
    from .modeling_videomae import *
else:
    import sys

    # At runtime, replace this module in sys.modules with a lazy proxy that
    # imports the submodules only on first attribute access.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22440e82f86141ea7953ce782769057f1b791016
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/__init__.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/configuration_videomae.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/configuration_videomae.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..18de5eddc87e25a3bfc18e33efe7ddffa09c8457
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/configuration_videomae.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/convert_videomae_to_pytorch.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/convert_videomae_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6f79f212879e9f86cefc7c82202dd68ecf323dc7
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/convert_videomae_to_pytorch.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/feature_extraction_videomae.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/feature_extraction_videomae.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..12f2c940478b3c874306701f461229a3d6e912ac
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/feature_extraction_videomae.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/image_processing_videomae.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/image_processing_videomae.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ee0c5ca0b242e55fd1e57f0ae00f4eb97ca04b8
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/image_processing_videomae.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/modeling_videomae.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/modeling_videomae.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..54b400c7ac930cb1b80b1a5089341c2bb7478e6f
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/modeling_videomae.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/configuration_videomae.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/configuration_videomae.py
new file mode 100644
index 0000000000000000000000000000000000000000..3940b6f010036cb3b3957f2fad94fed50737c85d
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/configuration_videomae.py
@@ -0,0 +1,148 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""VideoMAE model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
class VideoMAEConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`VideoMAEModel`]. It is used to instantiate a
    VideoMAE model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the VideoMAE
    [MCG-NJU/videomae-base](https://huggingface.co/MCG-NJU/videomae-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        num_frames (`int`, *optional*, defaults to 16):
            The number of frames in each video.
        tubelet_size (`int`, *optional*, defaults to 2):
            The temporal extent of each tubelet, i.e. the number of consecutive frames grouped into one patch.
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        use_mean_pooling (`bool`, *optional*, defaults to `True`):
            Whether to mean pool the final hidden states instead of using the final hidden state of the [CLS] token.
        decoder_num_attention_heads (`int`, *optional*, defaults to 6):
            Number of attention heads for each attention layer in the decoder.
        decoder_hidden_size (`int`, *optional*, defaults to 384):
            Dimensionality of the decoder.
        decoder_num_hidden_layers (`int`, *optional*, defaults to 4):
            Number of hidden layers in the decoder.
        decoder_intermediate_size (`int`, *optional*, defaults to 1536):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder.
        norm_pix_loss (`bool`, *optional*, defaults to `True`):
            Whether to normalize the target patch pixels.

    Example:

    ```python
    >>> from transformers import VideoMAEConfig, VideoMAEModel

    >>> # Initializing a VideoMAE videomae-base style configuration
    >>> configuration = VideoMAEConfig()

    >>> # Randomly initializing a model from the configuration
    >>> model = VideoMAEModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "videomae"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=16,
        tubelet_size=2,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        qkv_bias=True,
        use_mean_pooling=True,
        decoder_num_attention_heads=6,
        decoder_hidden_size=384,
        decoder_num_hidden_layers=4,
        decoder_intermediate_size=1536,
        norm_pix_loss=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Input video / patching geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size

        # Transformer encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.use_mean_pooling = use_mean_pooling

        # Lightweight decoder used for the masked-pretraining objective.
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.norm_pix_loss = norm_pix_loss
+
+
+__all__ = ["VideoMAEConfig"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/convert_videomae_to_pytorch.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/convert_videomae_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..c98160a6bb82bbdc96f164455fee1b1b2c13992a
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/convert_videomae_to_pytorch.py
@@ -0,0 +1,324 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert VideoMAE checkpoints from the original repository: https://github.com/MCG-NJU/VideoMAE"""
+
+import argparse
+import json
+
+import gdown
+import numpy as np
+import torch
+from huggingface_hub import hf_hub_download
+
+from transformers import (
+ VideoMAEConfig,
+ VideoMAEForPreTraining,
+ VideoMAEForVideoClassification,
+ VideoMAEImageProcessor,
+)
+
+
+def get_videomae_config(model_name):
+ """Return a VideoMAEConfig whose architecture sizes and (for fine-tuned
+ checkpoints) label mappings are inferred from *model_name*."""
+ config = VideoMAEConfig()
+
+ set_architecture_configs(model_name, config)
+
+ # Pre-training checkpoints use a final layernorm instead of mean pooling.
+ if "finetuned" not in model_name:
+ config.use_mean_pooling = False
+
+ if "finetuned" in model_name:
+ repo_id = "huggingface/label-files"
+ if "kinetics" in model_name:
+ config.num_labels = 400
+ filename = "kinetics400-id2label.json"
+ elif "ssv2" in model_name:
+ config.num_labels = 174
+ filename = "something-something-v2-id2label.json"
+ else:
+ raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
+ # NOTE(review): the file handle returned by open() is never closed;
+ # a `with` block would be preferable in a one-shot conversion script.
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ # JSON keys are strings; convert them back to integer class ids.
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+
+ return config
+
+
+def set_architecture_configs(model_name, config):
+ """Mutate *config* in place with the encoder/decoder sizes implied by the
+ "small"/"large"/"huge" substring of *model_name*.
+
+ "base" keeps the VideoMAEConfig defaults; any other name raises ValueError.
+ """
+ if "small" in model_name:
+ config.hidden_size = 384
+ config.intermediate_size = 1536
+ config.num_hidden_layers = 12
+ config.num_attention_heads = 16
+ config.decoder_num_hidden_layers = 12
+ config.decoder_num_attention_heads = 3
+ config.decoder_hidden_size = 192
+ config.decoder_intermediate_size = 768
+ elif "large" in model_name:
+ config.hidden_size = 1024
+ config.intermediate_size = 4096
+ config.num_hidden_layers = 24
+ config.num_attention_heads = 16
+ config.decoder_num_hidden_layers = 12
+ config.decoder_num_attention_heads = 8
+ config.decoder_hidden_size = 512
+ config.decoder_intermediate_size = 2048
+ elif "huge" in model_name:
+ config.hidden_size = 1280
+ config.intermediate_size = 5120
+ config.num_hidden_layers = 32
+ config.num_attention_heads = 16
+ config.decoder_num_hidden_layers = 12
+ config.decoder_num_attention_heads = 8
+ config.decoder_hidden_size = 640
+ config.decoder_intermediate_size = 2560
+ elif "base" not in model_name:
+ raise ValueError('Model name should include either "small", "base", "large", or "huge"')
+
+
+def rename_key(name):
+ """Map one original-checkpoint parameter name to its HF-Transformers name.
+
+ Replacements are plain substring substitutions, so their order matters:
+ an earlier replacement can prevent a later pattern from matching.
+ """
+ if "encoder." in name:
+ name = name.replace("encoder.", "")
+ if "cls_token" in name:
+ name = name.replace("cls_token", "videomae.embeddings.cls_token")
+ if "decoder_pos_embed" in name:
+ name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
+ if "pos_embed" in name and "decoder" not in name:
+ name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
+ if "patch_embed.proj" in name:
+ name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
+ if "patch_embed.norm" in name:
+ name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
+ if "decoder.blocks" in name:
+ name = name.replace("decoder.blocks", "decoder.decoder_layers")
+ if "blocks" in name:
+ name = name.replace("blocks", "videomae.encoder.layer")
+ if "attn.proj" in name:
+ name = name.replace("attn.proj", "attention.output.dense")
+ # Once "attn" is rewritten to "attention.self" the substring "attn" no
+ # longer occurs, so the second branch effectively applies to bias keys only.
+ if "attn" in name and "bias" not in name:
+ name = name.replace("attn", "attention.self")
+ if "attn" in name:
+ name = name.replace("attn", "attention.attention")
+ if "norm1" in name:
+ name = name.replace("norm1", "layernorm_before")
+ if "norm2" in name:
+ name = name.replace("norm2", "layernorm_after")
+ if "mlp.fc1" in name:
+ name = name.replace("mlp.fc1", "intermediate.dense")
+ if "mlp.fc2" in name:
+ name = name.replace("mlp.fc2", "output.dense")
+ if "decoder_embed" in name:
+ name = name.replace("decoder_embed", "decoder.decoder_embed")
+ if "decoder_norm" in name:
+ name = name.replace("decoder_norm", "decoder.decoder_norm")
+ if "decoder_pred" in name:
+ name = name.replace("decoder_pred", "decoder.decoder_pred")
+ # "fc" guard keeps fc_norm (mean-pooling head norm) from being renamed.
+ if "norm.weight" in name and "decoder" not in name and "fc" not in name:
+ name = name.replace("norm.weight", "videomae.layernorm.weight")
+ if "norm.bias" in name and "decoder" not in name and "fc" not in name:
+ name = name.replace("norm.bias", "videomae.layernorm.bias")
+ if "head" in name and "decoder" not in name:
+ name = name.replace("head", "classifier")
+
+ return name
+
+
+def convert_state_dict(orig_state_dict, config):
+ """Rewrite *orig_state_dict* keys in place to HF naming.
+
+ Fused qkv projection weights are split into separate query/key/value
+ entries; every other key goes through rename_key(). Returns the mutated
+ dict for convenience.
+ """
+ for key in orig_state_dict.copy().keys():
+ val = orig_state_dict.pop(key)
+
+ if key.startswith("encoder."):
+ key = key.replace("encoder.", "")
+
+ if "qkv" in key:
+ key_split = key.split(".")
+ if key.startswith("decoder.blocks"):
+ dim = config.decoder_hidden_size
+ layer_num = int(key_split[2])
+ prefix = "decoder.decoder_layers."
+ # Fused weight is laid out as [q; k; v] along dim 0.
+ if "weight" in key:
+ orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
+ orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
+ orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
+ else:
+ dim = config.hidden_size
+ layer_num = int(key_split[1])
+ prefix = "videomae.encoder.layer."
+ # NOTE(review): fused qkv *bias* entries are popped and never
+ # re-added — presumably the HF model stores biases under separate
+ # q_bias/v_bias parameters handled elsewhere; confirm against the
+ # model definition.
+ if "weight" in key:
+ orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
+ orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
+ orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
+ else:
+ orig_state_dict[rename_key(key)] = val
+
+ return orig_state_dict
+
+
+# We will verify our results on a video of eating spaghetti
+# Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227]
+def prepare_video():
+ """Download the reference spaghetti-eating clip and return it as a list of
+ per-frame numpy arrays (16 sampled frames)."""
+ file = hf_hub_download(
+ repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
+ )
+ video = np.load(file)
+ return list(video)
+
+
+def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
+ """Convert an original VideoMAE checkpoint to HF format and verify it.
+
+ Downloads the checkpoint from *checkpoint_url* (Google Drive), renames its
+ state dict, runs the model on a reference video, asserts the logits (and,
+ for one checkpoint, the loss) against hard-coded expected values, then
+ optionally saves to *pytorch_dump_folder_path* and/or pushes to the hub.
+ """
+ config = get_videomae_config(model_name)
+
+ if "finetuned" in model_name:
+ model = VideoMAEForVideoClassification(config)
+ else:
+ model = VideoMAEForPreTraining(config)
+
+ # download original checkpoint, hosted on Google Drive
+ output = "pytorch_model.bin"
+ gdown.cached_download(checkpoint_url, output, quiet=False)
+ # NOTE(review): torch.load unpickles arbitrary data — only run this script
+ # on trusted checkpoint URLs.
+ files = torch.load(output, map_location="cpu")
+ # Original checkpoints nest the state dict under "model" or "module".
+ if "model" in files:
+ state_dict = files["model"]
+ else:
+ state_dict = files["module"]
+ new_state_dict = convert_state_dict(state_dict, config)
+
+ model.load_state_dict(new_state_dict)
+ model.eval()
+
+ # verify model on basic input
+ image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
+ video = prepare_video()
+ inputs = image_processor(video, return_tensors="pt")
+
+ # Pre-training models additionally need a fixed boolean mask of patches.
+ if "finetuned" not in model_name:
+ local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
+ inputs["bool_masked_pos"] = torch.load(local_path)
+
+ outputs = model(**inputs)
+ logits = outputs.logits
+
+ # Supported names, used only for the error message below.
+ model_names = [
+ "videomae-small-finetuned-kinetics",
+ "videomae-small-finetuned-ssv2",
+ # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
+ "videomae-base-short",
+ "videomae-base-short-finetuned-kinetics",
+ "videomae-base",
+ "videomae-base-finetuned-kinetics",
+ "videomae-large",
+ "videomae-large-finetuned-kinetics",
+ "videomae-huge-finetuned-kinetics",
+ # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
+ "videomae-base-short-ssv2",
+ "videomae-base-short-finetuned-ssv2",
+ "videomae-base-ssv2",
+ "videomae-base-finetuned-ssv2",
+ ]
+
+ # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
+ if model_name == "videomae-small-finetuned-kinetics":
+ expected_shape = torch.Size([1, 400])
+ expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
+ elif model_name == "videomae-small-finetuned-ssv2":
+ expected_shape = torch.Size([1, 174])
+ expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
+ elif model_name == "videomae-base":
+ expected_shape = torch.Size([1, 1408, 1536])
+ expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
+ elif model_name == "videomae-base-short":
+ expected_shape = torch.Size([1, 1408, 1536])
+ expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
+ # we verified the loss both for normalized and unnormalized targets for this one
+ expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
+ elif model_name == "videomae-large":
+ expected_shape = torch.Size([1, 1408, 1536])
+ expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
+ elif model_name == "videomae-large-finetuned-kinetics":
+ expected_shape = torch.Size([1, 400])
+ expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
+ elif model_name == "videomae-huge-finetuned-kinetics":
+ expected_shape = torch.Size([1, 400])
+ expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
+ elif model_name == "videomae-base-short-finetuned-kinetics":
+ expected_shape = torch.Size([1, 400])
+ expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
+ elif model_name == "videomae-base-finetuned-kinetics":
+ expected_shape = torch.Size([1, 400])
+ expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
+ elif model_name == "videomae-base-short-ssv2":
+ expected_shape = torch.Size([1, 1408, 1536])
+ expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
+ elif model_name == "videomae-base-short-finetuned-ssv2":
+ expected_shape = torch.Size([1, 174])
+ expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
+ elif model_name == "videomae-base-ssv2":
+ expected_shape = torch.Size([1, 1408, 1536])
+ expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
+ elif model_name == "videomae-base-finetuned-ssv2":
+ expected_shape = torch.Size([1, 174])
+ expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
+ else:
+ raise ValueError(f"Model name not supported. Should be one of {model_names}")
+
+ # verify logits
+ assert logits.shape == expected_shape
+ if "finetuned" in model_name:
+ assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
+ else:
+ print("Logits:", logits[0, :3, :3])
+ assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
+ print("Logits ok!")
+
+ # verify loss, if applicable
+ if model_name == "videomae-base-short":
+ loss = outputs.loss
+ assert torch.allclose(loss, expected_loss, atol=1e-4)
+ print("Loss ok!")
+
+ if pytorch_dump_folder_path is not None:
+ print(f"Saving model and image processor to {pytorch_dump_folder_path}")
+ image_processor.save_pretrained(pytorch_dump_folder_path)
+ model.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ print("Pushing to the hub...")
+ model.push_to_hub(model_name, organization="nielsr")
+
+
+if __name__ == "__main__":
+ # CLI entry point: parse conversion arguments and run the converter.
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
+ type=str,
+ help=(
+ "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
+ " download link."
+ ),
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default="/Users/nielsrogge/Documents/VideoMAE/Test",
+ type=str,
+ help="Path to the output PyTorch model directory.",
+ )
+ parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
+ parser.add_argument(
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ )
+
+ args = parser.parse_args()
+ convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/feature_extraction_videomae.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/feature_extraction_videomae.py
new file mode 100644
index 0000000000000000000000000000000000000000..469cbcf523bd46a2f7a6d1dd8a70d5e05fe566e6
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/feature_extraction_videomae.py
@@ -0,0 +1,36 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for VideoMAE."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_videomae import VideoMAEImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
+ """Deprecated alias of VideoMAEImageProcessor.
+
+ Kept for backward compatibility; emits a FutureWarning on construction.
+ """
+
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
+ " Please use VideoMAEImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
+
+
+__all__ = ["VideoMAEFeatureExtractor"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/image_processing_videomae.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/image_processing_videomae.py
new file mode 100644
index 0000000000000000000000000000000000000000..afba947bbdbfcedd39ea0ee3bb10205356dbffd5
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/image_processing_videomae.py
@@ -0,0 +1,348 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for VideoMAE."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+ get_resize_output_image_size,
+ resize,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ IMAGENET_STANDARD_MEAN,
+ IMAGENET_STANDARD_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ is_valid_image,
+ to_numpy_array,
+ valid_images,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
+
+
+if is_vision_available():
+ import PIL
+
+
+logger = logging.get_logger(__name__)
+
+
+def make_batched(videos) -> List[List[ImageInput]]:
+ """Normalize *videos* to a batch: a list of videos, each a list of frames.
+
+ Accepts an already-batched input, a single video (list of frames), or a
+ single image; raises ValueError for anything else.
+ """
+ if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
+ return videos
+
+ elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
+ return [videos]
+
+ elif is_valid_image(videos):
+ return [[videos]]
+
+ raise ValueError(f"Could not make batched video from {videos}")
+
+
+class VideoMAEImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a VideoMAE image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
+ `do_resize` parameter in the `preprocess` method.
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
+ Size of the output image after resizing. The shortest edge of the image will be resized to
+ `size["shortest_edge"]` while maintaining the aspect ratio of the original image. Can be overridden by
+ `size` in the `preprocess` method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
+ `preprocess` method.
+ do_center_crop (`bool`, *optional*, defaults to `True`):
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop`
+ parameter in the `preprocess` method.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the
+ `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
+ in the `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Optional[Dict[str, int]] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_center_crop: bool = True,
+ crop_size: Optional[Dict[str, int]] = None,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ # Normalize size dicts into the canonical form used by the transforms.
+ size = size if size is not None else {"shortest_edge": 224}
+ size = get_size_dict(size, default_to_square=False)
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+
+ self.do_resize = do_resize
+ self.size = size
+ self.do_center_crop = do_center_crop
+ self.crop_size = crop_size
+ self.resample = resample
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will
+ have the size `(h, w)`. If `size` is of the form `{"shortest_edge": s}`, the output image will have its
+ shortest edge of length `s` while keeping the aspect ratio of the original image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+ Resampling filter to use when resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ size = get_size_dict(size, default_to_square=False)
+ if "shortest_edge" in size:
+ output_size = get_resize_output_image_size(
+ image, size["shortest_edge"], default_to_square=False, input_data_format=input_data_format
+ )
+ elif "height" in size and "width" in size:
+ output_size = (size["height"], size["width"])
+ else:
+ raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def _preprocess_image(
+ self,
+ image: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """Preprocesses a single image (one video frame): validate arguments,
+ then apply resize -> center crop -> rescale -> normalize as requested."""
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ # All transformations expect numpy arrays.
+ image = to_numpy_array(image)
+
+ if do_rescale and is_scaled_image(image):
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(image)
+
+ if do_resize:
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+
+ if do_center_crop:
+ image = self.center_crop(image, size=crop_size, input_data_format=input_data_format)
+
+ if do_rescale:
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+
+ if do_normalize:
+ image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+
+ # Convert to the requested output channel layout last.
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+ return image
+
+ @filter_out_non_signature_kwargs()
+ def preprocess(
+ self,
+ videos: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> BatchFeature:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ videos (`ImageInput`):
+ Video(s) to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255. If
+ passing in frames with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after applying resize.
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
+ has an effect if `do_resize` is set to `True`.
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+ Whether to center crop the image.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+ Size of the image after applying the center crop.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values between [0 - 1].
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the inferred channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ # Fall back to instance defaults for any argument left unset.
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ resample = resample if resample is not None else self.resample
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=False)
+ crop_size = crop_size if crop_size is not None else self.crop_size
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+
+ if not valid_images(videos):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ videos = make_batched(videos)
+
+ # Preprocess every frame of every video independently.
+ videos = [
+ [
+ self._preprocess_image(
+ image=img,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ for img in video
+ ]
+ for video in videos
+ ]
+
+ data = {"pixel_values": videos}
+ return BatchFeature(data=data, tensor_type=return_tensors)
+
+
+__all__ = ["VideoMAEImageProcessor"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/modeling_videomae.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/modeling_videomae.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e65ebf06d9c5051d6ae93544799cf8d80256277
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/videomae/modeling_videomae.py
@@ -0,0 +1,1138 @@
+# coding=utf-8
+# Copyright 2022 Multimedia Computing Group, Nanjing University and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch VideoMAE (masked autoencoder) model."""
+
+import collections.abc
+import math
+from copy import deepcopy
+from dataclasses import dataclass
+from typing import Optional, Set, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ...utils.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from .configuration_videomae import VideoMAEConfig
+
+
logger = logging.get_logger(__name__)

# Consumed by the `replace_return_docstrings` decorators on the forward methods below.
_CONFIG_FOR_DOC = "VideoMAEConfig"
# NOTE(review): not referenced in the visible code; presumably consumed by doc tooling — confirm.
_CHECKPOINT_FOR_DOC = "MCG-NJU/videomae-base"
+
+
@dataclass
class VideoMAEDecoderOutput(ModelOutput):
    """
    Class for VideoMAEDecoder's outputs, with potential hidden states and attentions.

    Args:
        logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`):
            Pixel reconstruction logits.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """

    # Annotated Optional to match the `None` default.
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
@dataclass
class VideoMAEForPreTrainingOutput(ModelOutput):
    """
    Class for VideoMAEForPreTraining's outputs, with potential hidden states and attentions.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`):
            Pixel reconstruction loss.
        logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`):
            Pixel reconstruction logits.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    # Annotated Optional to match the `None` default.
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
# sin-cos position encoding
# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31
def get_sinusoid_encoding_table(n_position, d_hid):
    """Build the fixed sinusoidal position-encoding table.

    Returns a float32 tensor of shape `(1, n_position, d_hid)` where even feature
    indices hold sines and odd feature indices hold cosines.
    """
    # TODO: make it with torch instead of numpy
    # The angle rate depends only on the paired feature index: 1 / 10000^(2*(j//2)/d_hid).
    feature_index = np.arange(d_hid)
    inv_freq = np.power(10000, 2 * (feature_index // 2) / d_hid)
    # Broadcasting positions against the rates yields the (n_position, d_hid) angle grid.
    angles = np.arange(n_position)[:, None] / inv_freq[None, :]
    angles[:, 0::2] = np.sin(angles[:, 0::2])  # dim 2i
    angles[:, 1::2] = np.cos(angles[:, 1::2])  # dim 2i+1
    return torch.FloatTensor(angles).unsqueeze(0)
+
+
class VideoMAEEmbeddings(nn.Module):
    """
    Construct the patch embeddings and add the fixed sinusoidal position embeddings.
    When a boolean mask is supplied, only the visible (non-masked) patches are kept.
    """

    def __init__(self, config):
        super().__init__()

        self.patch_embeddings = VideoMAEPatchEmbeddings(config)
        self.num_patches = self.patch_embeddings.num_patches
        # Fixed (non-learnable) sin-cos table, shape (1, num_patches, hidden_size).
        self.position_embeddings = get_sinusoid_encoding_table(self.num_patches, config.hidden_size)
        self.config = config

    def forward(self, pixel_values, bool_masked_pos):
        # Patchify the video into a token sequence.
        tokens = self.patch_embeddings(pixel_values)

        # Add the (detached) position table, matching dtype and device of the tokens.
        pos_table = self.position_embeddings.type_as(tokens).to(tokens.device).clone().detach()
        tokens = tokens + pos_table

        # ~bool_masked_pos selects the visible patches; masked ones are dropped here.
        if bool_masked_pos is not None:
            batch_size, _, hidden = tokens.shape
            tokens = tokens[~bool_masked_pos].reshape(batch_size, -1, hidden)

        return tokens
+
+
class VideoMAEPatchEmbeddings(nn.Module):
    """
    Video to Patch Embedding. Turns a batch of videos of shape (batch_size, num_frames, num_channels,
    height, width) into a tensor of shape (batch_size, seq_len, hidden_size) to be consumed by a
    Transformer encoder.

    seq_len (the number of patches) equals
    (num_frames // tubelet_size) * (height // patch_size) * (width // patch_size).
    """

    def __init__(self, config):
        super().__init__()

        # Normalize scalar sizes to (height, width) pairs.
        image_size = (
            config.image_size
            if isinstance(config.image_size, collections.abc.Iterable)
            else (config.image_size, config.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        self.image_size = image_size
        self.patch_size = patch_size
        self.tubelet_size = int(config.tubelet_size)
        self.num_channels = config.num_channels
        # One token per (tubelet, patch_h, patch_w) cell of the video volume.
        self.num_patches = (
            (image_size[1] // patch_size[1])
            * (image_size[0] // patch_size[0])
            * (config.num_frames // self.tubelet_size)
        )
        # With stride == kernel size the 3D conv slices the video into non-overlapping
        # tubelets and projects each one to the hidden size.
        self.projection = nn.Conv3d(
            in_channels=self.num_channels,
            out_channels=config.hidden_size,
            kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]),
            stride=(self.tubelet_size, patch_size[0], patch_size[1]),
        )

    def forward(self, pixel_values):
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        if height != self.image_size[0] or width != self.image_size[1]:
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
            )
        # Conv3d expects (batch_size, num_channels, num_frames, height, width).
        features = self.projection(pixel_values.permute(0, 2, 1, 3, 4))
        # Collapse the spatio-temporal grid into one sequence dimension.
        return features.flatten(2).transpose(1, 2)
+
+
class VideoMAESelfAttention(nn.Module):
    """Multi-head self-attention.

    Bias handling: the `query` and `value` projections get learnable biases when
    `config.qkv_bias` is set, while the `key` projection always uses a zero,
    non-trainable bias (see `forward`).
    """

    def __init__(self, config: "VideoMAEConfig") -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            # Fix: the original f-string contained a stray comma inside the braces
            # (`{config.hidden_size,}`), which rendered the hidden size as a 1-tuple.
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # Projections are created without bias; the q/v biases are separate parameters.
        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False)

        if config.qkv_bias:
            self.q_bias = nn.Parameter(torch.zeros(self.all_head_size))
            self.v_bias = nn.Parameter(torch.zeros(self.all_head_size))
        else:
            self.q_bias = None
            self.v_bias = None

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        """Reshape (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)."""
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        """Return `(context,)`, or `(context, attention_probs)` when `output_attentions` is set."""
        # The key gets a zero (non-trainable) bias whenever the q/v biases exist.
        k_bias = torch.zeros_like(self.v_bias, requires_grad=False) if self.q_bias is not None else None
        keys = nn.functional.linear(input=hidden_states, weight=self.key.weight, bias=k_bias)
        values = nn.functional.linear(input=hidden_states, weight=self.value.weight, bias=self.v_bias)
        queries = nn.functional.linear(input=hidden_states, weight=self.query.weight, bias=self.q_bias)

        key_layer = self.transpose_for_scores(keys)
        value_layer = self.transpose_for_scores(values)
        query_layer = self.transpose_for_scores(queries)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        # Merge heads back: (batch, heads, seq, head_size) -> (batch, seq, all_head_size).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
+
+
class VideoMAESdpaSelfAttention(VideoMAESelfAttention):
    """SDPA-based variant of `VideoMAESelfAttention`.

    Attention probabilities are never materialized, so the second element of the
    returned tuple is always `None` (`output_attentions` cannot be honored here).
    """

    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__(config)
        self.attention_probs_dropout_prob = config.attention_probs_dropout_prob

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        # Same bias convention as the eager path: zero key bias whenever q/v biases exist.
        key_bias = None if self.q_bias is None else torch.zeros_like(self.v_bias, requires_grad=False)
        query_layer = self.transpose_for_scores(
            nn.functional.linear(input=hidden_states, weight=self.query.weight, bias=self.q_bias)
        )
        key_layer = self.transpose_for_scores(
            nn.functional.linear(input=hidden_states, weight=self.key.weight, bias=key_bias)
        )
        value_layer = self.transpose_for_scores(
            nn.functional.linear(input=hidden_states, weight=self.value.weight, bias=self.v_bias)
        )

        # head_mask is forwarded as the attention mask; dropout only applies in training mode.
        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_layer,
            key_layer,
            value_layer,
            head_mask,
            self.attention_probs_dropout_prob if self.training else 0.0,
            is_causal=False,
            scale=None,
        )

        # Merge heads back: (batch, heads, seq, head_size) -> (batch, seq, all_head_size).
        attn_output = attn_output.permute(0, 2, 1, 3).contiguous()
        attn_output = attn_output.view(attn_output.size()[:-2] + (self.all_head_size,))

        return attn_output, None
+
+
# Adapted from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->VideoMAE
class VideoMAESelfOutput(nn.Module):
    """
    Projects the attention output back to the hidden size and applies dropout. The residual
    connection is defined in VideoMAELayer instead of here (pre-layernorm architecture),
    which is why `input_tensor` is accepted but unused.
    """

    def __init__(self, config: "VideoMAEConfig") -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        return self.dropout(self.dense(hidden_states))
+
+
# Adapted from transformers.models.vit.modeling_vit.ViTAttention with ViT->VideoMAE
class VideoMAEAttention(nn.Module):
    """Self-attention followed by the output projection, with support for head pruning."""

    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.attention = VideoMAESelfAttention(config)
        self.output = VideoMAESelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        """Remove the given heads and shrink the q/k/v and output projections accordingly."""
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Drop the pruned heads' rows/columns from the projections.
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Keep bookkeeping in sync with the reduced number of heads.
        self.attention.num_attention_heads -= len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        attn_outputs = self.attention(hidden_states, head_mask, output_attentions)

        projected = self.output(attn_outputs[0], hidden_states)

        # Re-attach the attention probabilities when they were requested.
        return (projected,) + attn_outputs[1:]
+
+
# Copied from transformers.models.vit.modeling_vit.ViTSdpaAttention with ViT->VideoMAE
class VideoMAESdpaAttention(VideoMAEAttention):
    # Same wiring as VideoMAEAttention, but swaps in the SDPA-based self-attention module.
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__(config)
        self.attention = VideoMAESdpaSelfAttention(config)
+
+
# Adapted from transformers.models.vit.modeling_vit.ViTIntermediate ViT->VideoMAE
class VideoMAEIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size, then the configured activation."""

    def __init__(self, config: "VideoMAEConfig") -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act is either a string key into ACT2FN or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
+
+
# Adapted from transformers.models.vit.modeling_vit.ViTOutput ViT->VideoMAE
class VideoMAEOutput(nn.Module):
    """Feed-forward contraction back to hidden_size, with dropout and the residual connection."""

    def __init__(self, config: "VideoMAEConfig") -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # The transformer block's second residual connection is applied here.
        return self.dropout(self.dense(hidden_states)) + input_tensor
+
+
# Maps config._attn_implementation to the attention module instantiated by VideoMAELayer.
VIDEOMAE_ATTENTION_CLASSES = {"eager": VideoMAEAttention, "sdpa": VideoMAESdpaAttention}
+
+
# Adapted from transformers.models.vit.modeling_vit.ViTLayer with ViT->VideoMAE,VIT->VIDEOMAE
class VideoMAELayer(nn.Module):
    """One pre-layernorm transformer block (the Block class in the timm implementation)."""

    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = VIDEOMAE_ATTENTION_CLASSES[config._attn_implementation](config)
        self.intermediate = VideoMAEIntermediate(config)
        self.output = VideoMAEOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        # Pre-norm: layernorm is applied before self-attention.
        attn_outputs = self.attention(
            self.layernorm_before(hidden_states),
            head_mask,
            output_attentions=output_attentions,
        )

        # First residual connection.
        hidden_states = attn_outputs[0] + hidden_states

        # Feed-forward sub-block, also pre-norm; VideoMAEOutput adds the second residual.
        mlp_output = self.output(self.intermediate(self.layernorm_after(hidden_states)), hidden_states)

        # Append the attention probabilities when they were requested.
        return (mlp_output,) + attn_outputs[1:]
+
+
# Adapted from transformers.models.vit.modeling_vit.ViTEncoder with ViT->VideoMAE
class VideoMAEEncoder(nn.Module):
    """Stack of `config.num_hidden_layers` VideoMAELayer blocks."""

    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([VideoMAELayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        """Run the transformer stack, optionally collecting per-layer hidden states/attentions."""
        collected_states = () if output_hidden_states else None
        collected_attentions = () if output_attentions else None

        for layer_index, block in enumerate(self.layer):
            if output_hidden_states:
                # Record the input to each layer (the embeddings are included once up front).
                collected_states = collected_states + (hidden_states,)

            mask_for_layer = None if head_mask is None else head_mask[layer_index]

            if self.gradient_checkpointing and self.training:
                # Trade compute for memory: re-run the block during the backward pass.
                block_outputs = self._gradient_checkpointing_func(
                    block.__call__,
                    hidden_states,
                    mask_for_layer,
                    output_attentions,
                )
            else:
                block_outputs = block(hidden_states, mask_for_layer, output_attentions)

            hidden_states = block_outputs[0]

            if output_attentions:
                collected_attentions = collected_attentions + (block_outputs[1],)

        if output_hidden_states:
            collected_states = collected_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, collected_states, collected_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=collected_states,
            attentions=collected_attentions,
        )
+
+
class VideoMAEPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = VideoMAEConfig
    base_model_prefix = "videomae"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv3d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
+
+
# Docstring fragment prepended to the model classes below via `add_start_docstrings`.
VIDEOMAE_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`VideoMAEConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

# Argument documentation injected into forward methods via `add_start_docstrings_to_model_forward`.
VIDEOMAE_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`VideoMAEImageProcessor.__call__`] for details.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
+
+
@add_start_docstrings(
    "The bare VideoMAE Model transformer outputting raw hidden-states without any specific head on top.",
    VIDEOMAE_START_DOCSTRING,
)
class VideoMAEModel(VideoMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = VideoMAEEmbeddings(config)
        self.encoder = VideoMAEEncoder(config)

        # With use_mean_pooling, the final layernorm is skipped here and the encoder
        # output is returned un-normalized (see forward).
        if config.use_mean_pooling:
            self.layernorm = None
        else:
            self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the
            batch must have the same number of masked patches. If `None`, then all patches are considered. Sequence
            length is `(num_frames // tubelet_size) * (image_size // patch_size) ** 2`.

        Returns:

        Examples:

        ```python
        >>> import av
        >>> import numpy as np

        >>> from transformers import AutoImageProcessor, VideoMAEModel
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 16 frames
        >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container, indices)

        >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
        >>> model = VideoMAEModel.from_pretrained("MCG-NJU/videomae-base")

        >>> # prepare video for the model
        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 1568, 768]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        # When bool_masked_pos is given, the embeddings keep only the visible (non-masked)
        # patches, so the sequence length shrinks here.
        embedding_output = self.embeddings(pixel_values, bool_masked_pos)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        # self.layernorm is None when config.use_mean_pooling is set (see __init__).
        if self.layernorm is not None:
            sequence_output = self.layernorm(sequence_output)

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
+
+
class VideoMAEDecoder(nn.Module):
    """
    Transformer decoder used during pre-training: maps token embeddings back to per-patch
    pixel reconstruction logits. Note: `num_patches` is accepted but not used in the body.
    """

    def __init__(self, config, num_patches):
        super().__init__()

        # Output size per token: all pixel values of one tubelet (channels * tubelet_size * patch^2).
        decoder_num_labels = config.num_channels * config.tubelet_size * config.patch_size**2

        # Clone the config and swap in the decoder-specific dimensions, then reuse VideoMAELayer.
        decoder_config = deepcopy(config)
        decoder_config.hidden_size = config.decoder_hidden_size
        decoder_config.num_hidden_layers = config.decoder_num_hidden_layers
        decoder_config.num_attention_heads = config.decoder_num_attention_heads
        decoder_config.intermediate_size = config.decoder_intermediate_size
        self.decoder_layers = nn.ModuleList(
            [VideoMAELayer(decoder_config) for _ in range(config.decoder_num_hidden_layers)]
        )

        self.norm = nn.LayerNorm(config.decoder_hidden_size)
        self.head = (
            nn.Linear(config.decoder_hidden_size, decoder_num_labels) if decoder_num_labels > 0 else nn.Identity()
        )

        self.gradient_checkpointing = False
        self.config = config

    def forward(
        self,
        hidden_states,
        return_token_num,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # apply Transformer layers (blocks)
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        for i, layer_module in enumerate(self.decoder_layers):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                # Recompute the block in the backward pass to save memory; no head mask is used.
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    None,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, head_mask=None, output_attentions=output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if return_token_num > 0:
            # Keep only the last `return_token_num` tokens — the caller concatenates the masked
            # positions at the end of the sequence, so these are the tokens to reconstruct.
            hidden_states = hidden_states[:, -return_token_num:]

        # predictor projection
        hidden_states = self.norm(hidden_states)
        logits = self.head(hidden_states)

        if not return_dict:
            return tuple(v for v in [logits, all_hidden_states, all_self_attentions] if v is not None)
        return VideoMAEDecoderOutput(logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions)
+
+
+@add_start_docstrings(
+ "The VideoMAE Model transformer with the decoder on top for self-supervised pre-training.",
+ VIDEOMAE_START_DOCSTRING,
+)
+class VideoMAEForPreTraining(VideoMAEPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.videomae = VideoMAEModel(config)
+
+ self.encoder_to_decoder = nn.Linear(config.hidden_size, config.decoder_hidden_size, bias=False)
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size))
+ self.position_embeddings = get_sinusoid_encoding_table(
+ self.videomae.embeddings.num_patches, config.decoder_hidden_size
+ )
+
+ self.decoder = VideoMAEDecoder(config, num_patches=self.videomae.embeddings.num_patches)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=VideoMAEForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ bool_masked_pos: torch.BoolTensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, VideoMAEForPreTrainingOutput]:
+ r"""
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the
+ batch must have the same number of masked patches. Sequence length is `(num_frames // tubelet_size) *
+ (image_size // patch_size) ** 2`.
+
+ Returns:
+
+ Examples:
+ ```python
+ >>> from transformers import AutoImageProcessor, VideoMAEForPreTraining
+ >>> import numpy as np
+ >>> import torch
+
+ >>> num_frames = 16
+ >>> video = list(np.random.randint(0, 256, (num_frames, 3, 224, 224)))
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
+ >>> model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base")
+
+ >>> pixel_values = image_processor(video, return_tensors="pt").pixel_values
+
+ >>> num_patches_per_frame = (model.config.image_size // model.config.patch_size) ** 2
+ >>> seq_length = (num_frames // model.config.tubelet_size) * num_patches_per_frame
+ >>> bool_masked_pos = torch.randint(0, 2, (1, seq_length)).bool()
+
+ >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
+ >>> loss = outputs.loss
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.videomae(
+ pixel_values,
+ bool_masked_pos=bool_masked_pos,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ sequence_output = self.encoder_to_decoder(
+ sequence_output
+ ) # [batch_size, num_visible_patches, decoder_hidden_size]
+ batch_size, seq_len, num_channels = sequence_output.shape
+
+ # we don't unshuffle the correct visible token order, but shuffle the position embeddings accordingly.
+ if bool_masked_pos is None:
+ raise ValueError("One must provided a boolean mask ")
+ expanded_position_embeddings = self.position_embeddings.expand(batch_size, -1, -1).type_as(pixel_values)
+ expanded_position_embeddings = expanded_position_embeddings.to(pixel_values.device).clone().detach()
+ pos_emb_visible = expanded_position_embeddings[~bool_masked_pos].reshape(batch_size, -1, num_channels)
+ pos_emb_mask = expanded_position_embeddings[bool_masked_pos].reshape(batch_size, -1, num_channels)
+
+ # [batch_size, num_patches, decoder_hidden_size]
+ x_full = torch.cat([sequence_output + pos_emb_visible, self.mask_token + pos_emb_mask], dim=1)
+
+ # [batch_size, num_masked_patches, num_channels * patch_size * patch_size]
+ decoder_outputs = self.decoder(x_full, pos_emb_mask.shape[1])
+ logits = decoder_outputs.logits
+
+ loss = None
+ with torch.no_grad():
+ # calculate the labels to be predicted
+ if self.config.num_channels != 3:
+ # Can't unnormalize with default means/stds
+ frames = pixel_values
+ else:
+ # first, unnormalize the frames
+ device = pixel_values.device
+ dtype = pixel_values.dtype
+ mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device=device, dtype=dtype)[None, None, :, None, None]
+ std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device=device, dtype=dtype)[None, None, :, None, None]
+ frames = pixel_values * std + mean # in [0, 1]
+
+ batch_size, time, num_channels, height, width = frames.shape
+ tubelet_size, patch_size = self.config.tubelet_size, self.config.patch_size
+ if self.config.norm_pix_loss:
+ # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size)
+ frames = frames.view(
+ batch_size,
+ time // tubelet_size,
+ tubelet_size,
+ num_channels,
+ height // patch_size,
+ patch_size,
+ width // patch_size,
+ patch_size,
+ )
+ # step 2: move dimensions to concatenate:
+ frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
+ # step 3: concatenate:
+ frames = frames.view(
+ batch_size,
+ time // tubelet_size * height // patch_size * width // patch_size,
+ tubelet_size * patch_size * patch_size,
+ num_channels,
+ )
+ # step 4: normalize. The authors find that the mean is about 0.48 and standard deviation is about 0.08.
+ frames_norm = (frames - frames.mean(dim=-2, keepdim=True)) / (
+ frames.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6
+ )
+ # step 5: reshape to (batch_size, T//ts * H//ps * W//ps, ts * ps * ps * C)
+ videos_patch = frames_norm.view(
+ batch_size,
+ time // tubelet_size * height // patch_size * width // patch_size,
+ tubelet_size * patch_size * patch_size * num_channels,
+ )
+ else:
+ if self.config.num_channels != 3:
+ raise ValueError(
+ "Can't unnormalize non-RGB images. Consider setting config.norm_pix_loss to False."
+ )
+ # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size)
+ frames = frames.view(
+ batch_size,
+ time // tubelet_size,
+ tubelet_size,
+ num_channels,
+ height // patch_size,
+ patch_size,
+ width // patch_size,
+ patch_size,
+ )
+ # step 2: move dimensions to concatenate: (batch_size, T//ts, H//ps, W//ps, ts, ps, ps, C)
+ frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
+ # step 3: concatenate
+ videos_patch = frames.view(
+ batch_size,
+ time // tubelet_size * height // patch_size * width // patch_size,
+ tubelet_size * patch_size * patch_size * num_channels,
+ )
+
+ batch_size, _, num_channels = videos_patch.shape
+ labels = videos_patch[bool_masked_pos].reshape(batch_size, -1, num_channels)
+
+ loss_fct = MSELoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return VideoMAEForPreTrainingOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+    """VideoMAE Model transformer with a video classification head on top (a linear layer on top of the average pooled hidden
+    states of all tokens) e.g. for ImageNet.""",
+    VIDEOMAE_START_DOCSTRING,
+)
+class VideoMAEForVideoClassification(VideoMAEPreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+
+        self.num_labels = config.num_labels
+        self.videomae = VideoMAEModel(config)
+
+        # Classifier head: LayerNorm over mean-pooled tokens when configured,
+        # otherwise the first token's hidden state is used unnormalized (see forward).
+        self.fc_norm = nn.LayerNorm(config.hidden_size) if config.use_mean_pooling else None
+        # Identity when num_labels == 0, so the model can be used as a feature extractor.
+        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING)
+    @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
+    def forward(
+        self,
+        pixel_values: Optional[torch.Tensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        labels: Optional[torch.Tensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, ImageClassifierOutput]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+
+        Returns:
+
+        Examples:
+
+        ```python
+        >>> import av
+        >>> import torch
+        >>> import numpy as np
+
+        >>> from transformers import AutoImageProcessor, VideoMAEForVideoClassification
+        >>> from huggingface_hub import hf_hub_download
+
+        >>> np.random.seed(0)
+
+
+        >>> def read_video_pyav(container, indices):
+        ...     '''
+        ...     Decode the video with PyAV decoder.
+        ...     Args:
+        ...         container (`av.container.input.InputContainer`): PyAV container.
+        ...         indices (`List[int]`): List of frame indices to decode.
+        ...     Returns:
+        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
+        ...     '''
+        ...     frames = []
+        ...     container.seek(0)
+        ...     start_index = indices[0]
+        ...     end_index = indices[-1]
+        ...     for i, frame in enumerate(container.decode(video=0)):
+        ...         if i > end_index:
+        ...             break
+        ...         if i >= start_index and i in indices:
+        ...             frames.append(frame)
+        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])
+
+
+        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
+        ...     '''
+        ...     Sample a given number of frame indices from the video.
+        ...     Args:
+        ...         clip_len (`int`): Total number of frames to sample.
+        ...         frame_sample_rate (`int`): Sample every n-th frame.
+        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
+        ...     Returns:
+        ...         indices (`List[int]`): List of sampled frame indices
+        ...     '''
+        ...     converted_len = int(clip_len * frame_sample_rate)
+        ...     end_idx = np.random.randint(converted_len, seg_len)
+        ...     start_idx = end_idx - converted_len
+        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
+        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
+        ...     return indices
+
+
+        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
+        >>> file_path = hf_hub_download(
+        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
+        ... )
+        >>> container = av.open(file_path)
+
+        >>> # sample 16 frames
+        >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
+        >>> video = read_video_pyav(container, indices)
+
+        >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
+        >>> model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
+
+        >>> inputs = image_processor(list(video), return_tensors="pt")
+
+        >>> with torch.no_grad():
+        ...     outputs = model(**inputs)
+        ...     logits = outputs.logits
+
+        >>> # model predicts one of the 400 Kinetics-400 classes
+        >>> predicted_label = logits.argmax(-1).item()
+        >>> print(model.config.id2label[predicted_label])
+        eating spaghetti
+        ```"""
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.videomae(
+            pixel_values,
+            head_mask=head_mask,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        sequence_output = outputs[0]
+
+        # Pool token representations into a single vector per video:
+        # either normalized mean over all tokens, or the first token's state.
+        if self.fc_norm is not None:
+            sequence_output = self.fc_norm(sequence_output.mean(1))
+        else:
+            sequence_output = sequence_output[:, 0]
+
+        logits = self.classifier(sequence_output)
+
+        loss = None
+        if labels is not None:
+            # Infer the problem type once from num_labels and the label dtype,
+            # then cache it on the config for subsequent calls.
+            if self.config.problem_type is None:
+                if self.num_labels == 1:
+                    self.config.problem_type = "regression"
+                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+                    self.config.problem_type = "single_label_classification"
+                else:
+                    self.config.problem_type = "multi_label_classification"
+
+            if self.config.problem_type == "regression":
+                loss_fct = MSELoss()
+                if self.num_labels == 1:
+                    loss = loss_fct(logits.squeeze(), labels.squeeze())
+                else:
+                    loss = loss_fct(logits, labels)
+            elif self.config.problem_type == "single_label_classification":
+                loss_fct = CrossEntropyLoss()
+                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+            elif self.config.problem_type == "multi_label_classification":
+                loss_fct = BCEWithLogitsLoss()
+                loss = loss_fct(logits, labels)
+
+        if not return_dict:
+            # Tuple output: prepend loss only when it was computed.
+            output = (logits,) + outputs[1:]
+            return ((loss,) + output) if loss is not None else output
+
+        return ImageClassifierOutput(
+            loss=loss,
+            logits=logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+
+# Public API of this module, consumed by the transformers lazy-import machinery.
+__all__ = ["VideoMAEForPreTraining", "VideoMAEModel", "VideoMAEPreTrainedModel", "VideoMAEForVideoClassification"]